(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
!((raw_cons) & ((ring)->ring_size)))
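+/*
+ * Check a completion's valid bit against an explicitly tracked phase bit
+ * instead of deriving the expected phase from the raw consumer index.
+ * Used by the Rx queue count handler added in this patch.
+ */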
+#define CMPL_VALID(cmp, v) \
+ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v))
+
#define CMP_TYPE(cmp) \
(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
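+/*
+ * The completion ring's valid phase inverts on every wrap, so flip the
+ * tracked value once the masked consumer index reaches the last entry.
+ */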
+#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
struct bnxt_ring *cp_ring_struct;
uint16_t cp_cons;
- bool v;
+ bool valid;
};
#define RX_CMP_L2_ERRORS \
return bnxt_hwrm_port_led_cfg(bp, false);
}
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ uint32_t desc = 0, raw_cons = 0, cons;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_queue *rxq;
+ struct rx_pkt_cmpl *rxcmp;
+ uint16_t cmp_type;
+ uint8_t cmp = 1;
+ bool valid;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ cpr = rxq->cp_ring;
+ valid = cpr->valid;
+
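+ /*
+ * Scan up to nb_rx_desc completion entries, counting the RX packet
+ * completions that carry the tracked valid phase.
+ */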
+ while (raw_cons < rxq->nb_rx_desc) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMPL_VALID(rxcmp, valid))
+ goto nothing_to_do;
+ valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+ cmp_type = CMP_TYPE(rxcmp);
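+ /*
+ * TPA-end and L2 RX completions encode the number of aggregation
+ * buffer entries that follow them; use it to advance past them.
+ */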
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ cmp = (rte_le_to_cpu_32(
+ ((struct rx_tpa_end_cmpl *)
+ (rxcmp))->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+ RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ desc++;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2) {
+ desc++;
+ cmp = (rxcmp->agg_bufs_v1 &
+ RX_PKT_CMPL_AGG_BUFS_MASK) >>
+ RX_PKT_CMPL_AGG_BUFS_SFT;
+ } else {
+ cmp = 1;
+ }
+nothing_to_do:
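+ /*
+ * Advance past the entries consumed by this completion; an
+ * agg_bufs count of zero still consumes the two-entry L2
+ * completion.
+ */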
+ raw_cons += cmp ? cmp : 2;
+ }
+
+ return desc;
+}
+
/*
* Initialization
*/
.dev_led_off = bnxt_dev_led_off_op,
.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+ .rx_queue_count = bnxt_rx_queue_count_op,
};
static bool bnxt_vf_pciid(uint16_t id)
raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
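+ /*
+ * Consuming the aggregation entries can land the consumer on the
+ * last ring entry, so track the valid-phase flip here as well.
+ */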
+ cpr->valid = FLIP_VALID(last_cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
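+ /*
+ * The second half of the completion is valid; flip the tracked
+ * phase if it occupies the last ring entry.
+ */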
+ cpr->valid = FLIP_VALID(cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+
cmp_type = CMP_TYPE(rxcmp);
if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
/* Handle RX burst request */
while (1) {
- int rc;
-
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rte_prefetch0(&cpr->cp_desc_ring[cons]);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
break;
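+ /* Update the tracked valid phase before parsing the completion. */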
+ cpr->valid = FLIP_VALID(cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
/* TODO: Avoid magic numbers... */
if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {