+ /*
+ * NOTE(review): fragment of an rx descriptor status handler
+ * (presumably bnxt_rx_descriptor_status) — the enclosing function's
+ * signature and tail are outside this hunk; confirm against full file.
+ */
+ cpr = rxq->cp_ring;
+
+ /*
+ * For the vector receive case, the completion at the requested
+ * offset can be indexed directly.
+ */
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+ if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t cons;
+
+ /* Check status of completion descriptor. */
+ raw_cons = cpr->cp_raw_cons +
+ offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2);
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ /* A valid completion at this slot means the packet is ready. */
+ if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ return RTE_ETH_RX_DESC_DONE;
+
+ /* Check whether rx desc has an mbuf attached. */
+ /*
+ * NOTE(review): raw_cons / 2 implies completion entries map 2:1
+ * onto rx ring slots in vector mode — confirm ring geometry.
+ */
+ cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2);
+ /* Slots inside the rearm window have no mbuf attached yet. */
+ if (cons >= rxq->rxrearm_start &&
+ cons < rxq->rxrearm_start + rxq->rxrearm_nb) {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ return RTE_ETH_RX_DESC_AVAIL;
+ }
+#endif
+
+ /*
+ * For the non-vector receive case, scan the completion ring to
+ * locate the completion descriptor for the requested offset.
+ */
+ raw_cons = cpr->cp_raw_cons;
+ desc = 0;
+ while (1) {
+ uint32_t agg_cnt, cons, cmpl_type;
+
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ /* No more valid completions: requested offset not yet done. */
+ if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ cmpl_type = CMP_TYPE(rxcmp);
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V2:
+ if (desc == offset) {
+ /* opaque carries the rx buffer ring index. */
+ cons = rxcmp->opaque;
+ if (rxr->rx_buf_ring[cons])
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+ /* Skip this completion plus its aggregation entries. */
+ agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp);
+ raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt;
+ desc++;
+ break;
+
+ case CMPL_BASE_TYPE_RX_TPA_END:
+ if (desc == offset)
+ return RTE_ETH_RX_DESC_DONE;
+
+ /* P5 and older chips use different TPA-end layouts. */
+ if (BNXT_CHIP_P5(rxq->bp)) {
+ struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end;
+
+ p5_tpa_end = (void *)rxcmp;
+ agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end);
+ } else {
+ struct rx_tpa_end_cmpl *tpa_end;