+#endif
+
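+/* Detach and return the mbuf posted at slot 'cons' of the Rx ring; the
+ * software ring entry is cleared so the slot can be refilled later.
+ */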
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+				     uint16_t cons)
+{
+	struct bnxt_sw_rx_bd *cons_rx_buf;
+	struct rte_mbuf *mbuf;
+
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+	mbuf = cons_rx_buf->mbuf;
+	cons_rx_buf->mbuf = NULL;
+	return mbuf;
+}
+
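+/* Handle a TPA (LRO) start completion: move the data mbuf from the Rx ring
+ * into the per-aggregation tpa_info slot and fill in its offload metadata
+ * (RSS hash or flow director ID, VLAN tag, L4 checksum status), then recycle
+ * the following Rx ring entry.
+ */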
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+			   struct rx_tpa_start_cmpl *tpa_start,
+			   struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+		RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+	uint16_t data_cons;
+	struct bnxt_tpa_info *tpa_info;
+	struct rte_mbuf *mbuf;
+
+	data_cons = tpa_start->opaque;
+	tpa_info = &rxr->tpa_info[agg_id];
+
+	mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+	tpa_info->mbuf = mbuf;
+	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+	mbuf->nb_segs = 1;
+	mbuf->next = NULL;
+	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->port = rxq->port_id;
+	mbuf->ol_flags = PKT_RX_LRO;
+	if (likely(tpa_start->flags_type &
+		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+		mbuf->ol_flags |= PKT_RX_RSS_HASH;
+	} else {
+		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+	}
+	if (tpa_start1->flags2 &
+	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+		mbuf->ol_flags |= PKT_RX_VLAN;
+	}
+	if (likely(tpa_start1->flags2 &
+		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+	/* recycle next mbuf */
+	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
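+/* Check whether all 'agg_bufs' aggregation completions following
+ * 'raw_cp_cons' are present in the completion ring, i.e. whether the last
+ * expected entry already carries the correct valid bit.
+ */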
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+			       uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+	uint16_t last_cp_cons;
+	struct rx_pkt_cmpl *agg_cmpl;
+
+	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+	cpr->valid = FLIP_VALID(raw_cp_cons,
+				cpr->cp_ring_struct->ring_mask,
+				cpr->valid);
+	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consumes aggregation ring buffers out of order, so only replenish the
+ * contiguous run of consumed entries that follows the current producer.
+ */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+	/* TODO batch allocation for better performance */
+	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+			PMD_DRV_LOG(ERR,
+				    "agg mbuf alloc failed: prod=0x%x\n", next);
+			break;
+		}
+		rte_bitmap_clear(rxr->ag_bitmap, next);
+		rxr->ag_prod = next;
+		next = RING_NEXT(rxr->ag_ring_struct, next);
+	}
+
+	return 0;
+}
+
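+/* Chain the aggregation buffers belonging to one packet onto the head mbuf:
+ * walk 'agg_buf' completion entries, append each aggregation mbuf to the
+ * segment list and grow nb_segs/pkt_len accordingly.
+ */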
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+			 uint8_t agg_buf)
+{
+	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	int i;
+	uint16_t cp_cons, ag_cons;
+	struct rx_pkt_cmpl *rxcmp;
+	struct rte_mbuf *last = mbuf;
+
+	for (i = 0; i < agg_buf; i++) {
+		struct bnxt_sw_rx_bd *ag_buf;
+		struct rte_mbuf *ag_mbuf;
+		*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+		cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+		rxcmp = (struct rx_pkt_cmpl *)
+			&cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+		bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+		ag_cons = rxcmp->opaque;
+		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+		ag_buf = &rxr->ag_buf_ring[ag_cons];
+		ag_mbuf = ag_buf->mbuf;
+		RTE_ASSERT(ag_mbuf != NULL);
+
+		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+		mbuf->nb_segs++;
+		mbuf->pkt_len += ag_mbuf->data_len;
+
+		last->next = ag_mbuf;
+		last = ag_mbuf;