+/* Detach and return the mbuf at Rx ring index 'cons', clearing the slot. */
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct rte_mbuf **cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
+ RTE_ASSERT(*cons_rx_buf != NULL);
+ mbuf = *cons_rx_buf;
+ *cons_rx_buf = NULL;
+
+ return mbuf;
+}
+
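+/*
+ * Record RSS hash, VLAN, CFA code, and L4 checksum validity from a
+ * TPA start completion in the per-aggregation tpa_info state.
+ */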
+static void bnxt_tpa_get_metadata(struct bnxt *bp,
+ struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 0;
+ tpa_info->vlan_valid = 0;
+ tpa_info->hash_valid = 0;
+ tpa_info->l4_csum_valid = 0;
+
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_16(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ tpa_info->hash_valid = 1;
+ tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
+ }
+
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
+ struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
+ struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
+ (void *)tpa_start1;
+
+ if (v2_tpa_start->agg_id &
+ RX_TPA_START_V2_CMPL_METADATA1_VALID) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->vlan =
+ rte_le_to_cpu_16(v2_tpa_start1->metadata0);
+ }
+
+ if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
+ tpa_info->l4_csum_valid = 1;
+
+ return;
+ }
+
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
+ }
+
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ tpa_info->l4_csum_valid = 1;
+}
+
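+/*
+ * Handle a TPA start completion: claim the mbuf indexed by the
+ * completion's opaque field, initialize it from the completion, and
+ * save it in tpa_info until the corresponding TPA end arrives.
+ */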
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t agg_id;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->agg_count = 0;
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+
+ bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
+
+ if (likely(tpa_info->hash_valid)) {
+ mbuf->hash.rss = tpa_info->rss_hash;
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else if (tpa_info->cfa_code_valid) {
+ mbuf->hash.fdir.id = tpa_info->cfa_code;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+
+ if (tpa_info->vlan_valid) {
+ mbuf->vlan_tci = tpa_info->vlan;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ }
+
+ if (likely(tpa_info->l4_csum_valid))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
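+/*
+ * Return nonzero if the last of 'agg_bufs' aggregation completions
+ * following 'raw_cp_cons' is valid, implying hardware has written
+ * all of them.
+ */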
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ cpr->valid = FLIP_VALID(raw_cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consumes agg buffers out of order; refill only contiguous freed slots. */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
+ uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
+ PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
+ raw_next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
+ rxr->ag_raw_prod = raw_next;
+ raw_next = RING_NEXT(raw_next);
+ bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
+ }
+
+ return 0;
+}
+
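+/*
+ * Chain 'agg_buf' aggregation buffers onto 'mbuf'. For P5 TPA, the
+ * aggregation completions come from tpa_info->agg_arr; otherwise they
+ * are read directly from the completion ring.
+ */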
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+ bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
+
+ for (i = 0; i < agg_buf; i++) {
+ struct rte_mbuf **ag_buf;
+ struct rte_mbuf *ag_mbuf;
+
+ if (is_p5_tpa) {
+ /* P5 TPA aggregation completions are gathered in agg_arr. */
+ rxcmp = (void *)&tpa_info->agg_arr[i];
+ } else {
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+ }
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = *ag_buf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ *ag_buf = NULL;
+
+ /*
+ * Aggregation buffers are consumed out of order by the TPA
+ * module, so use a bitmap to track freed slots that must be
+ * reallocated and posted to the NIC.
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ last->next = NULL;
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
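+/*
+ * Handle a TPA end completion: chain any remaining aggregation
+ * buffers onto the mbuf saved at TPA start, allocate a replacement
+ * mbuf for the next aggregation, and return the completed packet.
+ */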
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t agg_id;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ uint8_t payload_offset;
+ struct bnxt_tpa_info *tpa_info;
+
+ if (BNXT_CHIP_P5(rxq->bp)) {
+ struct rx_tpa_v2_end_cmpl *th_tpa_end;
+ struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
+
+ th_tpa_end = (void *)tpa_end;
+ th_tpa_end1 = (void *)tpa_end1;
+ agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
+ payload_offset = th_tpa_end1->payload_offset;
+ } else {
+ agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ payload_offset = tpa_end->payload_offset;
+ }
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ if (agg_bufs)
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
+ mbuf->l4_len = payload_offset;
+
+ /* Allocate a replacement mbuf for the next aggregation on this ID. */
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return NULL;
+ }
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
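+/*
+ * Lookup table mapping Rx completion flag bits to mbuf packet types.
+ * See bnxt_parse_pkt_type() for the index layout.
+ */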
+uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
+
+static void __rte_cold
+bnxt_init_ptype_table(void)
+{
+ uint32_t *pt = bnxt_ptype_table;
+ static bool initialized;
+ int ip6, tun, type;
+ uint32_t l3;
+ int i;
+
+ if (initialized)
+ return;
+
+ for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
+ if (i & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN >> 2))
+ pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ pt[i] = RTE_PTYPE_L2_ETHER;
+
+ /* Recover IP version, tunnel, and ITYPE bits from the index. */
+ ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
+ tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
+ type = (i & 0x78) << 9;
+
+ if (!tun && !ip6)
+ l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!tun && ip6)
+ l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (tun && !ip6)
+ l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else
+ l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+ switch (type) {
+ case RX_PKT_CMPL_FLAGS_ITYPE_ICMP:
+ if (tun)
+ pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+ else
+ pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
+ break;
+ case RX_PKT_CMPL_FLAGS_ITYPE_TCP:
+ if (tun)
+ pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
+ else
+ pt[i] |= l3 | RTE_PTYPE_L4_TCP;
+ break;
+ case RX_PKT_CMPL_FLAGS_ITYPE_UDP:
+ if (tun)
+ pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
+ else
+ pt[i] |= l3 | RTE_PTYPE_L4_UDP;
+ break;
+ case RX_PKT_CMPL_FLAGS_ITYPE_IP:
+ pt[i] |= l3;
+ break;
+ }
+ }
+ initialized = true;
+}
+
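+/* Translate completion flags to an mbuf packet type via bnxt_ptype_table. */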
+static uint32_t
+bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t flags_type, flags2;
+ uint8_t index;
+
+ flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
+ flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+
+ /*
+ * Index format:
+ * bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
+ * bit 1: RX_PKT_CMPL_FLAGS2_IP_TYPE
+ * bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
+ * bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
+ */
+ index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
+ ((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
+ ((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
+
+ return bnxt_ptype_table[index];
+}
+
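+/*
+ * Precompute per-queue tables mapping completion checksum status bits
+ * to mbuf ol_flags, for both good and error cases. Tunneled packets
+ * are handled differently depending on whether outer checksum offload
+ * is enabled.
+ */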
+static void __rte_cold
+bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct rte_eth_conf *dev_conf;
+ bool outer_cksum_enabled;
+ uint64_t offloads;
+ uint32_t *pt;
+ int i;
+
+ dev_conf = &rxq->bp->eth_dev->data->dev_conf;
+ offloads = dev_conf->rxmode.offloads;
+
+ outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+
+ /* Initialize ol_flags table. */
+ pt = rxr->ol_flags_table;
+ for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
+ pt[i] = 0;
+
+ if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
+ pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+
+ if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
+ /* Tunnel case. */
+ if (outer_cksum_enabled) {
+ if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+ pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+ pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+
+ if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
+ pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+ } else {
+ if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
+ pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
+ pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ } else {
+ /* Non-tunnel case. */
+ if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+ pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+ pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ }
+
+ /* Initialize checksum error table. */
+ pt = rxr->ol_flags_err_table;
+ for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
+ pt[i] = 0;
+
+ if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
+ /* Tunnel case. */
+ if (outer_cksum_enabled) {
+ if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_IP_CKSUM_BAD;
+
+ if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;
+
+ if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_L4_CKSUM_BAD;
+
+ if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ } else {
+ if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_IP_CKSUM_BAD;
+
+ if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_L4_CKSUM_BAD;
+ }
+ } else {
+ /* Non-tunnel case. */
+ if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_IP_CKSUM_BAD;
+
+ if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
+ pt[i] |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+}
+
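+/* Set mbuf ol_flags using the precomputed per-queue lookup tables. */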
+static void
+bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
+ struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)