+ prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxr->rx_prod = prod;
+}
+
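+/*
+ * Detach and return the mbuf at consumer index 'cons'; the ring slot is
+ * left NULL until the producer path reposts a buffer there.
+ */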
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
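+/*
+ * Handle a TPA start completion: claim the rx buffer that begins the
+ * aggregation, park it in the per-aggregation tpa_info slot, and seed
+ * its mbuf metadata (length, RSS/FDIR, VLAN, L4 checksum) from the two
+ * start completion records.
+ */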
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t agg_id;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->agg_count = 0;
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+	/* Consume and immediately repost the rx buffer at the next index. */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
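+/*
+ * Check whether all 'agg_bufs' aggregation completions following
+ * 'raw_cp_cons' are available: completions are written in order, so it
+ * is sufficient to test the valid bit of the last one against the
+ * current completion ring phase.
+ */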
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ cpr->valid = FLIP_VALID(raw_cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/*
+ * TPA consumes aggregation buffers out of order; refill only the
+ * contiguous run of freed slots starting at the current producer.
+ */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ PMD_DRV_LOG(ERR,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
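+/*
+ * Append 'agg_buf' aggregation buffers to 'mbuf'. On Thor-based TPA the
+ * aggregation completions were previously buffered in tpa_info->agg_arr;
+ * otherwise they are read directly from the completion ring.
+ */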
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+ bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+
+ if (is_thor_tpa) {
+ rxcmp = (void *)&tpa_info->agg_arr[i];
+ } else {
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+ }
+
+#ifdef BNXT_DEBUG
+		/* cp_cons is only set on the non-Thor TPA path. */
+		if (!is_thor_tpa)
+			bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+		/*
+		 * Aggregation buffers are consumed out of order by the TPA
+		 * module, so use a bitmap to track the freed slots that
+		 * still need to be reallocated and posted to the NIC.
+		 */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
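+/*
+ * Handle a TPA end completion: fetch the aggregation's head mbuf, chain
+ * any outstanding aggregation buffers onto it, and replace
+ * tpa_info->mbuf with a freshly allocated buffer for the next
+ * aggregation. Returns the completed packet, or NULL on failure.
+ */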
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t agg_id;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ uint8_t payload_offset;
+ struct bnxt_tpa_info *tpa_info;
+
+ if (BNXT_CHIP_THOR(rxq->bp)) {
+ struct rx_tpa_v2_end_cmpl *th_tpa_end;
+ struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
+
+ th_tpa_end = (void *)tpa_end;
+ th_tpa_end1 = (void *)tpa_end1;
+ agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
+ payload_offset = th_tpa_end1->payload_offset;
+ } else {
+ agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
+ agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ payload_offset = tpa_end->payload_offset;
+ }
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+	if (agg_bufs)
+		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
+ mbuf->l4_len = payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data) {
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ return NULL;
+ }
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
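+/*
+ * Derive the mbuf packet type from the completion flags. The tunnel
+ * checksum bit (t_ipcs) selects inner vs. outer L3/L4 ptypes, and the
+ * IP_TYPE bit selects IPv6 vs. IPv4.
+ */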
+static uint32_t
+bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t l3, pkt_type = 0;
+ uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
+ uint32_t flags_type;
+
+ vlan = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
+ pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
+
+ t_ipcs = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
+ ip6 = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
+
+ flags_type = rxcmp->flags_type &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+
+ if (!t_ipcs && !ip6)
+ l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!t_ipcs && ip6)
+ l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (t_ipcs && !ip6)
+ l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else
+ l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+ switch (flags_type) {
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_TCP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_UDP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
+ pkt_type |= l3;
+ break;
+ }
+
+ return pkt_type;
+}
+
+#ifdef RTE_LIBRTE_IEEE1588
+static void
+bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
+{
+ uint64_t systime_cycles = 0;
+
+ if (!BNXT_CHIP_THOR(bp))
+ return;
+
+	/*
+	 * On Thor, Rx timestamps are provided directly in the Rx
+	 * completion records, but only the lower 32 bits of the
+	 * timestamp are present. The driver reads the current 48-bit
+	 * free-running timer with the HWRM_PORT_TS_QUERY command and
+	 * combines the upper 16 bits of the response with the lower
+	 * 32 bits from the completion to produce the 48-bit Rx
+	 * timestamp.
+	 */
+ bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+ &systime_cycles);
+ bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
+ bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
+}
+#endif
+
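+/*
+ * Recover the flow mark for a truflow (ULP) managed flow: rebuild the
+ * full cfa_code from the completion and metadata according to the meta
+ * format (LFID/EM/EEM), then look the mark up in the ULP mark database.
+ */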
+static void
+bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t cfa_code;
+ uint32_t meta_fmt;
+ uint32_t meta;
+ bool gfid = false;
+ uint32_t mark_id;
+ uint32_t flags2;
+ uint32_t gfid_support = 0;
+ int rc;
+ uint32_t vfr_flag;
+
+ if (BNXT_GFID_ENABLED(bp))
+ gfid_support = 1;
+
+ cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+ meta = rte_le_to_cpu_32(rxcmp1->metadata);
+
+	/*
+	 * Bits [6:4] of flags2 indicate whether the flow was matched in
+	 * the TCAM, EM, or EEM table.
+	 */
+ meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+ BNXT_CFA_META_FMT_SHFT;
+
+ switch (meta_fmt) {
+ case 0:
+ if (gfid_support) {
+ /* Not an LFID or GFID, a flush cmd. */
+ goto skip_mark;
+ } else {
+ /* LFID mode, no vlan scenario */
+ gfid = false;
+ }
+ break;
+ case 4:
+ case 5:
+ /*
+ * EM/TCAM case
+ * Assume that EM doesn't support Mark due to GFID
+ * collisions with EEM. Simply return without setting the mark
+ * in the mbuf.
+ */
+ if (BNXT_CFA_META_EM_TEST(meta)) {
+			/* EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
+ gfid = true;
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+ cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+ } else {
+ /*
+ * It is a TCAM entry, so it is an LFID.
+ * The TCAM IDX and Mode can also be determined
+ * by decoding the meta_data. We are not
+ * using these for now.
+ */
+ }
+ break;
+ case 6:
+ case 7:
+ /* EEM Case, only using gfid in EEM for now. */
+ gfid = true;
+
+		/*
+		 * For EEM flows, the first part of cfa_code is 16 bits.
+		 * The second part is embedded in the metadata field from
+		 * bit 19 onwards: the driver ignores the low 19 bits of
+		 * metadata and uses the next 12 bits as the upper 12 bits
+		 * of cfa_code.
+		 */
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+ cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+ break;
+ default:
+ /* For other values, the cfa_code is assumed to be an LFID. */
+ break;
+ }
+
+ rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
+ cfa_code, &vfr_flag, &mark_id);
+ if (!rc) {
+ /* Got the mark, write it to the mbuf and return */
+ mbuf->hash.fdir.hi = mark_id;
+ mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
+ mbuf->hash.fdir.id = rxcmp1->cfa_code;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ return;
+ }
+
+skip_mark:
+ mbuf->hash.fdir.hi = 0;
+ mbuf->hash.fdir.id = 0;
+}
+
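+/*
+ * Legacy mark path: the 16-bit cfa_code from the completion indexes the
+ * per-port mark table directly.
+ */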
+void bnxt_set_mark_in_mbuf(struct bnxt *bp,
+ struct rx_pkt_cmpl_hi *rxcmp1,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t cfa_code = 0;
+ uint8_t meta_fmt = 0;
+ uint16_t flags2 = 0;
+ uint32_t meta = 0;
+
+ cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+ if (!cfa_code)
+ return;
+
+	if (!bp->mark_table[cfa_code].valid)
+		return;
+
+ flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
+ meta = rte_le_to_cpu_32(rxcmp1->metadata);
+ if (meta) {
+ meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+
+		/*
+		 * Bits [6:4] of flags2 indicate whether the flow was
+		 * matched in the TCAM, EM, or EEM table.
+		 */
+ meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+ BNXT_CFA_META_FMT_SHFT;
+
+		/*
+		 * meta_fmt == 4 => 'b100 => 'b10x => EM
+		 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
+		 * meta_fmt == 6 => 'b110 => 'b11x => EEM
+		 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN
+		 */
+ meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
+ }
+
+ mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;