/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
#ifdef RTE_LIBRTE_IEEE1588
#include "bnxt_hwrm.h"
#endif

#include <bnxt_tf_common.h>
#include <ulp_mark_mgr.h>
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);

	return data;
}
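
/*
 * Allocate a new mbuf for the given Rx ring slot and point the slot's
 * hardware buffer descriptor at the mbuf's data area.
 */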
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *mbuf;

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
	struct rte_mbuf *mbuf;

	if (rxbd == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
		return -EINVAL;
	}

	if (rx_buf == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
		return -EINVAL;
	}

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}
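
/*
 * Re-post an already-allocated mbuf at the next Rx producer slot instead
 * of allocating a fresh one, and advance the Rx producer index.
 */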
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(prod_rx_buf->mbuf == NULL);
	RTE_ASSERT(mbuf != NULL);

	prod_rx_buf->mbuf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];

	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	rxr->rx_prod = prod;
}
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	RTE_ASSERT(cons_rx_buf->mbuf != NULL);
	mbuf = cons_rx_buf->mbuf;
	cons_rx_buf->mbuf = NULL;

	return mbuf;
}
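
/*
 * Handle a TPA (LRO) start completion: take the mbuf posted at the
 * completion's opaque index as the head of the aggregation, cache it in
 * the per-aggregation tpa_info slot, and recycle the mbuf previously
 * cached there back onto the Rx ring.
 */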
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->agg_count = 0;
	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = PKT_RX_LRO;
	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}
	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* recycle next mbuf */
	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}
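
/*
 * Check whether the completion-ring entries for the expected number of
 * aggregation buffers have all been written by hardware, by testing the
 * valid bit of the last entry in the batch.
 */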
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	cpr->valid = FLIP_VALID(raw_cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);
	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
/* TPA consumes aggregation buffers out of order; allocate replacement data buffers only for the consumed slots. */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

	/* TODO: batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
			PMD_DRV_LOG(ERR,
				    "agg mbuf alloc failed: prod=0x%x\n", next);
			break;
		}
		rte_bitmap_clear(rxr->ag_bitmap, next);
		rxr->ag_prod = next;
		next = RING_NEXT(rxr->ag_ring_struct, next);
	}

	return 0;
}
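
/*
 * Chain the aggregation-ring mbufs referenced by the next agg_buf
 * completions onto the head mbuf, growing pkt_len and nb_segs as each
 * segment is appended.
 */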
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;
	bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);

	for (i = 0; i < agg_buf; i++) {
		struct bnxt_sw_rx_bd *ag_buf;
		struct rte_mbuf *ag_mbuf;

		if (is_thor_tpa) {
			rxcmp = (void *)&tpa_info->agg_arr[i];
		} else {
			*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
			rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];
		}

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = ag_buf->mbuf;
		RTE_ASSERT(ag_mbuf != NULL);

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		ag_buf->mbuf = NULL;

		/*
		 * Aggregation buffers are consumed out of order in the TPA
		 * module, so use a bitmap to track freed slots that still
		 * need to be reallocated and notified to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}
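
/*
 * Handle a TPA (LRO) end completion: decode the aggregation ID and buffer
 * count (the Thor and pre-Thor completion layouts differ), attach any
 * aggregation buffers, and hand back the completed mbuf chain while
 * parking a fresh mbuf in the tpa_info slot for the next aggregation.
 */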
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t agg_id;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	uint8_t payload_offset;
	struct bnxt_tpa_info *tpa_info;

	if (BNXT_CHIP_THOR(rxq->bp)) {
		struct rx_tpa_v2_end_cmpl *th_tpa_end;
		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;

		th_tpa_end = (void *)tpa_end;
		th_tpa_end1 = (void *)tpa_end1;
		agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
		payload_offset = th_tpa_end1->payload_offset;
	} else {
		agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		payload_offset = tpa_end->payload_offset;
	}

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	if (agg_bufs)
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);

	mbuf->l4_len = payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}
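
/*
 * Derive the mbuf packet type from the completion records: L2 from the
 * VLAN metadata flag, L3 from the IP-type and tunnel-checksum flags
 * (outer vs. inner), and L4 from the itype field. For example, a
 * non-tunneled IPv4/TCP completion yields
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */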
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t l3, pkt_type = 0;
	uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
	uint32_t flags_type;

	vlan = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	t_ipcs = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
	ip6 = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

	flags_type = rxcmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

	if (!t_ipcs && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_ipcs && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_ipcs && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	return pkt_type;
}
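
/*
 * The Rx completion carries only the low 32 bits of the PTP timestamp;
 * the helper below rebuilds the full 48-bit value. For example, if the
 * free-running timer reads 0x123456789ABC, bits [47:32] (0x1234) are
 * taken from the HWRM query while the low 32 bits come from the
 * completion record.
 */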
#ifdef RTE_LIBRTE_IEEE1588
static void
bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
	uint64_t systime_cycles = 0;

	if (!BNXT_CHIP_THOR(bp))
		return;

	/* On Thor, Rx timestamps are provided directly in the
	 * Rx completion records to the driver. Only 32 bits of
	 * the timestamp are present in the completion. The driver needs
	 * to read the current 48-bit free-running timer using the
	 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
	 * from the HWRM response with the lower 32 bits in the
	 * Rx completion to produce the 48-bit timestamp for the Rx packet.
	 */
	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&systime_cycles);
	bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
	bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
}
#endif
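
/*
 * TruFlow mark handling: decode the CFA code from the completion (its
 * format depends on whether the flow hit the TCAM, EM, or EEM table),
 * look the mark up in the ULP mark database, and write it into the mbuf
 * unless the packet is destined for a VF representor.
 */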
static uint32_t
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
			  struct rte_mbuf *mbuf, uint32_t *vfr_flag)
{
	uint32_t cfa_code;
	uint32_t meta_fmt;
	uint32_t meta;
	bool gfid = false;
	uint32_t mark_id;
	uint32_t flags2;
	uint32_t gfid_support = 0;
	int rc;

	if (BNXT_GFID_ENABLED(bp))
		gfid_support = 1;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);

	/*
	 * The flags field holds extra bits of info in bits [6:4],
	 * which indicate whether the flow is in TCAM, EM, or EEM.
	 */
	meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
		BNXT_CFA_META_FMT_SHFT;

	switch (meta_fmt) {
	case 0:
		if (gfid_support) {
			/* Not an LFID or GFID, a flush cmd. */
			goto skip_mark;
		} else {
			/* LFID mode, no vlan scenario */
			gfid = false;
		}
		break;
	case 4:
	case 5:
		/*
		 * EM/EM with VLAN.
		 * Assume that EM doesn't support Mark due to GFID
		 * collisions with EEM. Simply return without setting the mark
		 * in the mbuf.
		 */
		if (BNXT_CFA_META_EM_TEST(meta)) {
			/* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
			gfid = true;
			meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
			cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
		} else {
			/*
			 * It is a TCAM entry, so it is an LFID.
			 * The TCAM IDX and Mode can also be determined
			 * by decoding the meta_data. We are not
			 * using these for now.
			 */
		}
		break;
	case 6:
	case 7:
		/* EEM case, only using gfid in EEM for now. */
		gfid = true;

		/*
		 * For EEM flows, the first part of cfa_code is 16 bits.
		 * The second part is embedded in the
		 * metadata field from bit 19 onwards. The driver needs to
		 * ignore the first 19 bits of metadata and use the next 12
		 * bits as the higher 12 bits of cfa_code.
		 */
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
		cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
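		/*
		 * Worked example, assuming the shift macros match the
		 * comment above (metadata bits [30:19] supply cfa_code
		 * bits [27:16]): with cfa_code = 0x1234 and
		 * meta = 0xABC << 19, the merged code becomes
		 * (0xABC << 16) | 0x1234 = 0xABC1234.
		 */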
		break;
	default:
		/* For other values, the cfa_code is assumed to be an LFID. */
		break;
	}

	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
				  cfa_code, vfr_flag, &mark_id);
	if (!rc) {
		/* VF to VFR Rx path. So, skip mark_id injection in mbuf */
		if (vfr_flag && *vfr_flag)
			return mark_id;
		/* Got the mark, write it to the mbuf and return */
		mbuf->hash.fdir.hi = mark_id;
		mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
		mbuf->hash.fdir.id = rte_le_to_cpu_16(rxcmp1->cfa_code);
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		return mark_id;
	}

skip_mark:
	mbuf->hash.fdir.hi = 0;
	mbuf->hash.fdir.id = 0;

	return 0;
}
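
/*
 * Legacy (non-TruFlow) mark handling: use the CFA code from the
 * completion as an index into the driver's mark table and, if the entry
 * is valid, report its mark_id through hash.fdir.hi.
 */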
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf)
{
	uint32_t cfa_code = 0;
	uint8_t meta_fmt = 0;
	uint16_t flags2 = 0;
	uint32_t meta = 0;

	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	if (!cfa_code)
		return;

	if (cfa_code && !bp->mark_table[cfa_code].valid)
		return;

	flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);
	if (meta) {
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;

		/* The flags field holds extra bits of info in bits [6:4],
		 * which indicate whether the flow is in TCAM, EM, or EEM.
		 */
		meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
			   BNXT_CFA_META_FMT_SHFT;

		/* meta_fmt == 4 => 'b100 => 'b10x => EM.
		 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
		 * meta_fmt == 6 => 'b110 => 'b11x => EEM
		 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
		 */
		meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
	}

	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
	mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
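
/*
 * Process one Rx completion. Returns 0 for a delivered packet, -EBUSY on
 * a partial completion (try again later), -ENOMEM when the replacement
 * buffer allocation fails (the packet is still delivered), -EINVAL for
 * completions that produce no new mbuf (TPA start/agg records), and
 * -ENODEV when the packet was consumed by a representor port.
 */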
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, prod, cp_cons =
	    RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;
	uint16_t cmp_type;
	uint32_t flags2_f = 0, vfr_flag = 0, mark_id = 0;
	uint16_t flags_type;
	struct bnxt *bp = rxq->bp;

	rxcmp = (struct rx_pkt_cmpl *)
	    &cpr->cp_desc_ring[cp_cons];

	cmp_type = CMP_TYPE(rxcmp);

	if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
		struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
		uint16_t agg_id = rte_le_to_cpu_16(rx_agg->agg_id);
		struct bnxt_tpa_info *tpa_info;

		tpa_info = &rxr->tpa_info[agg_id];
		RTE_ASSERT(tpa_info->agg_count < 16);
		tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
		return -EBUSY;

	cpr->valid = FLIP_VALID(cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);

	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if (cmp_type != 0x11) {
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
			>> RX_PKT_CMPL_AGG_BUFS_SFT;
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	cons = rxcmp->opaque;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = 0;

	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rxcmp->rss_hash;
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	}

	if (BNXT_TRUFLOW_EN(bp))
		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
						    &vfr_flag);
	else
		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);

#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
		bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
	}
#endif

	if (agg_buf)
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);

	if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		mbuf->vlan_tci = rxcmp1->metadata &
			(RX_PKT_CMPL_METADATA_VID_MASK |
			 RX_PKT_CMPL_METADATA_DE |
			 RX_PKT_CMPL_METADATA_PRI_MASK);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}

	flags2_f = flags2_0xf(rxcmp1);

	if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
		if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (IS_IP_TUNNEL_PKT(flags2_f)) {
		if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
			     RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	}

	if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	} else if (IS_L4_TUNNEL_PKT(flags2_f)) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
				    (flags2_f))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
		} else {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
		}
	} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	}

	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

#ifdef BNXT_DEBUG
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);

		rc = -EIO;
		goto next_rx;
	}
#endif
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being called
	 * in it.
	 */
	prod = RING_NEXT(rxr->rx_ring_struct, prod);
	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
		rc = -ENOMEM;
		goto rx;
	}
	rxr->rx_prod = prod;
	/*
	 * All MBUFs are allocated with the same size under DPDK,
	 * no optimization for rx_copy_thresh
	 */
rx:
	*rx_pkt = mbuf;

	if (BNXT_TRUFLOW_EN(bp) &&
	    (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
	    vfr_flag) {
		if (!bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf)) {
			/* Now return an error so that nb_rx_pkts is not
			 * incremented.
			 * This packet was meant to be given to the representor.
			 * So no need to account the packet and give it to
			 * parent Rx burst function.
			 */
			rc = -ENODEV;
			goto next_rx;
		}
	}

next_rx:
	*raw_cons = tmp_raw_cons;

	return rc;
}
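
/*
 * Burst receive entry point: drain up to nb_pkts completions, let the
 * async-event handler see non-Rx completions when no dedicated async
 * completion ring exists, then ring the Rx, aggregation, and completion
 * ring doorbells once per burst rather than per packet.
 */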
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	int nb_rep_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t prod = rxr->rx_prod;
	uint16_t ag_prod = rxr->ag_prod;
	int rc = 0;
	bool evt = false;

	if (unlikely(is_bnxt_in_error(rxq->bp)))
		return 0;

	/* If the Rx queue was stopped, return */
	if (unlikely(!rxq->rx_started ||
		     !rte_spinlock_trylock(&rxq->lock)))
		return 0;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/*
	 * Replenish buffers if needed when a transition has been made from
	 * vector- to non-vector- receive processing.
	 */
	while (unlikely(rxq->rxrearm_nb)) {
		if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
			rxr->rx_prod = rxq->rxrearm_start;
			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
			rxq->rxrearm_start++;
			rxq->rxrearm_nb--;
		} else {
			/* Retry allocation on next call. */
			break;
		}
	}
#endif

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rte_prefetch0(&cpr->cp_desc_ring[cons]);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;
		cpr->valid = FLIP_VALID(cons,
					cpr->cp_ring_struct->ring_mask,
					cpr->valid);

		/* TODO: Avoid magic numbers... */
		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (likely(!rc) || rc == -ENOMEM)
				nb_rx_pkts++;
			if (rc == -EBUSY)	/* partial completion */
				break;
			if (rc == -ENODEV)	/* completion for representor */
				nb_rep_rx_pkts++;
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
			/* If the async event is a fatal error, return */
			if (unlikely(is_bnxt_in_error(rxq->bp)))
				goto done;
		}

		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || evt)
			break;
		/* Post some Rx buffers early in case of larger burst processing */
		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
			bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
	}

	cpr->cp_raw_cons = raw_cons;
	if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
		/*
		 * For the PMD, there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions
		 */
		goto done;
	}

	if (prod != rxr->rx_prod)
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

	/* Ring the AGG ring DB */
	if (ag_prod != rxr->ag_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

	bnxt_db_cq(cpr);

	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
	if (rc == -ENOMEM) {
		int i = RING_NEXT(rxr->rx_ring_struct, prod);
		int cnt = nb_rx_pkts;

		for (; cnt;
			i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];

			/* Buffer already allocated for this index. */
			if (rx_buf->mbuf != NULL)
				continue;

			/* This slot is empty. Alloc buffer for Rx */
			if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
				rxr->rx_prod = i;
				bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
			} else {
				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
				break;
			}
		}
	}

done:
	rte_spinlock_unlock(&rxq->lock);

	return nb_rx_pkts;
}
/*
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 */
uint16_t
bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
		     struct rte_mbuf **rx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
	struct bnxt_rx_queue *rxq;

	if (!bp->rx_queues)
		return;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);

		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}
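
/*
 * Allocate and size the per-queue Rx, completion, and aggregation ring
 * structures. The completion ring is sized at
 * rx_ring_size * (2 + AGG_RING_SIZE_FACTOR), presumably to leave room
 * for the two completion entries a packet can produce plus one per
 * aggregation buffer; every size is rounded up to a power of two so ring
 * indices can be masked instead of taken modulo.
 */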
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->rx_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
					  (2 + AGG_RING_SIZE_FACTOR));
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->ag_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
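
/*
 * Write the same type/length template into every buffer descriptor of a
 * ring and tag each descriptor with its index via the opaque field,
 * which later comes back in completions as rxcmp->opaque.
 */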
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	if (!rx_bd_ring)
		return;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}
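
/*
 * Populate the Rx, aggregation, and (if TPA is enabled) tpa_info mbuf
 * arrays for one queue. A partially filled ring is logged as a warning
 * rather than treated as fatal; only a failed TPA mbuf allocation
 * returns -ENOMEM.
 */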
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t prod, type;
	unsigned int i;
	uint16_t size;

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	prod = rxr->rx_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->rx_buf_ring[i].mbuf)) {
			if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "init'ed rx ring %d with %d/%d mbufs only\n",
					    rxq->queue_id, i, ring->ring_size);
				break;
			}
		}
		rxr->rx_prod = prod;
		prod = RING_NEXT(rxr->rx_ring_struct, prod);
	}

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	prod = rxr->ag_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->ag_buf_ring[i].mbuf)) {
			if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "init'ed AG ring %d with %d/%d mbufs only\n",
					    rxq->queue_id, i, ring->ring_size);
				break;
			}
		}
		rxr->ag_prod = prod;
		prod = RING_NEXT(rxr->ag_ring_struct, prod);
	}
	PMD_DRV_LOG(DEBUG, "AGG Done!\n");

	if (rxr->tpa_info) {
		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (unlikely(!rxr->tpa_info[i].mbuf)) {
				rxr->tpa_info[i].mbuf =
					__bnxt_alloc_rx_data(rxq->mb_pool);
				if (!rxr->tpa_info[i].mbuf) {
					rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
					return -ENOMEM;
				}
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

	return 0;
}