1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
9 #include <rte_bitmap.h>
10 #include <rte_byteorder.h>
11 #include <rte_malloc.h>
12 #include <rte_memory.h>
15 #include "bnxt_reps.h"
16 #include "bnxt_ring.h"
19 #include "hsi_struct_def_dpdk.h"
20 #ifdef RTE_LIBRTE_IEEE1588
21 #include "bnxt_hwrm.h"
24 #include <bnxt_tf_common.h>
25 #include <ulp_mark_mgr.h>
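/*
 * Rx-side support for the bnxt PMD: ring and descriptor initialization,
 * buffer allocation/replenishment, completion processing (including
 * TPA/LRO aggregation) and translation of completion metadata into
 * mbuf fields (ptype, ol_flags, RSS hash, flow marks, timestamps).
 */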
31 static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
33 struct rte_mbuf *data;
35 data = rte_mbuf_raw_alloc(mb);
40 static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
41 struct bnxt_rx_ring_info *rxr,
44 struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
45 struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[prod];
46 struct rte_mbuf *mbuf;
48 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
50 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
55 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
57 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
62 static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
63 struct bnxt_rx_ring_info *rxr,
66 struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
67 struct rte_mbuf **rx_buf = &rxr->ag_buf_ring[prod];
68 struct rte_mbuf *mbuf;
71 PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
76 PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
80 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
82 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
87 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
89 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
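/*
 * Re-post an already-consumed mbuf at the next Rx producer slot instead of
 * freeing and re-allocating it. Used when a completion does not hand the
 * buffer up to the stack (e.g. TPA start or the error path below).
 */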
94 static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
95 struct rte_mbuf *mbuf)
97 uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
98 struct rte_mbuf **prod_rx_buf;
99 struct rx_prod_pkt_bd *prod_bd;
101 prod_rx_buf = &rxr->rx_buf_ring[prod];
103 RTE_ASSERT(*prod_rx_buf == NULL);
104 RTE_ASSERT(mbuf != NULL);
108 prod_bd = &rxr->rx_desc_ring[prod];
110 prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
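/* Detach and return the mbuf posted at the given Rx ring consumer index. */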
116 struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
119 struct rte_mbuf **cons_rx_buf;
120 struct rte_mbuf *mbuf;
122 cons_rx_buf = &rxr->rx_buf_ring[cons];
123 RTE_ASSERT(*cons_rx_buf != NULL);
130 static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
131 struct rx_tpa_start_cmpl *tpa_start,
132 struct rx_tpa_start_cmpl_hi *tpa_start1)
134 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
137 struct bnxt_tpa_info *tpa_info;
138 struct rte_mbuf *mbuf;
140 agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
142 data_cons = tpa_start->opaque;
143 tpa_info = &rxr->tpa_info[agg_id];
145 mbuf = bnxt_consume_rx_buf(rxr, data_cons);
147 bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
149 tpa_info->agg_count = 0;
150 tpa_info->mbuf = mbuf;
151 tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
153 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
156 mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
157 mbuf->data_len = mbuf->pkt_len;
158 mbuf->port = rxq->port_id;
159 mbuf->ol_flags = PKT_RX_LRO;
160 if (likely(tpa_start->flags_type &
161 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
162 mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
163 mbuf->ol_flags |= PKT_RX_RSS_HASH;
165 mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
166 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
168 if (tpa_start1->flags2 &
169 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
170 mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
171 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
173 if (likely(tpa_start1->flags2 &
174 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
175 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
177 /* recycle next mbuf */
178 data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
179 bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
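/*
 * Check that all 'agg_bufs' aggregation completions following the current
 * one have already been written by hardware, i.e. the completion at the
 * advanced consumer index carries the expected valid bit for this pass
 * around the ring.
 */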
182 static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
183 uint8_t agg_bufs, uint32_t raw_cp_cons)
185 uint16_t last_cp_cons;
186 struct rx_pkt_cmpl *agg_cmpl;
188 raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
189 last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
190 agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
191 cpr->valid = FLIP_VALID(raw_cp_cons,
192 cpr->cp_ring_struct->ring_mask,
194 return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
197 /* TPA consumes aggregation buffers out of order; allocate only the freed slots tracked in the bitmap. */
198 static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
200 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
201 uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
203 /* TODO batch allocation for better performance */
204 while (rte_bitmap_get(rxr->ag_bitmap, next)) {
205 if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
207 "agg mbuf alloc failed: prod=0x%x\n", next);
210 rte_bitmap_clear(rxr->ag_bitmap, next);
212 next = RING_NEXT(rxr->ag_ring_struct, next);
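/*
 * Chain 'agg_buf' aggregation buffers onto the head mbuf of a scattered or
 * TPA packet, accumulating pkt_len, and hand the freed slots back to the
 * aggregation ring producer.
 */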
218 static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
219 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
220 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
222 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
223 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
225 uint16_t cp_cons, ag_cons;
226 struct rx_pkt_cmpl *rxcmp;
227 struct rte_mbuf *last = mbuf;
228 bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);
230 for (i = 0; i < agg_buf; i++) {
231 struct rte_mbuf **ag_buf;
232 struct rte_mbuf *ag_mbuf;
235 rxcmp = (void *)&tpa_info->agg_arr[i];
237 *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
238 cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
239 rxcmp = (struct rx_pkt_cmpl *)
240 &cpr->cp_desc_ring[cp_cons];
244 bnxt_dump_cmpl(cp_cons, rxcmp);
247 ag_cons = rxcmp->opaque;
248 RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
249 ag_buf = &rxr->ag_buf_ring[ag_cons];
251 RTE_ASSERT(ag_mbuf != NULL);
253 ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
256 mbuf->pkt_len += ag_mbuf->data_len;
258 last->next = ag_mbuf;
264 * Aggregation buffers are consumed out of order by the TPA module,
265 * so use a bitmap to track the freed slots that must be reallocated and posted
268 rte_bitmap_set(rxr->ag_bitmap, ag_cons);
270 bnxt_prod_ag_mbuf(rxq);
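/*
 * Complete a TPA (LRO) aggregation: attach any remaining aggregation
 * buffers to the mbuf saved at TPA start and allocate a replacement data
 * mbuf so the same agg_id can be reused for the next aggregation.
 */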
274 static inline struct rte_mbuf *bnxt_tpa_end(
275 struct bnxt_rx_queue *rxq,
276 uint32_t *raw_cp_cons,
277 struct rx_tpa_end_cmpl *tpa_end,
278 struct rx_tpa_end_cmpl_hi *tpa_end1)
280 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
281 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
283 struct rte_mbuf *mbuf;
285 uint8_t payload_offset;
286 struct bnxt_tpa_info *tpa_info;
288 if (BNXT_CHIP_THOR(rxq->bp)) {
289 struct rx_tpa_v2_end_cmpl *th_tpa_end;
290 struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
292 th_tpa_end = (void *)tpa_end;
293 th_tpa_end1 = (void *)tpa_end1;
294 agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
295 agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
296 payload_offset = th_tpa_end1->payload_offset;
298 agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
299 agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
300 if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
302 payload_offset = tpa_end->payload_offset;
305 tpa_info = &rxr->tpa_info[agg_id];
306 mbuf = tpa_info->mbuf;
307 RTE_ASSERT(mbuf != NULL);
310 bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
312 mbuf->l4_len = payload_offset;
314 struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
315 RTE_ASSERT(new_data != NULL);
317 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
320 tpa_info->mbuf = new_data;
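/*
 * Packet type is resolved via a precomputed lookup table rather than by
 * decoding each completion: the table is indexed by a 7-bit value built
 * from completion flag bits (see bnxt_parse_pkt_type() below).
 */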
325 uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
327 static void __rte_cold
328 bnxt_init_ptype_table(void)
330 uint32_t *pt = bnxt_ptype_table;
331 static bool initialized;
339 for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
340 if (i & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN >> 2))
341 pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
343 pt[i] = RTE_PTYPE_L2_ETHER;
345 ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
346 tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
347 type = (i & 0x38) << 9;
350 l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
351 else if (!tun && ip6)
352 l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
353 else if (tun && !ip6)
354 l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
356 l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
359 case RX_PKT_CMPL_FLAGS_ITYPE_ICMP:
361 pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
363 pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
365 case RX_PKT_CMPL_FLAGS_ITYPE_TCP:
367 pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
369 pt[i] |= l3 | RTE_PTYPE_L4_TCP;
371 case RX_PKT_CMPL_FLAGS_ITYPE_UDP:
373 pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
375 pt[i] |= l3 | RTE_PTYPE_L4_UDP;
377 case RX_PKT_CMPL_FLAGS_ITYPE_IP:
386 bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
388 uint32_t flags_type, flags2;
391 flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
392 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
396 * bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
397 * bit 1: RX_PKT_CMPL_FLAGS2_IP_TYPE
398 * bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
399 * bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
401 index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
402 ((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
403 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
404 ((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
406 return bnxt_ptype_table[index];
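/*
 * Offload flags are resolved the same way as the packet type: two small
 * precomputed tables map the completion checksum/VLAN bits, and the
 * corresponding error bits, directly to mbuf ol_flags values.
 */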
410 bnxt_ol_flags_table[BNXT_OL_FLAGS_TBL_DIM] __rte_cache_aligned;
413 bnxt_ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM] __rte_cache_aligned;
415 static void __rte_cold
416 bnxt_init_ol_flags_tables(void)
418 static bool initialized;
425 /* Initialize ol_flags table. */
426 pt = bnxt_ol_flags_table;
427 for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
429 if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
430 pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
432 if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
433 pt[i] |= PKT_RX_IP_CKSUM_GOOD;
435 if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
436 pt[i] |= PKT_RX_L4_CKSUM_GOOD;
438 if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
439 pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
442 /* Initialize checksum error table. */
443 pt = bnxt_ol_flags_err_table;
444 for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
446 if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
447 pt[i] |= PKT_RX_IP_CKSUM_BAD;
449 if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
450 pt[i] |= PKT_RX_L4_CKSUM_BAD;
452 if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
453 pt[i] |= PKT_RX_EIP_CKSUM_BAD;
455 if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
456 pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
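/*
 * Translate completion checksum/VLAN bits into mbuf ol_flags. The error
 * bits line up with the "checksum calculated" bits once shifted right by
 * four, so a flagged error suppresses the *_CKSUM_GOOD entry from the
 * first table and picks up the *_CKSUM_BAD entry from the second.
 */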
463 bnxt_set_ol_flags(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1,
464 struct rte_mbuf *mbuf)
466 uint16_t flags_type, errors, flags;
469 flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
471 flags = rte_le_to_cpu_32(rxcmp1->flags2) &
472 (RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
473 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
474 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
475 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
476 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);
478 errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
479 (RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
480 RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
481 RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
482 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
483 errors = (errors >> 4) & flags;
485 ol_flags = bnxt_ol_flags_table[flags & ~errors];
488 ol_flags |= bnxt_ol_flags_err_table[errors];
490 if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
491 mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
492 ol_flags |= PKT_RX_RSS_HASH;
495 mbuf->ol_flags = ol_flags;
498 #ifdef RTE_LIBRTE_IEEE1588
500 bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
502 uint64_t systime_cycles = 0;
504 if (!BNXT_CHIP_THOR(bp))
507 /* On Thor, Rx timestamps are provided directly in the
508 * Rx completion records to the driver. Only 32 bits of
509 * the timestamp are present in the completion. The driver needs
510 * to read the current 48-bit free-running timer using the
511 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
512 * from the HWRM response with the lower 32 bits from the
513 * Rx completion to produce the 48-bit timestamp for the Rx packet.
515 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
517 bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
518 bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
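/*
 * TruFlow Rx mark handling: recover the flow's cfa_code/GFID from the
 * completion metadata (the encoding differs for TCAM, EM and EEM hits),
 * look the mark up in the ULP mark manager and, unless the packet belongs
 * to a VF representor, store it in mbuf->hash.fdir.hi.
 */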
523 bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
524 struct rte_mbuf *mbuf, uint32_t *vfr_flag)
532 uint32_t gfid_support = 0;
535 if (BNXT_GFID_ENABLED(bp))
538 cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
539 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
540 meta = rte_le_to_cpu_32(rxcmp1->metadata);
543 * The flags field holds extra bits of info in [6:4]
544 * which indicate whether the flow hit the TCAM, EM, or EEM table
546 meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
547 BNXT_CFA_META_FMT_SHFT;
552 /* Not an LFID or GFID, a flush cmd. */
555 /* LFID mode, no vlan scenario */
563 * Assume that EM doesn't support Mark due to GFID
564 * collisions with EEM. Simply return without setting the mark
567 if (BNXT_CFA_META_EM_TEST(meta)) {
568 /* This is an EM hit {EM(1), GFID[27:16], 19'd0 or vtag } */
570 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
571 cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
574 * It is a TCAM entry, so it is an LFID.
575 * The TCAM IDX and Mode can also be determined
576 * by decoding the meta_data. We are not
577 * using these for now.
583 /* EEM Case, only using gfid in EEM for now. */
587 * For EEM flows, the first part of cfa_code is 16 bits.
588 * The second part is embedded in the
589 * metadata field from bit 19 onwards. The driver needs to
590 * ignore the first 19 bits of metadata and use the next 12
591 * bits as the upper 12 bits of cfa_code.
593 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
594 cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
597 /* For other values, the cfa_code is assumed to be an LFID. */
601 rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
602 cfa_code, vfr_flag, &mark_id);
604 /* VF to VFR Rx path. So, skip mark_id injection in mbuf */
605 if (vfr_flag && *vfr_flag)
607 /* Got the mark, write it to the mbuf and return */
608 mbuf->hash.fdir.hi = mark_id;
609 *bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
610 mbuf->hash.fdir.id = rxcmp1->cfa_code;
611 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
616 mbuf->hash.fdir.hi = 0;
617 mbuf->hash.fdir.id = 0;
622 void bnxt_set_mark_in_mbuf(struct bnxt *bp,
623 struct rx_pkt_cmpl_hi *rxcmp1,
624 struct rte_mbuf *mbuf)
626 uint32_t cfa_code = 0;
627 uint8_t meta_fmt = 0;
631 cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
635 if (cfa_code && !bp->mark_table[cfa_code].valid)
638 flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
639 meta = rte_le_to_cpu_32(rxcmp1->metadata);
641 meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
643 /* The flags field holds extra bits of info in [6:4]
644 * which indicate whether the flow hit the TCAM, EM, or EEM table
646 meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
647 BNXT_CFA_META_FMT_SHFT;
649 /* meta_fmt == 4 => 'b100 => 'b10x => EM.
650 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
651 * meta_fmt == 6 => 'b110 => 'b11x => EEM
652 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
654 meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
657 mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
658 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
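/*
 * Process a single Rx completion. Return values consumed by the caller:
 *   0       - a packet was received and stored in *rx_pkt
 *   -EINVAL - completion consumed (TPA start/agg record), no new packet
 *   -EBUSY  - partial completion, remaining records not yet written
 *   -ENOMEM - replacement mbuf allocation failed
 *   -ENODEV - packet was diverted to a VF representor queue
 */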
661 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
662 struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
664 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
665 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
666 struct rx_pkt_cmpl *rxcmp;
667 struct rx_pkt_cmpl_hi *rxcmp1;
668 uint32_t tmp_raw_cons = *raw_cons;
669 uint16_t cons, prod, cp_cons =
670 RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
671 struct rte_mbuf *mbuf;
675 uint32_t vfr_flag = 0, mark_id = 0;
676 struct bnxt *bp = rxq->bp;
678 rxcmp = (struct rx_pkt_cmpl *)
679 &cpr->cp_desc_ring[cp_cons];
681 cmp_type = CMP_TYPE(rxcmp);
683 if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
684 struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
685 uint16_t agg_id = rte_le_to_cpu_16(rx_agg->agg_id);
686 struct bnxt_tpa_info *tpa_info;
688 tpa_info = &rxr->tpa_info[agg_id];
689 RTE_ASSERT(tpa_info->agg_count < 16);
690 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
691 rc = -EINVAL; /* Continue w/o new mbuf */
695 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
696 cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
697 rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
699 if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
702 cpr->valid = FLIP_VALID(cp_cons,
703 cpr->cp_ring_struct->ring_mask,
706 if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
707 bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
708 (struct rx_tpa_start_cmpl_hi *)rxcmp1);
709 rc = -EINVAL; /* Continue w/o new mbuf */
711 } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
712 mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
713 (struct rx_tpa_end_cmpl *)rxcmp,
714 (struct rx_tpa_end_cmpl_hi *)rxcmp1);
719 } else if (cmp_type != 0x11) {
724 agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
725 >> RX_PKT_CMPL_AGG_BUFS_SFT;
726 if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
731 cons = rxcmp->opaque;
732 mbuf = bnxt_consume_rx_buf(rxr, cons);
736 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
739 mbuf->pkt_len = rxcmp->len;
740 mbuf->data_len = mbuf->pkt_len;
741 mbuf->port = rxq->port_id;
743 bnxt_set_ol_flags(rxcmp, rxcmp1, mbuf);
745 #ifdef RTE_LIBRTE_IEEE1588
746 if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
747 RX_PKT_CMPL_FLAGS_MASK) ==
748 RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
749 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
750 bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
754 if (BNXT_TRUFLOW_EN(bp))
755 mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
758 bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
761 bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
763 mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
766 if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
767 /* Re-install the mbuf back to the rx ring */
768 bnxt_reuse_rx_mbuf(rxr, mbuf);
775 * TODO: Redesign this....
776 * If the allocation fails, the packet does not get received.
777 * Simply returning this will result in slowly falling behind
778 * on the producer ring buffers.
779 * Instead, "filling up" the producer just before ringing the
780 * doorbell could be a better solution since it will let the
781 * producer ring starve until memory is available again pushing
782 * the drops into hardware and getting them out of the driver
783 * allowing recovery to a full producer ring.
785 * This could also help with cache usage by preventing per-packet
786 * calls in favour of a tight loop with the same function being called
789 prod = RING_NEXT(rxr->rx_ring_struct, prod);
790 if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
791 PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
797 if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
799 bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
800 /* Now return an error so that nb_rx_pkts is not incremented.
802 * This packet was meant to be given to the representor, so there
803 * is no need to count it or to hand it to the parent Rx burst
804 * function.
810 * All mbufs are allocated with the same size under DPDK,
811 * so there is no rx_copy_thresh optimization
818 *raw_cons = tmp_raw_cons;
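/*
 * Burst receive handler: walk the completion ring, let bnxt_rx_pkt()
 * handle Rx completions, pass async/HWRM events to the event handler when
 * no dedicated async completion ring is in use, then ring the Rx and
 * aggregation ring doorbells once for the whole burst.
 */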
823 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
826 struct bnxt_rx_queue *rxq = rx_queue;
827 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
828 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
829 uint32_t raw_cons = cpr->cp_raw_cons;
832 int nb_rep_rx_pkts = 0;
833 struct rx_pkt_cmpl *rxcmp;
834 uint16_t prod = rxr->rx_prod;
835 uint16_t ag_prod = rxr->ag_prod;
839 if (unlikely(is_bnxt_in_error(rxq->bp)))
842 /* If Rx Q was stopped return */
843 if (unlikely(!rxq->rx_started))
846 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
848 * Replenish buffers if needed when a transition has been made from
849 * vector to non-vector receive processing.
851 while (unlikely(rxq->rxrearm_nb)) {
852 if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
853 rxr->rx_prod = rxq->rxrearm_start;
854 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
855 rxq->rxrearm_start++;
858 /* Retry allocation on next call. */
864 /* Handle RX burst request */
866 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
867 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
869 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
871 cpr->valid = FLIP_VALID(cons,
872 cpr->cp_ring_struct->ring_mask,
875 /* TODO: Avoid magic numbers... */
876 if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
877 rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
878 if (likely(!rc) || rc == -ENOMEM)
880 if (rc == -EBUSY) /* partial completion */
882 if (rc == -ENODEV) /* completion for representor */
884 } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
886 bnxt_event_hwrm_resp_handler(rxq->bp,
887 (struct cmpl_base *)rxcmp);
888 /* If the async event is Fatal error, return */
889 if (unlikely(is_bnxt_in_error(rxq->bp)))
893 raw_cons = NEXT_RAW_CMP(raw_cons);
894 if (nb_rx_pkts == nb_pkts || evt)
896 /* Post some Rx buffers early when processing a larger burst */
897 if (nb_rx_pkts == BNXT_RX_POST_THRESH)
898 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
901 cpr->cp_raw_cons = raw_cons;
902 if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
904 * For the PMD, there is no need to keep re-arming the doorbell
905 * if there are no new completions
910 if (prod != rxr->rx_prod)
911 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
913 /* Ring the AGG ring DB */
914 if (ag_prod != rxr->ag_prod)
915 bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
919 /* Attempt to alloc Rx buf in case of a previous allocation failure. */
921 int i = RING_NEXT(rxr->rx_ring_struct, prod);
922 int cnt = nb_rx_pkts;
925 i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
926 struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[i];
928 /* Buffer already allocated for this index. */
929 if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
932 /* This slot is empty. Alloc buffer for Rx */
933 if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
935 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
937 PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
948 * Dummy DPDK callback for RX.
950 * This function is used to temporarily replace the real callback during
951 * unsafe control operations on the queue, or in case of error.
954 bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
955 struct rte_mbuf **rx_pkts __rte_unused,
956 uint16_t nb_pkts __rte_unused)
961 void bnxt_free_rx_rings(struct bnxt *bp)
964 struct bnxt_rx_queue *rxq;
969 for (i = 0; i < (int)bp->rx_nr_rings; i++) {
970 rxq = bp->rx_queues[i];
974 bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
975 rte_free(rxq->rx_ring->rx_ring_struct);
977 /* Free the Aggregator ring */
978 bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
979 rte_free(rxq->rx_ring->ag_ring_struct);
980 rxq->rx_ring->ag_ring_struct = NULL;
982 rte_free(rxq->rx_ring);
984 bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
985 rte_free(rxq->cp_ring->cp_ring_struct);
986 rte_free(rxq->cp_ring);
989 bp->rx_queues[i] = NULL;
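/*
 * Allocate and size the per-queue ring structures. Ring sizes are rounded
 * up to a power of two; the completion ring gets two slots per Rx
 * descriptor and is scaled further when the aggregation ring is used for
 * scatter/LRO traffic.
 */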
993 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
995 struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
996 struct rte_eth_rxmode *rxmode;
997 struct bnxt_cp_ring_info *cpr;
998 struct bnxt_rx_ring_info *rxr;
999 struct bnxt_ring *ring;
1002 rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
1004 rxr = rte_zmalloc_socket("bnxt_rx_ring",
1005 sizeof(struct bnxt_rx_ring_info),
1006 RTE_CACHE_LINE_SIZE, socket_id);
1011 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1012 sizeof(struct bnxt_ring),
1013 RTE_CACHE_LINE_SIZE, socket_id);
1016 rxr->rx_ring_struct = ring;
1017 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
1018 ring->ring_mask = ring->ring_size - 1;
1019 ring->bd = (void *)rxr->rx_desc_ring;
1020 ring->bd_dma = rxr->rx_desc_mapping;
1022 /* Allocate extra rx ring entries for vector rx. */
1023 ring->vmem_size = sizeof(struct rte_mbuf *) *
1024 (ring->ring_size + RTE_BNXT_DESCS_PER_LOOP);
1026 ring->vmem = (void **)&rxr->rx_buf_ring;
1027 ring->fw_ring_id = INVALID_HW_RING_ID;
1029 cpr = rte_zmalloc_socket("bnxt_rx_ring",
1030 sizeof(struct bnxt_cp_ring_info),
1031 RTE_CACHE_LINE_SIZE, socket_id);
1036 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1037 sizeof(struct bnxt_ring),
1038 RTE_CACHE_LINE_SIZE, socket_id);
1041 cpr->cp_ring_struct = ring;
1043 rxmode = &eth_dev->data->dev_conf.rxmode;
1044 use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
1045 (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
1046 (rxmode->max_rx_pkt_len >
1047 (uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1048 RTE_PKTMBUF_HEADROOM));
1050 /* Allocate two completion slots per entry in desc ring. */
1051 ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
1053 /* Allocate additional slots if aggregation ring is in use. */
1055 ring->ring_size *= AGG_RING_SIZE_FACTOR;
1057 ring->ring_size = rte_align32pow2(ring->ring_size);
1058 ring->ring_mask = ring->ring_size - 1;
1059 ring->bd = (void *)cpr->cp_desc_ring;
1060 ring->bd_dma = cpr->cp_desc_mapping;
1061 ring->vmem_size = 0;
1063 ring->fw_ring_id = INVALID_HW_RING_ID;
1065 /* Allocate Aggregator rings */
1066 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
1067 sizeof(struct bnxt_ring),
1068 RTE_CACHE_LINE_SIZE, socket_id);
1071 rxr->ag_ring_struct = ring;
1072 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
1073 AGG_RING_SIZE_FACTOR);
1074 ring->ring_mask = ring->ring_size - 1;
1075 ring->bd = (void *)rxr->ag_desc_ring;
1076 ring->bd_dma = rxr->ag_desc_mapping;
1077 ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
1078 ring->vmem = (void **)&rxr->ag_buf_ring;
1079 ring->fw_ring_id = INVALID_HW_RING_ID;
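/*
 * Program every buffer descriptor in a ring with the same type and buffer
 * length; 'opaque' carries the slot index, which completions echo back so
 * the driver can locate the matching software ring entry.
 */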
1084 static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
1088 struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
1092 for (j = 0; j < ring->ring_size; j++) {
1093 rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
1094 rx_bd_ring[j].len = rte_cpu_to_le_16(len);
1095 rx_bd_ring[j].opaque = j;
1099 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
1101 struct bnxt_rx_ring_info *rxr;
1102 struct bnxt_ring *ring;
1103 uint32_t prod, type;
1107 /* Initialize packet type table. */
1108 bnxt_init_ptype_table();
1110 /* Initialize offload flags parsing table. */
1111 bnxt_init_ol_flags_tables();
1113 size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
1114 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
1116 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
1119 ring = rxr->rx_ring_struct;
1120 bnxt_init_rxbds(ring, type, size);
1122 prod = rxr->rx_prod;
1123 for (i = 0; i < ring->ring_size; i++) {
1124 if (unlikely(!rxr->rx_buf_ring[i])) {
1125 if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
1126 PMD_DRV_LOG(WARNING,
1127 "init'ed rx ring %d with %d/%d mbufs only\n",
1128 rxq->queue_id, i, ring->ring_size);
1132 rxr->rx_prod = prod;
1133 prod = RING_NEXT(rxr->rx_ring_struct, prod);
1136 /* Initialize dummy mbuf pointers for vector mode rx. */
1137 for (i = ring->ring_size;
1138 i < ring->ring_size + RTE_BNXT_DESCS_PER_LOOP; i++) {
1139 rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
1142 ring = rxr->ag_ring_struct;
1143 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
1144 bnxt_init_rxbds(ring, type, size);
1145 prod = rxr->ag_prod;
1147 for (i = 0; i < ring->ring_size; i++) {
1148 if (unlikely(!rxr->ag_buf_ring[i])) {
1149 if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
1150 PMD_DRV_LOG(WARNING,
1151 "init'ed AG ring %d with %d/%d mbufs only\n",
1152 rxq->queue_id, i, ring->ring_size);
1156 rxr->ag_prod = prod;
1157 prod = RING_NEXT(rxr->ag_ring_struct, prod);
1159 PMD_DRV_LOG(DEBUG, "AGG Done!\n");
1161 if (rxr->tpa_info) {
1162 unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
1164 for (i = 0; i < max_aggs; i++) {
1165 if (unlikely(!rxr->tpa_info[i].mbuf)) {
1166 rxr->tpa_info[i].mbuf =
1167 __bnxt_alloc_rx_data(rxq->mb_pool);
1168 if (!rxr->tpa_info[i].mbuf) {
1169 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
1175 PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");