/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 */
#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_hwrm.h"
#include <bnxt_tf_common.h>
#include <ulp_mark_mgr.h>
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
	struct rte_mbuf *data;
	data = rte_mbuf_raw_alloc(mb);
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
	uint16_t prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;
	rxbd = &rxr->rx_desc_ring[prod];
	rx_buf = &rxr->rx_buf_ring[prod];
	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
		__atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
	uint16_t prod = RING_IDX(rxr->ag_ring_struct, raw_prod);
	struct rx_prod_pkt_bd *rxbd;
	struct rte_mbuf **rx_buf;
	struct rte_mbuf *mbuf;
	rxbd = &rxr->ag_desc_ring[prod];
	rx_buf = &rxr->ag_buf_ring[prod];
		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
		__atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
	uint16_t prod, raw_prod = RING_NEXT(rxr->rx_raw_prod);
	struct rte_mbuf **prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;
	prod = RING_IDX(rxr->rx_ring_struct, raw_prod);
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	RTE_ASSERT(*prod_rx_buf == NULL);
	RTE_ASSERT(mbuf != NULL);
	prod_bd = &rxr->rx_desc_ring[prod];
	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
	rxr->rx_raw_prod = raw_prod;
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
	struct rte_mbuf **cons_rx_buf;
	struct rte_mbuf *mbuf;
	cons_rx_buf = &rxr->rx_buf_ring[RING_IDX(rxr->rx_ring_struct, cons)];
	RTE_ASSERT(*cons_rx_buf != NULL);
static void bnxt_rx_ring_reset(void *arg)
	struct bnxt *bp = arg;
	struct bnxt_rx_queue *rxq;
	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr;
		rxq = bp->rx_queues[i];
		if (!rxq || !rxq->in_reset)
		/* Disable and flush TPA before resetting the RX ring */
			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
		rc = bnxt_hwrm_rx_ring_reset(bp, i);
			PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
		bnxt_rx_queue_release_mbufs(rxq);
		rxr->rx_raw_prod = 0;
		rxr->ag_raw_prod = 0;
		rxr->rx_next_cons = 0;
		bnxt_init_one_rx_ring(rxq);
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
static void bnxt_sched_ring_reset(struct bnxt_rx_queue *rxq)
	rte_eal_alarm_set(1, bnxt_rx_ring_reset, (void *)rxq->bp);
static void bnxt_tpa_get_metadata(struct bnxt *bp,
				  struct bnxt_tpa_info *tpa_info,
				  struct rx_tpa_start_cmpl *tpa_start,
				  struct rx_tpa_start_cmpl_hi *tpa_start1)
	tpa_info->cfa_code_valid = 0;
	tpa_info->vlan_valid = 0;
	tpa_info->hash_valid = 0;
	tpa_info->l4_csum_valid = 0;
	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		tpa_info->hash_valid = 1;
		tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
		struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
		struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
			(void *)tpa_start1;
		if (v2_tpa_start->agg_id &
		    RX_TPA_START_V2_CMPL_METADATA1_VALID) {
			tpa_info->vlan_valid = 1;
			tpa_info->vlan =
				rte_le_to_cpu_16(v2_tpa_start1->metadata0);
		if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
			tpa_info->l4_csum_valid = 1;
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		tpa_info->vlan_valid = 1;
		tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		tpa_info->l4_csum_valid = 1;
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;
	agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);
	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];
	if (unlikely(data_cons != rxr->rx_next_cons)) {
		PMD_DRV_LOG(ERR, "TPA cons %x, expected cons %x\n",
			    data_cons, rxr->rx_next_cons);
		bnxt_sched_ring_reset(rxq);
	mbuf = bnxt_consume_rx_buf(rxr, data_cons);
		bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
	tpa_info->agg_count = 0;
	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = RTE_MBUF_F_RX_LRO;
	bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
	if (likely(tpa_info->hash_valid)) {
		mbuf->hash.rss = tpa_info->rss_hash;
		mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	} else if (tpa_info->cfa_code_valid) {
		mbuf->hash.fdir.id = tpa_info->cfa_code;
		mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
	if (tpa_info->vlan_valid && BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
		mbuf->vlan_tci = tpa_info->vlan;
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
	if (likely(tpa_info->l4_csum_valid))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	/* recycle next mbuf */
	data_cons = RING_NEXT(data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct,
				     RING_NEXT(data_cons));
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;
	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
				  cpr->cp_ring_struct->ring_size);
/* TPA consumes aggregation buffers out of order; replenish only the contiguous run of freed slots. */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t raw_next = RING_NEXT(rxr->ag_raw_prod);
	uint16_t bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
	/* TODO: batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
			PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
				    raw_next);
		rte_bitmap_clear(rxr->ag_bitmap, bmap_next);
		rxr->ag_raw_prod = raw_next;
		raw_next = RING_NEXT(raw_next);
		bmap_next = RING_IDX(rxr->ag_ring_struct, raw_next);
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;
	bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
	for (i = 0; i < agg_buf; i++) {
		struct rte_mbuf **ag_buf;
		struct rte_mbuf *ag_mbuf;
			rxcmp = (void *)&tpa_info->agg_arr[i];
			*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
			rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];
		bnxt_dump_cmpl(cp_cons, rxcmp);
		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		RTE_ASSERT(ag_mbuf != NULL);
		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
		mbuf->pkt_len += ag_mbuf->data_len;
		last->next = ag_mbuf;
		 * Aggregation buffers are consumed out of order by the TPA module,
		 * so use a bitmap to track the freed slots that still need to be reallocated and notified
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	bnxt_prod_ag_mbuf(rxq);
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   uint32_t *raw_cons, void *cmp)
	struct rx_pkt_cmpl *rxcmp = cmp;
	uint32_t tmp_raw_cons = *raw_cons;
	uint8_t cmp_type, agg_bufs = 0;
	cmp_type = CMP_TYPE(rxcmp);
	if (cmp_type == CMPL_BASE_TYPE_RX_L2) {
		agg_bufs = BNXT_RX_L2_AGG_BUFS(rxcmp);
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		struct rx_tpa_end_cmpl *tpa_end = cmp;
		if (BNXT_CHIP_P5(bp))
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, tmp_raw_cons))
	*raw_cons = tmp_raw_cons;
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1)
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rte_mbuf *mbuf;
	uint8_t payload_offset;
	struct bnxt_tpa_info *tpa_info;
	if (unlikely(rxq->in_reset)) {
		PMD_DRV_LOG(ERR, "rxq->in_reset: raw_cp_cons:%d\n",
			    *raw_cp_cons);
		bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
	if (BNXT_CHIP_P5(rxq->bp)) {
		struct rx_tpa_v2_end_cmpl *th_tpa_end;
		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
		th_tpa_end = (void *)tpa_end;
		th_tpa_end1 = (void *)tpa_end1;
		agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
		payload_offset = th_tpa_end1->payload_offset;
		agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
		payload_offset = tpa_end->payload_offset;
	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
	mbuf->l4_len = payload_offset;
	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
		__atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
	tpa_info->mbuf = new_data;
uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM] __rte_cache_aligned;
static void __rte_cold
bnxt_init_ptype_table(void)
	uint32_t *pt = bnxt_ptype_table;
	static bool initialized;
	for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
		if (i & BNXT_PTYPE_TBL_VLAN_MSK)
			pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
			pt[i] = RTE_PTYPE_L2_ETHER;
		ip6 = !!(i & BNXT_PTYPE_TBL_IP_VER_MSK);
		tun = !!(i & BNXT_PTYPE_TBL_TUN_MSK);
		type = (i & BNXT_PTYPE_TBL_TYPE_MSK) >> BNXT_PTYPE_TBL_TYPE_SFT;
			l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		else if (!tun && ip6)
			l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
		else if (tun && !ip6)
			l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
			l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
		case BNXT_PTYPE_TBL_TYPE_ICMP:
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
				pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
		case BNXT_PTYPE_TBL_TYPE_TCP:
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
				pt[i] |= l3 | RTE_PTYPE_L4_TCP;
		case BNXT_PTYPE_TBL_TYPE_UDP:
				pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
				pt[i] |= l3 | RTE_PTYPE_L4_UDP;
		case BNXT_PTYPE_TBL_TYPE_IP:
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
	uint32_t flags_type, flags2;
	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();
	 * bit 0: Set if IP tunnel encapsulated packet.
	 * bit 1: Set if IPv6 packet, clear if IPv4.
	 * bit 2: Set if VLAN tag present.
	 * bits 3-6: Four-bit hardware packet type field.
	index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
		BNXT_CMPL_VLAN_TUN_TO_IDX(flags2) |
		BNXT_CMPL_IP_VER_TO_IDX(flags2);
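	/*
	 * Illustrative example (editor's note, not part of the original
	 * code): for a non-tunneled, VLAN-tagged IPv4 TCP packet the three
	 * macros above OR together the TCP ITYPE, the VLAN bit and the IPv4
	 * IP-version bit, and the entry initialized for that index in
	 * bnxt_init_ptype_table() is RTE_PTYPE_L2_ETHER_VLAN |
	 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, so a single
	 * table load replaces per-packet branching on the completion flags.
	 */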
	return bnxt_ptype_table[index];
static void __rte_cold
bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rte_eth_conf *dev_conf;
	bool outer_cksum_enabled;
	dev_conf = &rxq->bp->eth_dev->data->dev_conf;
	offloads = dev_conf->rxmode.offloads;
	outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
					     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
	/* Initialize ol_flags table. */
	pt = rxr->ol_flags_table;
	for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
		if (BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
			if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
				pt[i] |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
			if (outer_cksum_enabled) {
				if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
				if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			/* Non-tunnel case. */
			if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
				pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
				pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	/* Initialize checksum error table. */
	pt = rxr->ol_flags_err_table;
	for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 2)) {
			if (outer_cksum_enabled) {
				if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
				if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
				if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
					pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			/* Non-tunnel case. */
			if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
				pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
				pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
		  struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
	uint16_t flags_type, errors, flags;
	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
	flags = rte_le_to_cpu_32(rxcmp1->flags2) &
		(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
		 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
		 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |
		 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC |
		 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN);
	flags |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 3;
	errors = rte_le_to_cpu_16(rxcmp1->errors_v2) &
		 (RX_PKT_CMPL_ERRORS_IP_CS_ERROR |
		  RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
		  RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR |
		  RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
	errors = (errors >> 4) & flags;
	ol_flags = rxr->ol_flags_table[flags & ~errors];
	if (unlikely(errors)) {
		errors |= (flags & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) << 2;
		ol_flags |= rxr->ol_flags_err_table[errors];
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
		ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
	mbuf->ol_flags = ol_flags;
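	/*
	 * Worked example (editor's note, illustrative only, based on the
	 * tables built in bnxt_init_ol_flags_tables() above): for a
	 * non-tunneled packet with both checksums computed and no errors
	 * reported, "flags" holds RX_PKT_CMPL_FLAGS2_IP_CS_CALC |
	 * RX_PKT_CMPL_FLAGS2_L4_CS_CALC and "errors" is 0, so
	 * ol_flags_table[flags] supplies RTE_MBUF_F_RX_IP_CKSUM_GOOD |
	 * RTE_MBUF_F_RX_L4_CKSUM_GOOD. If the completion instead reports
	 * RX_PKT_CMPL_ERRORS_L4_CS_ERROR, that bit (shifted down by 4) is
	 * cleared from the ol_flags_table index and ol_flags_err_table[]
	 * contributes RTE_MBUF_F_RX_L4_CKSUM_BAD.
	 */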
#ifdef RTE_LIBRTE_IEEE1588
bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint64_t last_hwrm_time;
	uint64_t pkt_time = 0;
	if (!BNXT_CHIP_P5(bp) || !ptp)
	/* On Thor, Rx timestamps are provided directly in the
	 * Rx completion records to the driver. Only 32 bits of
	 * the timestamp are present in the completion. The driver needs
	 * to read the current 48-bit free-running timer using the
	 * HWRM_PORT_TS_QUERY command and combine the upper 16 bits
	 * from the HWRM response with the lower 32 bits in the
	 * Rx completion to produce the 48-bit timestamp for the Rx packet.
	 */
	last_hwrm_time = ptp->current_time;
	pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
	if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
		/* timer has rolled over */
		pkt_time += (1ULL << 32);
	ptp->rx_timestamp = pkt_time;
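	/*
	 * Illustrative example with hypothetical values (editor's note,
	 * assuming BNXT_PTP_CURRENT_TIME_MASK keeps the timer bits above the
	 * lower 32): with a last HWRM-reported 48-bit timer of
	 * 0x123489ABCDEF and a completion timestamp of 0x00000123, merging
	 * gives 0x123400000123; because 0x00000123 < 0x89ABCDEF the 32-bit
	 * counter has wrapped since the last HWRM query, so 1ULL << 32 is
	 * added, yielding 0x123500000123.
	 */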
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
			  struct rte_mbuf *mbuf, uint32_t *vfr_flag)
	uint32_t gfid_support = 0;
	if (BNXT_GFID_ENABLED(bp))
	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
	meta = rte_le_to_cpu_32(rxcmp1->metadata);
	 * The flags field holds extra bits of info in bits [6:4]
	 * that indicate whether the flow is in TCAM, EM, or EEM.
	meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
		   BNXT_CFA_META_FMT_SHFT;
			/* Not an LFID or GFID, a flush cmd. */
			/* LFID mode, no VLAN scenario */
		 * Assume that EM doesn't support Mark due to GFID
		 * collisions with EEM. Simply return without setting the mark.
		if (BNXT_CFA_META_EM_TEST(meta)) {
			/* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
			meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
			cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
			 * It is a TCAM entry, so it is an LFID.
			 * The TCAM IDX and Mode can also be determined
			 * by decoding the meta_data. We are not
			 * using these for now.
		/* EEM case: only GFID is used for EEM for now. */
		 * For EEM flows, the first part of cfa_code is 16 bits.
		 * The second part is embedded in the
		 * metadata field from bit 19 onwards. The driver needs to
		 * ignore the first 19 bits of metadata and use the next 12
		 * bits as the higher 12 bits of cfa_code.
		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
		cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
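		/*
		 * Illustrative example with hypothetical values (editor's
		 * note): if the completion carries a 16-bit cfa_code of
		 * 0x00AB and the metadata bits above
		 * BNXT_RX_META_CFA_CODE_SHIFT hold 0x5, the two statements
		 * above leave meta = 0x5 and produce
		 * cfa_code = (0x5 << BNXT_CFA_CODE_META_SHIFT) | 0x00AB,
		 * the wider EEM flow identifier passed to
		 * ulp_mark_db_mark_get() below.
		 */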
		/* For other values, the cfa_code is assumed to be an LFID. */
	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
				  cfa_code, vfr_flag, &mark_id);
		/* VF to VFR Rx path, so skip mark_id injection in the mbuf */
		if (vfr_flag && *vfr_flag)
		/* Got the mark, write it to the mbuf and return */
		mbuf->hash.fdir.hi = mark_id;
		*bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
		mbuf->hash.fdir.id = rxcmp1->cfa_code;
		mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
	mbuf->hash.fdir.hi = 0;
	mbuf->hash.fdir.id = 0;
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf)
	uint32_t cfa_code = 0;
	if (unlikely(bp->mark_table == NULL))
	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
	if (cfa_code && !bp->mark_table[cfa_code].valid)
	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
	mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, raw_prod, cp_cons =
		RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	uint32_t vfr_flag = 0, mark_id = 0;
	struct bnxt *bp = rxq->bp;
	rxcmp = (struct rx_pkt_cmpl *)
		&cpr->cp_desc_ring[cp_cons];
	cmp_type = CMP_TYPE(rxcmp);
	if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
		struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
		uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
		struct bnxt_tpa_info *tpa_info;
		tpa_info = &rxr->tpa_info[agg_id];
		RTE_ASSERT(tpa_info->agg_count < 16);
		tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
		rc = -EINVAL; /* Continue w/o new mbuf */
	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
	if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
				cpr->cp_ring_struct->ring_size))
	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
	} else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
	agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
	raw_prod = rxr->rx_raw_prod;
	cons = rxcmp->opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
		PMD_DRV_LOG(ERR, "RX cons %x != expected cons %x\n",
			    cons, rxr->rx_next_cons);
		bnxt_sched_ring_reset(rxq);
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
#ifdef RTE_LIBRTE_IEEE1588
	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
		      RX_PKT_CMPL_FLAGS_MASK) ==
		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
		bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
		bnxt_parse_csum_v2(mbuf, rxcmp1);
		bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
		bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
		/* TODO: Add support for cfa_code parsing */
	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
	bnxt_set_vlan(rxcmp1, mbuf);
	if (BNXT_TRUFLOW_EN(bp))
		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
						    &vfr_flag);
		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Reinstall the mbuf in the Rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);
	 * TODO: Redesign this...
	 * If the allocation fails, the packet does not get received.
	 * Simply returning here will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution, since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being called.
	raw_prod = RING_NEXT(raw_prod);
	if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
			    raw_prod);
	rxr->rx_raw_prod = raw_prod;
	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct, RING_NEXT(cons));
	if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
		bnxt_vfr_recv(mark_id, rxq->queue_id, mbuf);
		/* Now return an error so that nb_rx_pkts is not incremented.
		 * This packet was meant to be given to the representor,
		 * so there is no need to account for it or to hand it to the
		 * parent Rx burst function.
		 */
	 * All MBUFs are allocated with the same size under DPDK,
	 * so there is no optimization for rx_copy_thresh.
	*raw_cons = tmp_raw_cons;
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t rx_raw_prod = rxr->rx_raw_prod;
	uint16_t ag_raw_prod = rxr->ag_raw_prod;
	uint32_t raw_cons = cpr->cp_raw_cons;
	bool alloc_failed = false;
	int nb_rep_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	if (unlikely(is_bnxt_in_error(rxq->bp)))
	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	 * Replenish buffers if needed when a transition has been made from
	 * vector to non-vector receive processing.
	while (unlikely(rxq->rxrearm_nb)) {
		if (!bnxt_alloc_rx_data(rxq, rxr, rxq->rxrearm_start)) {
			rxr->rx_raw_prod = rxq->rxrearm_start;
			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
			rxq->rxrearm_start++;
			/* Retry allocation on next call. */
	/* Handle RX burst request */
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
					cpr->cp_ring_struct->ring_size))
		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
			PMD_DRV_LOG(ERR, "Rx flush done\n");
		} else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
			   (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			else if (rc == -EBUSY)	/* partial completion */
			else if (rc == -ENODEV)	/* completion for representor */
			else if (rc == -ENOMEM) {
				alloc_failed = true;
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
			/* If the async event is a fatal error, return */
			if (unlikely(is_bnxt_in_error(rxq->bp)))
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
	if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
		 * For the PMD there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions.
	cpr->cp_raw_cons = raw_cons;
	/* Ring the completion queue doorbell. */
	/* Ring the receive descriptor doorbell. */
	if (rx_raw_prod != rxr->rx_raw_prod)
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
	/* Ring the AGG ring DB */
	if (ag_raw_prod != rxr->ag_raw_prod)
		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
		rx_raw_prod = RING_NEXT(rx_raw_prod);
		for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) {
			struct rte_mbuf **rx_buf;
			ndx = RING_IDX(rxr->rx_ring_struct, rx_raw_prod + cnt);
			rx_buf = &rxr->rx_buf_ring[ndx];
			/* Buffer already allocated for this index. */
			if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
			/* This slot is empty. Alloc buffer for Rx */
			if (!bnxt_alloc_rx_data(rxq, rxr, rx_raw_prod + cnt)) {
				rxr->rx_raw_prod = rx_raw_prod + cnt;
				bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
void bnxt_free_rx_rings(struct bnxt *bp)
	struct bnxt_rx_queue *rxq;
	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;
		rte_free(rxq->rx_ring);
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);
		rte_memzone_free(rxq->mz);
		bp->rx_queues[i] = NULL;
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
	if (rxq->rx_ring != NULL) {
		rxr = rte_zmalloc_socket("bnxt_rx_ring",
					 sizeof(struct bnxt_rx_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr->rx_ring_struct == NULL) {
		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		rxr->rx_ring_struct = ring;
		ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)rxr->rx_desc_ring;
		ring->bd_dma = rxr->rx_desc_mapping;
		/* Allocate extra rx ring entries for vector rx. */
		ring->vmem_size = sizeof(struct rte_mbuf *) *
				  (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);
		ring->vmem = (void **)&rxr->rx_buf_ring;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	if (rxq->cp_ring != NULL) {
		cpr = rte_zmalloc_socket("bnxt_rx_ring",
					 sizeof(struct bnxt_cp_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr->cp_ring_struct == NULL) {
		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		cpr->cp_ring_struct = ring;
		/* Allocate two completion slots per entry in desc ring. */
		ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
		if (bnxt_need_agg_ring(rxq->bp->eth_dev))
			ring->ring_size *= AGG_RING_SIZE_FACTOR;
		ring->ring_size = rte_align32pow2(ring->ring_size);
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)cpr->cp_desc_ring;
		ring->bd_dma = cpr->cp_desc_mapping;
		ring->vmem_size = 0;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
	ring->vmem = (void **)&rxr->ag_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t raw_prod, type;
	/* Initialize packet type table. */
	bnxt_init_ptype_table();
	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);
	/* Initialize offload flags parsing table. */
	bnxt_init_ol_flags_tables(rxq);
	raw_prod = rxr->rx_raw_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->rx_buf_ring[i])) {
			if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "RxQ %d allocated %d of %d mbufs\n",
					    rxq->queue_id, i, ring->ring_size);
		rxr->rx_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	/* Initialize dummy mbuf pointers for vector mode rx. */
	for (i = ring->ring_size;
	     i < ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES; i++) {
		rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
	/* Explicitly reset this driver internal tracker on a ring init */
	rxr->rx_next_cons = 0;
	if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	raw_prod = rxr->ag_raw_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (unlikely(!rxr->ag_buf_ring[i])) {
			if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
				PMD_DRV_LOG(WARNING,
					    "RxQ %d allocated %d of %d mbufs\n",
					    rxq->queue_id, i, ring->ring_size);
		rxr->ag_raw_prod = raw_prod;
		raw_prod = RING_NEXT(raw_prod);
	PMD_DRV_LOG(DEBUG, "AGG Done!\n");
	if (rxr->tpa_info) {
		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
		for (i = 0; i < max_aggs; i++) {
			if (unlikely(!rxr->tpa_info[i].mbuf)) {
				rxr->tpa_info[i].mbuf =
					__bnxt_alloc_rx_data(rxq->mb_pool);
				if (!rxr->tpa_info[i].mbuf) {
					__atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
							   __ATOMIC_RELAXED);
	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
/* Sweep the Rx completion queue until HWRM_DONE for the ring flush is received.
 * The mbufs will not be freed in this call.
 * They will be freed during ring free as a part of mem cleanup.
 */
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t raw_cons = cpr->cp_raw_cons;
	struct rx_pkt_cmpl *rxcmp;
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, ring_mask + 1))
		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_rx < ring_mask);
	cpr->cp_raw_cons = raw_cons;
	/* Ring the completion queue doorbell. */