/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Ring handling
 */
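/*
 * Pull one mbuf straight from the queue's mempool.  rte_mbuf_raw_alloc()
 * skips the usual per-field reset, so callers initialize what they need.
 */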
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);
	return data;
}
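/*
 * Fill one Rx descriptor: allocate a fresh mbuf, remember it in the software
 * ring at index 'prod' and program its DMA address into the hardware BD.
 * Returns 0 on success, -ENOMEM if the mempool is exhausted.
 */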
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *data;

	data = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!data) {
		rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}
	rx_buf->mbuf = data;
	rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
	return 0;
}
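/* Same as bnxt_alloc_rx_data(), but for a slot in the aggregation ring. */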
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
	struct rte_mbuf *data;

	data = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!data) {
		rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}
	if (rxbd == NULL)
		RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
	if (rx_buf == NULL)
		RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
	rx_buf->mbuf = data;
	rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
	return 0;
}
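/*
 * Re-post an already-allocated mbuf into the next free Rx producer slot so
 * the ring can be replenished without going back to the mempool.
 */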
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(prod_rx_buf->mbuf == NULL);
	RTE_ASSERT(mbuf != NULL);

	prod_rx_buf->mbuf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];
	prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));

	rxr->rx_prod = prod;
}
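/*
 * Recycle an aggregation buffer: the DMA address of the consumed BD is copied
 * into the current producer BD so the hardware can reuse the same buffer.
 */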
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
			       struct rte_mbuf *mbuf)
{
	uint16_t prod = rxr->ag_prod;
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd, *cons_bd;

	prod_rx_buf = &rxr->ag_buf_ring[prod];
	prod_rx_buf->mbuf = mbuf;

	prod_bd = &rxr->ag_desc_ring[prod];
	cons_bd = &rxr->ag_desc_ring[cons];
	prod_bd->addr = cons_bd->addr;
}
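/*
 * Detach and return the mbuf sitting at index 'cons' of the software Rx ring;
 * the slot is cleared so it can be refilled later.
 */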
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	RTE_ASSERT(cons_rx_buf->mbuf != NULL);
	mbuf = cons_rx_buf->mbuf;
	cons_rx_buf->mbuf = NULL;
	return mbuf;
}
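/*
 * Handle a TPA_START completion: the NIC has begun coalescing a TCP flow
 * (LRO/TPA).  The first data buffer is parked in rxr->tpa_info[agg_id] until
 * the matching TPA_END arrives, and the consumed Rx slots are refilled by
 * recycling the mbufs previously held in tpa_info.
 *
 * For context only (illustrative sketch, not part of this file): coalesced
 * packets reach this path when the application asked for LRO at configure
 * time, e.g. with the rxmode flag of this DPDK generation:
 *
 *	struct rte_eth_conf conf = { .rxmode = { .enable_lro = 1 } };
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */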
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
		RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = PKT_RX_LRO;
	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
	}
	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* recycle next mbuf */
	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}
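/*
 * Check that all 'agg_bufs' aggregation completions belonging to the current
 * packet have already been written by the hardware, so the caller never reads
 * past the valid part of the completion ring.
 */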
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
/* TPA consumes aggregation buffers out of order; only refill the connected
 * run of freed slots that follows the current producer.
 */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

	/* TODO: batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
			RTE_LOG(ERR, PMD,
				"agg mbuf alloc failed: prod=0x%x\n", next);
			break;
		}
		rte_bitmap_clear(rxr->ag_bitmap, next);
		rxr->ag_prod = next;
		next = RING_NEXT(rxr->ag_ring_struct, next);
	}

	return 0;
}
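/*
 * Walk the aggregation completions that follow a packet completion and chain
 * the corresponding mbufs onto 'mbuf'.  Freed aggregation slots are only
 * marked in ag_bitmap here; bnxt_prod_ag_mbuf() refills them in ring order.
 */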
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;

	for (i = 0; i < agg_buf; i++) {
		struct bnxt_sw_rx_bd *ag_buf;
		struct rte_mbuf *ag_mbuf;

		*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
		cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)
					&cpr->cp_desc_ring[cp_cons];

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = ag_buf->mbuf;
		RTE_ASSERT(ag_mbuf != NULL);

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		ag_buf->mbuf = NULL;

		/*
		 * Aggregation buffers are consumed out of order by the TPA
		 * unit, so use the bitmap to track freed slots that still
		 * have to be refilled and posted to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}
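/*
 * Handle a TPA_END completion: chain any trailing aggregation buffers onto
 * the mbuf saved at TPA_START, allocate a replacement for tpa_info and return
 * the coalesced packet.  A NULL return means "not ready yet" (aggregation
 * completions missing or no replacement mbuf) and the caller retries.
 */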
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
			>> RX_TPA_END_CMPL_AGG_ID_SFT;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	struct bnxt_tpa_info *tpa_info;

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
		RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
	}
	mbuf->l4_len = tpa_end->payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}
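/*
 * Process one completion-ring entry.  Returns 0 and stores an mbuf in *rx_pkt
 * for a normal packet, -EBUSY for a partial completion that must be retried,
 * and another negative value when the entry produced no packet (TPA start,
 * unknown type, L2 error or mbuf allocation failure).
 */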
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
			    struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, prod, cp_cons =
	    RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	uint16_t cmp_type;
	uint16_t ag_cons = 0;
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;

	rxcmp = (struct rx_pkt_cmpl *)
	    &cpr->cp_desc_ring[cp_cons];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
		return -EBUSY;

	cmp_type = CMP_TYPE(rxcmp);
	if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if (cmp_type != 0x11) {
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
			>> RX_PKT_CMPL_AGG_BUFS_SFT;
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	cons = rxcmp->opaque;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	rte_prefetch0(mbuf);

	if (mbuf == NULL)
		return -ENOMEM;

	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = 0;
	if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rxcmp->rss_hash;
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rxcmp1->cfa_code;
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}

	if (agg_buf)
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);

	if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		mbuf->vlan_tci = rxcmp1->metadata &
			(RX_PKT_CMPL_METADATA_VID_MASK |
			RX_PKT_CMPL_METADATA_DE |
			RX_PKT_CMPL_METADATA_PRI_MASK);
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
	}

	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);
		if (agg_buf)
			bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);

		rc = -EIO;
		goto next_rx;
	}
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being
	 * called all the time.
	 */
	prod = RING_NEXT(rxr->rx_ring_struct, prod);
	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
		RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
		rc = -ENOMEM;
	}
	rxr->rx_prod = prod;
	/*
	 * All MBUFs are allocated with the same size under DPDK,
	 * no optimization for rx_copy_thresh
	 */
	*rx_pkt = mbuf;

next_rx:
	*raw_cons = tmp_raw_cons;

	return rc;
}
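/*
 * Burst receive handler registered as the device's rx_pkt_burst callback.  It
 * drains up to 'nb_pkts' completions, then rings the completion, Rx and
 * aggregation doorbells once per burst rather than once per packet.
 *
 * Illustrative use from an application (sketch only, assuming an initialized
 * port and mempool; rte_eth_rx_burst() dispatches here for bnxt ports):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	while (n--)
 *		rte_pktmbuf_free(pkts[n]);
 */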
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t prod = rxr->rx_prod;
	uint16_t ag_prod = rxr->ag_prod;

	/* Handle RX burst request */
	while (1) {
		int rc;

		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rte_prefetch0(&cpr->cp_desc_ring[cons]);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;

		/* TODO: Avoid magic numbers... */
		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (likely(!rc))
				nb_rx_pkts++;
			if (rc == -EBUSY)	/* partial completion */
				break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
		/*
		 * For PMD, there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions
		 */
		return nb_rx_pkts;
	}

	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
	B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
	/* Ring the AGG ring DB */
	B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);

	return nb_rx_pkts;
}
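/*
 * Release every Rx queue of the port: descriptor rings, aggregation rings,
 * completion rings and the queue structures themselves.
 */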
void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];

		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		rxq->rx_ring->rx_ring_struct = NULL;
		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;
		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}
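/*
 * Allocate the software bookkeeping for one Rx queue: the Rx ring, its
 * aggregation ring and the completion ring.  Ring sizes are rounded up to a
 * power of two, and the completion ring is sized to absorb both Rx and
 * aggregation completions (factor 2 + AGG_RING_SIZE_FACTOR).
 */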
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
			       (2 * VLAN_TAG_SIZE);
	rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->rx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
					  (2 + AGG_RING_SIZE_FACTOR));
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->ag_buf_ring;

	return 0;
}
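/*
 * Pre-program every buffer descriptor of a ring with the same type and buffer
 * length; 'opaque' is set to the slot index so a completion can be mapped
 * back to its software ring entry.
 */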
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}
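/*
 * Populate one Rx queue for use: write the BD templates, attach an mbuf to
 * every Rx and aggregation descriptor and pre-allocate the per-flow TPA
 * mbufs.  The advertised buffer length is capped at the mempool's data room
 * so the hardware never DMAs past the end of an mbuf.
 */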
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t prod, type;
	unsigned int i;
	uint16_t size;

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	if (rxq->rx_buf_use_size <= size)
		size = rxq->rx_buf_use_size;

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	prod = rxr->rx_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
			RTE_LOG(WARNING, PMD,
				"init'ed rx ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->rx_prod = prod;
		prod = RING_NEXT(rxr->rx_ring_struct, prod);
	}
	RTE_LOG(DEBUG, PMD, "%s\n", __func__);

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	prod = rxr->ag_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
			RTE_LOG(WARNING, PMD,
				"init'ed AG ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->ag_prod = prod;
		prod = RING_NEXT(rxr->ag_ring_struct, prod);
	}
	RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);

	for (i = 0; i < BNXT_TPA_MAX; i++) {
		rxr->tpa_info[i].mbuf =
			__bnxt_alloc_rx_data(rxq->mb_pool);
		if (!rxr->tpa_info[i].mbuf) {
			rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
			return -ENOMEM;
		}
	}
	RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);

	return 0;
}