/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
        struct rte_mbuf *data;

        data = rte_mbuf_raw_alloc(mb);

        return data;
}
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        struct rte_mbuf *mbuf;

        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }

        rx_buf->mbuf = mbuf;
        mbuf->data_off = RTE_PKTMBUF_HEADROOM;

        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        return 0;
}
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
        struct rte_mbuf *mbuf;

        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }

        if (rxbd == NULL)
                PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
        if (rx_buf == NULL)
                PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");

        rx_buf->mbuf = mbuf;
        mbuf->data_off = RTE_PKTMBUF_HEADROOM;

        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        return 0;
}
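
/*
 * Recycle an already-initialized mbuf into the next producer slot of the
 * RX ring instead of returning it to the mempool.
 */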
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
                                      struct rte_mbuf *mbuf)
{
        uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct rx_prod_pkt_bd *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];

        RTE_ASSERT(prod_rx_buf->mbuf == NULL);
        RTE_ASSERT(mbuf != NULL);

        prod_rx_buf->mbuf = mbuf;

        prod_bd = &rxr->rx_desc_ring[prod];

        prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        rxr->rx_prod = prod;
}
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
                               struct rte_mbuf *mbuf)
{
        uint16_t prod = rxr->ag_prod;
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct rx_prod_pkt_bd *prod_bd, *cons_bd;

        prod_rx_buf = &rxr->ag_buf_ring[prod];

        prod_rx_buf->mbuf = mbuf;

        prod_bd = &rxr->ag_desc_ring[prod];
        cons_bd = &rxr->ag_desc_ring[cons];

        prod_bd->address = cons_bd->address;
}
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
                                     uint16_t cons)
{
        struct bnxt_sw_rx_bd *cons_rx_buf;
        struct rte_mbuf *mbuf;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        RTE_ASSERT(cons_rx_buf->mbuf != NULL);
        mbuf = cons_rx_buf->mbuf;
        cons_rx_buf->mbuf = NULL;
        return mbuf;
}
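
/*
 * Handle a TPA_START completion: store the head mbuf in the
 * per-aggregation-ID tpa_info slot, initialize its metadata and offload
 * flags, and recycle replacement buffers into the RX ring until the
 * matching TPA_END completion arrives.
 */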
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
                           struct rx_tpa_start_cmpl *tpa_start,
                           struct rx_tpa_start_cmpl_hi *tpa_start1)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
                RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
        uint16_t data_cons;
        struct bnxt_tpa_info *tpa_info;
        struct rte_mbuf *mbuf;

        data_cons = tpa_start->opaque;
        tpa_info = &rxr->tpa_info[agg_id];

        mbuf = bnxt_consume_rx_buf(rxr, data_cons);

        bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

        tpa_info->mbuf = mbuf;
        tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = PKT_RX_LRO;
        if (likely(tpa_start->flags_type &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
                mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        } else {
                mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
        }
        if (tpa_start1->flags2 &
            rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
                mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
                mbuf->ol_flags |= PKT_RX_VLAN;
        }
        if (likely(tpa_start1->flags2 &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

        /* recycle next mbuf */
        data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
        bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}
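
/*
 * Check that hardware has posted all completions for the aggregation
 * buffers of the current packet before they are consumed.
 */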
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
                               uint8_t agg_bufs, uint32_t raw_cp_cons)
{
        uint16_t last_cp_cons;
        struct rx_pkt_cmpl *agg_cmpl;

        raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
        last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
        agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
        cpr->valid = FLIP_VALID(raw_cp_cons,
                                cpr->cp_ring_struct->ring_mask,
                                cpr->valid);
        return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
/* TPA consumes aggregation buffers out of order, allocate connected data only */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

        /* TODO batch allocation for better performance */
        while (rte_bitmap_get(rxr->ag_bitmap, next)) {
                if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
                        PMD_DRV_LOG(ERR,
                                    "agg mbuf alloc failed: prod=0x%x\n", next);
                        break;
                }
                rte_bitmap_clear(rxr->ag_bitmap, next);
                rxr->ag_prod = next;
                next = RING_NEXT(rxr->ag_ring_struct, next);
        }

        return 0;
}
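
/*
 * Chain the aggregation buffers reported on the completion ring onto the
 * head mbuf and mark the consumed AGG ring slots for refill.
 */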
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
                         struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
                         uint8_t agg_buf)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int i;
        uint16_t cp_cons, ag_cons;
        struct rx_pkt_cmpl *rxcmp;
        struct rte_mbuf *last = mbuf;

        for (i = 0; i < agg_buf; i++) {
                struct bnxt_sw_rx_bd *ag_buf;
                struct rte_mbuf *ag_mbuf;

                *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
                cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
                rxcmp = (struct rx_pkt_cmpl *)
                        &cpr->cp_desc_ring[cp_cons];

#ifdef BNXT_DEBUG
                bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

                ag_cons = rxcmp->opaque;
                RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
                ag_buf = &rxr->ag_buf_ring[ag_cons];
                ag_mbuf = ag_buf->mbuf;
                RTE_ASSERT(ag_mbuf != NULL);

                ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

                mbuf->nb_segs++;
                mbuf->pkt_len += ag_mbuf->data_len;

                last->next = ag_mbuf;
                last = ag_mbuf;

                ag_buf->mbuf = NULL;

                /*
                 * As aggregation buffers are consumed out of order in TPA
                 * mode, use the bitmap to track freed slots that still need
                 * to be refilled and notified to the NIC.
                 */
                rte_bitmap_set(rxr->ag_bitmap, ag_cons);
        }
        bnxt_prod_ag_mbuf(rxq);
        return 0;
}
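
/*
 * Handle a TPA_END completion: attach any remaining aggregation buffers,
 * return the assembled LRO mbuf chain to the caller and provision a fresh
 * mbuf for this aggregation ID.
 */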
static inline struct rte_mbuf *bnxt_tpa_end(
                struct bnxt_rx_queue *rxq,
                uint32_t *raw_cp_cons,
                struct rx_tpa_end_cmpl *tpa_end,
                struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
                        >> RX_TPA_END_CMPL_AGG_ID_SFT;
        struct rte_mbuf *mbuf;
        uint8_t agg_bufs;
        struct bnxt_tpa_info *tpa_info;

        tpa_info = &rxr->tpa_info[agg_id];
        mbuf = tpa_info->mbuf;
        RTE_ASSERT(mbuf != NULL);

        rte_prefetch0(mbuf);
        agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
                RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
                        return NULL;
                bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
        }
        mbuf->l4_len = tpa_end->payload_offset;

        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
        RTE_ASSERT(new_data != NULL);
        if (!new_data) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return NULL;
        }
        tpa_info->mbuf = new_data;

        return mbuf;
}
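
/*
 * Translate the RX completion flags into an rte_mbuf packet_type value
 * (outer/inner L2, L3 and L4 classification).
 */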
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
        uint32_t l3, pkt_type = 0;
        uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
        uint32_t flags_type;

        vlan = !!(rxcmp1->flags2 &
                  rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
        pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

        t_ipcs = !!(rxcmp1->flags2 &
                    rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
        ip6 = !!(rxcmp1->flags2 &
                 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

        flags_type = rxcmp->flags_type &
                     rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

        if (!t_ipcs && !ip6)
                l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        else if (!t_ipcs && ip6)
                l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        else if (t_ipcs && !ip6)
                l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
        else
                l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

        switch (flags_type) {
        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_TCP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_UDP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
                pkt_type |= l3;
                break;
        }

        return pkt_type;
}
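
/*
 * Process one RX completion (regular packet, TPA start or TPA end).
 * Returns 0 on success, -EBUSY for a partial completion that must be
 * retried, -EINVAL when no new mbuf is produced and -ENOMEM when the
 * replacement buffer allocation fails.
 */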
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct rx_pkt_cmpl *rxcmp;
        struct rx_pkt_cmpl_hi *rxcmp1;
        uint32_t tmp_raw_cons = *raw_cons;
        uint16_t cons, prod, cp_cons =
                RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
#ifdef BNXT_DEBUG
        uint16_t ag_cons;
#endif
        struct rte_mbuf *mbuf;
        int rc = 0;
        uint8_t agg_buf = 0;
        uint16_t cmp_type;

        rxcmp = (struct rx_pkt_cmpl *)
                &cpr->cp_desc_ring[cp_cons];

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
        rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

        if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
                return -EBUSY;

        cpr->valid = FLIP_VALID(cp_cons,
                                cpr->cp_ring_struct->ring_mask,
                                cpr->valid);

        cmp_type = CMP_TYPE(rxcmp);
        if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
                bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
                               (struct rx_tpa_start_cmpl_hi *)rxcmp1);
                rc = -EINVAL; /* Continue w/o new mbuf */
                goto next_rx;
        } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
                mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
                                    (struct rx_tpa_end_cmpl *)rxcmp,
                                    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
                if (unlikely(!mbuf))
                        return -EBUSY;
                *rx_pkt = mbuf;
                goto next_rx;
        } else if (cmp_type != 0x11) {
                rc = -EINVAL;
                goto next_rx;
        }

        agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
                        >> RX_PKT_CMPL_AGG_BUFS_SFT;
        if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
                return -EBUSY;

        prod = rxr->rx_prod;

        cons = rxcmp->opaque;
        mbuf = bnxt_consume_rx_buf(rxr, cons);
        if (mbuf == NULL)
                return -EBUSY;

        rte_prefetch0(mbuf);
        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rxcmp->len;
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = 0;
        if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
                mbuf->hash.rss = rxcmp->rss_hash;
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        } else {
                mbuf->hash.fdir.id = rxcmp1->cfa_code;
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
        }

        if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
             RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
                mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;

        if (agg_buf)
                bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);

        if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
                mbuf->vlan_tci = rxcmp1->metadata &
                        (RX_PKT_CMPL_METADATA_VID_MASK |
                         RX_PKT_CMPL_METADATA_DE |
                         RX_PKT_CMPL_METADATA_PRI_MASK);
                mbuf->ol_flags |= PKT_RX_VLAN;
        }

        if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
        else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
        else
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
        else
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

        mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

#ifdef BNXT_DEBUG
        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
                /* Re-install the mbuf back to the rx ring */
                bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
                if (agg_buf)
                        bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);

                rc = -EIO;
                goto next_rx;
        }
#endif
        /*
         * TODO: Redesign this....
         * If the allocation fails, the packet does not get received.
         * Simply returning this will result in slowly falling behind
         * on the producer ring buffers.
         * Instead, "filling up" the producer just before ringing the
         * doorbell could be a better solution since it will let the
         * producer ring starve until memory is available again, pushing
         * the drops into hardware and getting them out of the driver,
         * allowing recovery to a full producer ring.
         *
         * This could also help with cache usage by preventing per-packet
         * calls in favour of a tight loop with the same function being called
         * in it.
         */
        prod = RING_NEXT(rxr->rx_ring_struct, prod);
        if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
                PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
                rc = -ENOMEM;
                goto rx;
        }
        rxr->rx_prod = prod;
        /*
         * All MBUFs are allocated with the same size under DPDK,
         * no optimization for rx_copy_thresh
         */
rx:
        *rx_pkt = mbuf;

next_rx:
        *raw_cons = tmp_raw_cons;

        return rc;
}
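
/*
 * Burst receive handler registered as the rx_pkt_burst callback.
 * Walks the completion ring, hands each RX completion to bnxt_rx_pkt()
 * and rings the RX/AGG/CP doorbells once per burst rather than per packet.
 */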
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        int nb_rx_pkts = 0;
        struct rx_pkt_cmpl *rxcmp;
        uint16_t prod = rxr->rx_prod;
        uint16_t ag_prod = rxr->ag_prod;
        int rc = 0;
        bool evt = false;

        /* If Rx Q was stopped return. RxQ0 cannot be stopped. */
        if (unlikely(((rxq->rx_deferred_start ||
                       !rte_spinlock_trylock(&rxq->lock)) &&
                      rxq->queue_id)))
                return 0;

        /* Handle RX burst request */
        while (1) {
                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
                rte_prefetch0(&cpr->cp_desc_ring[cons]);
                rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

                if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
                        break;
                cpr->valid = FLIP_VALID(cons,
                                        cpr->cp_ring_struct->ring_mask,
                                        cpr->valid);

                /* TODO: Avoid magic numbers... */
                if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
                        if (likely(!rc) || rc == -ENOMEM)
                                nb_rx_pkts++;
                        if (rc == -EBUSY) /* partial completion */
                                break;
                } else {
                        evt =
                        bnxt_event_hwrm_resp_handler(rxq->bp,
                                                     (struct cmpl_base *)rxcmp);
                }

                raw_cons = NEXT_RAW_CMP(raw_cons);
                if (nb_rx_pkts == nb_pkts || evt)
                        break;
                /* Post some Rx buf early in case of larger burst processing */
                if (nb_rx_pkts == BNXT_RX_POST_THRESH)
                        B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
        }
        cpr->cp_raw_cons = raw_cons;
        if (!nb_rx_pkts && !evt) {
                /*
                 * For PMD, there is no need to keep on pushing to REARM
                 * the doorbell if there are no new completions
                 */
                goto done;
        }

        if (prod != rxr->rx_prod)
                B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

        /* Ring the AGG ring DB */
        if (ag_prod != rxr->ag_prod)
                B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);

        B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
        /* Attempt to alloc Rx buf in case of a previous allocation failure. */
        if (rc == -ENOMEM) {
                int i;

                for (i = prod; i <= nb_rx_pkts;
                     i = RING_NEXT(rxr->rx_ring_struct, i)) {
                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];

                        /* Buffer already allocated for this index. */
                        if (rx_buf->mbuf != NULL)
                                continue;

                        /* This slot is empty. Alloc buffer for Rx */
                        if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
                                rxr->rx_prod = i;
                                B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
                        } else {
                                PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
                                break;
                        }
                }
        }

done:
        rte_spinlock_unlock(&rxq->lock);

        return nb_rx_pkts;
}
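
/* Free the RX, aggregation and completion ring structures of every RX queue. */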
void bnxt_free_rx_rings(struct bnxt *bp)
{
        int i;
        struct bnxt_rx_queue *rxq;

        if (!bp->rx_queues)
                return;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                if (!rxq)
                        continue;

                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);

                /* Free the Aggregator ring */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);

                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring);

                rte_free(rxq);
                bp->rx_queues[i] = NULL;
        }
}
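
/*
 * Allocate the per-queue software ring state and set the size, mask and
 * vmem bookkeeping for the RX, completion and aggregation rings of one
 * RX queue.
 */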
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
        struct bnxt_cp_ring_info *cpr;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;

        rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
                               (2 * VLAN_TAG_SIZE);
        rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);

        rxr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_rx_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxr == NULL)
                return -ENOMEM;
        rxq->rx_ring = rxr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->rx_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->rx_desc_ring;
        ring->bd_dma = rxr->rx_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->rx_buf_ring;

        cpr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_cp_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (cpr == NULL)
                return -ENOMEM;
        rxq->cp_ring = cpr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        cpr->cp_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
                                          (2 + AGG_RING_SIZE_FACTOR));
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)cpr->cp_desc_ring;
        ring->bd_dma = cpr->cp_desc_mapping;
        ring->vmem_size = 0;
        ring->vmem = NULL;

        /* Allocate Aggregator rings */
        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->ag_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
                                          AGG_RING_SIZE_FACTOR);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->ag_desc_ring;
        ring->bd_dma = rxr->ag_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->ag_buf_ring;

        return 0;
}
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
                            uint16_t len)
{
        uint32_t j;
        struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

        if (!rx_bd_ring)
                return;
        for (j = 0; j < ring->ring_size; j++) {
                rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
                rx_bd_ring[j].len = rte_cpu_to_le_16(len);
                rx_bd_ring[j].opaque = j;
        }
}
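
/*
 * Populate one RX queue: write the buffer descriptor templates and fill the
 * RX and aggregation rings (plus the TPA slots) with freshly allocated mbufs.
 */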
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;
        uint32_t prod, type;
        unsigned int i;
        uint16_t size;

        size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
        if (rxq->rx_buf_use_size <= size)
                size = rxq->rx_buf_use_size;

        type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

        rxr = rxq->rx_ring;
        ring = rxr->rx_ring_struct;
        bnxt_init_rxbds(ring, type, size);

        prod = rxr->rx_prod;
        for (i = 0; i < ring->ring_size; i++) {
                if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
                        PMD_DRV_LOG(WARNING,
                                    "init'ed rx ring %d with %d/%d mbufs only\n",
                                    rxq->queue_id, i, ring->ring_size);
                        break;
                }
                rxr->rx_prod = prod;
                prod = RING_NEXT(rxr->rx_ring_struct, prod);
        }

        ring = rxr->ag_ring_struct;
        type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
        bnxt_init_rxbds(ring, type, size);
        prod = rxr->ag_prod;

        for (i = 0; i < ring->ring_size; i++) {
                if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
                        PMD_DRV_LOG(WARNING,
                                    "init'ed AG ring %d with %d/%d mbufs only\n",
                                    rxq->queue_id, i, ring->ring_size);
                        break;
                }
                rxr->ag_prod = prod;
                prod = RING_NEXT(rxr->ag_ring_struct, prod);
        }
        PMD_DRV_LOG(DEBUG, "AGG Done!\n");

        if (rxr->tpa_info) {
                for (i = 0; i < BNXT_TPA_MAX; i++) {
                        rxr->tpa_info[i].mbuf =
                                __bnxt_alloc_rx_data(rxq->mb_pool);
                        if (!rxr->tpa_info[i].mbuf) {
                                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                                return -ENOMEM;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

        return 0;
}