1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
9 #include <rte_bitmap.h>
10 #include <rte_byteorder.h>
11 #include <rte_malloc.h>
12 #include <rte_memory.h>
16 #include "bnxt_ring.h"
19 #include "hsi_struct_def_dpdk.h"
20 #ifdef RTE_LIBRTE_IEEE1588
21 #include "bnxt_hwrm.h"
28 static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
30 struct rte_mbuf *data;
32 data = rte_mbuf_raw_alloc(mb);
37 static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
38 struct bnxt_rx_ring_info *rxr,
41 struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
42 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
43 struct rte_mbuf *mbuf;
45 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
47 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
52 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
54 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
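	/*
	 * Note: __bnxt_alloc_rx_data() uses rte_mbuf_raw_alloc(), which skips
	 * the per-mbuf reset done by rte_pktmbuf_alloc(), so the fields the
	 * hardware cares about are written by hand above. A minimal sketch of
	 * the same refill step using the generic helper (illustrative only,
	 * not driver code):
	 *
	 *	struct rte_mbuf *m = rte_pktmbuf_alloc(rxq->mb_pool);
	 *
	 *	if (m == NULL)
	 *		return -ENOMEM;
	 *	rx_buf->mbuf = m;
	 *	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(m));
	 */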
59 static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
60 struct bnxt_rx_ring_info *rxr,
63 struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
64 struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
65 struct rte_mbuf *mbuf;
67 mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
69 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
74 PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
76 PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
80 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
82 rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
87 static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
88 struct rte_mbuf *mbuf)
90 uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
91 struct bnxt_sw_rx_bd *prod_rx_buf;
92 struct rx_prod_pkt_bd *prod_bd;
94 prod_rx_buf = &rxr->rx_buf_ring[prod];
96 RTE_ASSERT(prod_rx_buf->mbuf == NULL);
97 RTE_ASSERT(mbuf != NULL);
99 prod_rx_buf->mbuf = mbuf;
101 prod_bd = &rxr->rx_desc_ring[prod];
103 prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
109 struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
112 struct bnxt_sw_rx_bd *cons_rx_buf;
113 struct rte_mbuf *mbuf;
115 cons_rx_buf = &rxr->rx_buf_ring[cons];
116 RTE_ASSERT(cons_rx_buf->mbuf != NULL);
117 mbuf = cons_rx_buf->mbuf;
118 cons_rx_buf->mbuf = NULL;
122 static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
123 struct rx_tpa_start_cmpl *tpa_start,
124 struct rx_tpa_start_cmpl_hi *tpa_start1)
126 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
127 uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
128 RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
130 struct bnxt_tpa_info *tpa_info;
131 struct rte_mbuf *mbuf;
133 data_cons = tpa_start->opaque;
134 tpa_info = &rxr->tpa_info[agg_id];
136 mbuf = bnxt_consume_rx_buf(rxr, data_cons);
138 bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
140 tpa_info->mbuf = mbuf;
141 tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
145 mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
146 mbuf->data_len = mbuf->pkt_len;
147 mbuf->port = rxq->port_id;
148 mbuf->ol_flags = PKT_RX_LRO;
149 if (likely(tpa_start->flags_type &
150 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
151 mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
152 mbuf->ol_flags |= PKT_RX_RSS_HASH;
154 mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
155 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
157 if (tpa_start1->flags2 &
158 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
159 mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
160 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
162 if (likely(tpa_start1->flags2 &
163 rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
164 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
166 /* recycle next mbuf */
167 data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
168 bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
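	/*
	 * The TPA (LRO) path above is only exercised when the application has
	 * enabled LRO on the port. A minimal configuration sketch using the
	 * generic ethdev API (port_id, nb_rxq and nb_txq are placeholders):
	 *
	 *	struct rte_eth_conf conf = { 0 };
	 *
	 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	 */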
171 static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
172 uint8_t agg_bufs, uint32_t raw_cp_cons)
174 uint16_t last_cp_cons;
175 struct rx_pkt_cmpl *agg_cmpl;
177 raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
178 last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
179 agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
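	/*
	 * Completion descriptors carry a "valid" bit whose expected polarity
	 * flips each time the consumer index wraps the completion ring;
	 * CMP_VALID()/FLIP_VALID() below compare the bit in the descriptor
	 * against that expected phase so stale entries from the previous lap
	 * are not mistaken for new work. Conceptually (sketch only, names are
	 * local to this comment):
	 *
	 *	bool expected = !(raw_cons & ring_size);  // toggles per wrap
	 *	bool valid = !!(cmpl->info3_v & CMPL_BASE_V);
	 *	bool new_work = (valid == expected);
	 */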
180 cpr->valid = FLIP_VALID(raw_cp_cons,
181 cpr->cp_ring_struct->ring_mask,
183 return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
186 /* TPA consumes agg buffers out of order; replenish only the freed slots tracked in the ag bitmap */
187 static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
189 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
190 uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
192 /* TODO batch allocation for better performance */
193 while (rte_bitmap_get(rxr->ag_bitmap, next)) {
194 if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
196 "agg mbuf alloc failed: prod=0x%x\n", next);
199 rte_bitmap_clear(rxr->ag_bitmap, next);
201 next = RING_NEXT(rxr->ag_ring_struct, next);
207 static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
208 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
211 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
212 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
214 uint16_t cp_cons, ag_cons;
215 struct rx_pkt_cmpl *rxcmp;
216 struct rte_mbuf *last = mbuf;
218 for (i = 0; i < agg_buf; i++) {
219 struct bnxt_sw_rx_bd *ag_buf;
220 struct rte_mbuf *ag_mbuf;
221 *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
222 cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
223 rxcmp = (struct rx_pkt_cmpl *)
224 &cpr->cp_desc_ring[cp_cons];
227 bnxt_dump_cmpl(cp_cons, rxcmp);
230 ag_cons = rxcmp->opaque;
231 RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
232 ag_buf = &rxr->ag_buf_ring[ag_cons];
233 ag_mbuf = ag_buf->mbuf;
234 RTE_ASSERT(ag_mbuf != NULL);
236 ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
239 mbuf->pkt_len += ag_mbuf->data_len;
241 last->next = ag_mbuf;
247  * As aggregation buffers are consumed out of order by the TPA module,
248  * use a bitmap to track the freed slots that must be reallocated and notified
251 rte_bitmap_set(rxr->ag_bitmap, ag_cons);
253 bnxt_prod_ag_mbuf(rxq);
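	/*
	 * ag_bitmap usage in the two functions above: bnxt_rx_pages() sets a
	 * bit when an aggregation slot has been consumed, and
	 * bnxt_prod_ag_mbuf() clears it once the slot has been refilled. A
	 * condensed view of the pattern with the rte_bitmap API (refill() is
	 * a placeholder, not a real helper):
	 *
	 *	rte_bitmap_set(bmp, slot);          // consumer: slot is empty
	 *	...
	 *	if (rte_bitmap_get(bmp, slot)) {    // producer: needs a buffer
	 *		refill(slot);
	 *		rte_bitmap_clear(bmp, slot);
	 *	}
	 */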
257 static inline struct rte_mbuf *bnxt_tpa_end(
258 struct bnxt_rx_queue *rxq,
259 uint32_t *raw_cp_cons,
260 struct rx_tpa_end_cmpl *tpa_end,
261 struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
263 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
264 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
265 uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
266 >> RX_TPA_END_CMPL_AGG_ID_SFT;
267 struct rte_mbuf *mbuf;
269 struct bnxt_tpa_info *tpa_info;
271 tpa_info = &rxr->tpa_info[agg_id];
272 mbuf = tpa_info->mbuf;
273 RTE_ASSERT(mbuf != NULL);
276 agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
277 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
279 if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
281 bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
283 mbuf->l4_len = tpa_end->payload_offset;
285 struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
286 RTE_ASSERT(new_data != NULL);
288 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
291 tpa_info->mbuf = new_data;
297 bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
299 uint32_t l3, pkt_type = 0;
300 uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
303 vlan = !!(rxcmp1->flags2 &
304 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
305 pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
307 t_ipcs = !!(rxcmp1->flags2 &
308 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
309 ip6 = !!(rxcmp1->flags2 &
310 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
312 flags_type = rxcmp->flags_type &
313 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
316 l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
317 else if (!t_ipcs && ip6)
318 l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
319 else if (t_ipcs && !ip6)
320 l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
322 l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
324 switch (flags_type) {
325 case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
327 pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
329 pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
332 case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
334 pkt_type |= l3 | RTE_PTYPE_L4_TCP;
336 pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
339 case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
341 pkt_type |= l3 | RTE_PTYPE_L4_UDP;
343 pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
346 case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
354 #ifdef RTE_LIBRTE_IEEE1588
356 bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
358 uint64_t systime_cycles = 0;
360 if (!BNXT_CHIP_THOR(bp))
363	/* On Thor, Rx timestamps are provided directly in the
364	 * Rx completion records to the driver. Only 32 bits of
365	 * the timestamp are present in the completion. The driver
366	 * needs to read the current 48-bit free-running timer using
367	 * the HWRM_PORT_TS_QUERY command and combine the upper 16 bits
368	 * from the HWRM response with the lower 32 bits from the
369	 * Rx completion to produce the 48-bit timestamp for the Rx packet.
371 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
373 bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
374 bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
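	/*
	 * Worked example of the combine above, with illustrative values: if
	 * the free-running timer read via HWRM is 0x0000123487654321 and the
	 * completion carries rx_ts_cmpl = 0x00ABCDEF, then
	 *
	 *	rx_timestamp  = 0x0000123487654321 & 0xFFFF00000000
	 *	              = 0x0000123400000000
	 *	rx_timestamp |= 0x00ABCDEF  ->  0x0000123400ABCDEF
	 *
	 * i.e. the upper 16 bits come from the timer and the lower 32 bits
	 * from the completion record.
	 */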
378 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
379 struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
381 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
382 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
383 struct rx_pkt_cmpl *rxcmp;
384 struct rx_pkt_cmpl_hi *rxcmp1;
385 uint32_t tmp_raw_cons = *raw_cons;
386 uint16_t cons, prod, cp_cons =
387 RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
388 struct rte_mbuf *mbuf;
392 uint32_t flags2_f = 0;
395 rxcmp = (struct rx_pkt_cmpl *)
396 &cpr->cp_desc_ring[cp_cons];
398 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
399 cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
400 rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
402 if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
405 cpr->valid = FLIP_VALID(cp_cons,
406 cpr->cp_ring_struct->ring_mask,
409 cmp_type = CMP_TYPE(rxcmp);
410 if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
411 bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
412 (struct rx_tpa_start_cmpl_hi *)rxcmp1);
413 rc = -EINVAL; /* Continue w/o new mbuf */
415 } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
416 mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
417 (struct rx_tpa_end_cmpl *)rxcmp,
418 (struct rx_tpa_end_cmpl_hi *)rxcmp1);
423 } else if (cmp_type != 0x11) {
428 agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
429 >> RX_PKT_CMPL_AGG_BUFS_SFT;
430 if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
435 cons = rxcmp->opaque;
436 mbuf = bnxt_consume_rx_buf(rxr, cons);
442 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
445 mbuf->pkt_len = rxcmp->len;
446 mbuf->data_len = mbuf->pkt_len;
447 mbuf->port = rxq->port_id;
450 flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
451 if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
452 mbuf->hash.rss = rxcmp->rss_hash;
453 mbuf->ol_flags |= PKT_RX_RSS_HASH;
455 mbuf->hash.fdir.id = rxcmp1->cfa_code;
456 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
458 #ifdef RTE_LIBRTE_IEEE1588
459 if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
460 RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
461 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
462 bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
466 bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
468 if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
469 mbuf->vlan_tci = rxcmp1->metadata &
470 (RX_PKT_CMPL_METADATA_VID_MASK |
471 RX_PKT_CMPL_METADATA_DE |
472 RX_PKT_CMPL_METADATA_PRI_MASK);
473 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
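	/*
	 * Assuming the usual 802.1Q TCI layout behind the mask names above
	 * (VID in bits 0-11, DE in bit 12, PRI in bits 13-15), a worked
	 * example with an illustrative value: metadata = 0x6064 yields
	 * vlan_tci = 0x6064, i.e. priority 3, DE 0, VLAN ID 100.
	 */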
476 flags2_f = flags2_0xf(rxcmp1);
478 if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) &&
479 (RX_CMP_IP_CS_ERROR(rxcmp1))) ||
480 (IS_IP_TUNNEL_PKT(flags2_f) &&
481 (RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) {
482 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
483 } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
484 mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
486 mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
490 if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) {
491 if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
492 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
494 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
495 } else if (IS_L4_TUNNEL_PKT(flags2_f)) {
496 if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
497 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
499 mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
500 if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
501 mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
502 } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
504 mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
506 mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
508 } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
509 mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
512 mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
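	/*
	 * Example of the resulting packet_type for a plain, non-tunneled
	 * TCP/IPv4 frame without a VLAN tag, per bnxt_parse_pkt_type():
	 *
	 *	RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	 *	RTE_PTYPE_L4_TCP
	 *
	 * The same flow arriving inside a tunnel (t_ipcs set) is reported
	 * with the RTE_PTYPE_INNER_* L3/L4 variants instead.
	 */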
515 if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
516 /* Re-install the mbuf on the Rx ring */
517 bnxt_reuse_rx_mbuf(rxr, mbuf);
524 * TODO: Redesign this...
525 * If the allocation fails, the packet does not get received.
526 * Simply returning in that case will result in slowly falling behind
527 * on the producer ring buffers.
528 * Instead, "filling up" the producer just before ringing the
529 * doorbell could be a better solution, since it will let the
530 * producer ring starve until memory is available again, pushing
531 * the drops into hardware and getting them out of the driver,
532 * allowing recovery to a full producer ring.
534 * This could also help with cache usage by preventing per-packet
535 * calls in favour of a tight loop with the same function being called
538 prod = RING_NEXT(rxr->rx_ring_struct, prod);
539 if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
540 PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
546 * All mbufs are allocated with the same size under DPDK,
547 * so there is no optimization for rx_copy_thresh
554 *raw_cons = tmp_raw_cons;
559 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
562 struct bnxt_rx_queue *rxq = rx_queue;
563 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
564 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
565 uint32_t raw_cons = cpr->cp_raw_cons;
568 struct rx_pkt_cmpl *rxcmp;
569 uint16_t prod = rxr->rx_prod;
570 uint16_t ag_prod = rxr->ag_prod;
574 if (unlikely(is_bnxt_in_error(rxq->bp)))
577 /* If the Rx queue was stopped, return */
578 if (unlikely(!rxq->rx_started ||
579 !rte_spinlock_trylock(&rxq->lock)))
582 /* Handle RX burst request */
584 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
585 rte_prefetch0(&cpr->cp_desc_ring[cons]);
586 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
588 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
590 cpr->valid = FLIP_VALID(cons,
591 cpr->cp_ring_struct->ring_mask,
594 /* TODO: Avoid magic numbers... */
595 if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
596 rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
597 if (likely(!rc) || rc == -ENOMEM)
599 if (rc == -EBUSY) /* partial completion */
601 } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
603 bnxt_event_hwrm_resp_handler(rxq->bp,
604 (struct cmpl_base *)rxcmp);
607 raw_cons = NEXT_RAW_CMP(raw_cons);
608 if (nb_rx_pkts == nb_pkts || evt)
610 /* Post some Rx buffers early when processing larger bursts */
611 if (nb_rx_pkts == BNXT_RX_POST_THRESH)
612 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
615 cpr->cp_raw_cons = raw_cons;
616 if (!nb_rx_pkts && !evt) {
618 * For the PMD, there is no need to keep rearming the
619 * doorbell if there are no new completions
624 if (prod != rxr->rx_prod)
625 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
627 /* Ring the AGG ring DB */
628 if (ag_prod != rxr->ag_prod)
629 bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
633 /* Attempt to allocate Rx buffers in case of a previous allocation failure. */
637 for (i = prod; i <= nb_rx_pkts;
638 i = RING_NEXT(rxr->rx_ring_struct, i)) {
639 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
641 /* Buffer already allocated for this index. */
642 if (rx_buf->mbuf != NULL)
645 /* This slot is empty. Alloc buffer for Rx */
646 if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
648 bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
650 PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
657 rte_spinlock_unlock(&rxq->lock);
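/*
 * bnxt_recv_pkts() is installed as the ethdev Rx burst callback, so the
 * standard polling loop applies on the application side. A minimal usage
 * sketch (port_id, queue_id and BURST_SZ are placeholders):
 *
 *	struct rte_mbuf *pkts[BURST_SZ];
 *	uint16_t i, n;
 *
 *	n = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SZ);
 *	for (i = 0; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);  // or hand off for processing
 */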
663 * Dummy DPDK callback for RX.
665 * This function is used to temporarily replace the real callback during
666 * unsafe control operations on the queue, or in case of error.
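 *
 * A sketch of how the swap is typically performed on the ethdev burst
 * callback pointer (illustrative; assumes access to the rte_eth_dev handle):
 *
 *	eth_dev->rx_pkt_burst = bnxt_dummy_recv_pkts;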
669 bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
670 struct rte_mbuf **rx_pkts __rte_unused,
671 uint16_t nb_pkts __rte_unused)
676 void bnxt_free_rx_rings(struct bnxt *bp)
679 struct bnxt_rx_queue *rxq;
684 for (i = 0; i < (int)bp->rx_nr_rings; i++) {
685 rxq = bp->rx_queues[i];
689 bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
690 rte_free(rxq->rx_ring->rx_ring_struct);
692 /* Free the Aggregator ring */
693 bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
694 rte_free(rxq->rx_ring->ag_ring_struct);
695 rxq->rx_ring->ag_ring_struct = NULL;
697 rte_free(rxq->rx_ring);
699 bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
700 rte_free(rxq->cp_ring->cp_ring_struct);
701 rte_free(rxq->cp_ring);
704 bp->rx_queues[i] = NULL;
708 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
710 struct bnxt_cp_ring_info *cpr;
711 struct bnxt_cp_ring_info *nqr;
712 struct bnxt_rx_ring_info *rxr;
713 struct bnxt_ring *ring;
715 rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
717 rxr = rte_zmalloc_socket("bnxt_rx_ring",
718 sizeof(struct bnxt_rx_ring_info),
719 RTE_CACHE_LINE_SIZE, socket_id);
724 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
725 sizeof(struct bnxt_ring),
726 RTE_CACHE_LINE_SIZE, socket_id);
729 rxr->rx_ring_struct = ring;
730 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
731 ring->ring_mask = ring->ring_size - 1;
732 ring->bd = (void *)rxr->rx_desc_ring;
733 ring->bd_dma = rxr->rx_desc_mapping;
734 ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
735 ring->vmem = (void **)&rxr->rx_buf_ring;
737 cpr = rte_zmalloc_socket("bnxt_rx_ring",
738 sizeof(struct bnxt_cp_ring_info),
739 RTE_CACHE_LINE_SIZE, socket_id);
744 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
745 sizeof(struct bnxt_ring),
746 RTE_CACHE_LINE_SIZE, socket_id);
749 cpr->cp_ring_struct = ring;
750 ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
751 (2 + AGG_RING_SIZE_FACTOR));
752 ring->ring_mask = ring->ring_size - 1;
753 ring->bd = (void *)cpr->cp_desc_ring;
754 ring->bd_dma = cpr->cp_desc_mapping;
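	/*
	 * The completion ring must cover completions for both the Rx ring and
	 * its aggregation buffers, and each received packet consumes two
	 * completion descriptors (rx_pkt_cmpl plus rx_pkt_cmpl_hi), which is
	 * where the (2 + AGG_RING_SIZE_FACTOR) multiplier comes from. Worked
	 * example, assuming AGG_RING_SIZE_FACTOR is 2 and nb_rx_desc is 256:
	 *
	 *	rx ring size = rte_align32pow2(256)           = 256
	 *	cp ring size = rte_align32pow2(256 * (2 + 2)) = 1024
	 */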
758 if (BNXT_HAS_NQ(rxq->bp)) {
759 nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
760 sizeof(struct bnxt_cp_ring_info),
761 RTE_CACHE_LINE_SIZE, socket_id);
767 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
768 sizeof(struct bnxt_ring),
769 RTE_CACHE_LINE_SIZE, socket_id);
773 nqr->cp_ring_struct = ring;
775 rte_align32pow2(rxr->rx_ring_struct->ring_size *
776 (2 + AGG_RING_SIZE_FACTOR));
777 ring->ring_mask = ring->ring_size - 1;
778 ring->bd = (void *)nqr->cp_desc_ring;
779 ring->bd_dma = nqr->cp_desc_mapping;
784 /* Allocate the aggregation ring */
785 ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
786 sizeof(struct bnxt_ring),
787 RTE_CACHE_LINE_SIZE, socket_id);
790 rxr->ag_ring_struct = ring;
791 ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
792 AGG_RING_SIZE_FACTOR);
793 ring->ring_mask = ring->ring_size - 1;
794 ring->bd = (void *)rxr->ag_desc_ring;
795 ring->bd_dma = rxr->ag_desc_mapping;
796 ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
797 ring->vmem = (void **)&rxr->ag_buf_ring;
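	/*
	 * The aggregation ring provides the extra buffers used for jumbo
	 * frames and TPA, sized at AGG_RING_SIZE_FACTOR buffers per Rx
	 * descriptor. Continuing the example above (AGG_RING_SIZE_FACTOR
	 * assumed to be 2, nb_rx_desc 256):
	 *
	 *	ag ring size = rte_align32pow2(256 * 2) = 512
	 */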
802 static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
806 struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
810 for (j = 0; j < ring->ring_size; j++) {
811 rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
812 rx_bd_ring[j].len = rte_cpu_to_le_16(len);
813 rx_bd_ring[j].opaque = j;
817 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
819 struct bnxt_rx_ring_info *rxr;
820 struct bnxt_ring *ring;
825 size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
826 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
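	/*
	 * Worked example for the buffer length computed above: with a mempool
	 * created with the default RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes) and
	 * the default 128-byte RTE_PKTMBUF_HEADROOM,
	 *
	 *	size = 2176 - 128 = 2048 bytes per receive buffer,
	 *
	 * which is well below BNXT_MAX_PKT_LEN, so the RTE_MIN() cap does not
	 * kick in.
	 */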
828 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
831 ring = rxr->rx_ring_struct;
832 bnxt_init_rxbds(ring, type, size);
835 for (i = 0; i < ring->ring_size; i++) {
836 if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
838 "init'ed rx ring %d with %d/%d mbufs only\n",
839 rxq->queue_id, i, ring->ring_size);
843 prod = RING_NEXT(rxr->rx_ring_struct, prod);
846 ring = rxr->ag_ring_struct;
847 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
848 bnxt_init_rxbds(ring, type, size);
851 for (i = 0; i < ring->ring_size; i++) {
852 if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
854 "init'ed AG ring %d with %d/%d mbufs only\n",
855 rxq->queue_id, i, ring->ring_size);
859 prod = RING_NEXT(rxr->ag_ring_struct, prod);
861 PMD_DRV_LOG(DEBUG, "AGG Done!\n");
864 for (i = 0; i < BNXT_TPA_MAX; i++) {
865 rxr->tpa_info[i].mbuf =
866 __bnxt_alloc_rx_data(rxq->mb_pool);
867 if (!rxr->tpa_info[i].mbuf) {
868 rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
873 PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");