/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}

static void bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));
}
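
/*
 * Note: HWRM_NA_SIGNATURE is ((uint32_t)-1), so the byte-wise memset
 * above fills every byte of each group entry with 0xff; any 16- or
 * 32-bit ring/stats ID in bnxt_ring_grp_info then reads back as the
 * "not allocated" sentinel without assigning each member by hand.
 */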

int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		return -EBUSY;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		return -ENOMEM;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (bp->grp_info == NULL) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc grp info tbl.\n");
			return -ENOMEM;
		}
		bnxt_init_ring_grps(bp);
	}

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info
 * to skip allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
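
/*
 * A rough layout sketch (illustrative, not hardware-derived): each
 * component above gets a *_start byte offset into one IOVA-contiguous
 * memzone; absent components contribute zero length, and each bd ring
 * start is additionally aligned up to a 4 KB boundary.
 */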
int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	int ag_ring_start, ag_bitmap_start, tpa_info_start;
	int ag_vmem_start, cp_ring_start, nq_ring_start;
	int total_alloc_len, rx_ring_start, rx_ring_len;
	struct rte_pci_device *pdev = bp->pdev;
	struct bnxt_ring *tx_ring, *rx_ring;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;
	int ag_bitmap_len = 0;
	int tpa_info_len = 0;
	int ag_vmem_len = 0;
	int ag_ring_len = 0;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = nq_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
					tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				       rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	if (bnxt_need_agg_ring(bp->eth_dev))
		ag_vmem_len = rx_ring_info && rx_ring_info->ag_ring_struct ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->ag_ring_struct->vmem_size) : 0;

	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = nq_ring_info ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	rx_ring_start = tx_ring_start + tx_ring_len;
	rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
	rx_ring_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				       sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	ag_ring_start = rx_ring_start + rx_ring_len;
	ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);

	if (bnxt_need_agg_ring(bp->eth_dev)) {
		ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
		ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

		ag_bitmap_len = rx_ring_info ?
			RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
				rx_ring_info->rx_ring_struct->ring_size *
				AGG_RING_SIZE_FACTOR)) : 0;

		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
			int tpa_max = BNXT_TPA_MAX_AGGS(bp);

			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
			tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
		}
	}

	ag_bitmap_start = ag_ring_start + ag_ring_len;
	tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	total_alloc_len = tpa_info_start + tpa_info_len;
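
	/*
	 * Worked example (hypothetical sizes): with zero-length vmem
	 * areas, a 4 KB cp bd ring, a 4 KB tx bd ring, a 4 KB rx bd
	 * ring, and no agg ring, the starts come out as 4096/8192/12288
	 * and total_alloc_len is 16384, all backed by the single
	 * memzone reserved below.
	 */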
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
				socket_id,
				RTE_MEMZONE_2MB |
				RTE_MEMZONE_SIZE_HINT_ONLY |
				RTE_MEMZONE_IOVA_CONTIG,
				getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct rte_mbuf **)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct rte_mbuf **)rx_ring->vmem;
		}

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			rx_ring = rx_ring_info->ag_ring_struct;

			rx_ring->bd = ((char *)mz->addr + ag_ring_start);
			rx_ring_info->ag_desc_ring =
			    (struct rx_prod_pkt_bd *)rx_ring->bd;
			rx_ring->bd_dma = mz->iova + ag_ring_start;
			rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
			rx_ring->mem_zone = (const void *)mz;

			if (!rx_ring->bd)
				return -ENOMEM;
			if (rx_ring->vmem_size) {
				rx_ring->vmem =
				    (void **)((char *)mz->addr + ag_vmem_start);
				rx_ring_info->ag_buf_ring =
				    (struct rte_mbuf **)rx_ring->vmem;
			}

			rx_ring_info->ag_bitmap =
			    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
					    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
					    ag_bitmap_start, ag_bitmap_len);

			/* TPA info */
			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
				rx_ring_info->tpa_info =
					((struct bnxt_tpa_info *)
					 ((char *)mz->addr + tpa_info_start));
		}
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;

	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in micro seconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
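
/*
 * These defaults are staging values only; they take effect when the
 * struct is handed to the firmware via bnxt_hwrm_set_ring_coal()
 * against a live completion ring, as bnxt_alloc_hwrm_rx_ring() and
 * bnxt_alloc_hwrm_tx_ring() do below.
 */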

static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid,
			uint32_t ring_mask)
{
	if (BNXT_CHIP_P5(bp)) {
		int db_offset = DB_PF_OFFSET;

		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}

		if (BNXT_CHIP_SR2(bp)) {
			db->db_key64 |= DBR_VALID;
			db_offset = bp->legacy_db_size;
		} else if (BNXT_VF(bp)) {
			db_offset = DB_VF_OFFSET;
		}

		db->doorbell = (char *)bp->doorbell_base + db_offset;
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
	db->db_ring_mask = ring_mask;

	if (BNXT_CHIP_SR2(bp)) {
		db->db_epoch_mask = db->db_ring_mask + 1;
		db->db_epoch_shift = DBR_EPOCH_SFT -
					rte_log2_u32(db->db_epoch_mask);
	}
}
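
/*
 * Key composition example (sketch): on a P5 PF, a TX ring with
 * firmware ring ID 7 ends up with
 *   db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | ((uint64_t)7 << DBR_XID_SFT)
 * and a doorbell pointer DB_PF_OFFSET bytes into the BAR, whereas
 * pre-P5 chips use the 32-bit DB_KEY_* values at map_idx * 0x80.
 */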

static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_RX_VEC_START;
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);

	return 0;
}

int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_ring *ring;
	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
		return 0;

	nqr = rte_zmalloc_socket("nqr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (nqr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(nqr);
		return -ENOMEM;
	}

	ring->bd = (void *)nqr->cp_desc_ring;
	ring->bd_dma = nqr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	nqr->cp_ring_struct = ring;
	rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
			      NULL, nqr, NULL, "l2_nqr");
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return rc;
	}

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return rc;
	}

	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
		    ring->fw_ring_id, ring->ring_mask);

	bp->rxtx_nq_ring = nqr;

	return 0;
}

/* Free RX/TX NQ ring. */
void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;

	if (!nqr)
		return;

	bnxt_free_nq_ring(bp, nqr);

	bnxt_free_ring(nqr->cp_ring_struct);
	rte_free(nqr->cp_ring_struct);
	nqr->cp_ring_struct = NULL;
	rte_free(nqr);
	bp->rxtx_nq_ring = NULL;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->rx_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	return 0;
}

static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	if (!bnxt_need_agg_ring(bp->eth_dev))
		return 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_P5(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->ag_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

	return 0;
}

int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_coal coal;
	int rc;

	/*
	 * Storage for the cp ring is allocated based on worst-case
	 * usage, the actual size to be used by hw is computed here.
	 */
	cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;

	if (bnxt_need_agg_ring(bp->eth_dev))
		cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;

	cp_ring->ring_mask = cp_ring->ring_size - 1;
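
	/*
	 * E.g. a 512-entry Rx ring yields a 1024-entry completion ring
	 * here, scaled again by AGG_RING_SIZE_FACTOR when the agg ring
	 * can also generate completions; the mask arithmetic relies on
	 * ring_size remaining a power of two.
	 */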

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, queue_index);
		if (rc)
			goto err_out;
	}

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		if (bnxt_need_agg_ring(bp->eth_dev))
			bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
	}
	rxq->index = queue_index;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	bnxt_rxq_vec_setup(rxq);
#endif

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}

/* Initialise all rings to -1; this is used to free rings later if allocation
 * of a few rings fails.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
	unsigned int i = 0;
	struct bnxt_rx_queue *rxq;
	struct bnxt_ring *cp_ring;
	struct bnxt_ring *ring;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_tx_queue *txq;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		/* Rx-compl */
		cp_ring = rxq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-Reg */
		rxr = rxq->rx_ring;
		ring = rxr->rx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-AGG */
		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring != NULL)
				ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		txq = bp->tx_queues[i];
		/* Tx cmpl */
		cp_ring = txq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Tx ring */
		ring = txq->tx_ring->tx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);
	bnxt_init_all_rings(bp);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int soc_id = bp->eth_dev->device->numa_node;
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring;

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring == NULL) {
				bnxt_free_rxq_mem(rxq);

				rc = bnxt_init_rx_ring_struct(rxq, soc_id);
				if (rc)
					goto err_out;

				rc = bnxt_alloc_rings(bp, soc_id,
						      rxq->queue_id, NULL,
						      rxq, rxq->cp_ring,
						      NULL, "rxr");
				if (rc)
					goto err_out;
			}
		}

		rc = bnxt_alloc_hwrm_rx_ring(bp, i);
		if (rc)
			goto err_out;
	}

	/* If something is wrong with Rx ring alloc, skip Tx ring alloc */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		rc = bnxt_alloc_hwrm_tx_ring(bp, i);
		if (rc)
			goto err_out;
	}

err_out:
	return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
				NULL, bp->async_cp_ring, NULL, "def_cp");
}
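
/*
 * Note: the async ring reuses bnxt_alloc_rings() with qidx 0 and the
 * "def_cp" suffix, so its memzone name should not collide with the
 * per-queue zones, which use different suffixes.
 */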

int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	unsigned int idx = queue_index + bp->rx_cp_nr_rings;
	uint16_t tx_cosq_id = 0;
	struct bnxt_coal coal;
	int rc = 0;

	rc = bnxt_alloc_cmpl_ring(bp, idx, cpr);
	if (rc)
		goto err_out;

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
		tx_cosq_id = bp->tx_cosq_id[queue_index < bp->max_lltc ? queue_index : 0];
	else
		tx_cosq_id = bp->tx_cosq_id[0];

	rc = bnxt_hwrm_ring_alloc(bp, ring,
				  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id,
				  tx_cosq_id);
	if (rc)
		goto err_out;

	bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	txq->index = idx;

	return rc;

err_out:
	bnxt_free_hwrm_tx_ring(bp, queue_index);

	return rc;
}