/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"
/*
 * Generic ring handling
 */
void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}
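
/*
 * Stamp every ring group entry with HWRM_NA_SIGNATURE so that stale
 * entries are never mistaken for live firmware ring IDs.
 */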
int bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));

	return 0;
}
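
/*
 * Validate ring resources and allocate the ring group table. Thor has no
 * ring groups, so there the table is sized to hold RSS context IDs instead.
 */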
int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		return -EBUSY;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* One ring is reserved for the default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		return -ENOSPC;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc grp info tbl.\n");
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring. Pass NULL as tx_ring_info and/or rx_ring_info to
 * skip allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr_base;
	rte_iova_t mz_phys_addr;
	int sz;
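
	/*
	 * All rings backing this queue share a single IOVA-contiguous
	 * memzone. Compute every sub-region's offset and length up front:
	 * stats and vmem areas are 128-byte aligned, BD rings 4K aligned.
	 */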
	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);
	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						  tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				       rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start = 0;
	int nq_ring_start = 0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);
	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	int rx_ring_start = tx_ring_start + tx_ring_len;
	rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	int ag_ring_start = rx_ring_start + rx_ring_len;
	ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
	ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);
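
	/* Bitmap used by the RX path to track aggregation ring buffer usage. */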
	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
		rx_ring_info->rx_ring_struct->ring_size *
		AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = 0;
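
	/* TPA (LRO) reassembly state is only carved out when LRO is enabled. */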
	if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
		int tpa_max = BNXT_TPA_MAX_AGGS(bp);

		tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
		tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
	}

	int total_alloc_len = tpa_info_start;
	total_alloc_len += tpa_info_len;
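
	/*
	 * The memzone is named after the PCI address, queue index, and a
	 * caller-supplied suffix so that a restarting port can find and
	 * reuse the zone through rte_memzone_lookup() below.
	 */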
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
				SOCKET_ID_ANY,
				RTE_MEMZONE_2MB |
				RTE_MEMZONE_SIZE_HINT_ONLY |
				RTE_MEMZONE_IOVA_CONTIG,
				getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr_base = mz->iova;
	mz_phys_addr = mz->iova;
	if ((unsigned long)mz->addr == mz_phys_addr_base) {
		PMD_DRV_LOG(DEBUG,
			    "Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
			"unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}
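
	/* Carve the TX BD ring and its software ring (vmem) out of the zone. */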
	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}
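
	/* Carve out the RX BD ring, the aggregation BD ring, and their
	 * software rings, followed by the AGG bitmap and optional TPA info.
	 */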
	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->iova + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}
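
	/* The completion ring and stats area are always present. */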
	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);

	cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;

	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}
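
/*
 * Default interrupt coalescing: batch completion records to limit the
 * interrupt rate.
 */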
static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get nonstop IRQs */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get nonstop IRQs */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
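
/*
 * Program the doorbell record for a ring. Thor chips build a 64-bit key,
 *   db_key64 = DBR_PATH_L2 | DBR_TYPE_* | ((uint64_t)fid << DBR_XID_SFT),
 * while legacy chips use fixed 32-bit DB_KEY_* values at a per-ring
 * doorbell page offset of map_idx * 0x80.
 */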
static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid)
{
	if (BNXT_CHIP_THOR(bp)) {
		if (BNXT_PF(bp))
			db->doorbell = (char *)bp->doorbell_base + 0x10000;
		else
			db->doorbell = (char *)bp->doorbell_base + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
}
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr,
				struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id, 0);
	if (rc)
		return rc;

	cpr->cp_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id);

	return 0;
}
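
/* Allocate a notification queue (NQ); present only on chips that have NQs. */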
static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
			      struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
	int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp))
		return -EINVAL;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
		    nq_ring->fw_ring_id);

	return 0;
}
static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->rx_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
	bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

	return 0;
}
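
/*
 * Allocate the RX aggregation ring used for scattered/LRO buffers. Thor
 * has a dedicated AGG ring type; older chips reuse the plain RX type.
 */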
static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_THOR(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->ag_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
	bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

	return 0;
}
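
/* Allocate every firmware ring (NQ, CQ, RX, AGG) backing one RX queue. */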
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int rc;

	if (BNXT_HAS_NQ(bp)) {
		rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR,
				    "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
	}
	rxq->index = queue_index;

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}
/* Initialise all rings to -1; it's used to free rings later if allocation
 * of only some of the rings fails.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
	unsigned int i = 0;
	struct bnxt_rx_queue *rxq;
	struct bnxt_ring *cp_ring;
	struct bnxt_ring *ring;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_tx_queue *txq;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		/* Rx: completion ring */
		cp_ring = rxq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx: receive ring */
		rxr = rxq->rx_ring;
		ring = rxr->rx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx: aggregation ring */
		ring = rxr->ag_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		txq = bp->tx_queues[i];
		/* Tx: completion ring */
		cp_ring = txq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Tx: bd ring */
		ring = txq->tx_ring->tx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}
/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	uint8_t ring_type;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);
	bnxt_init_all_rings(bp);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

		if (BNXT_HAS_NQ(bp)) {
			if (bnxt_alloc_nq_ring(bp, i, nqr))
				goto err_out;
		}

		if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
			goto err_out;

		if (BNXT_HAS_RING_GRPS(bp)) {
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
		}

		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
		if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
			/*
			 * If a dedicated async event completion ring is not
			 * enabled, use the first completion ring as the
			 * default completion ring for async event handling.
			 */
			bp->async_cp_ring = cpr;
			rc = bnxt_hwrm_set_async_event_cr(bp);
			if (rc)
				goto err_out;
		}

		if (bnxt_alloc_rx_ring(bp, i))
			goto err_out;

		if (bnxt_alloc_rx_agg_ring(bp, i))
			goto err_out;

		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			return -ENOMEM;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
		rxq->index = i;
#ifdef RTE_ARCH_X86
		bnxt_rxq_vec_setup(rxq);
#endif
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_cp_ring_info *nqr = txq->nq_ring;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + bp->rx_cp_nr_rings;
		uint16_t tx_cosq_id = 0;

		if (BNXT_HAS_NQ(bp)) {
			if (bnxt_alloc_nq_ring(bp, idx, nqr))
				goto err_out;
		}

		if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
			goto err_out;

		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
			tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0];
		else
			tx_cosq_id = bp->tx_cosq_id[0];
		/* Tx ring */
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					  ring_type,
					  i, cpr->hw_stats_ctx_id,
					  cp_ring->fw_ring_id,
					  tx_cosq_id);
		if (rc)
			goto err_out;

		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
		txq->index = idx;
		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
	}

err_out:
	return rc;
}
/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id);

	return bnxt_hwrm_set_async_event_cr(bp);
}
/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}
int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;
	unsigned int socket_id;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, 0, NULL, NULL,
				bp->async_cp_ring, NULL,
				"def_cp");
}