/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}
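
/*
 * Note on bnxt_init_ring_grps() below: memset() with
 * (uint8_t)HWRM_NA_SIGNATURE fills every byte with 0xff, so all 16- and
 * 32-bit IDs in struct bnxt_ring_grp_info start out as HWRM_NA_SIGNATURE
 * ("no resource assigned") without per-field stores.
 */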

int bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));

	return 0;
}

int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		return -EBUSY;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		return -ENOMEM;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (bp->grp_info == NULL) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc grp info tbl.\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to
 * skip allocating the corresponding ring.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * nq vmem - Only non-zero length on Thor (BNXT_CHIP_THOR)
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * ag vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * nq bd ring - Only non-zero length on Thor
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 * ag bd ring - Only non-zero length if rx_ring_info is not NULL
 * ag bitmap - Only non-zero length if rx_ring_info is not NULL
 * tpa info - Only non-zero length if rx_ring_info is not NULL and LRO is on
 */
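
/*
 * Resulting single-memzone layout (offsets are computed below; this is an
 * illustrative sketch, not authoritative):
 *
 *   offset 0
 *   | stats | cp vmem | nq vmem | tx vmem | rx vmem | ag vmem |
 *   | cp bd ring | nq bd ring | tx bd ring | rx bd ring | ag bd ring |
 *   | ag bitmap | tpa info (only with DEV_RX_OFFLOAD_TCP_LRO) |
 *
 * Each region is cache-line rounded and padded to a 128-byte boundary;
 * the BD rings are additionally aligned to 4096-byte boundaries.
 */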
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
			    struct bnxt_tx_queue *txq,
			    struct bnxt_rx_queue *rxq,
			    struct bnxt_cp_ring_info *cp_ring_info,
			    struct bnxt_cp_ring_info *nq_ring_info,
			    const char *suffix)
{
107 struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
108 struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
109 struct bnxt_ring *tx_ring;
110 struct bnxt_ring *rx_ring;
111 struct rte_pci_device *pdev = bp->pdev;
112 uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
113 const struct rte_memzone *mz = NULL;
114 char mz_name[RTE_MEMZONE_NAMESIZE];
115 rte_iova_t mz_phys_addr_base;
116 rte_iova_t mz_phys_addr;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
						rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start = 0;
	int nq_ring_start = 0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
		sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	int ag_ring_start = rx_ring_start + rx_ring_len;
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
	ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
			rx_ring_info->rx_ring_struct->ring_size *
			AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
				       sizeof(struct bnxt_tpa_info)) : 0;

	int total_alloc_len = tpa_info_start;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		total_alloc_len += tpa_info_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
				SOCKET_ID_ANY,
				RTE_MEMZONE_2MB |
				RTE_MEMZONE_SIZE_HINT_ONLY |
				RTE_MEMZONE_IOVA_CONTIG,
				getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr_base = mz->iova;
	mz_phys_addr = mz->iova;
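	/*
	 * If the memzone's IOVA equals its virtual address, re-derive the
	 * DMA mapping: touch and lock each page so it is backed, then query
	 * rte_mem_virt2iova().  (Historically this guarded against stale
	 * identity mappings; in IOVA-as-VA mode the result is unchanged.)
	 */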
	if ((unsigned long)mz->addr == mz_phys_addr_base) {
		PMD_DRV_LOG(DEBUG,
			    "Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
			"unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);
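
		/*
		 * The bitmap above provides one bit per aggregation ring
		 * entry; the Rx path uses it to track which agg slots
		 * currently hold a buffer (see the agg/TPA handling in
		 * bnxt_rxr.c).
		 */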

		/* TPA info */
		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQ */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQ */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
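
/*
 * These defaults are pushed to the firmware per completion ring via
 * bnxt_hwrm_set_ring_coal() in bnxt_alloc_hwrm_rings() below.
 */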

static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid)
{
	if (BNXT_CHIP_THOR(bp)) {
		if (BNXT_PF(bp))
			db->doorbell = (char *)bp->doorbell_base + 0x10000;
		else
			db->doorbell = (char *)bp->doorbell_base + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
}
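
/*
 * With the key and address precomputed above, ringing a doorbell reduces
 * to OR-ing the producer (or consumer) index into the key and writing it
 * to the mapped doorbell address -- a minimal sketch (the real helpers,
 * e.g. bnxt_db_write(), live in bnxt_cpr.h):
 *
 *	if (db->db_64)
 *		rte_write64(db->db_key64 | idx, db->doorbell);
 *	else
 *		rte_write32(db->db_key32 | idx, db->doorbell);
 */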

static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr,
				struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id);
	if (rc)
		return rc;

	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id);
	bnxt_db_cq(cpr);

	return 0;
}
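
/*
 * Note: cp_ring_index is offset by BNXT_NUM_ASYNC_CPR(bp) because, when a
 * dedicated async event completion ring is enabled, it occupies the first
 * completion ring index and the per-queue rings start right after it.
 */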

static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
			      struct bnxt_cp_ring_info *nqr)
{
	struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
	int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp))
		return -EINVAL;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
	if (rc)
		return rc;

	bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
		    nq_ring->fw_ring_id);
	bnxt_db_nq(nqr);

	return 0;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id);
	if (rc)
		return rc;

	rxr->rx_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
	bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

	return 0;
}

static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_THOR(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id);
	if (rc)
		return rc;

	rxr->ag_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
	bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

	return 0;
}
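
/*
 * On pre-Thor chips there is no separate aggregation ring type, so the agg
 * ring is allocated as a second RX ring; map_idx places its legacy doorbell
 * (doorbell_base + map_idx * 0x80) past the regular RX ring doorbells.
 */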

int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int rc = 0;

	if (BNXT_HAS_NQ(bp)) {
		rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (bp->eth_dev->data->rx_queue_state[queue_index] ==
	    RTE_ETH_QUEUE_STATE_STARTED) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR,
				    "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
	}
	rxq->index = queue_index;

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	uint8_t ring_type;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

		if (BNXT_HAS_NQ(bp)) {
			if (bnxt_alloc_nq_ring(bp, i, nqr))
				goto err_out;
		}

		if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
			goto err_out;

		if (BNXT_HAS_RING_GRPS(bp)) {
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
		}

		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
		if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
			/*
			 * If a dedicated async event completion ring is not
			 * enabled, use the first completion ring as the default
			 * completion ring for async event handling.
			 */
			bp->async_cp_ring = cpr;
			rc = bnxt_hwrm_set_async_event_cr(bp);
			if (rc)
				goto err_out;
		}

		if (bnxt_alloc_rx_ring(bp, i))
			goto err_out;

		if (bnxt_alloc_rx_agg_ring(bp, i))
			goto err_out;

		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			return -ENOMEM;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
		bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
		rxq->index = i;
#ifdef RTE_ARCH_X86
		bnxt_rxq_vec_setup(rxq);
#endif
	}

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_cp_ring_info *nqr = txq->nq_ring;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + bp->rx_cp_nr_rings;

		if (BNXT_HAS_NQ(bp)) {
			if (bnxt_alloc_nq_ring(bp, idx, nqr))
				goto err_out;
		}

		if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
			goto err_out;

		/* Tx ring */
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					  ring_type,
					  i, cpr->hw_stats_ctx_id,
					  cp_ring->fw_ring_id);
		if (rc)
			goto err_out;

		bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
		txq->index = idx;
		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
	}

err_out:
	return rc;
}
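
/*
 * Note the index split in the TX loop above: the NQ and completion rings
 * for a TX queue use idx (i + rx_cp_nr_rings), i.e. the slot after all RX
 * queues, while the TX BD ring itself is allocated and doorbelled with the
 * plain queue index i.
 */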

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
	if (rc)
		return rc;

	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;
	unsigned int socket_id;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, 0, NULL, NULL,
				bp->async_cp_ring, NULL,
				"def_cp");
}
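
/*
 * Typical init-time ordering (an assumption based on this file, not a
 * contract): bnxt_alloc_async_ring_struct() first builds the ring structs
 * and backing memzone via bnxt_alloc_rings(), and only later, once HWRM is
 * usable, bnxt_alloc_async_cp_ring() allocates the firmware ring and binds
 * async events to it with bnxt_hwrm_set_async_event_cr().
 */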