/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
        if (!ring)
                return;

        if (ring->vmem_size && *ring->vmem) {
                memset((char *)*ring->vmem, 0, ring->vmem_size);
                *ring->vmem = NULL;
        }
        ring->mem_zone = NULL;
}

int bnxt_init_ring_grps(struct bnxt *bp)
{
        unsigned int i;

        for (i = 0; i < bp->max_ring_grps; i++)
                memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
                       sizeof(struct bnxt_ring_grp_info));

        return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
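
/*
 * All offsets computed below are relative to the start of a single
 * IOVA-contiguous memzone that is shared by the queue's rings.
 */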
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                     struct bnxt_tx_queue *txq,
                     struct bnxt_rx_queue *rxq,
                     struct bnxt_cp_ring_info *cp_ring_info,
                     struct bnxt_cp_ring_info *nq_ring_info,
                     const char *suffix)
{
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
        struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
        struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
        struct bnxt_ring *tx_ring;
        struct bnxt_ring *rx_ring;
        struct rte_pci_device *pdev = bp->pdev;
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        rte_iova_t mz_phys_addr_base;
        rte_iova_t mz_phys_addr;
        int sz;

        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
                                   sizeof(struct hwrm_resp_hdr)) : 0;
        stats_len = RTE_ALIGN(stats_len, 128);
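
        /*
         * Each vmem block below is rounded up to a cache line and then padded
         * to a 128-byte boundary; the hardware BD rings further down are
         * placed on 4KB boundaries.
         */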
        int cp_vmem_start = stats_len;
        int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
        cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

        int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
                RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
        nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

        int nq_vmem_start = cp_vmem_start + cp_vmem_len;

        int tx_vmem_start = nq_vmem_start + nq_vmem_len;
        int tx_vmem_len = tx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
                                       tx_ring_struct->vmem_size) : 0;
        tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

        int rx_vmem_start = tx_vmem_start + tx_vmem_len;
        int rx_vmem_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
                                       rx_ring_struct->vmem_size) : 0;
        rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);

        int ag_vmem_start = 0;
        int ag_vmem_len = 0;
        int cp_ring_start = 0;
        int nq_ring_start = 0;

        ag_vmem_start = rx_vmem_start + rx_vmem_len;
        ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
                                rx_ring_info->ag_ring_struct->vmem_size) : 0;
        cp_ring_start = ag_vmem_start + ag_vmem_len;
        cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

        int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
                                                 sizeof(struct cmpl_base));
        cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
        nq_ring_start = cp_ring_start + cp_ring_len;
        nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

        int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

        int tx_ring_start = nq_ring_start + nq_ring_len;
        int tx_ring_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
                                   sizeof(struct tx_bd_long)) : 0;
        tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

        int rx_ring_start = tx_ring_start + tx_ring_len;
        int rx_ring_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                                   sizeof(struct rx_prod_pkt_bd)) : 0;
        rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

        int ag_ring_start = rx_ring_start + rx_ring_len;
        int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
        ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

        int ag_bitmap_start = ag_ring_start + ag_ring_len;
        int ag_bitmap_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
                    rx_ring_info->rx_ring_struct->ring_size *
                    AGG_RING_SIZE_FACTOR)) : 0;

        int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
        int tpa_info_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
                                   sizeof(struct bnxt_tpa_info)) : 0;

        int total_alloc_len = tpa_info_start;
        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                total_alloc_len += tpa_info_len;
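
        /* The TPA (LRO) info array is only appended to the memzone when the
         * TCP LRO offload is enabled, keeping the allocation smaller otherwise.
         */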
        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
                 suffix);
        mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
        mz = rte_memzone_lookup(mz_name);
        if (!mz) {
                mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                SOCKET_ID_ANY,
                                RTE_MEMZONE_2MB |
                                RTE_MEMZONE_SIZE_HINT_ONLY |
                                RTE_MEMZONE_IOVA_CONTIG,
                                getpagesize());
                if (mz == NULL)
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
        mz_phys_addr_base = mz->iova;
        mz_phys_addr = mz->iova;
        if ((unsigned long)mz->addr == mz_phys_addr_base) {
                PMD_DRV_LOG(DEBUG,
                            "Memzone physical address same as virtual.\n");
                PMD_DRV_LOG(DEBUG,
                            "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
                mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
                mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        PMD_DRV_LOG(ERR,
                                    "unable to map ring address to physical memory\n");
                        return -ENOMEM;
                }
        }
        if (tx_ring_info) {
                tx_ring = tx_ring_info->tx_ring_struct;

                tx_ring->bd = ((char *)mz->addr + tx_ring_start);
                tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
                tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
                tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
                tx_ring->mem_zone = (const void *)mz;

                if (!tx_ring->bd)
                        return -ENOMEM;
                if (tx_ring->vmem_size) {
                        tx_ring->vmem =
                            (void **)((char *)mz->addr + tx_vmem_start);
                        tx_ring_info->tx_buf_ring =
                            (struct bnxt_sw_tx_bd *)tx_ring->vmem;
                }
        }

        if (rx_ring_info) {
                rx_ring = rx_ring_info->rx_ring_struct;

                rx_ring->bd = ((char *)mz->addr + rx_ring_start);
                rx_ring_info->rx_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
                rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + rx_vmem_start);
                        rx_ring_info->rx_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                /* The aggregation ring reuses the rx_ring pointer. */
                rx_ring = rx_ring_info->ag_ring_struct;

                rx_ring->bd = ((char *)mz->addr + ag_ring_start);
                rx_ring_info->ag_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz->iova + ag_ring_start;
                rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + ag_vmem_start);
                        rx_ring_info->ag_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring_info->ag_bitmap =
                    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
                                    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
                                    ag_bitmap_start, ag_bitmap_len);

                /* TPA info */
                if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                        rx_ring_info->tpa_info =
                                ((struct bnxt_tpa_info *)((char *)mz->addr +
                                                          tpa_info_start));
        }

        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
        cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
        cp_ring_info->cp_desc_ring = cp_ring->bd;
        cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
        cp_ring->mem_zone = (const void *)mz;

        if (!cp_ring->bd)
                return -ENOMEM;
        if (cp_ring->vmem_size)
                *cp_ring->vmem = ((char *)mz->addr + stats_len);
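
        /* The stats context occupies offset 0 of the memzone, so hw_stats
         * points at mz->addr and hw_stats_map at the base bus address.
         */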
        cp_ring_info->hw_stats = mz->addr;
        cp_ring_info->hw_stats_map = mz_phys_addr;

        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

        if (nq_ring_info) {
                struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

                nq_ring->bd = (char *)mz->addr + nq_ring_start;
                nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
                nq_ring_info->cp_desc_ring = nq_ring->bd;
                nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
                nq_ring->mem_zone = (const void *)mz;

                if (!nq_ring->bd)
                        return -ENOMEM;
                if (nq_ring->vmem_size)
                        *nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

                nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        }

        return 0;
}

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
        /* Tick values in microseconds.
         * 1 coal_buf x bufs_per_record = 1 completion record.
         */
        coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
        coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
        /* min timer set to 1/2 of interrupt timer */
        coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
        /* buf timer set to 1/4 of interrupt timer */
        coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
        coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
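
/*
 * bnxt_set_db() computes the doorbell address and key for a ring: Thor (P5)
 * chips use 64-bit doorbells at a fixed offset from the doorbell BAR, while
 * earlier chips use 32-bit doorbells strided by 0x80 per ring.
 */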
static void bnxt_set_db(struct bnxt *bp,
                        struct bnxt_db_info *db,
                        uint32_t ring_type,
                        uint32_t map_idx,
                        uint32_t fid)
{
        if (BNXT_CHIP_THOR(bp)) {
                if (BNXT_PF(bp))
                        db->doorbell = (char *)bp->doorbell_base + 0x10000;
                else
                        db->doorbell = (char *)bp->doorbell_base + 0x4000;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                        db->db_key64 = DBR_PATH_L2;
                        break;
                }
                db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
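                /* At this point db_key64 carries DBR_PATH_L2, the doorbell
                 * type and the ring id in the XID field.
                 */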
        } else {
                db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key32 = DB_KEY_TX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        db->db_key32 = DB_KEY_RX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key32 = DB_KEY_CP;
                        break;
                }
        }
}

static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
                                struct bnxt_cp_ring_info *cpr,
                                struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

        if (BNXT_HAS_NQ(bp)) {
                if (nqr) {
                        nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
                } else {
                        PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
                        return -EINVAL;
                }
        }

        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,
                                  HWRM_NA_SIGNATURE, nq_ring_id);
        if (rc)
                return rc;

        bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,
                    cp_ring->fw_ring_id);

        return 0;
}

static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
                              struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
        uint8_t ring_type;
        int rc = 0;

        if (!BNXT_HAS_NQ(bp))
                return -EINVAL;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
        rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;

        bnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,
                    nq_ring->fw_ring_id);

        return 0;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->rx_ring_struct;
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
                                  queue_index, cpr->hw_stats_ctx_id,
                                  cp_ring->fw_ring_id);
        if (rc)
                return rc;

        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
        /* Publish the initial producer index to the hardware. */
        bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

        return 0;
}
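
/*
 * The aggregation ring backs scatter/LRO receive buffers.  On Thor it is a
 * dedicated RX_AGG ring type; on earlier chips it is a second RX ring mapped
 * at an index past all of the regular RX rings (map_idx below).
 */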
static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
        unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->ag_ring_struct;
        uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        uint8_t ring_type;
        int rc = 0;

        ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

        if (BNXT_CHIP_THOR(bp)) {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
                hw_stats_ctx_id = cpr->hw_stats_ctx_id;
        } else {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
        }

        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
                                  hw_stats_ctx_id, cp_ring->fw_ring_id);
        if (rc)
                return rc;

        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
        bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

        return 0;
}

int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int rc = 0;

        if (BNXT_HAS_NQ(bp)) {
                if (bnxt_alloc_nq_ring(bp, queue_index, nqr))
                        goto err_out;
        }

        if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))
                goto err_out;

        if (BNXT_HAS_RING_GRPS(bp)) {
                bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
                bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
        }

        if (!queue_index) {
                /*
                 * In order to save completion resources, use the first
                 * completion ring from PF or VF as the default completion ring
                 * for async event and HWRM forward response handling.
                 */
                bp->def_cp_ring = cpr;
                rc = bnxt_hwrm_set_async_event_cr(bp);
                if (rc)
                        goto err_out;
        }

        if (bnxt_alloc_rx_ring(bp, queue_index))
                goto err_out;

        if (bnxt_alloc_rx_agg_ring(bp, queue_index))
                goto err_out;

        rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
                               RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
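
        /*
         * Only fill the ring with buffers and ring the doorbells if the queue
         * is already in the started state; deferred-start queues are
         * populated when they are actually started.
         */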
        if (bp->eth_dev->data->rx_queue_state[queue_index] ==
            RTE_ETH_QUEUE_STATE_STARTED) {
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR,
                                    "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        rc = -ENOMEM;
                        goto err_out;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
        }
        rxq->index = queue_index;
        PMD_DRV_LOG(INFO,
                    "queue %d, rx_deferred_start %d, state %d!\n",
                    queue_index, rxq->rx_deferred_start,
                    bp->eth_dev->data->rx_queue_state[queue_index]);

        return 0;

err_out:
        return rc;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
        struct bnxt_coal coal;
        unsigned int i;
        uint8_t ring_type;
        int rc = 0;

        bnxt_init_dflt_coal(&coal);

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

                if (BNXT_HAS_NQ(bp)) {
                        if (bnxt_alloc_nq_ring(bp, i, nqr))
                                goto err_out;
                }

                if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
                        goto err_out;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
                }

                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

                if (!i) {
                        /*
                         * In order to save completion resource, use the first
                         * completion ring from PF or VF as the default
                         * completion ring for async event & HWRM
                         * forward response handling.
                         */
                        bp->def_cp_ring = cpr;
                        rc = bnxt_hwrm_set_async_event_cr(bp);
                        if (rc)
                                goto err_out;
                }

                if (bnxt_alloc_rx_ring(bp, i))
                        goto err_out;

                if (bnxt_alloc_rx_agg_ring(bp, i))
                        goto err_out;

                rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
                                       RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        return -ENOMEM;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
                bnxt_rxq_vec_setup(rxq);
        }

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_cp_ring_info *nqr = txq->nq_ring;
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;

                if (BNXT_HAS_NQ(bp)) {
                        if (bnxt_alloc_nq_ring(bp, idx, nqr))
                                goto err_out;
                }

                if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
                        goto err_out;

                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          ring_type,
                                          i, cpr->hw_stats_ctx_id,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;

                bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);

                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);