/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 */
#include <unistd.h>

#include <rte_bitmap.h>
#include <rte_memzone.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}

	ring->mem_zone = NULL;
}

int bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;
	int sz;
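
	/*
	 * All sub-regions (stats block, vmem areas, BD rings, agg bitmap and
	 * TPA info) are carved out of a single memzone; each length below is
	 * rounded up to a cache line so neighbouring regions never share one.
	 */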
	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
					tx_ring_struct->vmem_size) : 0;

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
					rx_ring_struct->vmem_size) : 0;

	int ag_vmem_start = rx_vmem_start + rx_vmem_len;
	int ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	int cp_ring_start = ag_vmem_start + ag_vmem_len;

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));

	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;
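
	/*
	 * The aggregation (agg) ring supplies extra Rx buffers for scattered
	 * jumbo frames and LRO/TPA aggregation; it is sized as a fixed
	 * multiple (AGG_RING_SIZE_FACTOR) of the Rx BD ring.
	 */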
	int ag_ring_start = rx_ring_start + rx_ring_len;
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
				rx_ring_info->rx_ring_struct->ring_size *
				AGG_RING_SIZE_FACTOR)) : 0;
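
	/*
	 * Per-aggregation TPA state; it is only charged to the memzone size
	 * when the TCP LRO offload is enabled (see total_alloc_len below).
	 */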
	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
				   sizeof(struct bnxt_tpa_info)) : 0;

	int total_alloc_len = tpa_info_start;
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		total_alloc_len += tpa_info_len;
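
	/*
	 * The memzone name encodes the PCI address, queue index and a caller
	 * supplied suffix, so a later call (e.g. after a port restart) can
	 * look up and reuse an existing zone instead of reserving a new one.
	 */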
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
				SOCKET_ID_ANY,
				RTE_MEMZONE_2MB |
				RTE_MEMZONE_SIZE_HINT_ONLY |
				RTE_MEMZONE_IOVA_CONTIG,
				getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;
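
	/*
	 * If the memzone's IOVA equals its virtual address, assume no real
	 * bus address was resolved and fall back to locking the pages and
	 * translating them with rte_mem_virt2iova().
	 */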
	if ((unsigned long)mz->addr == mz_phys_addr) {
		PMD_DRV_LOG(WARNING,
			"Memzone physical address same as virtual.\n");
		PMD_DRV_LOG(WARNING,
			"Using rte_mem_virt2iova()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == 0) {
			PMD_DRV_LOG(ERR,
			"unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}
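
	/* TX BD ring and its software (vmem) ring, if a TX queue was given. */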
	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}
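
	/* Rx BD ring, software ring and related state, if an Rx queue was given. */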
	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
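
		/* Aggregation BD ring and software ring for the same queue. */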
		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
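
		/*
		 * Bitmap used by the Rx path to track aggregation ring
		 * buffer slots.
		 */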
		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}
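
	/*
	 * Completion ring BDs plus the hardware stats block, which sits at
	 * offset 0 of the memzone.
	 */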
	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);

	cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	return 0;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;
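
	/*
	 * Request a completion ring, an Rx ring and an aggregation ring from
	 * firmware for each Rx queue. Index 0 is reserved for the default
	 * completion ring, so queue i uses logical ring index i + 1.
	 */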
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		unsigned int idx = i + 1;
		unsigned int map_idx = idx + bp->rx_cp_nr_rings;

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
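
		/*
		 * Completion ring for this queue; each ring's doorbell lives
		 * at a separate 0x80-byte offset from doorbell_base, selected
		 * by the logical ring index.
		 */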
		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
					idx, HWRM_NA_SIGNATURE,
					HWRM_NA_SIGNATURE);
		if (rc)
			goto err_out;
		cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
		bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

		/* Rx ring */
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
					idx, cpr->hw_stats_ctx_id,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
		bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

		/* Agg ring */
		ring = rxr->ag_ring_struct;
		if (ring == NULL) {
			PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
			goto err_out;
		}

		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
					map_idx, HWRM_NA_SIGNATURE,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;
		PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
		rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
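
		/*
		 * Rx buffers must fit a maximum-MTU frame plus the Ethernet
		 * header, CRC and two VLAN tags (QinQ).
		 */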
		rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
					ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			return -ENOMEM;
		}
		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
	}
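
	/*
	 * Tx completion and Tx rings follow the Rx rings in the logical ring
	 * index space (hence idx = i + 1 + rx_cp_nr_rings).
	 */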
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + 1 + bp->rx_cp_nr_rings;

		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
					idx, HWRM_NA_SIGNATURE,
					HWRM_NA_SIGNATURE);
		if (rc)
			goto err_out;
		cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
					idx, cpr->hw_stats_ctx_id,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;
		txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;