/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	/* Clear the shadow (vmem) area before releasing the ring memory. */
	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
}

/*
 * Ring groups
 */

void bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	/* Mark every field of every ring group as invalid. */
	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to
 * skip allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * ag vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 * ag bd ring - Only non-zero length if rx_ring_info is not NULL
 * ag bitmap - Only non-zero length if rx_ring_info is not NULL
 * tpa info - Only non-zero length if rx_ring_info is not NULL and LRO
 *            is enabled
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_ring_info *tx_ring_info,
		     struct bnxt_rx_ring_info *rx_ring_info,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	phys_addr_t mz_phys_addr;
	int sz;
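
	/*
	 * Compute the byte offset of each region within the single memzone.
	 * Each region is rounded up to a cache line so no two regions share
	 * a line.
	 */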
	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->vmem_size) :
	    0;

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->vmem_size) :
	    0;
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start = 0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));

	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;

	int ag_ring_start = rx_ring_start + rx_ring_len;
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
		rx_ring_info->rx_ring_struct->ring_size *
		AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
				   sizeof(struct bnxt_tpa_info)) : 0;

	int total_alloc_len = tpa_info_start;
	if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
		total_alloc_len += tpa_info_len;
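
	/*
	 * The memzone name encodes the PCI address, queue index, and a
	 * caller-supplied suffix; looking it up before reserving lets an
	 * already-reserved zone (e.g. after an application restart) be
	 * found and reused rather than reserved twice.
	 */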
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
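
	/*
	 * Zero the zone and resolve its physical address for DMA.  When the
	 * memzone reports a physical address equal to its virtual address
	 * (no physical addressing available), lock each page and translate
	 * the mapping by hand.
	 */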
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->phys_addr;
	if ((unsigned long)mz->addr == mz_phys_addr) {
		RTE_LOG(WARNING, PMD,
			"Memzone physical address same as virtual.\n");
		RTE_LOG(WARNING, PMD,
			"Using rte_mem_virt2phy()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr = rte_mem_virt2phy(mz->addr);
		if (mz_phys_addr == 0) {
			RTE_LOG(ERR, PMD,
				"unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}
	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}
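
	/* Carve the RX descriptor ring and its software buffer ring. */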
	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
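
		/*
		 * The aggregation ring supplies extra receive buffers for
		 * scattered/jumbo frames and TPA; it is sized at
		 * AGG_RING_SIZE_FACTOR times the RX ring.
		 */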
		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
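
		/* Bitmap tracking which aggregation buffers are in use. */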
		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}
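
	/*
	 * The completion ring and the hardware stats context sit at the
	 * start of the zone.
	 */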
	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);

	cp_ring_info->hw_stats = mz->addr;
	cp_ring_info->hw_stats_map = mz_phys_addr;
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	return 0;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = bp->pdev;
	unsigned int i;
	int rc = 0;
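
	/*
	 * RX path: for each RX queue, allocate its completion ring, RX ring,
	 * and aggregation ring with the firmware, then program the doorbells.
	 */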
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		unsigned int idx = i + 1;
		unsigned int map_idx = idx + bp->rx_cp_nr_rings;

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;

		/* Rx cmpl */
		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
					idx, HWRM_NA_SIGNATURE,
					HWRM_NA_SIGNATURE);
		if (rc)
			goto err_out;
		cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
			idx * 0x80;
		bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
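
		/*
		 * Doorbell registers live in BAR 2 (mem_resource[2]) at a
		 * 0x80-byte stride per logical ring index.
		 */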

		/* Rx ring */
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
					idx, cpr->hw_stats_ctx_id,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;
		rxr->rx_prod = 0;
		rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
			idx * 0x80;
		bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

		/* Agg ring */
		ring = rxr->ag_ring_struct;
		if (ring == NULL)
			RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");

		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
					map_idx, HWRM_NA_SIGNATURE,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;
		RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
		rxr->ag_prod = 0;
		rxr->ag_doorbell =
		    (char *)pci_dev->mem_resource[2].addr +
		    map_idx * 0x80;
		bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
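
		/*
		 * Size receive buffers for a maximum-MTU frame, including
		 * the Ethernet header, CRC, and two VLAN tags.
		 */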
		rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
					ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
		if (bnxt_init_one_rx_ring(rxq)) {
			RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
			bnxt_rx_queue_release_op(rxq);
			return -ENOMEM;
		}
		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
		rxq->index = idx;
	}
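
	/*
	 * TX path: each TX queue gets a completion ring and a TX ring; the
	 * indexes continue after the RX completion and aggregation rings.
	 */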
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		unsigned int idx = i + 1 + bp->rx_cp_nr_rings;

		/* Account for AGG Rings. AGG ring cnt = Rx Cmpl ring cnt */
		idx += bp->rx_cp_nr_rings;

		/* Tx cmpl */
		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
					idx, HWRM_NA_SIGNATURE,
					HWRM_NA_SIGNATURE);
		if (rc)
			goto err_out;

		cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
			idx * 0x80;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

		/* Tx ring */
		rc = bnxt_hwrm_ring_alloc(bp, ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
					idx, cpr->hw_stats_ctx_id,
					cp_ring->fw_ring_id);
		if (rc)
			goto err_out;

		txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
			idx * 0x80;