net/bnxt: avoid null pointer dereference
dpdk.git: drivers/net/bnxt/bnxt_ring.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

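/*
 * Clear a ring's software state. The descriptor memory itself lives in the
 * queue's memzone, which is freed separately by the queue release path, so
 * only the vmem back-pointer and memzone reference are cleared here.
 */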
void bnxt_free_ring(struct bnxt_ring *ring)
{
        if (!ring)
                return;

        if (ring->vmem_size && *ring->vmem) {
                memset((char *)*ring->vmem, 0, ring->vmem_size);
                *ring->vmem = NULL;
        }
        ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

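/*
 * Fill every byte of the group table with (uint8_t)HWRM_NA_SIGNATURE (0xff),
 * so each firmware ID field starts out marked "invalid/not allocated".
 */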
int bnxt_init_ring_grps(struct bnxt *bp)
{
        unsigned int i;

        for (i = 0; i < bp->max_ring_grps; i++)
                memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
                       sizeof(struct bnxt_ring_grp_info));

        return 0;
}

int bnxt_alloc_ring_grps(struct bnxt *bp)
{
        if (bp->max_tx_rings == 0) {
                PMD_DRV_LOG(ERR, "No TX rings available!\n");
                return -EBUSY;
        }

        /* THOR does not support ring groups, but the array is still
         * used to save RSS context IDs.
         */
        if (BNXT_CHIP_THOR(bp)) {
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
        } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
                /* One ring is reserved for the default completion ring. */
                PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
                return -ENOSPC;
        }

        if (BNXT_HAS_RING_GRPS(bp)) {
                bp->grp_info = rte_zmalloc("bnxt_grp_info",
                                           sizeof(*bp->grp_info) *
                                           bp->max_ring_grps, 0);
                if (!bp->grp_info) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to alloc grp info tbl.\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring. Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
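/*
 * Resulting single-memzone layout (offsets computed below; vmem regions are
 * 128-byte aligned, BD rings are 4K aligned):
 *
 *   stats | cp vmem | nq vmem | tx vmem | rx vmem | ag vmem
 *   | cp bd ring | nq bd ring | tx bd ring | rx bd ring | ag bd ring
 *   | ag bitmap | tpa info (present only when LRO is enabled)
 */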
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                            struct bnxt_tx_queue *txq,
                            struct bnxt_rx_queue *rxq,
                            struct bnxt_cp_ring_info *cp_ring_info,
                            struct bnxt_cp_ring_info *nq_ring_info,
                            const char *suffix)
{
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
        struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
        struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
        struct bnxt_ring *tx_ring;
        struct bnxt_ring *rx_ring;
        struct rte_pci_device *pdev = bp->pdev;
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        rte_iova_t mz_phys_addr_base;
        rte_iova_t mz_phys_addr;
        int sz;

        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
                                   sizeof(struct hwrm_resp_hdr)) : 0;
        stats_len = RTE_ALIGN(stats_len, 128);

        int cp_vmem_start = stats_len;
        int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
        cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

        int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
                RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
        nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

        int nq_vmem_start = cp_vmem_start + cp_vmem_len;

        int tx_vmem_start = nq_vmem_start + nq_vmem_len;
        int tx_vmem_len =
            tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
                                                tx_ring_struct->vmem_size) : 0;
        tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

        int rx_vmem_start = tx_vmem_start + tx_vmem_len;
        int rx_vmem_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
                                                rx_ring_struct->vmem_size) : 0;
        rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
        int ag_vmem_start = 0;
        int ag_vmem_len = 0;
        int cp_ring_start = 0;
        int nq_ring_start = 0;

        ag_vmem_start = rx_vmem_start + rx_vmem_len;
        ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
                                rx_ring_info->ag_ring_struct->vmem_size) : 0;
        cp_ring_start = ag_vmem_start + ag_vmem_len;
        cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

        int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
                                                 sizeof(struct cmpl_base));
        cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
        nq_ring_start = cp_ring_start + cp_ring_len;
        nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

        int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

        int tx_ring_start = nq_ring_start + nq_ring_len;
        int tx_ring_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
                                   sizeof(struct tx_bd_long)) : 0;
        tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

        int rx_ring_start = tx_ring_start + tx_ring_len;
        int rx_ring_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                sizeof(struct rx_prod_pkt_bd)) : 0;
        rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

        int ag_ring_start = rx_ring_start + rx_ring_len;
        int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
        ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

        int ag_bitmap_start = ag_ring_start + ag_ring_len;
        int ag_bitmap_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
                        rx_ring_info->rx_ring_struct->ring_size *
                        AGG_RING_SIZE_FACTOR)) : 0;

        int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
        int tpa_info_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
                                       sizeof(struct bnxt_tpa_info)) : 0;

        int total_alloc_len = tpa_info_start;
        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                total_alloc_len += tpa_info_len;

        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
                 suffix);
        mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
        mz = rte_memzone_lookup(mz_name);
        if (!mz) {
                mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                SOCKET_ID_ANY,
                                RTE_MEMZONE_2MB |
                                RTE_MEMZONE_SIZE_HINT_ONLY |
                                RTE_MEMZONE_IOVA_CONTIG,
                                getpagesize());
                if (mz == NULL)
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
        mz_phys_addr_base = mz->iova;
        mz_phys_addr = mz->iova;
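        /*
         * If the reported IOVA numerically matches the virtual address
         * (e.g. IOVA-as-VA or no-huge setups), lock each page and re-derive
         * the IOVA with rte_mem_virt2iova() to get a usable bus address.
         */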
        if ((unsigned long)mz->addr == mz_phys_addr_base) {
                PMD_DRV_LOG(DEBUG,
                            "Memzone physical address same as virtual.\n");
                PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
                mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
                mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "unable to map ring address to physical memory\n");
                        return -ENOMEM;
                }
        }

        if (tx_ring_info) {
                txq->mz = mz;
                tx_ring = tx_ring_info->tx_ring_struct;

                tx_ring->bd = ((char *)mz->addr + tx_ring_start);
                tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
                tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
                tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
                tx_ring->mem_zone = (const void *)mz;

                if (!tx_ring->bd)
                        return -ENOMEM;
                if (tx_ring->vmem_size) {
                        tx_ring->vmem =
                            (void **)((char *)mz->addr + tx_vmem_start);
                        tx_ring_info->tx_buf_ring =
                            (struct bnxt_sw_tx_bd *)tx_ring->vmem;
                }
        }

        if (rx_ring_info) {
                rxq->mz = mz;
                rx_ring = rx_ring_info->rx_ring_struct;

                rx_ring->bd = ((char *)mz->addr + rx_ring_start);
                rx_ring_info->rx_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
                rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + rx_vmem_start);
                        rx_ring_info->rx_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring = rx_ring_info->ag_ring_struct;

                rx_ring->bd = ((char *)mz->addr + ag_ring_start);
                rx_ring_info->ag_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                /* Use mz_phys_addr, not mz->iova: they differ when the IOVA
                 * was re-derived with rte_mem_virt2iova() above.
                 */
                rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
                rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + ag_vmem_start);
                        rx_ring_info->ag_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring_info->ag_bitmap =
                    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
                                    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
                                    ag_bitmap_start, ag_bitmap_len);

                /* TPA info */
                if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                        rx_ring_info->tpa_info =
                                ((struct bnxt_tpa_info *)((char *)mz->addr +
                                                          tpa_info_start));
        }

        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
        cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
        cp_ring_info->cp_desc_ring = cp_ring->bd;
        cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
        cp_ring->mem_zone = (const void *)mz;

        if (!cp_ring->bd)
                return -ENOMEM;
        if (cp_ring->vmem_size)
                *cp_ring->vmem = ((char *)mz->addr + stats_len);
        if (stats_len) {
                cp_ring_info->hw_stats = mz->addr;
                cp_ring_info->hw_stats_map = mz_phys_addr;
        }
        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

        if (nq_ring_info) {
                struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

                nq_ring->bd = (char *)mz->addr + nq_ring_start;
                nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
                nq_ring_info->cp_desc_ring = nq_ring->bd;
                nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
                nq_ring->mem_zone = (const void *)mz;

                if (!nq_ring->bd)
                        return -ENOMEM;
                if (nq_ring->vmem_size)
                        *nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

                nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        }

        return 0;
}
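
/*
 * Illustrative use (a sketch, not a verbatim call site): an RX queue setup
 * path would allocate all of its rings in one memzone roughly as
 *
 *     rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq,
 *                           rxq->cp_ring, rxq->nq_ring, "rxr");
 *
 * while a TX queue passes its txq and NULL for rxq.
 */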

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
        /* Tick values in microseconds.
         * 1 coal_buf x bufs_per_record = 1 completion record.
         */
        coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
        coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
        /* min timer set to 1/2 of interrupt timer */
        coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
        /* buf timer set to 1/4 of interrupt timer */
        coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
        coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

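/*
 * Program the software doorbell descriptor: on Thor chips a single 64-bit
 * doorbell region is used and the key encodes the ring type plus the
 * firmware ring ID; on earlier chips each ring gets its own 32-bit doorbell
 * at a fixed 0x80 stride indexed by map_idx.
 */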
static void bnxt_set_db(struct bnxt *bp,
                        struct bnxt_db_info *db,
                        uint32_t ring_type,
                        uint32_t map_idx,
                        uint32_t fid)
{
        if (BNXT_CHIP_THOR(bp)) {
                if (BNXT_PF(bp))
                        db->doorbell = (char *)bp->doorbell_base + 0x10000;
                else
                        db->doorbell = (char *)bp->doorbell_base + 0x4000;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                        db->db_key64 = DBR_PATH_L2;
                        break;
                }
                db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
                db->db_64 = true;
        } else {
                db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key32 = DB_KEY_TX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        db->db_key32 = DB_KEY_RX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key32 = DB_KEY_CP;
                        break;
                }
                db->db_64 = false;
        }
}

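/*
 * Allocate a completion ring in firmware. On chips with notification queues
 * (Thor), the completion ring must be bound to an already-allocated NQ.
 */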
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
                                struct bnxt_cp_ring_info *cpr,
                                struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
        int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

        if (BNXT_HAS_NQ(bp)) {
                if (nqr) {
                        nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
                } else {
                        PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
                        return -EINVAL;
                }
        }

        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
                                  HWRM_NA_SIGNATURE, nq_ring_id);
        if (rc)
                return rc;

        cpr->cp_cons = 0;
        bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
                    cp_ring->fw_ring_id);
        bnxt_db_cq(cpr);

        return 0;
}

static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
                              struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
        int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;

        if (!BNXT_HAS_NQ(bp))
                return -EINVAL;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

        rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;

        bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
                    nq_ring->fw_ring_id);
        bnxt_db_nq(nqr);

        return 0;
}

static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->rx_ring_struct;
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
                                  queue_index, cpr->hw_stats_ctx_id,
                                  cp_ring->fw_ring_id);
        if (rc)
                return rc;

        rxr->rx_prod = 0;
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
        bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

        return 0;
}

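/*
 * The aggregation ring's doorbell/map index is offset past all RX completion
 * rings. Pre-Thor chips allocate it as a second RX ring, while Thor uses the
 * dedicated RX_AGG ring type with its own stats context.
 */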
static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
        unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->ag_ring_struct;
        uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        uint8_t ring_type;
        int rc = 0;

        ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

        if (BNXT_CHIP_THOR(bp)) {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
                hw_stats_ctx_id = cpr->hw_stats_ctx_id;
        } else {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
        }

        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
                                  hw_stats_ctx_id, cp_ring->fw_ring_id);
        if (rc)
                return rc;

        rxr->ag_prod = 0;
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
        bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

        return 0;
}

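/*
 * Allocate all firmware resources for a single RX queue (NQ, completion,
 * RX and aggregation rings); used on the per-queue start path, mirroring
 * the per-queue steps of bnxt_alloc_hwrm_rings().
 */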
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int rc;

        if (BNXT_HAS_NQ(bp)) {
                rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
        if (rc)
                goto err_out;

        if (BNXT_HAS_RING_GRPS(bp)) {
                bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
                bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
        }

        if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
                /*
                 * If a dedicated async event completion ring is not enabled,
                 * use the first completion ring from PF or VF as the default
                 * completion ring for async event handling.
                 */
                bp->async_cp_ring = cpr;
                rc = bnxt_hwrm_set_async_event_cr(bp);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_alloc_rx_ring(bp, queue_index);
        if (rc)
                goto err_out;

        rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
        if (rc)
                goto err_out;

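        /*
         * Largest receive buffer the queue may need: maximum MTU payload
         * plus Ethernet header, CRC, and room for two (QinQ) VLAN tags.
         */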
        rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
                RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);

        if (bp->eth_dev->data->rx_queue_state[queue_index] ==
            RTE_ETH_QUEUE_STATE_STARTED) {
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR,
                                    "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        rc = -ENOMEM;
                        goto err_out;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
        }
        rxq->index = queue_index;

        return 0;

err_out:
        PMD_DRV_LOG(ERR,
                    "Failed to allocate receive queue %d, rc %d.\n",
                    queue_index, rc);
        return rc;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
        struct bnxt_coal coal;
        unsigned int i;
        uint8_t ring_type;
        int rc = 0;

        bnxt_init_dflt_coal(&coal);

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

                if (BNXT_HAS_NQ(bp)) {
                        rc = bnxt_alloc_nq_ring(bp, i, nqr);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_cmpl_ring(bp, i, cpr, nqr);
                if (rc)
                        goto err_out;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
                }

                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
                if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
                        /*
                         * If a dedicated async event completion ring is not
                         * enabled, use the first completion ring as the default
                         * completion ring for async event handling.
                         */
                        bp->async_cp_ring = cpr;
                        rc = bnxt_hwrm_set_async_event_cr(bp);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_rx_ring(bp, i);
                if (rc)
                        goto err_out;

                rc = bnxt_alloc_rx_agg_ring(bp, i);
                if (rc)
                        goto err_out;

                rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
                                        RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        return -ENOMEM;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
                rxq->index = i;
#ifdef RTE_ARCH_X86
                bnxt_rxq_vec_setup(rxq);
#endif
        }

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_cp_ring_info *nqr = txq->nq_ring;
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;

                if (BNXT_HAS_NQ(bp)) {
                        rc = bnxt_alloc_nq_ring(bp, idx, nqr);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr);
                if (rc)
                        goto err_out;

                /* Tx ring */
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          ring_type,
                                          i, cpr->hw_stats_ctx_id,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;

                bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
                txq->index = idx;
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
        }

err_out:
        return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
        struct bnxt_ring *cp_ring;
        uint8_t ring_type;
        int rc;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
                return 0;

        cp_ring = cpr->cp_ring_struct;

        if (BNXT_HAS_NQ(bp))
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
        else
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;

        cpr->cp_cons = 0;
        cpr->valid = 0;
        bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
                    cp_ring->fw_ring_id);

        if (BNXT_HAS_NQ(bp))
                bnxt_db_nq(cpr);
        else
                bnxt_db_cq(cpr);

        return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
                return;

        if (BNXT_HAS_NQ(bp))
                bnxt_free_nq_ring(bp, cpr);
        else
                bnxt_free_cp_ring(bp, cpr);

        bnxt_free_ring(cpr->cp_ring_struct);
        rte_free(cpr->cp_ring_struct);
        cpr->cp_ring_struct = NULL;
        rte_free(cpr);
        bp->async_cp_ring = NULL;
}

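/*
 * Allocate the software state for the dedicated async completion ring.
 * ring->bd/bd_dma are copied from the still-zeroed cp_ring_info here and
 * are populated for real by the bnxt_alloc_rings() call at the end.
 */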
int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = NULL;
        struct bnxt_ring *ring = NULL;
        unsigned int socket_id;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0)
                return 0;

        socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());

        cpr = rte_zmalloc_socket("cpr",
                                 sizeof(struct bnxt_cp_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (cpr == NULL)
                return -ENOMEM;

        ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL) {
                rte_free(cpr);
                return -ENOMEM;
        }

        ring->bd = (void *)cpr->cp_desc_ring;
        ring->bd_dma = cpr->cp_desc_mapping;
        ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
        ring->ring_mask = ring->ring_size - 1;
        ring->vmem_size = 0;
        ring->vmem = NULL;

        bp->async_cp_ring = cpr;
        cpr->cp_ring_struct = ring;

        return bnxt_alloc_rings(bp, 0, NULL, NULL,
                                bp->async_cp_ring, NULL,
                                "def_cp");
}