net/bnxt: handle ring cleanup in case of error
drivers/net/bnxt/bnxt_ring.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

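/*
 * Clear the vmem (shadow ring) reference held by a ring and drop its
 * memzone pointer. This does not free the backing memzone itself; that is
 * released separately when the owning queue is torn down.
 */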
void bnxt_free_ring(struct bnxt_ring *ring)
{
        if (!ring)
                return;

        if (ring->vmem_size && *ring->vmem) {
                memset((char *)*ring->vmem, 0, ring->vmem_size);
                *ring->vmem = NULL;
        }
        ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

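/*
 * Fill each ring group entry with HWRM_NA_SIGNATURE bytes so that every ID
 * in the table starts out as invalid/unallocated.
 */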
int bnxt_init_ring_grps(struct bnxt *bp)
{
        unsigned int i;

        for (i = 0; i < bp->max_ring_grps; i++)
                memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
                       sizeof(struct bnxt_ring_grp_info));

        return 0;
}

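/*
 * Validate ring resources and allocate the ring group table. Thor has no
 * ring groups, so no table is allocated there; max_ring_grps is instead
 * repurposed to hold the number of RSS contexts.
 */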
int bnxt_alloc_ring_grps(struct bnxt *bp)
{
        if (bp->max_tx_rings == 0) {
                PMD_DRV_LOG(ERR, "No TX rings available!\n");
                return -EBUSY;
        }

        /* THOR does not support ring groups, but we reuse the array
         * to save RSS context IDs.
         */
        if (BNXT_CHIP_THOR(bp)) {
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
        } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
                /* One ring is reserved for the default completion ring */
                PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
                return -ENOSPC;
        }

        if (BNXT_HAS_RING_GRPS(bp)) {
                bp->grp_info = rte_zmalloc("bnxt_grp_info",
                                           sizeof(*bp->grp_info) *
                                           bp->max_ring_grps, 0);
                if (!bp->grp_info) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to alloc grp info tbl.\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring.  Pass NULL as tx_ring_info and/or rx_ring_info to
 * skip allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                     struct bnxt_tx_queue *txq,
                     struct bnxt_rx_queue *rxq,
                     struct bnxt_cp_ring_info *cp_ring_info,
                     struct bnxt_cp_ring_info *nq_ring_info,
                     const char *suffix)
{
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
        struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
        struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
        struct bnxt_ring *tx_ring;
        struct bnxt_ring *rx_ring;
        struct rte_pci_device *pdev = bp->pdev;
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        rte_iova_t mz_phys_addr_base;
        rte_iova_t mz_phys_addr;
        int sz;

        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
                                   sizeof(struct hwrm_resp_hdr)) : 0;
        stats_len = RTE_ALIGN(stats_len, 128);

        int cp_vmem_start = stats_len;
        int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
        cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

        int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
                RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
        nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

        int nq_vmem_start = cp_vmem_start + cp_vmem_len;

        int tx_vmem_start = nq_vmem_start + nq_vmem_len;
        int tx_vmem_len =
            tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
                                                tx_ring_struct->vmem_size) : 0;
        tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

        int rx_vmem_start = tx_vmem_start + tx_vmem_len;
        int rx_vmem_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
                                                rx_ring_struct->vmem_size) : 0;
        rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
        int ag_vmem_start = 0;
        int ag_vmem_len = 0;
        int cp_ring_start = 0;
        int nq_ring_start = 0;

        ag_vmem_start = rx_vmem_start + rx_vmem_len;
        ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
                                rx_ring_info->ag_ring_struct->vmem_size) : 0;
        cp_ring_start = ag_vmem_start + ag_vmem_len;
        cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

        int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
                                                 sizeof(struct cmpl_base));
        cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
        nq_ring_start = cp_ring_start + cp_ring_len;
        nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

        int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;

        int tx_ring_start = nq_ring_start + nq_ring_len;
        int tx_ring_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
                                   sizeof(struct tx_bd_long)) : 0;
        tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

        int rx_ring_start = tx_ring_start + tx_ring_len;
        int rx_ring_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                                       sizeof(struct rx_prod_pkt_bd)) : 0;
        rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

        int ag_ring_start = rx_ring_start + rx_ring_len;
        int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
        ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

        int ag_bitmap_start = ag_ring_start + ag_ring_len;
        int ag_bitmap_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
                        rx_ring_info->rx_ring_struct->ring_size *
                        AGG_RING_SIZE_FACTOR)) : 0;

        int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
        int tpa_info_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
                                       sizeof(struct bnxt_tpa_info)) : 0;

        int total_alloc_len = tpa_info_start;
        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                total_alloc_len += tpa_info_len;

        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
                 suffix);
        mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
        mz = rte_memzone_lookup(mz_name);
        if (!mz) {
                mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                SOCKET_ID_ANY,
                                RTE_MEMZONE_2MB |
                                RTE_MEMZONE_SIZE_HINT_ONLY |
                                RTE_MEMZONE_IOVA_CONTIG,
                                getpagesize());
                if (mz == NULL)
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
        mz_phys_addr_base = mz->iova;
        mz_phys_addr = mz->iova;
        if ((unsigned long)mz->addr == mz_phys_addr_base) {
                PMD_DRV_LOG(DEBUG,
                            "Memzone physical address same as virtual.\n");
                PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
                mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
                mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                                    "unable to map ring address to physical memory\n");
                        return -ENOMEM;
                }
        }

        if (tx_ring_info) {
                txq->mz = mz;
                tx_ring = tx_ring_info->tx_ring_struct;

                tx_ring->bd = ((char *)mz->addr + tx_ring_start);
                tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
                tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
                tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
                tx_ring->mem_zone = (const void *)mz;

                if (!tx_ring->bd)
                        return -ENOMEM;
                if (tx_ring->vmem_size) {
                        tx_ring->vmem =
                            (void **)((char *)mz->addr + tx_vmem_start);
                        tx_ring_info->tx_buf_ring =
                            (struct bnxt_sw_tx_bd *)tx_ring->vmem;
                }
        }

        if (rx_ring_info) {
                rxq->mz = mz;
                rx_ring = rx_ring_info->rx_ring_struct;

                rx_ring->bd = ((char *)mz->addr + rx_ring_start);
                rx_ring_info->rx_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
                rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + rx_vmem_start);
                        rx_ring_info->rx_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring = rx_ring_info->ag_ring_struct;

                rx_ring->bd = ((char *)mz->addr + ag_ring_start);
                rx_ring_info->ag_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                /* Use the resolved physical address here as well; mz->iova
                 * may be stale when the IOVA was derived via
                 * rte_mem_virt2iova() above.
                 */
                rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
                rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + ag_vmem_start);
                        rx_ring_info->ag_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring_info->ag_bitmap =
                    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
                                    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
                                    ag_bitmap_start, ag_bitmap_len);

                /* TPA info */
                if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                        rx_ring_info->tpa_info =
                                ((struct bnxt_tpa_info *)((char *)mz->addr +
                                                          tpa_info_start));
        }

        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
        cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
        cp_ring_info->cp_desc_ring = cp_ring->bd;
        cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
        cp_ring->mem_zone = (const void *)mz;

        if (!cp_ring->bd)
                return -ENOMEM;
        if (cp_ring->vmem_size)
                *cp_ring->vmem = ((char *)mz->addr + stats_len);
        if (stats_len) {
                cp_ring_info->hw_stats = mz->addr;
                cp_ring_info->hw_stats_map = mz_phys_addr;
        }
        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

        if (nq_ring_info) {
                struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

                nq_ring->bd = (char *)mz->addr + nq_ring_start;
                nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
                nq_ring_info->cp_desc_ring = nq_ring->bd;
                nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
                nq_ring->mem_zone = (const void *)mz;

                if (!nq_ring->bd)
                        return -ENOMEM;
                if (nq_ring->vmem_size)
                        *nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

                nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        }

        return 0;
}

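/* Default interrupt coalescing parameters, programmed into each completion
 * ring via bnxt_hwrm_set_ring_coal().
 */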
static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
        /* Tick values in microseconds.
         * 1 coal_buf x bufs_per_record = 1 completion record.
         */
        coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
        coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
        coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
        coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
        /* min timer set to 1/2 of interrupt timer */
        coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
        /* buf timer set to 1/4 of interrupt timer */
        coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
        coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

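/*
 * Program the doorbell descriptor for a ring. Thor devices use 64-bit
 * doorbells at a fixed PF/VF offset, keyed by ring type and FID; older
 * devices use legacy 32-bit doorbells indexed by map_idx.
 */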
static void bnxt_set_db(struct bnxt *bp,
                        struct bnxt_db_info *db,
                        uint32_t ring_type,
                        uint32_t map_idx,
                        uint32_t fid)
{
        if (BNXT_CHIP_THOR(bp)) {
                if (BNXT_PF(bp))
                        db->doorbell = (char *)bp->doorbell_base + 0x10000;
                else
                        db->doorbell = (char *)bp->doorbell_base + 0x4000;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                        db->db_key64 = DBR_PATH_L2;
                        break;
                }
                db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
                db->db_64 = true;
        } else {
                db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        db->db_key32 = DB_KEY_TX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        db->db_key32 = DB_KEY_RX;
                        break;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        db->db_key32 = DB_KEY_CP;
                        break;
                }
                db->db_64 = false;
        }
}

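/*
 * Allocate a completion ring from firmware, bind it to its notification
 * queue on devices that use NQs, then set up and ring its doorbell.
 */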
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
                                struct bnxt_cp_ring_info *cpr,
                                struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
        int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

        if (BNXT_HAS_NQ(bp)) {
                if (nqr) {
                        nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
                } else {
                        PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
                        return -EINVAL;
                }
        }

        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
                                  HWRM_NA_SIGNATURE, nq_ring_id);
        if (rc)
                return rc;

        cpr->cp_cons = 0;
        bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
                    cp_ring->fw_ring_id);
        bnxt_db_cq(cpr);

        return 0;
}

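/* Allocate a notification queue (NQ) ring from firmware and arm its doorbell. */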
static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
                              struct bnxt_cp_ring_info *nqr)
{
        struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
        int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
        uint8_t ring_type;
        int rc = 0;

        if (!BNXT_HAS_NQ(bp))
                return -EINVAL;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

        rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;

        bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
                    nq_ring->fw_ring_id);
        bnxt_db_nq(nqr);

        return 0;
}

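/*
 * Allocate an RX ring from firmware, record its ID in the ring group where
 * ring groups are supported, and write the initial producer index.
 */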
static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->rx_ring_struct;
        uint8_t ring_type;
        int rc = 0;

        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
                                  queue_index, cpr->hw_stats_ctx_id,
                                  cp_ring->fw_ring_id);
        if (rc)
                return rc;

        rxr->rx_prod = 0;
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
        bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

        return 0;
}

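/*
 * Allocate the aggregation ring used for scattered receive and LRO. Thor
 * has a dedicated RX_AGG ring type; older chips allocate it as a second
 * RX ring.
 */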
static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
        unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->ag_ring_struct;
        uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        uint8_t ring_type;
        int rc = 0;

        ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

        if (BNXT_CHIP_THOR(bp)) {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
                hw_stats_ctx_id = cpr->hw_stats_ctx_id;
        } else {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
        }

        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
                                  hw_stats_ctx_id, cp_ring->fw_ring_id);
        if (rc)
                return rc;

        rxr->ag_prod = 0;
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
        bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
        bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

        return 0;
}

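/*
 * Allocate all firmware rings backing one RX queue: the NQ (if any), the
 * completion ring, the RX ring, and the aggregation ring, then fill the
 * rings with buffers if the queue has been started. On failure the error
 * is logged and returned so the caller can free the rings allocated so far.
 */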
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int rc;

        if (BNXT_HAS_NQ(bp)) {
                rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
        if (rc)
                goto err_out;

        if (BNXT_HAS_RING_GRPS(bp)) {
                bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
                bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
        }

        if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
                /*
                 * If a dedicated async event completion ring is not enabled,
                 * use the first completion ring from PF or VF as the default
                 * completion ring for async event handling.
                 */
                bp->async_cp_ring = cpr;
                rc = bnxt_hwrm_set_async_event_cr(bp);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_alloc_rx_ring(bp, queue_index);
        if (rc)
                goto err_out;

        rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
        if (rc)
                goto err_out;

        if (rxq->rx_started) {
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        rc = -ENOMEM;
                        goto err_out;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
        }
        rxq->index = queue_index;

        return 0;

err_out:
        PMD_DRV_LOG(ERR,
                    "Failed to allocate receive queue %d, rc %d.\n",
                    queue_index, rc);
        return rc;
}

/* Initialise all ring IDs to INVALID_HW_RING_ID, so that rings can later be
 * freed selectively if allocation fails part-way through.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
        unsigned int i = 0;
        struct bnxt_rx_queue *rxq;
        struct bnxt_ring *cp_ring;
        struct bnxt_ring *ring;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_tx_queue *txq;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                /* Rx completion ring */
                cp_ring = rxq->cp_ring->cp_ring_struct;
                cp_ring->fw_ring_id = INVALID_HW_RING_ID;
                /* Rx ring */
                rxr = rxq->rx_ring;
                ring = rxr->rx_ring_struct;
                ring->fw_ring_id = INVALID_HW_RING_ID;
                /* Rx aggregation ring */
                ring = rxr->ag_ring_struct;
                ring->fw_ring_id = INVALID_HW_RING_ID;
        }
        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                txq = bp->tx_queues[i];
                /* Tx completion ring */
                cp_ring = txq->cp_ring->cp_ring_struct;
                cp_ring->fw_ring_id = INVALID_HW_RING_ID;
                /* Tx ring */
                ring = txq->tx_ring->tx_ring_struct;
                ring->fw_ring_id = INVALID_HW_RING_ID;
        }
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 .. rx_cp_nr_rings] = rx_cp, rx rings
 * [rx_cp_nr_rings + 1 .. rx_cp_nr_rings + tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
        struct bnxt_coal coal;
        unsigned int i;
        uint8_t ring_type;
        int rc = 0;

        bnxt_init_dflt_coal(&coal);
        bnxt_init_all_rings(bp);

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;

                if (BNXT_HAS_NQ(bp)) {
                        rc = bnxt_alloc_nq_ring(bp, i, nqr);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_cmpl_ring(bp, i, cpr, nqr);
                if (rc)
                        goto err_out;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
                }

                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
                if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
                        /*
                         * If a dedicated async event completion ring is not
                         * enabled, use the first completion ring as the default
                         * completion ring for async event handling.
                         */
                        bp->async_cp_ring = cpr;
                        rc = bnxt_hwrm_set_async_event_cr(bp);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_rx_ring(bp, i);
                if (rc)
                        goto err_out;

                rc = bnxt_alloc_rx_agg_ring(bp, i);
                if (rc)
                        goto err_out;

                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        rc = -ENOMEM;
                        goto err_out;
                }
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
                rxq->index = i;
#ifdef RTE_ARCH_X86
                bnxt_rxq_vec_setup(rxq);
#endif
        }

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_cp_ring_info *nqr = txq->nq_ring;
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;

                if (BNXT_HAS_NQ(bp)) {
                        rc = bnxt_alloc_nq_ring(bp, idx, nqr);
                        if (rc)
                                goto err_out;
                }

                rc = bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr);
                if (rc)
                        goto err_out;

                /* Tx ring */
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          ring_type,
                                          i, cpr->hw_stats_ctx_id,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;

                bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);
                txq->index = idx;
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
        }

err_out:
        return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
        struct bnxt_ring *cp_ring;
        uint8_t ring_type;
        int rc;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
                return 0;

        cp_ring = cpr->cp_ring_struct;

        if (BNXT_HAS_NQ(bp))
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
        else
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
                                  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
        if (rc)
                return rc;

        cpr->cp_cons = 0;
        cpr->valid = 0;
        bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
                    cp_ring->fw_ring_id);

        if (BNXT_HAS_NQ(bp))
                bnxt_db_nq(cpr);
        else
                bnxt_db_cq(cpr);

        return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
                return;

        if (BNXT_HAS_NQ(bp))
                bnxt_free_nq_ring(bp, cpr);
        else
                bnxt_free_cp_ring(bp, cpr);

        bnxt_free_ring(cpr->cp_ring_struct);
        rte_free(cpr->cp_ring_struct);
        cpr->cp_ring_struct = NULL;
        rte_free(cpr);
        bp->async_cp_ring = NULL;
}

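/*
 * Allocate the software state for the dedicated async completion ring and
 * reserve its backing memzone via bnxt_alloc_rings().
 */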
int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
        struct bnxt_cp_ring_info *cpr = NULL;
        struct bnxt_ring *ring = NULL;
        unsigned int socket_id;

        if (BNXT_NUM_ASYNC_CPR(bp) == 0)
                return 0;

        socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());

        cpr = rte_zmalloc_socket("cpr",
                                 sizeof(struct bnxt_cp_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (cpr == NULL)
                return -ENOMEM;

        ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL) {
                rte_free(cpr);
                return -ENOMEM;
        }

        ring->bd = (void *)cpr->cp_desc_ring;
        ring->bd_dma = cpr->cp_desc_mapping;
        ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
        ring->ring_mask = ring->ring_size - 1;
        ring->vmem_size = 0;
        ring->vmem = NULL;

        bp->async_cp_ring = cpr;
        cpr->cp_ring_struct = ring;

        return bnxt_alloc_rings(bp, 0, NULL, NULL,
                                bp->async_cp_ring, NULL,
                                "def_cp");
}