net/bnxt: fix accessing variable before null check
drivers/net/bnxt/bnxt_rxq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

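/*
 * The per-queue stats block lives in the completion ring's DMA memzone,
 * which is freed along with the rings; only drop the reference here.
 */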
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}

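/*
 * Distribute the port's Rx queues among its VNICs based on the configured
 * multi-queue mode (single queue, VMDq pools or RSS) and program the RSS
 * hash types and hash key for each VNIC.
 */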
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		/* &bp->vnic_info[0] can never be NULL; check the
		 * vnic_info array itself before dereferencing it.
		 */
		if (!bp->vnic_info) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic = &bp->vnic_info[0];
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				    dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	}
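	/* Split the Rx rings evenly among the pools; each VNIC owns a
	 * contiguous range of ring groups [start_grp_id, end_grp_id).
	 */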
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	/* &bp->vnic_info[i] can never be NULL; validate the array once
	 * before entering the loop.
	 */
	if (!bp->vnic_info) {
		PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

out:
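	/* Translate the ethdev RSS hash fields to HWRM hash types and
	 * program them, along with the hash key, into every VNIC.
	 */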
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
		uint16_t hash_type = 0;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
			rss = &bp->rss_conf;
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
		}

		if (rss->rss_hf & ETH_RSS_IPV4)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rss->rss_hf & ETH_RSS_IPV6)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

		for (i = 0; i < bp->nr_vnics; i++) {
			vnic = &bp->vnic_info[i];
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}

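/*
 * Free any mbufs still held by the Rx ring, the aggregation ring and the
 * TPA reassembly slots. Runs under the queue lock to guard against
 * concurrent access.
 */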
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct bnxt_sw_rx_bd *sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq)
		return;

	rte_spinlock_lock(&rxq->lock);

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		for (i = 0; i < BNXT_TPA_MAX; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}

	rte_spinlock_unlock(&rxq->lock);
}

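/* Release the mbufs of every Rx queue on the port. */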
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

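/*
 * rx_queue_release dev op: free the queue's mbufs, its hardware
 * descriptor rings and finally the queue structure itself.
 */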
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);

		/* Free RX completion ring hardware descriptors */
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}

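/*
 * rx_queue_setup dev op: validate the descriptor count, release any
 * queue previously configured at this index, then allocate the queue
 * structure, ring structures and hardware descriptor memory.
 */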
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;
	uint8_t queue_state;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bp->max_rx_rings) {
		PMD_DRV_LOG(ERR,
			"Cannot create Rx ring %d. Only %d rings available\n",
			queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		/* The queue has not been published in rx_queues[] yet;
		 * free it here so it is not leaked on the error path.
		 */
		rte_free(rxq);
		goto out;
	}

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
			     rxq->nq_ring, "rxr")) {
		PMD_DRV_LOG(ERR,
			"ring_dma_zone_reserve for rx_ring failed!\n");
		/* Clear the queue slot before releasing the queue so a
		 * later setup call does not free the same pointer twice.
		 */
		eth_dev->data->rx_queues[queue_idx] = NULL;
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	if (rxq->rx_deferred_start) {
		queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	} else {
		queue_state = RTE_ETH_QUEUE_STATE_STARTED;
		rxq->rx_started = true;
	}
	eth_dev->data->rx_queue_state[queue_idx] = queue_state;
	rte_spinlock_init(&rxq->lock);

out:
	return rc;
}

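/* Re-arm the completion ring doorbell to enable Rx interrupts. */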
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq) {
			rc = -EINVAL;
			return rc;
		}
		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}

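/* Disarm the completion ring doorbell to disable Rx interrupts. */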
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq) {
			rc = -EINVAL;
			return rc;
		}
		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}

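/*
 * rx_queue_start dev op: recreate the HW ring for the queue, plug its
 * ring group back into the owning VNIC and refresh the RSS table.
 */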
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers:
	 * if the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG,
				    "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc == 0)
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STARTED;
	else
		rxq->rx_started = false;

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}

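/*
 * rx_queue_stop dev op: mark the queue stopped, remove its ring group
 * from the VNIC's RSS table and release the posted mbufs.
 */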
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the Stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as the
	 * default CQ for async notifications.
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}