net/bnxt: fix queues per VNIC

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

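/*
 * Drop the queue's reference to its hardware stats memory. The stats
 * block is carved out of the queue's ring DMA allocation (see
 * bnxt_alloc_rings) and is freed with the rings, so only the pointer is
 * cleared here.
 */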
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}

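/*
 * Distribute Rx queues among VNICs based on the configured multi-queue
 * mode: a single default VNIC when fewer than two completion rings are
 * present, otherwise one VNIC per pool with rx_cp_nr_rings / pools ring
 * groups each. Also allocates the default L2 filter and records the RSS
 * hash type, hash level, and key for each VNIC.
 */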
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = &bp->vnic_info[0];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				    dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}

	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
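	/*
	 * Worked example (values assumed for illustration): with
	 * rx_cp_nr_rings = 8 and pools = 2, nb_q_per_grp = 4, so the loop
	 * below maps rxq[0..3] to vnic[0] (ring groups 0-3) and rxq[4..7]
	 * to vnic[1] (ring groups 4-7).
	 */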
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;

	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

out:
	bp->rx_num_qs_per_vnic = nb_q_per_grp;

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;

		for (i = 0; i < bp->nr_vnics; i++) {
			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);

			vnic = &bp->vnic_info[i];
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
			vnic->hash_mode =
				bnxt_rte_to_hwrm_hash_level(bp,
							    rss->rss_hf,
							    lvl);

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}

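/*
 * Free all mbufs still held by the queue's software rings: the Rx
 * buffer ring, the aggregation ring, and any in-progress TPA
 * aggregations. Hardware ring state is not touched.
 */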
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}

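/* Release the Rx mbufs of every receive queue on the port. */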
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

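/*
 * ethdev .rx_queue_release callback: free the queue's mbufs, its Rx,
 * aggregation, and completion ring descriptors, its stats mapping, the
 * backing memzone, and finally the queue structure itself.
 */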
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		if (rxq->rx_ring) {
			bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
			rte_free(rxq->rx_ring->rx_ring_struct);
			/* Free RX Agg ring hardware descriptors */
			bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
			rte_free(rxq->rx_ring->ag_ring_struct);

			rte_free(rxq->rx_ring);
		}
		/* Free RX completion ring hardware descriptors */
		if (rxq->cp_ring) {
			bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring);
		}

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}

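/*
 * ethdev .rx_queue_setup callback. Applications reach this through the
 * generic API; a minimal sketch, with port_id, ring size, and mbuf_pool
 * as assumed example values:
 *
 *	struct rte_eth_rxconf rxconf = { .rx_deferred_start = 0 };
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
 *					 &rxconf, mbuf_pool);
 */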
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   const struct rte_eth_rxconf *rx_conf,
			   struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;
	uint8_t queue_state;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			"Cannot create Rx ring %d. Only %d rings available\n",
			queue_idx, bnxt_max_rings(bp));
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		return -ENOMEM;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
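	/*
	 * Refill threshold: a quarter of the ring size rounded up to a
	 * power of two, capped at the maximum supported Rx burst size.
	 */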
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
		PMD_DRV_LOG(NOTICE,
			    "Per-queue config of drop-en is not supported.\n");
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "init_rx_ring_struct failed!\n");
		goto err;
	}

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
			      "rxr");
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "ring_dma_zone_reserve for rx_ring failed!\n");
		goto err;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

	/*
	 * Without a dedicated async completion ring, Rx queue 0 doubles as
	 * the async CPR and must not be deferred or stopped.
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	if (rxq->rx_deferred_start) {
		queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	} else {
		queue_state = RTE_ETH_QUEUE_STATE_STARTED;
		rxq->rx_started = true;
	}
	eth_dev->data->rx_queue_state[queue_idx] = queue_state;

	/* Configure mtu if it is different from what was configured before */
	if (!queue_idx)
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

	return 0;
err:
	bnxt_rx_queue_release_op(rxq);
	return rc;
}

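/*
 * ethdev Rx interrupt-enable callback, reached via
 * rte_eth_dev_rx_intr_enable(): re-arm the completion ring doorbell so
 * the next completion raises an interrupt.
 */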
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}

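/*
 * ethdev Rx interrupt-disable callback, the counterpart reached via
 * rte_eth_dev_rx_intr_disable(): disarm the completion ring doorbell.
 */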
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}

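/*
 * ethdev .rx_queue_start callback: rebuild the queue's HWRM Rx ring,
 * restore its entry in the VNIC ring group table, and refresh the RSS
 * table when RSS is enabled. A deferred-start queue is started with,
 * e.g. (queue id assumed):
 *
 *	rte_eth_dev_rx_queue_start(port_id, 1);
 */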
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers:
	 * if the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	if (BNXT_CHIP_P5(bp)) {
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
	}
	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG,
				    "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}

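/*
 * ethdev .rx_queue_stop callback: mark the queue stopped, remove it from
 * the VNIC's ring group and RSS table, repoint the VNIC default ring (or
 * zero the MRU on P5/Thor when no queue remains active), then release
 * the queue's mbufs.
 */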
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the Stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as the
	 * default CQ for async notifications and cannot be stopped.
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	vnic = rxq->vnic;
	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
			    rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	/* Compute current number of active receive queues. */
	for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
		if (bp->rx_queues[i]->rx_started)
			active_queue_cnt++;

	if (BNXT_CHIP_P5(bp)) {
		/*
		 * For Thor, we need to ensure that the VNIC default receive
		 * ring corresponds to an active receive queue. When no queue
		 * is active, we need to temporarily set the MRU to zero so
		 * that packets are dropped early in the receive pipeline in
		 * order to prevent the VNIC default receive ring from being
		 * accessed.
		 */
		if (active_queue_cnt == 0) {
			uint16_t saved_mru = vnic->mru;

			vnic->mru = 0;
			/* Reconfigure default receive ring and MRU. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
			vnic->mru = saved_mru;
		} else {
			/* Reconfigure default receive ring. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
	} else if (active_queue_cnt) {
		/*
		 * If the queue being stopped is the current default queue and
		 * there are other active queues, pick one of them as the
		 * default and reconfigure the vnic.
		 */
		if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
			for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}