drivers/net/bnxt/bnxt_rxq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

/* Determine whether the current configuration needs an aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
        /* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
         * if LRO is enabled, or if the max packet len is greater than the
         * mbuf data size. So AGG ring will be needed whenever scattered_rx
         * is set.
         */
        return eth_dev->data->scattered_rx ? 1 : 0;
}

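/* Drop the queue's reference to its completion ring HW statistics memory. */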
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
                rxq->cp_ring->hw_stats = NULL;
}

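/*
 * Distribute the port's Rx queues across VNICs according to the configured
 * multi-queue (VMDq/RSS) mode, allocate the default L2 filter for the first
 * VNIC, and record the per-VNIC RSS hash settings.
 */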
int bnxt_mq_rx_configure(struct bnxt *bp)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
        int start_grp_id, end_grp_id = 1, rc = 0;
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter;
        enum rte_eth_nb_pools pools = 1, max_pools = 0;
        struct bnxt_rx_queue *rxq;

        bp->nr_vnics = 0;

        /* Multi-queue mode */
        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

                switch (dev_conf->rxmode.mq_mode) {
                case RTE_ETH_MQ_RX_VMDQ_RSS:
                case RTE_ETH_MQ_RX_VMDQ_ONLY:
                case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
                        /* For each pool, allocate MACVLAN CFA rule & VNIC */
                        max_pools = RTE_MIN(bp->max_vnics,
                                            RTE_MIN(bp->max_l2_ctx,
                                            RTE_MIN(bp->max_rsscos_ctx,
                                                    RTE_ETH_64_POOLS)));
                        PMD_DRV_LOG(DEBUG,
                                    "pools = %u max_pools = %u\n",
                                    pools, max_pools);
                        if (pools > max_pools)
                                pools = max_pools;
                        break;
                case RTE_ETH_MQ_RX_RSS:
                        pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
                                dev_conf->rxmode.mq_mode);
                        rc = -EINVAL;
                        goto err_out;
                }
        } else if (!dev_conf->rxmode.mq_mode) {
                pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
        }

        pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
        nb_q_per_grp = bp->rx_cp_nr_rings / pools;
        PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
                    pools, nb_q_per_grp);
        start_grp_id = 0;
        end_grp_id = nb_q_per_grp;

        for (i = 0; i < pools; i++) {
                vnic = &bp->vnic_info[i];
                if (!vnic) {
                        PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                vnic->flags |= BNXT_VNIC_INFO_BCAST;
                bp->nr_vnics++;

                for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
                        rxq = bp->eth_dev->data->rx_queues[ring_idx];
                        rxq->vnic = vnic;
                        PMD_DRV_LOG(DEBUG,
                                    "rxq[%d] = %p vnic[%d] = %p\n",
                                    ring_idx, rxq, i, vnic);
                }
                if (i == 0) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
                                bp->eth_dev->data->promiscuous = 1;
                                vnic->flags |= BNXT_VNIC_INFO_PROMISC;
                        }
                        vnic->func_default = true;
                }
                vnic->start_grp_id = start_grp_id;
                vnic->end_grp_id = end_grp_id;

                if (i) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
                            !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
                                vnic->rss_dflt_cr = true;
                        goto skip_filter_allocation;
                }
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                filter->mac_index = 0;
                filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                /*
                 * TODO: Configure & associate CFA rule for
                 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
                 */
                STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
                start_grp_id = end_grp_id;
                end_grp_id += nb_q_per_grp;
        }

        bp->rx_num_qs_per_vnic = nb_q_per_grp;

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;

                if (bp->flags & BNXT_FLAG_UPDATE_HASH)
                        bp->flags &= ~BNXT_FLAG_UPDATE_HASH;

                for (i = 0; i < bp->nr_vnics; i++) {
                        uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

                        vnic = &bp->vnic_info[i];
                        vnic->hash_type =
                                bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
                        vnic->hash_mode =
                                bnxt_rte_to_hwrm_hash_level(bp,
                                                            rss->rss_hf,
                                                            lvl);

                        /*
                         * Use the supplied key if the key length is
                         * acceptable and the rss_key is not NULL
                         */
                        if (rss->rss_key &&
                            rss->rss_key_len <= HW_HASH_KEY_SIZE)
                                memcpy(vnic->rss_hash_key,
                                       rss->rss_key, rss->rss_key_len);
                }
        }

        return rc;

err_out:
        /* Free allocated vnic/filters */

        return rc;
}

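/*
 * Free all mbufs currently held by the queue: buffers posted to the Rx ring,
 * buffers posted to the aggregation ring, and buffers attached to in-progress
 * TPA aggregations.
 */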
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
        struct rte_mbuf **sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;

        if (!rxq || !rxq->rx_ring)
                return;

        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
                /*
                 * The vector receive burst function does not set used
                 * mbuf pointers to NULL, do that here to simplify
                 * cleanup logic.
                 */
                for (i = 0; i < rxq->rxrearm_nb; i++)
                        sw_ring[rxq->rxrearm_start + i] = NULL;
                rxq->rxrearm_nb = 0;
#endif
                for (i = 0;
                     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                if (sw_ring[i] != &rxq->fake_mbuf)
                                        rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }
        /* Free up mbufs in Agg ring */
        if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
                return;

        sw_ring = rxq->rx_ring->ag_buf_ring;
        if (sw_ring) {
                for (i = 0;
                     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }

        /* Free up mbufs in TPA */
        tpa_info = rxq->rx_ring->tpa_info;
        if (tpa_info) {
                int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

                for (i = 0; i < max_aggs; i++) {
                        if (tpa_info[i].mbuf) {
                                rte_pktmbuf_free_seg(tpa_info[i].mbuf);
                                tpa_info[i].mbuf = NULL;
                        }
                }
        }
}

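/* Release the posted Rx mbufs of every Rx queue on the port. */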
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
        struct bnxt_rx_queue *rxq;
        int i;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

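/*
 * Release the software state of an Rx queue: posted mbufs, the Rx and
 * aggregation ring structures, the completion ring, and the queue's
 * descriptor memzone.
 */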
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
        bnxt_rx_queue_release_mbufs(rxq);

        /* Free RX, AGG ring hardware descriptors */
        if (rxq->rx_ring) {
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);
                rxq->rx_ring->rx_ring_struct = NULL;
                /* Free RX Agg ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);
                rxq->rx_ring = NULL;
        }
        /* Free RX completion ring hardware descriptors */
        if (rxq->cp_ring) {
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rxq->cp_ring->cp_ring_struct = NULL;
                rte_free(rxq->cp_ring);
                rxq->cp_ring = NULL;
        }

        bnxt_free_rxq_stats(rxq);
        rte_memzone_free(rxq->mz);
        rxq->mz = NULL;
}

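/*
 * Ethdev rx_queue_release handler: tear down the HW Rx ring and free all
 * software state associated with the queue, including the queue structure
 * itself.
 */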
void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

        if (rxq != NULL) {
                if (is_bnxt_in_error(rxq->bp))
                        return;

                bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
                bnxt_free_rxq_mem(rxq);
                rte_free(rxq);
        }
}

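/*
 * Ethdev rx_queue_setup handler: validate the requested descriptor count,
 * allocate and initialize the bnxt_rx_queue structure, and reserve the Rx,
 * aggregation and completion ring memory for the queue.
 */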
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (queue_idx >= bnxt_max_rings(bp)) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
                        queue_idx, bp->max_rx_rings);
                return -EINVAL;
        }

        if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
                return -ENOMEM;
        }
        rxq->bp = bp;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh =
                RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

        if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
                PMD_DRV_LOG(NOTICE,
                            "Per-queue config of drop-en is not supported.\n");
        rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

        eth_dev->data->rx_queues[queue_idx] = rxq;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "init_rx_ring_struct failed!\n");
                goto err;
        }

        PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        /* Allocate RX ring hardware descriptors */
        rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
                              NULL, "rxr");
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "ring_dma_zone_reserve for rx_ring failed!\n");
                goto err;
        }
        rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

        /* rxq 0 must not be stopped when used as async CPR */
        if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
                rxq->rx_deferred_start = false;
        else
                rxq->rx_deferred_start = rx_conf->rx_deferred_start;

        rxq->rx_started = rxq->rx_deferred_start ? false : true;
        rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);

        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

        return 0;
err:
        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        return rc;
}

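/*
 * Ethdev rx_queue_intr_enable handler: re-arm the queue's completion ring
 * doorbell so that a completion can raise the queue interrupt again.
 */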
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
        }
        return rc;
}

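/*
 * Ethdev rx_queue_intr_disable handler: disarm the queue's completion ring
 * doorbell so that completions no longer raise the queue interrupt.
 */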
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_DISARM(cpr);
        }
        return rc;
}

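/*
 * Ethdev rx_queue_start handler: rebuild the HW Rx ring for the queue, mark
 * the queue started, and add it back to its VNIC's RSS table when RSS is
 * enabled.
 */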
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        /* Set the queue state to started here.
         * We check the status of the queue while posting buffers.
         * If the queue is not started, we do not post buffers for Rx.
         */
        rxq->rx_started = true;
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
        rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
        if (rc)
                return rc;

        if (BNXT_CHIP_P5(bp)) {
                /* Reconfigure default receive ring and MRU. */
                bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
        }
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
                                return 0;

                        vnic->fw_grp_ids[rx_queue_id] =
                                        bp->grp_info[rx_queue_id].fw_grp_id;
                        PMD_DRV_LOG(DEBUG,
                                    "vnic = %p fw_grp_id = %d\n",
                                    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
                }

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        if (rc != 0) {
                dev->data->rx_queue_state[rx_queue_id] =
                                RTE_ETH_QUEUE_STATE_STOPPED;
                rxq->rx_started = false;
        }

        PMD_DRV_LOG(INFO,
                    "queue %d, rx_deferred_start %d, state %d!\n",
                    rx_queue_id, rxq->rx_deferred_start,
                    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

        return rc;
}

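/*
 * Ethdev rx_queue_stop handler: mark the queue stopped, remove it from its
 * VNIC's RSS table, keep the VNIC default ring pointing at an active queue
 * (or zero the MRU on P5 chips when none remain), and free posted mbufs.
 */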
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL;
        struct bnxt_rx_queue *rxq = NULL;
        int active_queue_cnt = 0;
        int i, rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* For the stingray platform and other platforms needing tighter
         * control of resource utilization, Rx CQ 0 also works as
         * Default CQ for async notifications
         */
        if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
                PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
                return -EINVAL;
        }

        rxq = bp->rx_queues[rx_queue_id];
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        vnic = rxq->vnic;
        if (!vnic) {
                PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
                            rx_queue_id);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        rxq->rx_started = false;
        PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                if (BNXT_HAS_RING_GRPS(bp))
                        vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        /* Compute current number of active receive queues. */
        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
                if (bp->rx_queues[i]->rx_started)
                        active_queue_cnt++;

        if (BNXT_CHIP_P5(bp)) {
                /*
                 * For Thor, we need to ensure that the VNIC default receive
                 * ring corresponds to an active receive queue. When no queue
                 * is active, we need to temporarily set the MRU to zero so
                 * that packets are dropped early in the receive pipeline in
                 * order to prevent the VNIC default receive ring from being
                 * accessed.
                 */
                if (active_queue_cnt == 0) {
                        uint16_t saved_mru = vnic->mru;

                        vnic->mru = 0;
                        /* Reconfigure default receive ring and MRU. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                        vnic->mru = saved_mru;
                } else {
                        /* Reconfigure default receive ring. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                }
        } else if (active_queue_cnt) {
                /*
                 * If the queue being stopped is the current default queue and
                 * there are other active queues, pick one of them as the
                 * default and reconfigure the vnic.
                 */
                if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
                        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
                                if (bp->rx_queues[i]->rx_started) {
                                        vnic->dflt_ring_grp =
                                                bp->grp_info[i].fw_grp_id;
                                        bnxt_hwrm_vnic_cfg(bp, vnic);
                                        break;
                                }
                        }
                }
        }

        if (rc == 0)
                bnxt_rx_queue_release_mbufs(rxq);

        return rc;
}