net/bnxt: fix device capability reporting
drivers/net/bnxt/bnxt_rxq.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

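/*
 * Report the Rx offload capabilities supported by the port. The baseline set
 * (L3/L4 checksums, outer checksums, KEEP_CRC, VLAN filter/extend, LRO,
 * scatter, RSS hash) is always advertised; PTP timestamping and VLAN strip
 * are added only when the corresponding device/VNIC capability flags are set.
 * Typically consumed when filling rte_eth_dev_info.rx_offload_capa in the
 * dev_infos_get path (usage note, not shown in this file).
 */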
uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
{
        uint64_t rx_offload_capa;

        rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
                          RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
                          RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
                          RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
                          RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
                          RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
                          RTE_ETH_RX_OFFLOAD_TCP_LRO |
                          RTE_ETH_RX_OFFLOAD_SCATTER |
                          RTE_ETH_RX_OFFLOAD_RSS_HASH;

        rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                           RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;

        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
        if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
                rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

        return rx_offload_capa;
}

/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
        /* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
         * if LRO is enabled, or if the max packet len is greater than the
         * mbuf data size. So AGG ring will be needed whenever scattered_rx
         * is set.
         */
        return eth_dev->data->scattered_rx ? 1 : 0;
}

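/*
 * Detach the completion ring's stats context pointer. Only the pointer is
 * cleared here; the backing stats memory is carved out of the queue's ring
 * memzone and is released with that memzone in bnxt_free_rxq_mem()
 * (ownership note based on the memzone free below, allocation not shown
 * in this file).
 */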
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
                rxq->cp_ring->hw_stats = NULL;
}

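/*
 * Distribute the configured Rx queues across VNICs according to the
 * multi-queue mode. Each pool maps to one VNIC owning a contiguous range of
 * ring groups [start_grp_id, end_grp_id); e.g. 8 Rx rings split over 2 pools
 * yields nb_q_per_grp = 4 with group ranges [0, 4) and [4, 8). VNIC 0 is the
 * default function VNIC and receives the default L2 filter; when an RSS mode
 * is requested, hash type, hash level and (optionally) the RSS key are then
 * programmed into every VNIC.
 */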
int bnxt_mq_rx_configure(struct bnxt *bp)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
        int start_grp_id, end_grp_id = 1, rc = 0;
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter;
        enum rte_eth_nb_pools pools = 1, max_pools = 0;
        struct bnxt_rx_queue *rxq;

        bp->nr_vnics = 0;

        /* Multi-queue mode */
        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

                switch (dev_conf->rxmode.mq_mode) {
                case RTE_ETH_MQ_RX_VMDQ_RSS:
                case RTE_ETH_MQ_RX_VMDQ_ONLY:
                case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
                        /* For each pool, allocate MACVLAN CFA rule & VNIC */
                        max_pools = RTE_MIN(bp->max_vnics,
                                            RTE_MIN(bp->max_l2_ctx,
                                            RTE_MIN(bp->max_rsscos_ctx,
                                                    RTE_ETH_64_POOLS)));
                        PMD_DRV_LOG(DEBUG,
                                    "pools = %u max_pools = %u\n",
                                    pools, max_pools);
                        if (pools > max_pools)
                                pools = max_pools;
                        break;
                case RTE_ETH_MQ_RX_RSS:
                        pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
                                dev_conf->rxmode.mq_mode);
                        rc = -EINVAL;
                        goto err_out;
                }
        } else if (!dev_conf->rxmode.mq_mode) {
                pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
        }

        pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
        nb_q_per_grp = bp->rx_cp_nr_rings / pools;
        PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
                    pools, nb_q_per_grp);
        start_grp_id = 0;
        end_grp_id = nb_q_per_grp;

        for (i = 0; i < pools; i++) {
                vnic = &bp->vnic_info[i];
                if (!vnic) {
                        PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                vnic->flags |= BNXT_VNIC_INFO_BCAST;
                bp->nr_vnics++;

                for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
                        rxq = bp->eth_dev->data->rx_queues[ring_idx];
                        rxq->vnic = vnic;
                        PMD_DRV_LOG(DEBUG,
                                    "rxq[%d] = %p vnic[%d] = %p\n",
                                    ring_idx, rxq, i, vnic);
                }
                if (i == 0) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
                                bp->eth_dev->data->promiscuous = 1;
                                vnic->flags |= BNXT_VNIC_INFO_PROMISC;
                        }
                        vnic->func_default = true;
                }
                vnic->start_grp_id = start_grp_id;
                vnic->end_grp_id = end_grp_id;

                if (i) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
                            !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
                                vnic->rss_dflt_cr = true;
                        goto skip_filter_allocation;
                }
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                filter->mac_index = 0;
                filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                /*
                 * TODO: Configure & associate CFA rule for
                 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
                 */
                STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
                start_grp_id = end_grp_id;
                end_grp_id += nb_q_per_grp;
        }

        bp->rx_num_qs_per_vnic = nb_q_per_grp;

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                struct rte_eth_rss_conf *rss = &bp->rss_conf;

                if (bp->flags & BNXT_FLAG_UPDATE_HASH)
                        bp->flags &= ~BNXT_FLAG_UPDATE_HASH;

                for (i = 0; i < bp->nr_vnics; i++) {
                        uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

                        vnic = &bp->vnic_info[i];
                        vnic->hash_type =
                                bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
                        vnic->hash_mode =
                                bnxt_rte_to_hwrm_hash_level(bp,
                                                            rss->rss_hf,
                                                            lvl);

                        /*
                         * Use the supplied key if the key length is
                         * acceptable and the rss_key is not NULL
                         */
                        if (rss->rss_key &&
                            rss->rss_key_len <= HW_HASH_KEY_SIZE)
                                memcpy(vnic->rss_hash_key,
                                       rss->rss_key, rss->rss_key_len);
                }
        }

        return rc;

err_out:
        /* Free allocated vnic/filters */

        return rc;
}

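/*
 * Return all mbufs still held by an Rx queue to their mempools: buffers in
 * the Rx ring, buffers in the aggregation ring (only when an AGG ring is in
 * use for this configuration), and any partially assembled TPA (LRO)
 * aggregations.
 */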
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
        struct rte_mbuf **sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;

        if (!rxq || !rxq->rx_ring)
                return;

        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
                /*
                 * The vector receive burst function does not set used
                 * mbuf pointers to NULL, do that here to simplify
                 * cleanup logic.
                 */
                for (i = 0; i < rxq->rxrearm_nb; i++)
                        sw_ring[rxq->rxrearm_start + i] = NULL;
                rxq->rxrearm_nb = 0;
#endif
                for (i = 0;
                     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                if (sw_ring[i] != &rxq->fake_mbuf)
                                        rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }
        /* Free up mbufs in Agg ring */
        if (rxq->bp == NULL ||
            rxq->bp->eth_dev == NULL ||
            !bnxt_need_agg_ring(rxq->bp->eth_dev))
                return;

        sw_ring = rxq->rx_ring->ag_buf_ring;
        if (sw_ring) {
                for (i = 0;
                     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }

        /* Free up mbufs in TPA */
        tpa_info = rxq->rx_ring->tpa_info;
        if (tpa_info) {
                int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

                for (i = 0; i < max_aggs; i++) {
                        if (tpa_info[i].mbuf) {
                                rte_pktmbuf_free_seg(tpa_info[i].mbuf);
                                tpa_info[i].mbuf = NULL;
                        }
                }
        }
}

void bnxt_free_rx_mbufs(struct bnxt *bp)
{
        struct bnxt_rx_queue *rxq;
        int i;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

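/*
 * Release all software-side resources of an Rx queue: queued mbufs, the Rx,
 * aggregation and completion ring structures, and the queue's DMA memzone.
 * The firmware-side rings are torn down separately via HWRM (see
 * bnxt_rx_queue_release_op() below).
 */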
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
        bnxt_rx_queue_release_mbufs(rxq);

        /* Free RX, AGG ring hardware descriptors */
        if (rxq->rx_ring) {
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);
                rxq->rx_ring->rx_ring_struct = NULL;
                /* Free RX Agg ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);
                rxq->rx_ring = NULL;
        }
        /* Free RX completion ring hardware descriptors */
        if (rxq->cp_ring) {
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rxq->cp_ring->cp_ring_struct = NULL;
                rte_free(rxq->cp_ring);
                rxq->cp_ring = NULL;
        }

        bnxt_free_rxq_stats(rxq);
        rte_memzone_free(rxq->mz);
        rxq->mz = NULL;
}

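/*
 * Rx queue release handler (the PMD's rx_queue_release dev op). Frees the
 * HWRM-side Rx ring first, then the driver-side queue memory and the queue
 * structure itself. Nothing is freed when the device is already in an error
 * state.
 */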
void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

        if (rxq != NULL) {
                if (is_bnxt_in_error(rxq->bp))
                        return;

                bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
                bnxt_free_rxq_mem(rxq);
                rte_free(rxq);
        }
}

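/*
 * Rx queue setup handler (rx_queue_setup dev op). Validates the ring index
 * and descriptor count, allocates the bnxt_rx_queue and its ring/descriptor
 * memory, and derives rx_free_thresh as
 * min(align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST); e.g. nb_desc = 1024
 * gives 256 before the burst-size cap is applied. Queue 0 cannot be
 * deferred-started when it doubles as the async completion ring.
 */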
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               uint16_t queue_idx,
                               uint16_t nb_desc,
                               unsigned int socket_id,
                               const struct rte_eth_rxconf *rx_conf,
                               struct rte_mempool *mp)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (queue_idx >= bnxt_max_rings(bp)) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
                        queue_idx, bp->max_rx_rings);
                return -EINVAL;
        }

        if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
                return -ENOMEM;
        }
        rxq->bp = bp;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh =
                RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

        if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
                PMD_DRV_LOG(NOTICE,
                            "Per-queue config of drop-en is not supported.\n");
        rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

        eth_dev->data->rx_queues[queue_idx] = rxq;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "init_rx_ring_struct failed!\n");
                goto err;
        }

        PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        /* Allocate RX ring hardware descriptors */
        rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
                              NULL, "rxr");
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "ring_dma_zone_reserve for rx_ring failed!\n");
                goto err;
        }
        rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

        /* rxq 0 must not be stopped when used as async CPR */
        if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
                rxq->rx_deferred_start = false;
        else
                rxq->rx_deferred_start = rx_conf->rx_deferred_start;

        rxq->rx_started = rxq->rx_deferred_start ? false : true;
        rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);

        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

        return 0;
err:
        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        return rc;
}

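/*
 * Rx interrupt enable/disable handlers. Enabling re-arms the completion ring
 * doorbell at the current raw consumer index so the next completion raises
 * an interrupt; disabling disarms the doorbell.
 */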
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
        }
        return rc;
}

int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_DISARM(cpr);
        }
        return rc;
}

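/*
 * Start an Rx queue at runtime (rx_queue_start dev op). The queue is marked
 * started before the HWRM ring is (re)created so that buffer posting sees a
 * consistent state; on P5 (Thor) devices the VNIC is reconfigured to refresh
 * the default ring and MRU, and when RSS is enabled the queue's ring group ID
 * is restored in the VNIC and the RSS configuration is reprogrammed.
 */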
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        /* Set the queue state to started here.
         * We check the status of the queue while posting buffers.
         * If the queue is not started, we do not post buffers for Rx.
         */
        rxq->rx_started = true;
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
        rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
        if (rc)
                return rc;

        if (BNXT_CHIP_P5(bp)) {
                /* Reconfigure default receive ring and MRU. */
                bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
        }
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
                                return 0;

                        vnic->fw_grp_ids[rx_queue_id] =
                                        bp->grp_info[rx_queue_id].fw_grp_id;
                        PMD_DRV_LOG(DEBUG,
                                    "vnic = %p fw_grp_id = %d\n",
                                    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
                }

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        if (rc != 0) {
                dev->data->rx_queue_state[rx_queue_id] =
                                RTE_ETH_QUEUE_STATE_STOPPED;
                rxq->rx_started = false;
        }

        PMD_DRV_LOG(INFO,
                    "queue %d, rx_deferred_start %d, state %d!\n",
                    rx_queue_id, rxq->rx_deferred_start,
                    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

        return rc;
}

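/*
 * Stop an Rx queue at runtime (rx_queue_stop dev op). Queue 0 cannot be
 * stopped when it also serves as the default/async completion ring. The
 * queue's ring group is removed from the VNIC's RSS configuration, and the
 * VNIC default ring (and, on P5 when no queue remains active, the MRU) is
 * reconfigured so the hardware never references a stopped ring; finally the
 * queue's mbufs are released.
 */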
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL;
        struct bnxt_rx_queue *rxq = NULL;
        int active_queue_cnt = 0;
        int i, rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* For the stingray platform and other platforms needing tighter
         * control of resource utilization, Rx CQ 0 also works as
         * Default CQ for async notifications.
         */
        if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
                PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
                return -EINVAL;
        }

        rxq = bp->rx_queues[rx_queue_id];
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        vnic = rxq->vnic;
        if (!vnic) {
                PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
                            rx_queue_id);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        rxq->rx_started = false;
        PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                if (BNXT_HAS_RING_GRPS(bp))
                        vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        /* Compute current number of active receive queues. */
        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
                if (bp->rx_queues[i]->rx_started)
                        active_queue_cnt++;

        if (BNXT_CHIP_P5(bp)) {
                /*
                 * For Thor, we need to ensure that the VNIC default receive
                 * ring corresponds to an active receive queue. When no queue
                 * is active, we need to temporarily set the MRU to zero so
                 * that packets are dropped early in the receive pipeline in
                 * order to prevent the VNIC default receive ring from being
                 * accessed.
                 */
                if (active_queue_cnt == 0) {
                        uint16_t saved_mru = vnic->mru;

                        /* Clear RSS setting on vnic. */
                        bnxt_vnic_rss_clear_p5(bp, vnic);

                        vnic->mru = 0;
                        /* Reconfigure default receive ring and MRU. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                        vnic->mru = saved_mru;
                } else {
                        /* Reconfigure default receive ring. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                }
        } else if (active_queue_cnt) {
                /*
                 * If the queue being stopped is the current default queue and
                 * there are other active queues, pick one of them as the
                 * default and reconfigure the vnic.
                 */
                if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
                        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
                                if (bp->rx_queues[i]->rx_started) {
                                        vnic->dflt_ring_grp =
                                                bp->grp_info[i].fw_grp_id;
                                        bnxt_hwrm_vnic_cfg(bp, vnic);
                                        break;
                                }
                        }
                }
        }

        if (rc == 0)
                bnxt_rx_queue_release_mbufs(rxq);

        return rc;
}