drivers/net/bnxt/bnxt_rxq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

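/* Report the Rx offload capabilities of the port. Timestamp, VLAN strip
 * and outer (tunnel) checksum offloads are advertised only when the
 * corresponding device capability flags are set.
 */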
uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
{
        uint64_t rx_offload_capa;

        rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
                          RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
                          RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
                          RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
                          RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
                          RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
                          RTE_ETH_RX_OFFLOAD_TCP_LRO |
                          RTE_ETH_RX_OFFLOAD_SCATTER |
                          RTE_ETH_RX_OFFLOAD_RSS_HASH;

        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
        if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
                rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

        if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
                rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                        RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;

        return rx_offload_capa;
}

/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
        /* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
         * if LRO is enabled, or if the max packet len is greater than the
         * mbuf data size. So AGG ring will be needed whenever scattered_rx
         * is set.
         */
        return eth_dev->data->scattered_rx ? 1 : 0;
}

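/* Drop the completion ring's reference to its HW stats context. */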
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
                rxq->cp_ring->hw_stats = NULL;
}

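/* Distribute the Rx queues across VNICs based on the configured
 * multi-queue mode (VMDq and/or RSS), allocate an L2 filter for the
 * default VNIC, and program each VNIC's RSS hash type, hash level and
 * hash key.
 */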
int bnxt_mq_rx_configure(struct bnxt *bp)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct rte_eth_rss_conf *rss = &bp->rss_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
        int start_grp_id, end_grp_id = 1, rc = 0;
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter;
        enum rte_eth_nb_pools pools = 1, max_pools = 0;
        struct bnxt_rx_queue *rxq;

        bp->nr_vnics = 0;

        /* Multi-queue mode */
        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

                switch (dev_conf->rxmode.mq_mode) {
                case RTE_ETH_MQ_RX_VMDQ_RSS:
                case RTE_ETH_MQ_RX_VMDQ_ONLY:
                case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
                        /* For each pool, allocate MACVLAN CFA rule & VNIC */
                        max_pools = RTE_MIN(bp->max_vnics,
                                            RTE_MIN(bp->max_l2_ctx,
                                            RTE_MIN(bp->max_rsscos_ctx,
                                                    RTE_ETH_64_POOLS)));
                        PMD_DRV_LOG(DEBUG,
                                    "pools = %u max_pools = %u\n",
                                    pools, max_pools);
                        if (pools > max_pools)
                                pools = max_pools;
                        break;
                case RTE_ETH_MQ_RX_RSS:
                        pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
                                dev_conf->rxmode.mq_mode);
                        rc = -EINVAL;
                        goto err_out;
                }
        } else if (!dev_conf->rxmode.mq_mode) {
                pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
        }

        pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
        nb_q_per_grp = bp->rx_cp_nr_rings / pools;
        PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
                    pools, nb_q_per_grp);
        start_grp_id = 0;
        end_grp_id = nb_q_per_grp;

        for (i = 0; i < pools; i++) {
                vnic = &bp->vnic_info[i];
                if (!vnic) {
                        PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                vnic->flags |= BNXT_VNIC_INFO_BCAST;
                bp->nr_vnics++;

                for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
                        rxq = bp->eth_dev->data->rx_queues[ring_idx];
                        rxq->vnic = vnic;
                        PMD_DRV_LOG(DEBUG,
                                    "rxq[%d] = %p vnic[%d] = %p\n",
                                    ring_idx, rxq, i, vnic);
                }
                if (i == 0) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
                                bp->eth_dev->data->promiscuous = 1;
                                vnic->flags |= BNXT_VNIC_INFO_PROMISC;
                        }
                        vnic->func_default = true;
                }
                vnic->start_grp_id = start_grp_id;
                vnic->end_grp_id = end_grp_id;

                if (i) {
                        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
                            !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
                                vnic->rss_dflt_cr = true;
                        goto skip_filter_allocation;
                }
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                filter->mac_index = 0;
                filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                /*
                 * TODO: Configure & associate CFA rule for
                 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
                 */
                STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
                start_grp_id = end_grp_id;
                end_grp_id += nb_q_per_grp;
        }

        bp->rx_num_qs_per_vnic = nb_q_per_grp;

        for (i = 0; i < bp->nr_vnics; i++) {
                uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

                vnic = &bp->vnic_info[i];
                vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
                vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss->rss_hf, lvl);

                /*
                 * Use the supplied key if the key length is
                 * acceptable and the rss_key is not NULL
                 */
                if (rss->rss_key && rss->rss_key_len <= HW_HASH_KEY_SIZE)
                        memcpy(vnic->rss_hash_key, rss->rss_key, rss->rss_key_len);
        }

        return rc;

err_out:
        /* Free allocated vnic/filters */

        return rc;
}

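/* Free every mbuf held by the queue: buffers posted to the Rx ring,
 * buffers posted to the aggregation ring (when one is in use), and any
 * in-progress TPA aggregations.
 */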
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
        struct rte_mbuf **sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;

        if (!rxq || !rxq->rx_ring)
                return;

        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
                /*
                 * The vector receive burst function does not set used
                 * mbuf pointers to NULL, do that here to simplify
                 * cleanup logic.
                 */
                for (i = 0; i < rxq->rxrearm_nb; i++)
                        sw_ring[rxq->rxrearm_start + i] = NULL;
                rxq->rxrearm_nb = 0;
#endif
                for (i = 0;
                     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                if (sw_ring[i] != &rxq->fake_mbuf)
                                        rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }
        /* Free up mbufs in Agg ring */
        if (rxq->bp == NULL ||
            rxq->bp->eth_dev == NULL ||
            !bnxt_need_agg_ring(rxq->bp->eth_dev))
                return;

        sw_ring = rxq->rx_ring->ag_buf_ring;
        if (sw_ring) {
                for (i = 0;
                     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
                        if (sw_ring[i]) {
                                rte_pktmbuf_free_seg(sw_ring[i]);
                                sw_ring[i] = NULL;
                        }
                }
        }

        /* Free up mbufs in TPA */
        tpa_info = rxq->rx_ring->tpa_info;
        if (tpa_info) {
                int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

                for (i = 0; i < max_aggs; i++) {
                        if (tpa_info[i].mbuf) {
                                rte_pktmbuf_free_seg(tpa_info[i].mbuf);
                                tpa_info[i].mbuf = NULL;
                        }
                }
        }
}

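/* Release the mbufs of all Rx queues on the port. */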
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
        struct bnxt_rx_queue *rxq;
        int i;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

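/* Free an Rx queue's software and hardware resources: posted mbufs,
 * the Rx, aggregation and completion ring structures, the stats
 * context reference, and the backing memzone.
 */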
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
        bnxt_rx_queue_release_mbufs(rxq);

        /* Free RX, AGG ring hardware descriptors */
        if (rxq->rx_ring) {
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);
                rxq->rx_ring->rx_ring_struct = NULL;
                /* Free RX Agg ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);
                rxq->rx_ring = NULL;
        }
        /* Free RX completion ring hardware descriptors */
        if (rxq->cp_ring) {
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rxq->cp_ring->cp_ring_struct = NULL;
                rte_free(rxq->cp_ring);
                rxq->cp_ring = NULL;
        }

        bnxt_free_rxq_stats(rxq);
        rte_memzone_free(rxq->mz);
        rxq->mz = NULL;
}

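/* Ethdev rx_queue_release handler: free the queue's HWRM Rx ring and
 * all memory owned by the queue.
 */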
void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

        if (rxq != NULL) {
                if (is_bnxt_in_error(rxq->bp))
                        return;

                bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
                bnxt_free_rxq_mem(rxq);
                rte_free(rxq);
        }
}

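/* Ethdev rx_queue_setup handler: validate the ring index and descriptor
 * count, allocate the bnxt_rx_queue and its ring structures, reserve the
 * descriptor memzone, and apply the deferred-start and CRC-keep settings.
 */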
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               uint16_t queue_idx,
                               uint16_t nb_desc,
                               unsigned int socket_id,
                               const struct rte_eth_rxconf *rx_conf,
                               struct rte_mempool *mp)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (queue_idx >= bnxt_max_rings(bp)) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
                        queue_idx, bp->max_rx_rings);
                return -EINVAL;
        }

        if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
                return -ENOMEM;
        }
        rxq->bp = bp;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh =
                RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

        if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
                PMD_DRV_LOG(NOTICE,
                            "Per-queue config of drop-en is not supported.\n");
        rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

        eth_dev->data->rx_queues[queue_idx] = rxq;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "init_rx_ring_struct failed!\n");
                goto err;
        }

        PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        /* Allocate RX ring hardware descriptors */
        rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
                              NULL, "rxr");
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "ring_dma_zone_reserve for rx_ring failed!\n");
                goto err;
        }
        rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

        /* rxq 0 must not be stopped when used as async CPR */
        if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
                rxq->rx_deferred_start = false;
        else
                rxq->rx_deferred_start = rx_conf->rx_deferred_start;

        rxq->rx_started = rxq->rx_deferred_start ? false : true;
        rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);

        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

        return 0;
err:
        bnxt_rx_queue_release_op(eth_dev, queue_idx);
        return rc;
}

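/* Ethdev rx_queue_intr_enable handler: re-arm the completion ring
 * doorbell so new completions raise an interrupt.
 */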
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
        }
        return rc;
}

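/* Ethdev rx_queue_intr_disable handler: disarm the completion ring
 * doorbell to stop further interrupts from this queue.
 */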
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq)
                        return -EINVAL;

                cpr = rxq->cp_ring;
                B_CP_DB_DISARM(cpr);
        }
        return rc;
}

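/* Ethdev rx_queue_start handler: recreate the HWRM Rx ring, attach the
 * queue to its VNIC's default ring group, and add it back to the VNIC
 * RSS table when RSS is enabled.
 */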
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        /* Set the queue state to started here.
         * The queue status is checked while posting buffers;
         * if the queue is not started, buffers are not posted for Rx.
         */
        rxq->rx_started = true;
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
        rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
        if (rc)
                return rc;

        if (BNXT_HAS_RING_GRPS(bp))
                rxq->vnic->dflt_ring_grp = bp->grp_info[rx_queue_id].fw_grp_id;
        /* Reconfigure default receive ring and MRU. */
        bnxt_hwrm_vnic_cfg(bp, rxq->vnic);

        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;

                if (BNXT_HAS_RING_GRPS(bp)) {
                        if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
                                return 0;

                        vnic->fw_grp_ids[rx_queue_id] =
                                        bp->grp_info[rx_queue_id].fw_grp_id;
                        PMD_DRV_LOG(DEBUG,
                                    "vnic = %p fw_grp_id = %d\n",
                                    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
                }

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        if (rc != 0) {
                dev->data->rx_queue_state[rx_queue_id] =
                                RTE_ETH_QUEUE_STATE_STOPPED;
                rxq->rx_started = false;
        }

        PMD_DRV_LOG(INFO,
                    "queue %d, rx_deferred_start %d, state %d!\n",
                    rx_queue_id, rxq->rx_deferred_start,
                    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

        return rc;
}

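/* Ethdev rx_queue_stop handler: mark the queue stopped, remove it from
 * the VNIC RSS table, repoint the VNIC default ring group if needed
 * (clearing RSS and dropping the MRU to zero on P5 devices when no
 * queue remains active), and release the queue's mbufs.
 */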
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL;
        struct bnxt_rx_queue *rxq = NULL;
        int active_queue_cnt = 0;
        int i, rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* For the stingray platform and other platforms needing tighter
         * control of resource utilization, Rx CQ 0 also works as
         * Default CQ for async notifications
         */
        if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
                PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
                return -EINVAL;
        }

        rxq = bp->rx_queues[rx_queue_id];
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        vnic = rxq->vnic;
        if (!vnic) {
                PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
                            rx_queue_id);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        rxq->rx_started = false;
        PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

        if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                if (BNXT_HAS_RING_GRPS(bp))
                        vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

                PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }

        /* Compute current number of active receive queues. */
        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
                if (bp->rx_queues[i]->rx_started)
                        active_queue_cnt++;

        if (BNXT_CHIP_P5(bp)) {
                /*
                 * For Thor, we need to ensure that the VNIC default receive
                 * ring corresponds to an active receive queue. When no queue
                 * is active, we need to temporarily set the MRU to zero so
                 * that packets are dropped early in the receive pipeline in
                 * order to prevent the VNIC default receive ring from being
                 * accessed.
                 */
                if (active_queue_cnt == 0) {
                        uint16_t saved_mru = vnic->mru;

                        /* clear RSS setting on vnic. */
                        bnxt_vnic_rss_clear_p5(bp, vnic);

                        vnic->mru = 0;
                        /* Reconfigure default receive ring and MRU. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                        vnic->mru = saved_mru;
                } else {
                        /* Reconfigure default receive ring. */
                        bnxt_hwrm_vnic_cfg(bp, vnic);
                }
        } else if (active_queue_cnt) {
                /*
                 * If the queue being stopped is the current default queue and
                 * there are other active queues, pick one of them as the
                 * default and reconfigure the vnic.
                 */
                if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
                        for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
                                if (bp->rx_queues[i]->rx_started) {
                                        vnic->dflt_ring_grp =
                                                bp->grp_info[i].fw_grp_id;
                                        bnxt_hwrm_vnic_cfg(bp, vnic);
                                        break;
                                }
                        }
                }
        }

        if (rc == 0)
                bnxt_rx_queue_release_mbufs(rxq);

        return rc;
}