/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
/*
 * RX Queues
 */

void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}
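
/*
 * Distribute the port's receive rings across one or more pools/VNICs
 * based on the configured multi-queue mode, assign a contiguous range
 * of ring groups to each VNIC, and attach a default L2 filter to the
 * default VNIC.
 */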
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;
	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = &bp->vnic_info[0];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}
	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				    dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}
	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;
	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}
out:
	bp->rx_num_qs_per_vnic = nb_q_per_grp;
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH)
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;

		for (i = 0; i < bp->nr_vnics; i++) {
			uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);

			vnic = &bp->vnic_info[i];
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
			vnic->hash_mode =
				bnxt_rte_to_hwrm_hash_level(bp,
							    rss->rss_hf,
							    lvl);

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}
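
/*
 * Return any mbufs currently posted to the queue's receive ring,
 * aggregation ring, and TPA reassembly slots back to their mempool.
 */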
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				/* The fake mbuf is not owned by the pool. */
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}
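
/* Release the mbufs posted to every Rx queue on the port. */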
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
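
/*
 * Release all resources owned by an Rx queue: posted mbufs, hardware
 * descriptor rings (receive, aggregation, and completion), per-queue
 * statistics, and the queue structure itself.
 */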
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		if (rxq->rx_ring) {
			bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
			rte_free(rxq->rx_ring->rx_ring_struct);
			/* Free RX Agg ring hardware descriptors */
			bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
			rte_free(rxq->rx_ring->ag_ring_struct);

			rte_free(rxq->rx_ring);
		}

		/* Free RX completion ring hardware descriptors */
		if (rxq->cp_ring) {
			bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring->cp_ring_struct);
			rte_free(rxq->cp_ring);
		}

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}
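
/*
 * Implements the rx_queue_setup ethdev operation. Typically reached
 * through rte_eth_rx_queue_setup() after rte_eth_dev_configure() and
 * before rte_eth_dev_start(). Any queue previously set up at this
 * index is released before the new queue is allocated.
 */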
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;
	uint8_t queue_state;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Rx ring %d. Only %d rings available\n",
			    queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}
	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		return -ENOMEM;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	/* Cap the free threshold at a quarter of the ring size (rounded up
	 * to a power of two) or the maximum supported Rx burst size.
	 */
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
		PMD_DRV_LOG(NOTICE,
			    "Per-queue config of drop-en is not supported.\n");
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;

	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "init_rx_ring_struct failed!\n");
		goto err;
	}

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;
	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	rc = bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
			      "rxr");
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "ring_dma_zone_reserve for rx_ring failed!\n");
		goto err;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;
	if (rxq->rx_deferred_start) {
		queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	} else {
		queue_state = RTE_ETH_QUEUE_STATE_STARTED;
		rxq->rx_started = true;
	}
	eth_dev->data->rx_queue_state[queue_idx] = queue_state;

	/* Configure mtu if it is different from what was configured before */
	if (!queue_idx)
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

	return 0;
err:
	bnxt_rx_queue_release_op(rxq);
	return rc;
}
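
/*
 * Rx queue interrupt enable/disable: re-arm or disarm the completion
 * ring doorbell for the given queue.
 */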
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}
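
/*
 * Implements the rx_queue_start ethdev operation: reallocate the HWRM
 * receive ring for the queue, mark it started, and restore its ring
 * group in the VNIC's RSS table when RSS is enabled.
 */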
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers.
	 * If the queue is stopped, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;
	if (BNXT_CHIP_P5(bp)) {
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
	}

	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG,
				    "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}
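
/*
 * Implements the rx_queue_stop ethdev operation: mark the queue stopped,
 * remove its ring group from the VNIC's RSS table when RSS is enabled,
 * and repoint (or quiesce) the VNIC default receive ring if the stopped
 * queue was backing it.
 */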
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as
	 * Default CQ for async notifications
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	vnic = rxq->vnic;
	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
			    rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	/* Compute current number of active receive queues. */
	for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
		if (bp->rx_queues[i]->rx_started)
			active_queue_cnt++;
	if (BNXT_CHIP_P5(bp)) {
		/*
		 * For Thor, we need to ensure that the VNIC default receive
		 * ring corresponds to an active receive queue. When no queue
		 * is active, we need to temporarily set the MRU to zero so
		 * that packets are dropped early in the receive pipeline in
		 * order to prevent the VNIC default receive ring from being
		 * accessed.
		 */
		if (active_queue_cnt == 0) {
			uint16_t saved_mru = vnic->mru;

			/* Clear the MRU while no queue is active. */
			vnic->mru = 0;
			/* Reconfigure default receive ring and MRU. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
			vnic->mru = saved_mru;
		} else {
			/* Reconfigure default receive ring. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
	} else if (active_queue_cnt) {
		/*
		 * If the queue being stopped is the current default queue and
		 * there are other active queues, pick one of them as the
		 * default and reconfigure the vnic.
		 */
		if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
			for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}