ethdev: add namespace
[dpdk.git] / drivers / net / bnxt / bnxt_rxq.c
index e82eda9..1456f8b 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
  * RX Queues
  */
 
+/* Determine whether the current configuration needs an aggregation ring in HW. */
+int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
+{
+       /* scattered_rx will be true if OFFLOAD_SCATTER is enabled, if LRO
+        * is enabled, or if the max packet length exceeds the mbuf data
+        * size. The AGG ring is therefore needed exactly when scattered_rx
+        * is set.
+        */
+       return eth_dev->data->scattered_rx ? 1 : 0;
+}
+
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
 {
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
@@ -35,48 +46,19 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
        int start_grp_id, end_grp_id = 1, rc = 0;
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter;
-       enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
+       enum rte_eth_nb_pools pools = 1, max_pools = 0;
        struct bnxt_rx_queue *rxq;
 
        bp->nr_vnics = 0;
 
-       /* Single queue mode */
-       if (bp->rx_cp_nr_rings < 2) {
-               vnic = &bp->vnic_info[0];
-               if (!vnic) {
-                       PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
-                       rc = -ENOMEM;
-                       goto err_out;
-               }
-               vnic->flags |= BNXT_VNIC_INFO_BCAST;
-               bp->nr_vnics++;
-
-               rxq = bp->eth_dev->data->rx_queues[0];
-               rxq->vnic = vnic;
-
-               vnic->func_default = true;
-               vnic->start_grp_id = 0;
-               vnic->end_grp_id = vnic->start_grp_id;
-               filter = bnxt_alloc_filter(bp);
-               if (!filter) {
-                       PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
-                       rc = -ENOMEM;
-                       goto err_out;
-               }
-               filter->mac_index = 0;
-               filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
-               STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-               goto out;
-       }
-
        /* Multi-queue mode */
-       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
 
                switch (dev_conf->rxmode.mq_mode) {
-               case ETH_MQ_RX_VMDQ_RSS:
-               case ETH_MQ_RX_VMDQ_ONLY:
-               case ETH_MQ_RX_VMDQ_DCB_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_ONLY:
+               case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
@@ -84,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                        max_pools = RTE_MIN(bp->max_vnics,
                                            RTE_MIN(bp->max_l2_ctx,
                                            RTE_MIN(bp->max_rsscos_ctx,
-                                                   ETH_64_POOLS)));
+                                                   RTE_ETH_64_POOLS)));
                        PMD_DRV_LOG(DEBUG,
                                    "pools = %u max_pools = %u\n",
                                    pools, max_pools);
                        if (pools > max_pools)
                                pools = max_pools;
                        break;
-               case ETH_MQ_RX_RSS:
+               case RTE_ETH_MQ_RX_RSS:
                        pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
                        break;
                default:
@@ -100,9 +82,12 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                        rc = -EINVAL;
                        goto err_out;
                }
+       } else if (!dev_conf->rxmode.mq_mode) {
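+               /* No MQ mode requested (RTE_ETH_MQ_RX_NONE): size pools by
+                * the CoS queue count when CoS queues are configured.
+                */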
+               pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
        }
+
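+       /* Cannot use more pools than there are Rx completion rings. */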
+       pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
        nb_q_per_grp = bp->rx_cp_nr_rings / pools;
-       bp->rx_num_qs_per_vnic = nb_q_per_grp;
        PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
                    pools, nb_q_per_grp);
        start_grp_id = 0;
@@ -126,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                                    ring_idx, rxq, i, vnic);
                }
                if (i == 0) {
-                       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+                       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
                                bp->eth_dev->data->promiscuous = 1;
                                vnic->flags |= BNXT_VNIC_INFO_PROMISC;
                        }
@@ -136,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                vnic->end_grp_id = end_grp_id;
 
                if (i) {
-                       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
-                           !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+                       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+                           !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
                                vnic->rss_dflt_cr = true;
                        goto skip_filter_allocation;
                }
@@ -160,19 +145,24 @@ skip_filter_allocation:
                end_grp_id += nb_q_per_grp;
        }
 
-out:
-       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+       bp->rx_num_qs_per_vnic = nb_q_per_grp;
+
+       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
 
-               if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
-                       rss = &bp->rss_conf;
+               if (bp->flags & BNXT_FLAG_UPDATE_HASH)
                        bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
-               }
 
                for (i = 0; i < bp->nr_vnics; i++) {
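+                       /* RTE_ETH_RSS_LEVEL() extracts the requested hash
+                        * level (outer vs. inner headers) from rss_hf.
+                        */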
+                       uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
+
                        vnic = &bp->vnic_info[i];
                        vnic->hash_type =
                                bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
+                       vnic->hash_mode =
+                               bnxt_rte_to_hwrm_hash_level(bp,
+                                                           rss->rss_hf,
+                                                           lvl);
 
                        /*
                         * Use the supplied key if the key length is
@@ -195,33 +185,45 @@ err_out:
 
 void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 {
-       struct bnxt_sw_rx_bd *sw_ring;
+       struct rte_mbuf **sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;
 
-       if (!rxq)
+       if (!rxq || !rxq->rx_ring)
                return;
 
-       rte_spinlock_lock(&rxq->lock);
-
        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+               /*
+                * The vector receive burst function does not set used
+                * mbuf pointers to NULL, do that here to simplify
+                * cleanup logic.
+                */
+               for (i = 0; i < rxq->rxrearm_nb; i++)
+                       sw_ring[rxq->rxrearm_start + i] = NULL;
+               rxq->rxrearm_nb = 0;
+#endif
                for (i = 0;
                     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
-                       if (sw_ring[i].mbuf) {
-                               rte_pktmbuf_free_seg(sw_ring[i].mbuf);
-                               sw_ring[i].mbuf = NULL;
+                       if (sw_ring[i]) {
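+                               /* The fake mbuf is a placeholder used by the
+                                * vector receive paths; it is not a real
+                                * allocation and must not be freed.
+                                */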
+                               if (sw_ring[i] != &rxq->fake_mbuf)
+                                       rte_pktmbuf_free_seg(sw_ring[i]);
+                               sw_ring[i] = NULL;
                        }
                }
        }
        /* Free up mbufs in Agg ring */
+       if (!bnxt_need_agg_ring(rxq->bp->eth_dev))
+               return;
+
        sw_ring = rxq->rx_ring->ag_buf_ring;
        if (sw_ring) {
                for (i = 0;
                     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
-                       if (sw_ring[i].mbuf) {
-                               rte_pktmbuf_free_seg(sw_ring[i].mbuf);
-                               sw_ring[i].mbuf = NULL;
+                       if (sw_ring[i]) {
+                               rte_pktmbuf_free_seg(sw_ring[i]);
+                               sw_ring[i] = NULL;
                        }
                }
        }
@@ -239,7 +241,6 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
                }
        }
 
-       rte_spinlock_unlock(&rxq->lock);
 }
 
 void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -253,28 +254,47 @@ void bnxt_free_rx_mbufs(struct bnxt *bp)
        }
 }
 
-void bnxt_rx_queue_release_op(void *rx_queue)
+void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
 {
-       struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+       bnxt_rx_queue_release_mbufs(rxq);
 
-       if (rxq) {
-               if (is_bnxt_in_error(rxq->bp))
-                       return;
-
-               bnxt_rx_queue_release_mbufs(rxq);
-
-               /* Free RX ring hardware descriptors */
+       /* Free RX, AGG ring hardware descriptors */
+       if (rxq->rx_ring) {
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+               rte_free(rxq->rx_ring->rx_ring_struct);
+               rxq->rx_ring->rx_ring_struct = NULL;
                /* Free RX Agg ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+               rte_free(rxq->rx_ring->ag_ring_struct);
+               rxq->rx_ring->ag_ring_struct = NULL;
 
-               /* Free RX completion ring hardware descriptors */
+               rte_free(rxq->rx_ring);
+               rxq->rx_ring = NULL;
+       }
+       /* Free RX completion ring hardware descriptors */
+       if (rxq->cp_ring) {
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+               rte_free(rxq->cp_ring->cp_ring_struct);
+               rxq->cp_ring->cp_ring_struct = NULL;
+               rte_free(rxq->cp_ring);
+               rxq->cp_ring = NULL;
+       }
+
+       bnxt_free_rxq_stats(rxq);
+       rte_memzone_free(rxq->mz);
+       rxq->mz = NULL;
+}
 
-               bnxt_free_rxq_stats(rxq);
-               rte_memzone_free(rxq->mz);
-               rxq->mz = NULL;
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+       struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
+       if (rxq != NULL) {
+               if (is_bnxt_in_error(rxq->bp))
+                       return;
+
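+               /* Release HWRM ring resources before freeing the driver's
+                * ring memory.
+                */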
+               bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
+               bnxt_free_rxq_mem(rxq);
                rte_free(rxq);
        }
 }
@@ -290,65 +310,71 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;
-       uint8_t queue_state;
 
        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;
 
-       if (queue_idx >= bp->max_rx_rings) {
+       if (queue_idx >= bnxt_max_rings(bp)) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
-                       queue_idx, bp->max_rx_rings);
+                       queue_idx, bnxt_max_rings(bp));
                return -EINVAL;
        }
 
-       if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+       if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
-                       bnxt_rx_queue_release_op(rxq);
+                       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
-               rc = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
        rxq->bp = bp;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
-       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
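+       /* The requested rx_free_thresh is ignored: use one quarter of the
+        * ring size (aligned up to a power of two), capped at the maximum
+        * vector-mode burst size.
+        */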
+       rxq->rx_free_thresh =
+               RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);
+
+       if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
+               PMD_DRV_LOG(NOTICE,
+                           "Per-queue config of drop-en is not supported.\n");
+       rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;
 
        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
 
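+       /* Publish the queue in dev data early so that the error path can
+        * free it through the standard queue release op.
+        */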
+       eth_dev->data->rx_queues[queue_idx] = rxq;
+
        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
-       if (rc)
-               goto out;
+       if (rc) {
+               PMD_DRV_LOG(ERR, "init_rx_ring_struct failed!\n");
+               goto err;
+       }
 
        PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
-       if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;
 
-       eth_dev->data->rx_queues[queue_idx] = rxq;
        /* Allocate RX ring hardware descriptors */
-       if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
-                            "rxr")) {
+       rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
+                             NULL, "rxr");
+       if (rc) {
                PMD_DRV_LOG(ERR,
-                       "ring_dma_zone_reserve for rx_ring failed!\n");
-               bnxt_rx_queue_release_op(rxq);
-               rc = -ENOMEM;
-               goto out;
+                           "ring_dma_zone_reserve for rx_ring failed!\n");
+               goto err;
        }
        rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 
@@ -358,21 +384,16 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        else
                rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 
-       if (rxq->rx_deferred_start) {
-               queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
-               rxq->rx_started = false;
-       } else {
-               queue_state = RTE_ETH_QUEUE_STATE_STARTED;
-               rxq->rx_started = true;
-       }
-       eth_dev->data->rx_queue_state[queue_idx] = queue_state;
-       rte_spinlock_init(&rxq->lock);
+       rxq->rx_started = !rxq->rx_deferred_start;
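+       /* Attach the queue to the default VNIC initially; MQ configuration
+        * may reassign it to a pool VNIC later.
+        */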
+       rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
 
        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
 
-out:
+       return 0;
+err:
+       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        return rc;
 }
 
@@ -451,9 +472,13 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        if (rc)
                return rc;
 
+       if (BNXT_CHIP_P5(bp)) {
+               /* Reconfigure default receive ring and MRU. */
+               bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
+       }
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
 
-       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
 
                if (BNXT_HAS_RING_GRPS(bp)) {
@@ -491,7 +516,8 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL;
        struct bnxt_rx_queue *rxq = NULL;
-       int rc = 0;
+       int active_queue_cnt = 0;
+       int i, rc = 0;
 
        rc = is_bnxt_in_error(bp);
        if (rc)
@@ -507,18 +533,23 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        }
 
        rxq = bp->rx_queues[rx_queue_id];
-
-       if (rxq == NULL) {
+       if (!rxq) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }
 
+       vnic = rxq->vnic;
+       if (!vnic) {
+               PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
+                           rx_queue_id);
+               return -EINVAL;
+       }
+
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        rxq->rx_started = false;
        PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
 
-       if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
-               vnic = rxq->vnic;
+       if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                if (BNXT_HAS_RING_GRPS(bp))
                        vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
 
@@ -526,6 +557,49 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }
 
+       /* Compute current number of active receive queues. */
+       for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+               if (bp->rx_queues[i]->rx_started)
+                       active_queue_cnt++;
+
+       if (BNXT_CHIP_P5(bp)) {
+               /*
+                * For Thor, we need to ensure that the VNIC default receive
+                * ring corresponds to an active receive queue. When no queue
+                * is active, we need to temporarily set the MRU to zero so
+                * that packets are dropped early in the receive pipeline in
+                * order to prevent the VNIC default receive ring from being
+                * accessed.
+                */
+               if (active_queue_cnt == 0) {
+                       uint16_t saved_mru = vnic->mru;
+
+                       vnic->mru = 0;
+                       /* Reconfigure default receive ring and MRU. */
+                       bnxt_hwrm_vnic_cfg(bp, vnic);
+                       vnic->mru = saved_mru;
+               } else {
+                       /* Reconfigure default receive ring. */
+                       bnxt_hwrm_vnic_cfg(bp, vnic);
+               }
+       } else if (active_queue_cnt) {
+               /*
+                * If the queue being stopped is the current default queue and
+                * there are other active queues, pick one of them as the
+                * default and reconfigure the vnic.
+                */
+               if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
+                       for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+                               if (bp->rx_queues[i]->rx_started) {
+                                       vnic->dflt_ring_grp =
+                                               bp->grp_info[i].fw_grp_id;
+                                       bnxt_hwrm_vnic_cfg(bp, vnic);
+                                       break;
+                               }
+                       }
+               }
+       }
+
        if (rc == 0)
                bnxt_rx_queue_release_mbufs(rxq);