net/mlx5: translate shared action for RSS action
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b099b23..2484251 100644
@@ -9,8 +9,6 @@
 #include <stdint.h>
 #include <stdlib.h>
 #include <errno.h>
-#include <sys/mman.h>
-#include <linux/rtnetlink.h>
 
 #include <rte_malloc.h>
 #include <rte_ethdev_driver.h>
@@ -243,6 +241,28 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
                .free = mlx5_free,
                .type = "mlx5_jump_ipool",
        },
+       {
+               .size = sizeof(struct mlx5_flow_dv_sample_resource),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 0,
+               .release_mem_en = 1,
+               .malloc = mlx5_malloc,
+               .free = mlx5_free,
+               .type = "mlx5_sample_ipool",
+       },
+       {
+               .size = sizeof(struct mlx5_flow_dv_dest_array_resource),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 0,
+               .release_mem_en = 1,
+               .malloc = mlx5_malloc,
+               .free = mlx5_free,
+               .type = "mlx5_dest_array_ipool",
+       },
 #endif
        {
                .size = sizeof(struct mlx5_flow_meter),
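
The two new entries register per-device indexed pools for the sample and
destination-array resources used by the sample/mirror flow actions, so the
driver can reference these objects by a 32-bit index instead of a pointer.
With trunk_size 64, grow_trunk 3 and grow_shift 2, the first trunks grow by
a factor of four before the pool settles on a fixed trunk size, which keeps
small setups cheap. A minimal sketch of consuming such a pool, assuming the
mlx5_ipool_* helpers from mlx5_utils.h and an MLX5_IPOOL_SAMPLE slot
matching the "mlx5_sample_ipool" entry:

    /* Allocate a zeroed sample resource; *idx receives the index
     * (0 is reserved as invalid) that flows store instead of the
     * raw pointer. */
    static struct mlx5_flow_dv_sample_resource *
    sample_res_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t *idx)
    {
            return mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
    }
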
@@ -464,14 +484,13 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 
        memset(&sh->cmng, 0, sizeof(sh->cmng));
        TAILQ_INIT(&sh->cmng.flow_counters);
-       for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
-               sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
-               sh->cmng.ccont[i].max_id = -1;
-               sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
-               TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
-               rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
-               TAILQ_INIT(&sh->cmng.ccont[i].counters);
-               rte_spinlock_init(&sh->cmng.ccont[i].csl);
+       sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
+       sh->cmng.max_id = -1;
+       sh->cmng.last_pool_idx = POOL_IDX_INVALID;
+       rte_spinlock_init(&sh->cmng.pool_update_sl);
+       for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
+               TAILQ_INIT(&sh->cmng.counters[i]);
+               rte_spinlock_init(&sh->cmng.csl[i]);
        }
 }
 
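
The per-container bookkeeping collapses into a single manager: one ID
range, one pool array guarded by pool_update_sl, and one free-counter list
plus lock per counter type instead of per batch container. A minimal sketch
of how a counter would come off these typed lists (the helper name and the
'next' linkage field are assumptions; the list heads and csl locks are the
ones initialized above):

    /* Pop a free counter of the given type under its list lock. */
    static struct mlx5_flow_counter *
    counter_pop(struct mlx5_flow_counter_mng *cmng,
                enum mlx5_counter_type type)
    {
            struct mlx5_flow_counter *cnt;

            rte_spinlock_lock(&cmng->csl[type]);
            cnt = TAILQ_FIRST(&cmng->counters[type]);
            if (cnt)
                    TAILQ_REMOVE(&cmng->counters[type], cnt, next);
            rte_spinlock_unlock(&cmng->csl[type]);
            return cnt;
    }
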
@@ -502,8 +521,7 @@ static void
 mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
        struct mlx5_counter_stats_mem_mng *mng;
-       int i;
-       int j;
+       int i, j;
        int retries = 1024;
 
        rte_errno = 0;
@@ -513,34 +531,33 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
                        break;
                rte_pause();
        }
-       for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+
+       if (sh->cmng.pools) {
                struct mlx5_flow_counter_pool *pool;
-               uint32_t batch = !!(i > 1);
+               uint16_t n_valid = sh->cmng.n_valid;
+               bool fallback = sh->cmng.counter_fallback;
 
-               if (!sh->cmng.ccont[i].pools)
-                       continue;
-               pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
-               while (pool) {
-                       if (batch && pool->min_dcs)
+               for (i = 0; i < n_valid; ++i) {
+                       pool = sh->cmng.pools[i];
+                       if (!fallback && pool->min_dcs)
                                claim_zero(mlx5_devx_cmd_destroy
                                                               (pool->min_dcs));
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
-                               if (MLX5_POOL_GET_CNT(pool, j)->action)
+                               struct mlx5_flow_counter *cnt =
+                                               MLX5_POOL_GET_CNT(pool, j);
+
+                               if (cnt->action)
                                        claim_zero
                                         (mlx5_glue->destroy_flow_action
-                                         (MLX5_POOL_GET_CNT
-                                         (pool, j)->action));
-                               if (!batch && MLX5_GET_POOL_CNT_EXT
-                                   (pool, j)->dcs)
+                                         (cnt->action));
+                               if (fallback &&
+                                   cnt->dcs_when_free)
                                        claim_zero(mlx5_devx_cmd_destroy
-                                                  (MLX5_GET_POOL_CNT_EXT
-                                                   (pool, j)->dcs));
+                                                  (cnt->dcs_when_free));
                        }
-                       TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
                        mlx5_free(pool);
-                       pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                }
-               mlx5_free(sh->cmng.ccont[i].pools);
+               mlx5_free(sh->cmng.pools);
        }
        mng = LIST_FIRST(&sh->cmng.mem_mngs);
        while (mng) {
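
The teardown now walks the flat pool array once. In fallback mode each
counter owns its own DevX object (dcs_when_free); otherwise a single
pool-level min_dcs covers all MLX5_COUNTERS_PER_POOL counters, so exactly
one of the two destroy branches applies per pool. The claim_zero() wrapper
used here evaluates its argument in every build and only asserts on
failure in debug builds, roughly as defined in mlx5_utils.h:

    #ifdef RTE_LIBRTE_MLX5_DEBUG
    #define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
    #else
    #define claim_zero(...) (__VA_ARGS__)
    #endif

so the destroy calls still run in release builds while leaks trip an
assertion during development.
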
@@ -725,6 +742,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 {
        uint32_t uar_mapping, retry;
        int err = 0;
+       void *base_addr;
 
        for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
@@ -783,7 +801,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                        err = ENOMEM;
                        goto exit;
                }
-               if (sh->tx_uar->base_addr)
+               base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+               if (base_addr)
                        break;
                /*
                 * The UARs are allocated by rdma_core within the
@@ -822,7 +841,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                        err = ENOMEM;
                        goto exit;
                }
-               if (sh->devx_rx_uar->base_addr)
+               base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
+               if (base_addr)
                        break;
                /*
                 * The UARs are allocated by rdma_core within the
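
Dropping the direct base_addr dereference in favor of the accessor keeps
this common file clear of OS-specific UAR struct layouts. A rough sketch of
what the Linux flavor amounts to (the real helper lives in the common OS
layer; the NULL check here is illustrative):

    static inline void *
    mlx5_os_get_devx_uar_base_addr(void *uar)
    {
            struct mlx5dv_devx_uar *dv_uar = uar;

            return dv_uar ? dv_uar->base_addr : NULL;
    }
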
@@ -924,6 +944,14 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                goto error;
        }
        if (sh->devx) {
+               /* Query the EQN for this core. */
+               err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
+               if (err) {
+                       rte_errno = errno;
+                       DRV_LOG(ERR, "Failed to query event queue number %d.",
+                               rte_errno);
+                       goto error;
+               }
                err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Fail to extract pdn from PD");
@@ -945,8 +973,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                err = mlx5_alloc_rxtx_uars(sh, config);
                if (err)
                        goto error;
-               MLX5_ASSERT(sh->tx_uar && sh->tx_uar->base_addr);
-               MLX5_ASSERT(sh->devx_rx_uar && sh->devx_rx_uar->base_addr);
+               MLX5_ASSERT(sh->tx_uar);
+               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
+
+               MLX5_ASSERT(sh->devx_rx_uar);
+               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
        }
        sh->flow_id_pool = mlx5_flow_id_pool_alloc
                                        ((1 << HAIRPIN_FLOW_ID_BITS) - 1);
@@ -1337,7 +1368,7 @@ mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
  * @param dev
  *   Pointer to Ethernet device structure.
  */
-void
+int
 mlx5_dev_close(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
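
The void-to-int change follows the ethdev API update in this release,
where the dev_close callback reports a status and rte_eth_dev_close()
forwards it (the mlx5_pci_remove() hunk below already consumes it). A
minimal caller-side sketch:

    /* rte_eth_dev_close() now returns the driver's close status. */
    int rc = rte_eth_dev_close(port_id);

    if (rc != 0)
            printf("port %u close failed: %d\n", port_id, rc);
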
@@ -1347,14 +1378,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                /* Check if process_private released. */
                if (!dev->process_private)
-                       return;
+                       return 0;
                mlx5_tx_uar_uninit_secondary(dev);
                mlx5_proc_priv_uninit(dev);
                rte_eth_dev_release_port(dev);
-               return;
+               return 0;
        }
        if (!priv->sh)
-               return;
+               return 0;
        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->ctx != NULL) ?
@@ -1370,6 +1401,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         * then this will return directly without any action.
         */
        mlx5_flow_list_flush(dev, &priv->flows, true);
+       mlx5_shared_action_flush(dev);
        mlx5_flow_meter_flush(dev, NULL);
        /* Free the intermediate buffers for flow creation. */
        mlx5_flow_free_intermediate(dev);
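
This is the hook behind the commit subject: shared (RSS) actions outlive
individual flows, so port close must release any the application left
behind before the Rx objects they reference disappear. A minimal sketch of
what such a flush amounts to (the list head and destroy helper names are
assumptions):

    int
    mlx5_shared_action_flush(struct rte_eth_dev *dev)
    {
            struct mlx5_priv *priv = dev->data->dev_private;
            struct rte_flow_shared_action *act;
            struct rte_flow_error error;
            int ret = 0;

            /* Destroy every shared action still registered on the port;
             * assumes destroy unlinks the entry even on error. */
            while ((act = LIST_FIRST(&priv->shared_actions)) != NULL)
                    ret |= mlx5_shared_action_destroy(dev, act, &error);
            return ret;
    }
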
@@ -1407,9 +1439,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (priv->reta_idx != NULL)
                mlx5_free(priv->reta_idx);
        if (priv->config.vf)
-               mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
-                                      dev->data->mac_addrs,
-                                      MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
+               mlx5_os_mac_addr_flush(dev);
        if (priv->nl_socket_route >= 0)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
@@ -1447,7 +1477,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /*
         * Free the shared context in last turn, because the cleanup
         * routines above may use some shared fields, like
-        * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
+        * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
         * ifindex if Netlink fails.
         */
        mlx5_free_shared_dev_ctx(priv->sh);
@@ -1477,6 +1507,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         * it is freed when dev_private is freed.
         */
        dev->data->mac_addrs = NULL;
+       return 0;
 }
 
 /**
@@ -2007,6 +2038,7 @@ static int
 mlx5_pci_remove(struct rte_pci_device *pci_dev)
 {
        uint16_t port_id;
+       int ret = 0;
 
        RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
                /*
@@ -2014,11 +2046,11 @@ mlx5_pci_remove(struct rte_pci_device *pci_dev)
                 * call the close function explicitly for secondary process.
                 */
                if (rte_eal_process_type() == RTE_PROC_SECONDARY)
-                       mlx5_dev_close(&rte_eth_devices[port_id]);
+                       ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
                else
-                       rte_eth_dev_close(port_id);
+                       ret |= rte_eth_dev_close(port_id);
        }
-       return 0;
+       return ret == 0 ? 0 : -EIO;
 }
 
 static const struct rte_pci_id mlx5_pci_id_map[] = {