net/mlx5: accelerate DV flow counter transactions
[dpdk.git] drivers/net/mlx5/mlx5.c
index daadf4a..237d05b 100644
 /* Select port representors to instantiate. */
 #define MLX5_REPRESENTOR "representor"
 
+/* Device parameter to configure the maximum number of dump files per queue. */
+#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"
+
 #ifndef HAVE_IBV_MLX5_MOD_MPW
 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -153,6 +156,89 @@ struct mlx5_dev_spawn_data {
 static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
 static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/**
+ * Initialize the counters management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_ibv_shared object.
+ */
+static void
+mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
+{
+       uint8_t i;
+
+       TAILQ_INIT(&sh->cmng.flow_counters);
+       for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
+               TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
+}
+
+/**
+ * Destroy all the resources allocated for counter memory management.
+ *
+ * @param[in] mng
+ *   Pointer to the memory management structure.
+ */
+static void
+mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
+{
+       uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
+
+       LIST_REMOVE(mng, next);
+       claim_zero(mlx5_devx_cmd_destroy(mng->dm));
+       claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
+       rte_free(mem);
+}
+
+/**
+ * Close and release all the resources of the counters management.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_ibv_shared object to free.
+ */
+static void
+mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
+{
+       struct mlx5_counter_stats_mem_mng *mng;
+       uint8_t i;
+       int j;
+
+       for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
+               struct mlx5_flow_counter_pool *pool;
+               uint32_t batch = !!(i % 2);
+
+               if (!sh->cmng.ccont[i].pools)
+                       continue;
+               pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
+               while (pool) {
+                       if (batch) {
+                               if (pool->min_dcs)
+                                       claim_zero
+                                       (mlx5_devx_cmd_destroy(pool->min_dcs));
+                       }
+                       for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
+                               if (pool->counters_raw[j].action)
+                                       claim_zero
+                                       (mlx5_glue->destroy_flow_action
+                                              (pool->counters_raw[j].action));
+                               if (!batch && pool->counters_raw[j].dcs)
+                                       claim_zero(mlx5_devx_cmd_destroy
+                                                 (pool->counters_raw[j].dcs));
+                       }
+                       TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
+                                    next);
+                       rte_free(pool);
+                       pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
+               }
+               rte_free(sh->cmng.ccont[i].pools);
+       }
+       mng = LIST_FIRST(&sh->cmng.mem_mngs);
+       while (mng) {
+               mlx5_flow_destroy_counter_stat_mem_mng(mng);
+               mng = LIST_FIRST(&sh->cmng.mem_mngs);
+       }
+       memset(&sh->cmng, 0, sizeof(sh->cmng));
+}
+
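For readers without the matching mlx5.h hunk at hand, the sketch below shows the approximate shape of sh->cmng that these helpers assume; the type and field names are illustrative, not the exact definitions from this series. The key point is that the parity of the container index distinguishes batch-allocated pools (one min_dcs DevX object per pool) from singly allocated counters (one dcs DevX object each), which is why the close path derives batch = !!(i % 2) and tears the two kinds down differently.

#include <sys/queue.h>

/* Simplified sketch only: assumed layout, not the series' exact code. */
struct mlx5_flow_counter;              /* One flow counter entry. */
struct mlx5_flow_counter_pool;         /* Holds MLX5_COUNTERS_PER_POOL entries. */
struct mlx5_counter_stats_mem_mng;     /* Raw statistics memory manager. */

struct mlx5_pools_container {
	TAILQ_HEAD(, mlx5_flow_counter_pool) pool_list; /* In-use pools. */
	struct mlx5_flow_counter_pool **pools;          /* Flat pool index. */
};

struct mlx5_flow_counter_mng {
	/* Dimension assumed; index parity selects even = single, odd = batch. */
	struct mlx5_pools_container ccont[4];
	TAILQ_HEAD(, mlx5_flow_counter) flow_counters;    /* Legacy counter list. */
	LIST_HEAD(, mlx5_counter_stats_mem_mng) mem_mngs; /* Raw stats memory. */
};
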
 /**
  * Allocate shared IB device context. If there is a multiport device, the
  * master and representors will share this context; if there is a single
@@ -257,6 +343,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                err = rte_errno;
                goto error;
        }
+       mlx5_flow_counters_mng_init(sh);
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -311,9 +398,10 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
         *  Ensure there is no async event handler installed.
         *  Only primary process handles async device events.
         **/
+       mlx5_flow_counters_mng_close(sh);
        assert(!sh->intr_cnt);
        if (sh->intr_cnt)
-               rte_intr_callback_unregister
+               mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
        pthread_mutex_destroy(&sh->intr_mutex);
        if (sh->pd)
@@ -687,8 +775,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
-       if (priv->tcf_context)
-               mlx5_flow_tcf_context_destroy(priv->tcf_context);
        if (priv->sh) {
                /*
                 * Free the shared context in last turn, because the cleanup
@@ -773,6 +859,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
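The new .read_clock entry plugs the PMD into the generic rte_eth_read_clock() ethdev API, so an application can sample the device's raw hardware clock (e.g. to correlate Rx timestamps with host time). A minimal usage sketch; the port id is simply whatever mlx5 port the application owns:

#include <stdint.h>
#include <rte_ethdev.h>

/* Illustrative only: read the raw NIC clock backed by mlx5_read_clock(). */
static int
query_device_clock(uint16_t port_id, uint64_t *ticks)
{
	return rte_eth_read_clock(port_id, ticks);
}
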
@@ -926,6 +1013,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->dv_flow_en = !!tmp;
        } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
                config->mr_ext_memseg_en = !!tmp;
+       } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
+               config->max_dump_files_num = tmp;
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
                rte_errno = EINVAL;
@@ -970,6 +1059,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_DV_FLOW_EN,
                MLX5_MR_EXT_MEMSEG_EN,
                MLX5_REPRESENTOR,
+               MLX5_MAX_DUMP_FILES_NUM,
                NULL,
        };
        struct rte_kvargs *kvlist;
@@ -980,8 +1070,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                return 0;
        /* Following UGLY cast is done to pass checkpatch. */
        kvlist = rte_kvargs_parse(devargs->args, params);
-       if (kvlist == NULL)
-               return 0;
+       if (kvlist == NULL) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        /* Process parameters. */
        for (i = 0; (params[i] != NULL); ++i) {
                if (rte_kvargs_count(kvlist, params[i])) {
@@ -1439,6 +1531,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
                config.mprq.enabled = 0;
        }
+       if (config.max_dump_files_num == 0)
+               config.max_dump_files_num = 128;
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL) {
                DRV_LOG(ERR, "can not allocate rte ethdev");
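Together with the devarg definition and parsing added above, this makes 128 dump files per queue the default when max_dump_files_num is not specified. Below is a hedged example of requesting an explicit limit when probing the device from application code; the PCI address and helper name are placeholders, not part of this patch.

#include <rte_dev.h>

/* Illustrative only: probe an mlx5 port with the new devarg, capping
 * per-queue dump files at 64. The PCI address is a placeholder. */
static int
attach_with_dump_limit(void)
{
	return rte_dev_probe("0000:03:00.0,max_dump_files_num=64");
}

The same key=value suffix can equally be appended to the EAL device argument (e.g. -w 0000:03:00.0,max_dump_files_num=64).
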
@@ -1498,34 +1592,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
        if (config.vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
-       priv->tcf_context = mlx5_flow_tcf_context_create();
-       if (!priv->tcf_context) {
-               err = -rte_errno;
-               DRV_LOG(WARNING,
-                       "flow rules relying on switch offloads will not be"
-                       " supported: cannot open libmnl socket: %s",
-                       strerror(rte_errno));
-       } else {
-               struct rte_flow_error error;
-               unsigned int ifindex = mlx5_ifindex(eth_dev);
-
-               if (!ifindex) {
-                       err = -rte_errno;
-                       error.message =
-                               "cannot retrieve network interface index";
-               } else {
-                       err = mlx5_flow_tcf_init(priv->tcf_context,
-                                                ifindex, &error);
-               }
-               if (err) {
-                       DRV_LOG(WARNING,
-                               "flow rules relying on switch offloads will"
-                               " not be supported: %s: %s",
-                               error.message, strerror(rte_errno));
-                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
-                       priv->tcf_context = NULL;
-               }
-       }
        TAILQ_INIT(&priv->flows);
        TAILQ_INIT(&priv->ctrl_flows);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -1591,8 +1657,6 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
-               if (priv->tcf_context)
-                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
@@ -2108,7 +2172,7 @@ static struct rte_pci_driver mlx5_driver = {
        .dma_map = mlx5_dma_map,
        .dma_unmap = mlx5_dma_unmap,
        .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
-                    RTE_PCI_DRV_PROBE_AGAIN | RTE_PCI_DRV_IOVA_AS_VA,
+                    RTE_PCI_DRV_PROBE_AGAIN,
 };
 
 #ifdef RTE_IBVERBS_LINK_DLOPEN