net/mlx5: allow basic counter management fallback
[dpdk.git] / drivers / net / mlx5 / mlx5.c
index 3a269bc..7e0df51 100644 (file)
@@ -37,6 +37,7 @@
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_alarm.h>
 
 #include "mlx5.h"
 #include "mlx5_utils.h"
@@ -156,6 +157,97 @@ struct mlx5_dev_spawn_data {
 static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
 static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/**
+ * Initialize the counters management structure.
+ *
+ * @param[in] sh
+ *   Pointer to the mlx5_ibv_shared object whose counter management (cmng) is initialized.
+ */
+static void
+mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
+{
+       uint8_t i;
+
+       TAILQ_INIT(&sh->cmng.flow_counters); /* Flow counter list. */
+       for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
+               TAILQ_INIT(&sh->cmng.ccont[i].pool_list); /* Per-container pool list. */
+}
+
+/**
+ * Destroy all the resources allocated for a counter memory management.
+ *
+ * @param[in] mng
+ *   Pointer to the memory management structure; unlinked from its list and torn down.
+ */
+static void
+mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
+{
+       uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; /* NOTE(review): assumed to be the allocation base of the whole raws buffer — confirm against the alloc side. */
+
+       LIST_REMOVE(mng, next); /* Unlink from the owning list (sh->cmng.mem_mngs in the caller). */
+       claim_zero(mlx5_devx_cmd_destroy(mng->dm)); /* Destroy the DevX object. */
+       claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); /* Deregister the user memory. */
+       rte_free(mem); /* Free the raw statistics memory. */
+}
+
+/**
+ * Close and release all the resources of the counters management.
+ *
+ * @param[in] sh
+ *   Pointer to the mlx5_ibv_shared object whose counter resources are released.
+ */
+static void
+mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
+{
+       struct mlx5_counter_stats_mem_mng *mng;
+       uint8_t i;
+       int j;
+       int retries = 1024; /* Bound the wait for a racing alarm callback. */
+
+       rte_errno = 0; /* rte_eal_alarm_cancel() reports its status via rte_errno. */
+       while (--retries) { /* Cancel the pending counter query alarm, if any. */
+               rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
+               if (rte_errno != EINPROGRESS) /* EINPROGRESS: callback is executing now. */
+                       break;
+               rte_pause(); /* Let the callback finish, then retry the cancel. */
+       }
+       for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
+               struct mlx5_flow_counter_pool *pool;
+               uint32_t batch = !!(i % 2); /* Odd-index containers hold batch pools (pool-level dcs). */
+
+               if (!sh->cmng.ccont[i].pools) /* Container was never populated. */
+                       continue;
+               pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
+               while (pool) {
+                       if (batch) { /* Batch pool: single min_dcs object for the pool. */
+                               if (pool->min_dcs)
+                                       claim_zero
+                                       (mlx5_devx_cmd_destroy(pool->min_dcs));
+                       }
+                       for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
+                               if (pool->counters_raw[j].action)
+                                       claim_zero
+                                       (mlx5_glue->destroy_flow_action
+                                              (pool->counters_raw[j].action));
+                               if (!batch && pool->counters_raw[j].dcs) /* Non-batch: per-counter dcs. */
+                                       claim_zero(mlx5_devx_cmd_destroy
+                                                 (pool->counters_raw[j].dcs));
+                       }
+                       TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
+                                    next);
+                       rte_free(pool);
+                       pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); /* Advance to the new head. */
+               }
+               rte_free(sh->cmng.ccont[i].pools); /* Free the pool pointer array. */
+       }
+       mng = LIST_FIRST(&sh->cmng.mem_mngs); /* Drain the raw-memory managers. */
+       while (mng) {
+               mlx5_flow_destroy_counter_stat_mem_mng(mng);
+               mng = LIST_FIRST(&sh->cmng.mem_mngs); /* Destroy unlinks mng, so re-read the head. */
+       }
+       memset(&sh->cmng, 0, sizeof(sh->cmng)); /* Leave cmng fully reset for a possible re-init. */
+}
+
 /**
  * Allocate shared IB device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -260,6 +352,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
                err = rte_errno;
                goto error;
        }
+       mlx5_flow_counters_mng_init(sh);
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
 exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
@@ -314,6 +407,7 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
         *  Ensure there is no async event handler installed.
         *  Only primary process handles async device events.
         **/
+       mlx5_flow_counters_mng_close(sh);
        assert(!sh->intr_cnt);
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
@@ -690,8 +784,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
-       if (priv->tcf_context)
-               mlx5_flow_tcf_context_destroy(priv->tcf_context);
        if (priv->sh) {
                /*
                 * Free the shared context in last turn, because the cleanup
@@ -987,8 +1079,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                return 0;
        /* Following UGLY cast is done to pass checkpatch. */
        kvlist = rte_kvargs_parse(devargs->args, params);
-       if (kvlist == NULL)
-               return 0;
+       if (kvlist == NULL) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        /* Process parameters. */
        for (i = 0; (params[i] != NULL); ++i) {
                if (rte_kvargs_count(kvlist, params[i])) {
@@ -1507,34 +1601,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
        if (config.vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
-       priv->tcf_context = mlx5_flow_tcf_context_create();
-       if (!priv->tcf_context) {
-               err = -rte_errno;
-               DRV_LOG(WARNING,
-                       "flow rules relying on switch offloads will not be"
-                       " supported: cannot open libmnl socket: %s",
-                       strerror(rte_errno));
-       } else {
-               struct rte_flow_error error;
-               unsigned int ifindex = mlx5_ifindex(eth_dev);
-
-               if (!ifindex) {
-                       err = -rte_errno;
-                       error.message =
-                               "cannot retrieve network interface index";
-               } else {
-                       err = mlx5_flow_tcf_init(priv->tcf_context,
-                                                ifindex, &error);
-               }
-               if (err) {
-                       DRV_LOG(WARNING,
-                               "flow rules relying on switch offloads will"
-                               " not be supported: %s: %s",
-                               error.message, strerror(rte_errno));
-                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
-                       priv->tcf_context = NULL;
-               }
-       }
        TAILQ_INIT(&priv->flows);
        TAILQ_INIT(&priv->ctrl_flows);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -1558,11 +1624,19 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_link_update(eth_dev, 0);
 #ifdef HAVE_IBV_DEVX_OBJ
        if (config.devx) {
+               priv->counter_fallback = 0;
                err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
                if (err) {
                        err = -err;
                        goto error;
                }
+               if (!config.hca_attr.flow_counters_dump)
+                       priv->counter_fallback = 1;
+#ifndef HAVE_IBV_DEVX_ASYNC
+               priv->counter_fallback = 1;
+#endif
+               if (priv->counter_fallback)
+                       DRV_LOG(INFO, "Use fall-back DV counter management\n");
        }
 #endif
 #ifdef HAVE_MLX5DV_DR_ESWITCH
@@ -1600,8 +1674,6 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
-               if (priv->tcf_context)
-                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
@@ -2117,7 +2189,7 @@ static struct rte_pci_driver mlx5_driver = {
        .dma_map = mlx5_dma_map,
        .dma_unmap = mlx5_dma_unmap,
        .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
-                    RTE_PCI_DRV_PROBE_AGAIN | RTE_PCI_DRV_IOVA_AS_VA,
+                    RTE_PCI_DRV_PROBE_AGAIN,
 };
 
 #ifdef RTE_IBVERBS_LINK_DLOPEN