net/mlx5: fix E-Switch DV flow disabling
[dpdk.git] / drivers / net / mlx5 / mlx5.c
index 0079aa8..72b1e35 100644 (file)
@@ -1298,6 +1298,11 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
                DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
                config->dv_esw_en = 0;
        }
+       if (config->dv_esw_en && !config->dv_flow_en) {
+               DRV_LOG(DEBUG,
+                       "E-Switch DV flow is supported only when DV flow is enabled.");
+               config->dv_esw_en = 0;
+       }
        if (config->dv_miss_info && config->dv_esw_en)
                config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
        if (!config->dv_esw_en &&
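The new guard above makes dv_esw_en depend on dv_flow_en: an E-Switch request without the DV flow engine is logged and downgraded rather than rejected. A minimal standalone sketch of that normalization pattern, with illustrative names (dev_config and normalize_config are not the mlx5 devargs code):

/* Sketch of the dependency rule the hunk above adds: a flag that
 * requires another is downgraded with a log, never a hard failure. */
#include <stdio.h>

struct dev_config {
	int dv_flow_en; /* DV flow engine: 0 disabled, 1 DV, 2 HW steering */
	int dv_esw_en;  /* E-Switch offloads; meaningful only with DV flow */
};

static void
normalize_config(struct dev_config *c)
{
	if (c->dv_esw_en && !c->dv_flow_en) {
		printf("E-Switch DV flow needs DV flow, disabling\n");
		c->dv_esw_en = 0;
	}
}

int
main(void)
{
	struct dev_config c = { .dv_flow_en = 0, .dv_esw_en = 1 };

	normalize_config(&c);
	printf("dv_esw_en=%d\n", c.dv_esw_en); /* 0: silently downgraded */
	return 0;
}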
@@ -1457,6 +1462,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
+               sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
        }
        if (sh->cdev->config.devx) {
                sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
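The added nl_ih_port_id follows the convention of the two existing fields: RTE_MAX_ETHPORTS is an out-of-range sentinel meaning no port owns that interrupt-handler slot yet, so port 0 stays distinguishable from "unassigned". A small sketch of the idiom under hypothetical types (port_slot is not the mlx5 struct):

/* Sentinel idiom: MAX_PORTS stands in for RTE_MAX_ETHPORTS. */
#include <stdbool.h>
#include <stdint.h>

#define MAX_PORTS 32 /* stand-in for RTE_MAX_ETHPORTS */

struct port_slot {
	uint16_t ih_port_id;      /* Verbs interrupt handler owner */
	uint16_t devx_ih_port_id; /* DevX interrupt handler owner */
	uint16_t nl_ih_port_id;   /* Netlink interrupt handler owner */
};

static void
port_slot_init(struct port_slot *s)
{
	/* All three owners start out unassigned. */
	s->ih_port_id = MAX_PORTS;
	s->devx_ih_port_id = MAX_PORTS;
	s->nl_ih_port_id = MAX_PORTS;
}

static bool
nl_handler_bound(const struct port_slot *s)
{
	return s->nl_ih_port_id != MAX_PORTS;
}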
@@ -1607,12 +1613,49 @@ void
 mlx5_free_table_hash_list(struct mlx5_priv *priv)
 {
        struct mlx5_dev_ctx_shared *sh = priv->sh;
-
-       if (!sh->flow_tbls)
+       struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
+                                  &sh->groups : &sh->flow_tbls;
+
+       if (*tbls == NULL)
                return;
-       mlx5_hlist_destroy(sh->flow_tbls);
-       sh->flow_tbls = NULL;
+       mlx5_hlist_destroy(*tbls);
+       *tbls = NULL;
+}
+
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+/**
+ * Allocate HW steering group hash list.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
+{
+       int err = 0;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       char s[MLX5_NAME_SIZE];
+
+       MLX5_ASSERT(sh);
+       snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
+       sh->groups = mlx5_hlist_create
+                       (s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+                        false, true, sh,
+                        flow_hw_grp_create_cb,
+                        flow_hw_grp_match_cb,
+                        flow_hw_grp_remove_cb,
+                        flow_hw_grp_clone_cb,
+                        flow_hw_grp_clone_free_cb);
+       if (!sh->groups) {
+               DRV_LOG(ERR, "flow groups hash list creation failed.");
+               err = ENOMEM;
+       }
+       return err;
 }
+#endif
 
 /**
  * Initialize flow table hash list and create the root tables entry
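The rewritten mlx5_free_table_hash_list() selects which hash list to tear down (legacy flow tables, or HW steering groups when dv_flow_en == 2) through a pointer to pointer, so the destroy-and-clear sequence is written only once and a repeated call is a no-op. A compilable sketch of that idiom, with destroy_list() standing in for mlx5_hlist_destroy() rather than the real API:

#include <stdlib.h>

struct hlist { int entries; };

static void
destroy_list(struct hlist *h)
{
	free(h);
}

static void
free_active_list(struct hlist **sw_tbls, struct hlist **hw_groups,
		 int hw_steering)
{
	struct hlist **tbls = hw_steering ? hw_groups : sw_tbls;

	if (*tbls == NULL)
		return;
	destroy_list(*tbls);
	*tbls = NULL; /* clearing through the double pointer makes a
		       * second call a harmless no-op */
}

int
main(void)
{
	struct hlist *flow_tbls = malloc(sizeof(*flow_tbls));
	struct hlist *groups = NULL;

	free_active_list(&flow_tbls, &groups, 0);
	free_active_list(&flow_tbls, &groups, 0); /* safe: already NULL */
	return 0;
}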
@@ -1628,11 +1671,14 @@ int
 mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 {
        int err = 0;
+
        /* Tables are only used in DV and DR modes. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        char s[MLX5_NAME_SIZE];
 
+       if (priv->sh->config.dv_flow_en == 2)
+               return mlx5_alloc_hw_group_hash_list(priv);
        MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
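On the allocation side, the early return added above dispatches to the HW steering group hash list when dv_flow_en == 2, mirroring the free path so both always operate on the same table. A reduced sketch of the dispatch, with placeholder backends that are not mlx5 functions:

static int
alloc_sw_tables(void)
{
	return 0; /* would create the "%s_flow_table" hash list */
}

static int
alloc_hw_groups(void)
{
	return 0; /* would create the "%s_flow_groups" hash list */
}

static int
alloc_table_hash_list(int dv_flow_en)
{
	if (dv_flow_en == 2) /* HW steering mode */
		return alloc_hw_groups();
	return alloc_sw_tables();
}

int
main(void)
{
	return alloc_table_hash_list(2);
}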
@@ -1875,6 +1921,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
+       ret = mlx5_ext_rxq_verify(dev);
+       if (ret)
+               DRV_LOG(WARNING, "port %u some external Rx queues still remain",
+                       dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
@@ -1893,6 +1943,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                        dev->data->port_id);
        if (priv->hrxqs)
                mlx5_list_destroy(priv->hrxqs);
+       mlx5_free(priv->ext_rxqs);
        /*
         * Free the shared context in last turn, because the cleanup
         * routines above may use some shared fields, like
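mlx5_free(priv->ext_rxqs) releases the external RxQ mapping array before the shared context is dropped, in line with the ordering the comment above describes. Assuming mlx5_free(), like free(), accepts NULL, a port that never allocated the array needs no guard. A sketch of that close-path detail under a hypothetical layout, not the mlx5 structures:

#include <stdlib.h>

struct port_priv {
	void *ext_rxqs; /* lazily allocated; may still be NULL at close */
};

static void
port_close(struct port_priv *priv)
{
	free(priv->ext_rxqs); /* free(NULL) is a harmless no-op */
	priv->ext_rxqs = NULL;
	/* ...the shared device context would be released after this. */
}

int
main(void)
{
	struct port_priv p = { .ext_rxqs = NULL };

	port_close(&p); /* safe even though nothing was allocated */
	return 0;
}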