net/mlx5: convert flow dev handle to indexed
author     Suanming Mou <suanmingm@mellanox.com>
           Thu, 16 Apr 2020 02:42:08 +0000 (10:42 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Tue, 21 Apr 2020 11:57:09 +0000 (13:57 +0200)
This commit converts the flow dev handle from a pointer to an index.

Changing the mlx5 flow handle from a pointer to a uint32_t index saves
memory per flow. With a million flows, it saves several MBytes of memory.

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
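
For context, the saving comes from replacing per-handle heap allocations
chained through pointer-sized LIST_ENTRY links with entries of a
pre-allocated indexed pool chained through a single 4-byte index, where
index 0 marks the end of the list. A minimal sketch of the idea, with
hypothetical struct names used only for illustration:

    #include <stdint.h>
    #include <sys/queue.h>

    /* Before: each handle is rte_calloc'ed separately, linked by pointers. */
    struct handle_by_ptr {
            LIST_ENTRY(handle_by_ptr) next; /* Pointer-sized link fields. */
            uint64_t payload;               /* Stand-in for the real fields. */
    };

    /* After: handles live in trunks of an indexed pool, linked by index. */
    struct handle_by_idx {
            uint32_t next;                  /* 4-byte index, 0 = end of list. */
            uint64_t payload;
    };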

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1d38aa2..64c06b4 100644
@@ -199,6 +199,7 @@ static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
 static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        {
                .size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
                .trunk_size = 64,
@@ -254,6 +255,7 @@ static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
                .free = rte_free,
                .type = "mlx5_jump_ipool",
        },
+#endif
        {
                .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
                .trunk_size = 64,
@@ -265,6 +267,17 @@ static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
                .free = rte_free,
                .type = "mlx5_hrxq_ipool",
        },
+       {
+               .size = sizeof(struct mlx5_flow_handle),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 0,
+               .release_mem_en = 1,
+               .malloc = rte_malloc_socket,
+               .free = rte_free,
+               .type = "mlx5_flow_handle_ipool",
+       },
 };
 
 
@@ -491,12 +504,25 @@ mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
  *
  * @param[in] sh
  *   Pointer to mlx5_ibv_shared object.
+ * @param[in] config
+ *   Pointer to user dev config.
  */
 static void
-mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh)
+mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh,
+                      const struct mlx5_dev_config *config __rte_unused)
 {
        uint8_t i;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       /*
+        * When DV is supported but the user selects verbs mode,
+        * the mlx5 flow handle only needs the smaller
+        * MLX5_FLOW_HANDLE_VERBS_SIZE, so shrink the pool entry size.
+        */
+       if (!config->dv_flow_en)
+               mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size =
+                                       MLX5_FLOW_HANDLE_VERBS_SIZE;
+#endif
        for (i = 0; i < MLX5_IPOOL_MAX; ++i)
                sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
 }
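
The pool created above is consumed elsewhere in this patch through an
alloc/get/free triplet. A usage sketch (the helper name is hypothetical;
the mlx5_ipool_* calls are the ones appearing in this diff):

    static void
    flow_handle_ipool_example(struct mlx5_ibv_shared *sh)
    {
            uint32_t idx = 0;
            struct mlx5_flow_handle *h;

            /* Allocate a zeroed handle; the pool returns pointer and index. */
            h = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_MLX5_FLOW], &idx);
            if (!h)
                    return; /* ENOMEM, handled as in flow_dv_prepare(). */
            /* Flows store only the 4-byte index; map it back on demand. */
            h = mlx5_ipool_get(sh->ipool[MLX5_IPOOL_MLX5_FLOW], idx);
            /* Release by index rather than by pointer. */
            mlx5_ipool_free(sh->ipool[MLX5_IPOOL_MLX5_FLOW], idx);
    }
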
@@ -731,7 +757,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
-       mlx5_flow_ipool_create(sh);
+       mlx5_flow_ipool_create(sh, config);
        /* Add device to memory callback list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 42b4818..56f0a23 100644
 
 
 enum mlx5_ipool_index {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
        MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
        MLX5_IPOOL_TAG, /* Pool for tag resource. */
        MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
        MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+#endif
        MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
+       MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
        MLX5_IPOOL_MAX,
 };
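
The loop in mlx5_flow_ipool_create() fills a per-shared-context array
indexed by this enum; the backing field in struct mlx5_ibv_shared is
assumed to look roughly as follows (it is not shown in this diff):

    struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX]; /* Pools for basic objects. */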
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c44bc1f..bf95a40 100644
@@ -775,9 +775,12 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, flow, dev_handle);
 }
 
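SILIST_FOREACH is the single-index-list counterpart of LIST_FOREACH: it
walks a chain of uint32_t indexes, resolving each one through the given
pool. A sketch of what such a macro can look like, written for
illustration rather than copied from the driver:

    #define SILIST_FOREACH(pool, head, idx, elem, field)                      \
            for ((idx) = (head),                                              \
                 (elem) = (idx) ? mlx5_ipool_get((pool), (idx)) : NULL;       \
                 (elem);                                                      \
                 (idx) = (elem)->field.next,                                  \
                 (elem) = (idx) ? mlx5_ipool_get((pool), (idx)) : NULL)

With this shape, index 0 terminates the walk naturally, which is why
flow->dev_handles is initialized to 0 further down.
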
@@ -847,9 +850,12 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
 static void
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
                flow_drv_rxq_flags_trim(dev, flow, dev_handle);
 }
 
@@ -2313,9 +2319,12 @@ static void
 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
                             struct rte_flow *flow)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
                if (dev_handle->qrss_id)
                        flow_qrss_free_id(dev, dev_handle->qrss_id);
 }
@@ -3459,7 +3468,8 @@ flow_create_split_inner(struct rte_eth_dev *dev,
        dev_flow->flow = flow;
        dev_flow->external = external;
        /* Subflow object was created, we must include one in the list. */
-       LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                     dev_flow->handle, next);
        /*
         * If dev_flow is as one of the suffix flow, some actions in suffix
         * flow may need some user defined item layer flags.
@@ -4264,7 +4274,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
                flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
        }
-       LIST_INIT(&flow->dev_handles);
+       flow->dev_handles = 0;
        if (rss && rss->types) {
                unsigned int graph_root;
 
@@ -4312,7 +4322,8 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                        goto error;
                dev_flow->flow = flow;
                dev_flow->external = 0;
-               LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+               SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                             dev_flow->handle, next);
                ret = flow_drv_translate(dev, dev_flow, &attr_tx,
                                         items_tx.items,
                                         actions_hairpin_tx.actions, error);
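
SILIST_INSERT, used in the two hunks above, pushes a freshly allocated
handle onto the head of the index list; given both the index and the
already resolved pointer, an illustrative sketch can be as small as:

    #define SILIST_INSERT(head, idx, elem, field)                             \
            do {                                                              \
                    (elem)->field.next = *(head);                             \
                    *(head) = (idx);                                          \
            } while (0)
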
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 26f8704..a2ea122 100644
@@ -502,8 +502,8 @@ struct mlx5_flow_handle_dv {
 
 /** Device flow handle structure: used both for creating & destroying. */
 struct mlx5_flow_handle {
-       LIST_ENTRY(mlx5_flow_handle) next;
-       /**< Pointer to next device flow handle. */
+       SILIST_ENTRY(uint32_t)next;
+       /**< Index to next device flow handle. */
        uint64_t layers;
        /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
        uint64_t act_flags;
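
SILIST_ENTRY(uint32_t) stands in for the two-pointer LIST_ENTRY with a
single 4-byte field; a plausible definition, consistent with the
dh->next.next accesses later in this patch (illustrative only):

    #define SILIST_ENTRY(type)                                                \
            struct {                                                          \
                    type next; /* Index of the next element; 0 ends a list. */\
            }
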
@@ -632,6 +632,7 @@ struct mlx5_flow {
                struct mlx5_flow_verbs_workspace verbs;
        };
        struct mlx5_flow_handle *handle;
+       uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
 };
 
 /* Flow meter state. */
@@ -747,7 +748,7 @@ struct rte_flow {
        struct mlx5_flow_mreg_copy_resource *mreg_copy;
        /**< pointer to metadata register copy table resource. */
        struct mlx5_flow_meter *meter; /**< Holds flow meter. */
-       LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+       uint32_t dev_handles;
        /**< Device flow handles that are part of the flow. */
        struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
        uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5e6143b..f001b34 100644
@@ -5333,7 +5333,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
                const struct rte_flow_action actions[] __rte_unused,
                struct rte_flow_error *error)
 {
-       size_t size = sizeof(struct mlx5_flow_handle);
+       uint32_t handle_idx = 0;
        struct mlx5_flow *dev_flow;
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -5345,7 +5345,8 @@ flow_dv_prepare(struct rte_eth_dev *dev,
                                   "not free temporary device flow");
                return NULL;
        }
-       dev_handle = rte_calloc(__func__, 1, size, 0);
+       dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                                  &handle_idx);
        if (!dev_handle) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -5355,6 +5356,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
        /* No multi-thread supporting. */
        dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
        dev_flow->handle = dev_handle;
+       dev_flow->handle_idx = handle_idx;
        dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
        /*
         * The matching value needs to be cleared to 0 before using. In the
@@ -8088,6 +8090,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct mlx5_flow_handle_dv *dv_h;
        struct mlx5_flow *dev_flow;
        struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
        int n;
        int err;
        int idx;
@@ -8172,7 +8175,8 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        return 0;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
-       LIST_FOREACH(dh, &flow->dev_handles, next) {
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dh, next) {
                if (dh->hrxq) {
                        if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
@@ -8429,10 +8433,17 @@ static void
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct mlx5_flow_handle *dh;
+       uint32_t handle_idx;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!flow)
                return;
-       LIST_FOREACH(dh, &flow->dev_handles, next) {
+       handle_idx = flow->dev_handles;
+       while (handle_idx) {
+               dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                                   handle_idx);
+               if (!dh)
+                       return;
                if (dh->ib_flow) {
                        claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
                        dh->ib_flow = NULL;
@@ -8446,6 +8457,7 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+               handle_idx = dh->next.next;
        }
 }
 
@@ -8462,6 +8474,7 @@ static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct mlx5_flow_handle *dev_handle;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!flow)
                return;
@@ -8474,9 +8487,14 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                mlx5_flow_meter_detach(flow->meter);
                flow->meter = NULL;
        }
-       while (!LIST_EMPTY(&flow->dev_handles)) {
-               dev_handle = LIST_FIRST(&flow->dev_handles);
-               LIST_REMOVE(dev_handle, next);
+       while (flow->dev_handles) {
+               uint32_t tmp_idx = flow->dev_handles;
+
+               dev_handle = mlx5_ipool_get(priv->sh->ipool
+                                           [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
+               if (!dev_handle)
+                       return;
+               flow->dev_handles = dev_handle->next.next;
                if (dev_handle->dvh.matcher)
                        flow_dv_matcher_release(dev, dev_handle);
                if (dev_handle->dvh.encap_decap)
@@ -8494,7 +8512,8 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (dev_handle->dvh.tag_resource)
                        flow_dv_tag_release(dev,
                                            dev_handle->dvh.tag_resource);
-               rte_free(dev_handle);
+               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                          tmp_idx);
        }
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index aa55f4e..9525fd4 100644
@@ -1505,6 +1505,7 @@ flow_verbs_prepare(struct rte_eth_dev *dev,
                   struct rte_flow_error *error)
 {
        size_t size = 0;
+       uint32_t handle_idx = 0;
        struct mlx5_flow *dev_flow;
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -1524,7 +1525,8 @@ flow_verbs_prepare(struct rte_eth_dev *dev,
                                   "not free temporary device flow");
                return NULL;
        }
-       dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+       dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                                  &handle_idx);
        if (!dev_handle) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1534,6 +1536,7 @@ flow_verbs_prepare(struct rte_eth_dev *dev,
        /* No multi-thread supporting. */
        dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
        dev_flow->handle = dev_handle;
+       dev_flow->handle_idx = handle_idx;
        /* Memcpy is used, only size needs to be cleared to 0. */
        dev_flow->verbs.size = 0;
        dev_flow->verbs.attr.num_of_specs = 0;
@@ -1739,11 +1742,14 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_handle *handle;
+       uint32_t handle_idx;
 
        if (!flow)
                return;
-       LIST_FOREACH(handle, &flow->dev_handles, next) {
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, handle, next) {
                if (handle->ib_flow) {
                        claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
                        handle->ib_flow = NULL;
@@ -1771,15 +1777,22 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 static void
 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_handle *handle;
 
        if (!flow)
                return;
        flow_verbs_remove(dev, flow);
-       while (!LIST_EMPTY(&flow->dev_handles)) {
-               handle = LIST_FIRST(&flow->dev_handles);
-               LIST_REMOVE(handle, next);
-               rte_free(handle);
+       while (flow->dev_handles) {
+               uint32_t tmp_idx = flow->dev_handles;
+
+               handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                                  tmp_idx);
+               if (!handle)
+                       return;
+               flow->dev_handles = handle->next.next;
+               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+                          tmp_idx);
        }
        if (flow->counter) {
                flow_verbs_counter_release(dev, flow->counter);
@@ -1808,6 +1821,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct mlx5_flow_handle *handle;
        struct mlx5_flow *dev_flow;
        struct mlx5_hrxq *hrxq;
+       uint32_t dev_handles;
        int err;
        int idx;
 
@@ -1875,7 +1889,8 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        return 0;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
-       LIST_FOREACH(handle, &flow->dev_handles, next) {
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      dev_handles, handle, next) {
                if (handle->hrxq) {
                        if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);