net/mlx5: fix index when creating flow
author    Bing Zhao <bingz@mellanox.com>
          Thu, 9 Apr 2020 14:38:41 +0000 (22:38 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 21 Apr 2020 11:57:07 +0000 (13:57 +0200)
When creating a flow, the creation routine is normally called
serially; no parallel execution is supported right now, so the
function is invoked only once for a single flow creation.

There is one special case, however, in which the creation routine
is called nested: when the xmeta feature is enabled and the actions
list contains FLAG / MARK, a metadata register copy flow needs to be
created before the original flow is applied to the hardware.
In flow non-cached mode, resources needed only during flow creation
are no longer saved. Instead, the memory space is pre-allocated and
reused for each flow, and a global per-device index indicates the
current position of the resources within that space. If the routine
is called in nested mode, the index gets reset and the intermediate
resources of the original flow are corrupted.
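
For illustration, here is a minimal standalone sketch of the failure
mode. The names inter_flows, flow_idx and MLX5_NUM_MAX_DEV_FLOWS
mirror the driver fields; everything else (the apply() helper, the
tags, the recursion) is a hypothetical reduction, not the actual
driver code:

    #include <stdio.h>

    #define MLX5_NUM_MAX_DEV_FLOWS 32

    /* Stand-ins for priv->inter_flows and priv->flow_idx. */
    static int inter_flows[MLX5_NUM_MAX_DEV_FLOWS];
    static int flow_idx;

    /* Apply all sub-flows recorded from position 'from' upward. */
    static void apply(int from, const char *who)
    {
        int idx;

        for (idx = flow_idx - 1; idx >= from; idx--)
            printf("%s applies sub-flow %d\n", who, inter_flows[idx]);
    }

    /* Buggy creation routine: resets the shared index on every entry. */
    static void create_flow(int tag, int nested)
    {
        flow_idx = 0;                  /* overwrites the outer flow's entries */
        inter_flows[flow_idx++] = tag;
        if (nested)
            create_flow(tag + 100, 0); /* metadata register copy flow */
        apply(0, nested ? "outer" : "inner");
    }

    int main(void)
    {
        create_flow(1, 1); /* "outer" wrongly re-applies sub-flow 101 */
        return 0;
    }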

To solve this, a nested index is introduced to save the start
position of the original flow creation. Currently, only one level of
nesting of the flow creation routine is supported.
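
And the fix in the same reduced form, reusing apply(), inter_flows
and flow_idx from the sketch above (again a hypothetical reduction
that only mirrors the save/restore logic added by this patch):

    /* Stand-in for priv->flow_nested_idx. */
    static int flow_nested_idx;

    static void create_flow_fixed(int tag, int nested)
    {
        /* Record the start position instead of resetting the index. */
        if (flow_idx)
            flow_nested_idx = flow_idx;
        inter_flows[flow_idx++] = tag;
        if (nested)
            create_flow_fixed(tag + 100, 0); /* metadata register copy flow */
        /* Apply only this call's own sub-flows. */
        apply(flow_nested_idx, nested ? "outer" : "inner");
        /* Recover the outer position; one nesting level only. */
        flow_idx = flow_nested_idx;
        if (flow_nested_idx)
            flow_nested_idx = 0;
    }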

Fixes: e7bfa3596a0a ("net/mlx5: separate the flow handle resource")

Signed-off-by: Bing Zhao <bingz@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c

index 5b5fdff..45619dc 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -543,6 +543,7 @@ struct mlx5_priv {
        struct mlx5_flows ctrl_flows; /* Control flow rules. */
        void *inter_flows; /* Intermediate resources for flow creation. */
        int flow_idx; /* Intermediate device flow index. */
+       int flow_nested_idx; /* Intermediate device flow index, nested. */
        LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
        LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
        LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
index 3b358b6..c44bc1f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4279,8 +4279,15 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                buf->entries = 1;
                buf->entry[0].pattern = (void *)(uintptr_t)items;
        }
-       /* Reset device flow index to 0. */
-       priv->flow_idx = 0;
+       /*
+        * Record the start index when there is a nested call. All sub-flows
+        * need to be translated before another calling.
+        * No need to use ping-pong buffer to save memory here.
+        */
+       if (priv->flow_idx) {
+               MLX5_ASSERT(!priv->flow_nested_idx);
+               priv->flow_nested_idx = priv->flow_idx;
+       }
        for (i = 0; i < buf->entries; ++i) {
                /*
                 * The splitter may create multiple dev_flows,
@@ -4340,23 +4347,27 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        if (list)
                TAILQ_INSERT_TAIL(list, flow, next);
        flow_rxq_flags_set(dev, flow);
+       /* Nested flow creation index recovery. */
+       priv->flow_idx = priv->flow_nested_idx;
+       if (priv->flow_nested_idx)
+               priv->flow_nested_idx = 0;
        return flow;
-error_before_flow:
-       if (hairpin_id)
-               mlx5_flow_id_release(priv->sh->flow_id_pool,
-                                    hairpin_id);
-       return NULL;
 error:
        MLX5_ASSERT(flow);
-       flow_mreg_del_copy_action(dev, flow);
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       if (flow->hairpin_flow_id)
-               mlx5_flow_id_release(priv->sh->flow_id_pool,
-                                    flow->hairpin_flow_id);
-       MLX5_ASSERT(flow);
+       flow_mreg_del_copy_action(dev, flow);
        flow_drv_destroy(dev, flow);
        rte_free(flow);
        rte_errno = ret; /* Restore rte_errno. */
+error_before_flow:
+       ret = rte_errno;
+       if (hairpin_id)
+               mlx5_flow_id_release(priv->sh->flow_id_pool,
+                                    hairpin_id);
+       rte_errno = ret;
+       priv->flow_idx = priv->flow_nested_idx;
+       if (priv->flow_nested_idx)
+               priv->flow_nested_idx = 0;
        return NULL;
 }
 
@@ -4606,6 +4617,9 @@ mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
        if (!priv->inter_flows)
                priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
                                               sizeof(struct mlx5_flow), 0);
+       /* Reset the index. */
+       priv->flow_idx = 0;
+       priv->flow_nested_idx = 0;
 }
 
 /**
index b8d03d4..18ea577 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8021,7 +8021,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        int err;
        int idx;
 
-       for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+       for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
                dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
                dv = &dev_flow->dv;
                dh = dev_flow->handle;
index 1a7ae1f..ef4d7a3 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1810,7 +1810,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        int err;
        int idx;
 
-       for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+       for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
                dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
                handle = dev_flow->handle;
                if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {