net/mlx5: fix flow tunnel handling
author: Yongseok Koh <yskoh@mellanox.com>
Wed, 24 Oct 2018 12:36:14 +0000 (15:36 +0300)
committer: Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 26 Oct 2018 20:14:06 +0000 (22:14 +0200)
Both rte_flow and mlx5_flow redundantly keep item flags, and the flags
are not properly set in the code, which results in wrong tunnel flag
handling. A rte_flow can have multiple expanded device flows if the
flow has an RSS action, and each expanded flow can match different
layers. Therefore, the layers field should belong to mlx5_flow.
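
To make the change concrete, here is a minimal sketch of the resulting
layout. This is heavily simplified; see the mlx5_flow.h hunk below for
the real definitions. The point is that layer bits become per device
flow while action bits stay on the main flow:

    #include <stdint.h>
    #include <sys/queue.h>

    /* Simplified sketch, not the full driver structures. */
    struct mlx5_flow {
            LIST_ENTRY(mlx5_flow) next;
            struct rte_flow *flow; /* Back-pointer to the main flow. */
            uint32_t layers; /* MLX5_FLOW_LAYER_* bits; each expanded
                              * device flow can match different layers,
                              * so this cannot live in rte_flow. */
    };

    struct rte_flow {
            uint32_t actions; /* MLX5_FLOW_ACTION_* bits; identical for
                               * all expanded device flows, so stored
                               * once on the main flow. */
            LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
    };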

Fixes: c4d9b9f7f382 ("net/mlx5: add Direct Verbs final functions")
Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items")
Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function")
Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
Fixes: 4e05a229c5da ("net/mlx5: add flow prepare function")
Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2d43b1a..b170856 100644
@@ -525,20 +525,22 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
+ * flow.
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Pointer to flow structure.
+ * @param[in] dev_flow
+ *   Pointer to device flow structure.
  */
 static void
-flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
        struct priv *priv = dev->data->dev_private;
+       struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-       const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
        for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -556,7 +558,8 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 
                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-                               if ((tunnels_info[j].tunnel & flow->layers) ==
+                               if ((tunnels_info[j].tunnel &
+                                    dev_flow->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
@@ -567,22 +570,40 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
        }
 }
 
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ */
+static void
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       struct mlx5_flow *dev_flow;
+
+       LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+               flow_drv_rxq_flags_set(dev, dev_flow);
+}
+
 /**
  * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
+ * device flow if no other flow uses it with the same kind of request.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] flow
- *   Pointer to the flow.
+ * @param[in] dev_flow
+ *   Pointer to the device flow.
  */
 static void
-flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
        struct priv *priv = dev->data->dev_private;
+       struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-       const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
        assert(dev->data->dev_started);
@@ -601,7 +622,8 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 
                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
-                               if ((tunnels_info[j].tunnel & flow->layers) ==
+                               if ((tunnels_info[j].tunnel &
+                                    dev_flow->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
@@ -612,6 +634,24 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
        }
 }
 
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Pointer to the flow.
+ */
+static void
+flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       struct mlx5_flow *dev_flow;
+
+       LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+               flow_drv_rxq_flags_trim(dev, dev_flow);
+}
+
 /**
  * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
  *
@@ -2018,6 +2058,11 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                if (!dev_flow)
                        goto error;
                dev_flow->flow = flow;
+               dev_flow->layers = item_flags;
+               /* Store the actions once; expanded flows share them. */
+               if (i == 0)
+                       flow->actions = action_flags;
+               assert(flow->actions == action_flags);
                LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
                ret = flow_drv_translate(dev, dev_flow, attr,
                                         buf->entry[i].pattern,
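
The creation loop above stores the per-expansion item flags on each
device flow while asserting that the action flags never vary across
expansions. A self-contained illustration of that invariant, with
hypothetical layer/action bits standing in for the MLX5_FLOW_*
definitions:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the MLX5_FLOW_LAYER_* and
     * MLX5_FLOW_ACTION_* bits; illustration only. */
    #define LAYER_L2   (1u << 0)
    #define LAYER_IPV4 (1u << 1)
    #define LAYER_UDP  (1u << 2)
    #define ACTION_RSS (1u << 0)

    int
    main(void)
    {
            /* An RSS flow may be expanded into several device flows
             * with increasingly specific patterns, so each expansion
             * carries different layer bits. */
            const uint32_t layers[] = {
                    LAYER_L2,
                    LAYER_L2 | LAYER_IPV4,
                    LAYER_L2 | LAYER_IPV4 | LAYER_UDP,
            };
            uint32_t flow_actions = 0;
            unsigned int i;

            for (i = 0; i != sizeof(layers) / sizeof(layers[0]); ++i) {
                    /* Layer bits differ between expansions... */
                    assert(i == 0 || layers[i] != layers[i - 1]);
                    /* ...while actions are stored once and must match
                     * for every expansion, as the patch asserts. */
                    if (i == 0)
                            flow_actions = ACTION_RSS;
                    assert(flow_actions == ACTION_RSS);
            }
            return 0;
    }
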
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index cc4f20c..4b1f36f 100644
@@ -215,7 +215,8 @@ struct mlx5_flow_verbs {
 struct mlx5_flow {
        LIST_ENTRY(mlx5_flow) next;
        struct rte_flow *flow; /**< Pointer to the main flow. */
-       uint32_t layers; /**< Bit-fields that holds the detected layers. */
+       uint32_t layers;
+       /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
        union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
                struct mlx5_flow_dv dv;
@@ -244,15 +245,14 @@ struct mlx5_flow_counter {
 struct rte_flow {
        TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
        enum mlx5_flow_drv_type drv_type; /**< Driver type. */
-       uint32_t layers;
-       /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
        struct mlx5_flow_counter *counter; /**< Holds flow counter. */
        struct rte_flow_action_rss rss;/**< RSS context. */
        uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
        uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
        LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
        /**< Device flows that are part of the flow. */
-       uint32_t actions; /**< Bit-fields which mark all detected actions. */
+       uint32_t actions;
+       /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 };
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 071f31d..53e9a17 100644
@@ -177,6 +177,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
@@ -285,7 +286,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
-               tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
@@ -1253,13 +1253,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
                },
        };
        void *match_value = dev_flow->dv.value.buf;
-       uint8_t inner = 0;
+       int tunnel = 0;
 
        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
-       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
+       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
                flow_dv_create_item(&matcher, match_value, items, dev_flow,
-                                   inner);
+                                   tunnel);
+       }
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                     matcher.mask.size);
        if (priority == MLX5_FLOW_PRIO_RSVD)
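
For context, the bug fixed in the translation hunk above is that the
old inner flag was initialized to zero and never updated, so every item
of a tunneled flow was created as an outer spec. A condensed
before/after sketch of the loop shape (excerpted from the hunk above,
not standalone code):

    /* Before: inner stays 0 for all items, so inner headers of a
     * tunneled flow were matched as outer headers. */
    uint8_t inner = 0;
    for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
            flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                inner);

    /* After: the tunnel bit is derived from the device flow's own layer
     * bits, which flow_list_create() stored from the expanded pattern. */
    for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
            int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
            flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                tunnel);
    }
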
@@ -1324,7 +1326,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
-                                        !!(flow->layers &
+                                        !!(dev_flow->layers &
                                            MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 2547fb8..a435d65 100644
@@ -466,7 +466,7 @@ flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
 {
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
-       const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
        struct ibv_flow_spec_ipv6 ipv6 = {
                .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
@@ -590,7 +590,7 @@ flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
 {
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
-       const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
        struct ibv_flow_spec_tcp_udp tcp = {
                .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
@@ -1126,6 +1126,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int ret = 0;
+
+               tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
@@ -1237,7 +1239,6 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
-               tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
@@ -1380,9 +1381,10 @@ flow_verbs_get_items_and_size(const struct rte_flow_item items[],
 {
        int size = 0;
        uint64_t detected_items = 0;
-       const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL);
+
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
@@ -1578,7 +1580,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                                                  "action not supported");
                }
        }
-       dev_flow->flow->actions |= action_flags;
+       /* Action flags should have been set by flow_drv_prepare(). */
+       assert(dev_flow->flow->actions == action_flags);
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
@@ -1741,7 +1744,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                     verbs->hash_fields,
                                                     (*flow->queue),
                                                     flow->rss.queue_num,
-                                                    !!(flow->layers &
+                                                    !!(dev_flow->layers &
                                                      MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set