net/mlx5: remove redundant flag in device config
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4762fa0..b4d0b7b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -132,6 +132,12 @@ struct mlx5_flow_expand_rss {
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
 
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node);
+
 static bool
 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 {
@@ -259,6 +265,26 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
        return ret;
 }
 
+static const int *
+mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
+               const int *next_node)
+{
+       const struct mlx5_flow_expand_node *node = NULL;
+       const int *next = next_node;
+
+       while (next && *next) {
+               /*
+                * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
+                * flag set, because they were not found in the flow pattern.
+                */
+               node = &graph[*next];
+               if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
+                       break;
+               next = node->next;
+       }
+       return next;
+}
+
 #define MLX5_RSS_EXP_ELT_N 16
 
 /**
@@ -298,7 +324,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        const int *stack[MLX5_RSS_EXP_ELT_N];
        int stack_pos = 0;
        struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
-       unsigned int i;
+       unsigned int i, item_idx, last_expand_item_idx = 0;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
@@ -306,7 +332,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
-       const struct rte_flow_item *last_item = NULL;
+       const struct rte_flow_item *last_expand_item = NULL;
 
        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
@@ -317,12 +343,15 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
        buf->entries = 0;
        addr = buf->entry[0].pattern;
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+       for (item = pattern, item_idx = 0;
+                       item->type != RTE_FLOW_ITEM_TYPE_END;
+                       item++, item_idx++) {
                if (!mlx5_flow_is_rss_expandable_item(item)) {
                        user_pattern_size += sizeof(*item);
                        continue;
                }
-               last_item = item;
+               last_expand_item = item;
+               last_expand_item_idx = item_idx;
                i = 0;
                while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
@@ -354,7 +383,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
         * Check if the last valid item has spec set, need complete pattern,
         * and the pattern can be used for expansion.
         */
-       missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+       missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
@@ -389,19 +418,13 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
+       } else if (last_expand_item != NULL) {
+               node = mlx5_flow_expand_rss_adjust_node(pattern,
+                               last_expand_item_idx, graph, node);
        }
        memset(flow_items, 0, sizeof(flow_items));
-       next_node = node->next;
-       while (next_node) {
-               /*
-                * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
-                * flag set, because they were not found in the flow pattern.
-                */
-               node = &graph[*next_node];
-               if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
-                       break;
-               next_node = node->next;
-       }
+       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                       node->next);
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
@@ -438,7 +461,8 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                /* Go deeper. */
                if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
                                node->next) {
-                       next_node = node->next;
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       node->next);
                        if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
                                rte_errno = E2BIG;
                                return -rte_errno;
@@ -446,15 +470,27 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       ++next_node);
+               } else if (!stack_pos) {
+                       /*
+                        * Completing the traversal over the different paths.
+                        * The next_node is advanced to the terminator.
+                        */
                        ++next_node;
                } else {
                        /* Move to the next path. */
-                       if (stack_pos)
+                       while (stack_pos) {
                                next_node = stack[--stack_pos];
-                       next_node++;
+                               next_node++;
+                               if (*next_node)
+                                       break;
+                       }
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       next_node);
                        stack[stack_pos] = next_node;
                }
-               node = *next_node ? &graph[*next_node] : NULL;
+               node = next_node && *next_node ? &graph[*next_node] : NULL;
        };
        return lsize;
 }
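
Editor's note on the traversal changed above: the expansion is a depth-first walk over each node's zero-terminated next[] array, with stack[] remembering the position at every level so that, once a path is exhausted, the walk pops until it reaches a level that still has an unvisited sibling, skipping EXPLICIT-flagged nodes along the way. The standalone toy below is a sketch of that push/pop/skip structure only; toy_node, NODE_EXPLICIT and the graph are hypothetical stand-ins for mlx5_flow_expand_node and MLX5_EXPANSION_NODE_EXPLICIT, and the OPTIONAL flag and overflow checks are omitted.

/*
 * Editor's sketch (not driver code): minimal model of the depth-first
 * expansion walk with explicit-node skipping.
 */
#include <stdio.h>

#define NODE_EXPLICIT 0x1

struct toy_node {
	const int *next;  /* zero-terminated indices into graph[] */
	int flags;
	const char *name;
};

static const int root_next[] = {2, 3, 0};
static const int l3_next[] = {4, 5, 0};

static const struct toy_node graph[] = {
	[1] = { .next = root_next, .name = "root" },
	[2] = { .next = l3_next, .name = "ipv4" },
	[3] = { .next = l3_next, .name = "ipv6", .flags = NODE_EXPLICIT },
	[4] = { .name = "udp" },
	[5] = { .name = "tcp" },
};

/* Same idea as mlx5_flow_expand_rss_skip_explicit(): step over nodes that
 * carry the EXPLICIT flag and continue from their own next[] list. */
static const int *
skip_explicit(const int *next)
{
	while (next && *next && (graph[*next].flags & NODE_EXPLICIT))
		next = graph[*next].next;
	return next;
}

int
main(void)
{
	const int *stack[8];
	int stack_pos = 0;
	const int *next = skip_explicit(root_next);
	const struct toy_node *node = next && *next ? &graph[*next] : NULL;

	stack[stack_pos] = next;
	while (node) {
		printf("expand %s\n", node->name);
		if (node->next) {
			/* Go deeper. */
			next = skip_explicit(node->next);
			stack[++stack_pos] = next;
		} else if (*(next + 1)) {
			/* Follow up with the next possibility on this level. */
			next = skip_explicit(++next);
		} else if (!stack_pos) {
			/* All paths done: advance to the terminator. */
			++next;
		} else {
			/* Pop until a level with a remaining sibling. */
			while (stack_pos) {
				next = stack[--stack_pos];
				next++;
				if (*next)
					break;
			}
			next = skip_explicit(next);
			stack[stack_pos] = next;
		}
		node = next && *next ? &graph[*next] : NULL;
	}
	return 0;
}

It should print ipv4, udp, tcp, udp, tcp: the explicit ipv6 node itself is never emitted, but the paths beneath it are still expanded, and the pop loop keeps climbing until it finds a level with a remaining sibling or reaches the root terminator.
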
@@ -471,6 +507,8 @@ enum mlx5_expansion {
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
+       MLX5_EXPANSION_STD_VXLAN,
+       MLX5_EXPANSION_L3_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_NVGRE,
@@ -567,6 +605,15 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
+       [MLX5_EXPANSION_STD_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
+       [MLX5_EXPANSION_L3_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
@@ -9099,6 +9146,9 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                err_msg = "unsupported tunnel type";
                goto out;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
+       case RTE_FLOW_ITEM_TYPE_GRE:
+       case RTE_FLOW_ITEM_TYPE_NVGRE:
+       case RTE_FLOW_ITEM_TYPE_GENEVE:
                break;
        }
 
@@ -9392,3 +9442,41 @@ mlx5_dbg__print_pattern(const struct rte_flow_item *item)
        }
        printf("END\n");
 }
+
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+       const struct rte_flow_item_udp *spec = udp_item->spec;
+       const struct rte_flow_item_udp *mask = udp_item->mask;
+       uint16_t udp_dport = 0;
+
+       if (spec != NULL) {
+               if (!mask)
+                       mask = &rte_flow_item_udp_mask;
+               udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+                               mask->hdr.dst_port);
+       }
+       return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
+
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node)
+{
+       const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_VXLAN:
+               MLX5_ASSERT(item_idx > 0);
+               prev_item = pattern + item_idx - 1;
+               MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+               if (mlx5_flow_is_std_vxlan_port(prev_item))
+                       return &graph[MLX5_EXPANSION_STD_VXLAN];
+               else
+                       return &graph[MLX5_EXPANSION_L3_VXLAN];
+               break;
+       default:
+               return node;
+       }
+}
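
Usage-level illustration of the new VXLAN adjustment (editor's sketch, not part of the patch): since mlx5_flow_is_std_vxlan_port() and mlx5_flow_expand_rss_adjust_node() are static, the port check is mirrored locally below, assuming DPDK headers and using 4789 in place of MLX5_UDP_PORT_VXLAN. A UDP item with no destination-port constraint, or one matching 4789, keeps the standard VXLAN expansion rooted at MLX5_EXPANSION_STD_VXLAN (inner Ethernet); any other fixed destination port selects MLX5_EXPANSION_L3_VXLAN (inner IPv4/IPv6 only).

/*
 * Editor's sketch, assuming DPDK headers. is_std_vxlan_port() mirrors the
 * static mlx5_flow_is_std_vxlan_port() above; 4789 stands in for
 * MLX5_UDP_PORT_VXLAN, which lives in the mlx5 private headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

#define UDP_PORT_VXLAN 4789

static int
is_std_vxlan_port(const struct rte_flow_item *udp_item)
{
	const struct rte_flow_item_udp *spec = udp_item->spec;
	const struct rte_flow_item_udp *mask = udp_item->mask;
	uint16_t dport = 0;

	if (spec != NULL) {
		if (!mask)
			mask = &rte_flow_item_udp_mask;
		dport = rte_be_to_cpu_16(spec->hdr.dst_port &
					 mask->hdr.dst_port);
	}
	/* No port constraint, or the IANA VXLAN port: standard VXLAN. */
	return !dport || dport == UDP_PORT_VXLAN;
}

int
main(void)
{
	struct rte_flow_item_udp std_spec = { .hdr.dst_port = RTE_BE16(4789) };
	struct rte_flow_item_udp other_spec = { .hdr.dst_port = RTE_BE16(4790) };
	struct rte_flow_item udp_std = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &std_spec,
	};
	struct rte_flow_item udp_other = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &other_spec,
	};
	struct rte_flow_item udp_any = { .type = RTE_FLOW_ITEM_TYPE_UDP };

	/* 1 -> MLX5_EXPANSION_STD_VXLAN, 0 -> MLX5_EXPANSION_L3_VXLAN */
	printf("dst 4789: %d\n", is_std_vxlan_port(&udp_std));
	printf("dst 4790: %d\n", is_std_vxlan_port(&udp_other));
	printf("no spec : %d\n", is_std_vxlan_port(&udp_any));
	return 0;
}

In mlx5_flow_expand_rss_adjust_node() the same decision is taken on the UDP item preceding the VXLAN item (prev_item), which is why the function asserts item_idx > 0 and a UDP predecessor before choosing between the two expansion roots.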