net/mlx5: fix mismatch metadata flow with meter action
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c5d4a95..a30ce69 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,12 +98,27 @@ struct mlx5_flow_expand_node {
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
-        * (see ETH_RSS_* definitions).
+        * (see RTE_ETH_RSS_* definitions).
+        */
+       uint64_t node_flags;
+       /**<
+        * Bit-fields that define how the node is used in the expansion.
+        * (see MLX5_EXPANSION_NODE_* definitions).
         */
-       uint8_t optional;
-       /**< optional expand field. Default 0 to expand, 1 not go deeper. */
 };
 
+/* Optional expand field. The expansion algorithm will not go deeper. */
+#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
+
+/* The node is not added implicitly as expansion to the flow pattern.
+ * If the node type does not match the flow pattern item type, the
+ * expansion algorithm will go deeper to its next items.
+ * In the current implementation, the list of next node indexes can
+ * have at most one node with this flag set and it has to be the last
+ * node index (before the list terminator).
+ */
+#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
+
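(Reviewer note: both flags are consumed by the mlx5_support_expansion table
updated further down in this patch; copied from those hunks, VLAN nodes become
explicit, so a VLAN item is only expanded when the user pattern already
contains one, while GRE_KEY stays optional, so the algorithm does not go
deeper past it.)

        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
        },
        [MLX5_EXPANSION_GRE_KEY] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6,
                                                  MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
                .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
        },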
 /** Object returned by mlx5_flow_expand_rss(). */
 struct mlx5_flow_expand_rss {
        uint32_t entries;
@@ -117,6 +132,12 @@ struct mlx5_flow_expand_rss {
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
 
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node);
+
 static bool
 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 {
@@ -131,6 +152,11 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
        case RTE_FLOW_ITEM_TYPE_NVGRE:
        case RTE_FLOW_ITEM_TYPE_GRE:
        case RTE_FLOW_ITEM_TYPE_GENEVE:
+       case RTE_FLOW_ITEM_TYPE_MPLS:
+       case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+       case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+       case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+       case RTE_FLOW_ITEM_TYPE_GTP:
                return true;
        default:
                break;
@@ -232,6 +258,29 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
+       case RTE_FLOW_ITEM_TYPE_GENEVE:
+               ether_type_m = item->mask ?
+                              ((const struct rte_flow_item_geneve *)
+                              (item->mask))->protocol :
+                              rte_flow_item_geneve_mask.protocol;
+               ether_type = ((const struct rte_flow_item_geneve *)
+                            (item->spec))->protocol;
+               ether_type_m = rte_be_to_cpu_16(ether_type_m);
+               ether_type = rte_be_to_cpu_16(ether_type);
+               switch (ether_type_m & ether_type) {
+               case RTE_ETHER_TYPE_TEB:
+                       ret = RTE_FLOW_ITEM_TYPE_ETH;
+                       break;
+               case RTE_ETHER_TYPE_IPV4:
+                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
+                       break;
+               case RTE_ETHER_TYPE_IPV6:
+                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
+                       break;
+               default:
+                       ret = RTE_FLOW_ITEM_TYPE_END;
+               }
+               break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
@@ -239,6 +288,26 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
        return ret;
 }
 
+static const int *
+mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
+               const int *next_node)
+{
+       const struct mlx5_flow_expand_node *node = NULL;
+       const int *next = next_node;
+
+       while (next && *next) {
+               /*
+                * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
+                * flag set, because they were not found in the flow pattern.
+                */
+               node = &graph[*next];
+               if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
+                       break;
+               next = node->next;
+       }
+       return next;
+}
+
 #define MLX5_RSS_EXP_ELT_N 16
 
 /**
@@ -252,7 +321,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -264,6 +333,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
  *   set, the following errors are defined:
  *
  *   -E2BIG: graph-depth @p graph is too deep.
+ *   -EINVAL: @p size is not big enough to store the expanded pattern.
  */
 static int
 mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
@@ -277,7 +347,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        const int *stack[MLX5_RSS_EXP_ELT_N];
        int stack_pos = 0;
        struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
-       unsigned int i;
+       unsigned int i, item_idx, last_expand_item_idx = 0;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
@@ -285,27 +355,37 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
-       const struct rte_flow_item *last_item = NULL;
+       const struct rte_flow_item *last_expand_item = NULL;
 
        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
                MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
-       if (lsize <= size) {
-               buf->entry[0].priority = 0;
-               buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
-               buf->entries = 0;
-               addr = buf->entry[0].pattern;
-       }
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+       if (lsize > size)
+               return -EINVAL;
+       buf->entry[0].priority = 0;
+       buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
+       buf->entries = 0;
+       addr = buf->entry[0].pattern;
+       for (item = pattern, item_idx = 0;
+                       item->type != RTE_FLOW_ITEM_TYPE_END;
+                       item++, item_idx++) {
                if (!mlx5_flow_is_rss_expandable_item(item)) {
                        user_pattern_size += sizeof(*item);
                        continue;
                }
-               last_item = item;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               last_expand_item = item;
+               last_expand_item_idx = item_idx;
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                }
                if (next)
                        node = next;
@@ -313,12 +393,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
+       if (lsize > size)
+               return -EINVAL;
        /* Copy the user pattern in the first entry of the buffer. */
-       if (lsize <= size) {
-               rte_memcpy(addr, pattern, user_pattern_size);
-               addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-               buf->entries = 1;
-       }
+       rte_memcpy(addr, pattern, user_pattern_size);
+       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+       buf->entries = 1;
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
@@ -326,7 +406,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
         * Check if the last valid item has spec set, needs a complete pattern,
         * and the pattern can be used for expansion.
         */
-       missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+       missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
@@ -334,13 +414,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                        next = NULL;
                }
        }
@@ -348,7 +435,9 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                elt = 2; /* missed item + item end. */
                node = next;
                lsize += elt * sizeof(*item) + user_pattern_size;
-               if ((node->rss_types & types) && lsize <= size) {
+               if (lsize > size)
+                       return -EINVAL;
+               if (node->rss_types & types) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
@@ -359,14 +448,19 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
+       } else if (last_expand_item != NULL) {
+               node = mlx5_flow_expand_rss_adjust_node(pattern,
+                               last_expand_item_idx, graph, node);
        }
        memset(flow_items, 0, sizeof(flow_items));
-       next_node = node->next;
+       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                       node->next);
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
+                       size_t n;
                        /*
                         * compute the number of items to copy from the
                         * expansion and copy it.
@@ -376,28 +470,29 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        elt = stack_pos + 2;
                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
-                       if (lsize <= size) {
-                               size_t n = elt * sizeof(*item);
-
-                               buf->entry[buf->entries].priority =
-                                       stack_pos + 1 + missed;
-                               buf->entry[buf->entries].pattern = addr;
-                               buf->entries++;
-                               rte_memcpy(addr, buf->entry[0].pattern,
-                                          user_pattern_size);
-                               addr = (void *)(((uintptr_t)addr) +
-                                               user_pattern_size);
-                               rte_memcpy(addr, &missed_item,
-                                          missed * sizeof(*item));
-                               addr = (void *)(((uintptr_t)addr) +
-                                       missed * sizeof(*item));
-                               rte_memcpy(addr, flow_items, n);
-                               addr = (void *)(((uintptr_t)addr) + n);
-                       }
+                       if (lsize > size)
+                               return -EINVAL;
+                       n = elt * sizeof(*item);
+                       buf->entry[buf->entries].priority =
+                               stack_pos + 1 + missed;
+                       buf->entry[buf->entries].pattern = addr;
+                       buf->entries++;
+                       rte_memcpy(addr, buf->entry[0].pattern,
+                                  user_pattern_size);
+                       addr = (void *)(((uintptr_t)addr) +
+                                       user_pattern_size);
+                       rte_memcpy(addr, &missed_item,
+                                  missed * sizeof(*item));
+                       addr = (void *)(((uintptr_t)addr) +
+                               missed * sizeof(*item));
+                       rte_memcpy(addr, flow_items, n);
+                       addr = (void *)(((uintptr_t)addr) + n);
                }
                /* Go deeper. */
-               if (!node->optional && node->next) {
-                       next_node = node->next;
+               if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
+                               node->next) {
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       node->next);
                        if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
                                rte_errno = E2BIG;
                                return -rte_errno;
@@ -405,15 +500,27 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       ++next_node);
+               } else if (!stack_pos) {
+                       /*
+                        * Completing the traversal over the different paths.
+                        * The next_node is advanced to the terminator.
+                        */
                        ++next_node;
                } else {
                        /* Move to the next path. */
-                       if (stack_pos)
+                       while (stack_pos) {
                                next_node = stack[--stack_pos];
-                       next_node++;
+                               next_node++;
+                               if (*next_node)
+                                       break;
+                       }
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       next_node);
                        stack[stack_pos] = next_node;
                }
-               node = *next_node ? &graph[*next_node] : NULL;
+               node = next_node && *next_node ? &graph[*next_node] : NULL;
        };
        return lsize;
 }
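(Reviewer note: a minimal sketch of how this helper is typically driven by a
caller such as flow_list_create(); "items" and "rss" are the caller's pattern
and RSS action, and the scratch-union size is illustrative, not taken from
this patch.)

        union {
                struct mlx5_flow_expand_rss buf;
                uint8_t buffer[2048]; /* illustrative scratch size */
        } expand_buffer;
        int ret;

        ret = mlx5_flow_expand_rss(&expand_buffer.buf,
                                   sizeof(expand_buffer.buffer),
                                   items, rss->types, mlx5_support_expansion,
                                   find_graph_root(rss->level));
        if (ret < 0)
                return ret; /* -E2BIG: graph too deep, -EINVAL: buffer too small */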
@@ -421,10 +528,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
-       MLX5_EXPANSION_ROOT_ETH_VLAN,
-       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
-       MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
@@ -433,13 +537,14 @@ enum mlx5_expansion {
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
+       MLX5_EXPANSION_STD_VXLAN,
+       MLX5_EXPANSION_L3_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_NVGRE,
        MLX5_EXPANSION_GRE_KEY,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
-       MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
@@ -447,6 +552,9 @@ enum mlx5_expansion {
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
+       MLX5_EXPANSION_IPV6_FRAG_EXT,
+       MLX5_EXPANSION_GTP,
+       MLX5_EXPANSION_GENEVE,
 };
 
 /** Supported expansion of items. */
@@ -463,23 +571,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
-       [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
-               .type = RTE_FLOW_ITEM_TYPE_END,
-       },
-       [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT
-                                               (MLX5_EXPANSION_OUTER_ETH_VLAN),
-               .type = RTE_FLOW_ITEM_TYPE_END,
-       },
        [MLX5_EXPANSION_OUTER_ETH] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
-                                                 MLX5_EXPANSION_OUTER_IPV6,
-                                                 MLX5_EXPANSION_MPLS),
-               .type = RTE_FLOW_ITEM_TYPE_ETH,
-               .rss_types = 0,
-       },
-       [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
@@ -488,6 +580,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -498,18 +591,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
-                                                 MLX5_EXPANSION_VXLAN_GPE),
+                                                 MLX5_EXPANSION_VXLAN_GPE,
+                                                 MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GENEVE,
+                                                 MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -520,18 +616,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_NVGRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
-                                                 MLX5_EXPANSION_VXLAN_GPE),
+                                                 MLX5_EXPANSION_VXLAN_GPE,
+                                                 MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GENEVE,
+                                                 MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -539,6 +638,15 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
+       [MLX5_EXPANSION_STD_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
+       [MLX5_EXPANSION_L3_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
@@ -546,16 +654,19 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6,
-                                                 MLX5_EXPANSION_GRE_KEY),
+                                                 MLX5_EXPANSION_GRE_KEY,
+                                                 MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_GRE_KEY] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                 MLX5_EXPANSION_IPV6),
+                                                 MLX5_EXPANSION_IPV6,
+                                                 MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
-               .optional = 1,
+               .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
        },
        [MLX5_EXPANSION_NVGRE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
@@ -563,15 +674,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                 MLX5_EXPANSION_IPV6),
+                                                 MLX5_EXPANSION_IPV6,
+                                                 MLX5_EXPANSION_ETH),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
+               .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
        },
        [MLX5_EXPANSION_ETH] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                 MLX5_EXPANSION_IPV6),
-               .type = RTE_FLOW_ITEM_TYPE_ETH,
-       },
-       [MLX5_EXPANSION_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
@@ -579,36 +687,52 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
-                                                 MLX5_EXPANSION_IPV6_TCP),
+                                                 MLX5_EXPANSION_IPV6_TCP,
+                                                 MLX5_EXPANSION_IPV6_FRAG_EXT),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+       },
+       [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
+               .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+       },
+       [MLX5_EXPANSION_GTP] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_GTP,
+       },
+       [MLX5_EXPANSION_GENEVE] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_GENEVE,
        },
 };
 
@@ -657,6 +781,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -676,6 +808,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
        .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
+       .flex_item_create = mlx5_flow_flex_item_create,
+       .flex_item_release = mlx5_flow_flex_item_release,
 };
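(Reviewer note: the two new callbacks back the generic rte_flow flex-item API;
a hypothetical application-side sketch, with the parser configuration left
empty and "port_id" assumed to be an already configured port.)

        struct rte_flow_error error;
        struct rte_flow_item_flex_conf conf = { 0 }; /* parser config omitted */
        struct rte_flow_item_flex_handle *handle;

        /* Dispatched into mlx5_flow_flex_item_create() via mlx5_flow_ops. */
        handle = rte_flow_flex_item_create(port_id, &conf, &error);
        if (handle != NULL)
                rte_flow_flex_item_release(port_id, handle, &error);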
 
 /* Tunnel information. */
@@ -806,6 +940,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
        case MLX5_MTR_COLOR:
        case MLX5_ASO_FLOW_HIT:
        case MLX5_ASO_CONNTRACK:
+       case MLX5_SAMPLE_ID:
                /* All features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
@@ -830,7 +965,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
-               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+               if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
@@ -840,21 +975,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 * If the available index REG_C_y >= REG_C_x, skip the
                 * color register.
                 */
-               if (skip_mtr_reg && config->flow_mreg_c
+               if (skip_mtr_reg && priv->sh->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
-                       if (config->flow_mreg_c
+                       if (priv->sh->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
-                               return config->flow_mreg_c
+                               return priv->sh->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
-               return config->flow_mreg_c[id + start_reg - REG_C_0];
+               return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
@@ -875,7 +1010,6 @@ bool
 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
 
        /*
         * Having available reg_c can be regarded inclusively as supporting
@@ -885,7 +1019,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
         * - reg_c's are preserved across different domain (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
-       return config->flow_mreg_c[2] != REG_NON;
+       return priv->sh->flow_mreg_c[2] != REG_NON;
 }
 
 /**
@@ -906,7 +1040,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!attr->group && !attr->transfer)
-               return priv->config.flow_prio - 2;
+               return priv->sh->flow_max_priority - 2;
        return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
 }
 
@@ -919,21 +1053,26 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
  *   Pointer to device flow rule attributes.
  * @param[in] subpriority
  *   The priority based on the items.
+ * @param[in] external
+ *   True if the flow is a user (external) flow.
  * @return
  *   The matcher priority of the flow.
  */
 uint16_t
 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
-                         uint32_t subpriority)
+                         uint32_t subpriority, bool external)
 {
        uint16_t priority = (uint16_t)attr->priority;
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!attr->group && !attr->transfer) {
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
-                       priority = priv->config.flow_prio - 1;
+                       priority = priv->sh->flow_max_priority - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+       } else if (!external && attr->transfer && attr->group == 0 &&
+                  attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
+               return (priv->sh->flow_max_priority - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
@@ -1011,7 +1150,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1105,10 +1244,11 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                return;
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                /*
                 * To support metadata register copy on Tx loopback,
                 * this must always be enabled (metadata may arrive
@@ -1200,10 +1340,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
@@ -1264,18 +1405,16 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
                unsigned int j;
 
-               if (!(*priv->rxqs)[i])
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               rxq_ctrl = container_of((*priv->rxqs)[i],
-                                       struct mlx5_rxq_ctrl, rxq);
-               rxq_ctrl->flow_mark_n = 0;
-               rxq_ctrl->rxq.mark = 0;
+               rxq->ctrl->flow_mark_n = 0;
+               rxq->ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
-                       rxq_ctrl->flow_tunnels_n[j] = 0;
-               rxq_ctrl->rxq.tunnel = 0;
+                       rxq->ctrl->flow_tunnels_n[j] = 0;
+               rxq->ctrl->rxq.tunnel = 0;
        }
 }
 
@@ -1289,13 +1428,15 @@ void
 mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *data;
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[i])
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+               struct mlx5_rxq_data *data;
+
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               data = (*priv->rxqs)[i];
+               data = &rxq->ctrl->rxq;
                if (!rte_flow_dynf_metadata_avail()) {
                        data->dynf_meta = 0;
                        data->flow_meta_mask = 0;
@@ -1305,9 +1446,7 @@ mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
                        data->dynf_meta = 1;
                        data->flow_meta_mask = rte_flow_dynf_metadata_mask;
                        data->flow_meta_offset = rte_flow_dynf_metadata_offs;
-                       data->flow_meta_port_mask = (uint32_t)~0;
-                       if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
-                               data->flow_meta_port_mask >>= 16;
+                       data->flow_meta_port_mask = priv->sh->dv_meta_mask;
                }
        }
 }
@@ -1488,7 +1627,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue index out of range");
-       if (!(*priv->rxqs)[queue->index])
+       if (mlx5_rxq_get(dev, queue->index) == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
@@ -1501,6 +1640,57 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
        return 0;
 }
 
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ *   Configured device.
+ * @param[in] queues
+ *   Array of queue numbers.
+ * @param[in] queues_n
+ *   Size of the @p queues array.
+ * @param[out] error
+ *   On error, filled with a textual error description.
+ * @param[out] queue
+ *   On error, filled with an offending queue index in @p queues array.
+ *
+ * @return
+ *   0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
+                        const uint16_t *queues, uint32_t queues_n,
+                        const char **error, uint32_t *queue_idx)
+{
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+       uint32_t i;
+
+       for (i = 0; i != queues_n; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
+                                                                  queues[i]);
+
+               if (queues[i] >= priv->rxqs_n) {
+                       *error = "queue index out of range";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               if (rxq_ctrl == NULL) {
+                       *error = "queue is not configured";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               if (i == 0)
+                       rxq_type = rxq_ctrl->type;
+               if (rxq_type != rxq_ctrl->type) {
+                       *error = "combining hairpin and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
+       }
+       return 0;
+}
+
 /*
  * Validate the rss action.
  *
@@ -1521,8 +1711,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
-       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
-       unsigned int i;
+       int ret;
+       const char *message;
+       uint32_t queue_idx;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
@@ -1566,14 +1757,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
-       if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-           !(rss->types & ETH_RSS_IP))
+       if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+           !(rss->types & RTE_ETH_RSS_IP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L3 partial RSS requested but L3 RSS"
                                          " type not specified");
-       if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-           !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+       if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+           !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
@@ -1586,27 +1777,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
-       for (i = 0; i != rss->queue_num; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
-
-               if (rss->queue[i] >= priv->rxqs_n)
-                       return rte_flow_error_set
-                               (error, EINVAL,
-                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue index out of range");
-               if (!(*priv->rxqs)[rss->queue[i]])
-                       return rte_flow_error_set
-                               (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue is not configured");
-               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
-                                       struct mlx5_rxq_ctrl, rxq);
-               if (i == 0)
-                       rxq_type = rxq_ctrl->type;
-               if (rxq_type != rxq_ctrl->type)
-                       return rte_flow_error_set
-                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i],
-                                "combining hairpin and regular RSS queues is not supported");
+       ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+                                      &message, &queue_idx);
+       if (ret != 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue[queue_idx], message);
        }
        return 0;
 }
@@ -1666,6 +1842,13 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "RSS on eCPRI is not supported now");
        }
+       if ((item_flags & MLX5_FLOW_LAYER_MPLS) &&
+           !(item_flags &
+             (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) &&
+           rss->level > 1)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern");
        return 0;
 }
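(Reviewer note: a hypothetical pattern that satisfies the new constraint for
inner RSS over MPLS, i.e. rss->level > 1 with explicit inner headers.)

        /* Inner L2/L3 items follow MPLS, so inner RSS is accepted above. */
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_MPLS },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* inner L2 */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* inner L3 */
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };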
 
@@ -1783,7 +1966,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t priority_max = priv->config.flow_prio - 1;
+       uint32_t priority_max = priv->sh->flow_max_priority - 1;
 
        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
@@ -1969,6 +2152,10 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer should not follow VLAN");
+       if (item_flags & MLX5_FLOW_LAYER_GTP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L2 layer should not follow GTP");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -2395,12 +2582,16 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
 /**
  * Validate VXLAN item.
  *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] udp_dport
+ *   UDP destination port.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
+ * @param[in] attr
+ *   Flow rule attributes.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -2408,24 +2599,33 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+                             uint16_t udp_dport,
+                             const struct rte_flow_item *item,
                              uint64_t item_flags,
+                             const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
 {
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
        union vni {
                uint32_t vlan_id;
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
-
+       const struct rte_flow_item_vxlan nic_mask = {
+               .vni = "\xff\xff\xff",
+               .rsvd1 = 0xff,
+       };
+       const struct rte_flow_item_vxlan *valid_mask;
 
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
+       valid_mask = &rte_flow_item_vxlan_mask;
        /*
         * Verify only UDPv4 is present as defined in
         * https://tools.ietf.org/html/rfc7348
@@ -2436,9 +2636,21 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
                                          "no outer UDP layer found");
        if (!mask)
                mask = &rte_flow_item_vxlan_mask;
+
+       if (priv->sh->steering_format_version !=
+           MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+           !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
+               /* FDB domain & NIC domain non-zero group */
+               if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+                       valid_mask = &nic_mask;
+               /* Group zero in NIC domain */
+               if (!attr->group && !attr->transfer &&
+                   priv->sh->tunnel_header_0_1)
+                       valid_mask = &nic_mask;
+       }
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
-                (const uint8_t *)&rte_flow_item_vxlan_mask,
+                (const uint8_t *)valid_mask,
                 sizeof(struct rte_flow_item_vxlan),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
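(Reviewer note: with the extended nic_mask accepted above, a rule may also
match the last reserved byte of the VXLAN header; a hypothetical item sketch.)

        /* Matches VNI 0x123456 plus rsvd1 == 0x05; allowed by the nic_mask
         * above on devices with misc5/tunnel_header_0_1 support. */
        struct rte_flow_item_vxlan vxlan_spec = {
                .vni = "\x12\x34\x56",
                .rsvd1 = 0x05,
        };
        struct rte_flow_item_vxlan vxlan_mask = {
                .vni = "\xff\xff\xff",
                .rsvd1 = 0xff,
        };
        struct rte_flow_item vxlan_item = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .spec = &vxlan_spec,
                .mask = &vxlan_mask,
        };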
@@ -2920,9 +3132,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
                                          "MPLS not supported or"
                                          " disabled in firmware"
                                          " configuration.");
-       /* MPLS over IP, UDP, GRE is allowed */
-       if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
-                           MLX5_FLOW_LAYER_OUTER_L4_UDP |
+       /* MPLS over UDP and GRE is allowed. */
+       if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
                            MLX5_FLOW_LAYER_GRE |
                            MLX5_FLOW_LAYER_GRE_KEY)))
                return rte_flow_error_set(error, EINVAL,
@@ -3095,31 +3306,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
-/**
- * Release resource related QUEUE/RSS action split.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param flow
- *   Flow to release id's from.
- */
-static void
-flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
-                            struct rte_flow *flow)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t handle_idx;
-       struct mlx5_flow_handle *dev_handle;
-
-       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
-                      handle_idx, dev_handle, next)
-               if (dev_handle->split_flow_id &&
-                   !dev_handle->is_meter_flow_id)
-                       mlx5_ipool_free(priv->sh->ipool
-                                       [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-                                       dev_handle->split_flow_id);
-}
-
 static int
 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
                   const struct rte_flow_attr *attr __rte_unused,
@@ -3415,7 +3601,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       flow_mreg_split_qrss_release(dev, flow);
        MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        fops->destroy(dev, flow);
@@ -3449,6 +3634,41 @@ flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
        return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
 }
 
+/**
+ * Flow driver color tag rule API. This abstracts calling
+ * driver-specific functions. The parent flow (rte_flow) should have a
+ * driver type (drv_type). It creates the color tag rules for a hierarchical meter.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] src_port
+ *   The src port this extra rule should use.
+ * @param[in] item
+ *   The src port id match item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
+               struct rte_flow *flow,
+               struct mlx5_flow_meter_info *fm,
+               int32_t src_port,
+               const struct rte_flow_item *item,
+               struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+       enum mlx5_flow_drv_type type = flow->drv_type;
+
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       fops = flow_get_drv_ops(type);
+       return fops->meter_hierarchy_rule_create(dev, fm,
+                                               src_port, item, error);
+}
+
 /**
  * Get RSS action from the action list.
  *
@@ -3468,6 +3688,8 @@ flow_get_rss_action(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = NULL;
+       struct mlx5_meter_policy_action_container *acg;
+       struct mlx5_meter_policy_action_container *acy;
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -3492,12 +3714,29 @@ flow_get_rss_action(struct rte_eth_dev *dev,
                        const struct rte_flow_action_meter *mtr = actions->conf;
 
                        fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx);
-                       if (fm) {
+                       if (fm && !fm->def_policy) {
                                policy = mlx5_flow_meter_policy_find(dev,
                                                fm->policy_id, NULL);
-                               if (policy && policy->is_rss)
-                                       rss =
-                               policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+                               MLX5_ASSERT(policy);
+                               if (policy->is_hierarchy) {
+                                       policy =
+                               mlx5_flow_meter_hierarchy_get_final_policy(dev,
+                                                                       policy);
+                                       if (!policy)
+                                               return NULL;
+                               }
+                               if (policy->is_rss) {
+                                       acg =
+                                       &policy->act_cnt[RTE_COLOR_GREEN];
+                                       acy =
+                                       &policy->act_cnt[RTE_COLOR_YELLOW];
+                                       if (acg->fate_action ==
+                                           MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acg->rss->conf;
+                                       else if (acy->fate_action ==
+                                                MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acy->rss->conf;
+                               }
                        }
                        break;
                }
@@ -3526,8 +3765,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
        uint16_t offset = (age_idx >> 16) & UINT16_MAX;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
-       struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+       struct mlx5_aso_age_pool *pool;
 
+       rte_rwlock_read_lock(&mng->resize_rwl);
+       pool = mng->pools[pool_idx];
+       rte_rwlock_read_unlock(&mng->resize_rwl);
        return &pool->actions[offset - 1];
 }
 
@@ -3698,20 +3940,8 @@ flow_get_shared_rss_action(struct rte_eth_dev *dev,
 }
 
 static unsigned int
-find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+find_graph_root(uint32_t rss_level)
 {
-       const struct rte_flow_item *item;
-       unsigned int has_vlan = 0;
-
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-                       has_vlan = 1;
-                       break;
-               }
-       }
-       if (has_vlan)
-               return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
-                                      MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
        return rss_level < 2 ? MLX5_EXPANSION_ROOT :
                               MLX5_EXPANSION_ROOT_OUTER;
 }
@@ -3998,39 +4228,38 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
 
 /* Declare flow create/destroy prototype in advance. */
 static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 bool external, struct rte_flow_error *error);
 
 static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                  uint32_t flow_idx);
 
 int
-flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
-                     struct mlx5_hlist_entry *entry,
-                     uint64_t key, void *cb_ctx __rte_unused)
+flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
+                     struct mlx5_list_entry *entry, void *cb_ctx)
 {
+       struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_mreg_copy_resource *mcp_res =
-               container_of(entry, typeof(*mcp_res), hlist_ent);
+                              container_of(entry, typeof(*mcp_res), hlist_ent);
 
-       return mcp_res->mark_id != key;
+       return mcp_res->mark_id != *(uint32_t *)(ctx->data);
 }
 
-struct mlx5_hlist_entry *
-flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
-                      void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
 {
-       struct rte_eth_dev *dev = list->ctx;
+       struct rte_eth_dev *dev = tool_ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_mreg_copy_resource *mcp_res;
        struct rte_flow_error *error = ctx->error;
        uint32_t idx = 0;
        int ret;
-       uint32_t mark_id = key;
+       uint32_t mark_id = *(uint32_t *)(ctx->data);
        struct rte_flow_attr attr = {
                .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
                .ingress = 1,
@@ -4127,8 +4356,8 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
         * be applied, removed, or deleted in arbitrary order
         * by list traversal.
         */
-       mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
-                                        actions, false, error);
+       mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+                                       &attr, items, actions, false, error);
        if (!mcp_res->rix_flow) {
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
                return NULL;
@@ -4136,6 +4365,36 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
        return &mcp_res->hlist_ent;
 }
 
+struct mlx5_list_entry *
+flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+                     void *cb_ctx __rte_unused)
+{
+       struct rte_eth_dev *dev = tool_ctx;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       uint32_t idx = 0;
+
+       mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
+       if (!mcp_res) {
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+       memcpy(mcp_res, oentry, sizeof(*mcp_res));
+       mcp_res->idx = idx;
+       return &mcp_res->hlist_ent;
+}
+
+void
+flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+       struct mlx5_flow_mreg_copy_resource *mcp_res =
+                              container_of(entry, typeof(*mcp_res), hlist_ent);
+       struct rte_eth_dev *dev = tool_ctx;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+}
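For context, these clone callbacks serve the lcore-shared hash list: each lcore caches a private copy of a global entry, allocated by the clone callback and released by the clone-free callback. They are wired in where the mark-copy table is created; a rough sketch, assuming the mlx5_hlist_create() callback ordering (the name string and size macro are placeholders, not taken from this patch):

	priv->mreg_cp_tbl = mlx5_hlist_create("mreg_copy", MLX5_FLOW_MREG_HTABLE_SZ,
					      false, true, eth_dev,
					      flow_dv_mreg_create_cb,
					      flow_dv_mreg_match_cb,
					      flow_dv_mreg_remove_cb,
					      flow_dv_mreg_clone_cb,
					      flow_dv_mreg_clone_free_cb);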
+
 /**
  * Add a flow of copying flow metadata registers in RX_CP_TBL.
  *
@@ -4166,10 +4425,11 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                          struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_hlist_entry *entry;
+       struct mlx5_list_entry *entry;
        struct mlx5_flow_cb_ctx ctx = {
                .dev = dev,
                .error = error,
+               .data = &mark_id,
        };
 
        /* Check if already registered. */
@@ -4182,15 +4442,15 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
 }
 
 void
-flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
+flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
 {
        struct mlx5_flow_mreg_copy_resource *mcp_res =
-               container_of(entry, typeof(*mcp_res), hlist_ent);
-       struct rte_eth_dev *dev = list->ctx;
+                              container_of(entry, typeof(*mcp_res), hlist_ent);
+       struct rte_eth_dev *dev = tool_ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
 
        MLX5_ASSERT(mcp_res->rix_flow);
-       flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+       flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 }
 
@@ -4232,14 +4492,17 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
 static void
 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
 {
-       struct mlx5_hlist_entry *entry;
+       struct mlx5_list_entry *entry;
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_cb_ctx ctx;
+       uint32_t mark_id;
 
        /* Check if default flow is registered. */
        if (!priv->mreg_cp_tbl)
                return;
-       entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
-                                 MLX5_DEFAULT_COPY_ID, NULL);
+       mark_id = MLX5_DEFAULT_COPY_ID;
+       ctx.data = &mark_id;
+       entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
        if (!entry)
                return;
        mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
@@ -4265,6 +4528,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct mlx5_flow_cb_ctx ctx;
+       uint32_t mark_id;
 
        /* Check whether extensive metadata feature is engaged. */
        if (!priv->config.dv_flow_en ||
@@ -4276,9 +4541,11 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
         * Adding the default mreg copy flow may be called multiple times,
         * while the removal happens only once at stop. Avoid registering it twice.
         */
-       if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+       mark_id = MLX5_DEFAULT_COPY_ID;
+       ctx.data = &mark_id;
+       if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
                return 0;
-       mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
+       mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
        if (!mcp_res)
                return -rte_errno;
        return 0;
@@ -4564,8 +4831,8 @@ flow_create_split_inner(struct rte_eth_dev *dev,
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
- * @param[in] policy_id;
- *   Meter Policy id.
+ * @param wks
+ *   Pointer to thread flow work space.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -4579,32 +4846,23 @@ flow_create_split_inner(struct rte_eth_dev *dev,
 static struct mlx5_flow_meter_sub_policy *
 get_meter_sub_policy(struct rte_eth_dev *dev,
                     struct rte_flow *flow,
-                    uint32_t policy_id,
+                    struct mlx5_flow_workspace *wks,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item items[],
                     struct rte_flow_error *error)
 {
        struct mlx5_flow_meter_policy *policy;
+       struct mlx5_flow_meter_policy *final_policy;
        struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
 
-       policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL);
-       if (!policy) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                  "Failed to find Meter Policy.");
-               goto exit;
-       }
-       if (policy->is_rss ||
-               (policy->is_queue &&
-       !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
-               struct mlx5_flow_workspace *wks =
-                               mlx5_flow_get_thread_workspace();
+       policy = wks->policy;
+       final_policy = policy->is_hierarchy ? wks->final_policy : policy;
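+       /*
+        * For a hierarchy policy, the terminal policy decides the
+        * RSS/queue fate used to prepare the sub-policy below.
+        */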
+       if (final_policy->is_rss || final_policy->is_queue) {
                struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
                struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
                uint32_t i;
 
-               MLX5_ASSERT(wks);
-               /**
+               /*
                 * This is a tmp dev_flow,
                 * no need to register any matcher for it in translate.
                 */
@@ -4612,18 +4870,19 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
                for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
                        struct mlx5_flow dev_flow = {0};
                        struct mlx5_flow_handle dev_handle = { {0} };
+                       uint8_t fate = final_policy->act_cnt[i].fate_action;
 
-                       if (policy->is_rss) {
-                               const void *rss_act =
-                                       policy->act_cnt[i].rss->conf;
+                       if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
+                               const struct rte_flow_action_rss *rss_act =
+                                       final_policy->act_cnt[i].rss->conf;
                                struct rte_flow_action rss_actions[2] = {
                                        [0] = {
                                        .type = RTE_FLOW_ACTION_TYPE_RSS,
-                                       .conf = rss_act
+                                       .conf = rss_act,
                                        },
                                        [1] = {
                                        .type = RTE_FLOW_ACTION_TYPE_END,
-                                       .conf = NULL
+                                       .conf = NULL,
                                        }
                                };
 
@@ -4648,34 +4907,38 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
                                                rss_desc_v[i].hash_fields ?
                                                rss_desc_v[i].queue_num : 1;
                                rss_desc_v[i].tunnel =
-                                       !!(dev_flow.handle->layers &
-                                       MLX5_FLOW_LAYER_TUNNEL);
-                       } else {
+                                               !!(dev_flow.handle->layers &
+                                                  MLX5_FLOW_LAYER_TUNNEL);
+                               /* Take the RSS queues from the RSS action's config. */
+                               rss_desc_v[i].queue =
+                                       (uint16_t *)(uintptr_t)rss_act->queue;
+                               rss_desc[i] = &rss_desc_v[i];
+                       } else if (fate == MLX5_FLOW_FATE_QUEUE) {
                                /* This is queue action. */
                                rss_desc_v[i] = wks->rss_desc;
                                rss_desc_v[i].key_len = 0;
                                rss_desc_v[i].hash_fields = 0;
                                rss_desc_v[i].queue =
-                                       &policy->act_cnt[i].queue;
+                                       &final_policy->act_cnt[i].queue;
                                rss_desc_v[i].queue_num = 1;
+                               rss_desc[i] = &rss_desc_v[i];
+                       } else {
+                               rss_desc[i] = NULL;
                        }
-                       rss_desc[i] = &rss_desc_v[i];
                }
                sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
                                                flow, policy, rss_desc);
        } else {
                enum mlx5_meter_domain mtr_domain =
                        attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
-                               attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
-                                       MLX5_MTR_DOMAIN_INGRESS;
+                               (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+                                               MLX5_MTR_DOMAIN_INGRESS);
                sub_policy = policy->sub_policys[mtr_domain][0];
        }
-       if (!sub_policy) {
+       if (!sub_policy)
                rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                       "Failed to get meter sub-policy.");
-               goto exit;
-       }
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to get meter sub-policy.");
 exit:
        return sub_policy;
 }
@@ -4696,8 +4959,8 @@ exit:
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
- * @param[in] fm
- *   Pointer to flow meter structure.
+ * @param wks
+ *   Pointer to thread flow work space.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -4721,7 +4984,7 @@ exit:
 static int
 flow_meter_split_prep(struct rte_eth_dev *dev,
                      struct rte_flow *flow,
-                     struct mlx5_flow_meter_info *fm,
+                     struct mlx5_flow_workspace *wks,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item items[],
                      struct rte_flow_item sfx_items[],
@@ -4732,6 +4995,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
                      struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_info *fm = wks->fm;
        struct rte_flow_action *tag_action = NULL;
        struct rte_flow_item *tag_item;
        struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -4740,12 +5004,12 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        struct mlx5_rte_flow_item_tag *tag_item_spec;
        struct mlx5_rte_flow_item_tag *tag_item_mask;
        uint32_t tag_id = 0;
-       bool copy_vlan = false;
+       struct rte_flow_item *vlan_item_dst = NULL;
+       const struct rte_flow_item *vlan_item_src = NULL;
        struct rte_flow_action *hw_mtr_action;
        struct rte_flow_action *action_pre_head = NULL;
-       bool mtr_first = priv->sh->meter_aso_en &&
-                       (attr->egress ||
-                       (attr->transfer && priv->representor_id != UINT16_MAX));
+       int32_t flow_src_port = priv->representor_id;
+       bool mtr_first;
        uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
        uint8_t mtr_reg_bits = priv->mtr_reg_share ?
                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
@@ -4754,6 +5018,51 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        uint8_t flow_id_bits = 0;
        int shift;
 
+       /* Prepare the suffix subflow items. */
+       tag_item = sfx_items++;
+       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               struct mlx5_priv *port_priv;
+               const struct rte_flow_item_port_id *pid_v;
+               int item_type = items->type;
+
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_PORT_ID:
+                       pid_v = items->spec;
+                       MLX5_ASSERT(pid_v);
+                       port_priv = mlx5_port_to_eswitch_info(pid_v->id, false);
+                       if (!port_priv)
+                               return rte_flow_error_set(error,
+                                               rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+                                               pid_v,
+                                               "Failed to get port info.");
+                       flow_src_port = port_priv->representor_id;
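+                       /*
+                        * A non-default hierarchy policy matched on another
+                        * port needs its meter rules created for that port.
+                        */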
+                       if (!fm->def_policy && wks->policy->is_hierarchy &&
+                           flow_src_port != priv->representor_id) {
+                               if (flow_drv_mtr_hierarchy_rule_create(dev,
+                                                               flow, fm,
+                                                               flow_src_port,
+                                                               items,
+                                                               error))
+                                       return -rte_errno;
+                       }
+                       memcpy(sfx_items, items, sizeof(*sfx_items));
+                       sfx_items++;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       /* Whether to copy the VLAN item is decided below, while scanning the actions. */
+                       vlan_item_src = items;
+                       vlan_item_dst = sfx_items++;
+                       vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID;
+                       break;
+               default:
+                       break;
+               }
+       }
+       sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+       sfx_items++;
+       mtr_first = priv->sh->meter_aso_en &&
+               (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
        /* For ASO meter, meter must be before tag in TX direction. */
        if (mtr_first) {
                action_pre_head = actions_pre++;
@@ -4790,7 +5099,16 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
-                       copy_vlan = true;
+                       if (vlan_item_dst && vlan_item_src) {
+                               memcpy(vlan_item_dst, vlan_item_src,
+                                       sizeof(*vlan_item_dst));
+                               /*
+                                * Convert to internal match item, it is used
+                                * for vlan push and set vid.
+                                */
+                               vlan_item_dst->type = (enum rte_flow_item_type)
+                                               MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+                       }
                        break;
                default:
                        break;
@@ -4811,16 +5129,15 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
                struct mlx5_flow_tbl_data_entry *tbl_data;
 
                if (!fm->def_policy) {
-                       sub_policy = get_meter_sub_policy(dev, flow,
-                                                         fm->policy_id, attr,
-                                                         items, error);
+                       sub_policy = get_meter_sub_policy(dev, flow, wks,
+                                                         attr, items, error);
                        if (!sub_policy)
                                return -rte_errno;
                } else {
                        enum mlx5_meter_domain mtr_domain =
                        attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
-                               attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
-                                       MLX5_MTR_DOMAIN_INGRESS;
+                               (attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+                                               MLX5_MTR_DOMAIN_INGRESS);
 
                        sub_policy =
                        &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
@@ -4836,8 +5153,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        actions_pre++;
        if (!tag_action)
                return rte_flow_error_set(error, ENOMEM,
-                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                       "No tag action space.");
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "No tag action space.");
        if (!mtr_flow_id) {
                tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
                goto exit;
@@ -4862,34 +5179,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
                if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits)
                        priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits;
        }
-       /* Prepare the suffix subflow items. */
-       tag_item = sfx_items++;
-       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
-               int item_type = items->type;
-
-               switch (item_type) {
-               case RTE_FLOW_ITEM_TYPE_PORT_ID:
-                       memcpy(sfx_items, items, sizeof(*sfx_items));
-                       sfx_items++;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VLAN:
-                       if (copy_vlan) {
-                               memcpy(sfx_items, items, sizeof(*sfx_items));
-                               /*
-                                * Convert to internal match item, it is used
-                                * for vlan push and set vid.
-                                */
-                               sfx_items->type = (enum rte_flow_item_type)
-                                                 MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
-                               sfx_items++;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-       sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
-       sfx_items++;
        /* Build tag actions and items for meter_id/meter flow_id. */
        set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
        tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
@@ -4962,6 +5251,8 @@ exit:
  *   Pointer to the Q/RSS action.
  * @param[in] actions_n
  *   Number of original actions.
+ * @param[in] mtr_sfx
+ *   Indicate whether the flow resides in the meter suffix table.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -4974,7 +5265,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                          struct rte_flow_action *split_actions,
                          const struct rte_flow_action *actions,
                          const struct rte_flow_action *qrss,
-                         int actions_n, struct rte_flow_error *error)
+                         int actions_n, int mtr_sfx,
+                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -4989,15 +5281,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
         * - Add jump to mreg CP_TBL.
         * As a result, there will be one more action.
         */
-       ++actions_n;
        memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+       /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
+       ++actions_n;
        set_tag = (void *)(split_actions + actions_n);
        /*
-        * If tag action is not set to void(it means we are not the meter
-        * suffix flow), add the tag action. Since meter suffix flow already
-        * has the tag added.
+        * If we are not the meter suffix flow, add the tag action,
+        * since the meter suffix flow already has the tag added.
         */
-       if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+       if (!mtr_sfx) {
                /*
                 * Allocate the new subflow ID. This one is unique within
                 * device and not shared with representors. Otherwise,
@@ -5030,6 +5322,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                                MLX5_RTE_FLOW_ACTION_TYPE_TAG,
                        .conf = set_tag,
                };
+       } else {
+               /*
+                * If we are the meter suffix flow, the tag already exists.
+                * Set the QUEUE/RSS action to void.
+                */
+               split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
        }
        /* JUMP action to jump to mreg copy table (CP_TBL). */
        jump = (void *)(set_tag + 1);
@@ -5137,6 +5435,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
                        int *modify_after_mirror)
 {
        const struct rte_flow_action_sample *sample;
+       const struct rte_flow_action_raw_decap *decap;
        int actions_n = 0;
        uint32_t ratio = 0;
        int sub_type = 0;
@@ -5189,12 +5488,29 @@ flow_check_match_action(const struct rte_flow_action actions[],
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
-               case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                case RTE_FLOW_ACTION_TYPE_METER:
                        if (fdb_mirror)
                                *modify_after_mirror = 1;
                        break;
+               case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+                       decap = actions->conf;
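+                       /* Skip VOID actions to reach the action following RAW_DECAP. */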
+                       while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+                               ;
+                       actions_n++;
+                       if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+                               const struct rte_flow_action_raw_encap *encap =
+                                                               actions->conf;
+                               if (decap->size <=
+                                       MLX5_ENCAPSULATION_DECISION_SIZE &&
+                                   encap->size >
+                                       MLX5_ENCAPSULATION_DECISION_SIZE)
+                                       /* L3 encap. */
+                                       break;
+                       }
+                       if (fdb_mirror)
+                               *modify_after_mirror = 1;
+                       break;
                default:
                        break;
                }
@@ -5308,7 +5624,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
                /* Prepare the prefix tag action. */
                append_index++;
                set_tag = (void *)(actions_pre + actions_n + append_index);
-               ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+               ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
                if (ret < 0)
                        return ret;
                mlx5_ipool_malloc(priv->sh->ipool
@@ -5466,17 +5782,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no memory to split "
                                                  "metadata flow");
-               /*
-                * If we are the suffix flow of meter, tag already exist.
-                * Set the tag action to void.
-                */
-               if (mtr_sfx)
-                       ext_actions[qrss - actions].type =
-                                               RTE_FLOW_ACTION_TYPE_VOID;
-               else
-                       ext_actions[qrss - actions].type =
-                                               (enum rte_flow_action_type)
-                                               MLX5_RTE_FLOW_ACTION_TYPE_TAG;
                /*
                 * Create the new actions list with removed Q/RSS action
                 * and appended set tag and jump to register copy table
@@ -5484,7 +5789,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                 * in advance, because it is needed for set tag action.
                 */
                qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
-                                                   qrss, actions_n, error);
+                                                   qrss, actions_n,
+                                                   mtr_sfx, error);
                if (!mtr_sfx && !qrss_id) {
                        ret = -rte_errno;
                        goto exit;
@@ -5575,6 +5881,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                /* Add suffix subflow to execute Q/RSS. */
                flow_split_info->prefix_layers = layers;
                flow_split_info->prefix_mark = 0;
+               flow_split_info->table_id = 0;
                ret = flow_create_split_inner(dev, flow, &dev_flow,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
@@ -5697,6 +6004,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
        bool has_mtr = false;
        bool has_modify = false;
        bool set_mtr_reg = true;
+       bool is_mtr_hierarchy = false;
        uint32_t meter_id = 0;
        uint32_t mtr_idx = 0;
        uint32_t mtr_flow_id = 0;
@@ -5729,14 +6037,33 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                }
                MLX5_ASSERT(wks);
                wks->fm = fm;
+               if (!fm->def_policy) {
+                       wks->policy = mlx5_flow_meter_policy_find(dev,
+                                                                 fm->policy_id,
+                                                                 NULL);
+                       MLX5_ASSERT(wks->policy);
+                       if (wks->policy->is_hierarchy) {
+                               wks->final_policy =
+                               mlx5_flow_meter_hierarchy_get_final_policy(dev,
+                                                               wks->policy);
+                               if (!wks->final_policy)
+                                       return rte_flow_error_set(error,
+                                       EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                               "Failed to find terminal policy of hierarchy.");
+                               is_mtr_hierarchy = true;
+                       }
+               }
                /*
                 * If it isn't default-policy Meter, and
                 * 1. There's no action in flow to change
                 *    packet (modify/encap/decap etc.), OR
                 * 2. No drop count needed for this meter.
-                * no need to use regC to save meter id anymore.
+                * 3. It's not a meter hierarchy.
+                * Then there is no need to use regC to save the meter id anymore.
                 */
-               if (!fm->def_policy && (!has_modify || !fm->drop_cnt))
+               if (!fm->def_policy && !is_mtr_hierarchy &&
+                   (!has_modify || !fm->drop_cnt))
                        set_mtr_reg = false;
                /* Prefix actions: meter, decap, encap, tag, jump, end. */
                act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
@@ -5759,7 +6086,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                        pre_actions = sfx_actions + 1;
                else
                        pre_actions = sfx_actions + actions_n;
-               ret = flow_meter_split_prep(dev, flow, fm, &sfx_attr,
+               ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr,
                                            items, sfx_items, actions,
                                            sfx_actions, pre_actions,
                                            (set_mtr_reg ? &mtr_flow_id : NULL),
@@ -5900,8 +6227,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no memory to split "
                                                  "sample flow");
-               /* The representor_id is -1 for uplink. */
-               fdb_tx = (attr->transfer && priv->representor_id != -1);
+               /* The representor_id is UINT16_MAX for uplink. */
+               fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX);
                /*
                 * When reg_c_preserve is set, metadata registers Cx preserve
                 * their value even through packet duplication.
@@ -6093,7 +6420,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
  *   A flow index on success, 0 otherwise and rte_errno is set.
  */
 static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action original_actions[],
@@ -6108,7 +6435,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
        union {
                struct mlx5_flow_expand_rss buf;
-               uint8_t buffer[2048];
+               uint8_t buffer[4096];
        } expand_buffer;
        union {
                struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6161,7 +6488,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                                external, hairpin_flow, error);
        if (ret < 0)
                goto error_before_hairpin_split;
-       flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+       flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
        if (!flow) {
                rte_errno = ENOMEM;
                goto error_before_hairpin_split;
@@ -6192,14 +6519,14 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                 * mlx5_flow_hashfields_adjust() in advance.
                 */
                rss_desc->level = rss->level;
-               /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-               rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+               /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+               rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        }
        flow->dev_handles = 0;
        if (rss && rss->types) {
                unsigned int graph_root;
 
-               graph_root = find_graph_root(items, rss->level);
+               graph_root = find_graph_root(rss->level);
                ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
                                           items, rss->types,
                                           mlx5_support_expansion, graph_root);
@@ -6291,12 +6618,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                if (ret < 0)
                        goto error;
        }
-       if (list) {
-               rte_spinlock_lock(&priv->flow_list_lock);
-               ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
-                            flow, next);
-               rte_spinlock_unlock(&priv->flow_list_lock);
-       }
+       flow->type = type;
        flow_rxq_flags_set(dev, flow);
        rte_free(translated_actions);
        tunnel = flow_tunnel_from_rule(wks->flows);
@@ -6318,7 +6640,7 @@ error:
                        mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
                        rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
+       mlx5_ipool_free(priv->flows[type], idx);
        rte_errno = ret; /* Restore rte_errno. */
@@ -6370,14 +6692,87 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
                        .type = RTE_FLOW_ACTION_TYPE_END,
                },
        };
-       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_error error;
 
-       return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+       return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
                                                   &attr, &pattern,
                                                   actions, false, &error);
 }
 
+/**
+ * Create a dedicated flow rule on e-switch table 1 which matches the ESW
+ * manager and SQ number, and directs all packets to the peer vport.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param txq
+ *   Txq index.
+ *
+ * @return
+ *   Flow ID on success, 0 otherwise and rte_errno is set.
+ */
+uint32_t
+mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
+{
+       struct rte_flow_attr attr = {
+               .group = 0,
+               .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
+               .ingress = 1,
+               .egress = 0,
+               .transfer = 1,
+       };
+       struct rte_flow_item_port_id port_spec = {
+               .id = MLX5_PORT_ESW_MGR,
+       };
+       struct mlx5_rte_flow_item_tx_queue txq_spec = {
+               .queue = txq,
+       };
+       struct rte_flow_item pattern[] = {
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
+                       .spec = &port_spec,
+               },
+               {
+                       .type = (enum rte_flow_item_type)
+                               MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+                       .spec = &txq_spec,
+               },
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_END,
+               },
+       };
+       struct rte_flow_action_jump jump = {
+               .group = 1,
+       };
+       struct rte_flow_action_port_id port = {
+               .id = dev->data->port_id,
+       };
+       struct rte_flow_action actions[] = {
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = &jump,
+               },
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               },
+       };
+       struct rte_flow_error error;
+
+       /*
+        * Create the group 0, highest priority jump flow.
+        * It matches the Tx queue to bypass kernel packets.
+        */
+       if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
+                            false, &error) == 0)
+               return 0;
+       /* Create group 1, lowest priority redirect flow for txq. */
+       attr.group = 1;
+       actions[0].conf = &port;
+       actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
+       return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
+                               actions, false, &error);
+}
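A caller is expected to install this rule pair once per Tx queue when enabling traffic; a hypothetical invocation loop (the iteration and the log message are illustrative only, not from this patch):

	uint32_t q;

	for (q = 0; q < dev->data->nb_tx_queues; q++)
		if (!mlx5_flow_create_devx_sq_miss_flow(dev, q))
			DRV_LOG(WARNING,
				"port %u: SQ miss flow for Tx queue %u not created",
				dev->data->port_id, q);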
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -6425,8 +6820,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
        /*
         * If the device is not started yet, it is not allowed to create a
         * flow from the application. PMD default flows and traffic control flows
@@ -6442,8 +6835,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                return NULL;
        }
 
-       return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
-                                 attr, items, actions, true, error);
+       return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
+                                                  attr, items, actions,
+                                                  true, error);
 }
 
 /**
@@ -6451,24 +6845,19 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list. If this parameter NULL,
- *   there is no flow removal from the list. Be noted that as
- *   flow is add to the indexed list, memory of the indexed
- *   list points to maybe changed as flow destroyed.
  * @param[in] flow_idx
  *   Index of flow to destroy.
  */
 static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                  uint32_t flow_idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
-                                              [MLX5_IPOOL_RTE_FLOW], flow_idx);
+       struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
 
        if (!flow)
                return;
+       MLX5_ASSERT(flow->type == type);
        /*
         * Update RX queue flags only if port is started, otherwise it is
         * already clean.
@@ -6476,12 +6865,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
        if (dev->data->dev_started)
                flow_rxq_flags_trim(dev, flow);
        flow_drv_destroy(dev, flow);
-       if (list) {
-               rte_spinlock_lock(&priv->flow_list_lock);
-               ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
-                            flow_idx, flow, next);
-               rte_spinlock_unlock(&priv->flow_list_lock);
-       }
        if (flow->tunnel) {
                struct mlx5_flow_tunnel *tunnel;
 
@@ -6491,7 +6874,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
                        mlx5_flow_tunnel_free(dev, tunnel);
        }
        flow_mreg_del_copy_action(dev, flow);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
+       mlx5_ipool_free(priv->flows[type], flow_idx);
 }
 
 /**
@@ -6499,18 +6882,21 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list.
+ * @param type
+ *   Flow type to be flushed.
  * @param active
  *   If flushing is called actively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+                    bool active)
 {
-       uint32_t num_flushed = 0;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t num_flushed = 0, fidx = 1;
+       struct rte_flow *flow;
 
-       while (*list) {
-               flow_list_destroy(dev, list, *list);
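+       /* Walk all allocated flows of this type in the ipool and destroy them. */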
+       MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
+               flow_list_destroy(dev, type, fidx);
                num_flushed++;
        }
        if (active) {
@@ -6682,18 +7068,19 @@ mlx5_flow_pop_thread_workspace(void)
  * @return the number of flows not released.
  */
 int
 mlx5_flow_verify(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
-       uint32_t idx;
-       int ret = 0;
+       uint32_t idx = 0;
+       int ret = 0, i;
 
-       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
-                     flow, next) {
-               DRV_LOG(DEBUG, "port %u flow %p still referenced",
-                       dev->data->port_id, (void *)flow);
-               ++ret;
+       for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+               MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
+                       DRV_LOG(DEBUG, "port %u flow %p still referenced",
+                               dev->data->port_id, (void *)flow);
+                       ret++;
+               }
        }
        return ret;
 }
@@ -6713,7 +7100,6 @@ int
 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
                            uint32_t queue)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .egress = 1,
                .priority = 0,
@@ -6746,8 +7132,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
        actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
        actions[0].conf = &jump;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;
-       flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-                               &attr, items, actions, false, &error);
+       flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+                                   &attr, items, actions, false, &error);
        if (!flow_idx) {
                DRV_LOG(DEBUG,
                        "Failed to create ctrl flow: rte_errno(%d),"
@@ -6832,12 +7218,12 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        if (!priv->reta_idx_n || !priv->rxqs_n) {
                return 0;
        }
-       if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+       if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                action_rss.types = 0;
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
-       flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-                               &attr, items, actions, false, &error);
+       flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+                                   &attr, items, actions, false, &error);
        if (!flow_idx)
                return -rte_errno;
        return 0;
@@ -6878,7 +7264,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
 int
 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
        /*
         * The LACP matching is done by only using ether type since using
         * a multicast dst mac causes kernel to give low priority to this flow.
@@ -6912,8 +7297,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
                },
        };
        struct rte_flow_error error;
-       uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-                               &attr, items, actions, false, &error);
+       uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+                                       &attr, items, actions,
+                                       false, &error);
 
        if (!flow_idx)
                return -rte_errno;
@@ -6931,9 +7317,8 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error __rte_unused)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
+       flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+                               (uintptr_t)(void *)flow);
        return 0;
 }
 
@@ -6947,9 +7332,7 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error __rte_unused)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       mlx5_flow_list_flush(dev, &priv->flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
        return 0;
 }
 
@@ -7000,8 +7383,7 @@ flow_drv_query(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct mlx5_flow_driver_ops *fops;
-       struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
-                                              [MLX5_IPOOL_RTE_FLOW],
+       struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
                                               flow_idx);
        enum mlx5_flow_drv_type ftype;
 
@@ -7087,14 +7469,14 @@ mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
                        struct rte_flow_attr *attr,
                        bool *is_rss,
                        uint8_t *domain_bitmap,
-                       bool *is_def_policy,
+                       uint8_t *policy_mode,
                        struct rte_mtr_error *error)
 {
        const struct mlx5_flow_driver_ops *fops;
 
        fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-       return fops->validate_mtr_acts(dev, actions, attr,
-                       is_rss, domain_bitmap, is_def_policy, error);
+       return fops->validate_mtr_acts(dev, actions, attr, is_rss,
+                                      domain_bitmap, policy_mode, error);
 }
 
 /**
@@ -7427,7 +7809,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
 static int
 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_devx_mkey_attr mkey_attr;
        struct mlx5_counter_stats_mem_mng *mem_mng;
        volatile struct flow_counter_stats *raw_data;
        int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -7437,6 +7818,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
                        sizeof(struct mlx5_counter_stats_mem_mng);
        size_t pgsize = rte_mem_page_size();
        uint8_t *mem;
+       int ret;
        int i;
 
        if (pgsize == (size_t)-1) {
@@ -7451,23 +7833,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        }
        mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
        size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-       mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
-                                                IBV_ACCESS_LOCAL_WRITE);
-       if (!mem_mng->umem) {
-               rte_errno = errno;
-               mlx5_free(mem);
-               return -rte_errno;
-       }
-       memset(&mkey_attr, 0, sizeof(mkey_attr));
-       mkey_attr.addr = (uintptr_t)mem;
-       mkey_attr.size = size;
-       mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-       mkey_attr.pd = sh->pdn;
-       mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
-       mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
-       if (!mem_mng->dm) {
-               mlx5_os_umem_dereg(mem_mng->umem);
+       ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
+                                         sh->cdev->pdn, mem, size,
+                                         &mem_mng->wm);
+       if (ret) {
                rte_errno = errno;
                mlx5_free(mem);
                return -rte_errno;
@@ -7586,7 +7955,7 @@ mlx5_flow_query_alarm(void *arg)
        ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
                                               MLX5_COUNTERS_PER_POOL,
                                               NULL, NULL,
-                                              pool->raw_hw->mem_mng->dm->id,
+                                              pool->raw_hw->mem_mng->wm.lkey,
                                               (void *)(uintptr_t)
                                               pool->raw_hw->data,
                                               sh->devx_comp,
@@ -7822,13 +8191,12 @@ int
 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
        enum modify_reg idx;
        int n = 0;
 
        /* reg_c[0] and reg_c[1] are reserved. */
-       config->flow_mreg_c[n++] = REG_C_0;
-       config->flow_mreg_c[n++] = REG_C_1;
+       priv->sh->flow_mreg_c[n++] = REG_C_0;
+       priv->sh->flow_mreg_c[n++] = REG_C_1;
        /* Discover availability of other reg_c's. */
        for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
                struct rte_flow_attr attr = {
@@ -7864,23 +8232,295 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!config->dv_flow_en)
+               if (!priv->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
-               flow_idx = flow_list_create(dev, NULL, &attr, items,
-                                           actions, false, &error);
-               flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+               flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
+                                       items, actions, false, &error);
+               flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
                                      flow_idx);
                if (!flow)
                        continue;
-               config->flow_mreg_c[n++] = idx;
-               flow_list_destroy(dev, NULL, flow_idx);
+               priv->sh->flow_mreg_c[n++] = idx;
+               flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
-               config->flow_mreg_c[n] = REG_NON;
+               priv->sh->flow_mreg_c[n] = REG_NON;
+       priv->sh->metadata_regc_check_flag = 1;
        return 0;
 }
 
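+/**
+ * Format one dump record (modify header, packet reformat or counter) as a
+ * text line and write it to the dump file; hex payload bytes are appended
+ * for the first two record types.
+ */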
+int
+save_dump_file(const uint8_t *data, uint32_t size,
+       uint32_t type, uint64_t id, void *arg, FILE *file)
+{
+       char line[BUF_SIZE];
+       uint32_t out = 0;
+       uint32_t k;
+       uint32_t actions_num;
+       struct rte_flow_query_count *count;
+
+       memset(line, 0, BUF_SIZE);
+       switch (type) {
+       case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
+               actions_num = *(uint32_t *)(arg);
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
+                               type, id, actions_num);
+               break;
+       case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
+                               type, id);
+               break;
+       case DR_DUMP_REC_TYPE_PMD_COUNTER:
+               count = (struct rte_flow_query_count *)arg;
+               fprintf(file,
+                       "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
+                       type, id, count->hits, count->bytes);
+               return 0;
+       default:
+               return -1;
+       }
+
+       for (k = 0; k < size; k++) {
+               /* Make sure we do not overrun the line buffer length. */
+               if (out >= BUF_SIZE - 4) {
+                       line[out] = '\0';
+                       break;
+               }
+               out += snprintf(line + out, BUF_SIZE - out, "%02x",
+                               (data[k]) & 0xff);
+       }
+       fprintf(file, "%s\n", line);
+       return 0;
+}
+
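+/**
+ * Query the counter attached to a flow, if any.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */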
+int
+mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
+       struct rte_flow_query_count *count, struct rte_flow_error *error)
+{
+       struct rte_flow_action action[2];
+       enum mlx5_flow_drv_type ftype;
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (!flow) {
+               return rte_flow_error_set(error, ENOENT,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "invalid flow handle");
+       }
+       action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
+       action[1].type = RTE_FLOW_ACTION_TYPE_END;
+       if (flow->counter) {
+               memset(count, 0, sizeof(struct rte_flow_query_count));
+               ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
+               MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
+                                               ftype < MLX5_FLOW_TYPE_MAX);
+               fops = flow_get_drv_ops(ftype);
+               return fops->query(dev, flow, action, count, error);
+       }
+       return -1;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Dump one flow's modify header, encap/decap and counter data to file.
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
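+ * @param[in] flow
+ *   The pointer to the flow.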
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
+       struct rte_flow *flow, FILE *file,
+       struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
+       struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+       uint32_t handle_idx;
+       struct mlx5_flow_handle *dh;
+       struct rte_flow_query_count count;
+       uint32_t actions_num;
+       const uint8_t *data;
+       size_t size;
+       uint64_t id;
+       uint32_t type;
+       void *action = NULL;
+
+       if (!flow) {
+               return rte_flow_error_set(error, ENOENT,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "invalid flow handle");
+       }
+       handle_idx = flow->dev_handles;
+       while (handle_idx) {
+               dh = mlx5_ipool_get(priv->sh->ipool
+                               [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+               /* A missing handle cannot advance the loop; stop iterating. */
+               if (!dh)
+                       break;
+               handle_idx = dh->next.next;
+
+               /* query counter */
+               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+               flow_dv_query_count_ptr(dev, flow->counter,
+                                               &action, error);
+               if (action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+               }
+               /* Get modify_hdr and encap_decap buf from ipools. */
+               encap_decap = NULL;
+               modify_hdr = dh->dvh.modify_hdr;
+
+               if (dh->dvh.rix_encap_decap) {
+                       encap_decap = mlx5_ipool_get(priv->sh->ipool
+                                               [MLX5_IPOOL_DECAP_ENCAP],
+                                               dh->dvh.rix_encap_decap);
+               }
+               if (modify_hdr) {
+                       data = (const uint8_t *)modify_hdr->actions;
+                       size = (size_t)(modify_hdr->actions_num) * 8;
+                       id = (uint64_t)(uintptr_t)modify_hdr->action;
+                       actions_num = modify_hdr->actions_num;
+                       type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+                       save_dump_file(data, size, type, id,
+                                               (void *)(&actions_num), file);
+               }
+               if (encap_decap) {
+                       data = encap_decap->buf;
+                       size = encap_decap->size;
+                       id = (uint64_t)(uintptr_t)encap_decap->action;
+                       type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+                       save_dump_file(data, size, type,
+                                               id, NULL, file);
+               }
+       }
+       return 0;
+}
+
+/**
+ * Dump all shared encap/decap, modify header and counter data to file.
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+static int
+mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+       FILE *file, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist *h;
+       struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+       struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+       struct rte_flow_query_count count;
+       uint32_t actions_num;
+       const uint8_t *data;
+       size_t size;
+       uint64_t id;
+       uint32_t type;
+       uint32_t i;
+       uint32_t j;
+       struct mlx5_list_inconst *l_inconst;
+       struct mlx5_list_entry *e;
+       int lcore_index;
+       struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+       uint32_t max;
+       void *action;
+
+       /* The encap/decap hlist is lcore-shared; read the global cache slot. */
+       i = MLX5_LIST_GLOBAL;
+       h = sh->encaps_decaps;
+       if (h) {
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               encap_decap =
+                               (struct mlx5_flow_dv_encap_decap_resource *)e;
+                               data = encap_decap->buf;
+                               size = encap_decap->size;
+                               id = (uint64_t)(uintptr_t)encap_decap->action;
+                               type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+                               save_dump_file(data, size, type,
+                                       id, NULL, file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+       }
+
+       /* Get modify header resources. */
+       h = sh->modify_cmds;
+       if (h) {
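+               /*
+                * Modify header resources sit in per-lcore caches; a
+                * thread with no lcore slot (rte_lcore_index() == -1)
+                * falls back to the shared MLX5_LIST_NLCORE cache under
+                * its spinlock.
+                */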
+               lcore_index = rte_lcore_index(rte_lcore_id());
+               if (unlikely(lcore_index == -1)) {
+                       lcore_index = MLX5_LIST_NLCORE;
+                       rte_spinlock_lock(&h->l_const.lcore_lock);
+               }
+               i = lcore_index;
+
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               modify_hdr =
+                               (struct mlx5_flow_dv_modify_hdr_resource *)e;
+                               data = (const uint8_t *)modify_hdr->actions;
+                               size = (size_t)(modify_hdr->actions_num) * 8;
+                               actions_num = modify_hdr->actions_num;
+                               id = (uint64_t)(uintptr_t)modify_hdr->action;
+                               type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+                               save_dump_file(data, size, type, id,
+                                               (void *)(&actions_num), file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+
+               if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+                       rte_spinlock_unlock(&h->l_const.lcore_lock);
+       }
+
+       /* Dump all allocated flow counters. */
+       MLX5_ASSERT(cmng->n_valid <= cmng->n);
+       max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
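+       /* Counter indexes are 1-based; each pool holds
+        * MLX5_COUNTERS_PER_POOL counters, so valid indexes span
+        * [1, MLX5_COUNTERS_PER_POOL * cmng->n_valid].
+        */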
+       for (j = 1; j <= max; j++) {
+               action = NULL;
+               flow_dv_query_count_ptr(dev, j, &action, error);
+               if (action) {
+                       if (!flow_dv_query_count(dev, j, &count, error)) {
+                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                               id = (uint64_t)(uintptr_t)action;
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+                       }
+               }
+       }
+       return 0;
+}
+#endif
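+
+/*
+ * Usage sketch (not part of this patch): applications reach the dump
+ * helpers above through the generic rte_flow API. Assuming port_id is
+ * a configured mlx5 port and flow is a handle from rte_flow_create():
+ *
+ *     FILE *f = fopen("/tmp/mlx5_dump.txt", "w");
+ *     struct rte_flow_error err;
+ *
+ *     rte_flow_dev_dump(port_id, NULL, f, &err); // all flows, SH data
+ *     rte_flow_dev_dump(port_id, flow, f, &err); // one flow, ipool data
+ *     fclose(f);
+ */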
+
 /**
  * Dump flow raw hw data to file
  *
@@ -7913,16 +8553,24 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        }
 
        /* dump all */
-       if (!flow_idx)
+       if (!flow_idx) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+               if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+                       return -EINVAL;
+#endif
                return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
                                        sh->rx_domain,
                                        sh->tx_domain, file);
+       }
        /* dump one */
-       flow = mlx5_ipool_get(priv->sh->ipool
-                       [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+       flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
+                       (uintptr_t)(void *)flow_idx);
        if (!flow)
-               return -ENOENT;
+               return -EINVAL;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
        handle_idx = flow->dev_handles;
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
@@ -8208,6 +8856,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+               const char *message;
+               uint32_t queue_idx;
+
+               ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+                                              ind_tbl->queues_n,
+                                              &message, &queue_idx);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+                               dev->data->port_id, ind_tbl->queues[queue_idx],
+                               message);
+                       break;
+               }
+       }
+       if (ret != 0)
+               return ret;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not attach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
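+       /*
+        * Roll back: detach every table attached before the failing
+        * entry; shared_rss_last marks the entry whose attach failed.
+        */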
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not detach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool =
+                       priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+       struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+       int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not detach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       shared_rss_last = shared_rss;
+       ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+               struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+               if (shared_rss == shared_rss_last)
+                       break;
+               if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not attach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
+
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -8342,7 +9100,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                                (error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                NULL, "invalid port configuration");
-               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+               if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                        ctx->action_rss.types = 0;
                for (i = 0; i != priv->reta_idx_n; ++i)
                        ctx->queue[i] = (*priv->reta_idx)[i];
@@ -8395,7 +9153,7 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
-       struct mlx5_hlist_entry *he;
+       struct mlx5_list_entry *he;
        union tunnel_offload_mark mbits = { .val = mark };
        union mlx5_flow_tbl_key table_key = {
                {
@@ -8407,16 +9165,20 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
                        .is_egress = 0,
                }
        };
-       he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+       struct mlx5_flow_cb_ctx ctx = {
+               .data = &table_key.v64,
+       };
+
+       he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
        return he ?
               container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
 }
 
 static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
-                                  struct mlx5_hlist_entry *entry)
+mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
+                                  struct mlx5_list_entry *entry)
 {
-       struct mlx5_dev_ctx_shared *sh = list->ctx;
+       struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
 
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
@@ -8425,26 +9187,26 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
 }
 
 static int
-mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
-                                 struct mlx5_hlist_entry *entry,
-                                 uint64_t key, void *cb_ctx __rte_unused)
+mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
+                                 struct mlx5_list_entry *entry, void *cb_ctx)
 {
+       struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        union tunnel_tbl_key tbl = {
-               .val = key,
+               .val = *(uint64_t *)(ctx->data),
        };
        struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
 
        return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
 }
 
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
-                                  void *ctx __rte_unused)
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
 {
-       struct mlx5_dev_ctx_shared *sh = list->ctx;
+       struct mlx5_dev_ctx_shared *sh = tool_ctx;
+       struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct tunnel_tbl_entry *tte;
        union tunnel_tbl_key tbl = {
-               .val = key,
+               .val = *(uint64_t *)(ctx->data),
        };
 
        tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
@@ -8473,13 +9235,36 @@ err:
        return NULL;
 }
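+
+/*
+ * mlx5_list entries may be cached per lcore; the clone callback
+ * duplicates an existing entry for a local cache and clone_free
+ * releases such a duplicate, hence the plain malloc/memcpy pair below.
+ */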
 
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
+                                 struct mlx5_list_entry *oentry,
+                                 void *cb_ctx __rte_unused)
+{
+       struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
+                                                  0, SOCKET_ID_ANY);
+
+       if (!tte)
+               return NULL;
+       memcpy(tte, oentry, sizeof(*tte));
+       return &tte->hash;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
+                                      struct mlx5_list_entry *entry)
+{
+       struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+       mlx5_free(tte);
+}
+
 static uint32_t
 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
                                const struct mlx5_flow_tunnel *tunnel,
                                uint32_t group, uint32_t *table,
                                struct rte_flow_error *error)
 {
-       struct mlx5_hlist_entry *he;
+       struct mlx5_list_entry *he;
        struct tunnel_tbl_entry *tte;
        union tunnel_tbl_key key = {
                .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
@@ -8487,9 +9272,12 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
        };
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct mlx5_hlist *group_hash;
+       struct mlx5_flow_cb_ctx ctx = {
+               .data = &key.val,
+       };
 
        group_hash = tunnel ? tunnel->groups : thub->groups;
-       he = mlx5_hlist_register(group_hash, key.val, NULL);
+       he = mlx5_hlist_register(group_hash, key.val, &ctx);
        if (!he)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -8603,15 +9391,17 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
                DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
                return NULL;
        }
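+       /*
+        * 64 hash buckets; priv->sh is the context handed back to the
+        * create/match/remove/clone callbacks as tool_ctx.
+        */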
-       tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+       tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true,
+                                          priv->sh,
                                           mlx5_flow_tunnel_grp2tbl_create_cb,
                                           mlx5_flow_tunnel_grp2tbl_match_cb,
-                                          mlx5_flow_tunnel_grp2tbl_remove_cb);
+                                          mlx5_flow_tunnel_grp2tbl_remove_cb,
+                                          mlx5_flow_tunnel_grp2tbl_clone_cb,
+                                          mlx5_flow_tunnel_grp2tbl_clone_free_cb);
        if (!tunnel->groups) {
                mlx5_ipool_free(ipool, id);
                return NULL;
        }
-       tunnel->groups->ctx = priv->sh;
        /* initiate new PMD tunnel */
        memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
        tunnel->tunnel_id = id;
@@ -8710,16 +9500,17 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
                return -ENOMEM;
        LIST_INIT(&thub->tunnels);
        rte_spinlock_init(&thub->sl);
-       thub->groups = mlx5_hlist_create("flow groups",
-                                        rte_align32pow2(MLX5_MAX_TABLES), 0,
-                                        0, mlx5_flow_tunnel_grp2tbl_create_cb,
+       thub->groups = mlx5_hlist_create("flow groups", 64,
+                                        false, true, sh,
+                                        mlx5_flow_tunnel_grp2tbl_create_cb,
                                         mlx5_flow_tunnel_grp2tbl_match_cb,
-                                        mlx5_flow_tunnel_grp2tbl_remove_cb);
+                                        mlx5_flow_tunnel_grp2tbl_remove_cb,
+                                        mlx5_flow_tunnel_grp2tbl_clone_cb,
+                                        mlx5_flow_tunnel_grp2tbl_clone_free_cb);
        if (!thub->groups) {
                err = -rte_errno;
                goto err;
        }
-       thub->groups->ctx = sh;
        sh->tunnel_hub = thub;
 
        return 0;
@@ -8732,30 +9523,37 @@ err:
        return err;
 }
 
-static inline bool
+static inline int
 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                          struct rte_flow_tunnel *tunnel,
-                         const char *err_msg)
+                         struct rte_flow_error *error)
 {
-       err_msg = NULL;
-       if (!is_tunnel_offload_active(dev)) {
-               err_msg = "tunnel offload was not activated";
-               goto out;
-       } else if (!tunnel) {
-               err_msg = "no application tunnel";
-               goto out;
-       }
+       struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (!priv->config.dv_flow_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "flow DV interface is off");
+       if (!is_tunnel_offload_active(dev))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "tunnel offload was not activated");
+       if (!tunnel)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "no application tunnel");
        switch (tunnel->type) {
        default:
-               err_msg = "unsupported tunnel type";
-               goto out;
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "unsupported tunnel type");
        case RTE_FLOW_ITEM_TYPE_VXLAN:
+       case RTE_FLOW_ITEM_TYPE_GRE:
+       case RTE_FLOW_ITEM_TYPE_NVGRE:
+       case RTE_FLOW_ITEM_TYPE_GENEVE:
                break;
        }
-
-out:
-       return !err_msg;
+       return 0;
 }
 
 static int
@@ -8765,15 +9563,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    uint32_t *num_of_actions,
                    struct rte_flow_error *error)
 {
-       int ret;
        struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+       int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-                                         err_msg);
+       if (ret)
+               return ret;
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
@@ -8792,15 +9586,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error)
 {
-       int ret;
        struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+       int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         err_msg);
+       if (ret)
+               return ret;
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
@@ -8903,7 +9693,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 {
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
-       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+       const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 
        if (!is_tunnel_offload_active(dev)) {
                info->flags = 0;
@@ -9026,6 +9816,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item creation unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       if (!fops->item_create) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return NULL;
+       }
+       return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item release unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       if (!fops->item_release) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return -rte_errno;
+       }
+       return fops->item_release(dev, handle, error);
+}
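+
+/*
+ * These callbacks back the generic flex item API; an illustrative
+ * application-side sequence:
+ *
+ *     struct rte_flow_item_flex_handle *h =
+ *             rte_flow_flex_item_create(port_id, &conf, &error);
+ *     if (h != NULL)
+ *             rte_flow_flex_item_release(port_id, h, &error);
+ */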
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
@@ -9044,3 +9873,143 @@ mlx5_dbg__print_pattern(const struct rte_flow_item *item)
        }
        printf("END\n");
 }
+
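+/*
+ * Check whether a UDP item can only match the standard VXLAN port
+ * (IANA 4789, MLX5_UDP_PORT_VXLAN): true when the destination port is
+ * left unspecified or is masked to that exact value.
+ */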
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+       const struct rte_flow_item_udp *spec = udp_item->spec;
+       const struct rte_flow_item_udp *mask = udp_item->mask;
+       uint16_t udp_dport = 0;
+
+       if (spec != NULL) {
+               if (!mask)
+                       mask = &rte_flow_item_udp_mask;
+               udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+                               mask->hdr.dst_port);
+       }
+       return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
+
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node)
+{
+       const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+                       node != NULL &&
+                       node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+               /*
+                * The expansion node is VXLAN and it is also the last
+                * expandable item in the pattern, so need to continue
+                * expansion of the inner tunnel.
+                */
+               MLX5_ASSERT(item_idx > 0);
+               prev_item = pattern + item_idx - 1;
+               MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+               if (mlx5_flow_is_std_vxlan_port(prev_item))
+                       return &graph[MLX5_EXPANSION_STD_VXLAN];
+               return &graph[MLX5_EXPANSION_L3_VXLAN];
+       }
+       return node;
+}
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+       { 9, 10, 11 }, { 12, 13, 14 },
+};
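+
+/*
+ * Example: with 16 Verbs priorities (priority_map_5), a rule with base
+ * priority 1 and item sub-priority 2 maps to Verbs priority
+ * priority_map_5[1][2] == 5.
+ */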
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ *   Ethernet device.
+ *
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+       static const uint16_t vprio[] = {8, 16};
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       const struct mlx5_flow_driver_ops *fops;
+       enum mlx5_flow_drv_type type;
+       int ret;
+
+       type = mlx5_flow_os_get_type();
+       if (type == MLX5_FLOW_TYPE_MAX) {
+               type = MLX5_FLOW_TYPE_VERBS;
+               if (priv->sh->devx && priv->config.dv_flow_en)
+                       type = MLX5_FLOW_TYPE_DV;
+       }
+       fops = flow_get_drv_ops(type);
+       if (fops->discover_priorities == NULL) {
+               DRV_LOG(ERR, "Priority discovery not supported");
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+       if (ret < 0)
+               return ret;
+       switch (ret) {
+       case 8:
+               ret = RTE_DIM(priority_map_3);
+               break;
+       case 16:
+               ret = RTE_DIM(priority_map_5);
+               break;
+       default:
+               rte_errno = ENOTSUP;
+               DRV_LOG(ERR,
+                       "port %u maximum priority: %d expected 8/16",
+                       dev->data->port_id, ret);
+               return -rte_errno;
+       }
+       DRV_LOG(INFO, "port %u supported flow priorities:"
+               " 0-%d for ingress or egress root table,"
+               " 0-%d for non-root table or transfer root table.",
+               dev->data->port_id, ret - 2,
+               MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+       return ret;
+}
+
+/**
+ * Adjust flow priority based on the highest layer and the request priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+                         uint32_t subpriority)
+{
+       uint32_t res = 0;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       switch (priv->sh->flow_max_priority) {
+       case RTE_DIM(priority_map_3):
+               res = priority_map_3[priority][subpriority];
+               break;
+       case RTE_DIM(priority_map_5):
+               res = priority_map_5[priority][subpriority];
+               break;
+       }
        return res;
+}