[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index 52047db..d7243a8 100644
 #include <string.h>
 #include <stdbool.h>
 
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
 #include <rte_common.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_eal_paging.h>
 #include <rte_flow.h>
 #include <rte_cycles.h>
 #include <rte_flow_driver.h>
 #include <rte_malloc.h>
 #include <rte_ip.h>
 
+#include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include <mlx5_prm.h>
 #include <mlx5_malloc.h>
@@ -38,6 +30,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_common_os.h"
 
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -53,6 +46,331 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
 };
 
+/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
+#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
+       (const int []){ \
+               __VA_ARGS__, 0, \
+       }
+
+/** Node object of input graph for mlx5_flow_expand_rss(). */
+struct mlx5_flow_expand_node {
+       const int *const next;
+       /**<
+        * List of next node indexes. A zero index value terminates the list.
+        */
+       const enum rte_flow_item_type type;
+       /**< Pattern item type of current node. */
+       uint64_t rss_types;
+       /**<
+        * RSS types bit-field associated with this node
+        * (see ETH_RSS_* definitions).
+        */
+};
+
+/** Object returned by mlx5_flow_expand_rss(). */
+struct mlx5_flow_expand_rss {
+       uint32_t entries;
+       /**< Number of entries in @p entry[] (patterns and priorities). */
+       struct {
+               struct rte_flow_item *pattern; /**< Expanded pattern array. */
+               uint32_t priority; /**< Priority offset for each expansion. */
+       } entry[];
+};
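
For illustration only, a minimal graph built with the macro and node structure above could look as follows; the node names and the ETH -> IPV4 -> UDP shape are hypothetical, the real graph used by the PMD is mlx5_support_expansion further below.

enum { NODE_ROOT, NODE_ETH, NODE_IPV4, NODE_UDP };

static const struct mlx5_flow_expand_node example_graph[] = {
	[NODE_ROOT] = {
		/* Index 0 node: only reachable as the expansion root. */
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(NODE_ETH),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[NODE_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(NODE_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[NODE_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(NODE_UDP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[NODE_UDP] = {
		/* Leaf node: no .next, the walk stops here. */
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
};
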
+
+static enum rte_flow_item_type
+mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
+{
+       enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
+       uint16_t ether_type = 0;
+       uint16_t ether_type_m;
+       uint8_t ip_next_proto = 0;
+       uint8_t ip_next_proto_m;
+
+       if (item == NULL || item->spec == NULL)
+               return ret;
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_ETH:
+               if (item->mask)
+                       ether_type_m = ((const struct rte_flow_item_eth *)
+                                               (item->mask))->type;
+               else
+                       ether_type_m = rte_flow_item_eth_mask.type;
+               if (ether_type_m != RTE_BE16(0xFFFF))
+                       break;
+               ether_type = ((const struct rte_flow_item_eth *)
+                               (item->spec))->type;
+               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
+               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
+               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
+                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
+               else
+                       ret = RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_FLOW_ITEM_TYPE_VLAN:
+               if (item->mask)
+                       ether_type_m = ((const struct rte_flow_item_vlan *)
+                                               (item->mask))->inner_type;
+               else
+                       ether_type_m = rte_flow_item_vlan_mask.inner_type;
+               if (ether_type_m != RTE_BE16(0xFFFF))
+                       break;
+               ether_type = ((const struct rte_flow_item_vlan *)
+                               (item->spec))->inner_type;
+               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
+               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
+               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
+                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
+               else
+                       ret = RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV4:
+               if (item->mask)
+                       ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
+                                       (item->mask))->hdr.next_proto_id;
+               else
+                       ip_next_proto_m =
+                               rte_flow_item_ipv4_mask.hdr.next_proto_id;
+               if (ip_next_proto_m != 0xFF)
+                       break;
+               ip_next_proto = ((const struct rte_flow_item_ipv4 *)
+                               (item->spec))->hdr.next_proto_id;
+               if (ip_next_proto == IPPROTO_UDP)
+                       ret = RTE_FLOW_ITEM_TYPE_UDP;
+               else if (ip_next_proto == IPPROTO_TCP)
+                       ret = RTE_FLOW_ITEM_TYPE_TCP;
+               else if (ip_next_proto == IPPROTO_IP)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
+               else if (ip_next_proto == IPPROTO_IPV6)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
+               else
+                       ret = RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV6:
+               if (item->mask)
+                       ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
+                                               (item->mask))->hdr.proto;
+               else
+                       ip_next_proto_m =
+                               rte_flow_item_ipv6_mask.hdr.proto;
+               if (ip_next_proto_m != 0xFF)
+                       break;
+               ip_next_proto = ((const struct rte_flow_item_ipv6 *)
+                               (item->spec))->hdr.proto;
+               if (ip_next_proto == IPPROTO_UDP)
+                       ret = RTE_FLOW_ITEM_TYPE_UDP;
+               else if (ip_next_proto == IPPROTO_TCP)
+                       ret = RTE_FLOW_ITEM_TYPE_TCP;
+               else if (ip_next_proto == IPPROTO_IP)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
+               else if (ip_next_proto == IPPROTO_IPV6)
+                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
+               else
+                       ret = RTE_FLOW_ITEM_TYPE_END;
+               break;
+       default:
+               ret = RTE_FLOW_ITEM_TYPE_VOID;
+               break;
+       }
+       return ret;
+}
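
As a hedged illustration of this helper's contract (the variable names are placeholders): an ETH item whose mask fully covers the ether type and whose spec carries the IPv4 ether type is "completed" into an IPv4 item type.

static const struct rte_flow_item_eth eth_spec = {
	.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth eth_mask = {
	.type = RTE_BE16(0xffff),
};
static const struct rte_flow_item eth_item = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &eth_spec,
	.mask = &eth_mask,
};
/* mlx5_flow_expand_rss_item_complete(&eth_item) == RTE_FLOW_ITEM_TYPE_IPV4;
 * with a partial ether type mask it would return RTE_FLOW_ITEM_TYPE_VOID.
 */
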
+
+/**
+ * Expand RSS flows into several possible flows according to the RSS hash
+ * fields requested and the driver capabilities.
+ *
+ * @param[out] buf
+ *   Buffer to store the expansion result.
+ * @param[in] size
+ *   Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ *   User flow pattern.
+ * @param[in] types
+ *   RSS types to expand (see ETH_RSS_* definitions).
+ * @param[in] graph
+ *   Input graph to expand @p pattern according to @p types.
+ * @param[in] graph_root_index
+ *   Index of root node in @p graph, typically 0.
+ *
+ * @return
+ *   A positive value representing the size of @p buf in bytes regardless of
+ *   @p size on success, a negative errno value otherwise and rte_errno is
+ *   set, the following errors are defined:
+ *
+ *   -E2BIG: the expansion graph @p graph is too deep.
+ */
+static int
+mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
+                    const struct rte_flow_item *pattern, uint64_t types,
+                    const struct mlx5_flow_expand_node graph[],
+                    int graph_root_index)
+{
+       const int elt_n = 8;
+       const struct rte_flow_item *item;
+       const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
+       const int *next_node;
+       const int *stack[elt_n];
+       int stack_pos = 0;
+       struct rte_flow_item flow_items[elt_n];
+       unsigned int i;
+       size_t lsize;
+       size_t user_pattern_size = 0;
+       void *addr = NULL;
+       const struct mlx5_flow_expand_node *next = NULL;
+       struct rte_flow_item missed_item;
+       int missed = 0;
+       int elt = 0;
+       const struct rte_flow_item *last_item = NULL;
+
+       memset(&missed_item, 0, sizeof(missed_item));
+       lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
+               elt_n * sizeof(buf->entry[0]);
+       if (lsize <= size) {
+               buf->entry[0].priority = 0;
+               buf->entry[0].pattern = (void *)&buf->entry[elt_n];
+               buf->entries = 0;
+               addr = buf->entry[0].pattern;
+       }
+       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+                       last_item = item;
+               for (i = 0; node->next && node->next[i]; ++i) {
+                       next = &graph[node->next[i]];
+                       if (next->type == item->type)
+                               break;
+               }
+               if (next)
+                       node = next;
+               user_pattern_size += sizeof(*item);
+       }
+       user_pattern_size += sizeof(*item); /* Handle END item. */
+       lsize += user_pattern_size;
+       /* Copy the user pattern in the first entry of the buffer. */
+       if (lsize <= size) {
+               rte_memcpy(addr, pattern, user_pattern_size);
+               addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+               buf->entries = 1;
+       }
+       /* Start expanding. */
+       memset(flow_items, 0, sizeof(flow_items));
+       user_pattern_size -= sizeof(*item);
+       /*
+        * Check if the last valid item has spec set; if so, derive the item
+        * type needed to complete the pattern so it can be used for expansion.
+        */
+       missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+       if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
+               /* Item type END indicates expansion is not required. */
+               return lsize;
+       }
+       if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
+               next = NULL;
+               missed = 1;
+               for (i = 0; node->next && node->next[i]; ++i) {
+                       next = &graph[node->next[i]];
+                       if (next->type == missed_item.type) {
+                               flow_items[0].type = missed_item.type;
+                               flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
+                               break;
+                       }
+                       next = NULL;
+               }
+       }
+       if (next && missed) {
+               elt = 2; /* missed item + item end. */
+               node = next;
+               lsize += elt * sizeof(*item) + user_pattern_size;
+               if ((node->rss_types & types) && lsize <= size) {
+                       buf->entry[buf->entries].priority = 1;
+                       buf->entry[buf->entries].pattern = addr;
+                       buf->entries++;
+                       rte_memcpy(addr, buf->entry[0].pattern,
+                                  user_pattern_size);
+                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
+                       addr = (void *)(((uintptr_t)addr) +
+                                       elt * sizeof(*item));
+               }
+       }
+       memset(flow_items, 0, sizeof(flow_items));
+       next_node = node->next;
+       stack[stack_pos] = next_node;
+       node = next_node ? &graph[*next_node] : NULL;
+       while (node) {
+               flow_items[stack_pos].type = node->type;
+               if (node->rss_types & types) {
+                       /*
+                        * Compute the number of items to copy from the
+                        * expansion and copy them.
+                        * When stack_pos is 0, there is one element in it,
+                        * plus the additional END item.
+                        */
+                       elt = stack_pos + 2;
+                       flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
+                       lsize += elt * sizeof(*item) + user_pattern_size;
+                       if (lsize <= size) {
+                               size_t n = elt * sizeof(*item);
+
+                               buf->entry[buf->entries].priority =
+                                       stack_pos + 1 + missed;
+                               buf->entry[buf->entries].pattern = addr;
+                               buf->entries++;
+                               rte_memcpy(addr, buf->entry[0].pattern,
+                                          user_pattern_size);
+                               addr = (void *)(((uintptr_t)addr) +
+                                               user_pattern_size);
+                               rte_memcpy(addr, &missed_item,
+                                          missed * sizeof(*item));
+                               addr = (void *)(((uintptr_t)addr) +
+                                       missed * sizeof(*item));
+                               rte_memcpy(addr, flow_items, n);
+                               addr = (void *)(((uintptr_t)addr) + n);
+                       }
+               }
+               /* Go deeper. */
+               if (node->next) {
+                       next_node = node->next;
+                       if (stack_pos++ == elt_n) {
+                               rte_errno = E2BIG;
+                               return -rte_errno;
+                       }
+                       stack[stack_pos] = next_node;
+               } else if (*(next_node + 1)) {
+                       /* Follow up with the next possibility. */
+                       ++next_node;
+               } else {
+                       /* Move to the next path. */
+                       if (stack_pos)
+                               next_node = stack[--stack_pos];
+                       next_node++;
+                       stack[stack_pos] = next_node;
+               }
+               node = *next_node ? &graph[*next_node] : NULL;
+       };
+       /* No expanded flows, but there is a missed item; create a rule for it. */
+       if (buf->entries == 1 && missed != 0) {
+               elt = 2;
+               lsize += elt * sizeof(*item) + user_pattern_size;
+               if (lsize <= size) {
+                       buf->entry[buf->entries].priority = 1;
+                       buf->entry[buf->entries].pattern = addr;
+                       buf->entries++;
+                       flow_items[0].type = missed_item.type;
+                       flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
+                       rte_memcpy(addr, buf->entry[0].pattern,
+                                  user_pattern_size);
+                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
+                       addr = (void *)(((uintptr_t)addr) +
+                                       elt * sizeof(*item));
+               }
+       }
+       return lsize;
+}
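
A sketch of how a caller might drive the expansion; the buffer size, the `items` pattern, the `rss` action pointer and the error handling are assumptions, and the PMD's actual flow-creation path sizes its buffer differently.

union {
	struct mlx5_flow_expand_rss buf;
	uint8_t buffer[2048]; /* Arbitrary size for this sketch. */
} expand_buffer;
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
int ret;
uint32_t i;

ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
			   items, rss->types,
			   mlx5_support_expansion, MLX5_EXPANSION_ROOT);
if (ret < 0)
	return ret; /* rte_errno is set, e.g. E2BIG for a too-deep graph. */
if ((size_t)ret > sizeof(expand_buffer.buffer))
	return -ENOBUFS; /* Required size exceeds the local buffer. */
for (i = 0; i < buf->entries; ++i) {
	/* buf->entry[i].pattern is a full item list ending with END and
	 * buf->entry[i].priority is the relative priority offset to use.
	 */
}
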
+
 enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
@@ -83,46 +401,47 @@ enum mlx5_expansion {
 };
 
 /** Supported expansion of items. */
-static const struct rte_flow_expand_node mlx5_support_expansion[] = {
+static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
-                                                MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
-                                                MLX5_EXPANSION_OUTER_IPV4,
-                                                MLX5_EXPANSION_OUTER_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+                                                 MLX5_EXPANSION_OUTER_IPV4,
+                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT
+                                               (MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
-                                                MLX5_EXPANSION_OUTER_IPV6,
-                                                MLX5_EXPANSION_MPLS),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+                                                 MLX5_EXPANSION_OUTER_IPV6,
+                                                 MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
-                                                MLX5_EXPANSION_OUTER_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE,
@@ -133,8 +452,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
-                                                MLX5_EXPANSION_VXLAN_GPE),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
@@ -143,7 +462,7 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP,
                         MLX5_EXPANSION_IPV4,
@@ -153,8 +472,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
-                                                MLX5_EXPANSION_VXLAN_GPE),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
@@ -163,43 +482,43 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
-                                                MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
-                                                MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                MLX5_EXPANSION_IPV6),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
-                                                MLX5_EXPANSION_IPV4_TCP),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+                                                 MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
@@ -213,8 +532,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
-                                                MLX5_EXPANSION_IPV6_TCP),
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+                                                 MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
@@ -362,7 +681,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
        case MLX5_METADATA_FDB:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
-                       return REG_NONE;
+                       return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
@@ -372,7 +691,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
        case MLX5_FLOW_MARK:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
-                       return REG_NONE;
+                       return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_1;
                case MLX5_XMETA_MODE_META32:
@@ -390,7 +709,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
-               MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
+               MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
@@ -413,7 +732,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
-               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
+               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
@@ -430,7 +749,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
                        if (config->flow_mreg_c
-                           [id + 1 + start_reg - REG_C_0] != REG_NONE)
+                           [id + 1 + start_reg - REG_C_0] != REG_NON)
                                return config->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
@@ -468,7 +787,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
         * - reg_c's are preserved across different domain (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
-       return config->flow_mreg_c[2] != REG_NONE;
+       return config->flow_mreg_c[2] != REG_NON;
 }
 
 /**
@@ -483,6 +802,8 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
  *   Bit-masks covering supported fields by the NIC to compare with user mask.
  * @param[in] size
  *   Bit-masks size in bytes.
+ * @param[in] range_accepted
+ *   True if range of values is accepted for specific fields, false otherwise.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -494,6 +815,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
+                         bool range_accepted,
                          struct rte_flow_error *error)
 {
        unsigned int i;
@@ -511,7 +833,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
-       if (item->spec && item->last) {
+       if (item->spec && item->last && !range_accepted) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
@@ -1286,7 +1608,8 @@ mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_icmp6_mask,
-                sizeof(struct rte_flow_item_icmp6), error);
+                sizeof(struct rte_flow_item_icmp6),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1312,6 +1635,12 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_icmp *mask = item->mask;
+       const struct rte_flow_item_icmp nic_mask = {
+               .hdr.icmp_type = 0xff,
+               .hdr.icmp_code = 0xff,
+               .hdr.icmp_ident = RTE_BE16(0xffff),
+               .hdr.icmp_seq_nb = RTE_BE16(0xffff),
+       };
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
@@ -1334,11 +1663,12 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L4 layers not supported");
        if (!mask)
-               mask = &rte_flow_item_icmp_mask;
+               mask = &nic_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
-                (const uint8_t *)&rte_flow_item_icmp_mask,
-                sizeof(struct rte_flow_item_icmp), error);
+                (const uint8_t *)&nic_mask,
+                sizeof(struct rte_flow_item_icmp),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1393,7 +1723,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_eth),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
 }
 
@@ -1447,7 +1777,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
@@ -1499,6 +1829,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  * @param[in] acc_mask
  *   Acceptable mask, if NULL default internal default mask
  *   will be used to check whether item fields are supported.
+ * @param[in] range_accepted
+ *   True if range of values is accepted for specific fields, false otherwise.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1511,6 +1843,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                             uint64_t last_item,
                             uint16_t ether_type,
                             const struct rte_flow_item_ipv4 *acc_mask,
+                            bool range_accepted,
                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -1581,7 +1914,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
-                                       error);
+                                       range_accepted, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1646,9 +1979,9 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "IPv6 cannot follow L2/VLAN layer "
                                          "which ether type is not IPv6");
+       if (mask && mask->hdr.proto == UINT8_MAX && spec)
+               next_proto = spec->hdr.proto;
        if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
-               if (mask && spec)
-                       next_proto = mask->hdr.proto & spec->hdr.proto;
                if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1656,6 +1989,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                                  "multiple tunnel "
                                                  "not supported");
        }
+       if (next_proto == IPPROTO_HOPOPTS  ||
+           next_proto == IPPROTO_ROUTING  ||
+           next_proto == IPPROTO_FRAGMENT ||
+           next_proto == IPPROTO_ESP      ||
+           next_proto == IPPROTO_AH       ||
+           next_proto == IPPROTO_DSTOPTS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "IPv6 proto (next header) should "
+                                         "not be set as extension header");
        if (item_flags & MLX5_FLOW_LAYER_IPIP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1680,7 +2023,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv6),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1735,7 +2078,8 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_udp_mask,
-                sizeof(struct rte_flow_item_udp), error);
+                sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1790,7 +2134,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)flow_mask,
-                sizeof(struct rte_flow_item_tcp), error);
+                sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1844,7 +2189,7 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_vxlan_mask,
                 sizeof(struct rte_flow_item_vxlan),
-                error);
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (spec) {
@@ -1915,7 +2260,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
                 sizeof(struct rte_flow_item_vxlan_gpe),
-                error);
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (spec) {
@@ -1989,7 +2334,7 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&gre_key_default_mask,
-                sizeof(rte_be32_t), error);
+                sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
 }
 
@@ -2041,7 +2386,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_gre), error);
+                sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
 #ifndef HAVE_MLX5DV_DR
@@ -2116,7 +2462,8 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                                  (item, (const uint8_t *)mask,
                                   (const uint8_t *)&nic_mask,
-                                  sizeof(struct rte_flow_item_geneve), error);
+                                  sizeof(struct rte_flow_item_geneve),
+                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (spec) {
@@ -2199,7 +2546,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_mpls_mask,
-                sizeof(struct rte_flow_item_mpls), error);
+                sizeof(struct rte_flow_item_mpls),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2254,7 +2602,8 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_nvgre_mask,
-                sizeof(struct rte_flow_item_nvgre), error);
+                sizeof(struct rte_flow_item_nvgre),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2348,7 +2697,7 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
                                         acc_mask ? (const uint8_t *)acc_mask
                                                  : (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_ecpri),
-                                        error);
+                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
 /* Allocate unique ID for the split Q/RSS subflows. */
@@ -2885,10 +3234,10 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
 }
 
 /**
- * Check if the flow should be splited due to hairpin.
+ * Check if the flow should be split due to hairpin.
  * The reason for the split is that in current HW we can't
- * support encap on Rx, so if a flow have encap we move it
- * to Tx.
+ * support encap and push-vlan on Rx, so if a flow contains
+ * these actions we move it to Tx.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -2908,7 +3257,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
 {
        int queue_action = 0;
        int action_n = 0;
-       int encap = 0;
+       int split = 0;
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        const struct rte_flow_action_raw_encap *raw_encap;
@@ -2939,7 +3288,10 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
-                       encap = 1;
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+                       split++;
                        action_n++;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -2947,7 +3299,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        if (raw_encap->size >
                            (sizeof(struct rte_flow_item_eth) +
                             sizeof(struct rte_flow_item_ipv4)))
-                               encap = 1;
+                               split++;
                        action_n++;
                        break;
                default:
@@ -2955,7 +3307,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        break;
                }
        }
-       if (encap == 1 && queue_action)
+       if (split && queue_action)
                return action_n;
        return 0;
 }
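
For illustration (the queue index, encap configuration and surrounding context are placeholders): a hairpin flow combining a VXLAN encap with a Queue action targeting a hairpin queue is reported for splitting, so that the encap or push-vlan part can be executed on the Tx side.

struct rte_flow_action_queue queue = { .index = hairpin_queue_index };
const struct rte_flow_action actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
		.conf = &vxlan_encap_conf, /* Hypothetical encap definition. */
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &queue,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_END,
	},
};
/* flow_check_hairpin_split(dev, attr, actions) returns a positive action
 * count here, so flow_hairpin_split() will move the encap (and a possible
 * count action) into the Tx subflow.
 */
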
@@ -3017,7 +3369,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
        };
        struct mlx5_flow_action_copy_mreg cp_mreg = {
                .dst = REG_B,
-               .src = 0,
+               .src = REG_NON,
        };
        struct rte_flow_action_jump jump = {
                .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
@@ -3397,7 +3749,8 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
 
 /**
  * Split the hairpin flow.
- * Since HW can't support encap on Rx we move the encap to Tx.
+ * Since HW can't support encap and push-vlan on Rx, we move these
+ * actions to Tx.
  * If the count action is after the encap then we also
 * move the count action. In this case the count will also measure
  * the outer bytes.
@@ -3441,6 +3794,9 @@ flow_hairpin_split(struct rte_eth_dev *dev,
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        rte_memcpy(actions_tx, actions,
                               sizeof(struct rte_flow_action));
                        actions_tx++;
@@ -3501,7 +3857,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        actions_rx++;
        set_tag = (void *)actions_rx;
        set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
-       MLX5_ASSERT(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NON);
        set_tag->data = *flow_id;
        tag_action->conf = set_tag;
        /* Create Tx item list. */
@@ -3513,14 +3869,13 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        tag_item = (void *)addr;
        tag_item->data = *flow_id;
        tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
-       MLX5_ASSERT(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NON);
        item->spec = tag_item;
        addr += sizeof(struct mlx5_rte_flow_item_tag);
        tag_item = (void *)addr;
        tag_item->data = UINT32_MAX;
        tag_item->id = UINT16_MAX;
        item->mask = tag_item;
-       addr += sizeof(struct mlx5_rte_flow_item_tag);
        item->last = NULL;
        item++;
        item->type = RTE_FLOW_ITEM_TYPE_END;
@@ -3539,6 +3894,8 @@ flow_hairpin_split(struct rte_eth_dev *dev,
  *   Pointer to return the created subflow, may be NULL.
  * @param[in] prefix_layers
  *   Prefix subflow layers, may be 0.
+ * @param[in] prefix_mark
+ *   Prefix subflow mark flag, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -3559,6 +3916,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
                        struct rte_flow *flow,
                        struct mlx5_flow **sub_flow,
                        uint64_t prefix_layers,
+                       uint32_t prefix_mark,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
@@ -3578,10 +3936,13 @@ flow_create_split_inner(struct rte_eth_dev *dev,
                      dev_flow->handle, next);
        /*
         * If dev_flow is as one of the suffix flow, some actions in suffix
-        * flow may need some user defined item layer flags.
+        * flow may need some user defined item layer flags, and pass the
+        * Metadate rxq mark flag to suffix flow as well.
         */
        if (prefix_layers)
                dev_flow->handle->layers = prefix_layers;
+       if (prefix_mark)
+               dev_flow->handle->mark = 1;
        if (sub_flow)
                *sub_flow = dev_flow;
        return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3911,6 +4272,203 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Check the match action from the action list.
+ *
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] action
+ *   The action type to look for in the action list.
+ * @param[out] match_action_pos
+ *   Pointer to the position of the matched action, -1 if not found.
+ * @param[out] qrss_action_pos
+ *   Pointer to the position of the Queue/RSS action, -1 if not found.
+ *
+ * @return
+ *   > 0 the total number of actions.
+ *   0 if the match action is not found in the action list.
+ */
+static int
+flow_check_match_action(const struct rte_flow_action actions[],
+                       const struct rte_flow_attr *attr,
+                       enum rte_flow_action_type action,
+                       int *match_action_pos, int *qrss_action_pos)
+{
+       const struct rte_flow_action_sample *sample;
+       int actions_n = 0;
+       int jump_flag = 0;
+       uint32_t ratio = 0;
+       int sub_type = 0;
+       int flag = 0;
+
+       *match_action_pos = -1;
+       *qrss_action_pos = -1;
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               if (actions->type == action) {
+                       flag = 1;
+                       *match_action_pos = actions_n;
+               }
+               if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
+                   actions->type == RTE_FLOW_ACTION_TYPE_RSS)
+                       *qrss_action_pos = actions_n;
+               if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
+                       jump_flag = 1;
+               if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+                       sample = actions->conf;
+                       ratio = sample->ratio;
+                       sub_type = ((const struct rte_flow_action *)
+                                       (sample->actions))->type;
+               }
+               actions_n++;
+       }
+       if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
+               if (ratio == 1) {
+                       /* The JUMP action is not supported for mirroring;
+                        * mirroring supports multiple destinations.
+                        */
+                       if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
+                               flag = 0;
+               }
+       }
+       /* Count RTE_FLOW_ACTION_TYPE_END. */
+       return flag ? actions_n + 1 : 0;
+}
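
A hedged usage sketch (the `actions` and `attr` variables come from the enclosing flow-creation context): the helper is typically used to locate the sample action before deciding whether the sample split applies.

int sample_pos, qrss_pos;
int actions_n = flow_check_match_action(actions, attr,
					RTE_FLOW_ACTION_TYPE_SAMPLE,
					&sample_pos, &qrss_pos);
if (actions_n) {
	/* actions_n counts all actions including the terminating END;
	 * sample_pos/qrss_pos index into actions[], or are -1 if absent.
	 */
}
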
+
+#define SAMPLE_SUFFIX_ITEM 2
+
+/**
+ * Split the sample flow.
+ *
+ * As the sample flow will be split into two sub flows, the sample flow
+ * keeps the sample action, and the other actions are moved to a new
+ * suffix flow.
+ *
+ * A unique tag id is also added with a tag action in the sample flow;
+ * the same tag id is used as a match in the suffix flow.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] fdb_tx
+ *   FDB egress flow flag.
+ * @param[out] sfx_items
+ *   Suffix flow match items (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] actions_sfx
+ *   Suffix flow actions.
+ * @param[out] actions_pre
+ *   Prefix flow actions.
+ * @param[in] actions_n
+ *  The total number of actions.
+ * @param[in] sample_action_pos
+ *   The sample action position.
+ * @param[in] qrss_action_pos
+ *   The Queue/RSS action position.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, or a unique flow_id; a negative errno value
+ *   otherwise and rte_errno is set.
+ */
+static int
+flow_sample_split_prep(struct rte_eth_dev *dev,
+                      uint32_t fdb_tx,
+                      struct rte_flow_item sfx_items[],
+                      const struct rte_flow_action actions[],
+                      struct rte_flow_action actions_sfx[],
+                      struct rte_flow_action actions_pre[],
+                      int actions_n,
+                      int sample_action_pos,
+                      int qrss_action_pos,
+                      struct rte_flow_error *error)
+{
+       struct mlx5_rte_flow_action_set_tag *set_tag;
+       struct mlx5_rte_flow_item_tag *tag_spec;
+       struct mlx5_rte_flow_item_tag *tag_mask;
+       uint32_t tag_id = 0;
+       int index;
+       int ret;
+
+       if (sample_action_pos < 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         NULL, "invalid position of sample "
+                                         "action in list");
+       if (!fdb_tx) {
+               /* Prepare the prefix tag action. */
+               set_tag = (void *)(actions_pre + actions_n + 1);
+               ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+               if (ret < 0)
+                       return ret;
+               set_tag->id = ret;
+               tag_id = flow_qrss_get_id(dev);
+               set_tag->data = tag_id;
+               /* Prepare the suffix subflow items. */
+               tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
+               tag_spec->data = tag_id;
+               tag_spec->id = set_tag->id;
+               tag_mask = tag_spec + 1;
+               tag_mask->data = UINT32_MAX;
+               sfx_items[0] = (struct rte_flow_item){
+                       .type = (enum rte_flow_item_type)
+                               MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+                       .spec = tag_spec,
+                       .last = NULL,
+                       .mask = tag_mask,
+               };
+               sfx_items[1] = (struct rte_flow_item){
+                       .type = (enum rte_flow_item_type)
+                               RTE_FLOW_ITEM_TYPE_END,
+               };
+       }
+       /* Prepare the actions for prefix and suffix flow. */
+       if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
+               index = qrss_action_pos;
+               /* Put the actions preceding the Queue/RSS action into the
+                * prefix flow.
+                */
+               if (index != 0)
+                       memcpy(actions_pre, actions,
+                              sizeof(struct rte_flow_action) * index);
+               /* Put others preceding the sample action into prefix flow. */
+               if (sample_action_pos > index + 1)
+                       memcpy(actions_pre + index, actions + index + 1,
+                              sizeof(struct rte_flow_action) *
+                              (sample_action_pos - index - 1));
+               index = sample_action_pos - 1;
+               /* Put Queue/RSS action into Suffix flow. */
+               memcpy(actions_sfx, actions + qrss_action_pos,
+                      sizeof(struct rte_flow_action));
+               actions_sfx++;
+       } else {
+               index = sample_action_pos;
+               if (index != 0)
+                       memcpy(actions_pre, actions,
+                              sizeof(struct rte_flow_action) * index);
+       }
+       /* Add the extra tag action for NIC-RX and E-Switch ingress. */
+       if (!fdb_tx) {
+               actions_pre[index++] =
+                       (struct rte_flow_action){
+                       .type = (enum rte_flow_action_type)
+                               MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+                       .conf = set_tag,
+               };
+       }
+       memcpy(actions_pre + index, actions + sample_action_pos,
+              sizeof(struct rte_flow_action));
+       index += 1;
+       actions_pre[index] = (struct rte_flow_action){
+               .type = (enum rte_flow_action_type)
+                       RTE_FLOW_ACTION_TYPE_END,
+       };
+       /* Put the actions after sample into Suffix flow. */
+       memcpy(actions_sfx, actions + sample_action_pos + 1,
+              sizeof(struct rte_flow_action) *
+              (actions_n - sample_action_pos - 1));
+       return tag_id;
+}
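
A rough sketch of how the helper could be driven for a NIC-Rx flow; the buffer sizing is an assumption (the helper stores its internal set_tag structure just past the prefix action array and the tag spec/mask just past the suffix items, so extra room must be reserved), `actions_n`, `sample_pos` and `qrss_pos` come from flow_check_match_action(), and error handling of the allocations is omitted.

size_t act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
		  sizeof(struct mlx5_rte_flow_action_set_tag);
size_t item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
		   sizeof(struct mlx5_rte_flow_item_tag) * 2;
struct rte_flow_action *pre_actions =
	mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, SOCKET_ID_ANY);
struct rte_flow_action *sfx_actions =
	mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, SOCKET_ID_ANY);
struct rte_flow_item *sfx_items =
	mlx5_malloc(MLX5_MEM_ZERO, item_size, 0, SOCKET_ID_ANY);
int tag_id = flow_sample_split_prep(dev, 0 /* NIC-Rx */, sfx_items,
				    actions, sfx_actions, pre_actions,
				    actions_n, sample_pos, qrss_pos, error);
/* For an input of COUNT / SAMPLE / QUEUE / END this roughly yields:
 *   pre_actions: COUNT, TAG(set tag_id), SAMPLE, END
 *   sfx_actions: QUEUE, END
 *   sfx_items:   TAG(match tag_id), END
 */
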
+
 /**
  * The splitting for metadata feature.
  *
@@ -3927,6 +4485,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
  *   Parent flow structure pointer.
  * @param[in] prefix_layers
  *   Prefix flow layer flags.
+ * @param[in] prefix_mark
+ *   Prefix subflow mark flag, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -3946,6 +4506,7 @@ static int
 flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow *flow,
                           uint64_t prefix_layers,
+                          uint32_t prefix_mark,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item items[],
                           const struct rte_flow_action actions[],
@@ -3969,8 +4530,9 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
            config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev))
                return flow_create_split_inner(dev, flow, NULL, prefix_layers,
-                                              attr, items, actions, external,
-                                              flow_idx, error);
+                                              prefix_mark, attr, items,
+                                              actions, external, flow_idx,
+                                              error);
        actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
                                                           &encap_idx);
        if (qrss) {
@@ -4055,7 +4617,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                        goto exit;
        }
        /* Add the unmodified original or prefix subflow. */
-       ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
+       ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
+                                     prefix_mark, attr,
                                      items, ext_actions ? ext_actions :
                                      actions, external, flow_idx, error);
        if (ret < 0)
@@ -4069,7 +4632,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                /* Internal PMD action to set register. */
                struct mlx5_rte_flow_item_tag q_tag_spec = {
                        .data = qrss_id,
-                       .id = 0,
+                       .id = REG_NON,
                };
                struct rte_flow_item q_items[] = {
                        {
@@ -4118,7 +4681,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                }
                dev_flow = NULL;
                /* Add suffix subflow to execute Q/RSS. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
+               ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
                                              external, flow_idx, error);
@@ -4154,6 +4717,10 @@ exit:
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
+ * @param[in] prefix_layers
+ *   Prefix subflow layers, may be 0.
+ * @param[in] prefix_mark
+ *   Prefix subflow mark flag, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -4171,12 +4738,14 @@ exit:
  */
 static int
 flow_create_split_meter(struct rte_eth_dev *dev,
-                          struct rte_flow *flow,
-                          const struct rte_flow_attr *attr,
-                          const struct rte_flow_item items[],
-                          const struct rte_flow_action actions[],
-                          bool external, uint32_t flow_idx,
-                          struct rte_flow_error *error)
+                       struct rte_flow *flow,
+                       uint64_t prefix_layers,
+                       uint32_t prefix_mark,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item items[],
+                       const struct rte_flow_action actions[],
+                       bool external, uint32_t flow_idx,
+                       struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_action *sfx_actions = NULL;
@@ -4219,8 +4788,10 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                        goto exit;
                }
                /* Add the prefix subflow. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
-                                             items, pre_actions, external,
+               ret = flow_create_split_inner(dev, flow, &dev_flow,
+                                             prefix_layers, 0,
+                                             attr, items,
+                                             pre_actions, external,
                                              flow_idx, error);
                if (ret) {
                        ret = -rte_errno;
@@ -4235,8 +4806,10 @@ flow_create_split_meter(struct rte_eth_dev *dev,
        /* Add the prefix subflow. */
        ret = flow_create_split_metadata(dev, flow, dev_flow ?
                                         flow_get_prefix_layer_flags(dev_flow) :
-                                        0, &sfx_attr,
-                                        sfx_items ? sfx_items : items,
+                                        prefix_layers, dev_flow ?
+                                        dev_flow->handle->mark : prefix_mark,
+                                        &sfx_attr, sfx_items ?
+                                        sfx_items : items,
                                         sfx_actions ? sfx_actions : actions,
                                         external, flow_idx, error);
 exit:
@@ -4245,6 +4818,138 @@ exit:
        return ret;
 }
 
+/**
+ * The splitting for sample feature.
+ *
+ * Once a Sample action is detected in the action list, the flow actions
+ * should be split into a prefix sub flow and a suffix sub flow.
+ *
+ * The original items remain in the prefix sub flow. All actions preceding
+ * the sample action, as well as the sample action itself, are copied to the
+ * prefix sub flow; the actions following the sample action are copied to
+ * the suffix sub flow, and the Queue action is always located in the suffix
+ * sub flow.
+ *
+ * In order to make a packet from the prefix sub flow match the suffix sub
+ * flow, an extra tag action is added to the prefix sub flow, and the suffix
+ * sub flow uses a tag item with the unique flow id.
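+ *
+ * For example (illustrative only), an original action list such as
+ * "count / sample / queue / end" yields a prefix sub flow with the count
+ * and sample actions plus the added tag action, and a suffix sub flow that
+ * matches on that tag and carries the "queue / end" actions.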
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   This memory pool index to the flow.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
+static int
+flow_create_split_sample(struct rte_eth_dev *dev,
+                        struct rte_flow *flow,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item items[],
+                        const struct rte_flow_action actions[],
+                        bool external, uint32_t flow_idx,
+                        struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_flow_action *sfx_actions = NULL;
+       struct rte_flow_action *pre_actions = NULL;
+       struct rte_flow_item *sfx_items = NULL;
+       struct mlx5_flow *dev_flow = NULL;
+       struct rte_flow_attr sfx_attr = *attr;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       struct mlx5_flow_dv_sample_resource *sample_res;
+       struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
+       struct mlx5_flow_tbl_resource *sfx_tbl;
+       union mlx5_flow_tbl_key sfx_table_key;
+#endif
+       size_t act_size;
+       size_t item_size;
+       uint32_t fdb_tx = 0;
+       int32_t tag_id = 0;
+       int actions_n = 0;
+       int sample_action_pos;
+       int qrss_action_pos;
+       int ret = 0;
+
+       if (priv->sampler_en)
+               actions_n = flow_check_match_action(actions, attr,
+                                       RTE_FLOW_ACTION_TYPE_SAMPLE,
+                                       &sample_action_pos, &qrss_action_pos);
+       if (actions_n) {
+               /* The prefix actions must include sample, tag, and end. */
+               act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
+                          + sizeof(struct mlx5_rte_flow_action_set_tag);
+               item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
+                           sizeof(struct mlx5_rte_flow_item_tag) * 2;
+               sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
+                                         item_size), 0, SOCKET_ID_ANY);
+               if (!sfx_actions)
+                       return rte_flow_error_set(error, ENOMEM,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL, "no memory to split "
+                                                 "sample flow");
+               /* The representor_id is -1 for uplink. */
+               fdb_tx = (attr->transfer && priv->representor_id != -1);
+               if (!fdb_tx)
+                       sfx_items = (struct rte_flow_item *)((char *)sfx_actions
+                                       + act_size);
+               pre_actions = sfx_actions + actions_n;
+               tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
+                                               actions, sfx_actions,
+                                               pre_actions, actions_n,
+                                               sample_action_pos,
+                                               qrss_action_pos, error);
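+               /*
+                * A zero tag id is valid only in the FDB Tx case,
+                * where no suffix tag match is built.
+                */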
+               if (tag_id < 0 || (!fdb_tx && !tag_id)) {
+                       ret = -rte_errno;
+                       goto exit;
+               }
+               /* Add the prefix subflow. */
+               ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
+                                             items, pre_actions, external,
+                                             flow_idx, error);
+               if (ret) {
+                       ret = -rte_errno;
+                       goto exit;
+               }
+               dev_flow->handle->split_flow_id = tag_id;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+               /* Set the sfx group attr. */
+               sample_res = (struct mlx5_flow_dv_sample_resource *)
+                                       dev_flow->dv.sample_res;
+               sfx_tbl = (struct mlx5_flow_tbl_resource *)
+                                       sample_res->normal_path_tbl;
+               sfx_tbl_data = container_of(sfx_tbl,
+                                       struct mlx5_flow_tbl_data_entry, tbl);
+               sfx_table_key.v64 = sfx_tbl_data->entry.key;
+               sfx_attr.group = sfx_attr.transfer ?
+                                       (sfx_table_key.table_id - 1) :
+                                        sfx_table_key.table_id;
+#endif
+       }
+       /* Add the suffix subflow. */
+       ret = flow_create_split_meter(dev, flow, dev_flow ?
+                                flow_get_prefix_layer_flags(dev_flow) : 0,
+                                dev_flow ? dev_flow->handle->mark : 0,
+                                &sfx_attr, sfx_items ? sfx_items : items,
+                                sfx_actions ? sfx_actions : actions,
+                                external, flow_idx, error);
+exit:
+       if (sfx_actions)
+               mlx5_free(sfx_actions);
+       return ret;
+}
+
 /**
  * Split the flow to subflow set. The splitters might be linked
  * in the chain, like this:
@@ -4293,8 +4998,8 @@ flow_create_split_outer(struct rte_eth_dev *dev,
 {
        int ret;
 
-       ret = flow_create_split_meter(dev, flow, attr, items,
-                                        actions, external, flow_idx, error);
+       ret = flow_create_split_sample(dev, flow, attr, items,
+                                      actions, external, flow_idx, error);
        MLX5_ASSERT(ret <= 0);
        return ret;
 }
@@ -4335,7 +5040,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        struct mlx5_flow *dev_flow;
        const struct rte_flow_action_rss *rss;
        union {
-               struct rte_flow_expand_rss buf;
+               struct mlx5_flow_expand_rss buf;
                uint8_t buffer[2048];
        } expand_buffer;
        union {
@@ -4350,7 +5055,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
                uint8_t buffer[2048];
        } items_tx;
-       struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+       struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
        struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
                                              priv->rss_desc)[!!priv->flow_idx];
        const struct rte_flow_action *p_actions_rx = actions;
@@ -4359,10 +5064,14 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        int hairpin_flow;
        uint32_t hairpin_id = 0;
        struct rte_flow_attr attr_tx = { .priority = 0 };
+       struct rte_flow_attr attr_factor = {0};
        int ret;
 
-       hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
-       ret = flow_drv_validate(dev, attr, items, p_actions_rx,
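+       /*
+        * Work on a private copy of the attributes; the group of rules
+        * created by the application is scaled by the table factor.
+        */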
+       memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
+       if (external)
+               attr_factor.group *= MLX5_FLOW_TABLE_FACTOR;
+       hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
+       ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
                                external, hairpin_flow, error);
        if (ret < 0)
                return 0;
@@ -4381,7 +5090,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                rte_errno = ENOMEM;
                goto error_before_flow;
        }
-       flow->drv_type = flow_get_drv_type(dev, attr);
+       flow->drv_type = flow_get_drv_type(dev, &attr_factor);
        if (hairpin_id != 0)
                flow->hairpin_flow_id = hairpin_id;
        MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
@@ -4402,10 +5111,9 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                unsigned int graph_root;
 
                graph_root = find_graph_root(items, rss->level);
-               ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
-                                         items, rss->types,
-                                         mlx5_support_expansion,
-                                         graph_root);
+               ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+                                          items, rss->types,
+                                          mlx5_support_expansion, graph_root);
                MLX5_ASSERT(ret > 0 &&
                       (unsigned int)ret < sizeof(expand_buffer.buffer));
        } else {
@@ -4427,7 +5135,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                 * depending on configuration. In the simplest
                 * case it just creates unmodified original flow.
                 */
-               ret = flow_create_split_outer(dev, flow, attr,
+               ret = flow_create_split_outer(dev, flow, &attr_factor,
                                              buf->entry[i].pattern,
                                              p_actions_rx, external, idx,
                                              error);
@@ -4464,8 +5172,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
         * the egress Flows belong to the different device and
         * copy table should be updated in peer NIC Rx domain.
         */
-       if (attr->ingress &&
-           (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+       if (attr_factor.ingress &&
+           (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
                ret = flow_mreg_update_copy_table(dev, flow, actions, error);
                if (ret)
                        goto error;
@@ -5134,6 +5842,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
                dev->dev_ops = &mlx5_os_dev_ops_isolate;
        else
                dev->dev_ops = &mlx5_os_dev_ops;
+
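+       /* Descriptor status callbacks do not depend on the isolated mode. */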
+       dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+       dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+
        return 0;
 }
 
@@ -5879,28 +6591,113 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
        return -ENOTSUP;
 }
 
-#define MLX5_POOL_QUERY_FREQ_US 1000000
-
 /**
- * Get number of all validate pools.
+ * Allocate new memory for the counter values, wrapped by all the needed
+ * management structures.
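+ *
+ * The memory is allocated as a single chunk laid out as follows: the raw
+ * counter data for all raws, then the raw descriptors, and finally the
+ * management structure itself at the tail.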
  *
  * @param[in] sh
  *   Pointer to mlx5_dev_ctx_shared object.
  *
  * @return
- *   The number of all validate pools.
+ *   0 on success, a negative errno value otherwise.
  */
-static uint32_t
-mlx5_get_all_valid_pool_count(struct mlx5_dev_ctx_shared *sh)
+static int
+mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 {
+       struct mlx5_devx_mkey_attr mkey_attr;
+       struct mlx5_counter_stats_mem_mng *mem_mng;
+       volatile struct flow_counter_stats *raw_data;
+       int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
+       int size = (sizeof(struct flow_counter_stats) *
+                       MLX5_COUNTERS_PER_POOL +
+                       sizeof(struct mlx5_counter_stats_raw)) * raws_n +
+                       sizeof(struct mlx5_counter_stats_mem_mng);
+       size_t pgsize = rte_mem_page_size();
+       uint8_t *mem;
        int i;
-       uint32_t pools_n = 0;
 
-       for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i)
-               pools_n += rte_atomic16_read(&sh->cmng.ccont[i].n_valid);
-       return pools_n;
+       if (pgsize == (size_t)-1) {
+               DRV_LOG(ERR, "Failed to get mem page size");
+               rte_errno = ENOMEM;
+               return -ENOMEM;
+       }
+       mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
+       if (!mem) {
+               rte_errno = ENOMEM;
+               return -ENOMEM;
+       }
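+       /* The management structure is placed at the tail of the chunk. */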
+       mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
+       size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
+       mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+                                                IBV_ACCESS_LOCAL_WRITE);
+       if (!mem_mng->umem) {
+               rte_errno = errno;
+               mlx5_free(mem);
+               return -rte_errno;
+       }
+       mkey_attr.addr = (uintptr_t)mem;
+       mkey_attr.size = size;
+       mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
+       mkey_attr.pd = sh->pdn;
+       mkey_attr.log_entity_size = 0;
+       mkey_attr.pg_access = 0;
+       mkey_attr.klm_array = NULL;
+       mkey_attr.klm_num = 0;
+       mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+       if (!mem_mng->dm) {
+               mlx5_glue->devx_umem_dereg(mem_mng->umem);
+               rte_errno = errno;
+               mlx5_free(mem);
+               return -rte_errno;
+       }
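+       /* The raw descriptors array immediately follows the counter data. */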
+       mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
+       raw_data = (volatile struct flow_counter_stats *)mem;
+       for (i = 0; i < raws_n; ++i) {
+               mem_mng->raws[i].mem_mng = mem_mng;
+               mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+       }
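+       /* The last MLX5_MAX_PENDING_QUERIES raws are free query buffers. */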
+       for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+               LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
+                                mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
+                                next);
+       LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
+       sh->cmng.mem_mng = mem_mng;
+       return 0;
 }
 
+/**
+ * Set the statistic memory to the new counter pool.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] pool
+ *   Pointer to the pool to set the statistic memory.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
+                              struct mlx5_flow_counter_pool *pool)
+{
+       struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+       /* Resize the statistic memory once it is used up. */
+       if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
+           mlx5_flow_create_counter_stat_mem_mng(sh)) {
+               DRV_LOG(ERR, "Cannot resize counter stat mem.");
+               return -1;
+       }
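+       /* Each pool gets one raw slot, indexed modulo the resize step. */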
+       rte_spinlock_lock(&pool->sl);
+       pool->raw = cmng->mem_mng->raws + pool->index %
+                   MLX5_CNT_CONTAINER_RESIZE;
+       rte_spinlock_unlock(&pool->sl);
+       pool->raw_hw = NULL;
+       return 0;
+}
+
+#define MLX5_POOL_QUERY_FREQ_US 1000000
+
 /**
  * Set the periodic procedure for triggering asynchronous batch queries for all
  * the counter pools.
@@ -5913,7 +6710,7 @@ mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
 {
        uint32_t pools_n, us;
 
-       pools_n = mlx5_get_all_valid_pool_count(sh);
+       pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
        us = MLX5_POOL_QUERY_FREQ_US / pools_n;
        DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
        if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -5935,37 +6732,21 @@ void
 mlx5_flow_query_alarm(void *arg)
 {
        struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_devx_obj *dcs;
-       uint16_t offset;
        int ret;
-       uint8_t batch = sh->cmng.batch;
-       uint8_t age = sh->cmng.age;
        uint16_t pool_index = sh->cmng.pool_index;
-       struct mlx5_pools_container *cont;
+       struct mlx5_flow_counter_mng *cmng = &sh->cmng;
        struct mlx5_flow_counter_pool *pool;
-       int cont_loop = MLX5_CCONT_TYPE_MAX;
+       uint16_t n_valid;
 
        if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
                goto set_alarm;
-next_container:
-       cont = MLX5_CNT_CONTAINER(sh, batch, age);
-       rte_spinlock_lock(&cont->resize_sl);
-       if (!cont->pools) {
-               rte_spinlock_unlock(&cont->resize_sl);
-               /* Check if all the containers are empty. */
-               if (unlikely(--cont_loop == 0))
-                       goto set_alarm;
-               batch ^= 0x1;
-               pool_index = 0;
-               if (batch == 0 && pool_index == 0) {
-                       age ^= 0x1;
-                       sh->cmng.batch = batch;
-                       sh->cmng.age = age;
-               }
-               goto next_container;
-       }
-       pool = cont->pools[pool_index];
-       rte_spinlock_unlock(&cont->resize_sl);
+       rte_spinlock_lock(&cmng->pool_update_sl);
+       pool = cmng->pools[pool_index];
+       n_valid = cmng->n_valid;
+       rte_spinlock_unlock(&cmng->pool_update_sl);
+       /* Set the statistic memory to the newly created pool. */
+       if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
+               goto set_alarm;
        if (pool->raw_hw)
                /* There is a pool query in progress. */
                goto set_alarm;
@@ -5974,21 +6755,19 @@ next_container:
        if (!pool->raw_hw)
                /* No free counter statistics raw memory. */
                goto set_alarm;
-       dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
-                                                             (&pool->a64_dcs);
-       offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
        /*
         * Identify the counters released between query trigger and query
-        * handle more effiecntly. The counter released in this gap period
+        * handle more efficiently. The counter released in this gap period
         * should wait for a new round of query as the new arrived packets
         * will not be taken into account.
         */
        pool->query_gen++;
-       ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
-                                              offset, NULL, NULL,
+       ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
+                                              MLX5_COUNTERS_PER_POOL,
+                                              NULL, NULL,
                                               pool->raw_hw->mem_mng->dm->id,
                                               (void *)(uintptr_t)
-                                              (pool->raw_hw->data + offset),
+                                              pool->raw_hw->data,
                                               sh->devx_comp,
                                               (uint64_t)(uintptr_t)pool);
        if (ret) {
@@ -5997,20 +6776,13 @@ next_container:
                pool->raw_hw = NULL;
                goto set_alarm;
        }
-       pool->raw_hw->min_dcs_id = dcs->id;
        LIST_REMOVE(pool->raw_hw, next);
        sh->cmng.pending_queries++;
        pool_index++;
-       if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
-               batch ^= 0x1;
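+       /* Wrap around once all valid pools have been queried. */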
+       if (pool_index >= n_valid)
                pool_index = 0;
-               if (batch == 0 && pool_index == 0)
-                       age ^= 0x1;
-       }
 set_alarm:
-       sh->cmng.batch = batch;
        sh->cmng.pool_index = pool_index;
-       sh->cmng.age = age;
        mlx5_set_query_alarm(sh);
 }
 
@@ -6032,19 +6804,26 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
        struct mlx5_age_param *age_param;
        struct mlx5_counter_stats_raw *cur = pool->raw_hw;
        struct mlx5_counter_stats_raw *prev = pool->raw;
-       uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10);
+       const uint64_t curr_time = MLX5_CURR_TIME_SEC;
+       const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
+       uint16_t expected = AGE_CANDIDATE;
        uint32_t i;
 
+       pool->time_of_last_age_check = curr_time;
        for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
                cnt = MLX5_POOL_GET_CNT(pool, i);
                age_param = MLX5_CNT_TO_AGE(cnt);
-               if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE)
+               if (__atomic_load_n(&age_param->state,
+                                   __ATOMIC_RELAXED) != AGE_CANDIDATE)
                        continue;
                if (cur->data[i].hits != prev->data[i].hits) {
-                       age_param->expire = curr + age_param->timeout;
+                       __atomic_store_n(&age_param->sec_since_last_hit, 0,
+                                        __ATOMIC_RELAXED);
                        continue;
                }
-               if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2))
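+               /* Accumulate idle time; age out only after the timeout. */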
+               if (__atomic_add_fetch(&age_param->sec_since_last_hit,
+                                      time_delta,
+                                      __ATOMIC_RELAXED) <= age_param->timeout)
                        continue;
                /**
                 * Hold the lock first, or if between the
@@ -6055,12 +6834,10 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
                priv = rte_eth_devices[age_param->port_id].data->dev_private;
                age_info = GET_PORT_AGE_INFO(priv);
                rte_spinlock_lock(&age_info->aged_sl);
-               /* If the cpmset fails, release happens. */
-               if (rte_atomic16_cmpset((volatile uint16_t *)
-                                       &age_param->state,
-                                       AGE_CANDIDATE,
-                                       AGE_TMOUT) ==
-                                       AGE_CANDIDATE) {
+               if (__atomic_compare_exchange_n(&age_param->state, &expected,
+                                               AGE_TMOUT, false,
+                                               __ATOMIC_RELAXED,
+                                               __ATOMIC_RELAXED)) {
                        TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
                        MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
                }
@@ -6071,7 +6848,7 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
                if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
                        continue;
                if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
-                       _rte_eth_dev_callback_process
+                       rte_eth_dev_callback_process
                                (&rte_eth_devices[sh->port[i].devx_ih_port_id],
                                RTE_ETH_EVENT_FLOW_AGED, NULL);
                age_info->flags = 0;
@@ -6096,27 +6873,28 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
        struct mlx5_flow_counter_pool *pool =
                (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
        struct mlx5_counter_stats_raw *raw_to_free;
-       uint8_t age = !!IS_AGE_POOL(pool);
        uint8_t query_gen = pool->query_gen ^ 1;
-       struct mlx5_pools_container *cont =
-               MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
+       struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+       enum mlx5_counter_type cnt_type =
+               pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
+                               MLX5_COUNTER_TYPE_ORIGIN;
 
        if (unlikely(status)) {
                raw_to_free = pool->raw_hw;
        } else {
                raw_to_free = pool->raw;
-               if (IS_AGE_POOL(pool))
+               if (pool->is_aged)
                        mlx5_flow_aging_check(sh, pool);
                rte_spinlock_lock(&pool->sl);
                pool->raw = pool->raw_hw;
                rte_spinlock_unlock(&pool->sl);
                /* Be sure the new raw counters data is updated in memory. */
-               rte_cio_wmb();
+               rte_io_wmb();
                if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
-                       rte_spinlock_lock(&cont->csl);
-                       TAILQ_CONCAT(&cont->counters,
+                       rte_spinlock_lock(&cmng->csl[cnt_type]);
+                       TAILQ_CONCAT(&cmng->counters[cnt_type],
                                     &pool->counters[query_gen], next);
-                       rte_spinlock_unlock(&cont->csl);
+                       rte_spinlock_unlock(&cmng->csl[cnt_type]);
                }
        }
        LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
@@ -6233,7 +7011,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                flow_list_destroy(dev, NULL, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
-               config->flow_mreg_c[n] = REG_NONE;
+               config->flow_mreg_c[n] = REG_NON;
        return 0;
 }
 
@@ -6258,6 +7036,11 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
 
+       if (!priv->config.dv_flow_en) {
+               if (fputs("device dv flow disabled\n", file) <= 0)
+                       return -errno;
+               return -ENOTSUP;
+       }
        return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
                                       sh->tx_domain, file);
 }