net/mlx5: fix meter policy creation assert
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5d19ef1..15a4a8c 100644
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+       [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
@@ -164,105 +165,152 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
        return false;
 }
 
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+       enum rte_flow_item_type type;
+
+       switch (proto_mask & proto_spec) {
+       case 0:
+               type = RTE_FLOW_ITEM_TYPE_VOID;
+               break;
+       case RTE_VXLAN_GPE_TYPE_IPV4:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case RTE_VXLAN_GPE_TYPE_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       case RTE_VXLAN_GPE_TYPE_ETH:
+               type = RTE_FLOW_ITEM_TYPE_ETH;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
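+/**
+ * Map the masked IP next-protocol value to the flow item type used by
+ * the RSS expansion logic (VOID when unspecified, END when unsupported).
+ */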
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+       enum rte_flow_item_type type;
+
+       switch (proto_mask & proto_spec) {
+       case 0:
+               type = RTE_FLOW_ITEM_TYPE_VOID;
+               break;
+       case IPPROTO_UDP:
+               type = RTE_FLOW_ITEM_TYPE_UDP;
+               break;
+       case IPPROTO_TCP:
+               type = RTE_FLOW_ITEM_TYPE_TCP;
+               break;
+       case IPPROTO_IPIP:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case IPPROTO_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
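+/**
+ * Map the masked Ethertype to the flow item type used by the RSS expansion
+ * logic. TEB is only meaningful after a tunnel header and VLAN only outside
+ * of one, hence the @p is_tunnel argument.
+ */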
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+                           rte_be16_t type_mask, bool is_tunnel)
+{
+       enum rte_flow_item_type type;
+
+       switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+       case 0:
+               type = RTE_FLOW_ITEM_TYPE_VOID;
+               break;
+       case RTE_ETHER_TYPE_TEB:
+               type = is_tunnel ?
+                      RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_ETHER_TYPE_VLAN:
+               type = !is_tunnel ?
+                      RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_ETHER_TYPE_IPV4:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case RTE_ETHER_TYPE_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
-       enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-       uint16_t ether_type = 0;
-       uint16_t ether_type_m;
-       uint8_t ip_next_proto = 0;
-       uint8_t ip_next_proto_m;
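+/*
+ * Load the given field's spec and mask from the current item, falling back
+ * to the item's default mask when the user mask is NULL.
+ */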
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld)                              \
+       do {                                                             \
+               const void *m = item->mask;                              \
+               const void *s = item->spec;                              \
+               mask = m ?                                               \
+                       ((const struct rte_flow_item_##type *)m)->fld :  \
+                       rte_flow_item_##type##_mask.fld;                 \
+               spec = ((const struct rte_flow_item_##type *)s)->fld;    \
+       } while (0)
+
+       enum rte_flow_item_type ret;
+       uint16_t spec, mask;
 
        if (item == NULL || item->spec == NULL)
-               return ret;
+               return RTE_FLOW_ITEM_TYPE_VOID;
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_eth *)
-                                               (item->mask))->type;
-               else
-                       ether_type_m = rte_flow_item_eth_mask.type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_eth *)
-                               (item->spec))->type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_ethertype_to_item_type(spec, mask, false);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_vlan *)
-                                               (item->mask))->inner_type;
-               else
-                       ether_type_m = rte_flow_item_vlan_mask.inner_type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_vlan *)
-                               (item->spec))->inner_type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_ethertype_to_item_type(spec, mask, false);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
-                                       (item->mask))->hdr.next_proto_id;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv4_mask.hdr.next_proto_id;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv4 *)
-                               (item->spec))->hdr.next_proto_id;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_inet_proto_to_item_type(spec, mask);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
-                                               (item->mask))->hdr.proto;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv6_mask.hdr.proto;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv6 *)
-                               (item->spec))->hdr.proto;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_inet_proto_to_item_type(spec, mask);
+               break;
+       case RTE_FLOW_ITEM_TYPE_GENEVE:
+               MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
+               ret = mlx5_ethertype_to_item_type(spec, mask, true);
+               break;
+       case RTE_FLOW_ITEM_TYPE_GRE:
+               MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
+               ret = mlx5_ethertype_to_item_type(spec, mask, true);
+               break;
+       case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+               MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
+               ret = mlx5_nsh_proto_to_item_type(spec, mask);
                break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
        }
        return ret;
+#undef MLX5_XSET_ITEM_MASK_SPEC
 }
 
 static const int *
@@ -391,13 +439,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
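+                       /*
+                        * An explicit expansion node is never added implicitly;
+                        * continue the search from its children instead.
+                        */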
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                        next = NULL;
                }
        }
@@ -523,7 +578,8 @@ enum mlx5_expansion {
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
        MLX5_EXPANSION_IPV6_FRAG_EXT,
-       MLX5_EXPANSION_GTP
+       MLX5_EXPANSION_GTP,
+       MLX5_EXPANSION_GENEVE,
 };
 
 /** Supported expansion of items. */
@@ -567,6 +623,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE,
                                                  MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GENEVE,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
@@ -591,6 +648,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE,
                                                  MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GENEVE,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
@@ -621,7 +679,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6,
                                                  MLX5_EXPANSION_GRE_KEY,
                                                  MLX5_EXPANSION_MPLS),
@@ -694,6 +753,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_GTP,
        },
+       [MLX5_EXPANSION_GENEVE] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                 MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+       },
 };
 
 static struct rte_flow_action_handle *
@@ -741,6 +806,110 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *err);
+
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error);
+
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error);
+static struct rte_flow_actions_template *
+mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
+                       const struct rte_flow_actions_template_attr *attr,
+                       const struct rte_flow_action actions[],
+                       const struct rte_flow_action masks[],
+                       struct rte_flow_error *error);
+static int
+mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_actions_template *template,
+                                  struct rte_flow_error *error);
+
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+                      const struct rte_flow_template_table_attr *attr,
+                      struct rte_flow_pattern_template *item_templates[],
+                      uint8_t nb_item_templates,
+                      struct rte_flow_actions_template *action_templates[],
+                      uint8_t nb_action_templates,
+                      struct rte_flow_error *error);
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+                       struct rte_flow_template_table *table,
+                       struct rte_flow_error *error);
+static struct rte_flow *
+mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
+                           uint32_t queue,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           const struct rte_flow_item items[],
+                           uint8_t pattern_template_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error);
+static int
+mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
+                            uint32_t queue,
+                            const struct rte_flow_op_attr *attr,
+                            struct rte_flow *flow,
+                            void *user_data,
+                            struct rte_flow_error *error);
+static int
+mlx5_flow_pull(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_op_result res[],
+              uint16_t n_res,
+              struct rte_flow_error *error);
+static int
+mlx5_flow_push(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_error *error);
+
+static struct rte_flow_action_handle *
+mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+                                const struct rte_flow_op_attr *attr,
+                                const struct rte_flow_indir_action_conf *conf,
+                                const struct rte_flow_action *action,
+                                void *user_data,
+                                struct rte_flow_error *error);
+
+static int
+mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+                                const struct rte_flow_op_attr *attr,
+                                struct rte_flow_action_handle *handle,
+                                const void *update,
+                                void *user_data,
+                                struct rte_flow_error *error);
+
+static int
+mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+                                 const struct rte_flow_op_attr *attr,
+                                 struct rte_flow_action_handle *handle,
+                                 void *user_data,
+                                 struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -760,6 +929,23 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
        .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
+       .flex_item_create = mlx5_flow_flex_item_create,
+       .flex_item_release = mlx5_flow_flex_item_release,
+       .info_get = mlx5_flow_info_get,
+       .configure = mlx5_flow_port_configure,
+       .pattern_template_create = mlx5_flow_pattern_template_create,
+       .pattern_template_destroy = mlx5_flow_pattern_template_destroy,
+       .actions_template_create = mlx5_flow_actions_template_create,
+       .actions_template_destroy = mlx5_flow_actions_template_destroy,
+       .template_table_create = mlx5_flow_table_create,
+       .template_table_destroy = mlx5_flow_table_destroy,
+       .async_create = mlx5_flow_async_flow_create,
+       .async_destroy = mlx5_flow_async_flow_destroy,
+       .pull = mlx5_flow_pull,
+       .push = mlx5_flow_push,
+       .async_action_handle_create = mlx5_flow_async_action_handle_create,
+       .async_action_handle_update = mlx5_flow_async_action_handle_update,
+       .async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
 };
 
 /* Tunnel information. */
@@ -836,7 +1022,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;
 
@@ -890,6 +1076,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
        case MLX5_MTR_COLOR:
        case MLX5_ASO_FLOW_HIT:
        case MLX5_ASO_CONNTRACK:
+       case MLX5_SAMPLE_ID:
                /* All features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
@@ -914,7 +1101,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
-               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+               if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
@@ -924,21 +1111,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 * If the available index REG_C_y >= REG_C_x, skip the
                 * color register.
                 */
-               if (skip_mtr_reg && config->flow_mreg_c
+               if (skip_mtr_reg && priv->sh->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
-                       if (config->flow_mreg_c
+                       if (priv->sh->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
-                               return config->flow_mreg_c
+                               return priv->sh->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
-               return config->flow_mreg_c[id + start_reg - REG_C_0];
+               return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
@@ -959,7 +1146,6 @@ bool
 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
 
        /*
         * Having available reg_c can be regarded inclusively as supporting
@@ -969,7 +1155,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
         * - reg_c's are preserved across different domain (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
-       return config->flow_mreg_c[2] != REG_NON;
+       return priv->sh->flow_mreg_c[2] != REG_NON;
 }
 
 /**
@@ -990,7 +1176,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!attr->group && !attr->transfer)
-               return priv->config.flow_prio - 2;
+               return priv->sh->flow_max_priority - 2;
        return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
 }
 
@@ -1018,11 +1204,11 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
 
        if (!attr->group && !attr->transfer) {
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
-                       priority = priv->config.flow_prio - 1;
+                       priority = priv->sh->flow_max_priority - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
        } else if (!external && attr->transfer && attr->group == 0 &&
                   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
-               return (priv->config.flow_prio - 1) * 3;
+               return (priv->sh->flow_max_priority - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
@@ -1156,7 +1342,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
  * flow.
  *
  * @param[in] dev
@@ -1169,7 +1355,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
@@ -1194,24 +1379,16 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                return;
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
 
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                /*
                 * To support metadata register copy on Tx loopback,
                 * this must be always enabled (metadata may arrive
                 * from another port - not from local flows only).
                 */
-               if (priv->config.dv_flow_en &&
-                   priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-                   mlx5_flow_ext_mreg_supported(dev)) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n = 1;
-               } else if (mark) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n++;
-               }
                if (tunnel) {
                        unsigned int j;
 
@@ -1229,6 +1406,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
        }
 }
 
+/* Set the MARK flag on all Rx queues of the port; no-op if already enabled. */
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_ctrl *rxq_ctrl;
+
+       if (priv->mark_enabled)
+               return;
+       LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+               rxq_ctrl->rxq.mark = 1;
+       }
+       priv->mark_enabled = 1;
+}
+
 /**
  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
  *
@@ -1243,7 +1434,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+       MLX5_ASSERT(wks);
+       if (wks->mark)
+               flow_rxq_mark_flag_set(dev);
        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
@@ -1263,7 +1458,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
@@ -1289,19 +1483,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
-               struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of((*priv->rxqs)[idx],
-                                    struct mlx5_rxq_ctrl, rxq);
-
-               if (priv->config.dv_flow_en &&
-                   priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-                   mlx5_flow_ext_mreg_supported(dev)) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n = 1;
-               } else if (mark) {
-                       rxq_ctrl->flow_mark_n--;
-                       rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
-               }
+               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+
+               MLX5_ASSERT(rxq_ctrl != NULL);
+               if (rxq_ctrl == NULL)
+                       continue;
                if (tunnel) {
                        unsigned int j;
 
@@ -1353,19 +1539,17 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
                unsigned int j;
 
-               if (!(*priv->rxqs)[i])
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               rxq_ctrl = container_of((*priv->rxqs)[i],
-                                       struct mlx5_rxq_ctrl, rxq);
-               rxq_ctrl->flow_mark_n = 0;
-               rxq_ctrl->rxq.mark = 0;
+               rxq->ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
-                       rxq_ctrl->flow_tunnels_n[j] = 0;
-               rxq_ctrl->rxq.tunnel = 0;
+                       rxq->ctrl->flow_tunnels_n[j] = 0;
+               rxq->ctrl->rxq.tunnel = 0;
        }
+       priv->mark_enabled = 0;
 }
 
 /**
@@ -1378,13 +1562,15 @@ void
 mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *data;
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[i])
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+               struct mlx5_rxq_data *data;
+
+               if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               data = (*priv->rxqs)[i];
+               data = &rxq->ctrl->rxq;
                if (!rte_flow_dynf_metadata_avail()) {
                        data->dynf_meta = 0;
                        data->flow_meta_mask = 0;
@@ -1566,6 +1752,12 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
+       if (attr->egress)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+                                         "queue action not supported for egress.");
+       if (mlx5_is_external_rxq(dev, queue->index))
+               return 0;
        if (!priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -1575,16 +1767,72 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue index out of range");
-       if (!(*priv->rxqs)[queue->index])
+       if (mlx5_rxq_get(dev, queue->index) == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue is not configured");
-       if (attr->egress)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
-                                         "queue action not supported for "
-                                         "egress");
+       return 0;
+}
+
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ *   Configured device.
+ * @param[in] queues
+ *   Array of queue numbers.
+ * @param[in] queues_n
+ *   Size of the @p queues array.
+ * @param[out] error
+ *   On error, filled with a textual error description.
+ * @param[out] queue_idx
+ *   On error, filled with an offending queue index in @p queues array.
+ *
+ * @return
+ *   0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
+                        const uint16_t *queues, uint32_t queues_n,
+                        const char **error, uint32_t *queue_idx)
+{
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       bool is_hairpin = false;
+       bool is_ext_rss = false;
+       uint32_t i;
+
+       for (i = 0; i != queues_n; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+
+               if (mlx5_is_external_rxq(dev, queues[i])) {
+                       is_ext_rss = true;
+                       continue;
+               }
+               if (is_ext_rss) {
+                       *error = "Combining external and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
+               if (queues[i] >= priv->rxqs_n) {
+                       *error = "queue index out of range";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
+               rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
+               if (rxq_ctrl == NULL) {
+                       *error = "queue is not configured";
+                       *queue_idx = i;
+                       return -EINVAL;
+               }
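+               /*
+                * The first queue decides whether this RSS is hairpin;
+                * mixing hairpin and regular queues is rejected below.
+                */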
+               if (i == 0 && rxq_ctrl->is_hairpin)
+                       is_hairpin = true;
+               if (is_hairpin != rxq_ctrl->is_hairpin) {
+                       *error = "combining hairpin and regular RSS queues is not supported";
+                       *queue_idx = i;
+                       return -ENOTSUP;
+               }
+       }
        return 0;
 }
 
@@ -1608,8 +1856,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
-       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
-       unsigned int i;
+       int ret;
+       const char *message;
+       uint32_t queue_idx;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
@@ -1642,7 +1891,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too large");
-       if (rss->queue_num > priv->config.ind_table_max_size)
+       if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->queue_num,
@@ -1665,7 +1914,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
                                          " type not specified");
-       if (!priv->rxqs_n)
+       if (!priv->rxqs_n && priv->ext_rxqs == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No Rx queues configured");
@@ -1673,27 +1922,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
-       for (i = 0; i != rss->queue_num; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl;
-
-               if (rss->queue[i] >= priv->rxqs_n)
-                       return rte_flow_error_set
-                               (error, EINVAL,
-                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue index out of range");
-               if (!(*priv->rxqs)[rss->queue[i]])
-                       return rte_flow_error_set
-                               (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i], "queue is not configured");
-               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
-                                       struct mlx5_rxq_ctrl, rxq);
-               if (i == 0)
-                       rxq_type = rxq_ctrl->type;
-               if (rxq_type != rxq_ctrl->type)
-                       return rte_flow_error_set
-                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                &rss->queue[i],
-                                "combining hairpin and regular RSS queues is not supported");
+       ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,
+                                      &message, &queue_idx);
+       if (ret != 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue[queue_idx], message);
        }
        return 0;
 }
@@ -1877,7 +2111,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t priority_max = priv->config.flow_prio - 1;
+       uint32_t priority_max = priv->sh->flow_max_priority - 1;
 
        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
@@ -1892,7 +2126,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
-       if (attributes->transfer && !priv->config.dv_esw_en)
+       if (attributes->transfer && !priv->sh->config.dv_esw_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
@@ -2609,7 +2843,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
 
-       if (!priv->config.l3_vxlan_en)
+       if (!priv->sh->config.l3_vxlan_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 VXLAN is not enabled by device"
@@ -2712,6 +2946,107 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
        return ret;
 }
 
+/**
+ * Validate GRE optional item.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit flags to mark detected items.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] gre_item
+ *   Pointer to the GRE item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
+                                  const struct rte_flow_item *item,
+                                  uint64_t item_flags,
+                                  const struct rte_flow_attr *attr,
+                                  const struct rte_flow_item *gre_item,
+                                  struct rte_flow_error *error)
+{
+       const struct rte_flow_item_gre *gre_spec = gre_item->spec;
+       const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+       const struct rte_flow_item_gre_opt *spec = item->spec;
+       const struct rte_flow_item_gre_opt *mask = item->mask;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int ret = 0;
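+       /* Fields the NIC can match on: checksum, key and sequence. */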
+       struct rte_flow_item_gre_opt nic_mask = {
+               .checksum_rsvd = {
+                       .checksum = RTE_BE16(UINT16_MAX),
+                       .reserved1 = 0x0,
+               },
+               .key = {
+                       .key = RTE_BE32(UINT32_MAX),
+               },
+               .sequence = {
+                       .sequence = RTE_BE32(UINT32_MAX),
+               },
+       };
+
+       if (!(item_flags & MLX5_FLOW_LAYER_GRE))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "No preceding GRE header");
+       if (item_flags & MLX5_FLOW_LAYER_INNER)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "GRE option following a wrong item");
+       if (!spec || !mask)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "At least one field gre_option(checksum/key/sequence) must be specified");
+       if (!gre_mask)
+               gre_mask = &rte_flow_item_gre_mask;
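+       /*
+        * GRE c_rsvd0_ver flag bits: 0x8000 = C (checksum present),
+        * 0x2000 = K (key present), 0x1000 = S (sequence present).
+        */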
+       if (mask->checksum_rsvd.checksum)
+               if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) &&
+                                !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "Checksum bit must be on");
+       if (mask->key.key)
+               if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
+                                !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "Key bit must be on");
+       if (mask->sequence.sequence)
+               if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) &&
+                                !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "Sequence bit must be on");
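+       /*
+        * Matching on checksum/sequence requires misc5 support for non-root
+        * tables or the tunnel_header_0_1/2_3 registers for the root table,
+        * and is not available with the ConnectX-5 steering format.
+        */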
+       if (mask->checksum_rsvd.checksum || mask->sequence.sequence) {
+               if (priv->sh->steering_format_version ==
+                   MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+                   ((attr->group || attr->transfer) &&
+                    !priv->sh->misc5_cap) ||
+                   (!(priv->sh->tunnel_header_0_1 &&
+                      priv->sh->tunnel_header_2_3) &&
+                   !attr->group && !attr->transfer))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "Checksum/Sequence not supported");
+       }
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&nic_mask,
+                sizeof(struct rte_flow_item_gre_opt),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+       return ret;
+}
+
 /**
  * Validate GRE item.
  *
@@ -2804,7 +3139,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        const struct rte_flow_item_geneve *mask = item->mask;
        int ret;
        uint16_t gbhdr;
-       uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+       uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
                          MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
        const struct rte_flow_item_geneve nic_mask = {
                .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
@@ -2812,7 +3147,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                .protocol = RTE_BE16(UINT16_MAX),
        };
 
-       if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+       if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Geneve is not enabled by device"
@@ -2892,10 +3227,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
-       struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        uint8_t data_max_supported =
                        hca_attr->max_geneve_tlv_option_data_len * 4;
-       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;
        const struct rte_flow_item_geneve_opt *spec = item->spec;
@@ -2919,7 +3253,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
-                       "Geneve TLV opt length exceeeds the limit (31)");
+                       "Geneve TLV opt length exceeds the limit (31)");
        /* Check if class type and length masks are full. */
        if (full_mask.option_class != mask->option_class ||
            full_mask.option_type != mask->option_type ||
@@ -2929,11 +3263,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                        "Geneve TLV opt class/type/length masks must be full");
        /* Check if length is supported */
        if ((uint32_t)spec->option_len >
-                       config->hca_attr.max_geneve_tlv_option_data_len)
+                       hca_attr->max_geneve_tlv_option_data_len)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt length not supported");
-       if (config->hca_attr.max_geneve_tlv_options > 1)
+       if (hca_attr->max_geneve_tlv_options > 1)
                DRV_LOG(DEBUG,
                        "max_geneve_tlv_options supports more than 1 option");
        /* Check GENEVE item preceding. */
@@ -2988,7 +3322,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                                        "Data mask is of unsupported size");
        }
        /* Check GENEVE option is supported in NIC. */
-       if (!config->hca_attr.geneve_tlv_opt)
+       if (!hca_attr->geneve_tlv_opt)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt not supported");
@@ -3037,7 +3371,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
-       if (!priv->config.mpls_en)
+       if (!priv->sh->dev_cap.mpls_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "MPLS not supported or"
@@ -3327,12 +3661,18 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 
        if (type != MLX5_FLOW_TYPE_MAX)
                return type;
+       /*
+        * Currently when dv_flow_en == 2, only HW steering engine is
+        * supported. New engines can also be chosen here if ready.
+        */
+       if (priv->sh->config.dv_flow_en == 2)
+               return MLX5_FLOW_TYPE_HW;
        /* If no OS specific type - continue with DV/VERBS selection */
-       if (attr->transfer && priv->config.dv_esw_en)
+       if (attr->transfer && priv->sh->config.dv_esw_en)
                type = MLX5_FLOW_TYPE_DV;
        if (!attr->transfer)
-               type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
-                                                MLX5_FLOW_TYPE_VERBS;
+               type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+                                                    MLX5_FLOW_TYPE_VERBS;
        return type;
 }
 
@@ -3676,8 +4016,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
        uint16_t offset = (age_idx >> 16) & UINT16_MAX;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
-       struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+       struct mlx5_aso_age_pool *pool;
 
+       /* The pool array may be reallocated on resize; index it under the lock. */
+       rte_rwlock_read_lock(&mng->resize_rwl);
+       pool = mng->pools[pool_idx];
+       rte_rwlock_read_unlock(&mng->resize_rwl);
        return &pool->actions[offset - 1];
 }
 
@@ -3865,7 +4208,7 @@ find_graph_root(uint32_t rss_level)
  *  subflow.
  *
  * @param[in] dev_flow
- *   Pointer the created preifx subflow.
+ *   Pointer to the created prefix subflow.
  *
  * @return
  *   The layers get from prefix subflow.
@@ -4001,7 +4344,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
                return true;
        case RTE_FLOW_ACTION_TYPE_FLAG:
        case RTE_FLOW_ACTION_TYPE_MARK:
-               if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+               if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
                        return true;
                else
                        return false;
@@ -4192,7 +4535,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
                [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
        };
 
-       /* Fill the register fileds in the flow. */
+       /* Fill the register fields in the flow. */
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return NULL;
@@ -4261,7 +4604,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
        /*
         * The copy Flows are not included in any list. There
         * ones are referenced from other Flows and can not
-        * be applied, removed, deleted in ardbitrary order
+        * be applied, removed, deleted in arbitrary order
         * by list traversing.
         */
        mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
@@ -4440,8 +4783,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
        uint32_t mark_id;
 
        /* Check whether extensive metadata feature is engaged. */
-       if (!priv->config.dv_flow_en ||
-           priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+       if (!priv->sh->config.dv_flow_en ||
+           priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev) ||
            !priv->sh->dv_regc0_mask)
                return 0;
@@ -4500,7 +4843,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                            struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        struct mlx5_flow_mreg_copy_resource *mcp_res;
        const struct rte_flow_action_mark *mark;
 
@@ -4704,6 +5047,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
                        struct rte_flow_error *error)
 {
        struct mlx5_flow *dev_flow;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
        dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
                                    flow_split_info->flow_idx, error);
@@ -4718,12 +5062,14 @@ flow_create_split_inner(struct rte_eth_dev *dev,
        /*
         * If dev_flow is as one of the suffix flow, some actions in suffix
         * flow may need some user defined item layer flags, and pass the
-        * Metadate rxq mark flag to suffix flow as well.
+        * Metadata rxq mark flag to suffix flow as well.
         */
        if (flow_split_info->prefix_layers)
                dev_flow->handle->layers = flow_split_info->prefix_layers;
-       if (flow_split_info->prefix_mark)
-               dev_flow->handle->mark = 1;
+       if (flow_split_info->prefix_mark) {
+               MLX5_ASSERT(wks);
+               wks->mark = 1;
+       }
        if (sub_flow)
                *sub_flow = dev_flow;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -4914,6 +5260,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        uint32_t tag_id = 0;
        struct rte_flow_item *vlan_item_dst = NULL;
        const struct rte_flow_item *vlan_item_src = NULL;
+       const struct rte_flow_item *orig_items = items;
        struct rte_flow_action *hw_mtr_action;
        struct rte_flow_action *action_pre_head = NULL;
        int32_t flow_src_port = priv->representor_id;
@@ -5038,7 +5385,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 
                if (!fm->def_policy) {
                        sub_policy = get_meter_sub_policy(dev, flow, wks,
-                                                         attr, items, error);
+                                                         attr, orig_items,
+                                                         error);
                        if (!sub_policy)
                                return -rte_errno;
                } else {
@@ -5159,6 +5507,8 @@ exit:
  *   Pointer to the Q/RSS action.
  * @param[in] actions_n
  *   Number of original actions.
+ * @param[in] mtr_sfx
+ *   Nonzero if the flow is in the meter suffix table.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -5171,7 +5521,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                          struct rte_flow_action *split_actions,
                          const struct rte_flow_action *actions,
                          const struct rte_flow_action *qrss,
-                         int actions_n, struct rte_flow_error *error)
+                         int actions_n, int mtr_sfx,
+                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -5186,15 +5537,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
         * - Add jump to mreg CP_TBL.
         * As a result, there will be one more action.
         */
-       ++actions_n;
        memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+       /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
+       ++actions_n;
        set_tag = (void *)(split_actions + actions_n);
        /*
-        * If tag action is not set to void(it means we are not the meter
-        * suffix flow), add the tag action. Since meter suffix flow already
-        * has the tag added.
+        * If we are not the meter suffix flow, add the tag action,
+        * since the meter suffix flow already has the tag added.
         */
-       if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+       if (!mtr_sfx) {
                /*
                 * Allocate the new subflow ID. This one is unique within
                 * device and not shared with representors. Otherwise,
@@ -5227,6 +5578,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                                MLX5_RTE_FLOW_ACTION_TYPE_TAG,
                        .conf = set_tag,
                };
+       } else {
+               /*
+                * If we are the meter suffix flow, the tag already exists.
+                * Set the QUEUE/RSS action to void.
+                */
+               split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
        }
        /* JUMP action to jump to mreg copy table (CP_TBL). */
        jump = (void *)(set_tag + 1);
@@ -5258,7 +5615,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @param[in] encap_idx
- *   The encap action inndex.
+ *   The encap action index.
  *
  * @return
  *   0 on success, negative value otherwise
@@ -5523,7 +5880,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
                /* Prepare the prefix tag action. */
                append_index++;
                set_tag = (void *)(actions_pre + actions_n + append_index);
-               ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+               ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
                if (ret < 0)
                        return ret;
                mlx5_ipool_malloc(priv->sh->ipool
@@ -5624,7 +5981,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        const struct rte_flow_action *qrss = NULL;
        struct rte_flow_action *ext_actions = NULL;
        struct mlx5_flow *dev_flow = NULL;
@@ -5649,15 +6006,13 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                        const struct rte_flow_action_queue *queue;
 
                        queue = qrss->conf;
-                       if (mlx5_rxq_get_type(dev, queue->index) ==
-                           MLX5_RXQ_TYPE_HAIRPIN)
+                       if (mlx5_rxq_is_hairpin(dev, queue->index))
                                qrss = NULL;
                } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        const struct rte_flow_action_rss *rss;
 
                        rss = qrss->conf;
-                       if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
-                           MLX5_RXQ_TYPE_HAIRPIN)
+                       if (mlx5_rxq_is_hairpin(dev, rss->queue[0]))
                                qrss = NULL;
                }
        }
@@ -5681,17 +6036,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no memory to split "
                                                  "metadata flow");
-               /*
-                * If we are the suffix flow of meter, tag already exist.
-                * Set the tag action to void.
-                */
-               if (mtr_sfx)
-                       ext_actions[qrss - actions].type =
-                                               RTE_FLOW_ACTION_TYPE_VOID;
-               else
-                       ext_actions[qrss - actions].type =
-                                               (enum rte_flow_action_type)
-                                               MLX5_RTE_FLOW_ACTION_TYPE_TAG;
                /*
                 * Create the new actions list with removed Q/RSS action
                 * and appended set tag and jump to register copy table
@@ -5699,7 +6043,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                 * in advance, because it is needed for set tag action.
                 */
                qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
-                                                   qrss, actions_n, error);
+                                                   qrss, actions_n,
+                                                   mtr_sfx, error);
                if (!mtr_sfx && !qrss_id) {
                        ret = -rte_errno;
                        goto exit;
@@ -5790,6 +6135,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                /* Add suffix subflow to execute Q/RSS. */
                flow_split_info->prefix_layers = layers;
                flow_split_info->prefix_mark = 0;
+               flow_split_info->table_id = 0;
                ret = flow_create_split_inner(dev, flow, &dev_flow,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
@@ -5950,6 +6296,8 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                                                                  fm->policy_id,
                                                                  NULL);
                        MLX5_ASSERT(wks->policy);
+                       if (wks->policy->mark)
+                               wks->mark = 1;
                        if (wks->policy->is_hierarchy) {
                                wks->final_policy =
                                mlx5_flow_meter_hierarchy_get_final_policy(dev,
@@ -6004,7 +6352,6 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                        goto exit;
                }
                /* Add the prefix subflow. */
-               flow_split_info->prefix_mark = 0;
                skip_scale_restore = flow_split_info->skip_scale;
                flow_split_info->skip_scale |=
                        1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
@@ -6037,7 +6384,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                                 MLX5_FLOW_TABLE_LEVEL_METER;
                flow_split_info->prefix_layers =
                                flow_get_prefix_layer_flags(dev_flow);
-               flow_split_info->prefix_mark = dev_flow->handle->mark;
+               flow_split_info->prefix_mark |= wks->mark;
                flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
        }
        /* Add the prefix subflow. */
@@ -6103,6 +6450,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
        struct mlx5_flow_dv_sample_resource *sample_res;
        struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
        struct mlx5_flow_tbl_resource *sfx_tbl;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 #endif
        size_t act_size;
        size_t item_size;
@@ -6141,7 +6489,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                 * When reg_c_preserve is set, metadata registers Cx preserve
                 * their value even through packet duplication.
                 */
-               add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+               add_tag = (!fdb_tx ||
+                          priv->sh->cdev->config.hca_attr.reg_c_preserve);
                if (add_tag)
                        sfx_items = (struct rte_flow_item *)((char *)sfx_actions
                                        + act_size);
@@ -6189,7 +6538,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                }
                flow_split_info->prefix_layers =
                                flow_get_prefix_layer_flags(dev_flow);
-               flow_split_info->prefix_mark = dev_flow->handle->mark;
+               MLX5_ASSERT(wks);
+               flow_split_info->prefix_mark |= wks->mark;
                /* Suffix group level already be scaled with factor, set
                 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
                 * again in translation.
@@ -6728,6 +7078,15 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2) {
+               rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q creation not supported");
+               return NULL;
+       }
        /*
         * If the device is not started yet, it is not allowed to created a
         * flow from application. PMD default flows and traffic control flows
@@ -6793,7 +7152,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
  * @param type
  *   Flow type to be flushed.
  * @param active
- *   If flushing is called avtively.
+ *   If flushing is called actively.
  */
 void
 mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
@@ -6803,6 +7162,14 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
        uint32_t num_flushed = 0, fidx = 1;
        struct rte_flow *flow;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       if (priv->sh->config.dv_flow_en == 2 &&
+           type == MLX5_FLOW_TYPE_GEN) {
+               flow_hw_q_flow_flush(dev, NULL);
+               return;
+       }
+#endif
+
        MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
                flow_list_destroy(dev, type, fidx);
                num_flushed++;
@@ -6899,8 +7266,7 @@ flow_alloc_thread_workspace(void)
        data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
        return data;
 err:
-       if (data->rss_desc.queue)
-               free(data->rss_desc.queue);
+       free(data->rss_desc.queue);
        free(data);
        return NULL;
 }
@@ -7225,10 +7591,17 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error __rte_unused)
 {
-       flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
-                               (uintptr_t)(void *)flow);
-       return 0;
-}
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q destruction not supported");
+       flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+                               (uintptr_t)(void *)flow);
+       return 0;
+}
 
 /**
  * Destroy all flows.
@@ -7322,7 +7695,13 @@ mlx5_flow_query(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
 {
        int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q query not supported");
        ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
                             error);
        if (ret < 0)
@@ -7660,48 +8039,584 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
 
-       if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
-               fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-               fops->counter_free(dev, cnt);
-               return;
-       }
-       DRV_LOG(ERR,
-               "port %u counter free is not supported.",
-                dev->data->port_id);
+       if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+               fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+               fops->counter_free(dev, cnt);
+               return;
+       }
+       DRV_LOG(ERR,
+               "port %u counter free is not supported.",
+                dev->data->port_id);
+}
+
+/**
+ * Query counter statistics.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] cnt
+ *   Index to counter to query.
+ * @param[in] clear
+ *   Set to clear counter statistics.
+ * @param[out] pkts
+ *   The number of packets that hit the counter.
+ * @param[out] bytes
+ *   The number of bytes that hit the counter.
+ * @param[out] action
+ *   Pointer to save the related counter action, if any.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+int
+mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
+                  bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
+{
+       const struct mlx5_flow_driver_ops *fops;
+       struct rte_flow_attr attr = { .transfer = 0 };
+
+       if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+               fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+               return fops->counter_query(dev, cnt, clear, pkts,
+                                       bytes, action);
+       }
+       DRV_LOG(ERR,
+               "port %u counter query is not supported.",
+                dev->data->port_id);
+       return -ENOTSUP;
+}
+
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ *   Pointer to port information.
+ * @param[out] queue_info
+ *   Pointer to queue information.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "info get with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ *   Port configuration attributes.
+ * @param[in] nb_queue
+ *   Number of queues.
+ * @param[in] queue_attr
+ *   Array that holds attributes for each flow queue.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "port configure with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
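
The two wrappers above back the generic rte_flow_info_get() and rte_flow_configure() entry points that an application calls before any template API usage when the PMD runs in HW steering mode (dv_flow_en=2). A minimal application-side sketch, not part of this patch; the port id, single queue, queue depth and counter sizing are illustrative assumptions:

#include <rte_flow.h>

/* Hypothetical helper: query HWS limits, then pre-allocate one flow queue. */
static int
example_port_configure(uint16_t port_id)
{
        struct rte_flow_port_info port_info;
        struct rte_flow_queue_info queue_info;
        const struct rte_flow_port_attr port_attr = {
                .nb_counters = 1 << 12, /* illustrative sizing */
        };
        const struct rte_flow_queue_attr queue_attr = { .size = 64 };
        const struct rte_flow_queue_attr *queue_attr_list[] = { &queue_attr };
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_info_get(port_id, &port_info, &queue_info, &error);
        if (ret)
                return ret;
        /* The mlx5 PMD accepts this path only in HW steering mode. */
        return rte_flow_configure(port_id, &port_attr, 1, queue_attr_list,
                                  &error);
}
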
+
+/**
+ * Create flow item template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the item template attributes.
+ * @param[in] items
+ *   The template item pattern.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Pattern template on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_create(dev, attr, items, error);
+}
+
+/**
+ * Destroy flow item template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the item template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_destroy(dev, template, error);
+}
+
+/**
+ * Create flow actions template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the action template attributes.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] masks
+ *   List of actions that mark which members of each action are constant.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Actions template on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_actions_template *
+mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
+                       const struct rte_flow_actions_template_attr *attr,
+                       const struct rte_flow_action actions[],
+                       const struct rte_flow_action masks[],
+                       struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "action create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->actions_template_create(dev, attr, actions, masks, error);
+}
+
+/**
+ * Destroy flow action template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the action template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_actions_template *template,
+                                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "action destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->actions_template_destroy(dev, template, error);
+}
+
+/**
+ * Create flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in] nb_item_templates
+ *   Number of item templates.
+ * @param[in] action_templates
+ *   Action template array to be bound to the table.
+ * @param[in] nb_action_templates
+ *   Number of action templates.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Table on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+                      const struct rte_flow_template_table_attr *attr,
+                      struct rte_flow_pattern_template *item_templates[],
+                      uint8_t nb_item_templates,
+                      struct rte_flow_actions_template *action_templates[],
+                      uint8_t nb_action_templates,
+                      struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "table create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->template_table_create(dev,
+                                          attr,
+                                          item_templates,
+                                          nb_item_templates,
+                                          action_templates,
+                                          nb_action_templates,
+                                          error);
+}
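
A hedged sketch of how an application would exercise the template wrappers above through the generic rte_flow API, assuming the port was already configured for HW steering as shown earlier; the pattern, actions, group and capacity values are illustrative only, and cleanup on error is omitted:

#include <rte_flow.h>

/* Hypothetical sketch: one pattern template (ETH/IPv4) and one actions
 * template (DROP), combined into a 1024-rule table in group 1.
 */
static struct rte_flow_template_table *
example_table_create(uint16_t port_id, struct rte_flow_error *error)
{
        const struct rte_flow_pattern_template_attr pt_attr = {
                .relaxed_matching = 1,
                .ingress = 1,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_template_table_attr tbl_attr = {
                .flow_attr = { .group = 1, .ingress = 1 },
                .nb_flows = 1024, /* illustrative capacity */
        };
        struct rte_flow_pattern_template *pt;
        struct rte_flow_actions_template *at;

        pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern,
                                              error);
        if (!pt)
                return NULL;
        /* Masks equal to actions: every action member is constant here. */
        at = rte_flow_actions_template_create(port_id, &at_attr, actions,
                                              actions, error);
        if (!at)
                return NULL;
        return rte_flow_template_table_create(port_id, &tbl_attr, &pt, 1,
                                              &at, 1, error);
}
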
+
+/**
+ * Destroy flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table
+ *   Pointer to the table to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+                       struct rte_flow_template_table *table,
+                       struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "table destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->template_table_destroy(dev, table, error);
+}
+
+/**
+ * Enqueue flow creation.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue_id
+ *   The queue to create the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] items
+ *   Items with flow spec value.
+ * @param[in] pattern_template_index
+ *   Index of the item pattern template the flow follows in the table.
+ * @param[in] actions
+ *   Action with flow spec value.
+ * @param[in] action_template_index
+ *   Index of the action template the flow follows in the table.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
+                           uint32_t queue_id,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           const struct rte_flow_item items[],
+                           uint8_t pattern_template_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->async_flow_create(dev, queue_id, attr, table,
+                                      items, pattern_template_index,
+                                      actions, action_template_index,
+                                      user_data, error);
+}
+
+/**
+ * Enqueue flow destruction.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to destroy the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] flow
+ *   Pointer to the flow to be destroyed.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
+                            uint32_t queue,
+                            const struct rte_flow_op_attr *attr,
+                            struct rte_flow *flow,
+                            void *user_data,
+                            struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->async_flow_destroy(dev, queue, attr, flow,
+                                       user_data, error);
+}
+
+/**
+ * Pull the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to pull the result.
+ * @param[in, out] res
+ *   Array to save the results.
+ * @param[in] n_res
+ *   Number of result entries available in the array.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Result number on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pull(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_op_result res[],
+              uint16_t n_res,
+              struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q pull with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pull(dev, queue, res, n_res, error);
+}
+
+/**
+ * Push the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to push the flows.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_push(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q push with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->push(dev, queue, error);
+}
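
The queue-based wrappers above serve the generic rte_flow_async_create(), rte_flow_async_destroy(), rte_flow_pull() and rte_flow_push() entry points. A hedged usage sketch for the insertion path, assuming queue 0 was configured earlier and the table, pattern and actions come from the application; a real datapath would batch operations rather than poll per rule:

#include <errno.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Hypothetical sketch: enqueue one rule, push the queue, poll completion. */
static int
example_async_insert(uint16_t port_id, struct rte_flow_template_table *tbl,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_flow_error *error)
{
        const struct rte_flow_op_attr op_attr = { .postpone = 1 };
        struct rte_flow_op_result result;
        struct rte_flow *flow;
        int ret;

        flow = rte_flow_async_create(port_id, 0, &op_attr, tbl, pattern, 0,
                                     actions, 0, NULL, error);
        if (!flow)
                return -rte_errno;
        /* Postponed operations are only submitted to hardware on push. */
        ret = rte_flow_push(port_id, 0, error);
        if (ret)
                return ret;
        /* Busy-wait for exactly one completion. */
        do {
                ret = rte_flow_pull(port_id, 0, &result, 1, error);
        } while (ret == 0);
        if (ret < 0 || result.status != RTE_FLOW_OP_SUCCESS)
                return -EIO;
        return 0;
}
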
+
+/**
+ * Create shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] conf
+ *   Indirect action configuration.
+ * @param[in] action
+ *   rte_flow action detail.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Action handle on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_action_handle *
+mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+                                const struct rte_flow_op_attr *attr,
+                                const struct rte_flow_indir_action_conf *conf,
+                                const struct rte_flow_action *action,
+                                void *user_data,
+                                struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+       return fops->async_action_create(dev, queue, attr, conf, action,
+                                        user_data, error);
+}
+
+/**
+ * Update shared action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] handle
+ *   Action handle to be updated.
+ * @param[in] update
+ *   Update value.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
+                                    const struct rte_flow_op_attr *attr,
+                                    struct rte_flow_action_handle *handle,
+                                    const void *update,
+                                    void *user_data,
+                                    struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+       return fops->async_action_update(dev, queue, attr, handle,
+                                        update, user_data, error);
 }
 
 /**
- * Query counter statistics.
+ * Destroy shared action.
  *
  * @param[in] dev
- *   Pointer to Ethernet device structure.
- * @param[in] cnt
- *   Index to counter to query.
- * @param[in] clear
- *   Set to clear counter statistics.
- * @param[out] pkts
- *   The counter hits packets number to save.
- * @param[out] bytes
- *   The counter hits bytes number to save.
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to be used.
+ * @param[in] attr
+ *   Operation attribute.
+ * @param[in] handle
+ *   Action handle to be destroyed.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise.
+ *   0 on success, negative value otherwise and rte_errno is set.
  */
-int
-mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
-                  bool clear, uint64_t *pkts, uint64_t *bytes)
+static int
+mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
+                                     const struct rte_flow_op_attr *attr,
+                                     struct rte_flow_action_handle *handle,
+                                     void *user_data,
+                                     struct rte_flow_error *error)
 {
-       const struct mlx5_flow_driver_ops *fops;
-       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
 
-       if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
-               fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-               return fops->counter_query(dev, cnt, clear, pkts, bytes);
-       }
-       DRV_LOG(ERR,
-               "port %u counter query is not supported.",
-                dev->data->port_id);
-       return -ENOTSUP;
+       return fops->async_action_destroy(dev, queue, attr, handle,
+                                         user_data, error);
 }
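
For completeness, a hedged sketch of the asynchronous indirect-action path served by the three wrappers above, using an indirect COUNT action as the example; queue 0 and the configuration values are assumptions, and the completions must still be collected with rte_flow_pull() as in the previous sketch:

#include <rte_flow.h>

/* Hypothetical sketch: create and destroy an indirect counter action
 * through the queue-based API.
 */
static struct rte_flow_action_handle *
example_async_counter_create(uint16_t port_id, struct rte_flow_error *error)
{
        const struct rte_flow_op_attr op_attr = { .postpone = 0 };
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };

        return rte_flow_async_action_handle_create(port_id, 0, &op_attr,
                                                   &conf, &action, NULL,
                                                   error);
}

static int
example_async_counter_destroy(uint16_t port_id,
                              struct rte_flow_action_handle *handle,
                              struct rte_flow_error *error)
{
        const struct rte_flow_op_attr op_attr = { .postpone = 0 };

        return rte_flow_async_action_handle_destroy(port_id, 0, &op_attr,
                                                    handle, NULL, error);
}
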
 
 /**
@@ -7717,7 +8632,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
 static int
 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_devx_mkey_attr mkey_attr;
        struct mlx5_counter_stats_mem_mng *mem_mng;
        volatile struct flow_counter_stats *raw_data;
        int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -7727,6 +8641,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
                        sizeof(struct mlx5_counter_stats_mem_mng);
        size_t pgsize = rte_mem_page_size();
        uint8_t *mem;
+       int ret;
        int i;
 
        if (pgsize == (size_t)-1) {
@@ -7741,23 +8656,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        }
        mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
        size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-       mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
-                                                IBV_ACCESS_LOCAL_WRITE);
-       if (!mem_mng->umem) {
-               rte_errno = errno;
-               mlx5_free(mem);
-               return -rte_errno;
-       }
-       memset(&mkey_attr, 0, sizeof(mkey_attr));
-       mkey_attr.addr = (uintptr_t)mem;
-       mkey_attr.size = size;
-       mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-       mkey_attr.pd = sh->cdev->pdn;
-       mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
-       mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
-       if (!mem_mng->dm) {
-               mlx5_os_umem_dereg(mem_mng->umem);
+       ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd,
+                                         sh->cdev->pdn, mem, size,
+                                         &mem_mng->wm);
+       if (ret) {
                rte_errno = errno;
                mlx5_free(mem);
                return -rte_errno;
@@ -7876,7 +8778,7 @@ mlx5_flow_query_alarm(void *arg)
        ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
                                               MLX5_COUNTERS_PER_POOL,
                                               NULL, NULL,
-                                              pool->raw_hw->mem_mng->dm->id,
+                                              pool->raw_hw->mem_mng->wm.lkey,
                                               (void *)(uintptr_t)
                                               pool->raw_hw->data,
                                               sh->devx_comp,
@@ -8112,13 +9014,12 @@ int
 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
        enum modify_reg idx;
        int n = 0;
 
        /* reg_c[0] and reg_c[1] are reserved. */
-       config->flow_mreg_c[n++] = REG_C_0;
-       config->flow_mreg_c[n++] = REG_C_1;
+       priv->sh->flow_mreg_c[n++] = REG_C_0;
+       priv->sh->flow_mreg_c[n++] = REG_C_1;
        /* Discover availability of other reg_c's. */
        for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
                struct rte_flow_attr attr = {
@@ -8154,7 +9055,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!config->dv_flow_en)
+               if (!priv->sh->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
                flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8163,17 +9064,18 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                                      flow_idx);
                if (!flow)
                        continue;
-               config->flow_mreg_c[n++] = idx;
+               priv->sh->flow_mreg_c[n++] = idx;
                flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
-               config->flow_mreg_c[n] = REG_NON;
+               priv->sh->flow_mreg_c[n] = REG_NON;
+       priv->sh->metadata_regc_check_flag = 1;
        return 0;
 }
 
 int
 save_dump_file(const uint8_t *data, uint32_t size,
-       uint32_t type, uint32_t id, void *arg, FILE *file)
+       uint32_t type, uint64_t id, void *arg, FILE *file)
 {
        char line[BUF_SIZE];
        uint32_t out = 0;
@@ -8185,17 +9087,18 @@ save_dump_file(const uint8_t *data, uint32_t size,
        switch (type) {
        case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
                actions_num = *(uint32_t *)(arg);
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
                                type, id, actions_num);
                break;
        case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
                                type, id);
                break;
        case DR_DUMP_REC_TYPE_PMD_COUNTER:
                count = (struct rte_flow_query_count *)arg;
-               fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
-                               id, count->hits, count->bytes);
+               fprintf(file,
+                       "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
+                       type, id, count->hits, count->bytes);
                return 0;
        default:
                return -1;
@@ -8269,29 +9172,33 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        uint32_t actions_num;
        const uint8_t *data;
        size_t size;
-       uint32_t id;
+       uint64_t id;
        uint32_t type;
+       void *action = NULL;
 
        if (!flow) {
                return rte_flow_error_set(error, ENOENT,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       NULL,
-                       "invalid flow handle");
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
+       /* query counter */
+       if (flow->counter &&
+           (!mlx5_counter_query(dev, flow->counter, false,
+                                &count.hits, &count.bytes, &action)) &&
+           action) {
+               id = (uint64_t)(uintptr_t)action;
+               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+               save_dump_file(NULL, 0, type,
+                       id, (void *)&count, file);
+       }
+
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
-                       [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+                               [MLX5_IPOOL_MLX5_FLOW], handle_idx);
                if (!dh)
                        continue;
                handle_idx = dh->next.next;
-               id = (uint32_t)(uintptr_t)dh->drv_flow;
-
-               /* query counter */
-               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                       save_dump_file(NULL, 0, type,
-                                       id, (void *)&count, file);
 
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
@@ -8305,14 +9212,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                if (modify_hdr) {
                        data = (const uint8_t *)modify_hdr->actions;
                        size = (size_t)(modify_hdr->actions_num) * 8;
+                       id = (uint64_t)(uintptr_t)modify_hdr->action;
                        actions_num = modify_hdr->actions_num;
                        type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
                        save_dump_file(data, size, type, id,
-                                       (void *)(&actions_num), file);
+                                               (void *)(&actions_num), file);
                }
                if (encap_decap) {
                        data = encap_decap->buf;
                        size = encap_decap->size;
+                       id = (uint64_t)(uintptr_t)encap_decap->action;
                        type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
                        save_dump_file(data, size, type,
                                                id, NULL, file);
@@ -8320,6 +9229,117 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        }
        return 0;
 }
+
+/**
+ * Dump all flows' encap_decap/modify_hdr/counter data to a file.
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+static int
+mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+       FILE *file, struct rte_flow_error *error __rte_unused)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist *h;
+       struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
+       struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+       struct rte_flow_query_count count;
+       uint32_t actions_num;
+       const uint8_t *data;
+       size_t size;
+       uint64_t id;
+       uint32_t type;
+       uint32_t i;
+       uint32_t j;
+       struct mlx5_list_inconst *l_inconst;
+       struct mlx5_list_entry *e;
+       int lcore_index;
+       struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+       uint32_t max;
+       void *action;
+
+       /* The encap_decap hlist is lcore-shared, use the global core cache. */
+       i = MLX5_LIST_GLOBAL;
+       h = sh->encaps_decaps;
+       if (h) {
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               encap_decap =
+                               (struct mlx5_flow_dv_encap_decap_resource *)e;
+                               data = encap_decap->buf;
+                               size = encap_decap->size;
+                               id = (uint64_t)(uintptr_t)encap_decap->action;
+                               type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+                               save_dump_file(data, size, type,
+                                       id, NULL, file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+       }
+
+       /* get modify_hdr */
+       h = sh->modify_cmds;
+       if (h) {
+               lcore_index = rte_lcore_index(rte_lcore_id());
+               if (unlikely(lcore_index == -1)) {
+                       lcore_index = MLX5_LIST_NLCORE;
+                       rte_spinlock_lock(&h->l_const.lcore_lock);
+               }
+               i = lcore_index;
+
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               modify_hdr =
+                               (struct mlx5_flow_dv_modify_hdr_resource *)e;
+                               data = (const uint8_t *)modify_hdr->actions;
+                               size = (size_t)(modify_hdr->actions_num) * 8;
+                               actions_num = modify_hdr->actions_num;
+                               id = (uint64_t)(uintptr_t)modify_hdr->action;
+                               type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+                               save_dump_file(data, size, type, id,
+                                               (void *)(&actions_num), file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+
+               if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+                       rte_spinlock_unlock(&h->l_const.lcore_lock);
+       }
+
+       /* get counter */
+       MLX5_ASSERT(cmng->n_valid <= cmng->n);
+       max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
+       for (j = 1; j <= max; j++) {
+               action = NULL;
+               if ((!mlx5_counter_query(dev, j, false, &count.hits,
+                                        &count.bytes, &action)) && action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                       save_dump_file(NULL, 0, type,
+                                       id, (void *)&count, file);
+               }
+       }
+       return 0;
+}
 #endif
 
 /**
@@ -8333,7 +9353,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
  *   Perform verbose error reporting if not NULL. PMDs initialize this
  *   structure in case of error only.
  * @return
- *   0 on success, a nagative value otherwise.
+ *   0 on success, a negative value otherwise.
  */
 int
 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
@@ -8346,11 +9366,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        int ret;
        struct mlx5_flow_handle *dh;
        struct rte_flow *flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       uint32_t idx;
-#endif
 
-       if (!priv->config.dv_flow_en) {
+       if (!sh->config.dv_flow_en) {
                if (fputs("device dv flow disabled\n", file) <= 0)
                        return -errno;
                return -ENOTSUP;
@@ -8359,8 +9376,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        /* dump all */
        if (!flow_idx) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-               MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
-                       mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+               if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+                       return -EINVAL;
 #endif
                return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
                                        sh->rx_domain,
@@ -8370,7 +9387,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
                        (uintptr_t)(void *)flow_idx);
        if (!flow)
-               return -ENOENT;
+               return -EINVAL;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        mlx5_flow_dev_dump_ipool(dev, flow, file, error);
@@ -8660,6 +9677,102 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int ret = 0;
+       struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
+
+       LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+               const char *message;
+               uint32_t queue_idx;
+
+               ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+                                              ind_tbl->queues_n,
+                                              &message, &queue_idx);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+                               dev->data->port_id, ind_tbl->queues[queue_idx],
+                               message);
+                       break;
+               }
+       }
+       if (ret != 0)
+               return ret;
+       LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+               ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not attach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+
+       return 0;
+error:
+       ind_tbl_last = ind_tbl;
+       LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+               if (ind_tbl == ind_tbl_last)
+                       break;
+               if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not detach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int ret = 0;
+       struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
+
+       LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+               ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+               if (ret != 0) {
+                       DRV_LOG(ERR, "Port %u could not detach "
+                               "indirection table obj %p",
+                               dev->data->port_id, (void *)ind_tbl);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       ind_tbl_last = ind_tbl;
+       LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+               if (ind_tbl == ind_tbl_last)
+                       break;
+               if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+                       DRV_LOG(CRIT, "Port %u could not attach "
+                               "indirection table obj %p on rollback",
+                               dev->data->port_id, (void *)ind_tbl);
+       }
+       return ret;
+}
+
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -8704,7 +9817,7 @@ mlx5_get_tof(const struct rte_flow_item *item,
 }
 
 /**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
  */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 __extension__
@@ -9217,33 +10330,37 @@ err:
        return err;
 }
 
-static inline bool
+static inline int
 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                          struct rte_flow_tunnel *tunnel,
-                         const char *err_msg)
+                         struct rte_flow_error *error)
 {
-       err_msg = NULL;
-       if (!is_tunnel_offload_active(dev)) {
-               err_msg = "tunnel offload was not activated";
-               goto out;
-       } else if (!tunnel) {
-               err_msg = "no application tunnel";
-               goto out;
-       }
+       struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (!priv->sh->config.dv_flow_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "flow DV interface is off");
+       if (!is_tunnel_offload_active(dev))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "tunnel offload was not activated");
+       if (!tunnel)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "no application tunnel");
        switch (tunnel->type) {
        default:
-               err_msg = "unsupported tunnel type";
-               goto out;
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "unsupported tunnel type");
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_GRE:
        case RTE_FLOW_ITEM_TYPE_NVGRE:
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                break;
        }
-
-out:
-       return !err_msg;
+       return 0;
 }
 
 static int
@@ -9253,15 +10370,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    uint32_t *num_of_actions,
                    struct rte_flow_error *error)
 {
-       int ret;
        struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+       int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-                                         err_msg);
+       if (ret)
+               return ret;
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
@@ -9280,15 +10393,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error)
 {
-       int ret;
        struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+       int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         err_msg);
+       if (ret)
+               return ret;
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
@@ -9514,6 +10623,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+                          const struct rte_flow_item_flex_conf *conf,
+                          struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item creation unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       if (!fops->item_create) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return NULL;
+       }
+       return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+                           const struct rte_flow_item_flex_handle *handle,
+                           struct rte_flow_error *error)
+{
+       static const char err_msg[] = "flex item release unsupported";
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
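+       /* As with creation, the release op may be absent for this driver. */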
+       if (!fops->item_release) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return -rte_errno;
+       }
+       return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
@@ -9556,17 +10704,119 @@ mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
                const struct mlx5_flow_expand_node *node)
 {
        const struct rte_flow_item *item = pattern + item_idx, *prev_item;
-       switch (item->type) {
-       case RTE_FLOW_ITEM_TYPE_VXLAN:
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+                       node != NULL &&
+                       node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+               /*
+                * The expansion node is VXLAN and it is also the last
+                * expandable item in the pattern, so we need to continue
+                * expanding the inner tunnel.
+                */
                MLX5_ASSERT(item_idx > 0);
                prev_item = pattern + item_idx - 1;
                MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
                if (mlx5_flow_is_std_vxlan_port(prev_item))
                        return &graph[MLX5_EXPANSION_STD_VXLAN];
-               else
-                       return &graph[MLX5_EXPANSION_L3_VXLAN];
+               return &graph[MLX5_EXPANSION_L3_VXLAN];
+       }
+       return node;
+}
+
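+/*
+ * Both maps are indexed as [base priority][item sub-priority]; see
+ * mlx5_flow_adjust_priority() below, e.g. priority_map_5[1][1] is 4.
+ */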
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+       { 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ *   Ethernet device.
+ *
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code is returned and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
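+       /* Candidate Verbs priority counts to probe on the device. */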
+       static const uint16_t vprio[] = {8, 16};
+       const struct mlx5_priv *priv = dev->data->dev_private;
+       const struct mlx5_flow_driver_ops *fops;
+       enum mlx5_flow_drv_type type;
+       int ret;
+
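+       /*
+        * When the OS layer leaves the driver type open, prefer DV if DevX
+        * and DV flow are enabled, otherwise fall back to Verbs.
+        */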
+       type = mlx5_flow_os_get_type();
+       if (type == MLX5_FLOW_TYPE_MAX) {
+               type = MLX5_FLOW_TYPE_VERBS;
+               if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
+                       type = MLX5_FLOW_TYPE_DV;
+       }
+       fops = flow_get_drv_ops(type);
+       if (fops->discover_priorities == NULL) {
+               DRV_LOG(ERR, "Priority discovery not supported");
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+       if (ret < 0)
+               return ret;
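+       /* Map the probed priority count to the number of flow priorities. */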
+       switch (ret) {
+       case 8:
+               ret = RTE_DIM(priority_map_3);
+               break;
+       case 16:
+               ret = RTE_DIM(priority_map_5);
                break;
        default:
-               return node;
+               rte_errno = ENOTSUP;
+               DRV_LOG(ERR,
+                       "port %u maximum priority: %d expected 8/16",
+                       dev->data->port_id, ret);
+               return -rte_errno;
+       }
+       DRV_LOG(INFO, "port %u supported flow priorities:"
+               " 0-%d for ingress or egress root table,"
+               " 0-%d for non-root table or transfer root table.",
+               dev->data->port_id, ret - 2,
+               MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+       return ret;
+}
+
+/**
+ * Adjust the flow priority based on the highest layer and the requested priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+                         uint32_t subpriority)
+{
+       uint32_t res = 0;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
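+       /* Pick the map whose size matches the discovered priority count. */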
+       switch (priv->sh->flow_max_priority) {
+       case RTE_DIM(priority_map_3):
+               res = priority_map_3[priority][subpriority];
+               break;
+       case RTE_DIM(priority_map_5):
+               res = priority_map_5[priority][subpriority];
+               break;
        }
+       return res;
 }