net/mlx5: fix GENEVE and VXLAN-GPE flow item matching
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f2fde91..3da122c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -15,6 +15,7 @@
 #include <rte_flow_driver.h>
 #include <rte_malloc.h>
 #include <rte_cycles.h>
+#include <rte_bus_pci.h>
 #include <rte_ip.h>
 #include <rte_gre.h>
 #include <rte_vxlan.h>
@@ -92,6 +93,37 @@ static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+       if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+               return RTE_ETHER_TYPE_TEB;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+               return RTE_ETHER_TYPE_IPV4;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+               return RTE_ETHER_TYPE_IPV6;
+       else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+               return RTE_ETHER_TYPE_MPLS;
+       return 0;
+}
+
+static int16_t
+flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->pci_dev == NULL)
+               return 0;
+       switch (priv->pci_dev->id.device_id) {
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
+               return (int16_t)0xfffe;
+       default:
+               return 0;
+       }
+}
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
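The mlx5_translate_tunnel_etypes() helper added above maps the accumulated pattern flags to the EtherType expected after a tunnel header; the GRE and GENEVE translation hunks below use it to force a next-protocol match when the application left the protocol mask empty. The order of the checks matters: an inner L2 layer wins because TEB (Transparent Ethernet Bridging) carries its own L3 header. A minimal sketch of the intended behavior, with an illustrative flag combination:

        /* Inner Ethernet takes precedence over inner IP. */
        uint64_t flags = MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3_IPV4;
        uint16_t etype = mlx5_translate_tunnel_etypes(flags);
        /* etype == RTE_ETHER_TYPE_TEB (0x6558); with no inner layers the
         * helper returns 0 and callers leave the protocol unmatched. */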
@@ -269,31 +301,6 @@ struct field_modify_info modify_tcp[] = {
        {0, 0, 0},
 };
 
-static const struct rte_flow_item *
-mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
-{
-       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VXLAN:
-               case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-               case RTE_FLOW_ITEM_TYPE_GRE:
-               case RTE_FLOW_ITEM_TYPE_MPLS:
-               case RTE_FLOW_ITEM_TYPE_NVGRE:
-               case RTE_FLOW_ITEM_TYPE_GENEVE:
-                       return item;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
-                           item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
-                               return item;
-                       break;
-               }
-       }
-       return NULL;
-}
-
 static void
 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
@@ -1390,9 +1397,13 @@ flow_dv_convert_action_modify_ipv6_dscp
 }
 
 static int
-mlx5_flow_item_field_width(struct mlx5_priv *priv,
-                          enum rte_flow_field_id field, int inherit)
+mlx5_flow_item_field_width(struct rte_eth_dev *dev,
+                          enum rte_flow_field_id field, int inherit,
+                          const struct rte_flow_attr *attr,
+                          struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
        switch (field) {
        case RTE_FLOW_FIELD_START:
                return 32;
@@ -1439,7 +1450,8 @@ mlx5_flow_item_field_width(struct mlx5_priv *priv,
        case RTE_FLOW_FIELD_MARK:
                return __builtin_popcount(priv->sh->dv_mark_mask);
        case RTE_FLOW_FIELD_META:
-               return __builtin_popcount(priv->sh->dv_meta_mask);
+               return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
+                       __builtin_popcount(priv->sh->dv_meta_mask) : 32;
        case RTE_FLOW_FIELD_POINTER:
        case RTE_FLOW_FIELD_VALUE:
                return inherit < 0 ? 0 : inherit;
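RTE_FLOW_FIELD_META now reports its width according to the register that actually backs metadata: only REG_C_0 is shared with other features and narrowed by dv_meta_mask; any other REG_C register offers the full 32 bits. A sketch of the effect, assuming an illustrative 16-bit mask:

        /* META on REG_C_0 with a shared mask leaves only the masked bits;
         * on any other REG_C register all 32 bits are available. */
        uint32_t dv_meta_mask = 0xffff0000;                 /* illustrative */
        int width_on_reg_c_0 = __builtin_popcount(dv_meta_mask);  /* 16 */
        int width_elsewhere = 32;                           /* full register */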
@@ -1472,11 +1484,11 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_DMAC_15_0};
                                if (width < 16) {
-                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                       mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE16(0xffff);
+                                       mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
@@ -1485,11 +1497,11 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
-                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
-                                                     (32 - width)) << off);
+                       mask[0] = rte_cpu_to_be_32((0xffffffff >>
+                                                   (32 - width)) << off);
                } else {
                        if (data->offset < 16)
-                               info[idx++] = (struct field_modify_info){2, 4,
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_DMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
@@ -1502,11 +1514,11 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_SMAC_15_0};
                                if (width < 16) {
-                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                       mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE16(0xffff);
+                                       mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
@@ -1515,11 +1527,11 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
-                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
-                                                     (32 - width)) << off);
+                       mask[0] = rte_cpu_to_be_32((0xffffffff >>
+                                                   (32 - width)) << off);
                } else {
                        if (data->offset < 16)
-                               info[idx++] = (struct field_modify_info){2, 4,
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_SMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
@@ -1584,12 +1596,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1600,12 +1612,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1616,12 +1628,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1630,17 +1642,16 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_127_96};
-                       mask[idx] = rte_cpu_to_be_32(0xffffffff >>
-                                                    (32 - width));
+                       mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 12,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                        if (data->offset < 64)
-                               info[idx++] = (struct field_modify_info){4, 8,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                        if (data->offset < 96)
-                               info[idx++] = (struct field_modify_info){4, 4,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
@@ -1653,12 +1664,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1669,12 +1680,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1685,12 +1696,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1699,17 +1710,16 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_127_96};
-                       mask[idx] = rte_cpu_to_be_32(0xffffffff >>
-                                                    (32 - width));
+                       mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 12,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                        if (data->offset < 64)
-                               info[idx++] = (struct field_modify_info){4, 8,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                        if (data->offset < 96)
-                               info[idx++] = (struct field_modify_info){4, 4,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
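The mask[idx] to mask[0..3] changes above fix which word of the mask receives the bits: info[] advances with the running idx, but the mask array is laid out by fixed big-endian word position, mask[0] being the most significant. Whenever the requested offset skipped the leading info entries, idx no longer matched the word position and the mask landed in the wrong word. A sketch of the layout the fix enforces, assuming the 4-word mask array these helpers fill, here for a 48-bit MAC address:

        uint32_t mask[4] = {0};              /* big-endian words, MSB first */
        /* Modify only DMAC bits 15..0: the low word is always mask[1],
         * even when no info entry was emitted for bits 47..16. */
        mask[1] = rte_cpu_to_be_16(0xffff);
        /* mask[0] (bits 47..16) stays clear. */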
@@ -2184,6 +2194,8 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                return ret;
        if (!spec)
                return 0;
+       if (spec->id == MLX5_PORT_ESW_MGR)
+               return 0;
        esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
        if (!esw_priv)
                return rte_flow_error_set(error, rte_errno,
@@ -3175,11 +3187,14 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_set_meta *conf;
        uint32_t nic_mask = UINT32_MAX;
        int reg;
 
-       if (!mlx5_flow_ext_mreg_supported(dev))
+       if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+           !mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "extended metadata register"
@@ -4813,10 +4828,12 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_modify_field *action_modify_field =
                action->conf;
-       uint32_t dst_width = mlx5_flow_item_field_width(priv,
-                               action_modify_field->dst.field, -1);
-       uint32_t src_width = mlx5_flow_item_field_width(priv,
-                               action_modify_field->src.field, dst_width);
+       uint32_t dst_width = mlx5_flow_item_field_width(dev,
+                               action_modify_field->dst.field,
+                               -1, attr, error);
+       uint32_t src_width = mlx5_flow_item_field_width(dev,
+                               action_modify_field->src.field,
+                               dst_width, attr, error);
 
        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (ret)
@@ -4909,15 +4926,27 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
                                "modifications of the GENEVE Network"
                                " Identifier is not supported");
        if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
-           action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
-           action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
-           action_modify_field->src.field == RTE_FLOW_FIELD_META) {
+           action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
                if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
                    !mlx5_flow_ext_mreg_supported(dev))
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                       "cannot modify mark or metadata without"
-                                       " extended metadata register support");
+                                       "cannot modify mark in legacy mode"
+                                       " or without extensive registers");
+       if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
+           action_modify_field->src.field == RTE_FLOW_FIELD_META) {
+               if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+                   !mlx5_flow_ext_mreg_supported(dev))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                       "cannot modify meta without"
+                                       " extensive registers support");
+               ret = flow_dv_get_metadata_reg(dev, attr, error);
+               if (ret < 0 || ret == REG_NON)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                       "cannot modify meta without"
+                                       " extensive registers available");
        }
        if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
                return rte_flow_error_set(error, ENOTSUP,
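MODIFY_FIELD validation above now separates MARK, which still requires extended metadata registers outside legacy mode, from META, which additionally needs flow_dv_get_metadata_reg() to resolve to a usable register for the given attributes. A hedged usage sketch of the action being validated; the initializers follow the rte_flow modify-field layout of this release:

        /* Set the low 16 bits of META to an immediate value. */
        struct rte_flow_action_modify_field conf = {
                .operation = RTE_FLOW_MODIFY_SET,
                .dst = { .field = RTE_FLOW_FIELD_META },
                .src = { .field = RTE_FLOW_FIELD_VALUE, .value = 0x1234 },
                .width = 16,
        };
        struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
                .conf = &conf,
        };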
@@ -5104,6 +5133,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  *   Pointer to rte_eth_dev structure.
  * @param[in] action_flags
  *   Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ *   Holds the items detected.
  * @param[in] action
  *   Pointer to the meter action.
  * @param[in] attr
@@ -5118,7 +5149,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  */
 static int
 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
-                               uint64_t action_flags,
+                               uint64_t action_flags, uint64_t item_flags,
                                const struct rte_flow_action *action,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item *port_id_item,
@@ -5222,6 +5253,35 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
                                                NULL,
                                                "Flow and meter policy "
                                                "have different src port.");
+               } else if (mtr_policy->is_rss) {
+                       struct mlx5_flow_meter_policy *fp;
+                       struct mlx5_meter_policy_action_container *acg;
+                       struct mlx5_meter_policy_action_container *acy;
+                       const struct rte_flow_action *rss_act;
+                       int ret;
+
+                       fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+                                                               mtr_policy);
+                       if (fp == NULL)
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                                 "Unable to get the final "
+                                                 "policy in the hierarchy");
+                       acg = &fp->act_cnt[RTE_COLOR_GREEN];
+                       acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+                       MLX5_ASSERT(acg->fate_action ==
+                                   MLX5_FLOW_FATE_SHARED_RSS ||
+                                   acy->fate_action ==
+                                   MLX5_FLOW_FATE_SHARED_RSS);
+                       if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+                               rss_act = acg->rss;
+                       else
+                               rss_act = acy->rss;
+                       ret = mlx5_flow_validate_action_rss(rss_act,
+                                       action_flags, dev, attr,
+                                       item_flags, error);
+                       if (ret)
+                               return ret;
                }
                *def_policy = false;
        }
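When a meter policy in the hierarchy terminates in shared RSS, the final policy is resolved and its RSS action is re-validated against the flow's own item_flags; that is why item_flags is now threaded through mlx5_flow_validate_action_meter(). One constraint this catches, sketched after the check mlx5_flow_validate_action_rss() performs:

        /* Inner (level > 1) RSS is only valid on tunnel patterns. */
        int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        if (rss->level > 1 && !tunnel)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                "inner RSS is not supported for non-tunnel flows");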
@@ -5557,6 +5617,10 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, jump should "
                                          "be after sample action");
+       if (*action_flags & MLX5_FLOW_ACTION_CT)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "Sample after CT not supported");
        act = sample->actions;
        for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
@@ -6381,14 +6445,17 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
                return NULL;
        }
        pool->devx_obj = dcs;
+       rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
        pool->index = pools_mng->n_valid;
        if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
                mlx5_free(pool);
                claim_zero(mlx5_devx_cmd_destroy(dcs));
+               rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
                return NULL;
        }
        pools_mng->pools[pool->index] = pool;
        pools_mng->n_valid++;
+       rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
        for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
                pool->mtrs[i].offset = i;
                LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
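The new resize_mtrwl write lock serializes growth of the pools[] array against concurrent lookups. A sketch of the reader side this pairs with, assuming the same pools_mng container used above:

        /* e.g. resolving an ASO meter by index during flow creation */
        rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
        pool = pools_mng->pools[mtr_idx / MLX5_ASO_MTRS_PER_POOL];
        rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);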
@@ -6561,119 +6628,85 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
        return ret;
 }
 
-static uint16_t
-mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
-                         const struct rte_flow_item *end)
+static int
+validate_integrity_bits(const struct rte_flow_item_integrity *mask,
+                       int64_t pattern_flags, uint64_t l3_flags,
+                       uint64_t l4_flags, uint64_t ip4_flag,
+                       struct rte_flow_error *error)
 {
-       const struct rte_flow_item *item = *head;
-       uint16_t l3_protocol;
+       if (mask->l3_ok && !(pattern_flags & l3_flags))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing L3 protocol");
+
+       if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing IPv4 protocol");
+
+       if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing L4 protocol");
 
-       for (; item != end; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       l3_protocol = RTE_ETHER_TYPE_IPV4;
-                       goto l3_ok;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       l3_protocol = RTE_ETHER_TYPE_IPV6;
-                       goto l3_ok;
-               case RTE_FLOW_ITEM_TYPE_ETH:
-                       if (item->mask && item->spec) {
-                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
-                                                           type, item,
-                                                           l3_protocol);
-                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
-                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
-                                       goto l3_ok;
-                       }
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VLAN:
-                       if (item->mask && item->spec) {
-                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
-                                                           inner_type, item,
-                                                           l3_protocol);
-                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
-                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
-                                       goto l3_ok;
-                       }
-                       break;
-               }
-       }
        return 0;
-l3_ok:
-       *head = item;
-       return l3_protocol;
 }
 
-static uint8_t
-mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
-                         const struct rte_flow_item *end)
+static int
+flow_dv_validate_item_integrity_post(const struct rte_flow_item
+                                    *integrity_items[2],
+                                    int64_t pattern_flags,
+                                    struct rte_flow_error *error)
 {
-       const struct rte_flow_item *item = *head;
-       uint8_t l4_protocol;
+       const struct rte_flow_item_integrity *mask;
+       int ret;
 
-       for (; item != end; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_TCP:
-                       l4_protocol = IPPROTO_TCP;
-                       goto l4_ok;
-               case RTE_FLOW_ITEM_TYPE_UDP:
-                       l4_protocol = IPPROTO_UDP;
-                       goto l4_ok;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       if (item->mask && item->spec) {
-                               const struct rte_flow_item_ipv4 *mask, *spec;
-
-                               mask = (typeof(mask))item->mask;
-                               spec = (typeof(spec))item->spec;
-                               l4_protocol = mask->hdr.next_proto_id &
-                                             spec->hdr.next_proto_id;
-                               if (l4_protocol == IPPROTO_TCP ||
-                                   l4_protocol == IPPROTO_UDP)
-                                       goto l4_ok;
-                       }
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       if (item->mask && item->spec) {
-                               const struct rte_flow_item_ipv6 *mask, *spec;
-                               mask = (typeof(mask))item->mask;
-                               spec = (typeof(spec))item->spec;
-                               l4_protocol = mask->hdr.proto & spec->hdr.proto;
-                               if (l4_protocol == IPPROTO_TCP ||
-                                   l4_protocol == IPPROTO_UDP)
-                                       goto l4_ok;
-                       }
-                       break;
-               }
+       if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
+               mask = (typeof(mask))integrity_items[0]->mask;
+               ret = validate_integrity_bits(mask, pattern_flags,
+                                             MLX5_FLOW_LAYER_OUTER_L3,
+                                             MLX5_FLOW_LAYER_OUTER_L4,
+                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4,
+                                             error);
+               if (ret)
+                       return ret;
+       }
+       if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
+               mask = (typeof(mask))integrity_items[1]->mask;
+               ret = validate_integrity_bits(mask, pattern_flags,
+                                             MLX5_FLOW_LAYER_INNER_L3,
+                                             MLX5_FLOW_LAYER_INNER_L4,
+                                             MLX5_FLOW_LAYER_INNER_L3_IPV4,
+                                             error);
+               if (ret)
+                       return ret;
        }
        return 0;
-l4_ok:
-       *head = item;
-       return l4_protocol;
 }
 
 static int
 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
-                               const struct rte_flow_item *rule_items,
                                const struct rte_flow_item *integrity_item,
+                               uint64_t pattern_flags, uint64_t *last_item,
+                               const struct rte_flow_item *integrity_items[2],
                                struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
        const struct rte_flow_item_integrity *mask = (typeof(mask))
                                                     integrity_item->mask;
        const struct rte_flow_item_integrity *spec = (typeof(spec))
                                                     integrity_item->spec;
-       uint32_t protocol;
 
        if (!priv->config.hca_attr.pkt_integrity_match)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          integrity_item,
                                          "packet integrity integrity_item not supported");
+       if (!spec)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         integrity_item,
+                                         "no spec for integrity item");
        if (!mask)
                mask = &rte_flow_item_integrity_mask;
        if (!mlx5_validate_integrity_item(mask))
@@ -6681,35 +6714,105 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          integrity_item,
                                          "unsupported integrity filter");
-       tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
        if (spec->level > 1) {
-               if (!tunnel_item)
-                       return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing tunnel item");
-               item = tunnel_item;
-               end_item = mlx5_find_end_item(tunnel_item);
+               if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                NULL, "multiple inner integrity items not supported");
+               integrity_items[1] = integrity_item;
+               *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
        } else {
-               end_item = tunnel_item ? tunnel_item :
-                          mlx5_find_end_item(integrity_item);
-       }
-       if (mask->l3_ok || mask->ipv4_csum_ok) {
-               protocol = mlx5_flow_locate_proto_l3(&item, end_item);
-               if (!protocol)
-                       return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing L3 protocol");
+               if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                NULL, "multiple outer integrity items not supported");
+               integrity_items[0] = integrity_item;
+               *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
        }
-       if (mask->l4_ok || mask->l4_csum_ok) {
-               protocol = mlx5_flow_locate_proto_l4(&item, end_item);
-               if (!protocol)
-                       return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing L4 protocol");
+       return 0;
+}
+
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+                          const struct rte_flow_item *item,
+                          uint64_t item_flags,
+                          uint64_t *last_item,
+                          bool is_inner,
+                          struct rte_flow_error *error)
+{
+       const struct rte_flow_item_flex *flow_spec = item->spec;
+       const struct rte_flow_item_flex *flow_mask = item->mask;
+       struct mlx5_flex_item *flex;
+
+       if (!flow_spec)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item spec cannot be NULL");
+       if (!flow_mask)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item mask cannot be NULL");
+       if (item->last)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item last not supported");
+       if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "invalid flex flow item handle");
+       flex = (struct mlx5_flex_item *)flow_spec->handle;
+       switch (flex->tunnel_mode) {
+       case FLEX_TUNNEL_MODE_SINGLE:
+               if (item_flags &
+                   (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_OUTER:
+               if (is_inner)
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "inner flex item was not configured");
+               if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_INNER:
+               if (!is_inner)
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "outer flex item was not configured");
+               if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_MULTI:
+               if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+                   (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               }
+               break;
+       case FLEX_TUNNEL_MODE_TUNNEL:
+               if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex tunnel items not supported");
+               break;
+       default:
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  NULL, "invalid flex item configuration");
        }
+       *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+                    MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+                    MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
        return 0;
 }
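A hedged usage sketch for the FLEX item validated above: the item references a handle created earlier with rte_flow_flex_item_create(), and both spec and mask must be supplied (flex_handle is an assumed, previously created handle):

        struct rte_flow_item_flex flex_spec = { .handle = flex_handle };
        struct rte_flow_item_flex flex_mask = { .handle = flex_handle };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_FLEX,
                .spec = &flex_spec,
                .mask = &flex_mask,
        };
        /* The tunnel_mode chosen at creation decides whether the item may
         * appear outer, inner, both, or act as the tunnel itself. */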
 
@@ -6805,7 +6908,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                .std_tbl_fix = true,
        };
        const struct rte_eth_hairpin_conf *conf;
-       const struct rte_flow_item *rule_items = items;
+       const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
        const struct rte_flow_item *port_id_item = NULL;
        bool def_policy = false;
        uint16_t udp_dport = 0;
@@ -6815,6 +6918,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        tunnel = is_tunnel_offload_active(dev) ?
                 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
        if (tunnel) {
+               if (!priv->config.dv_flow_en)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                NULL, "tunnel offload requires DV flow interface");
                if (priv->representor)
                        return rte_flow_error_set
                                (error, ENOTSUP,
@@ -7132,16 +7240,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
-                       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
-                               return rte_flow_error_set
-                                       (error, ENOTSUP,
-                                        RTE_FLOW_ERROR_TYPE_ITEM,
-                                        NULL, "multiple integrity items not supported");
-                       ret = flow_dv_validate_item_integrity(dev, rule_items,
-                                                             items, error);
+                       ret = flow_dv_validate_item_integrity(dev, items,
+                                                             item_flags,
+                                                             &last_item,
+                                                             integrity_items,
+                                                             error);
                        if (ret < 0)
                                return ret;
-                       last_item = MLX5_FLOW_ITEM_INTEGRITY;
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        ret = flow_dv_validate_item_aso_ct(dev, items,
@@ -7154,6 +7259,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                         * list it here as a supported type
                         */
                        break;
+               case RTE_FLOW_ITEM_TYPE_FLEX:
+                       ret = flow_dv_validate_item_flex(dev, items, item_flags,
+                                                        &last_item,
+                                                        tunnel != 0, error);
+                       if (ret < 0)
+                               return ret;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -7161,6 +7273,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                }
                item_flags |= last_item;
        }
+       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+               ret = flow_dv_validate_item_integrity_post(integrity_items,
+                                                          item_flags, error);
+               if (ret)
+                       return ret;
+       }
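Integrity validation is now two-phase: each INTEGRITY item is only recorded during the pattern walk, and flow_dv_validate_item_integrity_post() checks the requested bits against the complete item_flags afterwards, so an integrity item may legally precede the L3/L4 items it refers to. A sketch of a pattern the post-check accepts, with field names per struct rte_flow_item_integrity:

        struct rte_flow_item_integrity spec = {
                .level = 0,                   /* outer part */
                .l3_ok = 1, .l4_ok = 1, .ipv4_csum_ok = 1,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_INTEGRITY, .spec = &spec },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* satisfies l3/ipv4 */
                { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* satisfies l4 */
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };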
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;
                bool shared_count = false;
@@ -7597,6 +7715,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
+                                                             item_flags,
                                                              actions, attr,
                                                              port_id_item,
                                                              &def_policy,
@@ -8643,18 +8762,19 @@ flow_dv_translate_item_gre_key(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] inner
- *   Item is inner pattern.
+ * @param[in] pattern_flags
+ *   Accumulated pattern flags.
  */
 static void
 flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
-                          int inner)
+                          uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_gre empty_gre = {0,};
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        struct {
@@ -8671,26 +8791,17 @@ flow_dv_translate_item_gre(void *matcher, void *key,
                        uint16_t value;
                };
        } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+       uint16_t protocol_m, protocol_v;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
-       if (!gre_v)
-               return;
-       if (!gre_m)
-               gre_m = &rte_flow_item_gre_mask;
-       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
-                rte_be_to_cpu_16(gre_m->protocol));
-       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-                rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+       if (!gre_v) {
+               gre_v = &empty_gre;
+               gre_m = &empty_gre;
+       } else {
+               if (!gre_m)
+                       gre_m = &rte_flow_item_gre_mask;
+       }
        gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
        gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
        MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
@@ -8708,6 +8819,17 @@ flow_dv_translate_item_gre(void *matcher, void *key,
        MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
                 gre_crks_rsvd0_ver_v.s_present &
                 gre_crks_rsvd0_ver_m.s_present);
+       protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+       protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+       if (!protocol_m) {
+               /* Force next protocol to prevent matcher duplication. */
+               protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+               if (protocol_v)
+                       protocol_m = 0xFFFF;
+       }
+       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                protocol_m & protocol_v);
 }
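With an all-zero protocol mask, GRE translation now derives the next protocol from the inner layers instead of leaving it wild, so GRE matchers that differ only in their inner headers stay distinct. An illustration using the helper introduced at the top of this patch:

        uint16_t forced = mlx5_translate_tunnel_etypes(MLX5_FLOW_LAYER_GRE |
                                                       MLX5_FLOW_LAYER_INNER_L2);
        /* forced == RTE_ETHER_TYPE_TEB (0x6558); an inner IPv4 pattern
         * yields 0x0800, and with no inner layers the helper returns 0,
         * leaving gre_protocol unmatched exactly as before. */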
 
 /**
@@ -8719,13 +8841,13 @@ flow_dv_translate_item_gre(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] inner
- *   Item is inner pattern.
+ * @param[in] pattern_flags
+ *   Accumulated pattern flags.
  */
 static void
 flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
-                            int inner)
+                            uint64_t pattern_flags)
 {
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
@@ -8752,7 +8874,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
                .mask = &gre_mask,
                .last = NULL,
        };
-       flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+       flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
        if (!nvgre_v)
                return;
        if (!nvgre_m)
@@ -8889,46 +9011,40 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
 
 static void
 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
-                                const struct rte_flow_item *item, int inner)
+                                const struct rte_flow_item *item,
+                                const uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
        const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       /* The item was validated to be on the outer side */
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
        void *misc_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
-       char *vni_m;
-       char *vni_v;
-       uint16_t dport;
-       int size;
-       int i;
+       char *vni_m =
+               MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+       char *vni_v =
+               MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+       int i, size = sizeof(vxlan_m->vni);
        uint8_t flags_m = 0xff;
        uint8_t flags_v = 0xc;
+       uint8_t m_protocol, v_protocol;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
-       dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
-               MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                        MLX5_UDP_PORT_VXLAN_GPE);
+       }
+       if (!vxlan_v) {
+               vxlan_v = &dummy_vxlan_gpe_hdr;
+               vxlan_m = &dummy_vxlan_gpe_hdr;
+       } else {
+               if (!vxlan_m)
+                       vxlan_m = &rte_flow_item_vxlan_gpe_mask;
        }
-       if (!vxlan_v)
-               return;
-       if (!vxlan_m)
-               vxlan_m = &rte_flow_item_vxlan_gpe_mask;
-       size = sizeof(vxlan_m->vni);
-       vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
-       vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
@@ -8938,10 +9054,23 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
        }
        MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
        MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
-       MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
-                vxlan_m->protocol);
-       MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
-                vxlan_v->protocol);
+       m_protocol = vxlan_m->protocol;
+       v_protocol = vxlan_v->protocol;
+       if (!m_protocol) {
+               /* Force next protocol to ensure the following headers are parsed. */
+               if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+               else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+               else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+               if (v_protocol)
+                       m_protocol = 0xFF;
+       }
+       MLX5_SET(fte_match_set_misc3, misc_m,
+                outer_vxlan_gpe_next_protocol, m_protocol);
+       MLX5_SET(fte_match_set_misc3, misc_v,
+                outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
 }
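This is the VXLAN-GPE half of the headline fix: an empty protocol mask no longer leaves the GPE next-protocol field wild; it is forced from the inner layers so hardware keeps parsing past the GPE header. A hedged pattern sketch; even with no spec or mask on the GPE item, the matcher below now gets next_protocol = RTE_VXLAN_GPE_TYPE_IPV4:

        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE },  /* no spec/mask */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },       /* inner IPv4 */
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };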
 
 /**
@@ -8959,49 +9088,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_geneve(void *matcher, void *key,
-                             const struct rte_flow_item *item, int inner)
+                             const struct rte_flow_item *item,
+                             uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_geneve empty_geneve = {0,};
        const struct rte_flow_item_geneve *geneve_m = item->mask;
        const struct rte_flow_item_geneve *geneve_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       /* GENEVE flow item validation allows a single tunnel item */
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
-       uint16_t dport;
        uint16_t gbhdr_m;
        uint16_t gbhdr_v;
-       char *vni_m;
-       char *vni_v;
-       size_t size, i;
+       char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+       char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+       size_t size = sizeof(geneve_m->vni), i;
+       uint16_t protocol_m, protocol_v;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
-       dport = MLX5_UDP_PORT_GENEVE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                        MLX5_UDP_PORT_GENEVE);
+       }
+       if (!geneve_v) {
+               geneve_v = &empty_geneve;
+               geneve_m = &empty_geneve;
+       } else {
+               if (!geneve_m)
+                       geneve_m = &rte_flow_item_geneve_mask;
        }
-       if (!geneve_v)
-               return;
-       if (!geneve_m)
-               geneve_m = &rte_flow_item_geneve_mask;
-       size = sizeof(geneve_m->vni);
-       vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
-       vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
        memcpy(vni_m, geneve_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & geneve_v->vni[i];
-       MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
-                rte_be_to_cpu_16(geneve_m->protocol));
-       MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
-                rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
        gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
        gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
        MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
@@ -9013,6 +9132,17 @@ flow_dv_translate_item_geneve(void *matcher, void *key,
        MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+       protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+       protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+       if (!protocol_m) {
+               /* Force the next protocol match to prevent matcher duplication. */
+               protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+               if (protocol_v)
+                       protocol_m = 0xFFFF;
+       }
+       MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+       MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+                protocol_m & protocol_v);
 }
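
/*
 * Editor's illustrative sketch, not part of the patch: how the forced
 * protocol match resolves for a GENEVE pattern carrying an inner Ethernet
 * item but an empty protocol mask. The function name and values are
 * hypothetical; mlx5_translate_tunnel_etypes() is the real helper.
 */
static void
geneve_protocol_forcing_example(void)
{
        uint64_t pattern_flags = MLX5_FLOW_LAYER_GENEVE |
                                 MLX5_FLOW_LAYER_INNER_L2;
        uint16_t protocol_m = 0; /* user left the protocol mask empty */
        uint16_t protocol_v = 0;

        if (!protocol_m) {
                protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
                if (protocol_v) /* RTE_ETHER_TYPE_TEB (0x6558) here */
                        protocol_m = 0xFFFF;
        }
        /* geneve_protocol_type is now matched exactly, so patterns that
         * differ only in inner layers no longer share an ambiguous matcher.
         */
}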
 
 /**
@@ -9209,16 +9339,22 @@ flow_dv_translate_item_mpls(void *matcher, void *key,
 
        switch (prev_layer) {
        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
-                        MLX5_UDP_PORT_MPLS);
+               if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+                                0xffff);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                                MLX5_UDP_PORT_MPLS);
+               }
                break;
        case MLX5_FLOW_LAYER_GRE:
                /* Fall-through. */
        case MLX5_FLOW_LAYER_GRE_KEY:
-               MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
-               MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-                        RTE_ETHER_TYPE_MPLS);
+               if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+                       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+                                0xffff);
+                       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                                RTE_ETHER_TYPE_MPLS);
+               }
                break;
        default:
                break;
@@ -9575,6 +9711,11 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
        struct mlx5_priv *priv;
        uint16_t mask, id;
 
+       if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
+               flow_dv_translate_item_source_vport(matcher, key,
+                       flow_dv_get_esw_manager_vport_id(dev), 0xffff);
+               return 0;
+       }
        mask = pid_m ? pid_m->id : 0xffff;
        id = pid_v ? pid_v->id : dev->data->port_id;
        priv = mlx5_port_to_eswitch_info(id, item == NULL);
@@ -9910,7 +10051,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
         */
        if (!ecpri_m->hdr.common.u32)
                return;
-       samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+       samples = priv->sh->ecpri_parser.ids;
        /* Need to take the whole DW as the mask to fill the entry. */
        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
                            prog_sample_field_value_0);
@@ -10013,6 +10154,27 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
                               reg_value, reg_mask);
 }
 
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           struct mlx5_flow *dev_flow, bool is_inner)
+{
+       const struct rte_flow_item_flex *spec =
+               (const struct rte_flow_item_flex *)item->spec;
+       int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+       MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+       if (index < 0)
+               return;
+       if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+               /* Don't count both inner and outer flex items in one rule. */
+               if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+                       MLX5_ASSERT(false);
+               dev_flow->handle->flex_item |= RTE_BIT32(index);
+       }
+       mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
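
/*
 * Editor's note, illustration only: the flex_item bitmap set above is
 * unwound in flow_dv_destroy() further below. A minimal sketch of the
 * pairing; the bitmap pointer is hypothetical and the real release loop
 * also calls mlx5_flex_release_index().
 */
static void
flex_item_bitmap_example(uint32_t *flex_item, int index)
{
        if (!(*flex_item & RTE_BIT32(index)))
                *flex_item |= RTE_BIT32(index); /* one reference per rule */
        /* ...later, on flow destruction: */
        while (*flex_item) {
                int i = rte_bsf32(*flex_item); /* lowest set bit */

                *flex_item &= ~RTE_BIT32(i); /* drop reference i */
        }
}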
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)                                     \
@@ -10793,22 +10955,22 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
        void *misc_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        struct mlx5_txq_ctrl *txq;
-       uint32_t queue;
-
+       uint32_t queue, mask;
 
        queue_m = (const void *)item->mask;
-       if (!queue_m)
-               return;
        queue_v = (const void *)item->spec;
        if (!queue_v)
                return;
        txq = mlx5_txq_get(dev, queue_v->queue);
        if (!txq)
                return;
-       queue = txq->obj->sq->id;
-       MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
-       MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
-                queue & queue_m->queue);
+       if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
+               queue = txq->obj->sq->id;
+       else
+               queue = txq->obj->sq_obj.sq->id;
+       mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
+       MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
+       MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
        mlx5_txq_release(dev, queue_v->queue);
 }
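
/*
 * Editor's sketch with a hypothetical helper: the Tx queue mask is now
 * optional. A NULL mask defaults to an exact match on the SQ number,
 * where the old code skipped the item entirely.
 */
static uint32_t
tx_queue_mask_example(const uint32_t *queue_m, uint32_t sq_id)
{
        uint32_t mask = queue_m == NULL ? UINT32_MAX : *queue_m;

        return sq_id & mask; /* value written to source_sqn */
}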
 
@@ -10836,9 +10998,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
                        else
                                dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10846,9 +11008,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
                        else
                                dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10862,11 +11024,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
                return;
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-               if (rss_types & ETH_RSS_UDP) {
-                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+               if (rss_types & RTE_ETH_RSS_UDP) {
+                       if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_UDP;
-                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_UDP;
                        else
@@ -10874,11 +11036,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
                }
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-               if (rss_types & ETH_RSS_TCP) {
-                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+               if (rss_types & RTE_ETH_RSS_TCP) {
+                       if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_TCP;
-                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_TCP;
                        else
@@ -11878,18 +12040,18 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
        }
        pool->flow_hit_aso_obj = obj;
        pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
-       rte_spinlock_lock(&mng->resize_sl);
+       rte_rwlock_write_lock(&mng->resize_rwl);
        pool->index = mng->next;
        /* Resize pools array if there is no room for the new pool in it. */
        if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
                claim_zero(mlx5_devx_cmd_destroy(obj));
                mlx5_free(pool);
-               rte_spinlock_unlock(&mng->resize_sl);
+               rte_rwlock_write_unlock(&mng->resize_rwl);
                return NULL;
        }
        mng->pools[pool->index] = pool;
        mng->next++;
-       rte_spinlock_unlock(&mng->resize_sl);
+       rte_rwlock_write_unlock(&mng->resize_rwl);
        /* Assign the first action in the new pool, the rest go to free list. */
        *age_free = &pool->actions[0];
        for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
@@ -12007,34 +12169,24 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
                               void *headers_m, void *headers_v)
 {
        if (mask->l4_ok) {
-               /* application l4_ok filter aggregates all hardware l4 filters
-                * therefore hw l4_checksum_ok must be implicitly added here.
+               /* The RTE l4_ok filter aggregates the hardware l4_ok and
+                * l4_checksum_ok filters.
+                * A positive l4_ok match requires both hardware L4
+                * integrity bits to match.
+                * A negative match checks the hardware l4_checksum_ok bit
+                * only, because hardware clears that bit for all packets
+                * with bad L4.
+                */
-               struct rte_flow_item_integrity local_item;
-
-               local_item.l4_csum_ok = 1;
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
-                        local_item.l4_csum_ok);
                if (value->l4_ok) {
-                       /* application l4_ok = 1 matches sets both hw flags
-                        * l4_ok and l4_checksum_ok flags to 1.
-                        */
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                l4_checksum_ok, local_item.l4_csum_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
-                                mask->l4_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
-                                value->l4_ok);
-               } else {
-                       /* application l4_ok = 0 matches on hw flag
-                        * l4_checksum_ok = 0 only.
-                        */
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                l4_checksum_ok, 0);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
                }
-       } else if (mask->l4_csum_ok) {
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
-                        mask->l4_csum_ok);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+                        !!value->l4_ok);
+       }
+       if (mask->l4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
                         value->l4_csum_ok);
        }
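
/*
 * Editor's summary of the resulting L4 match bits (mask/value), assuming
 * the semantics described in the comment above:
 *
 *   RTE request        hw l4_ok    hw l4_checksum_ok
 *   l4_ok == 1         1/1         1/1
 *   l4_ok == 0         -           1/0
 *   l4_csum_ok == x    -           1/x
 */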
@@ -12043,77 +12195,102 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
 static void
 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
                               const struct rte_flow_item_integrity *value,
-                              void *headers_m, void *headers_v,
-                              bool is_ipv4)
+                              void *headers_m, void *headers_v, bool is_ipv4)
 {
        if (mask->l3_ok) {
-               /* application l3_ok filter aggregates all hardware l3 filters
-                * therefore hw ipv4_checksum_ok must be implicitly added here.
+               /* For IPv4, the RTE l3_ok filter aggregates the hardware
+                * l3_ok and ipv4_checksum_ok filters.
+                * A positive l3_ok match requires both hardware L3
+                * integrity bits to match.
+                * A negative match checks the hardware ipv4_checksum_ok bit
+                * only, because hardware clears that bit for all packets
+                * with bad L3.
+                */
-               struct rte_flow_item_integrity local_item;
-
-               local_item.ipv4_csum_ok = !!is_ipv4;
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
-                        local_item.ipv4_csum_ok);
-               if (value->l3_ok) {
+               if (is_ipv4) {
+                       if (value->l3_ok) {
+                               MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+                                        l3_ok, 1);
+                               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                        l3_ok, 1);
+                       }
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+                                ipv4_checksum_ok, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                ipv4_checksum_ok, local_item.ipv4_csum_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
-                                mask->l3_ok);
+                                ipv4_checksum_ok, !!value->l3_ok);
+               } else {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
                                 value->l3_ok);
-               } else {
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                ipv4_checksum_ok, 0);
                }
-       } else if (mask->ipv4_csum_ok) {
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
-                        mask->ipv4_csum_ok);
+       }
+       if (mask->ipv4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
                         value->ipv4_csum_ok);
        }
 }
 
 static void
-flow_dv_translate_item_integrity(void *matcher, void *key,
-                                const struct rte_flow_item *head_item,
-                                const struct rte_flow_item *integrity_item)
+set_integrity_bits(void *headers_m, void *headers_v,
+                  const struct rte_flow_item *integrity_item, bool is_l3_ip4)
 {
+       const struct rte_flow_item_integrity *spec = integrity_item->spec;
        const struct rte_flow_item_integrity *mask = integrity_item->mask;
-       const struct rte_flow_item_integrity *value = integrity_item->spec;
-       const struct rte_flow_item *tunnel_item, *end_item, *item;
-       void *headers_m;
-       void *headers_v;
-       uint32_t l3_protocol;
 
-       if (!value)
-               return;
+       /* Item validation guarantees a non-NULL spec pointer. */
+       MLX5_ASSERT(spec != NULL);
        if (!mask)
                mask = &rte_flow_item_integrity_mask;
-       if (value->level > 1) {
+       flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
+                                      is_l3_ip4);
+       flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
+}
+
+static void
+flow_dv_translate_item_integrity_post(void *matcher, void *key,
+                                     const struct rte_flow_item
+                                     *integrity_items[2],
+                                     uint64_t pattern_flags)
+{
+       void *headers_m, *headers_v;
+       bool is_l3_ip4;
+
+       if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
+               is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
+                           0;
+               set_integrity_bits(headers_m, headers_v,
+                                  integrity_items[1], is_l3_ip4);
+       }
+       if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+               is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
+                           0;
+               set_integrity_bits(headers_m, headers_v,
+                                  integrity_items[0], is_l3_ip4);
        }
-       tunnel_item = mlx5_flow_find_tunnel_item(head_item);
-       if (value->level > 1) {
-               /* tunnel item was verified during the item validation */
-               item = tunnel_item;
-               end_item = mlx5_find_end_item(tunnel_item);
+}
+
+static void
+flow_dv_translate_item_integrity(const struct rte_flow_item *item,
+                                const struct rte_flow_item *integrity_items[2],
+                                uint64_t *last_item)
+{
+       const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
+
+       /* Item validation guarantees a non-NULL spec pointer. */
+       MLX5_ASSERT(spec != NULL);
+       if (spec->level > 1) {
+               integrity_items[1] = item;
+               *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
        } else {
-               item = head_item;
-               end_item = tunnel_item ? tunnel_item :
-                          mlx5_find_end_item(integrity_item);
+               integrity_items[0] = item;
+               *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
        }
-       l3_protocol = mask->l3_ok ?
-                     mlx5_flow_locate_proto_l3(&item, end_item) : 0;
-       flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
-                                      l3_protocol == RTE_ETHER_TYPE_IPV4);
-       flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
 }
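
/*
 * Editor's note: integrity translation is now two-phase. This function
 * only records the item into its slot (index 0 outer, index 1 inner);
 * flow_dv_translate_item_integrity_post() writes the matcher bits after
 * the item loop, once the pattern flags tell whether each layer's L3 is
 * IPv4. This replaces the removed forward scan for the tunnel item.
 */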
 
 /**
@@ -12529,7 +12706,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        (1 << MLX5_SCALE_FLOW_GROUP_BIT),
                .std_tbl_fix = true,
        };
-       const struct rte_flow_item *head_item = items;
+       const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+       const struct rte_flow_item *tunnel_item = NULL;
 
        if (!wks)
                return rte_flow_error_set(error, ENOMEM,
@@ -13299,10 +13477,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       flow_dv_translate_item_gre(match_mask, match_value,
-                                                  items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GRE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                        flow_dv_translate_item_gre_key(match_mask,
@@ -13310,10 +13487,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       flow_dv_translate_item_nvgre(match_mask, match_value,
-                                                    items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GRE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        flow_dv_translate_item_vxlan(dev, attr,
@@ -13323,17 +13499,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-                       flow_dv_translate_item_vxlan_gpe(match_mask,
-                                                        match_value, items,
-                                                        tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
-                       flow_dv_translate_item_geneve(match_mask, match_value,
-                                                     items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GENEVE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
                        ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
@@ -13422,14 +13595,20 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
-                       flow_dv_translate_item_integrity(match_mask,
-                                                        match_value,
-                                                        head_item, items);
+                       flow_dv_translate_item_integrity(items, integrity_items,
+                                                        &last_item);
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        flow_dv_translate_item_aso_ct(dev, match_mask,
                                                      match_value, items);
                        break;
+               case RTE_FLOW_ITEM_TYPE_FLEX:
+                       flow_dv_translate_item_flex(dev, match_mask,
+                                                   match_value, items,
+                                                   dev_flow, tunnel != 0);
+                       last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+                                   MLX5_FLOW_ITEM_OUTER_FLEX;
+                       break;
                default:
                        break;
                }
@@ -13448,6 +13627,27 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                                   match_value, NULL, attr))
                        return -rte_errno;
        }
+       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+               flow_dv_translate_item_integrity_post(match_mask, match_value,
+                                                     integrity_items,
+                                                     item_flags);
+       }
+       if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+               flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+                                                tunnel_item, item_flags);
+       else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+               flow_dv_translate_item_geneve(match_mask, match_value,
+                                             tunnel_item, item_flags);
+       else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+               if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+                       flow_dv_translate_item_gre(match_mask, match_value,
+                                                  tunnel_item, item_flags);
+               else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+                       flow_dv_translate_item_nvgre(match_mask, match_value,
+                                                    tunnel_item, item_flags);
+               else
+                       MLX5_ASSERT(false);
+       }
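
/*
 * Editor's sketch: a pattern for which the deferred translation matters.
 * When the loop reaches the GENEVE item, MLX5_FLOW_LAYER_INNER_L2 is not
 * set yet, so translating it in place could not force the TEB protocol:
 */
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_GENEVE },
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* inner L2 */
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
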
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
                                              dev_flow->dv.value.buf));
@@ -13531,7 +13731,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
        matcher.priority = mlx5_get_matcher_priority(dev, attr,
-                                       matcher.priority);
+                                                    matcher.priority,
+                                                    dev_flow->external);
        /**
         * When creating meter drop flow in drop table, using original
         * 5-tuple match, the matcher priority should be lower than
@@ -14303,6 +14504,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (!dev_handle)
                        return;
                flow->dev_handles = dev_handle->next.next;
+               while (dev_handle->flex_item) {
+                       int index = rte_bsf32(dev_handle->flex_item);
+
+                       mlx5_flex_release_index(dev, index);
+                       dev_handle->flex_item &= ~RTE_BIT32(index);
+               }
                if (dev_handle->dvh.matcher)
                        flow_dv_matcher_release(dev, dev_handle);
                if (dev_handle->dvh.rix_sample)
@@ -14418,9 +14625,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4:
                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
                        *hash_field &= ~MLX5_RSS_HASH_IPV4;
-                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_IPV4;
-                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_IPV4;
                        else
                                *hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14429,9 +14636,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV6:
                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
                        *hash_field &= ~MLX5_RSS_HASH_IPV6;
-                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_IPV6;
-                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_IPV6;
                        else
                                *hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14440,11 +14647,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4_UDP:
                /* fall-through. */
        case MLX5_RSS_HASH_IPV6_UDP:
-               if (rss_types & ETH_RSS_UDP) {
+               if (rss_types & RTE_ETH_RSS_UDP) {
                        *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
                        else
                                *hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14453,11 +14660,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4_TCP:
                /* fall-through. */
        case MLX5_RSS_HASH_IPV6_TCP:
-               if (rss_types & ETH_RSS_TCP) {
+               if (rss_types & RTE_ETH_RSS_TCP) {
                        *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
                        else
                                *hash_field |= MLX5_TCP_IBV_RX_HASH;
@@ -14496,7 +14703,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
        size_t i;
        int err;
 
-       if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+       if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+                                    !!dev->data->dev_started)) {
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot setup indirection table");
@@ -14536,7 +14744,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
 error_hrxq_new:
        err = rte_errno;
        __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
-       if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+       if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
                shared_rss->ind_tbl = NULL;
        rte_errno = err;
        return -rte_errno;
@@ -14605,8 +14813,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
        origin = &shared_rss->origin;
        origin->func = rss->func;
        origin->level = rss->level;
-       /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-       origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+       /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+       origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        /* NULL RSS key indicates default RSS key. */
        rss_key = !rss->key ? rss_hash_default_key : rss->key;
        memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
@@ -14679,7 +14887,8 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
                                          NULL,
                                          "shared rss hrxq has references");
        queue = shared_rss->ind_tbl->queues;
-       remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+       remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+                                              !!dev->data->dev_started);
        if (remaining)
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
@@ -14867,6 +15076,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
        void *queue = NULL;
        uint16_t *queue_old = NULL;
        uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+       bool dev_started = !!dev->data->dev_started;
 
        if (!shared_rss)
                return rte_flow_error_set(error, EINVAL,
@@ -14889,7 +15099,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
        rte_spinlock_lock(&shared_rss->action_rss_sl);
        queue_old = shared_rss->ind_tbl->queues;
        ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
-                                       queue, action_conf->queue_num, true);
+                                       queue, action_conf->queue_num,
+                                       true /* standalone */,
+                                       dev_started /* ref_new_qs */,
+                                       dev_started /* deref_old_qs */);
        if (ret) {
                mlx5_free(queue);
                ret = rte_flow_error_set(error, rte_errno,
@@ -15622,7 +15835,7 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
                    struct rte_flow_error *error)
 {
@@ -15660,6 +15873,48 @@ flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
                                  "counters are not available");
 }
 
+/**
+ * Query counter's action pointer for a DV flow rule via DevX.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] cnt_idx
+ *   Index to the flow counter.
+ * @param[out] action_ptr
+ *   Action pointer for counter.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
+       void **action_ptr, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (!priv->sh->devx || !action_ptr)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "counters are not supported");
+
+       if (cnt_idx) {
+               struct mlx5_flow_counter *cnt;
+
+               cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
+               if (cnt) {
+                       *action_ptr = cnt->action;
+                       return 0;
+               }
+       }
+       return rte_flow_error_set(error, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "counters are not available");
+}
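
/*
 * Editor's sketch, hypothetical caller: retrieving the DevX counter
 * action pointer so it can be reused when building another rule.
 */
static int
query_count_ptr_example(struct rte_eth_dev *dev, uint32_t cnt_idx)
{
        struct rte_flow_error err;
        void *cnt_action = NULL;
        int ret;

        ret = flow_dv_query_count_ptr(dev, cnt_idx, &cnt_action, &err);
        if (ret == 0) {
                /* cnt_action is valid while the counter is allocated. */
        }
        return ret;
}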
+
 static int
 flow_dv_action_query(struct rte_eth_dev *dev,
                     const struct rte_flow_action_handle *handle, void *data,
@@ -17411,12 +17666,22 @@ static inline int
 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
                               const struct rte_flow_action_rss *r2)
 {
-       if (!r1 || !r2)
+       if (r1 == NULL || r2 == NULL)
                return 0;
-       if (r1->func != r2->func || r1->level != r2->level ||
-           r1->types != r2->types || r1->key_len != r2->key_len ||
-           memcmp(r1->key, r2->key, r1->key_len))
+       if (!(r1->level <= 1 && r2->level <= 1) &&
+           !(r1->level > 1 && r2->level > 1))
                return 1;
+       if (r1->types != r2->types &&
+           !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
+             (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
+               return 1;
+       if (r1->key || r2->key) {
+               const void *key1 = r1->key ? r1->key : rss_hash_default_key;
+               const void *key2 = r2->key ? r2->key : rss_hash_default_key;
+
+               if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
+                       return 1;
+       }
        return 0;
 }
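
/*
 * Editor's illustration: two configurations the relaxed comparison treats
 * as equivalent, which the old field-by-field check rejected. Values are
 * hypothetical.
 */
static void
mtr_policy_rss_compare_example(void)
{
        struct rte_flow_action_rss r1 = {
                .level = 0, .types = 0, .key = NULL,
        };
        struct rte_flow_action_rss r2 = {
                .level = 1,                  /* still outer RSS */
                .types = RTE_ETH_RSS_IP,     /* the default for types == 0 */
                .key = rss_hash_default_key, /* the default key */
                .key_len = MLX5_RSS_HASH_KEY_LEN,
        };

        MLX5_ASSERT(flow_dv_mtr_policy_rss_compare(&r1, &r2) == 0);
}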
 
@@ -17568,6 +17833,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                *policy_mode = MLX5_MTR_POLICY_MODE_OG;
        } else if (def_green && !def_yellow) {
                *policy_mode = MLX5_MTR_POLICY_MODE_OY;
+       } else {
+               *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
        }
        /* Set to empty string in case of NULL pointer access by user. */
        flow_err.message = "";
@@ -17883,6 +18150,108 @@ flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
        return 0;
 }
 
+/**
+ * Discover the number of available flow priorities
+ * by trying to create a flow with the highest priority value
+ * for each possible number.
+ *
+ * @param[in] dev
+ *   Ethernet device.
+ * @param[in] vprio
+ *   List of possible numbers of available priorities.
+ * @param[in] vprio_n
+ *   Size of @p vprio array.
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code and rte_errno is set.
+ */
+static int
+flow_dv_discover_priorities(struct rte_eth_dev *dev,
+                           const uint16_t *vprio, int vprio_n)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
+       struct rte_flow_item_eth eth;
+       struct rte_flow_item item = {
+               .type = RTE_FLOW_ITEM_TYPE_ETH,
+               .spec = &eth,
+               .mask = &eth,
+       };
+       struct mlx5_flow_dv_matcher matcher = {
+               .mask = {
+                       .size = sizeof(matcher.mask.buf),
+               },
+       };
+       union mlx5_flow_tbl_key tbl_key;
+       struct mlx5_flow flow;
+       void *action;
+       struct rte_flow_error error;
+       uint8_t misc_mask;
+       int i, err, ret = -ENOTSUP;
+
+       /*
+        * Prepare a flow with a catch-all pattern and a drop action.
+        * Use the drop queue, because a shared drop action may be unavailable.
+        */
+       action = priv->drop_queue.hrxq->action;
+       if (action == NULL) {
+               DRV_LOG(ERR, "Priority discovery requires a drop action");
+               rte_errno = ENOTSUP;
+               return -rte_errno;
+       }
+       memset(&flow, 0, sizeof(flow));
+       flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
+       if (flow.handle == NULL) {
+               DRV_LOG(ERR, "Cannot create flow handle");
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       flow.ingress = true;
+       flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+       flow.dv.actions[0] = action;
+       flow.dv.actions_n = 1;
+       memset(&eth, 0, sizeof(eth));
+       flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
+                                  &item, /* inner */ false, /* group */ 0);
+       matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
+       for (i = 0; i < vprio_n; i++) {
+               /* Configure the next proposed maximum priority. */
+               matcher.priority = vprio[i] - 1;
+               memset(&tbl_key, 0, sizeof(tbl_key));
+               err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
+                                              /* tunnel */ NULL,
+                                              /* group */ 0,
+                                              &error);
+               if (err != 0) {
+                       /* Matcher registration is pure SW and must always succeed. */
+                       DRV_LOG(ERR, "Cannot register matcher");
+                       ret = -rte_errno;
+                       break;
+               }
+               /* Try to apply the flow to HW. */
+               misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
+               __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
+               err = mlx5_flow_os_create_flow
+                               (flow.handle->dvh.matcher->matcher_object,
+                                (void *)&flow.dv.value, flow.dv.actions_n,
+                                flow.dv.actions, &flow.handle->drv_flow);
+               if (err == 0) {
+                       claim_zero(mlx5_flow_os_destroy_flow
+                                               (flow.handle->drv_flow));
+                       flow.handle->drv_flow = NULL;
+               }
+               claim_zero(flow_dv_matcher_release(dev, flow.handle));
+               if (err != 0)
+                       break;
+               ret = vprio[i];
+       }
+       mlx5_ipool_free(pool, flow.handle_idx);
+       /* Set rte_errno if no expected priority value matched. */
+       if (ret < 0)
+               rte_errno = -ret;
+       return ret;
+}
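
/*
 * Editor's sketch: probing the priority space through the new driver op,
 * assuming the usual candidate set of 8 or 16 priorities.
 */
static int
discover_priorities_example(struct rte_eth_dev *dev)
{
        static const uint16_t vprio[] = { 8, 16 };
        int n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));

        if (n < 0)
                return n; /* rte_errno is already set */
        return n; /* number of supported flow priorities */
}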
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
@@ -17916,6 +18285,9 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
+       .discover_priorities = flow_dv_discover_priorities,
+       .item_create = flow_dv_item_create,
+       .item_release = flow_dv_item_release,
 };
 
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */