net/mlx5: fix GRE flow item matching
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
index 38110cc..1a9c040 100644 (file)
@@ -93,6 +93,20 @@ static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+       if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+               return RTE_ETHER_TYPE_TEB;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+               return RTE_ETHER_TYPE_IPV4;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+               return RTE_ETHER_TYPE_IPV6;
+       else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+               return RTE_ETHER_TYPE_MPLS;
+       return 0;
+}
+
 static int16_t
 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
 {
@@ -1470,11 +1484,11 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_DMAC_15_0};
                                if (width < 16) {
-                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                       mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE16(0xffff);
+                                       mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
@@ -1483,11 +1497,11 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
-                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
-                                                     (32 - width)) << off);
+                       mask[0] = rte_cpu_to_be_32((0xffffffff >>
+                                                   (32 - width)) << off);
                } else {
                        if (data->offset < 16)
-                               info[idx++] = (struct field_modify_info){2, 4,
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_DMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
@@ -1500,11 +1514,11 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){2, 4,
                                                MLX5_MODI_OUT_SMAC_15_0};
                                if (width < 16) {
-                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                       mask[1] = rte_cpu_to_be_16(0xffff >>
                                                                 (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE16(0xffff);
+                                       mask[1] = RTE_BE16(0xffff);
                                        width -= 16;
                                }
                                if (!width)
@@ -1513,11 +1527,11 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
-                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
-                                                     (32 - width)) << off);
+                       mask[0] = rte_cpu_to_be_32((0xffffffff >>
+                                                   (32 - width)) << off);
                } else {
                        if (data->offset < 16)
-                               info[idx++] = (struct field_modify_info){2, 4,
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_SMAC_15_0};
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
@@ -1582,12 +1596,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1598,12 +1612,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1614,12 +1628,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1628,17 +1642,16 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_127_96};
-                       mask[idx] = rte_cpu_to_be_32(0xffffffff >>
-                                                    (32 - width));
+                       mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 12,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_31_0};
                        if (data->offset < 64)
-                               info[idx++] = (struct field_modify_info){4, 8,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_63_32};
                        if (data->offset < 96)
-                               info[idx++] = (struct field_modify_info){4, 4,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
@@ -1651,12 +1664,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 12,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[3] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[3] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1667,12 +1680,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 8,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[2] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[2] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1683,12 +1696,12 @@ mlx5_flow_field_id_to_modify_info
                                info[idx] = (struct field_modify_info){4, 4,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                                if (width < 32) {
-                                       mask[idx] =
+                                       mask[1] =
                                                rte_cpu_to_be_32(0xffffffff >>
                                                                 (32 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
+                                       mask[1] = RTE_BE32(0xffffffff);
                                        width -= 32;
                                }
                                if (!width)
@@ -1697,17 +1710,16 @@ mlx5_flow_field_id_to_modify_info
                        }
                        info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_127_96};
-                       mask[idx] = rte_cpu_to_be_32(0xffffffff >>
-                                                    (32 - width));
+                       mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
                } else {
                        if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 12,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_31_0};
                        if (data->offset < 64)
-                               info[idx++] = (struct field_modify_info){4, 8,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_63_32};
                        if (data->offset < 96)
-                               info[idx++] = (struct field_modify_info){4, 4,
+                               info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DIPV6_95_64};
                        if (data->offset < 128)
                                info[idx++] = (struct field_modify_info){4, 0,
@@ -5121,6 +5133,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  *   Pointer to rte_eth_dev structure.
  * @param[in] action_flags
  *   Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ *   Holds the items detected.
  * @param[in] action
  *   Pointer to the meter action.
  * @param[in] attr
@@ -5135,7 +5149,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  */
 static int
 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
-                               uint64_t action_flags,
+                               uint64_t action_flags, uint64_t item_flags,
                                const struct rte_flow_action *action,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item *port_id_item,
@@ -5239,6 +5253,35 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
                                                NULL,
                                                "Flow and meter policy "
                                                "have different src port.");
+               } else if (mtr_policy->is_rss) {
+                       struct mlx5_flow_meter_policy *fp;
+                       struct mlx5_meter_policy_action_container *acg;
+                       struct mlx5_meter_policy_action_container *acy;
+                       const struct rte_flow_action *rss_act;
+                       int ret;
+
+                       fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+                                                               mtr_policy);
+                       if (fp == NULL)
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                                 "Unable to get the final "
+                                                 "policy in the hierarchy");
+                       acg = &fp->act_cnt[RTE_COLOR_GREEN];
+                       acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+                       MLX5_ASSERT(acg->fate_action ==
+                                   MLX5_FLOW_FATE_SHARED_RSS ||
+                                   acy->fate_action ==
+                                   MLX5_FLOW_FATE_SHARED_RSS);
+                       if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+                               rss_act = acg->rss;
+                       else
+                               rss_act = acy->rss;
+                       ret = mlx5_flow_validate_action_rss(rss_act,
+                                       action_flags, dev, attr,
+                                       item_flags, error);
+                       if (ret)
+                               return ret;
                }
                *def_policy = false;
        }
@@ -5574,6 +5617,10 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, jump should "
                                          "be after sample action");
+       if (*action_flags & MLX5_FLOW_ACTION_CT)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "Sample after CT not supported");
        act = sample->actions;
        for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
@@ -6687,6 +6734,88 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+                          const struct rte_flow_item *item,
+                          uint64_t item_flags,
+                          uint64_t *last_item,
+                          bool is_inner,
+                          struct rte_flow_error *error)
+{
+       const struct rte_flow_item_flex *flow_spec = item->spec;
+       const struct rte_flow_item_flex *flow_mask = item->mask;
+       struct mlx5_flex_item *flex;
+
+       if (!flow_spec)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item spec cannot be NULL");
+       if (!flow_mask)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item mask cannot be NULL");
+       if (item->last)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "flex flow item last not supported");
+       if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "invalid flex flow item handle");
+       flex = (struct mlx5_flex_item *)flow_spec->handle;
+       switch (flex->tunnel_mode) { /* validation failures must be returned */
+       case FLEX_TUNNEL_MODE_SINGLE:
+               if (item_flags &
+                   (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_OUTER:
+               if (is_inner)
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "inner flex item was not configured");
+               if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_INNER:
+               if (!is_inner)
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "outer flex item was not configured");
+               if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               break;
+       case FLEX_TUNNEL_MODE_MULTI:
+               if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+                   (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex items not supported");
+               }
+               break;
+       case FLEX_TUNNEL_MODE_TUNNEL:
+               if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+                       return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          NULL, "multiple flex tunnel items not supported");
+               break;
+       default:
+               return rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  NULL, "invalid flex item configuration");
+       }
+       *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+                    MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+                    MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
+       return 0;
+}
+
 /**
  * Internal validation function. For validating both actions and items.
  *
@@ -6789,6 +6918,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        tunnel = is_tunnel_offload_active(dev) ?
                 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
        if (tunnel) {
+               if (!priv->config.dv_flow_en)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                NULL, "tunnel offload requires DV flow interface");
                if (priv->representor)
                        return rte_flow_error_set
                                (error, ENOTSUP,
@@ -7125,6 +7259,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                         * list it here as a supported type
                         */
                        break;
+               case RTE_FLOW_ITEM_TYPE_FLEX:
+                       ret = flow_dv_validate_item_flex(dev, items, item_flags,
+                                                        &last_item,
+                                                        tunnel != 0, error);
+                       if (ret < 0)
+                               return ret;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -7574,6 +7715,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
+                                                             item_flags,
                                                              actions, attr,
                                                              port_id_item,
                                                              &def_policy,
@@ -8620,18 +8762,19 @@ flow_dv_translate_item_gre_key(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] inner
- *   Item is inner pattern.
+ * @param[in] pattern_flags
+ *   Accumulated pattern flags.
  */
 static void
 flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
-                          int inner)
+                          uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_gre empty_gre = {0,};
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        struct {
@@ -8648,26 +8791,17 @@ flow_dv_translate_item_gre(void *matcher, void *key,
                        uint16_t value;
                };
        } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+       uint16_t protocol_m, protocol_v;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
-       if (!gre_v)
-               return;
-       if (!gre_m)
-               gre_m = &rte_flow_item_gre_mask;
-       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
-                rte_be_to_cpu_16(gre_m->protocol));
-       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-                rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+       if (!gre_v) {
+               gre_v = &empty_gre;
+               gre_m = &empty_gre;
+       } else {
+               if (!gre_m)
+                       gre_m = &rte_flow_item_gre_mask;
+       }
        gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
        gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
        MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
@@ -8685,6 +8819,17 @@ flow_dv_translate_item_gre(void *matcher, void *key,
        MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
                 gre_crks_rsvd0_ver_v.s_present &
                 gre_crks_rsvd0_ver_m.s_present);
+       protocol_m = rte_be_to_cpu_16(gre_m->protocol);
+       protocol_v = rte_be_to_cpu_16(gre_v->protocol);
+       if (!protocol_m) {
+               /* Force next protocol to prevent matchers duplication */
+               protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+               if (protocol_v)
+                       protocol_m = 0xFFFF;
+       }
+       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
+       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                protocol_m & protocol_v);
 }
 
 /**
@@ -8696,13 +8841,13 @@ flow_dv_translate_item_gre(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] inner
- *   Item is inner pattern.
+ * @param[in] pattern_flags
+ *   Accumulated pattern flags.
  */
 static void
 flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
-                            int inner)
+                            unsigned long pattern_flags)
 {
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
@@ -8729,7 +8874,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
                .mask = &gre_mask,
                .last = NULL,
        };
-       flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+       flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
        if (!nvgre_v)
                return;
        if (!nvgre_m)
@@ -8866,46 +9011,40 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
 
 static void
 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
-                                const struct rte_flow_item *item, int inner)
+                                const struct rte_flow_item *item,
+                                const uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
        const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       /* The item was validated to be on the outer side */
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
        void *misc_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
-       char *vni_m;
-       char *vni_v;
-       uint16_t dport;
-       int size;
-       int i;
+       char *vni_m =
+               MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+       char *vni_v =
+               MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+       int i, size = sizeof(vxlan_m->vni);
        uint8_t flags_m = 0xff;
        uint8_t flags_v = 0xc;
+       uint8_t m_protocol, v_protocol;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
-       dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
-               MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                        MLX5_UDP_PORT_VXLAN_GPE);
+       }
+       if (!vxlan_v) {
+               vxlan_v = &dummy_vxlan_gpe_hdr;
+               vxlan_m = &dummy_vxlan_gpe_hdr;
+       } else {
+               if (!vxlan_m)
+                       vxlan_m = &rte_flow_item_vxlan_gpe_mask;
        }
-       if (!vxlan_v)
-               return;
-       if (!vxlan_m)
-               vxlan_m = &rte_flow_item_vxlan_gpe_mask;
-       size = sizeof(vxlan_m->vni);
-       vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
-       vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
@@ -8915,10 +9054,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
        }
        MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
        MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
-       MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
-                vxlan_m->protocol);
-       MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
-                vxlan_v->protocol);
+       m_protocol = vxlan_m->protocol;
+       v_protocol = vxlan_v->protocol;
+       if (!m_protocol) {
+               m_protocol = 0xff;
+               /* Force next protocol to ensure next headers parsing. */
+               if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+               else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+               else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+                       v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+       }
+       MLX5_SET(fte_match_set_misc3, misc_m,
+                outer_vxlan_gpe_next_protocol, m_protocol);
+       MLX5_SET(fte_match_set_misc3, misc_v,
+                outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
 }
 
 /**
@@ -8936,49 +9087,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_geneve(void *matcher, void *key,
-                             const struct rte_flow_item *item, int inner)
+                             const struct rte_flow_item *item,
+                             uint64_t pattern_flags)
 {
+       static const struct rte_flow_item_geneve empty_geneve = {0,};
        const struct rte_flow_item_geneve *geneve_m = item->mask;
        const struct rte_flow_item_geneve *geneve_v = item->spec;
-       void *headers_m;
-       void *headers_v;
+       /* GENEVE flow item validation allows a single tunnel item. */
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
-       uint16_t dport;
        uint16_t gbhdr_m;
        uint16_t gbhdr_v;
-       char *vni_m;
-       char *vni_v;
-       size_t size, i;
+       char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+       char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+       size_t size = sizeof(geneve_m->vni), i;
+       uint16_t protocol_m, protocol_v;
 
-       if (inner) {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        inner_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
-               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-                                        outer_headers);
-               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-       }
-       dport = MLX5_UDP_PORT_GENEVE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                        MLX5_UDP_PORT_GENEVE);
+       }
+       if (!geneve_v) {
+               geneve_v = &empty_geneve;
+               geneve_m = &empty_geneve;
+       } else {
+               if (!geneve_m)
+                       geneve_m = &rte_flow_item_geneve_mask;
        }
-       if (!geneve_v)
-               return;
-       if (!geneve_m)
-               geneve_m = &rte_flow_item_geneve_mask;
-       size = sizeof(geneve_m->vni);
-       vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
-       vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
        memcpy(vni_m, geneve_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & geneve_v->vni[i];
-       MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
-                rte_be_to_cpu_16(geneve_m->protocol));
-       MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
-                rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
        gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
        gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
        MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
@@ -8990,6 +9131,16 @@ flow_dv_translate_item_geneve(void *matcher, void *key,
        MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+       protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+       protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+       if (!protocol_m) {
+               /* Force next protocol to prevent matcher duplication. */
+               protocol_m = 0xFFFF;
+               protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+       }
+       MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+       MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+                protocol_m & protocol_v);
 }
 
 /**
@@ -9186,16 +9337,22 @@ flow_dv_translate_item_mpls(void *matcher, void *key,
 
        switch (prev_layer) {
        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
-                        MLX5_UDP_PORT_MPLS);
+               if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+                                0xffff);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                                MLX5_UDP_PORT_MPLS);
+               }
                break;
        case MLX5_FLOW_LAYER_GRE:
                /* Fall-through. */
        case MLX5_FLOW_LAYER_GRE_KEY:
-               MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
-               MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-                        RTE_ETHER_TYPE_MPLS);
+               if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+                       MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+                                0xffff);
+                       MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                                RTE_ETHER_TYPE_MPLS);
+               }
                break;
        default:
                break;
@@ -9995,6 +10152,27 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
                               reg_value, reg_mask);
 }
 
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           struct mlx5_flow *dev_flow, bool is_inner)
+{
+       const struct rte_flow_item_flex *spec =
+               (const struct rte_flow_item_flex *)item->spec;
+       int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+       MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+       if (index < 0)
+               return;
+       if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+               /* Don't count both inner and outer flex items in one rule. */
+               if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+                       MLX5_ASSERT(false);
+               dev_flow->handle->flex_item |= RTE_BIT32(index);
+       }
+       mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)                                     \
@@ -11989,34 +12167,24 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
                               void *headers_m, void *headers_v)
 {
        if (mask->l4_ok) {
-               /* application l4_ok filter aggregates all hardware l4 filters
-                * therefore hw l4_checksum_ok must be implicitly added here.
+               /* RTE l4_ok filter aggregates hardware l4_ok and
+                * l4_checksum_ok filters.
+                * Positive RTE l4_ok match requires hardware match on both L4
+                * hardware integrity bits.
+                * For negative match, check hardware l4_checksum_ok bit only,
+                * because hardware sets that bit to 0 for all packets
+                * with bad L4.
                 */
-               struct rte_flow_item_integrity local_item;
-
-               local_item.l4_csum_ok = 1;
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
-                        local_item.l4_csum_ok);
                if (value->l4_ok) {
-                       /* application l4_ok = 1 matches sets both hw flags
-                        * l4_ok and l4_checksum_ok flags to 1.
-                        */
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                l4_checksum_ok, local_item.l4_csum_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
-                                mask->l4_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
-                                value->l4_ok);
-               } else {
-                       /* application l4_ok = 0 matches on hw flag
-                        * l4_checksum_ok = 0 only.
-                        */
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                l4_checksum_ok, 0);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
                }
-       } else if (mask->l4_csum_ok) {
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
-                        mask->l4_csum_ok);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+                        !!value->l4_ok);
+       }
+       if (mask->l4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
                         value->l4_csum_ok);
        }
@@ -12028,28 +12196,33 @@ flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
                               void *headers_m, void *headers_v, bool is_ipv4)
 {
        if (mask->l3_ok) {
-               /* application l3_ok filter aggregates all hardware l3 filters
-                * therefore hw ipv4_checksum_ok must be implicitly added here.
+               /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
+                * ipv4_csum_ok filters.
+                * Positive RTE l3_ok match requires hardware match on both L3
+                * hardware integrity bits.
+                * For negative match, check hardware l3_csum_ok bit only,
+                * because hardware sets that bit to 0 for all packets
+                * with bad L3.
                 */
-               struct rte_flow_item_integrity local_item;
-
-               local_item.ipv4_csum_ok = !!is_ipv4;
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
-                        local_item.ipv4_csum_ok);
-               if (value->l3_ok) {
+               if (is_ipv4) {
+                       if (value->l3_ok) {
+                               MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+                                        l3_ok, 1);
+                               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                        l3_ok, 1);
+                       }
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m,
+                                ipv4_checksum_ok, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                ipv4_checksum_ok, local_item.ipv4_csum_ok);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
-                                mask->l3_ok);
+                                ipv4_checksum_ok, !!value->l3_ok);
+               } else {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
                                 value->l3_ok);
-               } else {
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                ipv4_checksum_ok, 0);
                }
-       } else if (mask->ipv4_csum_ok) {
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
-                        mask->ipv4_csum_ok);
+       }
+       if (mask->ipv4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
                         value->ipv4_csum_ok);
        }
@@ -12532,6 +12705,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
                .std_tbl_fix = true,
        };
        const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
+       const struct rte_flow_item *tunnel_item = NULL;
 
        if (!wks)
                return rte_flow_error_set(error, ENOMEM,
@@ -13301,10 +13475,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       flow_dv_translate_item_gre(match_mask, match_value,
-                                                  items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GRE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                        flow_dv_translate_item_gre_key(match_mask,
@@ -13312,10 +13485,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       flow_dv_translate_item_nvgre(match_mask, match_value,
-                                                    items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GRE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        flow_dv_translate_item_vxlan(dev, attr,
@@ -13325,17 +13497,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-                       flow_dv_translate_item_vxlan_gpe(match_mask,
-                                                        match_value, items,
-                                                        tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
-                       flow_dv_translate_item_geneve(match_mask, match_value,
-                                                     items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_GENEVE;
+                       tunnel_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
                        ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
@@ -13431,6 +13600,13 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        flow_dv_translate_item_aso_ct(dev, match_mask,
                                                      match_value, items);
                        break;
+               case RTE_FLOW_ITEM_TYPE_FLEX:
+                       flow_dv_translate_item_flex(dev, match_mask,
+                                                   match_value, items,
+                                                   dev_flow, tunnel != 0);
+                       last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+                                   MLX5_FLOW_ITEM_OUTER_FLEX;
+                       break;
                default:
                        break;
                }
@@ -13454,6 +13630,22 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                                      integrity_items,
                                                      item_flags);
        }
+       if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+               flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+                                                tunnel_item, item_flags);
+       else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+               flow_dv_translate_item_geneve(match_mask, match_value,
+                                             tunnel_item, item_flags);
+       else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+               if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+                       flow_dv_translate_item_gre(match_mask, match_value,
+                                                  tunnel_item, item_flags);
+               else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+                       flow_dv_translate_item_nvgre(match_mask, match_value,
+                                                    tunnel_item, item_flags);
+               else
+                       MLX5_ASSERT(false);
+       }
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
                                              dev_flow->dv.value.buf));
@@ -14310,6 +14502,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (!dev_handle)
                        return;
                flow->dev_handles = dev_handle->next.next;
+               while (dev_handle->flex_item) {
+                       int index = rte_bsf32(dev_handle->flex_item);
+
+                       mlx5_flex_release_index(dev, index);
+                       dev_handle->flex_item &= ~RTE_BIT32(index);
+               }
                if (dev_handle->dvh.matcher)
                        flow_dv_matcher_release(dev, dev_handle);
                if (dev_handle->dvh.rix_sample)
@@ -14503,7 +14701,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
        size_t i;
        int err;
 
-       if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+       if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+                                    !!dev->data->dev_started)) {
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot setup indirection table");
@@ -14543,7 +14742,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
 error_hrxq_new:
        err = rte_errno;
        __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
-       if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+       if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
                shared_rss->ind_tbl = NULL;
        rte_errno = err;
        return -rte_errno;
@@ -14686,7 +14885,8 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
                                          NULL,
                                          "shared rss hrxq has references");
        queue = shared_rss->ind_tbl->queues;
-       remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+       remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+                                              !!dev->data->dev_started);
        if (remaining)
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
@@ -14874,6 +15074,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
        void *queue = NULL;
        uint16_t *queue_old = NULL;
        uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+       bool dev_started = !!dev->data->dev_started;
 
        if (!shared_rss)
                return rte_flow_error_set(error, EINVAL,
@@ -14896,7 +15097,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
        rte_spinlock_lock(&shared_rss->action_rss_sl);
        queue_old = shared_rss->ind_tbl->queues;
        ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
-                                       queue, action_conf->queue_num, true);
+                                       queue, action_conf->queue_num,
+                                       true /* standalone */,
+                                       dev_started /* ref_new_qs */,
+                                       dev_started /* deref_old_qs */);
        if (ret) {
                mlx5_free(queue);
                ret = rte_flow_error_set(error, rte_errno,
@@ -17460,12 +17664,22 @@ static inline int
 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
                               const struct rte_flow_action_rss *r2)
 {
-       if (!r1 || !r2)
+       if (r1 == NULL || r2 == NULL)
                return 0;
-       if (r1->func != r2->func || r1->level != r2->level ||
-           r1->types != r2->types || r1->key_len != r2->key_len ||
-           memcmp(r1->key, r2->key, r1->key_len))
+       if (!(r1->level <= 1 && r2->level <= 1) &&
+           !(r1->level > 1 && r2->level > 1))
                return 1;
+       if (r1->types != r2->types &&
+           !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
+             (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
+               return 1;
+       if (r1->key || r2->key) {
+               const void *key1 = r1->key ? r1->key : rss_hash_default_key;
+               const void *key2 = r2->key ? r2->key : rss_hash_default_key;
+
+               if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
+                       return 1;
+       }
        return 0;
 }
 
@@ -17617,6 +17831,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                *policy_mode = MLX5_MTR_POLICY_MODE_OG;
        } else if (def_green && !def_yellow) {
                *policy_mode = MLX5_MTR_POLICY_MODE_OY;
+       } else {
+               *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
        }
        /* Set to empty string in case of NULL pointer access by user. */
        flow_err.message = "";
@@ -18071,5 +18287,6 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .item_create = flow_dv_item_create,
        .item_release = flow_dv_item_release,
 };
+
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */