net/mlx5: fix RSS RETA update
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6413b45..a5ced50 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -287,31 +287,6 @@ struct field_modify_info modify_tcp[] = {
        {0, 0, 0},
 };
 
-static const struct rte_flow_item *
-mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
-{
-       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VXLAN:
-               case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-               case RTE_FLOW_ITEM_TYPE_GRE:
-               case RTE_FLOW_ITEM_TYPE_MPLS:
-               case RTE_FLOW_ITEM_TYPE_NVGRE:
-               case RTE_FLOW_ITEM_TYPE_GENEVE:
-                       return item;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
-                           item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
-                               return item;
-                       break;
-               }
-       }
-       return NULL;
-}
-
 static void
 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
@@ -6581,119 +6556,85 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
        return ret;
 }
 
-static uint16_t
-mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
-                         const struct rte_flow_item *end)
+static int
+validate_integrity_bits(const struct rte_flow_item_integrity *mask,
+                       uint64_t pattern_flags, uint64_t l3_flags,
+                       uint64_t l4_flags, uint64_t ip4_flag,
+                       struct rte_flow_error *error)
 {
-       const struct rte_flow_item *item = *head;
-       uint16_t l3_protocol;
+       if (mask->l3_ok && !(pattern_flags & l3_flags))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing L3 protocol");
+
+       if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing IPv4 protocol");
+
+       if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         NULL, "missing L4 protocol");
 
-       for (; item != end; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       l3_protocol = RTE_ETHER_TYPE_IPV4;
-                       goto l3_ok;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       l3_protocol = RTE_ETHER_TYPE_IPV6;
-                       goto l3_ok;
-               case RTE_FLOW_ITEM_TYPE_ETH:
-                       if (item->mask && item->spec) {
-                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
-                                                           type, item,
-                                                           l3_protocol);
-                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
-                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
-                                       goto l3_ok;
-                       }
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VLAN:
-                       if (item->mask && item->spec) {
-                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
-                                                           inner_type, item,
-                                                           l3_protocol);
-                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
-                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
-                                       goto l3_ok;
-                       }
-                       break;
-               }
-       }
        return 0;
-l3_ok:
-       *head = item;
-       return l3_protocol;
 }
 
-static uint8_t
-mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
-                         const struct rte_flow_item *end)
+static int
+flow_dv_validate_item_integrity_post(const struct rte_flow_item
+                                    *integrity_items[2],
+                                    uint64_t pattern_flags,
+                                    struct rte_flow_error *error)
 {
-       const struct rte_flow_item *item = *head;
-       uint8_t l4_protocol;
+       const struct rte_flow_item_integrity *mask;
+       int ret;
 
-       for (; item != end; item++) {
-               switch (item->type) {
-               default:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_TCP:
-                       l4_protocol = IPPROTO_TCP;
-                       goto l4_ok;
-               case RTE_FLOW_ITEM_TYPE_UDP:
-                       l4_protocol = IPPROTO_UDP;
-                       goto l4_ok;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       if (item->mask && item->spec) {
-                               const struct rte_flow_item_ipv4 *mask, *spec;
-
-                               mask = (typeof(mask))item->mask;
-                               spec = (typeof(spec))item->spec;
-                               l4_protocol = mask->hdr.next_proto_id &
-                                             spec->hdr.next_proto_id;
-                               if (l4_protocol == IPPROTO_TCP ||
-                                   l4_protocol == IPPROTO_UDP)
-                                       goto l4_ok;
-                       }
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       if (item->mask && item->spec) {
-                               const struct rte_flow_item_ipv6 *mask, *spec;
-                               mask = (typeof(mask))item->mask;
-                               spec = (typeof(spec))item->spec;
-                               l4_protocol = mask->hdr.proto & spec->hdr.proto;
-                               if (l4_protocol == IPPROTO_TCP ||
-                                   l4_protocol == IPPROTO_UDP)
-                                       goto l4_ok;
-                       }
-                       break;
-               }
+       if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
+               mask = (typeof(mask))integrity_items[0]->mask;
+               ret = validate_integrity_bits(mask, pattern_flags,
+                                             MLX5_FLOW_LAYER_OUTER_L3,
+                                             MLX5_FLOW_LAYER_OUTER_L4,
+                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4,
+                                             error);
+               if (ret)
+                       return ret;
+       }
+       if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
+               mask = (typeof(mask))integrity_items[1]->mask;
+               ret = validate_integrity_bits(mask, pattern_flags,
+                                             MLX5_FLOW_LAYER_INNER_L3,
+                                             MLX5_FLOW_LAYER_INNER_L4,
+                                             MLX5_FLOW_LAYER_INNER_L3_IPV4,
+                                             error);
+               if (ret)
+                       return ret;
        }
        return 0;
-l4_ok:
-       *head = item;
-       return l4_protocol;
 }
 
 static int
 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
-                               const struct rte_flow_item *rule_items,
                                const struct rte_flow_item *integrity_item,
+                               uint64_t pattern_flags, uint64_t *last_item,
+                               const struct rte_flow_item *integrity_items[2],
                                struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
        const struct rte_flow_item_integrity *mask = (typeof(mask))
                                                     integrity_item->mask;
        const struct rte_flow_item_integrity *spec = (typeof(spec))
                                                     integrity_item->spec;
-       uint32_t protocol;
 
        if (!priv->config.hca_attr.pkt_integrity_match)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          integrity_item,
                                          "packet integrity integrity_item not supported");
+       if (!spec)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         integrity_item,
+                                         "no spec for integrity item");
        if (!mask)
                mask = &rte_flow_item_integrity_mask;
        if (!mlx5_validate_integrity_item(mask))
@@ -6701,34 +6642,22 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          integrity_item,
                                          "unsupported integrity filter");
-       tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
        if (spec->level > 1) {
-               if (!tunnel_item)
-                       return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing tunnel item");
-               item = tunnel_item;
-               end_item = mlx5_find_end_item(tunnel_item);
+               if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                NULL, "multiple inner integrity items not supported");
+               integrity_items[1] = integrity_item;
+               *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
        } else {
-               end_item = tunnel_item ? tunnel_item :
-                          mlx5_find_end_item(integrity_item);
-       }
-       if (mask->l3_ok || mask->ipv4_csum_ok) {
-               protocol = mlx5_flow_locate_proto_l3(&item, end_item);
-               if (!protocol)
-                       return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing L3 protocol");
-       }
-       if (mask->l4_ok || mask->l4_csum_ok) {
-               protocol = mlx5_flow_locate_proto_l4(&item, end_item);
-               if (!protocol)
-                       return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 integrity_item,
-                                                 "missing L4 protocol");
+               if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                NULL, "multiple outer integrity items not supported");
+               integrity_items[0] = integrity_item;
+               *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
        }
        return 0;
 }
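
The validation is now two-phase: flow_dv_validate_item_integrity() only records the item for its level (outer or inner) and rejects duplicates, while validate_integrity_bits() cross-checks the accumulated pattern flags once the whole pattern has been walked. A minimal standalone sketch of that scheme, with hypothetical flag values and a trimmed mask struct standing in for the driver's MLX5_FLOW_* bits:

#include <stdint.h>
#include <stdio.h>

#define ITEM_OUTER_INTEGRITY (UINT64_C(1) << 0)
#define ITEM_INNER_INTEGRITY (UINT64_C(1) << 1)
#define LAYER_OUTER_L3       (UINT64_C(1) << 2)
#define LAYER_OUTER_L4       (UINT64_C(1) << 3)

struct integrity_mask {
	int l3_ok;
	int l4_ok;
};

/* Phase 1: record the item for its level, rejecting duplicates. */
static int
register_item(uint64_t *flags, int level)
{
	uint64_t bit = level > 1 ? ITEM_INNER_INTEGRITY : ITEM_OUTER_INTEGRITY;

	if (*flags & bit)
		return -1; /* duplicate integrity item */
	*flags |= bit;
	return 0;
}

/* Phase 2: cross-check once the whole pattern has been walked. */
static int
post_validate(const struct integrity_mask *m, uint64_t flags)
{
	if (m->l3_ok && !(flags & LAYER_OUTER_L3))
		return -1; /* l3_ok set but the pattern has no L3 item */
	if (m->l4_ok && !(flags & LAYER_OUTER_L4))
		return -1; /* l4_ok set but the pattern has no L4 item */
	return 0;
}

int
main(void)
{
	uint64_t flags = 0;
	struct integrity_mask m = { .l3_ok = 1, .l4_ok = 0 };

	if (register_item(&flags, 1) != 0)
		return 1;
	flags |= LAYER_OUTER_L3; /* the pattern also carried an IPv4 item */
	printf("pattern %s\n", post_validate(&m, flags) ? "rejected" : "ok");
	return 0;
}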
@@ -6825,7 +6754,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                .std_tbl_fix = true,
        };
        const struct rte_eth_hairpin_conf *conf;
-       const struct rte_flow_item *rule_items = items;
+       const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
        const struct rte_flow_item *port_id_item = NULL;
        bool def_policy = false;
        uint16_t udp_dport = 0;
@@ -7152,16 +7081,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
-                       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
-                               return rte_flow_error_set
-                                       (error, ENOTSUP,
-                                        RTE_FLOW_ERROR_TYPE_ITEM,
-                                        NULL, "multiple integrity items not supported");
-                       ret = flow_dv_validate_item_integrity(dev, rule_items,
-                                                             items, error);
+                       ret = flow_dv_validate_item_integrity(dev, items,
+                                                             item_flags,
+                                                             &last_item,
+                                                             integrity_items,
+                                                             error);
                        if (ret < 0)
                                return ret;
-                       last_item = MLX5_FLOW_ITEM_INTEGRITY;
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        ret = flow_dv_validate_item_aso_ct(dev, items,
@@ -7181,6 +7107,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                }
                item_flags |= last_item;
        }
+       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+               ret = flow_dv_validate_item_integrity_post(integrity_items,
+                                                          item_flags, error);
+               if (ret)
+                       return ret;
+       }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;
                bool shared_count = false;
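
The call site shows why the check must run after the loop: last_item is folded into item_flags at the bottom of every iteration, so the flags are only complete once iteration ends. A condensed sketch of that control flow, with toy item types in place of the rte_flow structures:

#include <stdint.h>

#define LAYER_L3       (UINT64_C(1) << 0)
#define ITEM_INTEGRITY (UINT64_C(1) << 1)

enum toy_type { TOY_END, TOY_IPV4, TOY_INTEGRITY };

/* Shape of the loop above: fold last_item into item_flags each pass,
 * then run the integrity cross-check only after the loop completes.
 */
static int
validate_pattern(const enum toy_type *items)
{
	uint64_t item_flags = 0;

	for (; *items != TOY_END; items++) {
		uint64_t last_item = 0;

		if (*items == TOY_IPV4)
			last_item = LAYER_L3;
		else if (*items == TOY_INTEGRITY)
			last_item = ITEM_INTEGRITY;
		item_flags |= last_item;
	}
	if ((item_flags & ITEM_INTEGRITY) && !(item_flags & LAYER_L3))
		return -1; /* integrity requested but no L3 in the pattern */
	return 0;
}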
@@ -10861,9 +10793,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
-                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
-                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
                        else
                                dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
@@ -10871,9 +10803,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
-                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
-                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
                        else
                                dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
@@ -10887,11 +10819,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
                return;
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
-               if (rss_types & ETH_RSS_UDP) {
-                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+               if (rss_types & RTE_ETH_RSS_UDP) {
+                       if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_UDP;
-                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_UDP;
                        else
@@ -10899,11 +10831,11 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
                }
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
-               if (rss_types & ETH_RSS_TCP) {
-                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+               if (rss_types & RTE_ETH_RSS_TCP) {
+                       if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_TCP;
-                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_TCP;
                        else
@@ -12068,8 +12000,7 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
 static void
 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
                               const struct rte_flow_item_integrity *value,
-                              void *headers_m, void *headers_v,
-                              bool is_ipv4)
+                              void *headers_m, void *headers_v, bool is_ipv4)
 {
        if (mask->l3_ok) {
                /* application l3_ok filter aggregates all hardware l3 filters
@@ -12100,45 +12031,66 @@ flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
 }
 
 static void
-flow_dv_translate_item_integrity(void *matcher, void *key,
-                                const struct rte_flow_item *head_item,
-                                const struct rte_flow_item *integrity_item)
+set_integrity_bits(void *headers_m, void *headers_v,
+                  const struct rte_flow_item *integrity_item, bool is_l3_ip4)
 {
+       const struct rte_flow_item_integrity *spec = integrity_item->spec;
        const struct rte_flow_item_integrity *mask = integrity_item->mask;
-       const struct rte_flow_item_integrity *value = integrity_item->spec;
-       const struct rte_flow_item *tunnel_item, *end_item, *item;
-       void *headers_m;
-       void *headers_v;
-       uint32_t l3_protocol;
 
-       if (!value)
-               return;
+       /* Integrity item validation ensured the spec pointer is set. */
+       MLX5_ASSERT(spec != NULL);
        if (!mask)
                mask = &rte_flow_item_integrity_mask;
-       if (value->level > 1) {
+       flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
+                                      is_l3_ip4);
+       flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
+}
+
+static void
+flow_dv_translate_item_integrity_post(void *matcher, void *key,
+                                     const struct rte_flow_item
+                                     *integrity_items[2],
+                                     uint64_t pattern_flags)
+{
+       void *headers_m, *headers_v;
+       bool is_l3_ip4;
+
+       if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-       } else {
+               is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
+                           0;
+               set_integrity_bits(headers_m, headers_v,
+                                  integrity_items[1], is_l3_ip4);
+       }
+       if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+               is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
+                           0;
+               set_integrity_bits(headers_m, headers_v,
+                                  integrity_items[0], is_l3_ip4);
        }
-       tunnel_item = mlx5_flow_find_tunnel_item(head_item);
-       if (value->level > 1) {
-               /* tunnel item was verified during the item validation */
-               item = tunnel_item;
-               end_item = mlx5_find_end_item(tunnel_item);
+}
+
+static void
+flow_dv_translate_item_integrity(const struct rte_flow_item *item,
+                                const struct rte_flow_item *integrity_items[2],
+                                uint64_t *last_item)
+{
+       const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
+
+       /* Integrity item validation ensured the spec pointer is set. */
+       MLX5_ASSERT(spec != NULL);
+       if (spec->level > 1) {
+               integrity_items[1] = item;
+               *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
        } else {
-               item = head_item;
-               end_item = tunnel_item ? tunnel_item :
-                          mlx5_find_end_item(integrity_item);
+               integrity_items[0] = item;
+               *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
        }
-       l3_protocol = mask->l3_ok ?
-                     mlx5_flow_locate_proto_l3(&item, end_item) : 0;
-       flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
-                                      l3_protocol == RTE_ETHER_TYPE_IPV4);
-       flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
 }
 
 /**
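
Translation mirrors validation: flow_dv_translate_item_integrity() merely records the item per level, and flow_dv_translate_item_integrity_post() writes the matcher bits once pattern_flags is final, since an INTEGRITY item may precede the L3/L4 items that decide is_l3_ip4. A small sketch of the deferred dispatch, using hypothetical flag bits and a toy header struct instead of the fte_match_param layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ITEM_INNER_INTEGRITY (UINT64_C(1) << 0)
#define LAYER_INNER_L3_IPV4  (UINT64_C(1) << 1)

struct toy_headers {
	bool l3_ok;
	bool ipv4_csum_ok;
};

struct toy_item {
	int level;
	bool want_ipv4_csum;
};

/* Phase 1: remember the inner item; no matcher access yet. */
static void
record(const struct toy_item *item, const struct toy_item **slot,
       uint64_t *flags)
{
	if (item->level > 1) {
		*slot = item;
		*flags |= ITEM_INNER_INTEGRITY;
	}
}

/* Phase 2: flags are final, so the IPv4 check is now reliable. */
static void
translate_post(const struct toy_item *inner, uint64_t flags,
	       struct toy_headers *hdr)
{
	if (!(flags & ITEM_INNER_INTEGRITY) || inner == NULL)
		return;
	hdr->l3_ok = true;
	if (inner->want_ipv4_csum && (flags & LAYER_INNER_L3_IPV4))
		hdr->ipv4_csum_ok = true;
}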
@@ -12554,7 +12506,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        (1 << MLX5_SCALE_FLOW_GROUP_BIT),
                .std_tbl_fix = true,
        };
-       const struct rte_flow_item *head_item = items;
+       const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
 
        if (!wks)
                return rte_flow_error_set(error, ENOMEM,
@@ -13447,9 +13399,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
-                       flow_dv_translate_item_integrity(match_mask,
-                                                        match_value,
-                                                        head_item, items);
+                       flow_dv_translate_item_integrity(items, integrity_items,
+                                                        &last_item);
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        flow_dv_translate_item_aso_ct(dev, match_mask,
@@ -13473,6 +13424,11 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                                   match_value, NULL, attr))
                        return -rte_errno;
        }
+       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+               flow_dv_translate_item_integrity_post(match_mask, match_value,
+                                                     integrity_items,
+                                                     item_flags);
+       }
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
                                              dev_flow->dv.value.buf));
@@ -14444,9 +14400,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4:
                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
                        *hash_field &= ~MLX5_RSS_HASH_IPV4;
-                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_IPV4;
-                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_IPV4;
                        else
                                *hash_field |= MLX5_RSS_HASH_IPV4;
@@ -14455,9 +14411,9 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV6:
                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
                        *hash_field &= ~MLX5_RSS_HASH_IPV6;
-                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_IPV6;
-                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_IPV6;
                        else
                                *hash_field |= MLX5_RSS_HASH_IPV6;
@@ -14466,11 +14422,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4_UDP:
                /* fall-through. */
        case MLX5_RSS_HASH_IPV6_UDP:
-               if (rss_types & ETH_RSS_UDP) {
+               if (rss_types & RTE_ETH_RSS_UDP) {
                        *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
-                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
-                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
                        else
                                *hash_field |= MLX5_UDP_IBV_RX_HASH;
@@ -14479,11 +14435,11 @@ __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
        case MLX5_RSS_HASH_IPV4_TCP:
                /* fall-through. */
        case MLX5_RSS_HASH_IPV6_TCP:
-               if (rss_types & ETH_RSS_TCP) {
+               if (rss_types & RTE_ETH_RSS_TCP) {
                        *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
-                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                       if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
-                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                       else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
                        else
                                *hash_field |= MLX5_TCP_IBV_RX_HASH;
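
The remaining hunks are the mechanical ETH_RSS_* to RTE_ETH_RSS_* rename; the selection logic itself narrows the hash to one address or port when L3_SRC_ONLY/L3_DST_ONLY (or the L4 equivalents) is set, and hashes both otherwise. A compilable sketch of that decision for IPv4, assuming a DPDK >= 21.11 tree where the RTE_ETH_RSS_* names exist; the HASH_* constants stand in for the IBV_RX_HASH_* bits:

#include <stdint.h>
#include <rte_ethdev.h>

#define HASH_SRC_IPV4 0x1u /* stands in for IBV_RX_HASH_SRC_IPV4 */
#define HASH_DST_IPV4 0x2u /* stands in for IBV_RX_HASH_DST_IPV4 */

static uint32_t
ipv4_hash_fields(uint64_t rss_types)
{
	if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
		return HASH_SRC_IPV4;
	if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
		return HASH_DST_IPV4;
	return HASH_SRC_IPV4 | HASH_DST_IPV4; /* default: hash on both */
}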
@@ -14631,8 +14587,8 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
        origin = &shared_rss->origin;
        origin->func = rss->func;
        origin->level = rss->level;
-       /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-       origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+       /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+       origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        /* NULL RSS key indicates default RSS key. */
        rss_key = !rss->key ? rss_hash_default_key : rss->key;
        memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
@@ -15648,7 +15604,7 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
                    struct rte_flow_error *error)
 {
@@ -15686,6 +15642,47 @@ flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
                                  "counters are not available");
 }
 
+/**
+ * Query counter's action pointer for a DV flow rule via DevX.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] cnt_idx
+ *   Index to the flow counter.
+ * @param[out] action_ptr
+ *   Action pointer for counter.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
+                       void **action_ptr, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (!priv->sh->devx || !action_ptr)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "counters are not supported");
+
+       if (cnt_idx) {
+               struct mlx5_flow_counter *cnt =
+                       flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
+               if (cnt) {
+                       *action_ptr = cnt->action;
+                       return 0;
+               }
+       }
+       return rte_flow_error_set(error, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "counters are not available");
+}
+
 static int
 flow_dv_action_query(struct rte_eth_dev *dev,
                     const struct rte_flow_action_handle *handle, void *data,
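
With flow_dv_query_count_ptr() exported (non-static), other driver units can resolve a counter's DevX action object from its index. A hypothetical caller sketch, assuming the prototype is made visible through mlx5_flow.h as part of this change:

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Assumed exported through mlx5_flow.h by this change. */
int flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
			    void **action_ptr, struct rte_flow_error *error);

static int
get_counter_action(struct rte_eth_dev *dev, uint32_t cnt_idx, void **action)
{
	struct rte_flow_error error;

	if (flow_dv_query_count_ptr(dev, cnt_idx, action, &error))
		return -rte_errno; /* error.message holds the cause */
	return 0;
}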