diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1d493f1..d222914 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,7 +98,7 @@ struct mlx5_flow_expand_node {
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
-        * (see ETH_RSS_* definitions).
+        * (see RTE_ETH_RSS_* definitions).
         */
        uint64_t node_flags;
        /**<
@@ -298,7 +298,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
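For reference, a minimal application-side sketch of the kind of RSS action whose types field drives this expansion. The queue ids, queue count, and surrounding port setup are assumptions, not taken from the patch.

        #include <rte_common.h>
        #include <rte_ethdev.h>
        #include <rte_flow.h>

        /* Sketch only: an RSS action whose ->types value is what the RSS
         * expansion compares against the per-node rss_types below. */
        static const uint16_t example_queues[] = { 0, 1, 2, 3 };

        static const struct rte_flow_action_rss example_rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .level = 0,                       /* outer-most headers */
                .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
                .queue_num = RTE_DIM(example_queues),
                .queue = example_queues,
        };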
@@ -560,8 +560,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -569,11 +569,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_MPLS,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -584,8 +584,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_NVGRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
@@ -593,11 +593,11 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_MPLS,
                                                  MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -659,32 +659,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP,
                                                  MLX5_EXPANSION_IPV6_FRAG_EXT),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
                .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
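For context, a hedged sketch of how a requested types mask selects the nodes above: composite macros such as RTE_ETH_RSS_IP cover the individual IPv4/IPv6 bits listed per node, so a request for RTE_ETH_RSS_IP selects both the IPv4 and IPv6 branches. The helper name is hypothetical; the PMD's actual selection lives in the expansion code, but it reduces to a bit intersection along these lines.

        #include <stdbool.h>
        #include <stdint.h>

        /* Hypothetical helper: a node is a candidate when the user-requested
         * RSS types intersect the node's rss_types bit-field. */
        static inline bool
        rss_types_select_node(uint64_t requested, uint64_t node_rss_types)
        {
                return (requested & node_rss_types) != 0;
        }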
@@ -914,7 +914,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
-               if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+               if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
@@ -924,21 +924,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 * If the available index REG_C_y >= REG_C_x, skip the
                 * color register.
                 */
-               if (skip_mtr_reg && config->flow_mreg_c
+               if (skip_mtr_reg && priv->sh->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
-                       if (config->flow_mreg_c
+                       if (priv->sh->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
-                               return config->flow_mreg_c
+                               return priv->sh->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
-               return config->flow_mreg_c[id + start_reg - REG_C_0];
+               return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
@@ -959,7 +959,6 @@ bool
 mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
 
        /*
         * Having available reg_c can be regarded inclusively as supporting
@@ -969,7 +968,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
         * - reg_c's are preserved across different domain (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
-       return config->flow_mreg_c[2] != REG_NON;
+       return priv->sh->flow_mreg_c[2] != REG_NON;
 }
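For context, a hedged sketch of application-side objects that rely on the spare reg_c registers whose availability is checked here: the rte_flow TAG item and SET_TAG action. The values and index are arbitrary examples; how they are placed in a real rule is assumed.

        #include <rte_flow.h>

        /* Sketch only: a tag write and a matching tag spec; the tag array
         * index is mapped by the PMD onto one of the available reg_c's. */
        static const struct rte_flow_action_set_tag example_set_tag = {
                .data = 0xcafe,
                .mask = 0xffff,
                .index = 0,
        };

        static const struct rte_flow_item_tag example_tag_spec = {
                .data = 0xcafe,
                .index = 0,
        };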
 
 /**
@@ -990,7 +989,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
 
        if (!attr->group && !attr->transfer)
-               return priv->config.flow_prio - 2;
+               return priv->sh->flow_max_priority - 2;
        return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
 }
 
@@ -1018,11 +1017,11 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
 
        if (!attr->group && !attr->transfer) {
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
-                       priority = priv->config.flow_prio - 1;
+                       priority = priv->sh->flow_max_priority - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
        } else if (!external && attr->transfer && attr->group == 0 &&
                   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
-               return (priv->config.flow_prio - 1) * 3;
+               return (priv->sh->flow_max_priority - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
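Worked example, illustrative only (the real flow_max_priority value is discovered from the device at start-up): with flow_max_priority == 16, a root NIC rule carrying the lowest-priority indicator gets priority 16 - 1 = 15 before mlx5_os_flow_adjust_priority() is applied, while a root transfer rule with the same indicator is mapped to (16 - 1) * 3 = 45.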
@@ -1100,7 +1099,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1653,14 +1652,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
-       if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-           !(rss->types & ETH_RSS_IP))
+       if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+           !(rss->types & RTE_ETH_RSS_IP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L3 partial RSS requested but L3 RSS"
                                          " type not specified");
-       if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-           !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+       if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+           !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
@@ -1877,7 +1876,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t priority_max = priv->config.flow_prio - 1;
+       uint32_t priority_max = priv->sh->flow_max_priority - 1;
 
        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
@@ -3599,6 +3598,8 @@ flow_get_rss_action(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = NULL;
+       struct mlx5_meter_policy_action_container *acg;
+       struct mlx5_meter_policy_action_container *acy;
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -3634,9 +3635,18 @@ flow_get_rss_action(struct rte_eth_dev *dev,
                                        if (!policy)
                                                return NULL;
                                }
-                               if (policy->is_rss)
-                                       rss =
-                               policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+                               if (policy->is_rss) {
+                                       acg =
+                                       &policy->act_cnt[RTE_COLOR_GREEN];
+                                       acy =
+                                       &policy->act_cnt[RTE_COLOR_YELLOW];
+                                       if (acg->fate_action ==
+                                           MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acg->rss->conf;
+                                       else if (acy->fate_action ==
+                                                MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acy->rss->conf;
+                               }
                        }
                        break;
                }
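A hedged application-side sketch of the kind of meter policy this code inspects: an RSS fate for the green and yellow colors built through the rte_mtr API. The port id, policy id, and the drop action for red are assumptions for illustration.

        #include <rte_flow.h>
        #include <rte_mtr.h>

        /* Sketch only: policy whose green/yellow actions are RSS; the PMD
         * code above then picks the shared-RSS conf from green or yellow. */
        static int
        example_add_rss_policy(uint16_t port_id, uint32_t policy_id,
                               const struct rte_flow_action_rss *rss_conf,
                               struct rte_mtr_error *mtr_err)
        {
                const struct rte_flow_action color_acts[] = {
                        { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss_conf },
                        { .type = RTE_FLOW_ACTION_TYPE_END },
                };
                const struct rte_flow_action red_acts[] = {
                        { .type = RTE_FLOW_ACTION_TYPE_DROP },
                        { .type = RTE_FLOW_ACTION_TYPE_END },
                };
                struct rte_mtr_meter_policy_params policy = {
                        .actions = {
                                [RTE_COLOR_GREEN] = color_acts,
                                [RTE_COLOR_YELLOW] = color_acts,
                                [RTE_COLOR_RED] = red_acts,
                        },
                };

                return rte_mtr_meter_policy_add(port_id, policy_id, &policy,
                                                mtr_err);
        }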
@@ -6416,8 +6426,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                 * mlx5_flow_hashfields_adjust() in advance.
                 */
                rss_desc->level = rss->level;
-               /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-               rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+               /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+               rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        }
        flow->dev_handles = 0;
        if (rss && rss->types) {
@@ -7115,7 +7125,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        if (!priv->reta_idx_n || !priv->rxqs_n) {
                return 0;
        }
-       if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+       if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                action_rss.types = 0;
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
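For reference, the port configuration that makes this check keep the control-flow RSS types, sketched with an assumed hash-type selection and the PMD default key.

        #include <rte_ethdev.h>

        /* Sketch only: RSS multi-queue mode enabled on the Rx side. */
        static const struct rte_eth_conf example_port_conf = {
                .rxmode = {
                        .mq_mode = RTE_ETH_MQ_RX_RSS,
                },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = NULL,        /* PMD default key */
                                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
                        },
                },
        };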
@@ -8101,13 +8111,12 @@ int
 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
        enum modify_reg idx;
        int n = 0;
 
        /* reg_c[0] and reg_c[1] are reserved. */
-       config->flow_mreg_c[n++] = REG_C_0;
-       config->flow_mreg_c[n++] = REG_C_1;
+       priv->sh->flow_mreg_c[n++] = REG_C_0;
+       priv->sh->flow_mreg_c[n++] = REG_C_1;
        /* Discover availability of other reg_c's. */
        for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
                struct rte_flow_attr attr = {
@@ -8143,7 +8152,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!config->dv_flow_en)
+               if (!priv->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
                flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8152,17 +8161,18 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                                      flow_idx);
                if (!flow)
                        continue;
-               config->flow_mreg_c[n++] = idx;
+               priv->sh->flow_mreg_c[n++] = idx;
                flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
-               config->flow_mreg_c[n] = REG_NON;
+               priv->sh->flow_mreg_c[n] = REG_NON;
+       priv->sh->metadata_regc_check_flag = 1;
        return 0;
 }
 
 int
 save_dump_file(const uint8_t *data, uint32_t size,
-       uint32_t type, uint32_t id, void *arg, FILE *file)
+       uint32_t type, uint64_t id, void *arg, FILE *file)
 {
        char line[BUF_SIZE];
        uint32_t out = 0;
@@ -8174,17 +8184,18 @@ save_dump_file(const uint8_t *data, uint32_t size,
        switch (type) {
        case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
                actions_num = *(uint32_t *)(arg);
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,",
                                type, id, actions_num);
                break;
        case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
-               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+               out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",",
                                type, id);
                break;
        case DR_DUMP_REC_TYPE_PMD_COUNTER:
                count = (struct rte_flow_query_count *)arg;
-               fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
-                               id, count->hits, count->bytes);
+               fprintf(file,
+                       "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
+                       type, id, count->hits, count->bytes);
                return 0;
        default:
                return -1;
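Minimal sketch of the new record-id formatting: the helper below is hypothetical, but it mirrors the cast-and-print pattern the patch uses now that ids are 64-bit action pointer values.

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Print a dump record id derived from an action pointer. */
        static void
        print_rec_id(FILE *f, uint32_t type, const void *action)
        {
                uint64_t id = (uint64_t)(uintptr_t)action;

                fprintf(f, "%u,0x%" PRIx64 ",\n", type, id);
        }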
@@ -8258,30 +8269,34 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        uint32_t actions_num;
        const uint8_t *data;
        size_t size;
-       uint32_t id;
+       uint64_t id;
        uint32_t type;
+       void *action = NULL;
 
        if (!flow) {
                return rte_flow_error_set(error, ENOENT,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       NULL,
-                       "invalid flow handle");
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
-                       [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+                               [MLX5_IPOOL_MLX5_FLOW], handle_idx);
                if (!dh)
                        continue;
                handle_idx = dh->next.next;
-               id = (uint32_t)(uintptr_t)dh->drv_flow;
 
                /* query counter */
                type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                       save_dump_file(NULL, 0, type,
-                                       id, (void *)&count, file);
-
+               flow_dv_query_count_ptr(dev, flow->counter,
+                                               &action, error);
+               if (action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+               }
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
                modify_hdr = dh->dvh.modify_hdr;
@@ -8294,14 +8309,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                if (modify_hdr) {
                        data = (const uint8_t *)modify_hdr->actions;
                        size = (size_t)(modify_hdr->actions_num) * 8;
+                       id = (uint64_t)(uintptr_t)modify_hdr->action;
                        actions_num = modify_hdr->actions_num;
                        type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
                        save_dump_file(data, size, type, id,
-                                       (void *)(&actions_num), file);
+                                               (void *)(&actions_num), file);
                }
                if (encap_decap) {
                        data = encap_decap->buf;
                        size = encap_decap->size;
+                       id = (uint64_t)(uintptr_t)encap_decap->action;
                        type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
                        save_dump_file(data, size, type,
                                                id, NULL, file);
@@ -8309,6 +8326,119 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
        }
        return 0;
 }
+
+/**
+ * Dump all flows' encap_decap/modify_hdr/counter data to a file.

+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+static int
+mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
+       FILE *file, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist *h;
+       struct mlx5_flow_dv_modify_hdr_resource  *modify_hdr;
+       struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+       struct rte_flow_query_count count;
+       uint32_t actions_num;
+       const uint8_t *data;
+       size_t size;
+       uint64_t id;
+       uint32_t type;
+       uint32_t i;
+       uint32_t j;
+       struct mlx5_list_inconst *l_inconst;
+       struct mlx5_list_entry *e;
+       int lcore_index;
+       struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+       uint32_t max;
+       void *action;
+
+       /* encap_decap hlist is lcore_share, get global core cache. */
+       i = MLX5_LIST_GLOBAL;
+       h = sh->encaps_decaps;
+       if (h) {
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               encap_decap =
+                               (struct mlx5_flow_dv_encap_decap_resource *)e;
+                               data = encap_decap->buf;
+                               size = encap_decap->size;
+                               id = (uint64_t)(uintptr_t)encap_decap->action;
+                               type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+                               save_dump_file(data, size, type,
+                                       id, NULL, file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+       }
+
+       /* get modify_hdr */
+       h = sh->modify_cmds;
+       if (h) {
+               lcore_index = rte_lcore_index(rte_lcore_id());
+               if (unlikely(lcore_index == -1)) {
+                       lcore_index = MLX5_LIST_NLCORE;
+                       rte_spinlock_lock(&h->l_const.lcore_lock);
+               }
+               i = lcore_index;
+
+               for (j = 0; j <= h->mask; j++) {
+                       l_inconst = &h->buckets[j].l;
+                       if (!l_inconst || !l_inconst->cache[i])
+                               continue;
+
+                       e = LIST_FIRST(&l_inconst->cache[i]->h);
+                       while (e) {
+                               modify_hdr =
+                               (struct mlx5_flow_dv_modify_hdr_resource *)e;
+                               data = (const uint8_t *)modify_hdr->actions;
+                               size = (size_t)(modify_hdr->actions_num) * 8;
+                               actions_num = modify_hdr->actions_num;
+                               id = (uint64_t)(uintptr_t)modify_hdr->action;
+                               type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+                               save_dump_file(data, size, type, id,
+                                               (void *)(&actions_num), file);
+                               e = LIST_NEXT(e, next);
+                       }
+               }
+
+               if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+                       rte_spinlock_unlock(&h->l_const.lcore_lock);
+       }
+
+       /* get counter */
+       MLX5_ASSERT(cmng->n_valid <= cmng->n);
+       max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
+       for (j = 1; j <= max; j++) {
+               action = NULL;
+               flow_dv_query_count_ptr(dev, j, &action, error);
+               if (action) {
+                       if (!flow_dv_query_count(dev, j, &count, error)) {
+                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                               id = (uint64_t)(uintptr_t)action;
+                               save_dump_file(NULL, 0, type,
+                                               id, (void *)&count, file);
+                       }
+               }
+       }
+       return 0;
+}
 #endif
 
 /**
@@ -8335,9 +8465,6 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        int ret;
        struct mlx5_flow_handle *dh;
        struct rte_flow *flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       uint32_t idx;
-#endif
 
        if (!priv->config.dv_flow_en) {
                if (fputs("device dv flow disabled\n", file) <= 0)
@@ -8348,8 +8475,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        /* dump all */
        if (!flow_idx) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-               MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
-                       mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+               if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+                       return -EINVAL;
 #endif
                return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
                                        sh->rx_domain,
@@ -8359,7 +8486,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
                        (uintptr_t)(void *)flow_idx);
        if (!flow)
-               return -ENOENT;
+               return -EINVAL;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        mlx5_flow_dev_dump_ipool(dev, flow, file, error);
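Application-side usage sketch of the dump entry point (the file path and error handling are assumptions): passing a NULL flow pointer selects the dump-all path, which now goes through mlx5_flow_dev_dump_sh_all() above.

        #include <errno.h>
        #include <stdio.h>
        #include <rte_flow.h>

        static int
        example_dump_all_flows(uint16_t port_id)
        {
                struct rte_flow_error err;
                FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
                int ret;

                if (f == NULL)
                        return -errno;
                /* NULL flow handle requests a dump of all flows. */
                ret = rte_flow_dev_dump(port_id, NULL, f, &err);
                fclose(f);
                return ret;
        }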
@@ -8783,7 +8910,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                                (error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                NULL, "invalid port configuration");
-               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+               if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                        ctx->action_rss.types = 0;
                for (i = 0; i != priv->reta_idx_n; ++i)
                        ctx->queue[i] = (*priv->reta_idx)[i];
@@ -9380,7 +9507,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 {
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
-       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+       const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 
        if (!is_tunnel_offload_active(dev)) {
                info->flags = 0;
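For context, a sketch of how an application reads the same flag pair on received mbufs (the helper name is hypothetical): when both flags are set, hash.fdir.hi carries the mark/flow id that the tunnel restore logic above translates.

        #include <rte_mbuf.h>

        static inline uint32_t
        example_get_rx_mark(const struct rte_mbuf *m)
        {
                const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;

                if ((m->ol_flags & mask) == mask)
                        return m->hash.fdir.hi;
                return 0;
        }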