net/mlx5: add pattern template management
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index d7cb1eb..673e0ec 100644 (file)
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+       [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
@@ -804,6 +805,28 @@ static int
 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
                            const struct rte_flow_item_flex_handle *handle,
                            struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *err);
+
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error);
+
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -825,6 +848,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
        .flex_item_create = mlx5_flow_flex_item_create,
        .flex_item_release = mlx5_flow_flex_item_release,
+       .info_get = mlx5_flow_info_get,
+       .configure = mlx5_flow_port_configure,
+       .pattern_template_create = mlx5_flow_pattern_template_create,
+       .pattern_template_destroy = mlx5_flow_pattern_template_destroy,
 };
 
 /* Tunnel information. */
@@ -901,7 +928,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;
 
@@ -1759,7 +1786,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too large");
-       if (rss->queue_num > priv->config.ind_table_max_size)
+       if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->queue_num,
@@ -1994,7 +2021,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
-       if (attributes->transfer && !priv->config.dv_esw_en)
+       if (attributes->transfer && !priv->sh->config.dv_esw_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
@@ -2711,7 +2738,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
 
-       if (!priv->config.l3_vxlan_en)
+       if (!priv->sh->config.l3_vxlan_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 VXLAN is not enabled by device"
@@ -2906,7 +2933,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        const struct rte_flow_item_geneve *mask = item->mask;
        int ret;
        uint16_t gbhdr;
-       uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+       uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
                          MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
        const struct rte_flow_item_geneve nic_mask = {
                .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
@@ -2914,7 +2941,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                .protocol = RTE_BE16(UINT16_MAX),
        };
 
-       if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+       if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Geneve is not enabled by device"
@@ -2994,10 +3021,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
-       struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        uint8_t data_max_supported =
                        hca_attr->max_geneve_tlv_option_data_len * 4;
-       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;
        const struct rte_flow_item_geneve_opt *spec = item->spec;
@@ -3031,11 +3057,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                        "Geneve TLV opt class/type/length masks must be full");
        /* Check if length is supported */
        if ((uint32_t)spec->option_len >
-                       config->hca_attr.max_geneve_tlv_option_data_len)
+                       hca_attr->max_geneve_tlv_option_data_len)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt length not supported");
-       if (config->hca_attr.max_geneve_tlv_options > 1)
+       if (hca_attr->max_geneve_tlv_options > 1)
                DRV_LOG(DEBUG,
                        "max_geneve_tlv_options supports more than 1 option");
        /* Check GENEVE item preceding. */
@@ -3090,7 +3116,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                                        "Data mask is of unsupported size");
        }
        /* Check GENEVE option is supported in NIC. */
-       if (!config->hca_attr.geneve_tlv_opt)
+       if (!hca_attr->geneve_tlv_opt)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt not supported");
@@ -3139,7 +3165,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
-       if (!priv->config.mpls_en)
+       if (!priv->sh->dev_cap.mpls_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "MPLS not supported or"
@@ -3429,12 +3455,18 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 
        if (type != MLX5_FLOW_TYPE_MAX)
                return type;
+       /*
+        * Currently, when dv_flow_en == 2, only the HW steering engine is
+        * supported. Additional engines can be selected here once ready.
+        */
+       if (priv->sh->config.dv_flow_en == 2)
+               return MLX5_FLOW_TYPE_HW;
        /* If no OS specific type - continue with DV/VERBS selection */
-       if (attr->transfer && priv->config.dv_esw_en)
+       if (attr->transfer && priv->sh->config.dv_esw_en)
                type = MLX5_FLOW_TYPE_DV;
        if (!attr->transfer)
-               type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
-                                                MLX5_FLOW_TYPE_VERBS;
+               type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+                                                    MLX5_FLOW_TYPE_VERBS;
        return type;
 }
 
@@ -4106,7 +4138,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
                return true;
        case RTE_FLOW_ACTION_TYPE_FLAG:
        case RTE_FLOW_ACTION_TYPE_MARK:
-               if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+               if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
                        return true;
                else
                        return false;
@@ -4545,8 +4577,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
        uint32_t mark_id;
 
        /* Check whether extensive metadata feature is engaged. */
-       if (!priv->config.dv_flow_en ||
-           priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+       if (!priv->sh->config.dv_flow_en ||
+           priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev) ||
            !priv->sh->dv_regc0_mask)
                return 0;
@@ -4605,7 +4637,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                            struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        struct mlx5_flow_mreg_copy_resource *mcp_res;
        const struct rte_flow_action_mark *mark;
 
@@ -5022,6 +5054,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        uint32_t tag_id = 0;
        struct rte_flow_item *vlan_item_dst = NULL;
        const struct rte_flow_item *vlan_item_src = NULL;
+       const struct rte_flow_item *orig_items = items;
        struct rte_flow_action *hw_mtr_action;
        struct rte_flow_action *action_pre_head = NULL;
        int32_t flow_src_port = priv->representor_id;
@@ -5146,7 +5179,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 
                if (!fm->def_policy) {
                        sub_policy = get_meter_sub_policy(dev, flow, wks,
-                                                         attr, items, error);
+                                                         attr, orig_items,
+                                                         error);
                        if (!sub_policy)
                                return -rte_errno;
                } else {
@@ -5741,7 +5775,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        const struct rte_flow_action *qrss = NULL;
        struct rte_flow_action *ext_actions = NULL;
        struct mlx5_flow *dev_flow = NULL;
@@ -6249,7 +6283,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                 * When reg_c_preserve is set, metadata registers Cx preserve
                 * their value even through packet duplication.
                 */
-               add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+               add_tag = (!fdb_tx ||
+                          priv->sh->cdev->config.hca_attr.reg_c_preserve);
                if (add_tag)
                        sfx_items = (struct rte_flow_item *)((char *)sfx_actions
                                        + act_size);
@@ -6837,6 +6872,15 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2) {
+               rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q creation not supported");
+               return NULL;
+       }
        /*
         * If the device is not started yet, it is not allowed to create a
         * flow from application. PMD default flows and traffic control flows
@@ -7008,8 +7052,7 @@ flow_alloc_thread_workspace(void)
        data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
        return data;
 err:
-       if (data->rss_desc.queue)
-               free(data->rss_desc.queue);
+       free(data->rss_desc.queue);
        free(data);
        return NULL;
 }
@@ -7334,6 +7377,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error __rte_unused)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q destruction not supported");
        flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
                                (uintptr_t)(void *)flow);
        return 0;
@@ -7431,7 +7481,13 @@ mlx5_flow_query(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
 {
        int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q query not supported");
        ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
                             error);
        if (ret < 0)
@@ -7798,14 +7854,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
  */
 int
 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
-                  bool clear, uint64_t *pkts, uint64_t *bytes)
+                  bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
 
        if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
                fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-               return fops->counter_query(dev, cnt, clear, pkts, bytes);
+               return fops->counter_query(dev, cnt, clear, pkts,
+                                       bytes, action);
        }
        DRV_LOG(ERR,
                "port %u counter query is not supported.",
@@ -7813,6 +7870,136 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
        return -ENOTSUP;
 }
 
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ *   Pointer to port information.
+ * @param[out] queue_info
+ *   Pointer to queue information.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "info get with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ *   Port configuration attributes.
+ * @param[in] nb_queue
+ *   Number of queues.
+ * @param[in] queue_attr
+ *   Array that holds attributes for each flow queue.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "port configure with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
+
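For reference, a minimal application-side sketch of how these two entry points are reached through the generic rte_flow layer. It assumes port_id refers to an mlx5 port probed with dv_flow_en=2; the helper name, counter budget, and queue depth are illustrative, not values required by this patch.

#include <rte_errno.h>
#include <rte_flow.h>

/* Illustrative only: query the HWS limits, then pre-allocate port and queue
 * resources before creating any templates or template tables. */
static int
example_configure_port(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1 << 10, /* hypothetical counter budget */
	};
	const struct rte_flow_queue_attr queue_attr = {
		.size = 64, /* hypothetical flow queue depth */
	};
	const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };

	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
		return -rte_errno;
	if (rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error))
		return -rte_errno;
	return 0;
}
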
+/**
+ * Create flow pattern template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the pattern template attributes.
+ * @param[in] items
+ *   The template item pattern.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Pointer to the created pattern template on success, NULL otherwise
+ *   and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_create(dev, attr, items, error);
+}
+
+/**
+ * Destroy flow pattern template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the pattern template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_destroy(dev, template, error);
+}
+
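A corresponding usage sketch for the two template entry points above, assuming the port was already configured as in the previous example. The helper name, the relaxed-matching ingress attribute, and the ETH / IPV4 pattern are example choices only.

#include <rte_errno.h>
#include <rte_flow.h>

/* Illustrative only: create a relaxed ingress pattern template that matches
 * on ETH / IPV4, then destroy it again. */
static int
example_pattern_template(uint16_t port_id)
{
	const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 1,
		.ingress = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow_pattern_template *tmpl;

	tmpl = rte_flow_pattern_template_create(port_id, &attr, pattern, &error);
	if (tmpl == NULL)
		return -rte_errno;
	return rte_flow_pattern_template_destroy(port_id, tmpl, &error);
}
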
 /**
  * Allocate a new memory for the counter values wrapped by all the needed
  * management.
@@ -8249,7 +8436,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!priv->config.dv_flow_en)
+               if (!priv->sh->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
                flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8377,6 +8564,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                                "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
+       /* Query counter. */
+       if (flow->counter &&
+           !mlx5_counter_query(dev, flow->counter, false,
+                               &count.hits, &count.bytes, &action) &&
+           action) {
+               id = (uint64_t)(uintptr_t)action;
+               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+               save_dump_file(NULL, 0, type,
+                              id, (void *)&count, file);
+       }
+
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
                                [MLX5_IPOOL_MLX5_FLOW], handle_idx);
@@ -8384,16 +8581,6 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                        continue;
                handle_idx = dh->next.next;
 
-               /* query counter */
-               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               flow_dv_query_count_ptr(dev, flow->counter,
-                                               &action, error);
-               if (action) {
-                       id = (uint64_t)(uintptr_t)action;
-                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-               }
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
                modify_hdr = dh->dvh.modify_hdr;
@@ -8439,7 +8626,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
  */
 static int
 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
-       FILE *file, struct rte_flow_error *error)
+       FILE *file, struct rte_flow_error *error __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
@@ -8524,14 +8711,12 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
        max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
        for (j = 1; j <= max; j++) {
                action = NULL;
-               flow_dv_query_count_ptr(dev, j, &action, error);
-               if (action) {
-                       if (!flow_dv_query_count(dev, j, &count, error)) {
-                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-                               id = (uint64_t)(uintptr_t)action;
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-                       }
+               if (!mlx5_counter_query(dev, j, false, &count.hits,
+                                       &count.bytes, &action) && action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                       save_dump_file(NULL, 0, type,
+                                       id, (void *)&count, file);
                }
        }
        return 0;
@@ -8563,7 +8748,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        struct mlx5_flow_handle *dh;
        struct rte_flow *flow;
 
-       if (!priv->config.dv_flow_en) {
+       if (!sh->config.dv_flow_en) {
                if (fputs("device dv flow disabled\n", file) <= 0)
                        return -errno;
                return -ENOTSUP;
@@ -9547,7 +9732,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (!priv->config.dv_flow_en)
+       if (!priv->sh->config.dv_flow_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "flow DV interface is off");
@@ -9966,7 +10151,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
        type = mlx5_flow_os_get_type();
        if (type == MLX5_FLOW_TYPE_MAX) {
                type = MLX5_FLOW_TYPE_VERBS;
-               if (priv->sh->devx && priv->config.dv_flow_en)
+               if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
                        type = MLX5_FLOW_TYPE_DV;
        }
        fops = flow_get_drv_ops(type);