net/mlx5: add flow flush
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5b5b00c..1672939 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+       [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
@@ -804,6 +805,79 @@ static int
 mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
                            const struct rte_flow_item_flex_handle *handle,
                            struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *err);
+
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error);
+
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error);
+static struct rte_flow_actions_template *
+mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
+                       const struct rte_flow_actions_template_attr *attr,
+                       const struct rte_flow_action actions[],
+                       const struct rte_flow_action masks[],
+                       struct rte_flow_error *error);
+static int
+mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_actions_template *template,
+                                  struct rte_flow_error *error);
+
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+                      const struct rte_flow_template_table_attr *attr,
+                      struct rte_flow_pattern_template *item_templates[],
+                      uint8_t nb_item_templates,
+                      struct rte_flow_actions_template *action_templates[],
+                      uint8_t nb_action_templates,
+                      struct rte_flow_error *error);
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+                       struct rte_flow_template_table *table,
+                       struct rte_flow_error *error);
+static struct rte_flow *
+mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
+                           uint32_t queue,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           const struct rte_flow_item items[],
+                           uint8_t pattern_template_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error);
+static int
+mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
+                            uint32_t queue,
+                            const struct rte_flow_op_attr *attr,
+                            struct rte_flow *flow,
+                            void *user_data,
+                            struct rte_flow_error *error);
+static int
+mlx5_flow_pull(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_op_result res[],
+              uint16_t n_res,
+              struct rte_flow_error *error);
+static int
+mlx5_flow_push(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -825,6 +899,18 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
        .flex_item_create = mlx5_flow_flex_item_create,
        .flex_item_release = mlx5_flow_flex_item_release,
+       .info_get = mlx5_flow_info_get,
+       .configure = mlx5_flow_port_configure,
+       .pattern_template_create = mlx5_flow_pattern_template_create,
+       .pattern_template_destroy = mlx5_flow_pattern_template_destroy,
+       .actions_template_create = mlx5_flow_actions_template_create,
+       .actions_template_destroy = mlx5_flow_actions_template_destroy,
+       .template_table_create = mlx5_flow_table_create,
+       .template_table_destroy = mlx5_flow_table_destroy,
+       .async_create = mlx5_flow_async_flow_create,
+       .async_destroy = mlx5_flow_async_flow_destroy,
+       .pull = mlx5_flow_pull,
+       .push = mlx5_flow_push,
 };
 
 /* Tunnel information. */
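
The callbacks added to mlx5_flow_ops above are the driver side of the asynchronous, template-based rte_flow API introduced in DPDK 22.03. The sketch below is an illustrative application-side sequence exercising them through the public API; it assumes a port probed with dv_flow_en=2, and the function name, queue size and attribute values are made up for illustration rather than taken from this patch.

/*
 * Illustrative sketch only: configure one flow queue, create pattern and
 * actions templates, bind them into a table, then enqueue, push and poll
 * a single rule.  Assumes DPDK 22.03+ rte_flow definitions.
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
hw_steering_example(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	const struct rte_flow_port_attr port_attr = { 0 };
	const struct rte_flow_queue_attr qattr = { .size = 64 };
	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	/* Match any Ethernet frame and send it to Rx queue 0. */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue_conf = { .index = 0 };
	const struct rte_flow_action_queue queue_mask = { .index = UINT16_MAX };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
	const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
	const struct rte_flow_template_table_attr tbl_attr = {
		.flow_attr = { .ingress = 1 },
		.nb_flows = 64,
	};
	struct rte_flow_pattern_template *pt;
	struct rte_flow_actions_template *at;
	struct rte_flow_template_table *tbl;
	struct rte_flow_op_result res[8];
	struct rte_flow *flow;
	int ret;

	/* Query HWS limits, then allocate one flow queue of 64 entries. */
	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) ||
	    rte_flow_configure(port_id, &port_attr, 1, qattrs, &err))
		return -1;
	/* Templates and the table binding them together. */
	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
	at = rte_flow_actions_template_create(port_id, &at_attr, actions,
					      masks, &err);
	if (!pt || !at)
		return -1;
	tbl = rte_flow_template_table_create(port_id, &tbl_attr,
					     &pt, 1, &at, 1, &err);
	if (!tbl)
		return -1;
	/* Enqueue one rule on queue 0, push it and poll for completion. */
	flow = rte_flow_async_create(port_id, 0, &op_attr, tbl, pattern, 0,
				     actions, 0, NULL, &err);
	if (!flow)
		return -1;
	rte_flow_push(port_id, 0, &err);
	do {
		ret = rte_flow_pull(port_id, 0, res, 8, &err);
	} while (ret == 0);
	return ret < 0 ? -1 : 0;
}
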
@@ -3428,6 +3514,12 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 
        if (type != MLX5_FLOW_TYPE_MAX)
                return type;
+       /*
+        * Currently, when dv_flow_en == 2, only the HW steering engine is
+        * supported. Other engines can be selected here once they are ready.
+        */
+       if (priv->sh->config.dv_flow_en == 2)
+               return MLX5_FLOW_TYPE_HW;
        /* If no OS specific type - continue with DV/VERBS selection */
        if (attr->transfer && priv->sh->config.dv_esw_en)
                type = MLX5_FLOW_TYPE_DV;
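
Note: dv_flow_en is the existing mlx5 devargs; probing the port with dv_flow_en=2 (for example an EAL allow-list entry such as -a 0000:03:00.0,dv_flow_en=2, the PCI address being purely illustrative) is what makes flow_get_drv_type() return MLX5_FLOW_TYPE_HW and routes the template/async callbacks registered above to mlx5_flow_hw_drv_ops.
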
@@ -5021,6 +5113,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        uint32_t tag_id = 0;
        struct rte_flow_item *vlan_item_dst = NULL;
        const struct rte_flow_item *vlan_item_src = NULL;
+       const struct rte_flow_item *orig_items = items;
        struct rte_flow_action *hw_mtr_action;
        struct rte_flow_action *action_pre_head = NULL;
        int32_t flow_src_port = priv->representor_id;
@@ -5145,7 +5238,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 
                if (!fm->def_policy) {
                        sub_policy = get_meter_sub_policy(dev, flow, wks,
-                                                         attr, items, error);
+                                                         attr, orig_items,
+                                                         error);
                        if (!sub_policy)
                                return -rte_errno;
                } else {
@@ -6837,6 +6931,15 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2) {
+               rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q creation not supported");
+               return NULL;
+       }
        /*
        * If the device is not started yet, it is not allowed to create a
         * flow from application. PMD default flows and traffic control flows
@@ -6912,6 +7015,14 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
        uint32_t num_flushed = 0, fidx = 1;
        struct rte_flow *flow;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       if (priv->sh->config.dv_flow_en == 2 &&
+           type == MLX5_FLOW_TYPE_GEN) {
+               flow_hw_q_flow_flush(dev, NULL);
+               return;
+       }
+#endif
+
        MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
                flow_list_destroy(dev, type, fidx);
                num_flushed++;
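
With the early return above, a plain rte_flow_flush() call, which in this driver goes through mlx5_flow_list_flush() with MLX5_FLOW_TYPE_GEN, also removes the rules inserted through the asynchronous flow queues when HW steering (dv_flow_en=2) is active. A minimal caller sketch using only the public rte_flow API follows; the function name and error handling are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <rte_flow.h>

/* Flush every rule on the port; with dv_flow_en=2 this now also covers
 * flows created through rte_flow_async_create(). Illustrative sketch. */
static void
flush_all_flows(uint16_t port_id)
{
	struct rte_flow_error err = { 0 };

	if (rte_flow_flush(port_id, &err) != 0)
		printf("flow flush failed: %s\n",
		       err.message ? err.message : "(no message)");
}
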
@@ -7333,6 +7444,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error __rte_unused)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q destruction not supported");
        flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
                                (uintptr_t)(void *)flow);
        return 0;
@@ -7430,7 +7548,13 @@ mlx5_flow_query(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
 {
        int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (priv->sh->config.dv_flow_en == 2)
+               return rte_flow_error_set(error, ENOTSUP,
+                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                         NULL,
+                         "Flow non-Q query not supported");
        ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
                             error);
        if (ret < 0)
@@ -7797,14 +7921,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
  */
 int
 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
-                  bool clear, uint64_t *pkts, uint64_t *bytes)
+                  bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
 
        if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
                fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-               return fops->counter_query(dev, cnt, clear, pkts, bytes);
+               return fops->counter_query(dev, cnt, clear, pkts,
+                                       bytes, action);
        }
        DRV_LOG(ERR,
                "port %u counter query is not supported.",
@@ -7812,6 +7937,436 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
        return -ENOTSUP;
 }
 
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ *   Pointer to port information.
+ * @param[out] queue_info
+ *   Pointer to queue information.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+                  struct rte_flow_port_info *port_info,
+                  struct rte_flow_queue_info *queue_info,
+                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "info get with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ *   Port configuration attributes.
+ * @param[in] nb_queue
+ *   Number of queue.
+ * @param[in] queue_attr
+ *   Array that holds attributes for each flow queue.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+                        const struct rte_flow_port_attr *port_attr,
+                        uint16_t nb_queue,
+                        const struct rte_flow_queue_attr *queue_attr[],
+                        struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "port configure with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
+
+/**
+ * Create flow pattern template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the item template attributes.
+ * @param[in] items
+ *   The template item pattern.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
+               const struct rte_flow_pattern_template_attr *attr,
+               const struct rte_flow_item items[],
+               struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_create(dev, attr, items, error);
+}
+
+/**
+ * Destroy flow item template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the item template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_pattern_template *template,
+                                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "pattern destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pattern_template_destroy(dev, template, error);
+}
+
+/**
+ * Create flow actions template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the action template attributes.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] masks
+ *   List of actions marking which members of the action list are constant.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_actions_template *
+mlx5_flow_actions_template_create(struct rte_eth_dev *dev,
+                       const struct rte_flow_actions_template_attr *attr,
+                       const struct rte_flow_action actions[],
+                       const struct rte_flow_action masks[],
+                       struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "action create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->actions_template_create(dev, attr, actions, masks, error);
+}
+
+/**
+ * Destroy flow action template.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] template
+ *   Pointer to the action template to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
+                                  struct rte_flow_actions_template *template,
+                                  struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "action destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->actions_template_destroy(dev, template, error);
+}
+
+/**
+ * Create flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in] nb_item_templates
+ *   Number of item templates.
+ * @param[in] action_templates
+ *   Action template array to be bound to the table.
+ * @param[in] nb_action_templates
+ *   Number of action templates.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Table on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+                      const struct rte_flow_template_table_attr *attr,
+                      struct rte_flow_pattern_template *item_templates[],
+                      uint8_t nb_item_templates,
+                      struct rte_flow_actions_template *action_templates[],
+                      uint8_t nb_action_templates,
+                      struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "table create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->template_table_create(dev,
+                                          attr,
+                                          item_templates,
+                                          nb_item_templates,
+                                          action_templates,
+                                          nb_action_templates,
+                                          error);
+}
+
+/**
+ * Destroy flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table
+ *   Pointer to the table to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+                       struct rte_flow_template_table *table,
+                       struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "table destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->template_table_destroy(dev, table, error);
+}
+
+/**
+ * Enqueue flow creation.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue_id
+ *   The queue to create the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] items
+ *   Items with flow spec value.
+ * @param[in] pattern_template_index
+ *   Index of the pattern template the flow follows within the table.
+ * @param[in] actions
+ *   Action with flow spec value.
+ * @param[in] action_template_index
+ *   Index of the actions template the flow follows within the table.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
+                           uint32_t queue_id,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           const struct rte_flow_item items[],
+                           uint8_t pattern_template_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->async_flow_create(dev, queue_id, attr, table,
+                                      items, pattern_template_index,
+                                      actions, action_template_index,
+                                      user_data, error);
+}
+
+/**
+ * Enqueue flow destruction.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to destroy the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] flow
+ *   Pointer to the flow to be destroyed.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
+                            uint32_t queue,
+                            const struct rte_flow_op_attr *attr,
+                            struct rte_flow *flow,
+                            void *user_data,
+                            struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q destroy with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->async_flow_destroy(dev, queue, attr, flow,
+                                       user_data, error);
+}
+
+/**
+ * Pull the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to pull the result.
+ * @param[in, out] res
+ *   Array to save the results.
+ * @param[in] n_res
+ *   Number of entries available in the result array.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Number of results on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_pull(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_op_result res[],
+              uint16_t n_res,
+              struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q pull with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->pull(dev, queue, res, n_res, error);
+}
+
+/**
+ * Push the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to push the flows.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_push(struct rte_eth_dev *dev,
+              uint32_t queue,
+              struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q push with incorrect steering mode");
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->push(dev, queue, error);
+}
+
 /**
  * Allocate a new memory for the counter values wrapped by all the needed
  * management.
@@ -8376,6 +8931,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                                "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
+       /* query counter */
+       if (flow->counter &&
+           !mlx5_counter_query(dev, flow->counter, false, &count.hits,
+                               &count.bytes, &action) && action) {
+               id = (uint64_t)(uintptr_t)action;
+               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+               save_dump_file(NULL, 0, type,
+                       id, (void *)&count, file);
+       }
+
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
                                [MLX5_IPOOL_MLX5_FLOW], handle_idx);
@@ -8383,16 +8948,6 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                        continue;
                handle_idx = dh->next.next;
 
-               /* query counter */
-               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               flow_dv_query_count_ptr(dev, flow->counter,
-                                               &action, error);
-               if (action) {
-                       id = (uint64_t)(uintptr_t)action;
-                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-               }
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
                modify_hdr = dh->dvh.modify_hdr;
@@ -8438,7 +8993,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
  */
 static int
 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
-       FILE *file, struct rte_flow_error *error)
+       FILE *file, struct rte_flow_error *error __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
@@ -8523,14 +9078,12 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
        max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
        for (j = 1; j <= max; j++) {
                action = NULL;
-               flow_dv_query_count_ptr(dev, j, &action, error);
-               if (action) {
-                       if (!flow_dv_query_count(dev, j, &count, error)) {
-                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-                               id = (uint64_t)(uintptr_t)action;
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-                       }
+               if (!mlx5_counter_query(dev, j, false, &count.hits,
+                                       &count.bytes, &action) && action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                       save_dump_file(NULL, 0, type,
+                                       id, (void *)&count, file);
                }
        }
        return 0;