net/mlx5: optimize flow RSS struct
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adba168..0565f6f 100644
@@ -8,6 +8,7 @@
 #include <stdalign.h>
 #include <stdint.h>
 #include <string.h>
+#include <stdbool.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -167,7 +168,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
-               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
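The expansion-table change above adds IPv4 and IPv6 as possible next nodes after VXLAN, so RSS expansion can now reach the inner L3 headers of L3 VXLAN traffic. A hypothetical pattern that benefits, using only standard rte_flow item types (illustration, not part of the patch):

```c
/* Outer eth/ipv4/udp/vxlan; with the new next nodes the RSS expansion
 * may append inner IPV4/IPV6 items after the VXLAN item. */
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
```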
@@ -402,7 +405,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
-               RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+               MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
@@ -447,7 +450,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
-       assert(false);
+       MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
@@ -606,7 +609,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 {
        unsigned int i;
 
-       assert(nic_mask);
+       MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
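The loop above accepts a user mask only if it is a subset of the NIC-supported mask: `(nic_mask[i] | mask[i]) != nic_mask[i]` holds exactly when `mask` sets a bit that `nic_mask` does not. The same check as a standalone helper (minimal sketch):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true when every bit set in mask is also set in nic_mask. */
static bool
mask_is_supported(const uint8_t *mask, const uint8_t *nic_mask, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return false;
	return true;
}
```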
@@ -655,13 +658,12 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  *   The hash fields that should be used.
  */
 uint64_t
-mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
 {
-       struct rte_flow *flow = dev_flow->flow;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       int rss_request_inner = flow->rss.level >= 2;
+       int rss_request_inner = rss_desc->level >= 2;
 
        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
@@ -670,7 +672,7 @@ mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
                return 0;
 #endif
        /* Check if requested layer matches RSS hash fields. */
-       if (!(flow->rss.types & layer_types))
+       if (!(rss_desc->types & layer_types))
                return 0;
        return hash_fields;
 }
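The helper now reads the RSS level and types from the shared mlx5_flow_rss_desc instead of the per-flow copy. A usage sketch under the signature shown above (ETH_RSS_* and IBV_RX_HASH_* are the standard rte_ethdev and verbs macros; only the two fields consulted here are initialized):

```c
/* RSS configured for IP only, outer level. */
struct mlx5_flow_rss_desc rss_desc = {
	.level = 1,
	.types = ETH_RSS_IP,
};
uint64_t fields;

/* A UDP layer asks for UDP port hashing, but ETH_RSS_UDP does not
 * intersect rss_desc.types, so the hash fields are dropped. */
fields = mlx5_flow_hashfields_adjust(&rss_desc, 0, ETH_RSS_UDP,
				     IBV_RX_HASH_SRC_PORT_UDP |
				     IBV_RX_HASH_DST_PORT_UDP);
/* fields == 0 here. */
```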
@@ -709,21 +711,27 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+                      struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow *flow = dev_flow->flow;
-       const int mark = !!(dev_flow->actions &
-                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int mark = dev_handle->mark;
+       const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+       struct mlx5_hrxq *hrxq;
        unsigned int i;
 
-       for (i = 0; i != flow->rss.queue_num; ++i) {
-               int idx = (*flow->rss.queue)[i];
+       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+               return;
+       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                             dev_handle->rix_hrxq);
+       if (!hrxq)
+               return;
+       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+               int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
@@ -748,7 +756,7 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
-                                    dev_flow->layers) ==
+                                    dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
@@ -770,10 +778,13 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-       struct mlx5_flow *dev_flow;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
+       struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-               flow_drv_rxq_flags_set(dev, dev_flow);
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
+               flow_drv_rxq_flags_set(dev, dev_handle);
 }
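flow->dev_flows, a pointer-based LIST, becomes flow->dev_handles, a chain of 32-bit indices into the MLX5_IPOOL_MLX5_FLOW indexed pool, walked with SILIST_FOREACH. A hypothetical expansion of such an index-list iterator (assumed semantics; the real macro lives in the mlx5 utils header):

```c
/* 'entry' is resolved from the pool by index; each entry stores the
 * next index in the field named by 'member', and index 0 ends the
 * chain. Hypothetical definition, for illustration only. */
#define SILIST_FOREACH(pool, head, idx, entry, member)			  \
	for ((idx) = (head),						  \
	     (entry) = (idx) ? mlx5_ipool_get((pool), (idx)) : NULL;	  \
	     (entry);							  \
	     (idx) = (entry)->member,					  \
	     (entry) = (idx) ? mlx5_ipool_get((pool), (idx)) : NULL)
```

Storing a 4-byte index instead of a pointer shrinks the handle and keeps references valid across pool growth, which appears to be the point of the handle rework.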
 
 /**
@@ -782,22 +793,28 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+                       struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow *flow = dev_flow->flow;
-       const int mark = !!(dev_flow->actions &
-                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-       const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       const int mark = dev_handle->mark;
+       const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+       struct mlx5_hrxq *hrxq;
        unsigned int i;
 
-       assert(dev->data->dev_started);
-       for (i = 0; i != flow->rss.queue_num; ++i) {
-               int idx = (*flow->rss.queue)[i];
+       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+               return;
+       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                             dev_handle->rix_hrxq);
+       if (!hrxq)
+               return;
+       MLX5_ASSERT(dev->data->dev_started);
+       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+               int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
@@ -817,7 +834,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
-                                    dev_flow->layers) ==
+                                    dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
@@ -840,10 +857,13 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 static void
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-       struct mlx5_flow *dev_flow;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
+       struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-               flow_drv_rxq_flags_trim(dev, dev_flow);
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
+               flow_drv_rxq_flags_trim(dev, dev_handle);
 }
 
 /**
@@ -1638,7 +1658,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
-                       .hop_limits = 0xff,
                },
        };
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -1783,7 +1802,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;
 
-       assert(flow_mask);
+       MLX5_ASSERT(flow_mask);
        if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1835,7 +1854,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
                uint32_t vlan_id;
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
-       uint32_t vlan_id = 0;
 
 
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
@@ -1862,23 +1880,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
                return ret;
        if (spec) {
                memcpy(&id.vni[1], spec->vni, 3);
-               vlan_id = id.vlan_id;
                memcpy(&id.vni[1], mask->vni, 3);
-               vlan_id &= id.vlan_id;
        }
-       /*
-        * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
-        * only this layer is defined in the Verbs specification it is
-        * interpreted as wildcard and all packets will match this
-        * rule, if it follows a full stack layer (ex: eth / ipv4 /
-        * udp), all packets matching the layers before will also
-        * match this rule.  To avoid such situation, VNI 0 is
-        * currently refused.
-        */
-       if (!vlan_id)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "VXLAN vni cannot be 0");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
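The vlan_id/vni union overlays the 3-byte VNI onto the upper bytes of a 32-bit word; the removed code used that to test the masked VNI for zero with a single integer compare, and this patch drops the VNI==0 rejection so a zero VNI now matches like any other value. The removed idiom as a standalone sketch:

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Return true when the 24-bit VNI is non-zero after masking; this is
 * the union-overlay test the patch removes. */
static bool
vni_is_nonzero(const uint8_t vni[3], const uint8_t mask[3])
{
	union {
		uint32_t word;
		uint8_t bytes[4];
	} id = { .word = 0 };
	uint32_t masked;

	memcpy(&id.bytes[1], vni, 3);
	masked = id.word;
	memcpy(&id.bytes[1], mask, 3);
	masked &= id.word;
	return masked != 0;
}
```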
@@ -1917,7 +1920,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                uint32_t vlan_id;
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
-       uint32_t vlan_id = 0;
 
        if (!priv->config.l3_vxlan_en)
                return rte_flow_error_set(error, ENOTSUP,
@@ -1955,22 +1957,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                                                  "VxLAN-GPE protocol"
                                                  " not supported");
                memcpy(&id.vni[1], spec->vni, 3);
-               vlan_id = id.vlan_id;
                memcpy(&id.vni[1], mask->vni, 3);
-               vlan_id &= id.vlan_id;
        }
-       /*
-        * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
-        * layer is defined in the Verbs specification it is interpreted as
-        * wildcard and all packets will match this rule, if it follows a full
-        * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
-        * before will also match this rule.  To avoid such situation, VNI 0
-        * is currently refused.
-        */
-       if (!vlan_id)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "VXLAN-GPE vni cannot be 0");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2135,9 +2123,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                .protocol = RTE_BE16(UINT16_MAX),
        };
 
-       if (!(priv->config.hca_attr.flex_parser_protocols &
-             MLX5_HCA_FLEX_GENEVE_ENABLED) ||
-           !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+       if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Geneve is not enabled by device"
@@ -2314,7 +2300,7 @@ flow_qrss_get_id(struct rte_eth_dev *dev)
        ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
        if (ret)
                return 0;
-       assert(qrss_id);
+       MLX5_ASSERT(qrss_id);
        return qrss_id;
 }
 
@@ -2340,11 +2326,14 @@ static void
 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
                             struct rte_flow *flow)
 {
-       struct mlx5_flow *dev_flow;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t handle_idx;
+       struct mlx5_flow_handle *dev_handle;
 
-       LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-               if (dev_flow->qrss_id)
-                       flow_qrss_free_id(dev, dev_flow->qrss_id);
+       SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                      handle_idx, dev_handle, next)
+               if (dev_handle->split_flow_id)
+                       flow_qrss_free_id(dev, dev_handle->split_flow_id);
 }
 
 static int
@@ -2360,7 +2349,8 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused,
 }
 
 static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+                 const struct rte_flow_attr *attr __rte_unused,
                  const struct rte_flow_item items[] __rte_unused,
                  const struct rte_flow_action actions[] __rte_unused,
                  struct rte_flow_error *error)
@@ -2500,6 +2490,8 @@ flow_drv_validate(struct rte_eth_dev *dev,
  *   setting backward reference to the flow should be done out of this function.
  *   layers field is not filled either.
  *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -2513,7 +2505,8 @@ flow_drv_validate(struct rte_eth_dev *dev,
  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+                const struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
@@ -2522,9 +2515,9 @@ flow_drv_prepare(const struct rte_flow *flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
-       return fops->prepare(attr, items, actions, error);
+       return fops->prepare(dev, attr, items, actions, error);
 }
 
 /**
@@ -2566,7 +2559,7 @@ flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        return fops->translate(dev, dev_flow, attr, items, actions, error);
 }
@@ -2593,7 +2586,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        return fops->apply(dev, flow, error);
 }
@@ -2615,7 +2608,7 @@ flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        fops->remove(dev, flow);
 }
@@ -2637,7 +2630,7 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
        enum mlx5_flow_drv_type type = flow->drv_type;
 
        flow_mreg_split_qrss_release(dev, flow);
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        fops->destroy(dev, flow);
 }
@@ -2663,26 +2656,6 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
        return 0;
 }
 
-/**
- * Get port id item from the item list.
- *
- * @param[in] item
- *   Pointer to the list of items.
- *
- * @return
- *   Pointer to the port id item if exist, else return NULL.
- */
-static const struct rte_flow_item *
-find_port_id_item(const struct rte_flow_item *item)
-{
-       assert(item);
-       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
-                       return item;
-       }
-       return NULL;
-}
-
 /**
  * Get RSS action from the action list.
  *
@@ -2727,7 +2700,48 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
 }
 
 /**
- * Get QUEUE/RSS action from the action list.
+ * Get layer flags from the prefix flow.
+ *
+ * Some flows may be split into several subflows; the prefix subflow gets the
+ * match items and the suffix subflow gets the actions.
+ * Some actions need the user-defined match item flags to get the details of
+ * the action.
+ * This function helps the suffix flow to get the item layer flags from the
+ * prefix subflow.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the created prefix subflow.
+ *
+ * @return
+ *   The layers retrieved from the prefix subflow.
+ */
+static inline uint64_t
+flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
+{
+       uint64_t layers = 0;
+
+       /*
+        * The layer bits could be cached in local variables, but usually the
+        * compiler optimizes such accesses on its own.
+        * If there is no decap action, use the layers directly.
+        */
+       if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
+               return dev_flow->handle->layers;
+       /* Convert L3 layers with decap action. */
+       if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+               layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+       else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+               layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+       /* Convert L4 layers with decap action. */
+       if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+               layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+       else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+               layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+       return layers;
+}
+
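A worked example of the conversion above, using the layer bits as they are named in this file:

```c
/* The prefix subflow matched eth/ipv4/udp/vxlan/eth/ipv4/udp with a
 * decap action, so handle->layers carries the INNER_L3_IPV4 and
 * INNER_L4_UDP bits. After decap those headers become outermost: */
uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
/* layers == MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP */
```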
+/**
+ * Get metadata split action information.
  *
  * @param[in] actions
  *   Pointer to the list of actions.
@@ -2736,18 +2750,38 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
  * @param[out] qrss_type
  *   Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
  *   if no QUEUE/RSS is found.
+ * @param[out] encap_idx
+ *   Pointer to the index of the encap action if it exists, otherwise the last
+ *   action index.
  *
  * @return
  *   Total number of actions.
  */
 static int
-flow_parse_qrss_action(const struct rte_flow_action actions[],
-                      const struct rte_flow_action **qrss)
+flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
+                                      const struct rte_flow_action **qrss,
+                                      int *encap_idx)
 {
+       const struct rte_flow_action_raw_encap *raw_encap;
        int actions_n = 0;
+       int raw_decap_idx = -1;
 
+       *encap_idx = -1;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+               case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+                       *encap_idx = actions_n;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+                       raw_decap_idx = actions_n;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+                       raw_encap = actions->conf;
+                       if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+                               *encap_idx = raw_decap_idx != -1 ?
+                                                     raw_decap_idx : actions_n;
+                       break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                case RTE_FLOW_ACTION_TYPE_RSS:
                        *qrss = actions;
@@ -2757,6 +2791,8 @@ flow_parse_qrss_action(const struct rte_flow_action actions[],
                }
                actions_n++;
        }
+       if (*encap_idx == -1)
+               *encap_idx = actions_n;
        /* Count RTE_FLOW_ACTION_TYPE_END. */
        return actions_n + 1;
 }
@@ -2777,7 +2813,7 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
 {
        int actions_n = 0;
 
-       assert(mtr);
+       MLX5_ASSERT(mtr);
        *mtr = 0;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -2935,6 +2971,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
        };
        struct mlx5_flow_mreg_copy_resource *mcp_res;
+       uint32_t idx = 0;
        int ret;
 
        /* Fill the register fileds in the flow. */
@@ -2947,13 +2984,14 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                return NULL;
        cp_mreg.src = ret;
        /* Check if already registered. */
-       assert(priv->mreg_cp_tbl);
+       MLX5_ASSERT(priv->mreg_cp_tbl);
        mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
        if (mcp_res) {
                /* For non-default rule. */
                if (mark_id != MLX5_DEFAULT_COPY_ID)
                        mcp_res->refcnt++;
-               assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
+               MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+                           mcp_res->refcnt == 1);
                return mcp_res;
        }
        /* Provide the full width of FLAG specific value. */
@@ -3002,11 +3040,12 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                };
        }
        /* Build a new entry. */
-       mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
+       mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
        if (!mcp_res) {
                rte_errno = ENOMEM;
                return NULL;
        }
+       mcp_res->idx = idx;
        /*
         * The copy Flows are not included in any list. These
         * ones are referenced from other Flows and can not
@@ -3021,14 +3060,14 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
        mcp_res->hlist_ent.key = mark_id;
        ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
                                &mcp_res->hlist_ent);
-       assert(!ret);
+       MLX5_ASSERT(!ret);
        if (ret)
                goto error;
        return mcp_res;
 error:
        if (mcp_res->flow)
                flow_list_destroy(dev, NULL, mcp_res->flow);
-       rte_free(mcp_res);
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
        return NULL;
 }
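The copy-mark resource now comes from a per-shared-context indexed pool rather than rte_zmalloc(), and flows keep the 32-bit index instead of a pointer. The allocate/lookup/free trio as it is used throughout this patch (usage sketch of calls that all appear above or below):

```c
uint32_t idx = 0;
struct mlx5_flow_mreg_copy_resource *mcp_res;

/* Allocate a zeroed pool entry and receive its index. */
mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
/* Later, resolve a stored index back into a pointer... */
mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
/* ...and return the entry to the pool when the refcount drops. */
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
```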
 
@@ -3044,13 +3083,17 @@ static void
 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
                          struct rte_flow *flow)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+       struct mlx5_flow_mreg_copy_resource *mcp_res;
        struct mlx5_priv *priv = dev->data->dev_private;
 
+       if (!flow->rix_mreg_copy)
+               return;
+       mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+                                flow->rix_mreg_copy);
        if (!mcp_res || !priv->mreg_cp_tbl)
                return;
        if (flow->copy_applied) {
-               assert(mcp_res->appcnt);
+               MLX5_ASSERT(mcp_res->appcnt);
                flow->copy_applied = 0;
                --mcp_res->appcnt;
                if (!mcp_res->appcnt)
@@ -3062,11 +3105,11 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
         */
        if (--mcp_res->refcnt)
                return;
-       assert(mcp_res->flow);
+       MLX5_ASSERT(mcp_res->flow);
        flow_list_destroy(dev, NULL, mcp_res->flow);
        mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
-       rte_free(mcp_res);
-       flow->mreg_copy = NULL;
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+       flow->rix_mreg_copy = 0;
 }
 
 /**
@@ -3084,10 +3127,15 @@ static int
 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
                            struct rte_flow *flow)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
-       if (!mcp_res || flow->copy_applied)
+       if (!flow->rix_mreg_copy || flow->copy_applied)
+               return 0;
+       mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+                                flow->rix_mreg_copy);
+       if (!mcp_res)
                return 0;
        if (!mcp_res->appcnt) {
                ret = flow_drv_apply(dev, mcp_res->flow, NULL);
@@ -3111,11 +3159,16 @@ static void
 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
                           struct rte_flow *flow)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (!mcp_res || !flow->copy_applied)
+       if (!flow->rix_mreg_copy || !flow->copy_applied)
                return;
-       assert(mcp_res->appcnt);
+       mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+                                flow->rix_mreg_copy);
+       if (!mcp_res)
+               return;
+       MLX5_ASSERT(mcp_res->appcnt);
        --mcp_res->appcnt;
        flow->copy_applied = 0;
        if (!mcp_res->appcnt)
@@ -3141,10 +3194,10 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
                                            MLX5_DEFAULT_COPY_ID);
        if (!mcp_res)
                return;
-       assert(mcp_res->flow);
+       MLX5_ASSERT(mcp_res->flow);
        flow_list_destroy(dev, NULL, mcp_res->flow);
        mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
-       rte_free(mcp_res);
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 }
 
 /**
@@ -3236,7 +3289,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                                (dev, MLX5_FLOW_MARK_DEFAULT, error);
                        if (!mcp_res)
                                return -rte_errno;
-                       flow->mreg_copy = mcp_res;
+                       flow->rix_mreg_copy = mcp_res->idx;
                        if (dev->data->dev_started) {
                                mcp_res->appcnt++;
                                flow->copy_applied = 1;
@@ -3249,7 +3302,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                                flow_mreg_add_copy_action(dev, mark->id, error);
                        if (!mcp_res)
                                return -rte_errno;
-                       flow->mreg_copy = mcp_res;
+                       flow->rix_mreg_copy = mcp_res->idx;
                        if (dev->data->dev_started) {
                                mcp_res->appcnt++;
                                flow->copy_applied = 1;
@@ -3370,7 +3423,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        actions_rx++;
        set_tag = (void *)actions_rx;
        set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
-       assert(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NONE);
        set_tag->data = *flow_id;
        tag_action->conf = set_tag;
        /* Create Tx item list. */
@@ -3381,7 +3434,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        tag_item = (void *)addr;
        tag_item->data = *flow_id;
        tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
-       assert(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NONE);
        item->spec = tag_item;
        addr += sizeof(struct mlx5_rte_flow_item_tag);
        tag_item = (void *)addr;
@@ -3399,12 +3452,14 @@ flow_hairpin_split(struct rte_eth_dev *dev,
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
  * @param[in, out] sub_flow
  *   Pointer to return the created subflow, may be NULL.
+ * @param[in] prefix_layers
+ *   Prefix subflow layers, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -3422,6 +3477,7 @@ static int
 flow_create_split_inner(struct rte_eth_dev *dev,
                        struct rte_flow *flow,
                        struct mlx5_flow **sub_flow,
+                       uint64_t prefix_layers,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
@@ -3429,13 +3485,20 @@ flow_create_split_inner(struct rte_eth_dev *dev,
 {
        struct mlx5_flow *dev_flow;
 
-       dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+       dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
        if (!dev_flow)
                return -rte_errno;
        dev_flow->flow = flow;
        dev_flow->external = external;
        /* Subflow object was created, we must include one in the list. */
-       LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                     dev_flow->handle, next);
+       /*
+        * If dev_flow is as one of the suffix flow, some actions in suffix
+        * flow may need some user defined item layer flags.
+        */
+       if (prefix_layers)
+               dev_flow->handle->layers = prefix_layers;
        if (sub_flow)
                *sub_flow = dev_flow;
        return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3455,6 +3518,10 @@ flow_create_split_inner(struct rte_eth_dev *dev,
  *
  * @param dev
  *   Pointer to Ethernet device.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[out] sfx_items
+ *   Suffix flow match items (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
  * @param[out] actions_sfx
@@ -3471,66 +3538,60 @@ flow_create_split_inner(struct rte_eth_dev *dev,
  */
 static int
 flow_meter_split_prep(struct rte_eth_dev *dev,
+                const struct rte_flow_item items[],
+                struct rte_flow_item sfx_items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_action actions_sfx[],
                 struct rte_flow_action actions_pre[])
 {
-       struct rte_flow_action *tag_action;
+       struct rte_flow_action *tag_action = NULL;
+       struct rte_flow_item *tag_item;
        struct mlx5_rte_flow_action_set_tag *set_tag;
        struct rte_flow_error error;
        const struct rte_flow_action_raw_encap *raw_encap;
        const struct rte_flow_action_raw_decap *raw_decap;
+       struct mlx5_rte_flow_item_tag *tag_spec;
+       struct mlx5_rte_flow_item_tag *tag_mask;
        uint32_t tag_id;
+       bool copy_vlan = false;
 
-       /* Add the extra tag action first. */
-       tag_action = actions_pre;
-       tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
-       actions_pre++;
        /* Prepare the actions for prefix and suffix flow. */
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               struct rte_flow_action **action_cur = NULL;
+
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_METER:
+                       /* Add the extra tag action first. */
+                       tag_action = actions_pre;
+                       tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+                       actions_pre++;
+                       action_cur = &actions_pre;
+                       break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
-                       memcpy(actions_pre, actions,
-                              sizeof(struct rte_flow_action));
-                       actions_pre++;
+                       action_cur = &actions_pre;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap = actions->conf;
-                       if (raw_encap->size >
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4))) {
-                               memcpy(actions_sfx, actions,
-                                      sizeof(struct rte_flow_action));
-                               actions_sfx++;
-                       } else {
-                               rte_memcpy(actions_pre, actions,
-                                          sizeof(struct rte_flow_action));
-                               actions_pre++;
-                       }
+                       if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
+                               action_cur = &actions_pre;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        raw_decap = actions->conf;
-                       /* Size 0 decap means 50 bytes as vxlan decap. */
-                       if (raw_decap->size && (raw_decap->size <
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4)))) {
-                               memcpy(actions_sfx, actions,
-                                      sizeof(struct rte_flow_action));
-                               actions_sfx++;
-                       } else {
-                               rte_memcpy(actions_pre, actions,
-                                          sizeof(struct rte_flow_action));
-                               actions_pre++;
-                       }
+                       if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+                               action_cur = &actions_pre;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+                       copy_vlan = true;
                        break;
                default:
-                       memcpy(actions_sfx, actions,
-                               sizeof(struct rte_flow_action));
-                       actions_sfx++;
                        break;
                }
+               if (!action_cur)
+                       action_cur = &actions_sfx;
+               memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
+               (*action_cur)++;
        }
        /* Add end action to the actions. */
        actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
@@ -3544,7 +3605,44 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
         */
        tag_id = flow_qrss_get_id(dev);
        set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
+       MLX5_ASSERT(tag_action);
        tag_action->conf = set_tag;
+       /* Prepare the suffix subflow items. */
+       tag_item = sfx_items++;
+       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               int item_type = items->type;
+
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_PORT_ID:
+                       memcpy(sfx_items, items, sizeof(*sfx_items));
+                       sfx_items++;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       if (copy_vlan) {
+                               memcpy(sfx_items, items, sizeof(*sfx_items));
+                               /*
+                                * Convert to an internal match item; it is
+                                * used for VLAN push and set VID.
+                                */
+                               sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+                               sfx_items++;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+       sfx_items++;
+       tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
+       tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
+       tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
+       tag_mask = tag_spec + 1;
+       tag_mask->data = 0xffffff00;
+       tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+       tag_item->spec = tag_spec;
+       tag_item->last = NULL;
+       tag_item->mask = tag_mask;
        return tag_id;
 }
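The suffix flow matches the meter tag with the flow id in the high bits and the color bits unmasked; the 0xffffff00 mask implies MLX5_MTR_COLOR_BITS is 8. A worked example under that assumption:

```c
/* Assuming MLX5_MTR_COLOR_BITS == 8, consistent with the 0xffffff00
 * mask above. */
uint32_t tag_id = 0x1234;
uint32_t data = tag_id << 8;	/* 0x00123400: id in bits 8..31 */
uint32_t mask = 0xffffff00;	/* match the id, ignore the color */

/* A packet whose register reads 0x00123401 (a color bit set) still
 * matches: (0x00123401 & mask) == data. */
```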
 
@@ -3677,6 +3775,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
  *   Number of actions in the list.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
+ * @param[in] encap_idx
+ *   The encap action index.
  *
  * @return
  *   0 on success, negative value otherwise
@@ -3685,7 +3785,8 @@ static int
 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
                       struct rte_flow_action *ext_actions,
                       const struct rte_flow_action *actions,
-                      int actions_n, struct rte_flow_error *error)
+                      int actions_n, struct rte_flow_error *error,
+                      int encap_idx)
 {
        struct mlx5_flow_action_copy_mreg *cp_mreg =
                (struct mlx5_flow_action_copy_mreg *)
@@ -3700,15 +3801,24 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
        if (ret < 0)
                return ret;
        cp_mreg->src = ret;
-       memcpy(ext_actions, actions,
-                       sizeof(*ext_actions) * actions_n);
-       ext_actions[actions_n - 1] = (struct rte_flow_action){
-               .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
-               .conf = cp_mreg,
-       };
-       ext_actions[actions_n] = (struct rte_flow_action){
-               .type = RTE_FLOW_ACTION_TYPE_END,
-       };
+       if (encap_idx != 0)
+               memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
+       if (encap_idx == actions_n - 1) {
+               ext_actions[actions_n - 1] = (struct rte_flow_action){
+                       .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+                       .conf = cp_mreg,
+               };
+               ext_actions[actions_n] = (struct rte_flow_action){
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               };
+       } else {
+               ext_actions[encap_idx] = (struct rte_flow_action){
+                       .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+                       .conf = cp_mreg,
+               };
+               memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
+                               sizeof(*ext_actions) * (actions_n - encap_idx));
+       }
        return 0;
 }
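The rewrite places the copy-register action ahead of any encapsulation, so the metadata is copied from the packet before it is wrapped. The two resulting layouts, sketched (A0..An are the original actions):

```c
/* No encap (encap_idx == actions_n - 1, the END slot):
 *     { A0, A1, ..., COPY_MREG, END }
 * Encap present at encap_idx:
 *     { A0, ..., A[encap_idx - 1], COPY_MREG, ENCAP, ..., END }
 */
```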
 
@@ -3726,6 +3836,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
+ * @param[in] prefix_layers
+ *   Prefix flow layer flags.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
@@ -3742,6 +3854,7 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
 static int
 flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow *flow,
+                          uint64_t prefix_layers,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item items[],
                           const struct rte_flow_action actions[],
@@ -3756,15 +3869,18 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
        int mtr_sfx = 0;
        size_t act_size;
        int actions_n;
+       int encap_idx;
        int ret;
 
        /* Check whether extensive metadata feature is engaged. */
        if (!config->dv_flow_en ||
            config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev))
-               return flow_create_split_inner(dev, flow, NULL, attr, items,
-                                              actions, external, error);
-       actions_n = flow_parse_qrss_action(actions, &qrss);
+               return flow_create_split_inner(dev, flow, NULL, prefix_layers,
+                                              attr, items, actions, external,
+                                              error);
+       actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
+                                                          &encap_idx);
        if (qrss) {
                /* Exclude hairpin flows from splitting. */
                if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
@@ -3839,17 +3955,17 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                                  "metadata flow");
                /* Create the action list appended with copy register. */
                ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
-                                            actions_n, error);
+                                            actions_n, error, encap_idx);
                if (ret < 0)
                        goto exit;
        }
        /* Add the unmodified original or prefix subflow. */
-       ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
-                                     ext_actions ? ext_actions : actions,
-                                     external, error);
+       ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
+                                     items, ext_actions ? ext_actions :
+                                     actions, external, error);
        if (ret < 0)
                goto exit;
-       assert(dev_flow);
+       MLX5_ASSERT(dev_flow);
        if (qrss) {
                const struct rte_flow_attr q_attr = {
                        .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
@@ -3880,7 +3996,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                .type = RTE_FLOW_ACTION_TYPE_END,
                        },
                };
-               uint64_t hash_fields = dev_flow->hash_fields;
+               uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
 
                /*
                 * Configure the tag item only if there is no meter subflow.
@@ -3889,7 +4005,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                 */
                if (qrss_id) {
                        /* Not meter subflow. */
-                       assert(!mtr_sfx);
+                       MLX5_ASSERT(!mtr_sfx);
                        /*
                         * Put the unique id into the prefix flow, since it is
                         * destroyed after the suffix flow, and the id is freed
                         * once no actual flows use it, after which identifier
                         * reallocation becomes possible (for example, for
                         * other flows in other threads).
                         */
-                       dev_flow->qrss_id = qrss_id;
-                       qrss_id = 0;
+                       dev_flow->handle->split_flow_id = qrss_id;
                        ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
                                                   error);
                        if (ret < 0)
@@ -3907,14 +4022,15 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                }
                dev_flow = NULL;
                /* Add suffix subflow to execute Q/RSS. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow,
+               ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
                                              external, error);
                if (ret < 0)
                        goto exit;
-               assert(dev_flow);
-               dev_flow->hash_fields = hash_fields;
+               /* The qrss ID is released at exit only on failure. */
+               qrss_id = 0;
+               MLX5_ASSERT(dev_flow);
        }
 
 exit:
@@ -3967,7 +4083,6 @@ flow_create_split_meter(struct rte_eth_dev *dev,
        struct rte_flow_action *sfx_actions = NULL;
        struct rte_flow_action *pre_actions = NULL;
        struct rte_flow_item *sfx_items = NULL;
-       const  struct rte_flow_item *sfx_port_id_item;
        struct mlx5_flow *dev_flow = NULL;
        struct rte_flow_attr sfx_attr = *attr;
        uint32_t mtr = 0;
@@ -3980,13 +4095,11 @@ flow_create_split_meter(struct rte_eth_dev *dev,
        if (priv->mtr_en)
                actions_n = flow_check_meter_action(actions, &mtr);
        if (mtr) {
-               struct mlx5_rte_flow_item_tag *tag_spec;
-               struct mlx5_rte_flow_item_tag *tag_mask;
                /* The five prefix actions: meter, decap, encap, tag, end. */
                act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
-                          sizeof(struct rte_flow_action_set_tag);
-               /* tag, end. */
-#define METER_SUFFIX_ITEM 3
+                          sizeof(struct mlx5_rte_flow_action_set_tag);
+               /* tag, vlan, port id, end. */
+#define METER_SUFFIX_ITEM 4
                item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
                            sizeof(struct mlx5_rte_flow_item_tag) * 2;
                sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
@@ -3995,51 +4108,34 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no memory to split "
                                                  "meter flow");
+               sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
+                            act_size);
                pre_actions = sfx_actions + actions_n;
-               mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
-                                                    pre_actions);
+               mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
+                                                  actions, sfx_actions,
+                                                  pre_actions);
                if (!mtr_tag_id) {
                        ret = -rte_errno;
                        goto exit;
                }
                /* Add the prefix subflow. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
-                                                 pre_actions, external, error);
+               ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
+                                             items, pre_actions, external,
+                                             error);
                if (ret) {
                        ret = -rte_errno;
                        goto exit;
                }
-               dev_flow->mtr_flow_id = mtr_tag_id;
-               /* Prepare the suffix flow match pattern. */
-               sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
-                            act_size);
-               tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
-                           METER_SUFFIX_ITEM);
-               tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS;
-               tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
-                                                   error);
-               tag_mask = tag_spec + 1;
-               tag_mask->data = 0xffffff00;
-               sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
-               sfx_items->spec = tag_spec;
-               sfx_items->last = NULL;
-               sfx_items->mask = tag_mask;
-               sfx_items++;
-               sfx_port_id_item = find_port_id_item(items);
-               if (sfx_port_id_item) {
-                       memcpy(sfx_items, sfx_port_id_item,
-                              sizeof(*sfx_items));
-                       sfx_items++;
-               }
-               sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
-               sfx_items -= sfx_port_id_item ? 2 : 1;
+               dev_flow->handle->split_flow_id = mtr_tag_id;
                /* Setting the sfx group attr. */
                sfx_attr.group = sfx_attr.transfer ?
                                (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
                                 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
        }
        /* Add the prefix subflow. */
-       ret = flow_create_split_metadata(dev, flow, &sfx_attr,
+       ret = flow_create_split_metadata(dev, flow, dev_flow ?
+                                        flow_get_prefix_layer_flags(dev_flow) :
+                                        0, &sfx_attr,
                                         sfx_items ? sfx_items : items,
                                         sfx_actions ? sfx_actions : actions,
                                         external, error);
@@ -4096,7 +4192,7 @@ flow_create_split_outer(struct rte_eth_dev *dev,
 
        ret = flow_create_split_meter(dev, flow, attr, items,
                                         actions, external, error);
-       assert(ret <= 0);
+       MLX5_ASSERT(ret <= 0);
        return ret;
 }
 
@@ -4152,14 +4248,18 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                uint8_t buffer[2048];
        } items_tx;
        struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+       struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+                                             priv->rss_desc)[!!priv->flow_idx];
        const struct rte_flow_action *p_actions_rx = actions;
-       int ret;
        uint32_t i;
-       uint32_t flow_size;
        int hairpin_flow = 0;
        uint32_t hairpin_id = 0;
        struct rte_flow_attr attr_tx = { .priority = 0 };
+       int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
+                                   error);
 
+       if (ret < 0)
+               return NULL;
        hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
        if (hairpin_flow > 0) {
                if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
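The RSS description no longer lives in struct rte_flow; flow_list_create() borrows one of two preallocated descriptors hanging off the device private data, selected by `!!priv->flow_idx`. Under the reading suggested by the nested-index comments below, slot 0 serves the outer flow creation and slot 1 a nested one, so the outer descriptor is not clobbered:

```c
/* Two mlx5_flow_rss_desc slots sit behind priv->rss_desc: the outer
 * creation runs with priv->flow_idx == 0 and takes slot 0; a nested
 * creation (flow_idx != 0) yields !!priv->flow_idx == 1 and takes
 * slot 1. */
struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
				      priv->rss_desc)[!!priv->flow_idx];
```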
@@ -4171,18 +4271,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                                   &hairpin_id);
                p_actions_rx = actions_rx.actions;
        }
-       ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
-                               error);
-       if (ret < 0)
-               goto error_before_flow;
-       flow_size = sizeof(struct rte_flow);
-       rss = flow_get_rss_action(p_actions_rx);
-       if (rss)
-               flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
-                                           sizeof(void *));
-       else
-               flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
-       flow = rte_calloc(__func__, 1, flow_size, 0);
+       flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_errno = ENOMEM;
                goto error_before_flow;
@@ -4190,19 +4279,20 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        flow->drv_type = flow_get_drv_type(dev, attr);
        if (hairpin_id != 0)
                flow->hairpin_flow_id = hairpin_id;
-       assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
-              flow->drv_type < MLX5_FLOW_TYPE_MAX);
-       flow->rss.queue = (void *)(flow + 1);
+       MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+                   flow->drv_type < MLX5_FLOW_TYPE_MAX);
+       memset(rss_desc, 0, sizeof(*rss_desc));
+       rss = flow_get_rss_action(p_actions_rx);
        if (rss) {
                /*
                 * The following information is required by
                 * mlx5_flow_hashfields_adjust() in advance.
                 */
-               flow->rss.level = rss->level;
+               rss_desc->level = rss->level;
                /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-               flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+               rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
        }
-       LIST_INIT(&flow->dev_flows);
+       flow->dev_handles = 0;
        if (rss && rss->types) {
                unsigned int graph_root;
 
@@ -4211,12 +4301,21 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                                          items, rss->types,
                                          mlx5_support_expansion,
                                          graph_root);
-               assert(ret > 0 &&
+               MLX5_ASSERT(ret > 0 &&
                       (unsigned int)ret < sizeof(expand_buffer.buffer));
        } else {
                buf->entries = 1;
                buf->entry[0].pattern = (void *)(uintptr_t)items;
        }
+       /*
+        * Record the start index when there is a nested call. All sub-flows
+        * need to be translated before another nested call takes place.
+        * No ping-pong buffer is needed here, which saves memory.
+        */
+       if (priv->flow_idx) {
+               MLX5_ASSERT(!priv->flow_nested_idx);
+               priv->flow_nested_idx = priv->flow_idx;
+       }
        for (i = 0; i < buf->entries; ++i) {
                /*
                 * The splitter may create multiple dev_flows,
@@ -4235,13 +4334,14 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
                attr_tx.ingress = 0;
                attr_tx.egress = 1;
-               dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+               dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
                                            actions_hairpin_tx.actions, error);
                if (!dev_flow)
                        goto error;
                dev_flow->flow = flow;
                dev_flow->external = 0;
-               LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+               SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                             dev_flow->handle, next);
                ret = flow_drv_translate(dev, dev_flow, &attr_tx,
                                         items_tx.items,
                                         actions_hairpin_tx.actions, error);
@@ -4264,7 +4364,11 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                if (ret)
                        goto error;
        }
-       if (dev->data->dev_started) {
+       /*
+        * If the flow is external (from the application) OR the device is
+        * started, then the flow will be applied immediately.
+        */
+       if (external || dev->data->dev_started) {
                ret = flow_drv_apply(dev, flow, error);
                if (ret < 0)
                        goto error;
@@ -4272,23 +4376,27 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        if (list)
                TAILQ_INSERT_TAIL(list, flow, next);
        flow_rxq_flags_set(dev, flow);
+       /* Nested flow creation index recovery. */
+       priv->flow_idx = priv->flow_nested_idx;
+       if (priv->flow_nested_idx)
+               priv->flow_nested_idx = 0;
        return flow;
-error_before_flow:
-       if (hairpin_id)
-               mlx5_flow_id_release(priv->sh->flow_id_pool,
-                                    hairpin_id);
-       return NULL;
 error:
-       assert(flow);
-       flow_mreg_del_copy_action(dev, flow);
+       MLX5_ASSERT(flow);
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       if (flow->hairpin_flow_id)
-               mlx5_flow_id_release(priv->sh->flow_id_pool,
-                                    flow->hairpin_flow_id);
-       assert(flow);
+       flow_mreg_del_copy_action(dev, flow);
        flow_drv_destroy(dev, flow);
        rte_free(flow);
        rte_errno = ret; /* Restore rte_errno. */
+error_before_flow:
+       ret = rte_errno;
+       if (hairpin_id)
+               mlx5_flow_id_release(priv->sh->flow_id_pool,
+                                    hairpin_id);
+       rte_errno = ret;
+       priv->flow_idx = priv->flow_nested_idx;
+       if (priv->flow_nested_idx)
+               priv->flow_nested_idx = 0;
        return NULL;
 }
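
The flow_idx/flow_nested_idx pair above implements a one-level save/restore
over the shared intermediate dev_flow storage: a nested flow_list_create()
call (e.g. for a metadata copy flow) must not overwrite the outer flow's
sub-flows. A minimal sketch of the pattern, with illustrative names rather
than the PMD's exact fields:

#include <assert.h>
#include <stdint.h>

struct nest_ctx {
	uint32_t flow_idx;        /* Next free dev_flow slot. */
	uint32_t flow_nested_idx; /* Saved start index of the outer flow. */
};

static void
nest_enter(struct nest_ctx *c)
{
	if (c->flow_idx) {
		/* Only one nesting level is expected. */
		assert(!c->flow_nested_idx);
		c->flow_nested_idx = c->flow_idx;
	}
}

static void
nest_leave(struct nest_ctx *c)
{
	/* Recover the outer index; it stays 0 when no nesting was active. */
	c->flow_idx = c->flow_nested_idx;
	if (c->flow_nested_idx)
		c->flow_nested_idx = 0;
}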
 
@@ -4356,6 +4464,17 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
+       /*
+        * If the device is not started yet, it is not allowed to create a
+        * flow from the application. PMD default flows and traffic control
+        * flows are not affected.
+        */
+       if (unlikely(!dev->data->dev_started)) {
+               rte_errno = ENODEV;
+               DRV_LOG(DEBUG, "port %u is not started when "
+                       "inserting a flow", dev->data->port_id);
+               return NULL;
+       }
        return flow_list_create(dev, &priv->flows,
                                attr, items, actions, true, error);
 }
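
With the check above, rte_flow_create() on a port that has not been started
fails and sets rte_errno to ENODEV. A hedged usage sketch from the
application side (attr, pattern and actions are assumed to be prepared
elsewhere; error handling is trimmed):

#include <errno.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_flow_on_started_port(uint16_t port_id,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *f;

	f = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (f == NULL && rte_errno == ENODEV) {
		/* The port is not started yet: start it and retry once. */
		if (rte_eth_dev_start(port_id) == 0)
			f = rte_flow_create(port_id, attr, pattern,
					    actions, &err);
	}
	return f;
}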
@@ -4376,6 +4495,7 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
                  struct rte_flow *flow)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
 
        /*
         * Update RX queue flags only if port is started, otherwise it is
@@ -4390,7 +4510,17 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
        if (list)
                TAILQ_REMOVE(list, flow, next);
        flow_mreg_del_copy_action(dev, flow);
-       rte_free(flow->fdir);
+       if (flow->fdir) {
+               LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+                       if (priv_fdir_flow->flow == flow)
+                               break;
+               }
+               if (priv_fdir_flow) {
+                       LIST_REMOVE(priv_fdir_flow, next);
+                       rte_free(priv_fdir_flow->fdir);
+                       rte_free(priv_fdir_flow);
+               }
+       }
        rte_free(flow);
 }
 
@@ -4401,15 +4531,25 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
  *   Pointer to Ethernet device.
  * @param list
  *   Pointer to a TAILQ flow list.
+ * @param active
+ *   If flushing is called actively; the number of flushed flows is logged.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+                    bool active)
 {
+       uint32_t num_flushed = 0;
+
        while (!TAILQ_EMPTY(list)) {
                struct rte_flow *flow;
 
                flow = TAILQ_FIRST(list);
                flow_list_destroy(dev, list, flow);
+               num_flushed++;
+       }
+       if (active) {
+               DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
+                       dev->data->port_id, num_flushed);
        }
 }
 
@@ -4474,6 +4614,79 @@ error:
        return -rte_errno;
 }
 
+/**
+ * Stop all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
+{
+       flow_mreg_del_default_copy_action(dev);
+}
+
+/**
+ * Start all default actions for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start_default(struct rte_eth_dev *dev)
+{
+       struct rte_flow_error error;
+
+       /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+       return flow_mreg_add_default_copy_action(dev, &error);
+}
+
+/**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (!priv->inter_flows) {
+               priv->inter_flows = rte_calloc(__func__, 1,
+                                   MLX5_NUM_MAX_DEV_FLOWS *
+                                   sizeof(struct mlx5_flow) +
+                                   (sizeof(struct mlx5_flow_rss_desc) +
+                                   sizeof(uint16_t) * UINT16_MAX) * 2, 0);
+               if (!priv->inter_flows) {
+                       DRV_LOG(ERR, "can't allocate intermediate memory.");
+                       return;
+               }
+       }
+       priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
+                        [MLX5_NUM_MAX_DEV_FLOWS];
+       /* Reset the index. */
+       priv->flow_idx = 0;
+       priv->flow_nested_idx = 0;
+}
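
The single rte_calloc() region above is carved into a scratch array of
MLX5_NUM_MAX_DEV_FLOWS dev_flows followed by two RSS descriptor slots, one
for the outer flow and one for a nested flow (hence the "* 2"); each slot
also reserves a 64K-entry queue list. A sketch of the layout arithmetic,
assuming the driver's headers are in scope (the helper itself is
illustrative, not the PMD's code):

#include <stddef.h>
#include <stdint.h>

static inline struct mlx5_flow_rss_desc *
rss_desc_of(void *inter_flows, unsigned int level)
{
	/* MLX5_NUM_MAX_DEV_FLOWS struct mlx5_flow entries come first. */
	uint8_t *base = (uint8_t *)&((struct mlx5_flow *)inter_flows)
			[MLX5_NUM_MAX_DEV_FLOWS];
	/* Each nesting level owns one descriptor plus its queue list. */
	size_t slot = sizeof(struct mlx5_flow_rss_desc) +
		      sizeof(uint16_t) * UINT16_MAX;

	return (struct mlx5_flow_rss_desc *)(base + slot * level);
}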
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       rte_free(priv->inter_flows);
+       priv->inter_flows = NULL;
+}
+
 /**
  * Verify the flow list is empty
  *
@@ -4689,7 +4902,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       mlx5_flow_list_flush(dev, &priv->flows);
+       mlx5_flow_list_flush(dev, &priv->flows, false);
        return 0;
 }
 
@@ -4737,7 +4950,7 @@ flow_drv_query(struct rte_eth_dev *dev,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type ftype = flow->drv_type;
 
-       assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(ftype);
 
        return fops->query(dev, flow, actions, data, error);
@@ -5001,12 +5214,14 @@ flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = NULL;
+       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
 
-       assert(fdir_flow);
-       TAILQ_FOREACH(flow, &priv->flows, next) {
-               if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
+       MLX5_ASSERT(fdir_flow);
+       LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+               if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
+                       flow = priv_fdir_flow->flow;
                        DRV_LOG(DEBUG, "port %u found FDIR flow %p",
                                dev->data->port_id, (void *)flow);
                        break;
                }
        }
@@ -5031,6 +5246,7 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_fdir *fdir_flow;
        struct rte_flow *flow;
+       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
        int ret;
 
        fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
@@ -5046,17 +5262,26 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
                rte_errno = EEXIST;
                goto error;
        }
+       priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
+                                    0);
+       if (!priv_fdir_flow) {
+               rte_errno = ENOMEM;
+               goto error;
+       }
        flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
                                fdir_flow->items, fdir_flow->actions, true,
                                NULL);
        if (!flow)
                goto error;
-       assert(!flow->fdir);
-       flow->fdir = fdir_flow;
+       flow->fdir = 1;
+       priv_fdir_flow->fdir = fdir_flow;
+       priv_fdir_flow->flow = flow;
+       LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
        DRV_LOG(DEBUG, "port %u created FDIR flow %p",
                dev->data->port_id, (void *)flow);
        return 0;
 error:
+       rte_free(priv_fdir_flow);
        rte_free(fdir_flow);
        return -rte_errno;
 }
@@ -5081,17 +5306,26 @@ flow_fdir_filter_delete(struct rte_eth_dev *dev,
        struct mlx5_fdir fdir_flow = {
                .attr.group = 0,
        };
+       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
        int ret;
 
        ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
        if (ret)
                return -rte_errno;
-       flow = flow_fdir_filter_lookup(dev, &fdir_flow);
-       if (!flow) {
-               rte_errno = ENOENT;
-               return -rte_errno;
+       LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+               /* Find the FDIR flow in the private list. */
+               if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
+                       break;
        }
+       if (!priv_fdir_flow)
+               return 0;
+       LIST_REMOVE(priv_fdir_flow, next);
+       flow = priv_fdir_flow->flow;
+       /* The FDIR resource will be released after the flow is destroyed. */
+       flow->fdir = 0;
        flow_list_destroy(dev, &priv->flows, flow);
+       rte_free(priv_fdir_flow->fdir);
+       rte_free(priv_fdir_flow);
        DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
                dev->data->port_id, (void *)flow);
        return 0;
@@ -5130,8 +5364,16 @@ static void
 flow_fdir_filter_flush(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-
-       mlx5_flow_list_flush(dev, &priv->flows);
+       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+
+       while (!LIST_EMPTY(&priv->fdir_flows)) {
+               priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
+               LIST_REMOVE(priv_fdir_flow, next);
+               priv_fdir_flow->flow->fdir = 0;
+               flow_list_destroy(dev, &priv->flows, priv_fdir_flow->flow);
+               rte_free(priv_fdir_flow->fdir);
+               rte_free(priv_fdir_flow);
+       }
 }
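
The bookkeeping node walked by the FDIR loops above is assumed to look
roughly like the following (illustrative; the real definition lives in the
driver's headers):

#include <sys/queue.h>

struct mlx5_fdir_flow {
	LIST_ENTRY(mlx5_fdir_flow) next; /* Linked into priv->fdir_flows. */
	struct mlx5_fdir *fdir;          /* Converted filter description. */
	struct rte_flow *flow;           /* The rte_flow created from it. */
};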
 
 /**
@@ -5345,9 +5587,9 @@ mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
  *   Pointer to Ethernet device structure.
  *
  * @return
- *   Pointer to allocated counter  on success, NULL otherwise.
+ *   Index of the allocated counter on success, 0 otherwise.
  */
-struct mlx5_flow_counter *
+uint32_t
 mlx5_counter_alloc(struct rte_eth_dev *dev)
 {
        const struct mlx5_flow_driver_ops *fops;
@@ -5360,7 +5602,7 @@ mlx5_counter_alloc(struct rte_eth_dev *dev)
        DRV_LOG(ERR,
                "port %u counter allocate is not supported.",
                 dev->data->port_id);
-       return NULL;
+       return 0;
 }
 
 /**
@@ -5369,10 +5611,10 @@ mlx5_counter_alloc(struct rte_eth_dev *dev)
  * @param[in] dev
  *   Pointer to Ethernet device structure.
  * @param[in] cnt
- *   Pointer to counter to be free.
+ *   Index of the counter to be freed.
  */
 void
-mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
+mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
@@ -5393,7 +5635,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
  * @param[in] dev
  *   Pointer to Ethernet device structure.
  * @param[in] cnt
- *   Pointer to counter to query.
+ *   Index of the counter to query.
  * @param[in] clear
  *   Set to clear counter statistics.
  * @param[out] pkts
@@ -5405,7 +5647,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
  *   0 on success, a negative errno value otherwise.
  */
 int
-mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt,
+mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
                   bool clear, uint64_t *pkts, uint64_t *bytes)
 {
        const struct mlx5_flow_driver_ops *fops;
@@ -5506,6 +5748,13 @@ next_container:
        dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
                                                              (&pool->a64_dcs);
        offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+       /*
+        * Identify more efficiently the counters released between the query
+        * trigger and the query handling. A counter released in this gap
+        * period must wait for a new query round, since the newly arrived
+        * packets are not yet reflected in the raw data.
+        */
+       rte_atomic64_add(&pool->start_query_gen, 1);
        ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
                                               offset, NULL, NULL,
                                               pool->raw_hw->mem_mng->dm->id,
@@ -5514,6 +5763,7 @@ next_container:
                                               sh->devx_comp,
                                               (uint64_t)(uintptr_t)pool);
        if (ret) {
+               rte_atomic64_sub(&pool->start_query_gen, 1);
                DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
                        " %d", pool->min_dcs->id);
                pool->raw_hw = NULL;
@@ -5553,13 +5803,17 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
        struct mlx5_counter_stats_raw *raw_to_free;
 
        if (unlikely(status)) {
+               rte_atomic64_sub(&pool->start_query_gen, 1);
                raw_to_free = pool->raw_hw;
        } else {
                raw_to_free = pool->raw;
                rte_spinlock_lock(&pool->sl);
                pool->raw = pool->raw_hw;
                rte_spinlock_unlock(&pool->sl);
-               rte_atomic64_add(&pool->query_gen, 1);
+               MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
+                           rte_atomic64_read(&pool->start_query_gen));
+               rte_atomic64_set(&pool->end_query_gen,
+                                rte_atomic64_read(&pool->start_query_gen));
                /* Be sure the new raw counters data is updated in memory. */
                rte_cio_wmb();
        }
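
The start/end generation pair gives counter release a cheap staleness test:
a counter released at generation G (start_query_gen sampled at release time)
must not be reused before a query that started after G has completed, i.e.
before end_query_gen has moved past G. A minimal sketch of that check, with
plain integers standing in for the pool's atomics:

#include <stdbool.h>
#include <stdint.h>

static bool
counter_data_fresh(int64_t end_query_gen, int64_t released_gen)
{
	/*
	 * Packets that hit the counter after the last completed query are
	 * not reflected in the raw data yet; wait for one more round.
	 */
	return end_query_gen > released_gen;
}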