net/mlx5: refactor getting counter action pointer
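
Query the counter's DR action pointer together with its statistics:
mlx5_counter_query() gains an "action" output parameter, so the flow
dump code no longer needs a separate flow_dv_query_count_ptr() call to
obtain the pointer used as the record ID for
DR_DUMP_REC_TYPE_PMD_COUNTER entries, in both the per-flow dump and
the full counter-pool dump.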
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 43598f9..a87ac8e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -164,128 +164,143 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
        return false;
 }
 
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+       enum rte_flow_item_type type;
+
+       switch (proto_mask & proto_spec) {
+       case RTE_VXLAN_GPE_TYPE_IPV4:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case RTE_VXLAN_GPE_TYPE_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       case RTE_VXLAN_GPE_TYPE_ETH:
+               type = RTE_FLOW_ITEM_TYPE_ETH;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+       enum rte_flow_item_type type;
+
+       switch (proto_mask & proto_spec) {
+       case IPPROTO_UDP:
+               type = RTE_FLOW_ITEM_TYPE_UDP;
+               break;
+       case IPPROTO_TCP:
+               type = RTE_FLOW_ITEM_TYPE_TCP;
+               break;
+       case IPPROTO_IP:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case IPPROTO_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+                           rte_be16_t type_mask, bool is_tunnel)
+{
+       enum rte_flow_item_type type;
+
+       switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+       case RTE_ETHER_TYPE_TEB:
+               type = is_tunnel ?
+                      RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_ETHER_TYPE_VLAN:
+               type = !is_tunnel ?
+                      RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+               break;
+       case RTE_ETHER_TYPE_IPV4:
+               type = RTE_FLOW_ITEM_TYPE_IPV4;
+               break;
+       case RTE_ETHER_TYPE_IPV6:
+               type = RTE_FLOW_ITEM_TYPE_IPV6;
+               break;
+       default:
+               type = RTE_FLOW_ITEM_TYPE_END;
+       }
+       return type;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
-       enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-       uint16_t ether_type = 0;
-       uint16_t ether_type_m;
-       uint8_t ip_next_proto = 0;
-       uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld)                              \
+       do {                                                             \
+               const void *m = item->mask;                              \
+               const void *s = item->spec;                              \
+               mask = m ?                                               \
+                       ((const struct rte_flow_item_##type *)m)->fld :  \
+                       rte_flow_item_##type##_mask.fld;                 \
+               spec = ((const struct rte_flow_item_##type *)s)->fld;    \
+       } while (0)
+
+       enum rte_flow_item_type ret;
+       uint16_t spec, mask;
 
        if (item == NULL || item->spec == NULL)
-               return ret;
+               return RTE_FLOW_ITEM_TYPE_VOID;
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_eth *)
-                                               (item->mask))->type;
-               else
-                       ether_type_m = rte_flow_item_eth_mask.type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_eth *)
-                               (item->spec))->type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_ethertype_to_item_type(spec, mask, false);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_vlan *)
-                                               (item->mask))->inner_type;
-               else
-                       ether_type_m = rte_flow_item_vlan_mask.inner_type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_vlan *)
-                               (item->spec))->inner_type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_ethertype_to_item_type(spec, mask, false);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
-                                       (item->mask))->hdr.next_proto_id;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv4_mask.hdr.next_proto_id;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv4 *)
-                               (item->spec))->hdr.next_proto_id;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_inet_proto_to_item_type(spec, mask);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
-                                               (item->mask))->hdr.proto;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv6_mask.hdr.proto;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv6 *)
-                               (item->spec))->hdr.proto;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else
-                       ret = RTE_FLOW_ITEM_TYPE_END;
+               MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
+               if (!mask)
+                       return RTE_FLOW_ITEM_TYPE_VOID;
+               ret = mlx5_inet_proto_to_item_type(spec, mask);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
-               ether_type_m = item->mask ?
-                              ((const struct rte_flow_item_geneve *)
-                              (item->mask))->protocol :
-                              rte_flow_item_geneve_mask.protocol;
-               ether_type = ((const struct rte_flow_item_geneve *)
-                            (item->spec))->protocol;
-               ether_type_m = rte_be_to_cpu_16(ether_type_m);
-               ether_type = rte_be_to_cpu_16(ether_type);
-               switch (ether_type_m & ether_type) {
-               case RTE_ETHER_TYPE_TEB:
-                       ret = RTE_FLOW_ITEM_TYPE_ETH;
-                       break;
-               case RTE_ETHER_TYPE_IPV4:
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-                       break;
-               case RTE_ETHER_TYPE_IPV6:
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-                       break;
-               default:
-                       ret = RTE_FLOW_ITEM_TYPE_END;
-               }
+               MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
+               ret = mlx5_ethertype_to_item_type(spec, mask, true);
+               break;
+       case RTE_FLOW_ITEM_TYPE_GRE:
+               MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
+               ret = mlx5_ethertype_to_item_type(spec, mask, true);
+               break;
+       case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+               MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
+               ret = mlx5_nsh_proto_to_item_type(spec, mask);
                break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
        }
        return ret;
+#undef MLX5_XSET_ITEM_MASK_SPEC
 }
 
 static const int *
@@ -886,7 +901,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;
 
@@ -1206,7 +1221,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
  * flow.
  *
  * @param[in] dev
@@ -1219,7 +1234,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
@@ -1254,15 +1268,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                 * this must always be enabled (metadata may arrive
                 * from other port - not from local flows only).
                 */
-               if (priv->config.dv_flow_en &&
-                   priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-                   mlx5_flow_ext_mreg_supported(dev)) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n = 1;
-               } else if (mark) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n++;
-               }
                if (tunnel) {
                        unsigned int j;
 
@@ -1280,6 +1285,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
        }
 }
 
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_rxq_ctrl *rxq_ctrl;
+
+       if (priv->mark_enabled)
+               return;
+       LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+               rxq_ctrl->rxq.mark = 1;
+       }
+       priv->mark_enabled = 1;
+}
+
 /**
  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
  *
@@ -1294,7 +1313,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+       MLX5_ASSERT(wks);
+       if (wks->mark)
+               flow_rxq_mark_flag_set(dev);
        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
@@ -1314,7 +1337,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
@@ -1345,15 +1367,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                MLX5_ASSERT(rxq_ctrl != NULL);
                if (rxq_ctrl == NULL)
                        continue;
-               if (priv->config.dv_flow_en &&
-                   priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-                   mlx5_flow_ext_mreg_supported(dev)) {
-                       rxq_ctrl->rxq.mark = 1;
-                       rxq_ctrl->flow_mark_n = 1;
-               } else if (mark) {
-                       rxq_ctrl->flow_mark_n--;
-                       rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
-               }
                if (tunnel) {
                        unsigned int j;
 
@@ -1410,12 +1423,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
 
                if (rxq == NULL || rxq->ctrl == NULL)
                        continue;
-               rxq->ctrl->flow_mark_n = 0;
                rxq->ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq->ctrl->flow_tunnels_n[j] = 0;
                rxq->ctrl->rxq.tunnel = 0;
        }
+       priv->mark_enabled = 0;
 }
 
 /**
@@ -1746,7 +1759,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too large");
-       if (rss->queue_num > priv->config.ind_table_max_size)
+       if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->queue_num,
@@ -1981,7 +1994,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
-       if (attributes->transfer && !priv->config.dv_esw_en)
+       if (attributes->transfer && !priv->sh->config.dv_esw_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
@@ -2698,7 +2711,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                uint8_t vni[4];
        } id = { .vlan_id = 0, };
 
-       if (!priv->config.l3_vxlan_en)
+       if (!priv->sh->config.l3_vxlan_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 VXLAN is not enabled by device"
@@ -2893,7 +2906,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        const struct rte_flow_item_geneve *mask = item->mask;
        int ret;
        uint16_t gbhdr;
-       uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+       uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
                          MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
        const struct rte_flow_item_geneve nic_mask = {
                .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
@@ -2901,7 +2914,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                .protocol = RTE_BE16(UINT16_MAX),
        };
 
-       if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+       if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Geneve is not enabled by device"
@@ -2981,10 +2994,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
-       struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        uint8_t data_max_supported =
                        hca_attr->max_geneve_tlv_option_data_len * 4;
-       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;
        const struct rte_flow_item_geneve_opt *spec = item->spec;
@@ -3008,7 +3020,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
-                       "Geneve TLV opt length exceeeds the limit (31)");
+                       "Geneve TLV opt length exceeds the limit (31)");
        /* Check if class type and length masks are full. */
        if (full_mask.option_class != mask->option_class ||
            full_mask.option_type != mask->option_type ||
@@ -3018,11 +3030,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                        "Geneve TLV opt class/type/length masks must be full");
        /* Check if length is supported */
        if ((uint32_t)spec->option_len >
-                       config->hca_attr.max_geneve_tlv_option_data_len)
+                       hca_attr->max_geneve_tlv_option_data_len)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt length not supported");
-       if (config->hca_attr.max_geneve_tlv_options > 1)
+       if (hca_attr->max_geneve_tlv_options > 1)
                DRV_LOG(DEBUG,
                        "max_geneve_tlv_options supports more than 1 option");
        /* Check GENEVE item preceding. */
@@ -3077,7 +3089,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                                        "Data mask is of unsupported size");
        }
        /* Check GENEVE option is supported in NIC. */
-       if (!config->hca_attr.geneve_tlv_opt)
+       if (!hca_attr->geneve_tlv_opt)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt not supported");
@@ -3126,7 +3138,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
-       if (!priv->config.mpls_en)
+       if (!priv->sh->dev_cap.mpls_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "MPLS not supported or"
@@ -3417,11 +3429,11 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
        if (type != MLX5_FLOW_TYPE_MAX)
                return type;
        /* If no OS specific type - continue with DV/VERBS selection */
-       if (attr->transfer && priv->config.dv_esw_en)
+       if (attr->transfer && priv->sh->config.dv_esw_en)
                type = MLX5_FLOW_TYPE_DV;
        if (!attr->transfer)
-               type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
-                                                MLX5_FLOW_TYPE_VERBS;
+               type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+                                                    MLX5_FLOW_TYPE_VERBS;
        return type;
 }
 
@@ -3957,7 +3969,7 @@ find_graph_root(uint32_t rss_level)
  *  subflow.
  *
  * @param[in] dev_flow
- *   Pointer the created preifx subflow.
+ *   Pointer the created prefix subflow.
  *
  * @return
  *   The layers get from prefix subflow.
@@ -4093,7 +4105,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
                return true;
        case RTE_FLOW_ACTION_TYPE_FLAG:
        case RTE_FLOW_ACTION_TYPE_MARK:
-               if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+               if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
                        return true;
                else
                        return false;
@@ -4284,7 +4296,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
                [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
        };
 
-       /* Fill the register fileds in the flow. */
+       /* Fill the register fields in the flow. */
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return NULL;
@@ -4353,7 +4365,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
        /*
         * The copy Flows are not included in any list. There
         * ones are referenced from other Flows and can not
-        * be applied, removed, deleted in ardbitrary order
+        * be applied, removed, deleted in arbitrary order
         * by list traversing.
         */
        mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
@@ -4532,8 +4544,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
        uint32_t mark_id;
 
        /* Check whether extensive metadata feature is engaged. */
-       if (!priv->config.dv_flow_en ||
-           priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+       if (!priv->sh->config.dv_flow_en ||
+           priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev) ||
            !priv->sh->dv_regc0_mask)
                return 0;
@@ -4592,7 +4604,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                            struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        struct mlx5_flow_mreg_copy_resource *mcp_res;
        const struct rte_flow_action_mark *mark;
 
@@ -4796,6 +4808,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
                        struct rte_flow_error *error)
 {
        struct mlx5_flow *dev_flow;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
        dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
                                    flow_split_info->flow_idx, error);
@@ -4810,12 +4823,14 @@ flow_create_split_inner(struct rte_eth_dev *dev,
        /*
         * If dev_flow is as one of the suffix flow, some actions in suffix
         * flow may need some user defined item layer flags, and pass the
-        * Metadate rxq mark flag to suffix flow as well.
+        * Metadata rxq mark flag to suffix flow as well.
         */
        if (flow_split_info->prefix_layers)
                dev_flow->handle->layers = flow_split_info->prefix_layers;
-       if (flow_split_info->prefix_mark)
-               dev_flow->handle->mark = 1;
+       if (flow_split_info->prefix_mark) {
+               MLX5_ASSERT(wks);
+               wks->mark = 1;
+       }
        if (sub_flow)
                *sub_flow = dev_flow;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -5006,6 +5021,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
        uint32_t tag_id = 0;
        struct rte_flow_item *vlan_item_dst = NULL;
        const struct rte_flow_item *vlan_item_src = NULL;
+       const struct rte_flow_item *orig_items = items;
        struct rte_flow_action *hw_mtr_action;
        struct rte_flow_action *action_pre_head = NULL;
        int32_t flow_src_port = priv->representor_id;
@@ -5130,7 +5146,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 
                if (!fm->def_policy) {
                        sub_policy = get_meter_sub_policy(dev, flow, wks,
-                                                         attr, items, error);
+                                                         attr, orig_items,
+                                                         error);
                        if (!sub_policy)
                                return -rte_errno;
                } else {
@@ -5251,6 +5268,8 @@ exit:
  *   Pointer to the Q/RSS action.
  * @param[in] actions_n
  *   Number of original actions.
+ * @param[in] mtr_sfx
+ *   Whether the flow is in the meter suffix table.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -5263,7 +5282,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                          struct rte_flow_action *split_actions,
                          const struct rte_flow_action *actions,
                          const struct rte_flow_action *qrss,
-                         int actions_n, struct rte_flow_error *error)
+                         int actions_n, int mtr_sfx,
+                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -5278,15 +5298,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
         * - Add jump to mreg CP_TBL.
         * As a result, there will be one more action.
         */
-       ++actions_n;
        memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+       /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */
+       ++actions_n;
        set_tag = (void *)(split_actions + actions_n);
        /*
-        * If tag action is not set to void(it means we are not the meter
-        * suffix flow), add the tag action. Since meter suffix flow already
-        * has the tag added.
+        * If we are not the meter suffix flow, add the tag action,
+        * since the meter suffix flow already has the tag added.
         */
-       if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+       if (!mtr_sfx) {
                /*
                 * Allocate the new subflow ID. This one is unique within
                 * device and not shared with representors. Otherwise,
@@ -5319,6 +5339,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
                                MLX5_RTE_FLOW_ACTION_TYPE_TAG,
                        .conf = set_tag,
                };
+       } else {
+               /*
+                * If we are the meter suffix flow, the tag already exists.
+                * Set the QUEUE/RSS action to void.
+                */
+               split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID;
        }
        /* JUMP action to jump to mreg copy table (CP_TBL). */
        jump = (void *)(set_tag + 1);
@@ -5350,7 +5376,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @param[in] encap_idx
- *   The encap action inndex.
+ *   The encap action index.
  *
  * @return
  *   0 on success, negative value otherwise
@@ -5716,7 +5742,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_sh_config *config = &priv->sh->config;
        const struct rte_flow_action *qrss = NULL;
        struct rte_flow_action *ext_actions = NULL;
        struct mlx5_flow *dev_flow = NULL;
@@ -5773,17 +5799,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "no memory to split "
                                                  "metadata flow");
-               /*
-                * If we are the suffix flow of meter, tag already exist.
-                * Set the tag action to void.
-                */
-               if (mtr_sfx)
-                       ext_actions[qrss - actions].type =
-                                               RTE_FLOW_ACTION_TYPE_VOID;
-               else
-                       ext_actions[qrss - actions].type =
-                                               (enum rte_flow_action_type)
-                                               MLX5_RTE_FLOW_ACTION_TYPE_TAG;
                /*
                 * Create the new actions list with removed Q/RSS action
                 * and appended set tag and jump to register copy table
@@ -5791,7 +5806,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                 * in advance, because it is needed for set tag action.
                 */
                qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
-                                                   qrss, actions_n, error);
+                                                   qrss, actions_n,
+                                                   mtr_sfx, error);
                if (!mtr_sfx && !qrss_id) {
                        ret = -rte_errno;
                        goto exit;
@@ -5882,6 +5898,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                /* Add suffix subflow to execute Q/RSS. */
                flow_split_info->prefix_layers = layers;
                flow_split_info->prefix_mark = 0;
+               flow_split_info->table_id = 0;
                ret = flow_create_split_inner(dev, flow, &dev_flow,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
@@ -6096,7 +6113,6 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                        goto exit;
                }
                /* Add the prefix subflow. */
-               flow_split_info->prefix_mark = 0;
                skip_scale_restore = flow_split_info->skip_scale;
                flow_split_info->skip_scale |=
                        1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
@@ -6129,7 +6145,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                                 MLX5_FLOW_TABLE_LEVEL_METER;
                flow_split_info->prefix_layers =
                                flow_get_prefix_layer_flags(dev_flow);
-               flow_split_info->prefix_mark = dev_flow->handle->mark;
+               flow_split_info->prefix_mark |= wks->mark;
                flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
        }
        /* Add the prefix subflow. */
@@ -6195,6 +6211,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
        struct mlx5_flow_dv_sample_resource *sample_res;
        struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
        struct mlx5_flow_tbl_resource *sfx_tbl;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 #endif
        size_t act_size;
        size_t item_size;
@@ -6233,7 +6250,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                 * When reg_c_preserve is set, metadata registers Cx preserve
                 * their value even through packet duplication.
                 */
-               add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+               add_tag = (!fdb_tx ||
+                          priv->sh->cdev->config.hca_attr.reg_c_preserve);
                if (add_tag)
                        sfx_items = (struct rte_flow_item *)((char *)sfx_actions
                                        + act_size);
@@ -6281,7 +6299,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                }
                flow_split_info->prefix_layers =
                                flow_get_prefix_layer_flags(dev_flow);
-               flow_split_info->prefix_mark = dev_flow->handle->mark;
+               MLX5_ASSERT(wks);
+               flow_split_info->prefix_mark |= wks->mark;
                /* Suffix group level is already scaled with the factor; set
                 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid
                 * scaling again in translation.
@@ -6885,7 +6904,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
  * @param type
  *   Flow type to be flushed.
  * @param active
- *   If flushing is called avtively.
+ *   If flushing is called actively.
  */
 void
 mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
@@ -6991,8 +7010,7 @@ flow_alloc_thread_workspace(void)
        data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
        return data;
 err:
-       if (data->rss_desc.queue)
-               free(data->rss_desc.queue);
+       free(data->rss_desc.queue);
        free(data);
        return NULL;
 }
@@ -7781,14 +7799,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
  */
 int
 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
-                  bool clear, uint64_t *pkts, uint64_t *bytes)
+                  bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
 
        if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
                fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-               return fops->counter_query(dev, cnt, clear, pkts, bytes);
+               return fops->counter_query(dev, cnt, clear, pkts,
+                                          bytes, action);
        }
        DRV_LOG(ERR,
                "port %u counter query is not supported.",
@@ -8232,7 +8251,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                struct rte_flow *flow;
                struct rte_flow_error error;
 
-               if (!priv->config.dv_flow_en)
+               if (!priv->sh->config.dv_flow_en)
                        break;
                /* Create internal flow, validation skips copy action. */
                flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8360,6 +8379,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                                "invalid flow handle");
        }
        handle_idx = flow->dev_handles;
+       /* Query counter. */
+       if (flow->counter &&
+           !mlx5_counter_query(dev, flow->counter, false,
+                               &count.hits, &count.bytes, &action) &&
+           action) {
+               id = (uint64_t)(uintptr_t)action;
+               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+               save_dump_file(NULL, 0, type, id, (void *)&count, file);
+       }
+
        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool
                                [MLX5_IPOOL_MLX5_FLOW], handle_idx);
@@ -8367,16 +8396,6 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
                        continue;
                handle_idx = dh->next.next;
 
-               /* query counter */
-               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-               flow_dv_query_count_ptr(dev, flow->counter,
-                                               &action, error);
-               if (action) {
-                       id = (uint64_t)(uintptr_t)action;
-                       if (!mlx5_flow_query_counter(dev, flow, &count, error))
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-               }
                /* Get modify_hdr and encap_decap buf from ipools. */
                encap_decap = NULL;
                modify_hdr = dh->dvh.modify_hdr;
@@ -8422,7 +8441,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
  */
 static int
 mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
-       FILE *file, struct rte_flow_error *error)
+       FILE *file, struct rte_flow_error *error __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
@@ -8507,14 +8526,12 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
        max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
        for (j = 1; j <= max; j++) {
                action = NULL;
-               flow_dv_query_count_ptr(dev, j, &action, error);
-               if (action) {
-                       if (!flow_dv_query_count(dev, j, &count, error)) {
-                               type = DR_DUMP_REC_TYPE_PMD_COUNTER;
-                               id = (uint64_t)(uintptr_t)action;
-                               save_dump_file(NULL, 0, type,
-                                               id, (void *)&count, file);
-                       }
+               if (!mlx5_counter_query(dev, j, false, &count.hits,
+                                       &count.bytes, &action) && action) {
+                       id = (uint64_t)(uintptr_t)action;
+                       type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+                       save_dump_file(NULL, 0, type, id,
+                                      (void *)&count, file);
+               }
        }
        return 0;
@@ -8532,7 +8549,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
  *   Perform verbose error reporting if not NULL. PMDs initialize this
  *   structure in case of error only.
  * @return
- *   0 on success, a nagative value otherwise.
+ *   0 on success, a negative value otherwise.
  */
 int
 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
@@ -8546,7 +8563,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
        struct mlx5_flow_handle *dh;
        struct rte_flow *flow;
 
-       if (!priv->config.dv_flow_en) {
+       if (!sh->config.dv_flow_en) {
                if (fputs("device dv flow disabled\n", file) <= 0)
                        return -errno;
                return -ENOTSUP;
@@ -9010,7 +9027,7 @@ mlx5_get_tof(const struct rte_flow_item *item,
 }
 
 /**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
  */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 __extension__
@@ -9530,7 +9547,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (!priv->config.dv_flow_en)
+       if (!priv->sh->config.dv_flow_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "flow DV interface is off");
@@ -9949,7 +9966,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
        type = mlx5_flow_os_get_type();
        if (type == MLX5_FLOW_TYPE_MAX) {
                type = MLX5_FLOW_TYPE_VERBS;
-               if (priv->sh->devx && priv->config.dv_flow_en)
+               if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
                        type = MLX5_FLOW_TYPE_DV;
        }
        fops = flow_get_drv_ops(type);