diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6e2a3e8..62edc4f 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -268,6 +268,31 @@ struct field_modify_info modify_tcp[] = {
        {0, 0, 0},
 };
 
+static const struct rte_flow_item *
+mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
+{
+       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               switch (item->type) {
+               default:
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VXLAN:
+               case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+               case RTE_FLOW_ITEM_TYPE_GRE:
+               case RTE_FLOW_ITEM_TYPE_MPLS:
+               case RTE_FLOW_ITEM_TYPE_NVGRE:
+               case RTE_FLOW_ITEM_TYPE_GENEVE:
+                       return item;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+                           item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
+                               return item;
+                       break;
+               }
+       }
+       return NULL;
+}
+
 static void
 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
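For context, the IP-in-IP heuristic above can be traced with a minimal
(hypothetical) pattern; two consecutive L3 items mark the first one as the
tunnel item, while a plain ETH/IPV4/TCP pattern yields NULL:

	/* Hedged sketch, not part of the patch. */
	static const struct rte_flow_item ipip[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* returned: item[1] is L3 too */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* mlx5_flow_find_tunnel_item(ipip) == &ipip[1]. */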
@@ -401,6 +426,8 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;
+               bool next_field = true;
+               bool next_dcopy = true;
 
                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
@@ -418,15 +445,13 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
-               size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
-                       .length = size_b,
+                       .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
+                               0 : size_b,
                };
-               /* Convert entire record to expected big-endian format. */
-               actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
@@ -434,7 +459,27 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
-                       ++dcopy;
+                       /*
+                        * Destination field overflow. Copy leftovers of
+                        * a source field to the next destination field.
+                        */
+                       if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
+                               actions[i].length = dcopy->size * CHAR_BIT;
+                               field->offset += dcopy->size;
+                               next_field = false;
+                       }
+                       /*
+                        * Not enough bits in a source field to fill a
+                        * destination field. Switch to the next source.
+                        */
+                       if (dcopy->size > field->size &&
+                           (size_b == field->size * CHAR_BIT)) {
+                               actions[i].length = field->size * CHAR_BIT;
+                               dcopy->offset += field->size * CHAR_BIT;
+                               next_dcopy = false;
+                       }
+                       if (next_dcopy)
+                               ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
@@ -443,8 +488,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
+               /* Convert entire record to expected big-endian format. */
+               actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+               if (next_field)
+                       ++field;
                ++i;
-               ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
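The next_field/next_dcopy cursors are easiest to follow with a worked example
(hedged; hypothetical geometry of one 32-bit source field copied into two
16-bit destination fields, full masks assumed):

	/*
	 * Pass 1: size_b (32) > dcopy->size * CHAR_BIT (16), so the command
	 *         copies only 16 bits, field->offset advances by 2 bytes,
	 *         next_field = false (keep the source) and dcopy advances.
	 * Pass 2: the remaining 16 source bits fill the second destination,
	 *         then both cursors advance normally.
	 * Result: two mlx5_modification_cmd entries of length 16 each.
	 */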
@@ -1214,8 +1262,8 @@ flow_dv_convert_action_set_meta
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
 {
-       uint32_t data = conf->data;
-       uint32_t mask = conf->mask;
+       uint32_t mask = rte_cpu_to_be_32(conf->mask);
+       uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
@@ -1228,25 +1276,14 @@ flow_dv_convert_action_set_meta
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
-       /*
-        * In datapath code there is no endianness
-        * coversions for perfromance reasons, all
-        * pattern conversions are done in rte_flow.
-        */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
-               uint32_t shl_c0;
+               uint32_t shl_c0 = rte_bsf32(msk_c0);
 
-               MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
-               shl_c0 = rte_bsf32(msk_c0);
-#else
-               shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
-               mask <<= shl_c0;
-               data <<= shl_c0;
-               MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+               data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+               mask = rte_cpu_to_be_32(mask) & msk_c0;
+               mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
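In CPU byte order the three REG_C_0 lines above compute the following
(sketch; both results are stored back big-endian because
flow_dv_convert_modify_action() expects big-endian values in memory):

	uint32_t shl_c0   = rte_bsf32(msk_c0);
	uint32_t data_cpu = (conf->data & conf->mask) << shl_c0;
	uint32_t mask_cpu = (conf->mask & msk_c0) << shl_c0;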
@@ -1331,7 +1368,8 @@ flow_dv_convert_action_modify_ipv6_dscp
 }
 
 static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_dev_config *config,
+                          enum rte_flow_field_id field)
 {
        switch (field) {
        case RTE_FLOW_FIELD_START:
@@ -1366,7 +1404,7 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
        case RTE_FLOW_FIELD_TCP_ACK_NUM:
                return 32;
        case RTE_FLOW_FIELD_TCP_FLAGS:
-               return 6;
+               return 9;
        case RTE_FLOW_FIELD_UDP_PORT_SRC:
        case RTE_FLOW_FIELD_UDP_PORT_DST:
                return 16;
@@ -1379,7 +1417,12 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
        case RTE_FLOW_FIELD_MARK:
                return 24;
        case RTE_FLOW_FIELD_META:
-               return 32;
+               if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
+                       return 16;
+               else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
+                       return 32;
+               else
+                       return 0;
        case RTE_FLOW_FIELD_POINTER:
        case RTE_FLOW_FIELD_VALUE:
                return 64;
@@ -1399,7 +1442,10 @@ mlx5_flow_field_id_to_modify_info
                 const struct rte_flow_attr *attr,
                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
        uint32_t idx = 0;
+       uint32_t off = 0;
        uint64_t val = 0;
        switch (data->field) {
        case RTE_FLOW_FIELD_START:
@@ -1407,61 +1453,63 @@ mlx5_flow_field_id_to_modify_info
                MLX5_ASSERT(false);
                break;
        case RTE_FLOW_FIELD_MAC_DST:
+               off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
-                       if (data->offset < 32) {
-                               info[idx] = (struct field_modify_info){4, 0,
-                                               MLX5_MODI_OUT_DMAC_47_16};
-                               if (width < 32) {
-                                       mask[idx] =
-                                               rte_cpu_to_be_32(0xffffffff >>
-                                                                (32 - width));
+                       if (data->offset < 16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                               MLX5_MODI_OUT_DMAC_15_0};
+                               if (width < 16) {
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                                (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
-                                       width -= 32;
+                                       mask[idx] = RTE_BE16(0xffff);
+                                       width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
-                       info[idx] = (struct field_modify_info){2, 4 * idx,
-                                               MLX5_MODI_OUT_DMAC_15_0};
-                       mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-               } else {
-                       if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 0,
+                       info[idx] = (struct field_modify_info){4, 4 * idx,
                                                MLX5_MODI_OUT_DMAC_47_16};
-                       info[idx] = (struct field_modify_info){2, 0,
+                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+                                                     (32 - width)) << off);
+               } else {
+                       if (data->offset < 16)
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_DMAC_15_0};
+                       info[idx] = (struct field_modify_info){4, off,
+                                               MLX5_MODI_OUT_DMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_MAC_SRC:
+               off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
-                       if (data->offset < 32) {
-                               info[idx] = (struct field_modify_info){4, 0,
-                                               MLX5_MODI_OUT_SMAC_47_16};
-                               if (width < 32) {
-                                       mask[idx] =
-                                               rte_cpu_to_be_32(0xffffffff >>
-                                                               (32 - width));
+                       if (data->offset < 16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                               MLX5_MODI_OUT_SMAC_15_0};
+                               if (width < 16) {
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                                (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
-                                       width -= 32;
+                                       mask[idx] = RTE_BE16(0xffff);
+                                       width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
-                       info[idx] = (struct field_modify_info){2, 4 * idx,
-                                               MLX5_MODI_OUT_SMAC_15_0};
-                       mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-               } else {
-                       if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 0,
+                       info[idx] = (struct field_modify_info){4, 4 * idx,
                                                MLX5_MODI_OUT_SMAC_47_16};
-                       info[idx] = (struct field_modify_info){2, 0,
+                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+                                                     (32 - width)) << off);
+               } else {
+                       if (data->offset < 16)
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_SMAC_15_0};
+                       info[idx] = (struct field_modify_info){4, off,
+                                               MLX5_MODI_OUT_SMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_VLAN_TYPE:
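A worked trace of the new MAC layout (hypothetical request: set the full
48-bit destination MAC, data->offset = 0, width = 48, mask != NULL):

	/*
	 * info[0] = {2, 0, MLX5_MODI_OUT_DMAC_15_0}, mask[0] = RTE_BE16(0xffff),
	 *           then width -= 16 and idx becomes 1;
	 * info[1] = {4, 4, MLX5_MODI_OUT_DMAC_47_16},
	 *           mask[1] = RTE_BE32(0xffffffff), i.e. (0xffffffff >> 0) << 0.
	 */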
@@ -1688,10 +1736,10 @@ mlx5_flow_field_id_to_modify_info
                                                     (32 - width));
                break;
        case RTE_FLOW_FIELD_TCP_FLAGS:
-               info[idx] = (struct field_modify_info){1, 0,
+               info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_TCP_FLAGS};
                if (mask)
-                       mask[idx] = 0x3f >> (6 - width);
+                       mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
                break;
        case RTE_FLOW_FIELD_UDP_PORT_SRC:
                info[idx] = (struct field_modify_info){2, 0,
@@ -1752,17 +1800,28 @@ mlx5_flow_field_id_to_modify_info
                break;
        case RTE_FLOW_FIELD_META:
                {
+                       unsigned int xmeta = config->dv_xmeta_en;
                        int reg = flow_dv_get_metadata_reg(dev, attr, error);
                        if (reg < 0)
                                return;
                        MLX5_ASSERT(reg != REG_NON);
                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
-                       info[idx] = (struct field_modify_info){4, 0,
-                                               reg_to_field[reg]};
-                       if (mask)
-                               mask[idx] =
-                                       rte_cpu_to_be_32(0xffffffff >>
-                                                        (32 - width));
+                       if (xmeta == MLX5_XMETA_MODE_META16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                                       reg_to_field[reg]};
+                               if (mask)
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                               (16 - width));
+                       } else if (xmeta == MLX5_XMETA_MODE_META32) {
+                               info[idx] = (struct field_modify_info){4, 0,
+                                                       reg_to_field[reg]};
+                               if (mask)
+                                       mask[idx] =
+                                               rte_cpu_to_be_32(0xffffffff >>
+                                                               (32 - width));
+                       } else {
+                               MLX5_ASSERT(false);
+                       }
                }
                break;
        case RTE_FLOW_FIELD_POINTER:
@@ -1774,7 +1833,12 @@ mlx5_flow_field_id_to_modify_info
                        val = data->value;
                for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
                        if (mask[idx]) {
-                               if (dst_width > 16) {
+                               if (dst_width == 48) {
+                                       /* Special case for MAC addresses. */
+                                       value[idx] = rte_cpu_to_be_16(val);
+                                       val >>= 16;
+                                       dst_width -= 16;
+                               } else if (dst_width > 16) {
                                        value[idx] = rte_cpu_to_be_32(val);
                                        val >>= 32;
                                } else if (dst_width > 8) {
@@ -1820,6 +1884,8 @@ flow_dv_convert_action_modify_field
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_modify_field *conf =
                (const struct rte_flow_action_modify_field *)(action->conf);
        struct rte_flow_item item;
@@ -1830,7 +1896,8 @@ flow_dv_convert_action_modify_field
        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
        uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
        uint32_t type;
-       uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
+       uint32_t dst_width = mlx5_flow_item_field_width(config,
+                                                       conf->dst.field);
 
        if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
                conf->src.field == RTE_FLOW_FIELD_VALUE) {
@@ -2598,6 +2665,51 @@ flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
                                  "specified range not supported");
 }
 
+/*
+ * Validate ASO CT item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Pointer to bit-fields that holds the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+                            const struct rte_flow_item *item,
+                            uint64_t *item_flags,
+                            struct rte_flow_error *error)
+{
+       const struct rte_flow_item_conntrack *spec = item->spec;
+       const struct rte_flow_item_conntrack *mask = item->mask;
+       uint32_t flags;
+
+       RTE_SET_USED(dev);
+
+       if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "Only one CT is supported");
+       if (!mask)
+               mask = &rte_flow_item_conntrack_mask;
+       if (!spec)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "CT item spec cannot be empty");
+       flags = spec->flags & mask->flags;
+       if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
+           ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
+            (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
+            (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                         "Conflict status bits");
+       /* State change also needs to be considered. */
+       *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
+       return 0;
+}
+
 /**
  * Validate the pop VLAN action.
  *
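For reference, a (hypothetical) conntrack item that the new check rejects,
because VALID conflicts with BAD in the masked flags:

	const struct rte_flow_item_conntrack bad_spec = {
		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			 RTE_FLOW_CONNTRACK_PKT_STATE_BAD,
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
		.spec = &bad_spec,
		/* NULL mask defaults to rte_flow_item_conntrack_mask. */
	};
	/* flow_dv_validate_item_aso_ct() fails with "Conflict status bits". */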
@@ -3146,13 +3258,33 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Check if action counter is shared by either old or new mechanism.
+ *
+ * @param[in] action
+ *   Pointer to the action structure.
+ *
+ * @return
+ *   True when counter is shared, false otherwise.
+ */
+static inline bool
+is_shared_action_count(const struct rte_flow_action *action)
+{
+       const struct rte_flow_action_count *count =
+                       (const struct rte_flow_action_count *)action->conf;
+
+       if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
+               return true;
+       return !!(count && count->shared);
+}
+
 /**
  * Validate count action.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] action
- *   Pointer to the action structure.
+ * @param[in] shared
+ *   Indicator if action is shared.
  * @param[in] action_flags
  *   Holds the actions detected until now.
  * @param[out] error
@@ -3162,13 +3294,11 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_dv_validate_action_count(struct rte_eth_dev *dev,
-                             const struct rte_flow_action *action,
+flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
                              uint64_t action_flags,
                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const struct rte_flow_action_count *count;
 
        if (!priv->config.devx)
                goto notsup_err;
@@ -3176,8 +3306,7 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "duplicate count actions set");
-       count = (const struct rte_flow_action_count *)action->conf;
-       if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+       if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
            !priv->sh->flow_hit_aso_en)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -3400,6 +3529,57 @@ flow_dv_validate_action_raw_encap_decap
        return 0;
 }
 
+/*
+ * Validate the ASO CT action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] item_flags
+ *   The items found in this flow rule.
+ * @param[in] attr
+ *   Pointer to flow attributes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+                              uint64_t action_flags,
+                              uint64_t item_flags,
+                              const struct rte_flow_attr *attr,
+                              struct rte_flow_error *error)
+{
+       RTE_SET_USED(dev);
+
+       if (attr->group == 0 && !attr->transfer)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "Only support non-root table");
+       if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "CT cannot follow a fate action");
+       if ((action_flags & MLX5_FLOW_ACTION_METER) ||
+           (action_flags & MLX5_FLOW_ACTION_AGE))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Only one ASO action is supported");
+       if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Encap cannot exist before CT");
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "Not an outer TCP packet");
+       return 0;
+}
+
 /**
  * Match encap_decap resource.
  *
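A rule shape that satisfies all of the checks above (hedged sketch; ct_conf
stands for a pre-configured struct rte_flow_action_conntrack, and the group
and queue numbers are arbitrary):

	const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		/* CT first: no fate, meter, age or encap action may precede it. */
		{ .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = ct_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* The pattern must contain an outer TCP item for the L4 check. */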
@@ -4572,10 +4752,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_modify_field *action_modify_field =
                action->conf;
-       uint32_t dst_width =
-               mlx5_flow_item_field_width(action_modify_field->dst.field);
-       uint32_t src_width =
-               mlx5_flow_item_field_width(action_modify_field->src.field);
+       uint32_t dst_width = mlx5_flow_item_field_width(config,
+                               action_modify_field->dst.field);
+       uint32_t src_width = mlx5_flow_item_field_width(config,
+                               action_modify_field->src.field);
 
        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (ret)
@@ -4628,8 +4808,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
                                        "inner header fields modification"
                                        " is not supported");
        }
-       if (action_modify_field->dst.field ==
-           action_modify_field->src.field)
+       if ((action_modify_field->dst.field ==
+            action_modify_field->src.field) &&
+           (action_modify_field->dst.level ==
+            action_modify_field->src.level))
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, action,
                                "source and destination fields"
@@ -5254,7 +5436,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_dv_validate_action_count
-                               (dev, act,
+                               (dev, is_shared_action_count(act),
                                 *action_flags | sub_action_flags,
                                 error);
                        if (ret < 0)
@@ -5424,7 +5606,7 @@ flow_dv_modify_hdr_resource_register
  * @param[in] idx
  *   mlx5 flow counter index in the container.
  * @param[out] ppool
- *   mlx5 flow counter pool in the container,
+ *   mlx5 flow counter pool in the container.
  *
  * @return
  *   Pointer to the counter, NULL otherwise.
@@ -5554,7 +5736,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev)
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] cnt
+ * @param[in] counter
  *   Index to the flow counter.
  * @param[out] pkts
  *   The statistics value of packets.
@@ -5795,6 +5977,13 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
        if (!fallback && !priv->sh->cmng.query_thread_on)
                /* Start the asynchronous batch query by the host thread. */
                mlx5_set_query_alarm(priv->sh);
+       /*
+        * When the count action isn't shared (by ID), the shared_info field
+        * is used for the indirect action API's refcnt.
+        * When the counter action is shared neither by ID nor by the
+        * indirect action API, the refcnt must be 1.
+        */
+       cnt_free->shared_info.refcnt = 1;
        return cnt_idx;
 err:
        if (cnt_free) {
@@ -5941,11 +6130,33 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
                return;
        cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
        MLX5_ASSERT(pool);
-       if (IS_SHARED_CNT(counter) &&
-           mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
-               return;
-       if (pool->is_aged)
+       if (pool->is_aged) {
                flow_dv_counter_remove_from_age(dev, counter, cnt);
+       } else {
+               /*
+                * If the counter action is shared by ID, the l3t_clear_entry
+                * function decrements its reference counter. If after the
+                * decrement the action is still referenced, the function
+                * returns here and does not release it.
+                */
+               if (IS_LEGACY_SHARED_CNT(counter) &&
+                   mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+                                        cnt->shared_info.id))
+                       return;
+               /*
+                * If the counter action is shared by the indirect action API,
+                * the atomic function decrements its reference counter.
+                * If after the decrement the action is still referenced, the
+                * function returns here and does not release it.
+                * When the counter action is shared neither by ID nor by the
+                * indirect action API, the refcnt is 1 before the decrement,
+                * so the condition fails and the function does not return here.
+                */
+               if (!IS_LEGACY_SHARED_CNT(counter) &&
+                   __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+                                      __ATOMIC_RELAXED))
+                       return;
+       }
        cnt->pool = pool;
        /*
         * Put the counter back to list to be updated in none fallback mode.
@@ -5955,7 +6166,6 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
         * container counter list. The list changes while query starts. In
         * this case, lock will not be needed as query callback and release
         * function both operate with the different list.
-        *
         */
        if (!priv->sh->cmng.counter_fallback) {
                rte_spinlock_lock(&pool->csl);
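The release rule spelled out in the comments is the usual atomic-decrement
idiom (sketch of the non-legacy branch above):

	/* Free only on the 1 -> 0 transition of shared_info.refcnt. */
	if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
			       __ATOMIC_RELAXED) != 0)
		return; /* still referenced through the indirect action API */
	/* refcnt hit zero: return the counter to the pool. */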
@@ -6230,6 +6440,158 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
        return ret;
 }
 
+static uint16_t
+mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
+                         const struct rte_flow_item *end)
+{
+       const struct rte_flow_item *item = *head;
+       uint16_t l3_protocol;
+
+       for (; item != end; item++) {
+               switch (item->type) {
+               default:
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       l3_protocol = RTE_ETHER_TYPE_IPV4;
+                       goto l3_ok;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       l3_protocol = RTE_ETHER_TYPE_IPV6;
+                       goto l3_ok;
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       if (item->mask && item->spec) {
+                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
+                                                           type, item,
+                                                           l3_protocol);
+                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
+                                       goto l3_ok;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       if (item->mask && item->spec) {
+                               MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
+                                                           inner_type, item,
+                                                           l3_protocol);
+                               if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+                                   l3_protocol == RTE_ETHER_TYPE_IPV6)
+                                       goto l3_ok;
+                       }
+                       break;
+               }
+       }
+       return 0;
+l3_ok:
+       *head = item;
+       return l3_protocol;
+}
+
+static uint8_t
+mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
+                         const struct rte_flow_item *end)
+{
+       const struct rte_flow_item *item = *head;
+       uint8_t l4_protocol;
+
+       for (; item != end; item++) {
+               switch (item->type) {
+               default:
+                       break;
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       l4_protocol = IPPROTO_TCP;
+                       goto l4_ok;
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       l4_protocol = IPPROTO_UDP;
+                       goto l4_ok;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       if (item->mask && item->spec) {
+                               const struct rte_flow_item_ipv4 *mask, *spec;
+
+                               mask = (typeof(mask))item->mask;
+                               spec = (typeof(spec))item->spec;
+                               l4_protocol = mask->hdr.next_proto_id &
+                                             spec->hdr.next_proto_id;
+                               if (l4_protocol == IPPROTO_TCP ||
+                                   l4_protocol == IPPROTO_UDP)
+                                       goto l4_ok;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       if (item->mask && item->spec) {
+                               const struct rte_flow_item_ipv6 *mask, *spec;
+                               mask = (typeof(mask))item->mask;
+                               spec = (typeof(spec))item->spec;
+                               l4_protocol = mask->hdr.proto & spec->hdr.proto;
+                               if (l4_protocol == IPPROTO_TCP ||
+                                   l4_protocol == IPPROTO_UDP)
+                                       goto l4_ok;
+                       }
+                       break;
+               }
+       }
+       return 0;
+l4_ok:
+       *head = item;
+       return l4_protocol;
+}
+
+static int
+flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+                               const struct rte_flow_item *rule_items,
+                               const struct rte_flow_item *integrity_item,
+                               struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
+       const struct rte_flow_item_integrity *mask = (typeof(mask))
+                                                    integrity_item->mask;
+       const struct rte_flow_item_integrity *spec = (typeof(spec))
+                                                    integrity_item->spec;
+       uint32_t protocol;
+
+       if (!priv->config.hca_attr.pkt_integrity_match)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         integrity_item,
+                                         "packet integrity item not supported");
+       if (!mask)
+               mask = &rte_flow_item_integrity_mask;
+       if (!mlx5_validate_integrity_item(mask))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         integrity_item,
+                                         "unsupported integrity filter");
+       tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
+       if (spec->level > 1) {
+               if (!tunnel_item)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 integrity_item,
+                                                 "missing tunnel item");
+               item = tunnel_item;
+               end_item = mlx5_find_end_item(tunnel_item);
+       } else {
+               end_item = tunnel_item ? tunnel_item :
+                          mlx5_find_end_item(integrity_item);
+       }
+       if (mask->l3_ok || mask->ipv4_csum_ok) {
+               protocol = mlx5_flow_locate_proto_l3(&item, end_item);
+               if (!protocol)
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 integrity_item,
+                                                 "missing L3 protocol");
+       }
+       if (mask->l4_ok || mask->l4_csum_ok) {
+               protocol = mlx5_flow_locate_proto_l4(&item, end_item);
+               if (!protocol)
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 integrity_item,
+                                                 "missing L4 protocol");
+       }
+       return 0;
+}
+
 /**
  * Internal validation function. For validating both actions and items.
  *
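Putting the helpers together, a pattern that passes the new integrity
validation could look like this (hedged sketch; level 0 selects the outer
headers, and the ETH/IPV4/TCP items satisfy the L3/L4 lookups):

	const struct rte_flow_item_integrity integrity_mask = {
		.l3_ok = 1, .l4_ok = 1, .ipv4_csum_ok = 1, .l4_csum_ok = 1,
	};
	const struct rte_flow_item_integrity integrity_spec = {
		.l3_ok = 1, .l4_ok = 1, .ipv4_csum_ok = 1, .l4_csum_ok = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		  .spec = &integrity_spec, .mask = &integrity_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};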
@@ -6274,7 +6636,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        const struct rte_flow_action_raw_encap *encap;
        const struct rte_flow_action_rss *rss = NULL;
        const struct rte_flow_action_rss *sample_rss = NULL;
-       const struct rte_flow_action_count *count = NULL;
        const struct rte_flow_action_count *sample_count = NULL;
        const struct rte_flow_item_tcp nic_tcp_mask = {
                .hdr = {
@@ -6315,33 +6676,35 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        uint32_t rw_act_num = 0;
        uint64_t is_root;
        const struct mlx5_flow_tunnel *tunnel;
+       enum mlx5_tof_rule_type tof_rule_type;
        struct flow_grp_info grp_info = {
                .external = !!external,
                .transfer = !!attr->transfer,
                .fdb_def_rule = !!priv->fdb_def_rule,
+               .std_tbl_fix = true,
        };
        const struct rte_eth_hairpin_conf *conf;
+       const struct rte_flow_item *rule_items = items;
        bool def_policy = false;
 
        if (items == NULL)
                return -1;
-       if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
-               tunnel = flow_items_to_tunnel(items);
-               action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
-                               MLX5_FLOW_ACTION_DECAP;
-       } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
-               tunnel = flow_actions_to_tunnel(actions);
-               action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
-       } else {
-               tunnel = NULL;
+       tunnel = is_tunnel_offload_active(dev) ?
+                mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
+       if (tunnel) {
+               if (priv->representor)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                NULL, "decap not supported for VF representor");
+               if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
+                       action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+               else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
+                       action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+                                       MLX5_FLOW_ACTION_DECAP;
+               grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+                                       (dev, attr, tunnel, tof_rule_type);
        }
-       if (tunnel && priv->representor)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                         "decap not supported "
-                                         "for VF representor");
-       grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
-                               (dev, tunnel, attr, items, actions);
        ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
        if (ret < 0)
                return ret;
@@ -6355,15 +6718,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                switch (type) {
-               case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
-                       if (items[0].type != (typeof(items[0].type))
-                                               MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
-                               return rte_flow_error_set
-                                               (error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM,
-                                               NULL, "MLX5 private items "
-                                               "must be the first");
-                       break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_PORT_ID:
@@ -6644,6 +6998,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
+               case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+                       if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
+                               return rte_flow_error_set
+                                       (error, ENOTSUP,
+                                        RTE_FLOW_ERROR_TYPE_ITEM,
+                                        NULL, "multiple integrity items not supported");
+                       ret = flow_dv_validate_item_integrity(dev, rule_items,
+                                                             items, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_ITEM_INTEGRITY;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+                       ret = flow_dv_validate_item_aso_ct(dev, items,
+                                                          &item_flags, error);
+                       if (ret < 0)
+                               return ret;
+                       break;
+               case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+                       /* Tunnel offload item was processed earlier;
+                        * list it here as a supported type.
+                        */
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -6653,6 +7030,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;
+               bool shared_count = false;
 
                if (!mlx5_flow_os_action_supported(type))
                        return rte_flow_error_set(error, ENOTSUP,
@@ -6802,13 +7180,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
                        ++actions_n;
                        break;
+               case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
                case RTE_FLOW_ACTION_TYPE_COUNT:
-                       ret = flow_dv_validate_action_count(dev, actions,
+                       shared_count = is_shared_action_count(actions);
+                       ret = flow_dv_validate_action_count(dev, shared_count,
                                                            action_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
-                       count = actions->conf;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
@@ -7102,6 +7481,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                                           NULL,
                          "Shared ASO age action is not supported for group 0");
+                       if (action_flags & MLX5_FLOW_ACTION_AGE)
+                               return rte_flow_error_set
+                                                 (error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                                  NULL,
+                                                  "duplicate age actions set");
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
@@ -7116,7 +7501,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                         * mutual exclusion with share counter actions.
                         */
                        if (!priv->sh->flow_hit_aso_en) {
-                               if (count && count->shared)
+                               if (shared_count)
                                        return rte_flow_error_set
                                                (error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -7177,17 +7562,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
                        ++actions_n;
                        break;
-               case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
-                       if (actions[0].type != (typeof(actions[0].type))
-                               MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
-                               return rte_flow_error_set
-                                               (error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ACTION,
-                                               NULL, "MLX5 private action "
-                                               "must be the first");
-
-                       action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
-                       break;
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                        ret = flow_dv_validate_action_modify_field(dev,
                                                                   action_flags,
@@ -7204,6 +7578,19 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
                        rw_act_num += ret;
                        break;
+               case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+                       ret = flow_dv_validate_action_aso_ct(dev, action_flags,
+                                                            item_flags, attr,
+                                                            error);
+                       if (ret < 0)
+                               return ret;
+                       action_flags |= MLX5_FLOW_ACTION_CT;
+                       break;
+               case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+               case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+                       /* Tunnel offload action was processed earlier;
+                        * list it here as a supported type.
+                        */
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -7439,6 +7826,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
        MLX5_ASSERT(wks);
+       wks->skip_matcher_reg = 0;
        /* In case of corrupting the memory. */
        if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
                rte_flow_error_set(error, ENOSPC,
@@ -8856,27 +9244,14 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
                if (reg < 0)
                        return;
                MLX5_ASSERT(reg != REG_NON);
-               /*
-                * In datapath code there is no endianness
-                * coversions for perfromance reasons, all
-                * pattern conversions are done in rte_flow.
-                */
-               value = rte_cpu_to_be_32(value);
-               mask = rte_cpu_to_be_32(mask);
                if (reg == REG_C_0) {
                        struct mlx5_priv *priv = dev->data->dev_private;
                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                        uint32_t shl_c0 = rte_bsf32(msk_c0);
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-                       uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
 
-                       value >>= shr_c0;
-                       mask >>= shr_c0;
-#endif
-                       value <<= shl_c0;
+                       mask &= msk_c0;
                        mask <<= shl_c0;
-                       MLX5_ASSERT(msk_c0);
-                       MLX5_ASSERT(!(~msk_c0 & mask));
+                       value <<= shl_c0;
                }
                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
        }
@@ -9377,6 +9752,64 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
        }
 }
 
+/*
+ * Add connection tracking status item to the matcher.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
+                             void *matcher, void *key,
+                             const struct rte_flow_item *item)
+{
+       uint32_t reg_value = 0;
+       int reg_id;
+       /* 8 LSBs are 0b11/0000/11; the middle 4 bits are reserved. */
+       uint32_t reg_mask = 0;
+       const struct rte_flow_item_conntrack *spec = item->spec;
+       const struct rte_flow_item_conntrack *mask = item->mask;
+       uint32_t flags;
+       struct rte_flow_error error;
+
+       if (!mask)
+               mask = &rte_flow_item_conntrack_mask;
+       if (!spec || !mask->flags)
+               return;
+       flags = spec->flags & mask->flags;
+       /* The conflict should be checked in the validation. */
+       if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
+               reg_value |= MLX5_CT_SYNDROME_VALID;
+       if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+               reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
+       if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
+               reg_value |= MLX5_CT_SYNDROME_INVALID;
+       if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
+               reg_value |= MLX5_CT_SYNDROME_TRAP;
+       if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+               reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
+       if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
+                          RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
+                          RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
+               reg_mask |= 0xc0;
+       if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+               reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
+       if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+               reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
+       /* The REG_C_x value could be saved during startup. */
+       reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
+       if (reg_id == REG_NON)
+               return;
+       flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
+                              reg_value, reg_mask);
+}
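+/*
+ * Hedged example of the mapping above: spec->flags = VALID with
+ * mask->flags = VALID | INVALID | DISABLED yields
+ * reg_value = MLX5_CT_SYNDROME_VALID and reg_mask = 0xc0, matched on
+ * the REG_C_x assigned to MLX5_ASO_CONNTRACK.
+ */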
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)                                     \
@@ -9931,6 +10364,8 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
  * @param[out] count
  *   Pointer to the counter action configuration.
  * @param[in] age
@@ -9954,7 +10389,7 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
                counter = flow_dv_counter_alloc(dev, !!age);
        if (!counter || age == NULL)
                return counter;
-       age_param  = flow_dv_counter_idx_get_age(dev, counter);
+       age_param = flow_dv_counter_idx_get_age(dev, counter);
        age_param->context = age->context ? age->context :
                (void *)(uintptr_t)(dev_flow->flow_idx);
        age_param->timeout = age->timeout;
@@ -11084,194 +11519,639 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 }
 
 /**
- * Create a age action using ASO mechanism.
+ * Initialize flow ASO age parameters.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] age
- *   Pointer to the aging action configuration.
- * @param[out] error
- *   Pointer to the error structure.
+ * @param[in] age_idx
+ *   Index of ASO age action.
+ * @param[in] context
+ *   Pointer to flow counter age context.
+ * @param[in] timeout
+ *   Aging timeout in seconds.
  *
- * @return
- *   Index to flow counter on success, 0 otherwise.
  */
-static uint32_t
-flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
-                                const struct rte_flow_action_age *age,
-                                struct rte_flow_error *error)
+static void
+flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
+                           uint32_t age_idx,
+                           void *context,
+                           uint32_t timeout)
 {
-       uint32_t age_idx = 0;
        struct mlx5_aso_age_action *aso_age;
 
-       age_idx = flow_dv_aso_age_alloc(dev, error);
-       if (!age_idx)
-               return 0;
        aso_age = flow_aso_age_get_by_idx(dev, age_idx);
-       aso_age->age_params.context = age->context;
-       aso_age->age_params.timeout = age->timeout;
+       MLX5_ASSERT(aso_age);
+       aso_age->age_params.context = context;
+       aso_age->age_params.timeout = timeout;
        aso_age->age_params.port_id = dev->data->port_id;
        __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
                         __ATOMIC_RELAXED);
        __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
                         __ATOMIC_RELAXED);
-       return age_idx;
+}
+
+static void
+flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
+                              const struct rte_flow_item_integrity *value,
+                              void *headers_m, void *headers_v)
+{
+       if (mask->l4_ok) {
+               /* The application l4_ok filter aggregates all hardware l4
+                * filters, therefore hw l4_checksum_ok must be implicitly
+                * added here.
+                */
+               struct rte_flow_item_integrity local_item;
+
+               local_item.l4_csum_ok = 1;
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+                        local_item.l4_csum_ok);
+               if (value->l4_ok) {
+               if (value->l4_ok) {
+                       /* A match on application l4_ok = 1 sets both hw
+                        * flags, l4_ok and l4_checksum_ok, to 1.
+                        */
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                l4_checksum_ok, local_item.l4_csum_ok);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
+                                mask->l4_ok);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
+                                value->l4_ok);
+               } else {
+                       /* application l4_ok = 0 matches on hw flag
+                        * l4_checksum_ok = 0 only.
+                        */
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                l4_checksum_ok, 0);
+               }
+       } else if (mask->l4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+                        mask->l4_csum_ok);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+                        value->l4_csum_ok);
+       }
+}
+
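+/**
+ * Translate the l3_ok and ipv4_csum_ok integrity bits into hardware
+ * match fields.
+ *
+ * @param[in] mask
+ *   Pointer to the integrity item mask.
+ * @param[in] value
+ *   Pointer to the integrity item spec.
+ * @param[in, out] headers_m
+ *   Pointer to the matcher header fields (fte_match_set_lyr_2_4).
+ * @param[in, out] headers_v
+ *   Pointer to the match value header fields (fte_match_set_lyr_2_4).
+ * @param[in] is_ipv4
+ *   True when the L3 header is IPv4, enabling the ipv4_checksum_ok match.
+ */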
+static void
+flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
+                              const struct rte_flow_item_integrity *value,
+                              void *headers_m, void *headers_v,
+                              bool is_ipv4)
+{
+       if (mask->l3_ok) {
+               /* The application l3_ok filter aggregates all hardware l3
+                * filters, therefore the hw ipv4_checksum_ok must be
+                * implicitly added here.
+                */
+               struct rte_flow_item_integrity local_item;
+
+               local_item.ipv4_csum_ok = !!is_ipv4;
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+                        local_item.ipv4_csum_ok);
+               if (value->l3_ok) {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                ipv4_checksum_ok, local_item.ipv4_csum_ok);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
+                                mask->l3_ok);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
+                                value->l3_ok);
+               } else {
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                ipv4_checksum_ok, 0);
+               }
+       } else if (mask->ipv4_csum_ok) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+                        mask->ipv4_csum_ok);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+                        value->ipv4_csum_ok);
+       }
+}
+
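+/**
+ * Translate the integrity flow item into hardware match fields.
+ *
+ * @param[in, out] matcher
+ *   Pointer to the flow matcher mask buffer.
+ * @param[in, out] key
+ *   Pointer to the flow match value buffer.
+ * @param[in] head_item
+ *   Pointer to the first item of the flow pattern.
+ * @param[in] integrity_item
+ *   Pointer to the integrity item to translate.
+ */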
+static void
+flow_dv_translate_item_integrity(void *matcher, void *key,
+                                const struct rte_flow_item *head_item,
+                                const struct rte_flow_item *integrity_item)
+{
+       const struct rte_flow_item_integrity *mask = integrity_item->mask;
+       const struct rte_flow_item_integrity *value = integrity_item->spec;
+       const struct rte_flow_item *tunnel_item, *end_item, *item;
+       void *headers_m;
+       void *headers_v;
+       uint32_t l3_protocol;
+
+       if (!value)
+               return;
+       if (!mask)
+               mask = &rte_flow_item_integrity_mask;
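+       /* Levels 0 and 1 match the outermost headers, higher levels inner. */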
+       if (value->level > 1) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       tunnel_item = mlx5_flow_find_tunnel_item(head_item);
+       if (value->level > 1) {
+               /* The tunnel item was verified during the item validation. */
+               item = tunnel_item;
+               end_item = mlx5_find_end_item(tunnel_item);
+       } else {
+               item = head_item;
+               end_item = tunnel_item ? tunnel_item :
+                          mlx5_find_end_item(integrity_item);
+       }
+       l3_protocol = mask->l3_ok ?
+                     mlx5_flow_locate_proto_l3(&item, end_item) : 0;
+       flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
+                                      l3_protocol == RTE_ETHER_TYPE_IPV4);
+       flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
 }
 
 /**
- * Fill the flow with DV spec, lock free
- * (mutex should be acquired by caller).
+ * Prepare a DV flow counter with the aging configuration.
+ * Get the counter by index when it exists, otherwise create a new one.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] dev_flow
- *   Pointer to the sub flow.
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow (sub flow).
+ * @param[in, out] flow
+ *   Pointer to the parent rte_flow.
- * @param[in] attr
- *   Pointer to the flow attributes.
- * @param[in] items
- *   Pointer to the list of items.
- * @param[in] actions
- *   Pointer to the list of actions.
+ * @param[in] count
+ *   Pointer to the counter action configuration.
+ * @param[in] age
+ *   Pointer to the aging action configuration.
  * @param[out] error
  *   Pointer to the error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   Pointer to the counter, NULL otherwise.
  */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
-                 struct mlx5_flow *dev_flow,
-                 const struct rte_flow_attr *attr,
-                 const struct rte_flow_item items[],
-                 const struct rte_flow_action actions[],
-                 struct rte_flow_error *error)
+static struct mlx5_flow_counter *
+flow_dv_prepare_counter(struct rte_eth_dev *dev,
+                       struct mlx5_flow *dev_flow,
+                       struct rte_flow *flow,
+                       const struct rte_flow_action_count *count,
+                       const struct rte_flow_action_age *age,
+                       struct rte_flow_error *error)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *dev_conf = &priv->config;
-       struct rte_flow *flow = dev_flow->flow;
-       struct mlx5_flow_handle *handle = dev_flow->handle;
-       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
-       struct mlx5_flow_rss_desc *rss_desc;
-       uint64_t item_flags = 0;
-       uint64_t last_item = 0;
-       uint64_t action_flags = 0;
-       struct mlx5_flow_dv_matcher matcher = {
-               .mask = {
-                       .size = sizeof(matcher.mask.buf) -
-                               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
-               },
-       };
-       int actions_n = 0;
-       bool actions_end = false;
-       union {
-               struct mlx5_flow_dv_modify_hdr_resource res;
-               uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
-                           sizeof(struct mlx5_modification_cmd) *
-                           (MLX5_MAX_MODIFY_NUM + 1)];
-       } mhdr_dummy;
-       struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
-       const struct rte_flow_action_count *count = NULL;
-       const struct rte_flow_action_age *age = NULL;
-       union flow_dv_attr flow_attr = { .attr = 0 };
-       uint32_t tag_be;
-       union mlx5_flow_tbl_key tbl_key;
-       uint32_t modify_action_position = UINT32_MAX;
-       void *match_mask = matcher.mask.buf;
-       void *match_value = dev_flow->dv.value.buf;
-       uint8_t next_protocol = 0xff;
-       struct rte_vlan_hdr vlan = { 0 };
-       struct mlx5_flow_dv_dest_array_resource mdest_res;
-       struct mlx5_flow_dv_sample_resource sample_res;
-       void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
-       const struct rte_flow_action_sample *sample = NULL;
-       struct mlx5_flow_sub_actions_list *sample_act;
-       uint32_t sample_act_pos = UINT32_MAX;
-       uint32_t num_of_dest = 0;
-       int tmp_actions_n = 0;
-       uint32_t table;
-       int ret = 0;
-       const struct mlx5_flow_tunnel *tunnel;
-       struct flow_grp_info grp_info = {
-               .external = !!dev_flow->external,
-               .transfer = !!attr->transfer,
-               .fdb_def_rule = !!priv->fdb_def_rule,
-               .skip_scale = dev_flow->skip_scale &
-                       (1 << MLX5_SCALE_FLOW_GROUP_BIT),
-       };
-
-       if (!wks)
-               return rte_flow_error_set(error, ENOMEM,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL,
-                                         "failed to push flow workspace");
-       rss_desc = &wks->rss_desc;
-       memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
-       memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
-       mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
-                                          MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
-       /* update normal path action resource into last index of array */
-       sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
-       tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
-                flow_items_to_tunnel(items) :
-                is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
-                flow_actions_to_tunnel(actions) :
-                dev_flow->tunnel ? dev_flow->tunnel : NULL;
-       mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
-                                          MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
-       grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
-                               (dev, tunnel, attr, items, actions);
-       ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-                                      &grp_info, error);
-       if (ret)
-               return ret;
-       dev_flow->dv.group = table;
-       if (attr->transfer)
-               mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
-       /* number of actions must be set to 0 in case of dirty stack. */
-       mhdr_res->actions_num = 0;
-       if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
-               /*
-                * do not add decap action if match rule drops packet
-                * HW rejects rules with decap & drop
-                *
-                * if tunnel match rule was inserted before matching tunnel set
-                * rule flow table used in the match rule must be registered.
-                * current implementation handles that in the
-                * flow_dv_match_register() at the function end.
-                */
-               bool add_decap = true;
-               const struct rte_flow_action *ptr = actions;
-
-               for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
-                       if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
-                               add_decap = false;
-                               break;
-                       }
-               }
-               if (add_decap) {
-                       if (flow_dv_create_action_l2_decap(dev, dev_flow,
-                                                          attr->transfer,
-                                                          error))
-                               return -rte_errno;
-                       dev_flow->dv.actions[actions_n++] =
-                                       dev_flow->dv.encap_decap->action;
-                       action_flags |= MLX5_FLOW_ACTION_DECAP;
+       if (!flow->counter) {
+               flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
+                                                                count, age);
+               if (!flow->counter) {
+                       rte_flow_error_set(error, rte_errno,
+                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                          "cannot create counter object.");
+                       return NULL;
                }
        }
-       for (; !actions_end ; actions++) {
-               const struct rte_flow_action_queue *queue;
-               const struct rte_flow_action_rss *rss;
-               const struct rte_flow_action *action = actions;
-               const uint8_t *rss_key;
-               struct mlx5_flow_tbl_resource *tbl;
-               struct mlx5_aso_age_action *age_act;
-               uint32_t port_id = 0;
-               struct mlx5_flow_dv_port_id_action_resource port_id_resource;
-               int action_type = actions->type;
-               const struct rte_flow_action *found_action = NULL;
-               uint32_t jump_group = 0;
+       return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
+}
 
-               if (!mlx5_flow_os_action_supported(action_type))
+/*
+ * Release an ASO CT action on its owner device.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Index of ASO CT action to release.
+ *
+ * @return
+ *   0 when the CT action was removed, -1 when it is still in use by the
+ *   ASO SQ, otherwise the remaining number of references.
+ */
+static inline int
+flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+       uint32_t ret;
+       struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+       enum mlx5_aso_ct_state state =
+                       __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+       /* Cannot release when CT is in the ASO SQ. */
+       if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
+               return -1;
+       ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
+       if (!ret) {
+               if (ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+                       claim_zero(mlx5_glue->destroy_flow_action
+                                       (ct->dr_action_orig));
+#endif
+                       ct->dr_action_orig = NULL;
+               }
+               if (ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+                       claim_zero(mlx5_glue->destroy_flow_action
+                                       (ct->dr_action_rply));
+#endif
+                       ct->dr_action_rply = NULL;
+               }
+               /* Clear the state to free; not needed on the first allocation. */
+               MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
+               rte_spinlock_lock(&mng->ct_sl);
+               LIST_INSERT_HEAD(&mng->free_cts, ct, next);
+               rte_spinlock_unlock(&mng->ct_sl);
+       }
+       return (int)ret;
+}
+
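+/*
+ * Release an ASO CT action held by a flow.
+ * The input index encodes the owner port, so the release request is
+ * forwarded to the owner device.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] own_idx
+ *   Combined owner port ID and index of the ASO CT action.
+ *
+ * @return
+ *   0 when the CT action was removed, -1 on failure, otherwise the
+ *   remaining number of references.
+ */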
+static inline int
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+{
+       uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+       uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+       struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+
+       MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+       if (dev->data->dev_started != 1)
+               return -1;
+       return flow_dv_aso_ct_dev_release(owndev, idx);
+}
+
+/*
+ * Resize the ASO CT pools array by 64 pools.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+       void *old_pools = mng->pools;
+       /* Magic number for now; should be defined as a macro. */
+       uint32_t resize = mng->n + 64;
+       uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
+       void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+       if (!pools) {
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       rte_rwlock_write_lock(&mng->resize_rwl);
+       /* The ASO SQ/QP was already initialized at startup. */
+       if (old_pools) {
+               /* Realloc could be an alternative choice. */
+               rte_memcpy(pools, old_pools,
+                          mng->n * sizeof(struct mlx5_aso_ct_pool *));
+               mlx5_free(old_pools);
+       }
+       mng->n = resize;
+       mng->pools = pools;
+       rte_rwlock_write_unlock(&mng->resize_rwl);
+       return 0;
+}
+
+/*
+ * Create and initialize a new ASO CT pool.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] ct_free
+ *   Where to store the pointer to a new CT action.
+ *
+ * @return
+ *   The CT actions pool pointer and @p ct_free is set on success,
+ *   NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_ct_pool *
+flow_dv_ct_pool_create(struct rte_eth_dev *dev,
+                      struct mlx5_aso_ct_action **ct_free)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+       struct mlx5_aso_ct_pool *pool = NULL;
+       struct mlx5_devx_obj *obj = NULL;
+       uint32_t i;
+       uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
+
+       obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
+                                               priv->sh->pdn, log_obj_size);
+       if (!obj) {
+               rte_errno = ENODATA;
+               DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
+               return NULL;
+       }
+       pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+       if (!pool) {
+               rte_errno = ENOMEM;
+               claim_zero(mlx5_devx_cmd_destroy(obj));
+               return NULL;
+       }
+       pool->devx_obj = obj;
+       pool->index = mng->next;
+       /* Resize pools array if there is no room for the new pool in it. */
+       if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
+               claim_zero(mlx5_devx_cmd_destroy(obj));
+               mlx5_free(pool);
+               return NULL;
+       }
+       mng->pools[pool->index] = pool;
+       mng->next++;
+       /* Assign the first action in the new pool, the rest go to free list. */
+       *ct_free = &pool->actions[0];
+       /* Lock outside, the list operation is safe here. */
+       for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
+               /* refcnt is 0 when allocating the memory. */
+               pool->actions[i].offset = i;
+               LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
+       }
+       return pool;
+}
+
+/*
+ * Allocate an ASO CT action from the free list.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+       struct mlx5_aso_ct_action *ct = NULL;
+       struct mlx5_aso_ct_pool *pool;
+       uint8_t reg_c;
+       uint32_t ct_idx;
+
+       MLX5_ASSERT(mng);
+       if (!priv->config.devx) {
+               rte_errno = ENOTSUP;
+               return 0;
+       }
+       /* Get a free CT action; if none, a new pool will be created. */
+       rte_spinlock_lock(&mng->ct_sl);
+       ct = LIST_FIRST(&mng->free_cts);
+       if (ct) {
+               LIST_REMOVE(ct, next);
+       } else if (!flow_dv_ct_pool_create(dev, &ct)) {
+               rte_spinlock_unlock(&mng->ct_sl);
+               rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, "failed to create ASO CT pool");
+               return 0;
+       }
+       rte_spinlock_unlock(&mng->ct_sl);
+       pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
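+       /* Build the CT index from the pool index and the in-pool offset. */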
+       ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
+       /* 0: inactive, 1: created, 2+: used by flows. */
+       __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+       reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
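+       /* The CT state register offset is counted from REG_C_0. */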
+       if (!ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+               ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
+                       (priv->sh->rx_domain, pool->devx_obj->obj,
+                        ct->offset,
+                        MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
+                        reg_c - REG_C_0);
+#else
+               RTE_SET_USED(reg_c);
+#endif
+               if (!ct->dr_action_orig) {
+                       flow_dv_aso_ct_dev_release(dev, ct_idx);
+                       rte_flow_error_set(error, rte_errno,
+                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                          "failed to create ASO CT action");
+                       return 0;
+               }
+       }
+       if (!ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+               ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
+                       (priv->sh->rx_domain, pool->devx_obj->obj,
+                        ct->offset,
+                        MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
+                        reg_c - REG_C_0);
+#endif
+               if (!ct->dr_action_rply) {
+                       flow_dv_aso_ct_dev_release(dev, ct_idx);
+                       rte_flow_error_set(error, rte_errno,
+                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                          "failed to create ASO CT action");
+                       return 0;
+               }
+       }
+       return ct_idx;
+}
+
+/*
+ * Create a conntrack object with context and actions by using the ASO
+ * mechanism.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] pro
+ *   Pointer to conntrack information profile.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Index to conntrack object on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
+                                  const struct rte_flow_action_conntrack *pro,
+                                  struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_aso_ct_action *ct;
+       uint32_t idx;
+
+       if (!sh->ct_aso_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Connection is not supported");
+       idx = flow_dv_aso_ct_alloc(dev, error);
+       if (!idx)
+               return rte_flow_error_set(error, rte_errno,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Failed to allocate CT object");
+       ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+       if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
+               return rte_flow_error_set(error, EBUSY,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Failed to update CT");
+       ct->is_original = !!pro->is_original_dir;
+       ct->peer = pro->peer_port;
+       return idx;
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+                 struct mlx5_flow *dev_flow,
+                 const struct rte_flow_attr *attr,
+                 const struct rte_flow_item items[],
+                 const struct rte_flow_action actions[],
+                 struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *dev_conf = &priv->config;
+       struct rte_flow *flow = dev_flow->flow;
+       struct mlx5_flow_handle *handle = dev_flow->handle;
+       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+       struct mlx5_flow_rss_desc *rss_desc;
+       uint64_t item_flags = 0;
+       uint64_t last_item = 0;
+       uint64_t action_flags = 0;
+       struct mlx5_flow_dv_matcher matcher = {
+               .mask = {
+                       .size = sizeof(matcher.mask.buf) -
+                               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               },
+       };
+       int actions_n = 0;
+       bool actions_end = false;
+       union {
+               struct mlx5_flow_dv_modify_hdr_resource res;
+               uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+                           sizeof(struct mlx5_modification_cmd) *
+                           (MLX5_MAX_MODIFY_NUM + 1)];
+       } mhdr_dummy;
+       struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+       const struct rte_flow_action_count *count = NULL;
+       const struct rte_flow_action_age *non_shared_age = NULL;
+       union flow_dv_attr flow_attr = { .attr = 0 };
+       uint32_t tag_be;
+       union mlx5_flow_tbl_key tbl_key;
+       uint32_t modify_action_position = UINT32_MAX;
+       void *match_mask = matcher.mask.buf;
+       void *match_value = dev_flow->dv.value.buf;
+       uint8_t next_protocol = 0xff;
+       struct rte_vlan_hdr vlan = { 0 };
+       struct mlx5_flow_dv_dest_array_resource mdest_res;
+       struct mlx5_flow_dv_sample_resource sample_res;
+       void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+       const struct rte_flow_action_sample *sample = NULL;
+       struct mlx5_flow_sub_actions_list *sample_act;
+       uint32_t sample_act_pos = UINT32_MAX;
+       uint32_t age_act_pos = UINT32_MAX;
+       uint32_t num_of_dest = 0;
+       int tmp_actions_n = 0;
+       uint32_t table;
+       int ret = 0;
+       const struct mlx5_flow_tunnel *tunnel = NULL;
+       struct flow_grp_info grp_info = {
+               .external = !!dev_flow->external,
+               .transfer = !!attr->transfer,
+               .fdb_def_rule = !!priv->fdb_def_rule,
+               .skip_scale = dev_flow->skip_scale &
+                       (1 << MLX5_SCALE_FLOW_GROUP_BIT),
+               .std_tbl_fix = true,
+       };
+       const struct rte_flow_item *head_item = items;
+
+       if (!wks)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "failed to push flow workspace");
+       rss_desc = &wks->rss_desc;
+       memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+       memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
+       mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+                                          MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+       /* Update normal path action resource at the last index of the array. */
+       sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
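+       /* Resolve the tunnel offload type from the items and actions. */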
+       if (is_tunnel_offload_active(dev)) {
+               if (dev_flow->tunnel) {
+                       RTE_VERIFY(dev_flow->tof_type ==
+                                  MLX5_TUNNEL_OFFLOAD_MISS_RULE);
+                       tunnel = dev_flow->tunnel;
+               } else {
+                       tunnel = mlx5_get_tof(items, actions,
+                                             &dev_flow->tof_type);
+                       dev_flow->tunnel = tunnel;
+               }
+               grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+                                       (dev, attr, tunnel, dev_flow->tof_type);
+       }
+       ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+                                      &grp_info, error);
+       if (ret)
+               return ret;
+       dev_flow->dv.group = table;
+       if (attr->transfer)
+               mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+       /* The number of actions must be set to 0 in case of a dirty stack. */
+       mhdr_res->actions_num = 0;
+       if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
+               /*
+                * Do not add a decap action if the match rule drops packets:
+                * HW rejects rules with decap & drop.
+                *
+                * If a tunnel match rule was inserted before the matching
+                * tunnel set rule, the flow table used in the match rule must
+                * be registered. The current implementation handles that in
+                * flow_dv_match_register() at the function end.
+                */
+               bool add_decap = true;
+               const struct rte_flow_action *ptr = actions;
+
+               for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+                       if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+                               add_decap = false;
+                               break;
+                       }
+               }
+               if (add_decap) {
+                       if (flow_dv_create_action_l2_decap(dev, dev_flow,
+                                                          attr->transfer,
+                                                          error))
+                               return -rte_errno;
+                       dev_flow->dv.actions[actions_n++] =
+                                       dev_flow->dv.encap_decap->action;
+                       action_flags |= MLX5_FLOW_ACTION_DECAP;
+               }
+       }
+       for (; !actions_end ; actions++) {
+               const struct rte_flow_action_queue *queue;
+               const struct rte_flow_action_rss *rss;
+               const struct rte_flow_action *action = actions;
+               const uint8_t *rss_key;
+               struct mlx5_flow_tbl_resource *tbl;
+               struct mlx5_aso_age_action *age_act;
+               struct mlx5_flow_counter *cnt_act;
+               uint32_t port_id = 0;
+               struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+               int action_type = actions->type;
+               const struct rte_flow_action *found_action = NULL;
+               uint32_t jump_group = 0;
+               uint32_t owner_idx;
+               struct mlx5_aso_ct_action *ct;
+
+               if (!mlx5_flow_os_action_supported(action_type))
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
@@ -11407,34 +12287,23 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        age_act = flow_aso_age_get_by_idx(dev, flow->age);
                        __atomic_fetch_add(&age_act->refcnt, 1,
                                           __ATOMIC_RELAXED);
-                       dev_flow->dv.actions[actions_n++] = age_act->dr_action;
+                       age_act_pos = actions_n++;
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        break;
                case RTE_FLOW_ACTION_TYPE_AGE:
-                       if (priv->sh->flow_hit_aso_en && attr->group) {
-                               /*
-                                * Create one shared age action, to be used
-                                * by all sub-flows.
-                                */
-                               if (!flow->age) {
-                                       flow->age =
-                                               flow_dv_translate_create_aso_age
-                                                       (dev, action->conf,
-                                                        error);
-                                       if (!flow->age)
-                                               return rte_flow_error_set
-                                               (error, rte_errno,
-                                                RTE_FLOW_ERROR_TYPE_ACTION,
-                                                NULL,
-                                                "can't create ASO age action");
-                               }
-                               dev_flow->dv.actions[actions_n++] =
-                                         (flow_aso_age_get_by_idx
-                                               (dev, flow->age))->dr_action;
-                               action_flags |= MLX5_FLOW_ACTION_AGE;
-                               break;
-                       }
-                       /* Fall-through */
+                       non_shared_age = action->conf;
+                       age_act_pos = actions_n++;
+                       action_flags |= MLX5_FLOW_ACTION_AGE;
+                       break;
+               case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+                       flow->counter = (uint32_t)(uintptr_t)(action->conf);
+                       cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
+                                                            NULL);
+                       __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
+                                          __ATOMIC_RELAXED);
+                       /* Save information first, will apply later. */
+                       action_flags |= MLX5_FLOW_ACTION_COUNT;
+                       break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        if (!dev_conf->devx) {
                                return rte_flow_error_set
@@ -11444,10 +12313,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                               "count action not supported");
                        }
                        /* Save information first, will apply later. */
-                       if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
-                               count = action->conf;
-                       else
-                               age = action->conf;
+                       count = action->conf;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
@@ -11554,6 +12420,11 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        /* If decap is followed by encap, handle it at encap. */
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        break;
+               case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
+                       dev_flow->dv.actions[actions_n++] =
+                               (void *)(uintptr_t)action->conf;
+                       action_flags |= MLX5_FLOW_ACTION_JUMP;
+                       break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
                        jump_group = ((const struct rte_flow_action_jump *)
                                                        action->conf)->group;
@@ -11732,6 +12603,31 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                return -rte_errno;
                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
                        break;
+               case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+                       owner_idx = (uint32_t)(uintptr_t)action->conf;
+                       ct = flow_aso_ct_get_by_idx(dev, owner_idx);
+                       if (!ct)
+                               return rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION,
+                                               NULL,
+                                               "Failed to get CT object.");
+                       if (mlx5_aso_ct_available(priv->sh, ct))
+                               return rte_flow_error_set(error, rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ACTION,
+                                               NULL,
+                                               "CT is unavailable.");
+                       if (ct->is_original)
+                               dev_flow->dv.actions[actions_n] =
+                                                       ct->dr_action_orig;
+                       else
+                               dev_flow->dv.actions[actions_n] =
+                                                       ct->dr_action_rply;
+                       flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
+                       flow->ct = owner_idx;
+                       __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+                       actions_n++;
+                       action_flags |= MLX5_FLOW_ACTION_CT;
+                       break;
                case RTE_FLOW_ACTION_TYPE_END:
                        actions_end = true;
                        if (mhdr_res->actions_num) {
@@ -11742,27 +12638,57 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                dev_flow->dv.actions[modify_action_position] =
                                        handle->dvh.modify_hdr->action;
                        }
+                       /*
+                        * Handle the AGE and COUNT actions with a single HW
+                        * counter when neither of them is shared.
+                        */
+                       if (action_flags & MLX5_FLOW_ACTION_AGE) {
+                               if ((non_shared_age &&
+                                    count && !count->shared) ||
+                                   !(priv->sh->flow_hit_aso_en &&
+                                     (attr->group || attr->transfer))) {
+                                       /* Create aging via the flow counter. */
+                                       cnt_act = flow_dv_prepare_counter
+                                                               (dev, dev_flow,
+                                                                flow, count,
+                                                                non_shared_age,
+                                                                error);
+                                       if (!cnt_act)
+                                               return -rte_errno;
+                                       dev_flow->dv.actions[age_act_pos] =
+                                                               cnt_act->action;
+                                       break;
+                               }
+                               if (!flow->age && non_shared_age) {
+                                       flow->age = flow_dv_aso_age_alloc
+                                                               (dev, error);
+                                       if (!flow->age)
+                                               return -rte_errno;
+                                       flow_dv_aso_age_params_init
+                                                   (dev, flow->age,
+                                                    non_shared_age->context ?
+                                                    non_shared_age->context :
+                                                    (void *)(uintptr_t)
+                                                    (dev_flow->flow_idx),
+                                                    non_shared_age->timeout);
+                               }
+                               age_act = flow_aso_age_get_by_idx(dev,
+                                                                 flow->age);
+                               dev_flow->dv.actions[age_act_pos] =
+                                                            age_act->dr_action;
+                       }
                        if (action_flags & MLX5_FLOW_ACTION_COUNT) {
                                /*
                                 * Create one count action, to be used
                                 * by all sub-flows.
                                 */
-                               if (!flow->counter) {
-                                       flow->counter =
-                                               flow_dv_translate_create_counter
-                                                       (dev, dev_flow, count,
-                                                        age);
-                                       if (!flow->counter)
-                                               return rte_flow_error_set
-                                               (error, rte_errno,
-                                                RTE_FLOW_ERROR_TYPE_ACTION,
-                                                NULL, "cannot create counter"
-                                                " object.");
-                               }
-                               dev_flow->dv.actions[actions_n] =
-                                         (flow_dv_counter_get_by_idx(dev,
-                                         flow->counter, NULL))->action;
-                               actions_n++;
+                               cnt_act = flow_dv_prepare_counter(dev, dev_flow,
+                                                                 flow, count,
+                                                                 NULL, error);
+                               if (!cnt_act)
+                                       return -rte_errno;
+                               dev_flow->dv.actions[actions_n++] =
+                                                               cnt_act->action;
                        }
                default:
                        break;
@@ -12014,6 +12940,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        /* No other protocol should follow eCPRI layer. */
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
+               case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+                       flow_dv_translate_item_integrity(match_mask,
+                                                        match_value,
+                                                        head_item, items);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+                       flow_dv_translate_item_aso_ct(dev, match_mask,
+                                                     match_value, items);
+                       break;
                default:
                        break;
                }
@@ -12109,6 +13044,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
        }
        dev_flow->dv.actions_n = actions_n;
        dev_flow->act_flags = action_flags;
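+       /* The caller may skip the matcher registration via the workspace. */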
+       if (wks->skip_matcher_reg)
+               return 0;
        /* Register matcher. */
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
@@ -12865,7 +13802,10 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                        mlx5_flow_meter_detach(priv, fm);
                flow->meter = 0;
        }
-       if (flow->age)
+       /* Keep the current age handling by default. */
+       if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
+               flow_dv_aso_ct_release(dev, flow->ct);
+       else if (flow->age)
                flow_dv_aso_age_release(dev, flow->age);
        if (flow->geneve_tlv_option) {
                flow_dv_geneve_tlv_option_resource_release(dev);
@@ -13292,6 +14232,8 @@ flow_dv_action_create(struct rte_eth_dev *dev,
                      const struct rte_flow_action *action,
                      struct rte_flow_error *err)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t age_idx = 0;
        uint32_t idx = 0;
        uint32_t ret = 0;
 
@@ -13302,17 +14244,32 @@ flow_dv_action_create(struct rte_eth_dev *dev,
                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
                break;
        case RTE_FLOW_ACTION_TYPE_AGE:
-               ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
+               age_idx = flow_dv_aso_age_alloc(dev, err);
+               if (!age_idx) {
+                       ret = -rte_errno;
+                       break;
+               }
                idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+                      MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
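+               /* Use the indirect action index as the context if none given. */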
+               flow_dv_aso_age_params_init(dev, age_idx,
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->context ?
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->context :
+                                       (void *)(uintptr_t)idx,
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->timeout);
+               ret = age_idx;
+               break;
+       case RTE_FLOW_ACTION_TYPE_COUNT:
+               ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
+               idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
-               if (ret) {
-                       struct mlx5_aso_age_action *aso_age =
-                                             flow_aso_age_get_by_idx(dev, ret);
-
-                       if (!aso_age->age_params.context)
-                               aso_age->age_params.context =
-                                                        (void *)(uintptr_t)idx;
-               }
+               break;
+       case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+               ret = flow_dv_translate_create_conntrack(dev, action->conf,
+                                                        err);
+               idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
                break;
        default:
                rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
@@ -13347,11 +14304,25 @@ flow_dv_action_destroy(struct rte_eth_dev *dev,
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;
        uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+       struct mlx5_flow_counter *cnt;
+       uint32_t no_flow_refcnt = 1;
        int ret;
 
        switch (type) {
        case MLX5_INDIRECT_ACTION_TYPE_RSS:
                return __flow_dv_action_rss_release(dev, idx, error);
+       case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+               cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
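+               /* The counter can be freed only when no flow references it. */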
+               if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
+                                                &no_flow_refcnt, 1, false,
+                                                __ATOMIC_ACQUIRE,
+                                                __ATOMIC_RELAXED))
+                       return rte_flow_error_set(error, EBUSY,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL,
+                                                 "Indirect count action has references");
+               flow_dv_counter_free(dev, idx);
+               return 0;
        case MLX5_INDIRECT_ACTION_TYPE_AGE:
                ret = flow_dv_aso_age_release(dev, idx);
                if (ret)
@@ -13362,6 +14333,14 @@ flow_dv_action_destroy(struct rte_eth_dev *dev,
                        DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
                                " released with references %d.", idx, ret);
                return 0;
+       case MLX5_INDIRECT_ACTION_TYPE_CT:
+               ret = flow_dv_aso_ct_release(dev, idx);
+               if (ret < 0)
+                       return ret;
+               if (ret > 0)
+                       DRV_LOG(DEBUG, "Connection tracking object %u still "
+                               "has references %d.", idx, ret);
+               return 0;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
@@ -13436,6 +14415,72 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
        return ret;
 }
 
+/*
+ * Update the conntrack context or direction in place.
+ * The context update should be synchronized.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   The conntrack object ID to be updated.
+ * @param[in] update
+ *   Pointer to the structure of information to update.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
+                          const struct rte_flow_modify_conntrack *update,
+                          struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_action *ct;
+       const struct rte_flow_action_conntrack *new_prf;
+       int ret = 0;
+       uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+       uint32_t dev_idx;
+
+       if (PORT_ID(priv) != owner)
+               return rte_flow_error_set(error, EACCES,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "CT object owned by another port");
+       dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+       ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+       if (!ct->refcnt)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "CT object is inactive");
+       new_prf = &update->new_ct;
+       if (update->direction)
+               ct->is_original = !!new_prf->is_original_dir;
+       if (update->state) {
+               /* Only validate the profile when it needs to be updated. */
+               ret = mlx5_validate_action_ct(dev, new_prf, error);
+               if (ret)
+                       return ret;
+               ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
+               if (ret)
+                       return rte_flow_error_set(error, EIO,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       NULL,
+                                       "Failed to send CT context update WQE");
+               /* Block until ready or a failure. */
+               ret = mlx5_aso_ct_available(priv->sh, ct);
+               if (ret)
+                       rte_flow_error_set(error, rte_errno,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL,
+                                          "Timeout to get the CT update");
+       }
+       return ret;
+}
+
 /**
  * Updates in place shared action configuration, lock free,
  * (mutex should be acquired by caller).
@@ -13471,6 +14516,8 @@ flow_dv_action_update(struct rte_eth_dev *dev,
        case MLX5_INDIRECT_ACTION_TYPE_RSS:
                action_conf = ((const struct rte_flow_action *)update)->conf;
                return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+       case MLX5_INDIRECT_ACTION_TYPE_CT:
+               return __flow_dv_action_ct_update(dev, idx, update, err);
        default:
                return rte_flow_error_set(err, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
@@ -13479,37 +14526,6 @@ flow_dv_action_update(struct rte_eth_dev *dev,
        }
 }
 
-static int
-flow_dv_action_query(struct rte_eth_dev *dev,
-                    const struct rte_flow_action_handle *handle, void *data,
-                    struct rte_flow_error *error)
-{
-       struct mlx5_age_param *age_param;
-       struct rte_flow_query_age *resp;
-       uint32_t act_idx = (uint32_t)(uintptr_t)handle;
-       uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
-       uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
-
-       switch (type) {
-       case MLX5_INDIRECT_ACTION_TYPE_AGE:
-               age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
-               resp = data;
-               resp->aged = __atomic_load_n(&age_param->state,
-                                             __ATOMIC_RELAXED) == AGE_TMOUT ?
-                                                                         1 : 0;
-               resp->sec_since_last_hit_valid = !resp->aged;
-               if (resp->sec_since_last_hit_valid)
-                       resp->sec_since_last_hit = __atomic_load_n
-                            (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
-               return 0;
-       default:
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         NULL,
-                                         "action type query not supported");
-       }
-}
-
 /**
  * Destroy the meter sub policy table rules.
  * Lock free, (mutex should be acquired by caller).
@@ -13727,12 +14743,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                MLX5_ASSERT(dev_flow.dv.tag_resource);
                                act_cnt->rix_mark =
                                        dev_flow.handle->dvh.rix_tag;
-                               if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-                                       dev_flow.handle->rix_hrxq =
-                       mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                break;
                        }
@@ -13780,12 +14790,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                        "set tag action");
                                act_cnt->modify_hdr =
                                dev_flow.handle->dvh.modify_hdr;
-                               if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-                                       dev_flow.handle->rix_hrxq =
-                               mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                                break;
                        }
@@ -13829,41 +14833,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                        }
                        case RTE_FLOW_ACTION_TYPE_QUEUE:
                        {
-                               struct mlx5_hrxq *hrxq;
-                               uint32_t hrxq_idx;
-                               struct mlx5_flow_rss_desc rss_desc;
-                               struct mlx5_flow_meter_sub_policy *sub_policy =
-                               mtr_policy->sub_policys[domain][0];
-
                                if (i >= MLX5_MTR_RTE_COLORS)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "cannot create policy "
                                        "fate queue for this color");
-                               memset(&rss_desc, 0,
-                                       sizeof(struct mlx5_flow_rss_desc));
-                               rss_desc.queue_num = 1;
-                               rss_desc.const_q = act->conf;
-                               hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
-                                                   &rss_desc, &hrxq_idx);
-                               if (!hrxq)
-                                       return -rte_mtr_error_set(error,
-                                       ENOTSUP,
-                                       RTE_MTR_ERROR_TYPE_METER_POLICY,
-                                       NULL,
-                                       "cannot create policy fate queue");
-                               sub_policy->rix_hrxq[i] = hrxq_idx;
+                               act_cnt->queue =
+                               ((const struct rte_flow_action_queue *)
+                                       (act->conf))->index;
                                act_cnt->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                                dev_flow.handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
-                               if (action_flags & MLX5_FLOW_ACTION_MARK ||
-                                   action_flags & MLX5_FLOW_ACTION_SET_TAG) {
-                                       dev_flow.handle->rix_hrxq = hrxq_idx;
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
+                               mtr_policy->is_queue = 1;
                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                                break;
                        }
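
The hunk above switches the QUEUE fate to late binding: policy creation now records only the queue index in act_cnt->queue and flags mtr_policy->is_queue, instead of eagerly building the hash Rx queue (hrxq) and wiring it into the sub-policy. A minimal sketch of the pattern, with a hypothetical policy_act type and hrxq_get callback (not the mlx5 definitions):

#include <stdint.h>

struct policy_act {
	uint16_t queue;    /* Queue index recorded at policy-create time. */
	uint32_t rix_hrxq; /* Hash Rx queue index, resolved at attach time. */
};

/* Late binding: look the hrxq up only when a flow actually needs it. */
static int
policy_act_attach(struct policy_act *act,
		  uint32_t (*hrxq_get)(uint16_t queue))
{
	act->rix_hrxq = hrxq_get(act->queue);
	return act->rix_hrxq ? 0 : -1;
}

Deferring the lookup is what allows the same policy to be combined later with per-flow RSS state, as the sub-policy preparation code further down does.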
@@ -14061,14 +15044,14 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
 }
 
 /**
- * Query a dv flow  rule for its statistics via devx.
+ * Query a DV flow rule for its statistics via DevX.
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
- * @param[in] flow
- *   Pointer to the sub flow.
+ * @param[in] cnt_idx
+ *   Index to the flow counter.
  * @param[out] data
- *   data retrieved by the query.
+ *   Data retrieved by the query.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -14076,8 +15059,8 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
-                   void *data, struct rte_flow_error *error)
+flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
+                   struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_query_count *qc = data;
@@ -14087,19 +15070,16 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "counters are not supported");
-       if (flow->counter) {
+       if (cnt_idx) {
                uint64_t pkts, bytes;
                struct mlx5_flow_counter *cnt;
-
-               cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
-                                                NULL);
-               int err = _flow_dv_query_count(dev, flow->counter, &pkts,
-                                              &bytes);
+               int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
 
                if (err)
                        return rte_flow_error_set(error, -err,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL, "cannot read counters");
+               cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
                qc->hits_set = 1;
                qc->bytes_set = 1;
                qc->hits = pkts - cnt->hits;
@@ -14116,6 +15096,67 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
                                  "counters are not available");
 }
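
flow_dv_query_count() above reads the free-running hardware counter and reports the delta against the software baseline kept in the mlx5_flow_counter (qc->hits = pkts - cnt->hits). A minimal sketch of this baseline-delta scheme, assuming an illustrative sw_counter type and that a reset request advances the baseline (the reset handling itself lies outside the hunk):

#include <stdbool.h>
#include <stdint.h>

struct sw_counter {
	uint64_t hits;  /* Baseline captured at the last reset. */
	uint64_t bytes;
};

static void
counter_query(uint64_t hw_pkts, uint64_t hw_bytes,
	      struct sw_counter *cnt, bool reset,
	      uint64_t *pkts_out, uint64_t *bytes_out)
{
	/* Report counts relative to the last reset, not raw HW values. */
	*pkts_out = hw_pkts - cnt->hits;
	*bytes_out = hw_bytes - cnt->bytes;
	if (reset) {
		/* Advance the baseline so the next query starts from 0. */
		cnt->hits = hw_pkts;
		cnt->bytes = hw_bytes;
	}
}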
 
+static int
+flow_dv_action_query(struct rte_eth_dev *dev,
+                    const struct rte_flow_action_handle *handle, void *data,
+                    struct rte_flow_error *error)
+{
+       struct mlx5_age_param *age_param;
+       struct rte_flow_query_age *resp;
+       uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+       uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+       uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_ct_action *ct;
+       uint16_t owner;
+       uint32_t dev_idx;
+
+       switch (type) {
+       case MLX5_INDIRECT_ACTION_TYPE_AGE:
+               age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
+               resp = data;
+               resp->aged = __atomic_load_n(&age_param->state,
+                                             __ATOMIC_RELAXED) == AGE_TMOUT ?
+                                                                         1 : 0;
+               resp->sec_since_last_hit_valid = !resp->aged;
+               if (resp->sec_since_last_hit_valid)
+                       resp->sec_since_last_hit = __atomic_load_n
+                            (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+               return 0;
+       case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+               return flow_dv_query_count(dev, idx, data, error);
+       case MLX5_INDIRECT_ACTION_TYPE_CT:
+               owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+               if (owner != PORT_ID(priv))
+                       return rte_flow_error_set(error, EACCES,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       NULL,
+                                       "CT object owned by another port");
+               dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+               ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+               MLX5_ASSERT(ct);
+               if (!ct->refcnt)
+                       return rte_flow_error_set(error, EFAULT,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       NULL,
+                                       "CT object is inactive");
+               ((struct rte_flow_action_conntrack *)data)->peer_port =
+                                                       ct->peer;
+               ((struct rte_flow_action_conntrack *)data)->is_original_dir =
+                                                       ct->is_original;
+               if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
+                       return rte_flow_error_set(error, EIO,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       NULL,
+                                       "Failed to query CT context");
+               return 0;
+       default:
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "action type query not supported");
+       }
+}
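
flow_dv_action_query() above recovers the action type and the object index from the opaque indirect-action handle: the type sits in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the per-type index fills the bits below it. A sketch of that encoding, with TYPE_OFFSET as an assumed stand-in value rather than the real mlx5 constant:

#include <stdint.h>

#define TYPE_OFFSET 29u /* Assumed stand-in for MLX5_INDIRECT_ACTION_TYPE_OFFSET. */
#define IDX_MASK ((1u << TYPE_OFFSET) - 1)

static inline uint32_t
handle_encode(uint32_t type, uint32_t idx)
{
	/* Action type in the high bits, object index in the low bits. */
	return (type << TYPE_OFFSET) | (idx & IDX_MASK);
}

static inline uint32_t
handle_type(uint32_t act_idx)
{
	return act_idx >> TYPE_OFFSET;
}

static inline uint32_t
handle_index(uint32_t act_idx)
{
	return act_idx & IDX_MASK;
}

Packing both fields into one 32-bit value lets the handle travel through the generic rte_flow_action_handle pointer without any per-action allocation.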
+
 /**
  * Query a flow rule AGE action for aging information.
  *
@@ -14185,7 +15226,8 @@ flow_dv_query(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
-                       ret = flow_dv_query_count(dev, flow, data, error);
+                       ret = flow_dv_query_count(dev, flow->counter, data,
+                                                 error);
                        break;
                case RTE_FLOW_ACTION_TYPE_AGE:
                        ret = flow_dv_query_age(dev, flow, data, error);
@@ -14874,6 +15916,217 @@ policy_error:
        return -1;
 }
 
+/**
+ * Find the policy table for the prefix table with RSS.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ * @param[in] rss_desc
+ *   Pointer to an array of RSS descriptors, one per color.
+ *
+ * @return
+ *   Pointer to table set on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+               struct mlx5_flow_meter_policy *mtr_policy,
+               struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+       uint32_t sub_policy_idx = 0;
+       uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
+       uint32_t i, j;
+       struct mlx5_hrxq *hrxq;
+       struct mlx5_flow_handle dh;
+       struct mlx5_meter_policy_action_container *act_cnt;
+       uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+       uint16_t sub_policy_num;
+
+       rte_spinlock_lock(&mtr_policy->sl);
+       for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+               if (!rss_desc[i])
+                       continue;
+               hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
+               if (!hrxq_idx[i]) {
+                       rte_spinlock_unlock(&mtr_policy->sl);
+                       return NULL;
+               }
+       }
+       sub_policy_num = (mtr_policy->sub_policy_num >>
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                       MLX5_MTR_SUB_POLICY_NUM_MASK;
+       for (i = 0; i < sub_policy_num; i++) {
+               for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+                       if (rss_desc[j] &&
+                           hrxq_idx[j] !=
+                           mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
+                               break;
+               }
+               if (j >= MLX5_MTR_RTE_COLORS) {
+                       /*
+                        * Found the sub-policy table with
+                        * the same queue per color.
+                        */
+                       rte_spinlock_unlock(&mtr_policy->sl);
+                       for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
+                               mlx5_hrxq_release(dev, hrxq_idx[j]);
+                       return mtr_policy->sub_policys[domain][i];
+               }
+       }
+       /* Create sub policy. */
+       if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+               /* Reuse the first dummy sub_policy. */
+               sub_policy = mtr_policy->sub_policys[domain][0];
+               sub_policy_idx = sub_policy->idx;
+       } else {
+               sub_policy = mlx5_ipool_zmalloc
+                               (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+                               &sub_policy_idx);
+               if (!sub_policy ||
+                       sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
+                       for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
+                               mlx5_hrxq_release(dev, hrxq_idx[i]);
+                       goto rss_sub_policy_error;
+               }
+               sub_policy->idx = sub_policy_idx;
+               sub_policy->main_policy = mtr_policy;
+       }
+       for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+               if (!rss_desc[i])
+                       continue;
+               sub_policy->rix_hrxq[i] = hrxq_idx[i];
+               /*
+                * Overwrite the fate action, replacing
+                * the RSS action with a Queue action.
+                */
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                             hrxq_idx[i]);
+               if (!hrxq) {
+                       DRV_LOG(ERR, "Failed to get policy hrxq");
+                       goto rss_sub_policy_error;
+               }
+               act_cnt = &mtr_policy->act_cnt[i];
+               if (act_cnt->rix_mark || act_cnt->modify_hdr) {
+                       memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+                       if (act_cnt->rix_mark)
+                               dh.mark = 1;
+                       dh.fate_action = MLX5_FLOW_FATE_QUEUE;
+                       dh.rix_hrxq = hrxq_idx[i];
+                       flow_drv_rxq_flags_set(dev, &dh);
+               }
+       }
+       if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
+               sub_policy, domain)) {
+               DRV_LOG(ERR, "Failed to create policy "
+                       "rules per domain.");
+               goto rss_sub_policy_error;
+       }
+       if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+               i = (mtr_policy->sub_policy_num >>
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                       MLX5_MTR_SUB_POLICY_NUM_MASK;
+               mtr_policy->sub_policys[domain][i] = sub_policy;
+               i++;
+               if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
+                       goto rss_sub_policy_error;
+               mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+               mtr_policy->sub_policy_num |=
+                       (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+       }
+       rte_spinlock_unlock(&mtr_policy->sl);
+       return sub_policy;
+rss_sub_policy_error:
+       if (sub_policy) {
+               __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
+               if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+                       i = (mtr_policy->sub_policy_num >>
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                       MLX5_MTR_SUB_POLICY_NUM_MASK;
+                       mtr_policy->sub_policys[domain][i] = NULL;
+                       mlx5_ipool_free
+                       (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+                                       sub_policy->idx);
+               }
+       }
+       rte_spinlock_unlock(&mtr_policy->sl);
+       return NULL;
+}
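
flow_dv_meter_sub_policy_rss_prepare() above repeatedly extracts and rewrites a small per-domain counter packed into mtr_policy->sub_policy_num with shift-and-mask arithmetic. A sketch of that packing, with an assumed field width (the real MLX5_MTR_SUB_POLICY_NUM_SHIFT/MASK values may differ):

#include <stdint.h>

#define NUM_SHIFT 3u /* Assumed bits per domain field. */
#define NUM_MASK ((1u << NUM_SHIFT) - 1)

/* Read the sub-policy count of one meter domain. */
static inline uint16_t
sub_policy_num_get(uint32_t packed, uint32_t domain)
{
	return (packed >> (NUM_SHIFT * domain)) & NUM_MASK;
}

/* Clear the domain's field, then OR in the new count. */
static inline uint32_t
sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
	packed &= ~(NUM_MASK << (NUM_SHIFT * domain));
	packed |= ((uint32_t)num & NUM_MASK) << (NUM_SHIFT * domain);
	return packed;
}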
+
+/**
+ * Destroy the sub-policy tables holding Rx queue references.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+               struct mlx5_flow_meter_policy *mtr_policy)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+       uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+       uint32_t i, j;
+       uint16_t sub_policy_num, new_policy_num;
+
+       rte_spinlock_lock(&mtr_policy->sl);
+       for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+               switch (mtr_policy->act_cnt[i].fate_action) {
+               case MLX5_FLOW_FATE_SHARED_RSS:
+                       sub_policy_num = (mtr_policy->sub_policy_num >>
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                       MLX5_MTR_SUB_POLICY_NUM_MASK;
+                       new_policy_num = sub_policy_num;
+                       for (j = 0; j < sub_policy_num; j++) {
+                               sub_policy =
+                                       mtr_policy->sub_policys[domain][j];
+                               if (sub_policy) {
+                                       __flow_dv_destroy_sub_policy_rules(dev,
+                                               sub_policy);
+                                       if (sub_policy !=
+                                           mtr_policy->sub_policys[domain][0]) {
+                                               mtr_policy->sub_policys[domain][j]
+                                                       = NULL;
+                                               mlx5_ipool_free(priv->sh->ipool
+                                                       [MLX5_IPOOL_MTR_POLICY],
+                                                       sub_policy->idx);
+                                               new_policy_num--;
+                                       }
+                               }
+                       }
+                       if (new_policy_num != sub_policy_num) {
+                               mtr_policy->sub_policy_num &=
+                               ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+                               mtr_policy->sub_policy_num |=
+                               (new_policy_num &
+                                       MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+                       }
+                       break;
+               case MLX5_FLOW_FATE_QUEUE:
+                       sub_policy = mtr_policy->sub_policys[domain][0];
+                       __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
+                       break;
+               default:
+                       /* Other actions without a queue; do nothing. */
+                       break;
+               }
+       }
+       rte_spinlock_unlock(&mtr_policy->sl);
+}
+
 /**
  * Validate the batch counter support in root table.
  *
@@ -14898,7 +16151,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                .size = sizeof(value.buf),
        };
        struct mlx5dv_flow_matcher_attr dv_attr = {
-               .type = IBV_FLOW_ATTR_NORMAL,
+               .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
                .priority = 0,
                .match_criteria_enable = 0,
                .match_mask = (void *)&mask,
@@ -14910,7 +16163,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
        void *flow = NULL;
        int ret = -1;
 
-       tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+       tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
                                        0, 0, 0, NULL);
        if (!tbl)
                goto err;
@@ -14921,14 +16174,12 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                                                    &actions[0]);
        if (ret)
                goto err;
-       actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
-                                         priv->drop_queue.hrxq->action;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
        ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
                                               &matcher);
        if (ret)
                goto err;
-       ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+       ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
                                       actions, &flow);
 err:
        /*
@@ -15082,7 +16333,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev)
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
  * @param[in] conf
- *   Shared action configuration.
+ *   Indirect action configuration.
  * @param[in] action
  *   The indirect action object to validate.
  * @param[out] error
@@ -15109,22 +16360,42 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
                 * sufficient, it is set to devx_obj_ops.
                 * Otherwise, it is set to ibv_obj_ops.
                 * ibv_obj_ops doesn't support ind_table_modify operation.
-                * In this case the shared RSS action can't be used.
+                * In this case the indirect RSS action can't be used.
                 */
                if (priv->obj_ops.ind_table_modify == NULL)
                        return rte_flow_error_set
                                        (err, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
-                                        "shared RSS action not supported");
+                                        "Indirect RSS action not supported");
                return mlx5_validate_action_rss(dev, action, err);
        case RTE_FLOW_ACTION_TYPE_AGE:
                if (!priv->sh->aso_age_mng)
                        return rte_flow_error_set(err, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
-                                            "shared age action not supported");
+                                               "Indirect age action not supported");
                return flow_dv_validate_action_age(0, action, dev, err);
+       case RTE_FLOW_ACTION_TYPE_COUNT:
+               /*
+                * There are two mechanisms for sharing a count action:
+                * the old one uses the action's shared field, while the
+                * new one uses the indirect action API.
+                * This validation makes sure the two mechanisms
+                * are not combined.
+                */
+               if (is_shared_action_count(action))
+                       return rte_flow_error_set(err, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL,
+                                                 "Mixing shared and indirect counters is not supported");
+               return flow_dv_validate_action_count(dev, true, 0, err);
+       case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+               if (!priv->sh->ct_aso_en)
+                       return rte_flow_error_set(err, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                       "ASO CT is not supported");
+               return mlx5_validate_action_ct(dev, action->conf, err);
        default:
                return rte_flow_error_set(err, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
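
The COUNT case in the hunk above rejects configurations that mix the legacy shared flag with the indirect action API; is_shared_action_count() presumably tests the legacy flag. A sketch of the intended mutual exclusion, using an illustrative conf type rather than the rte_flow definition:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-in for the count action configuration. */
struct count_conf {
	unsigned int shared:1; /* Legacy sharing flag. */
};

/*
 * Reject creating an indirect counter from a conf that already uses
 * the legacy sharing mechanism, so the two schemes never combine.
 */
static bool
count_conf_valid_for_indirect(const struct count_conf *conf)
{
	return conf == NULL || !conf->shared;
}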
@@ -15464,6 +16735,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .destroy_policy_rules = flow_dv_destroy_policy_rules,
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
+       .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+       .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,