net/mlx5: replace flow list with indexed pool
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 10ca342..0b48024 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -23,6 +23,7 @@
 #include <rte_mpls.h>
 #include <rte_mtr.h>
 #include <rte_mtr_driver.h>
+#include <rte_tailq.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
@@ -413,6 +414,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 {
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
+       uint32_t carry_b = 0;
 
        /*
         * The item and mask are provided in big-endian format.
@@ -422,10 +424,12 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
-               unsigned int size_b;
-               unsigned int off_b;
+               uint32_t size_b;
+               uint32_t off_b;
                uint32_t mask;
                uint32_t data;
+               bool next_field = true;
+               bool next_dcopy = true;
 
                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
@@ -439,19 +443,17 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
-               off_b = rte_bsf32(mask);
+               off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
-               size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
-                       .length = size_b,
+                       .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
+                               0 : size_b,
                };
-               /* Convert entire record to expected big-endian format. */
-               actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
@@ -459,7 +461,31 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
-                       ++dcopy;
+                       /*
+                        * Destination field overflow. Copy leftovers of
+                        * a source field to the next destination field.
+                        */
+                       carry_b = 0;
+                       if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
+                           dcopy->size != 0) {
+                               actions[i].length =
+                                       dcopy->size * CHAR_BIT - dcopy->offset;
+                               carry_b = actions[i].length;
+                               next_field = false;
+                       }
+                       /*
+                        * Not enough bits in a source field to fill a
+                        * destination field. Switch to the next source.
+                        */
+                       if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
+                           (size_b == field->size * CHAR_BIT - off_b)) {
+                               actions[i].length =
+                                       field->size * CHAR_BIT - off_b;
+                               dcopy->offset += actions[i].length;
+                               next_dcopy = false;
+                       }
+                       if (next_dcopy)
+                               ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
@@ -468,8 +494,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
+               /* Convert entire record to expected big-endian format. */
+               actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+               if (next_field)
+                       ++field;
                ++i;
-               ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
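As a side illustration (not part of the patch), the carry_b/next_field/next_dcopy bookkeeping above lets one logical copy span destination fields of different widths: the source field advances only once it is fully consumed, the destination field only once it is full. The standalone sketch below mimics that accounting with hypothetical field widths and no mlx5 types; split_copy and its arguments are made up for illustration.

#include <stdio.h>

/* Split a copy between source and destination fields of differing widths. */
static void
split_copy(const unsigned int *src, unsigned int n_src,
	   const unsigned int *dst, unsigned int n_dst)
{
	unsigned int s = 0, d = 0;
	unsigned int src_off = 0, dst_off = 0;

	while (s < n_src && d < n_dst) {
		unsigned int src_left = src[s] - src_off;
		unsigned int dst_left = dst[d] - dst_off;
		unsigned int len = src_left < dst_left ? src_left : dst_left;

		printf("copy %u bits: src[%u]+%u -> dst[%u]+%u\n",
		       len, s, src_off, d, dst_off);
		src_off += len;
		dst_off += len;
		if (src_off == src[s]) {	/* source exhausted: next source field */
			src_off = 0;
			s++;
		}
		if (dst_off == dst[d]) {	/* destination full: next destination field */
			dst_off = 0;
			d++;
		}
	}
}

int
main(void)
{
	/* e.g. a 48-bit MAC source copied into a 32-bit and a 16-bit register */
	const unsigned int src[] = {48};
	const unsigned int dst[] = {32, 16};

	split_copy(src, 1, dst, 2);
	return 0;
}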
@@ -1239,8 +1268,8 @@ flow_dv_convert_action_set_meta
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
 {
-       uint32_t data = conf->data;
-       uint32_t mask = conf->mask;
+       uint32_t mask = rte_cpu_to_be_32(conf->mask);
+       uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
@@ -1253,25 +1282,14 @@ flow_dv_convert_action_set_meta
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
-       /*
-        * In datapath code there is no endianness
-        * coversions for perfromance reasons, all
-        * pattern conversions are done in rte_flow.
-        */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
-               uint32_t shl_c0;
+               uint32_t shl_c0 = rte_bsf32(msk_c0);
 
-               MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
-               shl_c0 = rte_bsf32(msk_c0);
-#else
-               shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
-               mask <<= shl_c0;
-               data <<= shl_c0;
-               MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+               data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+               mask = rte_cpu_to_be_32(mask) & msk_c0;
+               mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
@@ -1356,7 +1374,8 @@ flow_dv_convert_action_modify_ipv6_dscp
 }
 
 static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_dev_config *config,
+                          enum rte_flow_field_id field)
 {
        switch (field) {
        case RTE_FLOW_FIELD_START:
@@ -1404,7 +1423,12 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
        case RTE_FLOW_FIELD_MARK:
                return 24;
        case RTE_FLOW_FIELD_META:
-               return 32;
+               if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
+                       return 16;
+               else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
+                       return 32;
+               else
+                       return 0;
        case RTE_FLOW_FIELD_POINTER:
        case RTE_FLOW_FIELD_VALUE:
                return 64;
@@ -1424,7 +1448,10 @@ mlx5_flow_field_id_to_modify_info
                 const struct rte_flow_attr *attr,
                 struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
        uint32_t idx = 0;
+       uint32_t off = 0;
        uint64_t val = 0;
        switch (data->field) {
        case RTE_FLOW_FIELD_START:
@@ -1432,61 +1459,63 @@ mlx5_flow_field_id_to_modify_info
                MLX5_ASSERT(false);
                break;
        case RTE_FLOW_FIELD_MAC_DST:
+               off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
-                       if (data->offset < 32) {
-                               info[idx] = (struct field_modify_info){4, 0,
-                                               MLX5_MODI_OUT_DMAC_47_16};
-                               if (width < 32) {
-                                       mask[idx] =
-                                               rte_cpu_to_be_32(0xffffffff >>
-                                                                (32 - width));
+                       if (data->offset < 16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                               MLX5_MODI_OUT_DMAC_15_0};
+                               if (width < 16) {
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                                (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
-                                       width -= 32;
+                                       mask[idx] = RTE_BE16(0xffff);
+                                       width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
-                       info[idx] = (struct field_modify_info){2, 4 * idx,
-                                               MLX5_MODI_OUT_DMAC_15_0};
-                       mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-               } else {
-                       if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 0,
+                       info[idx] = (struct field_modify_info){4, 4 * idx,
                                                MLX5_MODI_OUT_DMAC_47_16};
-                       info[idx] = (struct field_modify_info){2, 0,
+                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+                                                     (32 - width)) << off);
+               } else {
+                       if (data->offset < 16)
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_DMAC_15_0};
+                       info[idx] = (struct field_modify_info){4, off,
+                                               MLX5_MODI_OUT_DMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_MAC_SRC:
+               off = data->offset > 16 ? data->offset - 16 : 0;
                if (mask) {
-                       if (data->offset < 32) {
-                               info[idx] = (struct field_modify_info){4, 0,
-                                               MLX5_MODI_OUT_SMAC_47_16};
-                               if (width < 32) {
-                                       mask[idx] =
-                                               rte_cpu_to_be_32(0xffffffff >>
-                                                               (32 - width));
+                       if (data->offset < 16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                               MLX5_MODI_OUT_SMAC_15_0};
+                               if (width < 16) {
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                                (16 - width));
                                        width = 0;
                                } else {
-                                       mask[idx] = RTE_BE32(0xffffffff);
-                                       width -= 32;
+                                       mask[idx] = RTE_BE16(0xffff);
+                                       width -= 16;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
-                       info[idx] = (struct field_modify_info){2, 4 * idx,
-                                               MLX5_MODI_OUT_SMAC_15_0};
-                       mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-               } else {
-                       if (data->offset < 32)
-                               info[idx++] = (struct field_modify_info){4, 0,
+                       info[idx] = (struct field_modify_info){4, 4 * idx,
                                                MLX5_MODI_OUT_SMAC_47_16};
-                       info[idx] = (struct field_modify_info){2, 0,
+                       mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+                                                     (32 - width)) << off);
+               } else {
+                       if (data->offset < 16)
+                               info[idx++] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_SMAC_15_0};
+                       info[idx] = (struct field_modify_info){4, off,
+                                               MLX5_MODI_OUT_SMAC_47_16};
                }
                break;
        case RTE_FLOW_FIELD_VLAN_TYPE:
@@ -1777,17 +1806,28 @@ mlx5_flow_field_id_to_modify_info
                break;
        case RTE_FLOW_FIELD_META:
                {
+                       unsigned int xmeta = config->dv_xmeta_en;
                        int reg = flow_dv_get_metadata_reg(dev, attr, error);
                        if (reg < 0)
                                return;
                        MLX5_ASSERT(reg != REG_NON);
                        MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
-                       info[idx] = (struct field_modify_info){4, 0,
-                                               reg_to_field[reg]};
-                       if (mask)
-                               mask[idx] =
-                                       rte_cpu_to_be_32(0xffffffff >>
-                                                        (32 - width));
+                       if (xmeta == MLX5_XMETA_MODE_META16) {
+                               info[idx] = (struct field_modify_info){2, 0,
+                                                       reg_to_field[reg]};
+                               if (mask)
+                                       mask[idx] = rte_cpu_to_be_16(0xffff >>
+                                                               (16 - width));
+                       } else if (xmeta == MLX5_XMETA_MODE_META32) {
+                               info[idx] = (struct field_modify_info){4, 0,
+                                                       reg_to_field[reg]};
+                               if (mask)
+                                       mask[idx] =
+                                               rte_cpu_to_be_32(0xffffffff >>
+                                                               (32 - width));
+                       } else {
+                               MLX5_ASSERT(false);
+                       }
                }
                break;
        case RTE_FLOW_FIELD_POINTER:
@@ -1799,7 +1839,12 @@ mlx5_flow_field_id_to_modify_info
                        val = data->value;
                for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
                        if (mask[idx]) {
-                               if (dst_width > 16) {
+                               if (dst_width == 48) {
+                                       /* Special case for MAC addresses. */
+                                       value[idx] = rte_cpu_to_be_16(val);
+                                       val >>= 16;
+                                       dst_width -= 16;
+                               } else if (dst_width > 16) {
                                        value[idx] = rte_cpu_to_be_32(val);
                                        val >>= 32;
                                } else if (dst_width > 8) {
@@ -1845,6 +1890,8 @@ flow_dv_convert_action_modify_field
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_modify_field *conf =
                (const struct rte_flow_action_modify_field *)(action->conf);
        struct rte_flow_item item;
@@ -1855,7 +1902,8 @@ flow_dv_convert_action_modify_field
        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
        uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
        uint32_t type;
-       uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
+       uint32_t dst_width = mlx5_flow_item_field_width(config,
+                                                       conf->dst.field);
 
        if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
                conf->src.field == RTE_FLOW_FIELD_VALUE) {
@@ -4710,10 +4758,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_action_modify_field *action_modify_field =
                action->conf;
-       uint32_t dst_width =
-               mlx5_flow_item_field_width(action_modify_field->dst.field);
-       uint32_t src_width =
-               mlx5_flow_item_field_width(action_modify_field->src.field);
+       uint32_t dst_width = mlx5_flow_item_field_width(config,
+                               action_modify_field->dst.field);
+       uint32_t src_width = mlx5_flow_item_field_width(config,
+                               action_modify_field->src.field);
 
        ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
        if (ret)
@@ -4766,8 +4814,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
                                        "inner header fields modification"
                                        " is not supported");
        }
-       if (action_modify_field->dst.field ==
-           action_modify_field->src.field)
+       if ((action_modify_field->dst.field ==
+            action_modify_field->src.field) &&
+           (action_modify_field->dst.level ==
+            action_modify_field->src.level))
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, action,
                                "source and destination fields"
@@ -4987,6 +5037,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
  *   Pointer to the meter action.
  * @param[in] attr
  *   Attributes of flow that includes this action.
+ * @param[in] port_id_item
+ *   Pointer to item indicating port id.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -4998,6 +5050,7 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
                                uint64_t action_flags,
                                const struct rte_flow_action *action,
                                const struct rte_flow_attr *attr,
+                               const struct rte_flow_item *port_id_item,
                                bool *def_policy,
                                struct rte_flow_error *error)
 {
@@ -5068,6 +5121,37 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
                                          "Flow attributes domain "
                                          "have a conflict with current "
                                          "meter domain attributes");
+               if (attr->transfer && mtr_policy->dev) {
+                       /**
+                        * When the policy has a port_id fate action,
+                        * the flow should have the same src port as the policy.
+                        */
+                       struct mlx5_priv *policy_port_priv =
+                                       mtr_policy->dev->data->dev_private;
+                       int32_t flow_src_port = priv->representor_id;
+
+                       if (port_id_item) {
+                               const struct rte_flow_item_port_id *spec =
+                                                       port_id_item->spec;
+                               struct mlx5_priv *port_priv =
+                                       mlx5_port_to_eswitch_info(spec->id,
+                                                                 false);
+                               if (!port_priv)
+                                       return rte_flow_error_set(error,
+                                               rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+                                               spec,
+                                               "Failed to get port info.");
+                               flow_src_port = port_priv->representor_id;
+                       }
+                       if (flow_src_port != policy_port_priv->representor_id)
+                               return rte_flow_error_set(error,
+                                               rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+                                               NULL,
+                                               "Flow and meter policy "
+                                               "have different src port.");
+               }
                *def_policy = false;
        }
        return 0;
@@ -5480,7 +5564,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                                                  "E-Switch must has a dest "
                                                  "port for mirroring");
                if (!priv->config.hca_attr.reg_c_preserve &&
-                    priv->representor_id != -1)
+                    priv->representor_id != UINT16_MAX)
                        *fdb_mirror_limit = 1;
        }
        /* Continue validation for Xcap actions.*/
@@ -6086,28 +6170,33 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
                return;
        cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
        MLX5_ASSERT(pool);
-       /*
-        * If the counter action is shared by ID, the l3t_clear_entry function
-        * reduces its references counter. If after the reduction the action is
-        * still referenced, the function returns here and does not release it.
-        */
-       if (IS_LEGACY_SHARED_CNT(counter) &&
-           mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
-               return;
-       /*
-        * If the counter action is shared by indirect action API, the atomic
-        * function reduces its references counter. If after the reduction the
-        * action is still referenced, the function returns here and does not
-        * release it.
-        * When the counter action is not shared neither by ID nor by indirect
-        * action API, shared info is 1 before the reduction, so this condition
-        * is failed and function doesn't return here.
-        */
-       if (!IS_LEGACY_SHARED_CNT(counter) &&
-           __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
-               return;
-       if (pool->is_aged)
+       if (pool->is_aged) {
                flow_dv_counter_remove_from_age(dev, counter, cnt);
+       } else {
+               /*
+                * If the counter action is shared by ID, the l3t_clear_entry
+                * function reduces its reference counter. If after the
+                * reduction the action is still referenced, the function
+                * returns here and does not release it.
+                */
+               if (IS_LEGACY_SHARED_CNT(counter) &&
+                   mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+                                        cnt->shared_info.id))
+                       return;
+               /*
+                * If the counter action is shared by the indirect action API,
+                * the atomic function reduces its reference counter.
+                * If after the reduction the action is still referenced, the
+                * function returns here and does not release it.
+                * When the counter action is shared neither by ID nor by the
+                * indirect action API, shared info is 1 before the reduction,
+                * so this condition fails and the function doesn't return here.
+                */
+               if (!IS_LEGACY_SHARED_CNT(counter) &&
+                   __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+                                      __ATOMIC_RELAXED))
+                       return;
+       }
        cnt->pool = pool;
        /*
         * Put the counter back to list to be updated in none fallback mode.
@@ -6636,6 +6725,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        };
        const struct rte_eth_hairpin_conf *conf;
        const struct rte_flow_item *rule_items = items;
+       const struct rte_flow_item *port_id_item = NULL;
        bool def_policy = false;
 
        if (items == NULL)
@@ -6677,6 +6767,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_PORT_ID;
+                       port_id_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
@@ -6839,7 +6930,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
-                       ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+                       ret = mlx5_flow_validate_item_vxlan(dev, items,
+                                                           item_flags, attr,
                                                            error);
                        if (ret < 0)
                                return ret;
@@ -7414,6 +7506,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
                                                              actions, attr,
+                                                             port_id_item,
                                                              &def_policy,
                                                              error);
                        if (ret < 0)
@@ -7524,7 +7617,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count all modify-header actions as one action. */
-                       if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
+                       if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
                        rw_act_num += ret;
@@ -7778,6 +7871,8 @@ flow_dv_prepare(struct rte_eth_dev *dev,
 
        MLX5_ASSERT(wks);
        wks->skip_matcher_reg = 0;
+       wks->policy = NULL;
+       wks->final_policy = NULL;
        /* In case of corrupting the memory. */
        if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
                rte_flow_error_set(error, ENOSPC,
@@ -7798,15 +7893,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
        memset(dev_flow, 0, sizeof(*dev_flow));
        dev_flow->handle = dev_handle;
        dev_flow->handle_idx = handle_idx;
-       /*
-        * In some old rdma-core releases, before continuing, a check of the
-        * length of matching parameter will be done at first. It needs to use
-        * the length without misc4 param. If the flow has misc4 support, then
-        * the length needs to be adjusted accordingly. Each param member is
-        * aligned with a 64B boundary naturally.
-        */
-       dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
-                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4);
+       dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
        dev_flow->ingress = attr->ingress;
        dev_flow->dv.transfer = attr->transfer;
        return dev_flow;
@@ -8587,6 +8674,10 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
 /**
  * Add VXLAN item to matcher and to the value.
  *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Flow rule attributes.
  * @param[in, out] matcher
  *   Flow matcher.
  * @param[in, out] key
@@ -8597,7 +8688,9 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
  *   Item is inner pattern.
  */
 static void
-flow_dv_translate_item_vxlan(void *matcher, void *key,
+flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
+                            const struct rte_flow_attr *attr,
+                            void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
 {
@@ -8605,13 +8698,16 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
-       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
-       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
-       char *vni_m;
-       char *vni_v;
+       void *misc5_m;
+       void *misc5_v;
+       uint32_t *tunnel_header_v;
+       uint32_t *tunnel_header_m;
        uint16_t dport;
-       int size;
-       int i;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item_vxlan nic_mask = {
+               .vni = "\xff\xff\xff",
+               .rsvd1 = 0xff,
+       };
 
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
@@ -8630,14 +8726,52 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
        }
        if (!vxlan_v)
                return;
-       if (!vxlan_m)
-               vxlan_m = &rte_flow_item_vxlan_mask;
-       size = sizeof(vxlan_m->vni);
-       vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
-       vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
-       memcpy(vni_m, vxlan_m->vni, size);
-       for (i = 0; i < size; ++i)
-               vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+       if (!vxlan_m) {
+               if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
+                   (attr->group && !priv->sh->misc5_cap))
+                       vxlan_m = &rte_flow_item_vxlan_mask;
+               else
+                       vxlan_m = &nic_mask;
+       }
+       if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
+           ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
+               void *misc_m;
+               void *misc_v;
+               char *vni_m;
+               char *vni_v;
+               int size;
+               int i;
+               misc_m = MLX5_ADDR_OF(fte_match_param,
+                                     matcher, misc_parameters);
+               misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+               size = sizeof(vxlan_m->vni);
+               vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
+               vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
+               memcpy(vni_m, vxlan_m->vni, size);
+               for (i = 0; i < size; ++i)
+                       vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+               return;
+       }
+       misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
+       misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
+       tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+                                                  misc5_v,
+                                                  tunnel_header_1);
+       tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+                                                  misc5_m,
+                                                  tunnel_header_1);
+       *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
+                          (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
+                          (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
+       if (*tunnel_header_v)
+               *tunnel_header_m = vxlan_m->vni[0] |
+                       vxlan_m->vni[1] << 8 |
+                       vxlan_m->vni[2] << 16;
+       else
+               *tunnel_header_m = 0x0;
+       *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
+       if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
+               *tunnel_header_m |= vxlan_m->rsvd1 << 24;
 }
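For reference (not from the patch), the misc5 path above packs the 24-bit VXLAN VNI and the last reserved byte into a single 32-bit tunnel header word, least significant VNI byte first. The helper below is a minimal, hypothetical illustration of that byte layout; vxlan_tunnel_header is not a driver symbol.

#include <stdint.h>
#include <stdio.h>

static uint32_t
vxlan_tunnel_header(const uint8_t vni[3], uint8_t rsvd1)
{
	/* vni[0] is the most significant VNI byte on the wire. */
	return (uint32_t)vni[0] |
	       (uint32_t)vni[1] << 8 |
	       (uint32_t)vni[2] << 16 |
	       (uint32_t)rsvd1 << 24;
}

int
main(void)
{
	const uint8_t vni[3] = {0x12, 0x34, 0x56};

	printf("0x%08x\n", vxlan_tunnel_header(vni, 0xff)); /* prints 0xff563412 */
	return 0;
}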
 
 /**
@@ -8978,14 +9112,13 @@ flow_dv_translate_item_mpls(void *matcher, void *key,
                         MLX5_UDP_PORT_MPLS);
                break;
        case MLX5_FLOW_LAYER_GRE:
+               /* Fall-through. */
+       case MLX5_FLOW_LAYER_GRE_KEY:
                MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
                MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                         RTE_ETHER_TYPE_MPLS);
                break;
        default:
-               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
-                        IPPROTO_MPLS);
                break;
        }
        if (!in_mpls_v)
@@ -9195,27 +9328,14 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
                if (reg < 0)
                        return;
                MLX5_ASSERT(reg != REG_NON);
-               /*
-                * In datapath code there is no endianness
-                * coversions for perfromance reasons, all
-                * pattern conversions are done in rte_flow.
-                */
-               value = rte_cpu_to_be_32(value);
-               mask = rte_cpu_to_be_32(mask);
                if (reg == REG_C_0) {
                        struct mlx5_priv *priv = dev->data->dev_private;
                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                        uint32_t shl_c0 = rte_bsf32(msk_c0);
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-                       uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
 
-                       value >>= shr_c0;
-                       mask >>= shr_c0;
-#endif
-                       value <<= shl_c0;
+                       mask &= msk_c0;
                        mask <<= shl_c0;
-                       MLX5_ASSERT(msk_c0);
-                       MLX5_ASSERT(!(~msk_c0 & mask));
+                       value <<= shl_c0;
                }
                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
        }
@@ -9812,9 +9932,32 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
+       match_criteria_enable |=
+               (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
+               MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
        return match_criteria_enable;
 }
 
+static void
+__flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
+{
+       /*
+        * Check flow matching criteria first, subtract misc5/4 length if flow
+        * doesn't own misc5/4 parameters. In some old rdma-core releases,
+        * misc5/4 are not supported, and matcher creation failure is expected
+        * w/o subtraction. If misc5 is provided, misc4 must be counted in since
+        * misc5 is right after misc4.
+        */
+       if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
+               *size = MLX5_ST_SZ_BYTES(fte_match_param) -
+                       MLX5_ST_SZ_BYTES(fte_match_set_misc5);
+               if (!(match_criteria & (1 <<
+                       MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
+                       *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
+               }
+       }
+}
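A minimal sketch of the invariant this helper enforces, using hypothetical sizes instead of the MLX5_ST_SZ_BYTES() values: because misc5 sits right after misc4 in the match buffer, the size may shrink by misc5 alone or by misc5 plus misc4, but never by misc4 while misc5 is still present. adjust_buf_size and the constants are illustrative only.

#include <stddef.h>
#include <stdio.h>

#define PARAM_SZ 512	/* stand-in for the full fte_match_param size */
#define MISC4_SZ 64	/* stand-in for fte_match_set_misc4 */
#define MISC5_SZ 64	/* stand-in for fte_match_set_misc5 */

static size_t
adjust_buf_size(int has_misc4, int has_misc5)
{
	size_t size = PARAM_SZ;

	if (!has_misc5) {
		size -= MISC5_SZ;	/* trim misc5 from the tail */
		if (!has_misc4)
			size -= MISC4_SZ;	/* and misc4 if unused too */
	}
	return size;
}

int
main(void)
{
	printf("%zu %zu %zu\n",
	       adjust_buf_size(0, 0),	/* 384: neither present */
	       adjust_buf_size(1, 0),	/* 448: misc4 only */
	       adjust_buf_size(0, 1));	/* 512: misc5 keeps the full size */
	return 0;
}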
+
 struct mlx5_hlist_entry *
 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
 {
@@ -10081,6 +10224,8 @@ flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
        *cache = *ref;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache->mask.buf);
+       __flow_dv_adjust_buf_size(&ref->mask.size,
+                                 dv_attr.match_criteria_enable);
        dv_attr.priority = ref->priority;
        if (tbl->is_egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
@@ -10130,7 +10275,6 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
                .error = error,
                .data = ref,
        };
-
        /**
         * tunnel offload API requires this registration for cases when
         * tunnel match rule was inserted before tunnel set rule.
@@ -10306,7 +10450,7 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "No eswitch info was found for port");
-#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
        /*
         * This parameter is transferred to
         * mlx5dv_dr_action_create_dest_ib_port().
@@ -11483,38 +11627,35 @@ flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 }
 
 /**
- * Create a age action using ASO mechanism.
+ * Initialize flow ASO age parameters.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] age
- *   Pointer to the aging action configuration.
- * @param[out] error
- *   Pointer to the error structure.
+ * @param[in] age_idx
+ *   Index of ASO age action.
+ * @param[in] context
+ *   Pointer to flow counter age context.
+ * @param[in] timeout
+ *   Aging timeout in seconds.
  *
- * @return
- *   Index to flow counter on success, 0 otherwise.
  */
-static uint32_t
-flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
-                                const struct rte_flow_action_age *age,
-                                struct rte_flow_error *error)
+static void
+flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
+                           uint32_t age_idx,
+                           void *context,
+                           uint32_t timeout)
 {
-       uint32_t age_idx = 0;
        struct mlx5_aso_age_action *aso_age;
 
-       age_idx = flow_dv_aso_age_alloc(dev, error);
-       if (!age_idx)
-               return 0;
        aso_age = flow_aso_age_get_by_idx(dev, age_idx);
-       aso_age->age_params.context = age->context;
-       aso_age->age_params.timeout = age->timeout;
+       MLX5_ASSERT(aso_age);
+       aso_age->age_params.context = context;
+       aso_age->age_params.timeout = timeout;
        aso_age->age_params.port_id = dev->data->port_id;
        __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
                         __ATOMIC_RELAXED);
        __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
                         __ATOMIC_RELAXED);
-       return age_idx;
 }
 
 static void
@@ -11551,7 +11692,7 @@ flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
        } else if (mask->l4_csum_ok) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
                         mask->l4_csum_ok);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
                         value->l4_csum_ok);
        }
 }
@@ -11992,8 +12133,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
        uint64_t action_flags = 0;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
-                       .size = sizeof(matcher.mask.buf) -
-                               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+                       .size = sizeof(matcher.mask.buf),
                },
        };
        int actions_n = 0;
@@ -12613,7 +12753,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                if ((non_shared_age &&
                                     count && !count->shared) ||
                                    !(priv->sh->flow_hit_aso_en &&
-                                     attr->group)) {
+                                     (attr->group || attr->transfer))) {
                                        /* Creates age by counters. */
                                        cnt_act = flow_dv_prepare_counter
                                                                (dev, dev_flow,
@@ -12627,17 +12767,17 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        break;
                                }
                                if (!flow->age && non_shared_age) {
-                                       flow->age =
-                                               flow_dv_translate_create_aso_age
-                                                               (dev,
-                                                                non_shared_age,
-                                                                error);
+                                       flow->age = flow_dv_aso_age_alloc
+                                                               (dev, error);
                                        if (!flow->age)
-                                               return rte_flow_error_set
-                                                   (error, rte_errno,
-                                                    RTE_FLOW_ERROR_TYPE_ACTION,
-                                                    NULL,
-                                                    "can't create ASO age action");
+                                               return -rte_errno;
+                                       flow_dv_aso_age_params_init
+                                                   (dev, flow->age,
+                                                    non_shared_age->context ?
+                                                    non_shared_age->context :
+                                                    (void *)(uintptr_t)
+                                                    (dev_flow->flow_idx),
+                                                    non_shared_age->timeout);
                                }
                                age_act = flow_aso_age_get_by_idx(dev,
                                                                  flow->age);
@@ -12800,7 +12940,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
-                       flow_dv_translate_item_vxlan(match_mask, match_value,
+                       flow_dv_translate_item_vxlan(dev, attr,
+                                                    match_mask, match_value,
                                                     items, tunnel);
                        matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        last_item = MLX5_FLOW_LAYER_VXLAN;
@@ -12898,10 +13039,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                                NULL,
                                                "cannot create eCPRI parser");
                        }
-                       /* Adjust the length matcher and device flow value. */
-                       matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
-                       dev_flow->dv.value.size =
-                                       MLX5_ST_SZ_BYTES(fte_match_param);
                        flow_dv_translate_item_ecpri(dev, match_mask,
                                                     match_value, items);
                        /* No other protocol should follow eCPRI layer. */
@@ -13018,6 +13155,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                    matcher.mask.size);
        matcher.priority = mlx5_get_matcher_priority(dev, attr,
                                        matcher.priority);
+       /**
+        * When creating a meter drop flow in the drop table with the
+        * original 5-tuple match, the matcher priority should be lower
+        * than the mtr_id matcher's.
+        */
+       if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
+           dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
+           matcher.priority <= MLX5_REG_BITS)
+               matcher.priority += MLX5_REG_BITS;
        /* reserved field no needs to be set to 0 here. */
        tbl_key.is_fdb = attr->transfer;
        tbl_key.is_egress = attr->egress;
@@ -13202,6 +13348,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        int idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
+       uint8_t misc_mask;
 
        MLX5_ASSERT(wks);
        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
@@ -13272,14 +13419,20 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                        }
                        dv->actions[n++] = priv->sh->default_miss_action;
                }
+               misc_mask = flow_dv_matcher_enable(dv->value.buf);
+               __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
                err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
                                               (void *)&dv->value, n,
                                               dv->actions, &dh->drv_flow);
                if (err) {
-                       rte_flow_error_set(error, errno,
-                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                          NULL,
-                                          "hardware refuses to create flow");
+                       rte_flow_error_set
+                               (error, errno,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               (!priv->config.allow_duplicate_pattern &&
+                               errno == EEXIST) ?
+                               "duplicating pattern is not allowed" :
+                               "hardware refuses to create flow");
                        goto error;
                }
                if (priv->vmwa_context &&
@@ -13811,6 +13964,11 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                    dev_handle->split_flow_id)
                        mlx5_ipool_free(fm->flow_ipool,
                                        dev_handle->split_flow_id);
+               else if (dev_handle->split_flow_id &&
+                   !dev_handle->is_meter_flow_id)
+                       mlx5_ipool_free(priv->sh->ipool
+                                       [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+                                       dev_handle->split_flow_id);
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                           tmp_idx);
        }
@@ -14199,9 +14357,10 @@ flow_dv_action_create(struct rte_eth_dev *dev,
                      const struct rte_flow_action *action,
                      struct rte_flow_error *err)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint32_t age_idx = 0;
        uint32_t idx = 0;
        uint32_t ret = 0;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_RSS:
@@ -14210,17 +14369,22 @@ flow_dv_action_create(struct rte_eth_dev *dev,
                       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
                break;
        case RTE_FLOW_ACTION_TYPE_AGE:
-               ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
-               idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
-                      MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
-               if (ret) {
-                       struct mlx5_aso_age_action *aso_age =
-                                             flow_aso_age_get_by_idx(dev, ret);
-
-                       if (!aso_age->age_params.context)
-                               aso_age->age_params.context =
-                                                        (void *)(uintptr_t)idx;
+               age_idx = flow_dv_aso_age_alloc(dev, err);
+               if (!age_idx) {
+                       ret = -rte_errno;
+                       break;
                }
+               idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+                      MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
+               flow_dv_aso_age_params_init(dev, age_idx,
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->context ?
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->context :
+                                       (void *)(uintptr_t)idx,
+                                       ((const struct rte_flow_action_age *)
+                                               action->conf)->timeout);
+               ret = age_idx;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
@@ -14500,26 +14664,38 @@ static void
 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
                             struct mlx5_flow_meter_sub_policy *sub_policy)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_tbl_data_entry *tbl;
-       int i;
+       struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
+       struct mlx5_flow_meter_info *next_fm;
+       struct mlx5_sub_policy_color_rule *color_rule;
+       void *tmp;
+       uint32_t i;
 
        for (i = 0; i < RTE_COLORS; i++) {
-               if (sub_policy->color_rule[i]) {
-                       claim_zero(mlx5_flow_os_destroy_flow
-                               (sub_policy->color_rule[i]));
-                       sub_policy->color_rule[i] = NULL;
-               }
-               if (sub_policy->color_matcher[i]) {
-                       tbl = container_of(sub_policy->color_matcher[i]->tbl,
-                               typeof(*tbl), tbl);
+               next_fm = NULL;
+               if (i == RTE_COLOR_GREEN && policy &&
+                   policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
+                       next_fm = mlx5_flow_meter_find(priv,
+                                       policy->act_cnt[i].next_mtr_id, NULL);
+               TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
+                                  next_port, tmp) {
+                       claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
+                       tbl = container_of(color_rule->matcher->tbl,
+                                       typeof(*tbl), tbl);
                        mlx5_cache_unregister(&tbl->matchers,
-                                     &sub_policy->color_matcher[i]->entry);
-                       sub_policy->color_matcher[i] = NULL;
+                                               &color_rule->matcher->entry);
+                       TAILQ_REMOVE(&sub_policy->color_rules[i],
+                                       color_rule, next_port);
+                       mlx5_free(color_rule);
+                       if (next_fm)
+                               mlx5_flow_meter_detach(priv, next_fm);
                }
        }
        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
                if (sub_policy->rix_hrxq[i]) {
-                       mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
+                       if (policy && !policy->is_hierarchy)
+                               mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
                        sub_policy->rix_hrxq[i] = 0;
                }
                if (sub_policy->jump_tbl[i]) {
@@ -14663,6 +14839,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                            sizeof(struct mlx5_modification_cmd) *
                            (MLX5_MAX_MODIFY_NUM + 1)];
        } mhdr_dummy;
+       struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
 
        egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
        transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
@@ -14670,6 +14847,11 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
        memset(&dev_flow, 0, sizeof(struct mlx5_flow));
        memset(&port_id_action, 0,
                sizeof(struct mlx5_flow_dv_port_id_action_resource));
+       memset(mhdr_res, 0, sizeof(*mhdr_res));
+       mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
+                                       egress ?
+                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        dev_flow.handle = &dh;
        dev_flow.dv.port_id_action = &port_id_action;
        dev_flow.external = true;
@@ -14704,20 +14886,10 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                MLX5_ASSERT(dev_flow.dv.tag_resource);
                                act_cnt->rix_mark =
                                        dev_flow.handle->dvh.rix_tag;
-                               if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-                                       dev_flow.handle->rix_hrxq =
-                       mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                break;
                        }
                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
-                       {
-                               struct mlx5_flow_dv_modify_hdr_resource
-                                       *mhdr_res = &mhdr_dummy.res;
-
                                if (i >= MLX5_MTR_RTE_COLORS)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
@@ -14725,12 +14897,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                          NULL,
                                          "cannot create policy "
                                          "set tag action for this color");
-                               memset(mhdr_res, 0, sizeof(*mhdr_res));
-                               mhdr_res->ft_type = transfer ?
-                                       MLX5DV_FLOW_TABLE_TYPE_FDB :
-                                       egress ?
-                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
-                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
                                if (flow_dv_convert_action_set_tag
                                (dev, mhdr_res,
                                (const struct rte_flow_action_set_tag *)
@@ -14746,26 +14912,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "cannot find policy "
                                        "set tag action");
-                               /* create modify action if needed. */
-                               dev_flow.dv.group = 1;
-                               if (flow_dv_modify_hdr_resource_register
-                                       (dev, mhdr_res, &dev_flow, &flow_err))
-                                       return -rte_mtr_error_set(error,
-                                       ENOTSUP,
-                                       RTE_MTR_ERROR_TYPE_METER_POLICY,
-                                       NULL, "cannot register policy "
-                                       "set tag action");
-                               act_cnt->modify_hdr =
-                               dev_flow.handle->dvh.modify_hdr;
-                               if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-                                       dev_flow.handle->rix_hrxq =
-                               mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                                break;
-                       }
                        case RTE_FLOW_ACTION_TYPE_DROP:
                        {
                                struct mlx5_flow_mtr_mng *mtrmng =
@@ -14806,41 +14954,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                        }
                        case RTE_FLOW_ACTION_TYPE_QUEUE:
                        {
-                               struct mlx5_hrxq *hrxq;
-                               uint32_t hrxq_idx;
-                               struct mlx5_flow_rss_desc rss_desc;
-                               struct mlx5_flow_meter_sub_policy *sub_policy =
-                               mtr_policy->sub_policys[domain][0];
-
                                if (i >= MLX5_MTR_RTE_COLORS)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "cannot create policy "
                                        "fate queue for this color");
-                               memset(&rss_desc, 0,
-                                       sizeof(struct mlx5_flow_rss_desc));
-                               rss_desc.queue_num = 1;
-                               rss_desc.const_q = act->conf;
-                               hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
-                                                   &rss_desc, &hrxq_idx);
-                               if (!hrxq)
-                                       return -rte_mtr_error_set(error,
-                                       ENOTSUP,
-                                       RTE_MTR_ERROR_TYPE_METER_POLICY,
-                                       NULL,
-                                       "cannot create policy fate queue");
-                               sub_policy->rix_hrxq[i] = hrxq_idx;
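+                               /*
+                                * Only the queue index is recorded here;
+                                * the hrxq is resolved later when the
+                                * policy rules are created.
+                                */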
+                               act_cnt->queue =
+                               ((const struct rte_flow_action_queue *)
+                                       (act->conf))->index;
                                act_cnt->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                                dev_flow.handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
-                               if (action_flags & MLX5_FLOW_ACTION_MARK ||
-                                   action_flags & MLX5_FLOW_ACTION_SET_TAG) {
-                                       dev_flow.handle->rix_hrxq = hrxq_idx;
-                                       flow_drv_rxq_flags_set(dev,
-                                               dev_flow.handle);
-                               }
+                               mtr_policy->is_queue = 1;
                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                                break;
                        }
@@ -14985,11 +15112,81 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
                        }
+                       case RTE_FLOW_ACTION_TYPE_METER:
+                       {
+                               const struct rte_flow_action_meter *mtr;
+                               struct mlx5_flow_meter_info *next_fm;
+                               struct mlx5_flow_meter_policy *next_policy;
+                               struct rte_flow_action tag_action;
+                               struct mlx5_rte_flow_action_set_tag set_tag;
+                               uint32_t next_mtr_idx = 0;
+
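+                               /*
+                                * A meter action inside the policy forms a
+                                * meter hierarchy: find the next meter and
+                                * make sure it uses a termination policy.
+                                */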
+                               mtr = act->conf;
+                               next_fm = mlx5_flow_meter_find(priv,
+                                                       mtr->mtr_id,
+                                                       &next_mtr_idx);
+                               if (!next_fm)
+                                       return -rte_mtr_error_set(error, EINVAL,
+                                               RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+                                               "Failed to find next meter.");
+                               if (next_fm->def_policy)
+                                       return -rte_mtr_error_set(error, EINVAL,
+                                               RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+                               "Hierarchy only supports termination meter.");
+                               next_policy = mlx5_flow_meter_policy_find(dev,
+                                               next_fm->policy_id, NULL);
+                               MLX5_ASSERT(next_policy);
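+                               /*
+                                * The next meter has a drop counter, so its
+                                * index must be carried in the meter ID
+                                * register for the drop rules to match on.
+                                */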
+                               if (next_fm->drop_cnt) {
+                                       set_tag.id =
+                                               (enum modify_reg)
+                                               mlx5_flow_get_reg_id(dev,
+                                               MLX5_MTR_ID,
+                                               0,
+                                               (struct rte_flow_error *)error);
+                                       set_tag.offset = (priv->mtr_reg_share ?
+                                               MLX5_MTR_COLOR_BITS : 0);
+                                       set_tag.length = (priv->mtr_reg_share ?
+                                              MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
+                                              MLX5_REG_BITS);
+                                       set_tag.data = next_mtr_idx;
+                                       tag_action.type =
+                                               (enum rte_flow_action_type)
+                                               MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+                                       tag_action.conf = &set_tag;
+                                       if (flow_dv_convert_action_set_reg
+                                               (mhdr_res, &tag_action,
+                                               (struct rte_flow_error *)error))
+                                               return -rte_errno;
+                                       action_flags |=
+                                               MLX5_FLOW_ACTION_SET_TAG;
+                               }
+                               act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
+                               act_cnt->next_mtr_id = next_fm->meter_id;
+                               act_cnt->next_sub_policy = NULL;
+                               mtr_policy->is_hierarchy = 1;
+                               mtr_policy->dev = next_policy->dev;
+                               action_flags |=
+                               MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
+                               break;
+                       }
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "action type not supported");
                        }
+                       if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
+                               /* create modify action if needed. */
+                               dev_flow.dv.group = 1;
+                               if (flow_dv_modify_hdr_resource_register
+                                       (dev, mhdr_res, &dev_flow, &flow_err))
+                                       return -rte_mtr_error_set(error,
+                                               ENOTSUP,
+                                               RTE_MTR_ERROR_TYPE_METER_POLICY,
+                                               NULL, "cannot register policy "
+                                               "set tag action");
+                               act_cnt->modify_hdr =
+                                       dev_flow.handle->dvh.modify_hdr;
+                       }
                }
        }
        return 0;
@@ -15342,23 +15539,22 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
                        uint32_t color_reg_c_idx,
                        enum rte_color color, void *matcher_object,
                        int actions_n, void *actions,
-                       bool is_default_policy, void **rule,
-                       const struct rte_flow_attr *attr)
+                       bool match_src_port, const struct rte_flow_item *item,
+                       void **rule, const struct rte_flow_attr *attr)
 {
        int ret;
        struct mlx5_flow_dv_match_params value = {
-               .size = sizeof(value.buf) -
-                       MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               .size = sizeof(value.buf),
        };
        struct mlx5_flow_dv_match_params matcher = {
-               .size = sizeof(matcher.buf) -
-                       MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               .size = sizeof(matcher.buf),
        };
        struct mlx5_priv *priv = dev->data->dev_private;
+       uint8_t misc_mask;
 
-       if (!is_default_policy && (priv->representor || priv->master)) {
+       if (match_src_port && (priv->representor || priv->master)) {
                if (flow_dv_translate_item_port_id(dev, matcher.buf,
-                                                  value.buf, NULL, attr)) {
+                                                  value.buf, item, attr)) {
                        DRV_LOG(ERR,
                        "Failed to create meter policy flow with port.");
                        return -1;
@@ -15368,6 +15564,8 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
                                (enum modify_reg)color_reg_c_idx,
                                rte_col_2_mlx5_col(color),
                                UINT32_MAX);
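+       /* Trim the match value buffer to the enabled criteria fields. */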
+       misc_mask = flow_dv_matcher_enable(value.buf);
+       __flow_dv_adjust_buf_size(&value.size, misc_mask);
        ret = mlx5_flow_os_create_flow(matcher_object,
                        (void *)&value, actions_n, actions, rule);
        if (ret) {
@@ -15383,21 +15581,21 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
                        uint16_t priority,
                        struct mlx5_flow_meter_sub_policy *sub_policy,
                        const struct rte_flow_attr *attr,
-                       bool is_default_policy,
+                       bool match_src_port,
+                       const struct rte_flow_item *item,
+                       struct mlx5_flow_dv_matcher **policy_matcher,
                        struct rte_flow_error *error)
 {
        struct mlx5_cache_entry *entry;
        struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
-                       .size = sizeof(matcher.mask.buf) -
-                               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+                       .size = sizeof(matcher.mask.buf),
                },
                .tbl = tbl_rsc,
        };
        struct mlx5_flow_dv_match_params value = {
-               .size = sizeof(value.buf) -
-                       MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               .size = sizeof(value.buf),
        };
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
@@ -15407,9 +15605,9 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
 
-       if (!is_default_policy && (priv->representor || priv->master)) {
+       if (match_src_port && (priv->representor || priv->master)) {
                if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
-                                                  value.buf, NULL, attr)) {
+                                                  value.buf, item, attr)) {
                        DRV_LOG(ERR,
                        "Failed to register meter drop matcher with port.");
                        return -1;
@@ -15427,7 +15625,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
                DRV_LOG(ERR, "Failed to register meter drop matcher.");
                return -1;
        }
-       sub_policy->color_matcher[priority] =
+       *policy_matcher =
                container_of(entry, struct mlx5_flow_dv_matcher, entry);
        return 0;
 }
@@ -15452,9 +15650,10 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
 static int
 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
                struct mlx5_flow_meter_sub_policy *sub_policy,
-               uint8_t egress, uint8_t transfer, bool is_default_policy,
+               uint8_t egress, uint8_t transfer, bool match_src_port,
                struct mlx5_meter_policy_acts acts[RTE_COLORS])
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_error flow_err;
        uint32_t color_reg_c_idx;
        struct rte_flow_attr attr = {
@@ -15467,6 +15666,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
        };
        int i;
        int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
+       struct mlx5_sub_policy_color_rule *color_rule;
 
        if (ret < 0)
                return -1;
@@ -15484,29 +15684,56 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
        /* Prepare matchers. */
        color_reg_c_idx = ret;
        for (i = 0; i < RTE_COLORS; i++) {
+               TAILQ_INIT(&sub_policy->color_rules[i]);
                if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
                        continue;
+               color_rule = mlx5_malloc(MLX5_MEM_ZERO,
+                               sizeof(struct mlx5_sub_policy_color_rule),
+                               0, SOCKET_ID_ANY);
+               if (!color_rule) {
+                       DRV_LOG(ERR, "No memory to create color rule.");
+                       goto err_exit;
+               }
+               color_rule->src_port = priv->representor_id;
                attr.priority = i;
-               if (!sub_policy->color_matcher[i]) {
-                       /* Create matchers for Color. */
-                       if (__flow_dv_create_policy_matcher(dev,
-                               color_reg_c_idx, i, sub_policy,
-                               &attr, is_default_policy, &flow_err))
-                               return -1;
+               /* Create matchers for Color. */
+               if (__flow_dv_create_policy_matcher(dev,
+                               color_reg_c_idx, i, sub_policy, &attr,
+                               (i != RTE_COLOR_RED ? match_src_port : false),
+                               NULL, &color_rule->matcher, &flow_err)) {
+                       DRV_LOG(ERR, "Failed to create color matcher.");
+                       goto err_exit;
                }
                /* Create flow, matching color. */
-               if (acts[i].actions_n)
-                       if (__flow_dv_create_policy_flow(dev,
+               if (__flow_dv_create_policy_flow(dev,
                                color_reg_c_idx, (enum rte_color)i,
-                               sub_policy->color_matcher[i]->matcher_object,
+                               color_rule->matcher->matcher_object,
                                acts[i].actions_n,
                                acts[i].dv_actions,
-                               is_default_policy,
-                               &sub_policy->color_rule[i],
-                               &attr))
-                               return -1;
+                               (i != RTE_COLOR_RED ? match_src_port : false),
+                               NULL, &color_rule->rule,
+                               &attr)) {
+                       DRV_LOG(ERR, "Failed to create color rule.");
+                       goto err_exit;
+               }
+               TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
+                                 color_rule, next_port);
        }
        return 0;
+err_exit:
+       if (color_rule) {
+               if (color_rule->rule)
+                       mlx5_flow_os_destroy_flow(color_rule->rule);
+               if (color_rule->matcher) {
+                       struct mlx5_flow_tbl_data_entry *tbl =
+                               container_of(color_rule->matcher->tbl,
+                                               typeof(*tbl), tbl);
+                       mlx5_cache_unregister(&tbl->matchers,
+                                               &color_rule->matcher->entry);
+               }
+               mlx5_free(color_rule);
+       }
+       return -1;
 }
 
 static int
@@ -15520,7 +15747,15 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
        struct mlx5_flow_dv_tag_resource *tag;
        struct mlx5_flow_dv_port_id_action_resource *port_action;
        struct mlx5_hrxq *hrxq;
-       uint8_t egress, transfer;
+       struct mlx5_flow_meter_info *next_fm = NULL;
+       struct mlx5_flow_meter_policy *next_policy;
+       struct mlx5_flow_meter_sub_policy *next_sub_policy;
+       struct mlx5_flow_tbl_data_entry *tbl_data;
+       struct rte_flow_error error;
+       uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
+       uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
+       bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
+       bool match_src_port = false;
        int i;
 
        for (i = 0; i < RTE_COLORS; i++) {
@@ -15534,13 +15769,39 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
                        acts[i].actions_n = 1;
                        continue;
                }
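+               /*
+                * For a meter hierarchy, attach the next meter first so
+                * its meter action can lead the action list when required.
+                */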
+               if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
+                       struct rte_flow_attr attr = {
+                               .transfer = transfer
+                       };
+
+                       next_fm = mlx5_flow_meter_find(priv,
+                                       mtr_policy->act_cnt[i].next_mtr_id,
+                                       NULL);
+                       if (!next_fm) {
+                               DRV_LOG(ERR,
+                                       "Failed to get next hierarchy meter.");
+                               goto err_exit;
+                       }
+                       if (mlx5_flow_meter_attach(priv, next_fm,
+                                                  &attr, &error)) {
+                               DRV_LOG(ERR, "%s", error.message);
+                               next_fm = NULL;
+                               goto err_exit;
+                       }
+                       /* Meter action must be the first for TX. */
+                       if (mtr_first) {
+                               acts[i].dv_actions[acts[i].actions_n] =
+                                       next_fm->meter_action;
+                               acts[i].actions_n++;
+                       }
+               }
                if (mtr_policy->act_cnt[i].rix_mark) {
                        tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
                                        mtr_policy->act_cnt[i].rix_mark);
                        if (!tag) {
                                DRV_LOG(ERR, "Failed to find "
                                "mark action for policy.");
-                               return -1;
+                               goto err_exit;
                        }
                        acts[i].dv_actions[acts[i].actions_n] =
                                                tag->action;
@@ -15560,11 +15821,13 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
                                if (!port_action) {
                                        DRV_LOG(ERR, "Failed to find "
                                                "port action for policy.");
-                                       return -1;
+                                       goto err_exit;
                                }
                                acts[i].dv_actions[acts[i].actions_n] =
                                port_action->action;
                                acts[i].actions_n++;
+                               mtr_policy->dev = dev;
+                               match_src_port = true;
                                break;
                        case MLX5_FLOW_FATE_DROP:
                        case MLX5_FLOW_FATE_JUMP:
@@ -15580,27 +15843,59 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
                                if (!hrxq) {
                                        DRV_LOG(ERR, "Failed to find "
                                                "queue action for policy.");
-                                       return -1;
+                                       goto err_exit;
                                }
                                acts[i].dv_actions[acts[i].actions_n] =
                                hrxq->action;
                                acts[i].actions_n++;
                                break;
+                       case MLX5_FLOW_FATE_MTR:
+                               if (!next_fm) {
+                                       DRV_LOG(ERR,
+                                               "No next hierarchy meter.");
+                                       goto err_exit;
+                               }
+                               if (!mtr_first) {
+                                       acts[i].dv_actions[acts[i].actions_n] =
+                                                       next_fm->meter_action;
+                                       acts[i].actions_n++;
+                               }
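+                               /*
+                                * Jump to the table of the next meter's
+                                * sub-policy after the meter action.
+                                */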
+                               if (mtr_policy->act_cnt[i].next_sub_policy) {
+                                       next_sub_policy =
+                                       mtr_policy->act_cnt[i].next_sub_policy;
+                               } else {
+                                       next_policy =
+                                               mlx5_flow_meter_policy_find(dev,
+                                               next_fm->policy_id, NULL);
+                                       MLX5_ASSERT(next_policy);
+                                       next_sub_policy =
+                                       next_policy->sub_policys[domain][0];
+                               }
+                               tbl_data =
+                                       container_of(next_sub_policy->tbl_rsc,
+                                       struct mlx5_flow_tbl_data_entry, tbl);
+                               acts[i].dv_actions[acts[i].actions_n++] =
+                                                       tbl_data->jump.action;
+                               if (mtr_policy->act_cnt[i].modify_hdr)
+                                       match_src_port = !!transfer;
+                               break;
                        default:
                                /*Queue action do nothing*/
                                break;
                        }
                }
        }
-       egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
-       transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
        if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
-                               egress, transfer, false, acts)) {
+                               egress, transfer, match_src_port, acts)) {
                DRV_LOG(ERR,
                "Failed to create policy rules per domain.");
-               return -1;
+               goto err_exit;
        }
        return 0;
+err_exit:
+       if (next_fm)
+               mlx5_flow_meter_detach(priv, next_fm);
+       return -1;
 }
 
 /**
@@ -15703,7 +15998,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
                /* Create default policy rules. */
                ret = __flow_dv_create_domain_policy_rules(dev,
                                        &def_policy->sub_policy,
-                                       egress, transfer, true, acts);
+                                       egress, transfer, false, acts);
                if (ret) {
                        DRV_LOG(ERR, "Failed to create "
                                "default policy rules.");
@@ -15774,12 +16069,10 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
        int domain, ret, i;
        struct mlx5_flow_counter *cnt;
        struct mlx5_flow_dv_match_params value = {
-               .size = sizeof(value.buf) -
-               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               .size = sizeof(value.buf),
        };
        struct mlx5_flow_dv_match_params matcher_para = {
-               .size = sizeof(matcher_para.buf) -
-               MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+               .size = sizeof(matcher_para.buf),
        };
        int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
                                                     0, &error);
@@ -15788,8 +16081,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
        struct mlx5_cache_entry *entry;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
-                       .size = sizeof(matcher.mask.buf) -
-                       MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+                       .size = sizeof(matcher.mask.buf),
                },
        };
        struct mlx5_flow_dv_matcher *drop_matcher;
@@ -15797,6 +16089,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
                .error = &error,
                .data = &matcher,
        };
+       uint8_t misc_mask;
 
        if (!priv->mtr_en || mtr_id_reg_c < 0) {
                rte_errno = ENOTSUP;
@@ -15846,6 +16139,8 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
                        actions[i++] = priv->sh->dr_drop_action;
                        flow_dv_match_meta_reg(matcher_para.buf, value.buf,
                                (enum modify_reg)mtr_id_reg_c, 0, 0);
+                       misc_mask = flow_dv_matcher_enable(value.buf);
+                       __flow_dv_adjust_buf_size(&value.size, misc_mask);
                        ret = mlx5_flow_os_create_flow
                                (mtrmng->def_matcher[domain]->matcher_object,
                                (void *)&value, i, actions,
@@ -15889,6 +16184,8 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
                                        fm->drop_cnt, NULL);
                actions[i++] = cnt->action;
                actions[i++] = priv->sh->dr_drop_action;
+               misc_mask = flow_dv_matcher_enable(value.buf);
+               __flow_dv_adjust_buf_size(&value.size, misc_mask);
                ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
                                               (void *)&value, i, actions,
                                               &fm->drop_rule[domain]);
@@ -15910,22 +16207,12 @@ policy_error:
        return -1;
 }
 
-/**
- * Find the policy table for prefix table with RSS.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] mtr_policy
- *   Pointer to meter policy table.
- * @param[in] rss_desc
- *   Pointer to rss_desc
- * @return
- *   Pointer to table set on success, NULL otherwise and rte_errno is set.
- */
 static struct mlx5_flow_meter_sub_policy *
-flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+__flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
                struct mlx5_flow_meter_policy *mtr_policy,
-               struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+               struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
+               struct mlx5_flow_meter_sub_policy *next_sub_policy,
+               bool *is_reuse)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
@@ -15967,6 +16254,7 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
                        rte_spinlock_unlock(&mtr_policy->sl);
                        for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
                                mlx5_hrxq_release(dev, hrxq_idx[j]);
+                       *is_reuse = true;
                        return mtr_policy->sub_policys[domain][i];
                }
        }
@@ -15992,24 +16280,30 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
                if (!rss_desc[i])
                        continue;
                sub_policy->rix_hrxq[i] = hrxq_idx[i];
-               /*
-                * Overwrite the last action from
-                * RSS action to Queue action.
-                */
-               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-                             hrxq_idx[i]);
-               if (!hrxq) {
-                       DRV_LOG(ERR, "Failed to create policy hrxq");
-                       goto rss_sub_policy_error;
-               }
-               act_cnt = &mtr_policy->act_cnt[i];
-               if (act_cnt->rix_mark || act_cnt->modify_hdr) {
-                       memset(&dh, 0, sizeof(struct mlx5_flow_handle));
-                       if (act_cnt->rix_mark)
-                               dh.mark = 1;
-                       dh.fate_action = MLX5_FLOW_FATE_QUEUE;
-                       dh.rix_hrxq = hrxq_idx[i];
-                       flow_drv_rxq_flags_set(dev, &dh);
+               if (mtr_policy->is_hierarchy) {
+                       act_cnt = &mtr_policy->act_cnt[i];
+                       act_cnt->next_sub_policy = next_sub_policy;
+                       mlx5_hrxq_release(dev, hrxq_idx[i]);
+               } else {
+                       /*
+                        * Overwrite the last action from
+                        * RSS action to Queue action.
+                        */
+                       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                               hrxq_idx[i]);
+                       if (!hrxq) {
+                               DRV_LOG(ERR, "Failed to create policy hrxq");
+                               goto rss_sub_policy_error;
+                       }
+                       act_cnt = &mtr_policy->act_cnt[i];
+                       if (act_cnt->rix_mark || act_cnt->modify_hdr) {
+                               memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+                               if (act_cnt->rix_mark)
+                                       dh.mark = 1;
+                               dh.fate_action = MLX5_FLOW_FATE_QUEUE;
+                               dh.rix_hrxq = hrxq_idx[i];
+                               flow_drv_rxq_flags_set(dev, &dh);
+                       }
                }
        }
        if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
@@ -16033,6 +16327,7 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
                        (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
        }
        rte_spinlock_unlock(&mtr_policy->sl);
+       *is_reuse = false;
        return sub_policy;
 rss_sub_policy_error:
        if (sub_policy) {
@@ -16047,13 +16342,322 @@ rss_sub_policy_error:
                                        sub_policy->idx);
                }
        }
-       if (sub_policy_idx)
-               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
-                       sub_policy_idx);
        rte_spinlock_unlock(&mtr_policy->sl);
        return NULL;
 }
 
+/**
+ * Find the policy table for prefix table with RSS.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ * @param[in] rss_desc
+ *   Pointer to rss_desc
+ * @return
+ *   Pointer to table set on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+               struct mlx5_flow_meter_policy *mtr_policy,
+               struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+       struct mlx5_flow_meter_info *next_fm;
+       struct mlx5_flow_meter_policy *next_policy;
+       struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
+       struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
+       struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
+       uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+       bool reuse_sub_policy;
+       uint32_t i = 0;
+       uint32_t j = 0;
+
+       while (true) {
+               /* Iterate hierarchy to get all policies in this hierarchy. */
+               policies[i++] = mtr_policy;
+               if (!mtr_policy->is_hierarchy)
+                       break;
+               if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
+                       DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
+                       return NULL;
+               }
+               next_fm = mlx5_flow_meter_find(priv,
+                       mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
+               if (!next_fm) {
+                       DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
+                       return NULL;
+               }
+               next_policy =
+                       mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
+                                                   NULL);
+               MLX5_ASSERT(next_policy);
+               mtr_policy = next_policy;
+       }
+       while (i) {
+               /*
+                * From last policy to the first one in hierarchy,
+                * create/get the sub policy for each of them.
+                */
+               sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
+                                                       policies[--i],
+                                                       rss_desc,
+                                                       next_sub_policy,
+                                                       &reuse_sub_policy);
+               if (!sub_policy) {
+                       DRV_LOG(ERR, "Failed to get the sub policy.");
+                       goto err_exit;
+               }
+               if (!reuse_sub_policy)
+                       sub_policies[j++] = sub_policy;
+               next_sub_policy = sub_policy;
+       }
+       return sub_policy;
+err_exit:
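+       /* Roll back only the sub-policies created by this call. */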
+       while (j) {
+               uint16_t sub_policy_num;
+
+               sub_policy = sub_policies[--j];
+               mtr_policy = sub_policy->main_policy;
+               __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
+               if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+                       sub_policy_num = (mtr_policy->sub_policy_num >>
+                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                               MLX5_MTR_SUB_POLICY_NUM_MASK;
+                       mtr_policy->sub_policys[domain][sub_policy_num - 1] =
+                                                                       NULL;
+                       sub_policy_num--;
+                       mtr_policy->sub_policy_num &=
+                               ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+                       mtr_policy->sub_policy_num |=
+                       (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+                       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+                                       sub_policy->idx);
+               }
+       }
+       return NULL;
+}
+
+/**
+ * Create the sub policy tag rule for all meters in hierarchy.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Meter information table.
+ * @param[in] src_port
+ *   The src port this extra rule should use.
+ * @param[in] item
+ *   The src port match item.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
+                               struct mlx5_flow_meter_info *fm,
+                               int32_t src_port,
+                               const struct rte_flow_item *item,
+                               struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_policy *mtr_policy;
+       struct mlx5_flow_meter_sub_policy *sub_policy;
+       struct mlx5_flow_meter_info *next_fm = NULL;
+       struct mlx5_flow_meter_policy *next_policy;
+       struct mlx5_flow_meter_sub_policy *next_sub_policy;
+       struct mlx5_flow_tbl_data_entry *tbl_data;
+       struct mlx5_sub_policy_color_rule *color_rule;
+       struct mlx5_meter_policy_acts acts;
+       uint32_t color_reg_c_idx;
+       bool mtr_first = (src_port != UINT16_MAX) ? true : false;
+       struct rte_flow_attr attr = {
+               .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
+               .priority = 0,
+               .ingress = 0,
+               .egress = 0,
+               .transfer = 1,
+               .reserved = 0,
+       };
+       uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
+       int i;
+
+       mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
+       MLX5_ASSERT(mtr_policy);
+       if (!mtr_policy->is_hierarchy)
+               return 0;
+       next_fm = mlx5_flow_meter_find(priv,
+                       mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
+       if (!next_fm) {
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                               "Failed to find next meter in hierarchy.");
+       }
+       if (!next_fm->drop_cnt)
+               goto exit;
+       color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
+       sub_policy = mtr_policy->sub_policys[domain][0];
+       for (i = 0; i < RTE_COLORS; i++) {
+               bool rule_exist = false;
+               struct mlx5_meter_policy_action_container *act_cnt;
+
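+               /* Only the green color can chain to a next meter. */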
+               if (i >= RTE_COLOR_YELLOW)
+                       break;
+               TAILQ_FOREACH(color_rule,
+                             &sub_policy->color_rules[i], next_port)
+                       if (color_rule->src_port == src_port) {
+                               rule_exist = true;
+                               break;
+                       }
+               if (rule_exist)
+                       continue;
+               color_rule = mlx5_malloc(MLX5_MEM_ZERO,
+                               sizeof(struct mlx5_sub_policy_color_rule),
+                               0, SOCKET_ID_ANY);
+               if (!color_rule)
+                       return rte_flow_error_set(error, ENOMEM,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               NULL, "No memory to create tag color rule.");
+               color_rule->src_port = src_port;
+               attr.priority = i;
+               next_policy = mlx5_flow_meter_policy_find(dev,
+                                               next_fm->policy_id, NULL);
+               MLX5_ASSERT(next_policy);
+               next_sub_policy = next_policy->sub_policys[domain][0];
+               tbl_data = container_of(next_sub_policy->tbl_rsc,
+                                       struct mlx5_flow_tbl_data_entry, tbl);
+               act_cnt = &mtr_policy->act_cnt[i];
+               if (mtr_first) {
+                       acts.dv_actions[0] = next_fm->meter_action;
+                       acts.dv_actions[1] = act_cnt->modify_hdr->action;
+               } else {
+                       acts.dv_actions[0] = act_cnt->modify_hdr->action;
+                       acts.dv_actions[1] = next_fm->meter_action;
+               }
+               acts.dv_actions[2] = tbl_data->jump.action;
+               acts.actions_n = 3;
+               if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
+                       next_fm = NULL;
+                       goto err_exit;
+               }
+               if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
+                                       i, sub_policy, &attr, true, item,
+                                       &color_rule->matcher, error)) {
+                       rte_flow_error_set(error, errno,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                               "Failed to create hierarchy meter matcher.");
+                       goto err_exit;
+               }
+               if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
+                                       (enum rte_color)i,
+                                       color_rule->matcher->matcher_object,
+                                       acts.actions_n, acts.dv_actions,
+                                       true, item,
+                                       &color_rule->rule, &attr)) {
+                       rte_flow_error_set(error, errno,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                               "Failed to create hierarchy meter rule.");
+                       goto err_exit;
+               }
+               TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
+                                 color_rule, next_port);
+       }
+exit:
+       /*
+        * Recursive call to iterate all meters in hierarchy and
+        * create needed rules.
+        */
+       return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
+                                               src_port, item, error);
+err_exit:
+       if (color_rule) {
+               if (color_rule->rule)
+                       mlx5_flow_os_destroy_flow(color_rule->rule);
+               if (color_rule->matcher) {
+                       struct mlx5_flow_tbl_data_entry *tbl =
+                               container_of(color_rule->matcher->tbl,
+                                               typeof(*tbl), tbl);
+                       mlx5_cache_unregister(&tbl->matchers,
+                                               &color_rule->matcher->entry);
+               }
+               mlx5_free(color_rule);
+       }
+       if (next_fm)
+               mlx5_flow_meter_detach(priv, next_fm);
+       return -rte_errno;
+}
+
+/**
+ * Destroy the sub-policy tables created for Rx queue fate actions.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+               struct mlx5_flow_meter_policy *mtr_policy)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+       uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+       uint32_t i, j;
+       uint16_t sub_policy_num, new_policy_num;
+
+       rte_spinlock_lock(&mtr_policy->sl);
+       for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+               switch (mtr_policy->act_cnt[i].fate_action) {
+               case MLX5_FLOW_FATE_SHARED_RSS:
+                       sub_policy_num = (mtr_policy->sub_policy_num >>
+                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+                       MLX5_MTR_SUB_POLICY_NUM_MASK;
+                       new_policy_num = sub_policy_num;
+                       for (j = 0; j < sub_policy_num; j++) {
+                               sub_policy =
+                                       mtr_policy->sub_policys[domain][j];
+                               if (sub_policy) {
+                                       __flow_dv_destroy_sub_policy_rules(dev,
+                                               sub_policy);
+                                       if (sub_policy !=
+                                           mtr_policy->sub_policys[domain][0]) {
+                                               mtr_policy->sub_policys[domain][j]
+                                                               = NULL;
+                                               mlx5_ipool_free
+                                       (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+                                                       sub_policy->idx);
+                                               new_policy_num--;
+                                       }
+                               }
+                       }
+                       if (new_policy_num != sub_policy_num) {
+                               mtr_policy->sub_policy_num &=
+                               ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+                               mtr_policy->sub_policy_num |=
+                               (new_policy_num &
+                                       MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+                       }
+                       break;
+               case MLX5_FLOW_FATE_QUEUE:
+                       sub_policy = mtr_policy->sub_policys[domain][0];
+                       __flow_dv_destroy_sub_policy_rules(dev,
+                                               sub_policy);
+                       break;
+               default:
+                       /*Other actions without queue and do nothing*/
+                       /* Other actions without a queue: do nothing. */
+               }
+       }
+       rte_spinlock_unlock(&mtr_policy->sl);
+}
+
 /**
  * Validate the batch counter support in root table.
  *
@@ -16078,7 +16682,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                .size = sizeof(value.buf),
        };
        struct mlx5dv_flow_matcher_attr dv_attr = {
-               .type = IBV_FLOW_ATTR_NORMAL,
+               .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
                .priority = 0,
                .match_criteria_enable = 0,
                .match_mask = (void *)&mask,
@@ -16090,7 +16694,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
        void *flow = NULL;
        int ret = -1;
 
-       tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+       tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
                                        0, 0, 0, NULL);
        if (!tbl)
                goto err;
@@ -16101,14 +16705,14 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                                                    &actions[0]);
        if (ret)
                goto err;
-       actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
-                                         priv->drop_queue.hrxq->action;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+       __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
        ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
                                               &matcher);
        if (ret)
                goto err;
-       ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+       __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
+       ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
                                       actions, &flow);
 err:
        /*
@@ -16333,6 +16937,78 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
        }
 }
 
+/**
+ * Validate the meter hierarchy chain for meter policy.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] meter_id
+ *   Meter id.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[out] is_rss
+ *   Set to the RSS flag of the terminating policy in the hierarchy.
+ * @param[out] hierarchy_domain
+ *   The domain bitmap for hierarchy policy.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value with error set.
+ */
+static int
+flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
+                                 uint32_t meter_id,
+                                 uint64_t action_flags,
+                                 bool *is_rss,
+                                 uint8_t *hierarchy_domain,
+                                 struct rte_mtr_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_meter_info *fm;
+       struct mlx5_flow_meter_policy *policy;
+       uint8_t cnt = 1;
+
+       if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+                           MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+               return -rte_mtr_error_set(error, EINVAL,
+                                       RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
+                                       NULL,
+                                       "Multiple fate actions not supported.");
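+       /*
+        * Walk the meter chain until a terminating (non-hierarchy)
+        * policy is found and collect the domains it supports.
+        */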
+       while (true) {
+               fm = mlx5_flow_meter_find(priv, meter_id, NULL);
+               if (!fm)
+                       return -rte_mtr_error_set(error, EINVAL,
+                                               RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+                                       "Meter not found in meter hierarchy.");
+               if (fm->def_policy)
+                       return -rte_mtr_error_set(error, EINVAL,
+                                       RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+                       "Non-termination meter not supported in hierarchy.");
+               policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
+               MLX5_ASSERT(policy);
+               if (!policy->is_hierarchy) {
+                       if (policy->transfer)
+                               *hierarchy_domain |=
+                                               MLX5_MTR_DOMAIN_TRANSFER_BIT;
+                       if (policy->ingress)
+                               *hierarchy_domain |=
+                                               MLX5_MTR_DOMAIN_INGRESS_BIT;
+                       if (policy->egress)
+                               *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
+                       *is_rss = policy->is_rss;
+                       break;
+               }
+               meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
+               if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
+                       return -rte_mtr_error_set(error, EINVAL,
+                                       RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+                                       "Exceed max hierarchy meter number.");
+       }
+       return 0;
+}
+
 /**
  * Validate meter policy actions.
  * Dispatcher for action type specific validation.
@@ -16368,6 +17044,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
        struct rte_flow_error flow_err;
        uint8_t domain_color[RTE_COLORS] = {0};
        uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
+       uint8_t hierarchy_domain = 0;
+       const struct rte_flow_action_meter *mtr;
 
        if (!priv->config.dv_esw_en)
                def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -16545,6 +17223,27 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
+                       case RTE_FLOW_ACTION_TYPE_METER:
+                               if (i != RTE_COLOR_GREEN)
+                                       return -rte_mtr_error_set(error,
+                                               ENOTSUP,
+                                               RTE_MTR_ERROR_TYPE_METER_POLICY,
+                                               NULL, flow_err.message ?
+                                               flow_err.message :
+                                 "Meter hierarchy only supports GREEN color.");
+                               mtr = act->conf;
+                               ret = flow_dv_validate_policy_mtr_hierarchy(dev,
+                                                       mtr->mtr_id,
+                                                       action_flags,
+                                                       is_rss,
+                                                       &hierarchy_domain,
+                                                       error);
+                               if (ret)
+                                       return ret;
+                               ++actions_n;
+                               action_flags |=
+                               MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
+                               break;
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
@@ -16565,6 +17264,9 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                         * so MARK action only in ingress domain.
                         */
                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
+               else if (action_flags &
+                       MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
+                       domain_color[i] = hierarchy_domain;
                else
                        domain_color[i] = def_domain;
                /*
@@ -16665,6 +17367,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
        .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+       .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
+       .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,