net/mlx5: fix packet length assert in MPRQ
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
index e28f01d..2fdd403 100644
@@ -434,6 +434,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
+               MLX5_ASSERT(mask);
                if (!mask) {
                        ++field;
                        continue;
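
The new MLX5_ASSERT(mask) is placed in front of the existing !mask skip rather than replacing it; assuming MLX5_ASSERT, like assert(), compiles to a no-op in non-debug builds, the runtime branch still has to tolerate an all-zero mask. A minimal standalone sketch of that assert-then-skip pattern (the helper below is made up for illustration):

#include <assert.h>
#include <stdint.h>

/* Sketch only: complain loudly about a zero mask in debug builds,
 * but still skip it gracefully when assertions are disabled. */
static unsigned int
count_nonzero_masks(const uint32_t *masks, unsigned int n)
{
        unsigned int i, handled = 0;

        for (i = 0; i < n; ++i) {
                assert(masks[i]);       /* debug-only sanity check */
                if (!masks[i])
                        continue;       /* release builds: skip quietly */
                ++handled;
        }
        return handled;
}
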
@@ -2490,14 +2491,14 @@ flow_dv_encap_decap_resource_register
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
-                       dev_flow->handle->dvh.encap_decap = idx;
+                       dev_flow->handle->dvh.rix_encap_decap = idx;
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-                                      &dev_flow->handle->dvh.encap_decap);
+                                      &dev_flow->handle->dvh.rix_encap_decap);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2518,7 +2519,8 @@ flow_dv_encap_decap_resource_register
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
-                    dev_flow->handle->dvh.encap_decap, cache_resource, next);
+                    dev_flow->handle->dvh.rix_encap_decap, cache_resource,
+                    next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
@@ -2572,7 +2574,7 @@ flow_dv_jump_tbl_resource_register
                        (void *)&tbl_data->jump, cnt);
        }
        rte_atomic32_inc(&tbl_data->jump.refcnt);
-       dev_flow->handle->jump = tbl_data->idx;
+       dev_flow->handle->rix_jump = tbl_data->idx;
        dev_flow->dv.jump = &tbl_data->jump;
        return 0;
 }
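
Most renames in this patch follow one idea: the flow handle stores 32-bit indexes into the shared indexed pools (hence the rix_ prefix) instead of raw pointers, and the object is resolved on demand with mlx5_ipool_get(), as the release paths further down do. Below is a self-contained sketch of that index-instead-of-pointer scheme; the pool type and the "index 0 means none" convention are illustrative assumptions, not the driver's real ipool API. Keeping only an index presumably shrinks the handle and lets the same 32-bit slot sit inside unions such as the fate index sketched near the end of this patch.

#include <stddef.h>
#include <stdint.h>

#define DEMO_POOL_SIZE 64

/* Demo pool: entries are addressed by a 1-based index so that 0 can be
 * stored in a handle to mean "no resource attached". */
struct demo_pool {
        void *objs[DEMO_POOL_SIZE];
};

static void *
demo_pool_get(struct demo_pool *pool, uint32_t idx)
{
        if (!idx || idx > DEMO_POOL_SIZE)
                return NULL;
        return pool->objs[idx - 1];
}
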
@@ -2613,14 +2615,14 @@ flow_dv_port_id_action_resource_register
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
-                       dev_flow->handle->port_id_action = idx;
+                       dev_flow->handle->rix_port_id_action = idx;
                        dev_flow->dv.port_id_action = cache_resource;
                        return 0;
                }
        }
        /* Register new port id action resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
-                                      &dev_flow->handle->port_id_action);
+                                      &dev_flow->handle->rix_port_id_action);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2643,7 +2645,8 @@ flow_dv_port_id_action_resource_register
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
-                    dev_flow->handle->port_id_action, cache_resource, next);
+                    dev_flow->handle->rix_port_id_action, cache_resource,
+                    next);
        dev_flow->dv.port_id_action = cache_resource;
        DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
                (void *)cache_resource,
@@ -2689,14 +2692,14 @@ flow_dv_push_vlan_action_resource_register
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
-                       dev_flow->handle->dvh.push_vlan_res = idx;
+                       dev_flow->handle->dvh.rix_push_vlan = idx;
                        dev_flow->dv.push_vlan_res = cache_resource;
                        return 0;
                }
        }
        /* Register new push_vlan action resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
-                                      &dev_flow->handle->dvh.push_vlan_res);
+                                      &dev_flow->handle->dvh.rix_push_vlan);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2721,7 +2724,7 @@ flow_dv_push_vlan_action_resource_register
        rte_atomic32_inc(&cache_resource->refcnt);
        ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
                     &sh->push_vlan_action_list,
-                    dev_flow->handle->dvh.push_vlan_res,
+                    dev_flow->handle->dvh.rix_push_vlan,
                     cache_resource, next);
        dev_flow->dv.push_vlan_res = cache_resource;
        DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
@@ -3705,9 +3708,9 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Meter not found");
-       if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
-             (!fm->attr.ingress && !attr->ingress && attr->egress) ||
-             (!fm->attr.egress && !attr->egress && attr->ingress))))
+       if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
+             (!fm->ingress && !attr->ingress && attr->egress) ||
+             (!fm->egress && !attr->egress && attr->ingress))))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Flow attributes are either invalid "
@@ -4488,7 +4491,9 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   - 0 on success and non-root table.
+ *   - 1 on success and root table.
+ *   - a negative errno value otherwise and rte_errno is set.
  */
 static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
@@ -4498,6 +4503,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;
+       int ret = 0;
 
 #ifndef HAVE_MLX5DV_DR
        if (attributes->group)
@@ -4506,14 +4512,15 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
                                          NULL,
                                          "groups are not supported");
 #else
-       uint32_t table;
-       int ret;
+       uint32_t table = 0;
 
        ret = mlx5_flow_group_to_table(attributes, external,
                                       attributes->group, !!priv->fdb_def_rule,
                                       &table, error);
        if (ret)
                return ret;
+       if (!table)
+               ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 #endif
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
@@ -4543,7 +4550,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
-       return 0;
+       return ret;
 }
 
 /**
@@ -4559,6 +4566,8 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
  *   Pointer to the list of actions.
  * @param[in] external
  *   This flow rule is created by request external to PMD.
+ * @param[in] hairpin
+ *   Number of hairpin TX actions, 0 means classic flow.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -4569,7 +4578,7 @@ static int
 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
-                bool external, struct rte_flow_error *error)
+                bool external, int hairpin, struct rte_flow_error *error)
 {
        int ret;
        uint64_t action_flags = 0;
@@ -4616,12 +4625,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        struct mlx5_dev_config *dev_conf = &priv->config;
        uint16_t queue_index = 0xFFFF;
        const struct rte_flow_item_vlan *vlan_m = NULL;
+       int16_t rw_act_num = 0;
+       uint64_t is_root;
 
        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, external, error);
        if (ret < 0)
                return ret;
+       is_root = (uint64_t)ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                int type = items->type;
@@ -4898,6 +4910,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                action_flags |= MLX5_FLOW_ACTION_FLAG;
                                ++actions_n;
                        }
+                       rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = flow_dv_validate_action_mark(dev, actions,
@@ -4916,6 +4929,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                        }
+                       rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_META:
                        ret = flow_dv_validate_action_set_meta(dev, actions,
@@ -4927,6 +4941,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_META;
+                       rw_act_num += MLX5_ACT_NUM_SET_META;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TAG:
                        ret = flow_dv_validate_action_set_tag(dev, actions,
@@ -4938,6 +4953,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+                       rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
@@ -5015,6 +5031,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        /* Count VID with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+                       rw_act_num += MLX5_ACT_NUM_MDF_VID;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -5076,8 +5093,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
                                                MLX5_FLOW_ACTION_SET_MAC_SRC :
                                                MLX5_FLOW_ACTION_SET_MAC_DST;
+                       /*
+                        * Even if the source and destination MAC addresses
+                        * overlap in the header with 4B alignment, the convert
+                        * function will handle them separately and 4 SW actions
+                        * will be created. And 2 actions will be added each
+                        * time, no matter how many address bytes are set.
+                        */
+                       rw_act_num += MLX5_ACT_NUM_MDF_MAC;
                        break;
-
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        ret = flow_dv_validate_action_modify_ipv4(action_flags,
@@ -5093,6 +5117,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV4_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV4_DST;
+                       rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
@@ -5115,6 +5140,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV6_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV6_DST;
+                       rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
@@ -5131,6 +5157,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
                                                MLX5_FLOW_ACTION_SET_TP_SRC :
                                                MLX5_FLOW_ACTION_SET_TP_DST;
+                       rw_act_num += MLX5_ACT_NUM_MDF_PORT;
                        break;
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
@@ -5147,6 +5174,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_SET_TTL ?
                                                MLX5_FLOW_ACTION_SET_TTL :
                                                MLX5_FLOW_ACTION_DEC_TTL;
+                       rw_act_num += MLX5_ACT_NUM_MDF_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
                        ret = flow_dv_validate_action_jump(actions,
@@ -5174,6 +5202,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
                                                MLX5_FLOW_ACTION_INC_TCP_SEQ :
                                                MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+                       rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
@@ -5191,10 +5220,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
                                                MLX5_FLOW_ACTION_INC_TCP_ACK :
                                                MLX5_FLOW_ACTION_DEC_TCP_ACK;
+                       rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
                        break;
-               case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+                       break;
+               case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
+                       rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
@@ -5205,6 +5237,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_METER;
                        ++actions_n;
+                       /* Meter action will add one more TAG action. */
+                       rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
                        ret = flow_dv_validate_action_modify_ipv4_dscp
@@ -5218,6 +5252,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+                       rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
                        ret = flow_dv_validate_action_modify_ipv6_dscp
@@ -5231,6 +5266,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+                       rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
@@ -5303,6 +5339,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                  NULL, "encap is not supported"
                                                  " for ingress traffic");
        }
+       /* Hairpin flow will add one more TAG action. */
+       if (hairpin > 0)
+               rw_act_num += MLX5_ACT_NUM_SET_TAG;
+       /* Extra metadata enabled: one more TAG action will be added. */
+       if (dev_conf->dv_flow_en &&
+           dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+           mlx5_flow_ext_mreg_supported(dev))
+               rw_act_num += MLX5_ACT_NUM_SET_TAG;
+       if ((uint32_t)rw_act_num >=
+                       flow_dv_modify_hdr_action_max(dev, is_root)) {
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         NULL, "too many header modify"
+                                         " actions to support");
+       }
        return 0;
 }
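
The rw_act_num counter threaded through the action loop above tallies how many hardware modify-header fields the flow would consume (one MLX5_ACT_NUM_* constant per action type, plus one extra TAG for hairpin and for extended metadata), and the final check rejects the flow once the total reaches flow_dv_modify_hdr_action_max(dev, is_root), where is_root carries the new root/non-root result of flow_dv_validate_attributes(). A compact sketch of that budget check; the numeric limits below are placeholders, not the device's real capabilities:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder limits for illustration; the driver queries the real ones
 * through flow_dv_modify_hdr_action_max(dev, is_root). */
#define DEMO_MAX_MODIFY_ROOT     16
#define DEMO_MAX_MODIFY_NON_ROOT 32

static bool
demo_modify_hdr_budget_ok(uint16_t rw_act_num, bool is_root)
{
        uint16_t limit = is_root ? DEMO_MAX_MODIFY_ROOT
                                 : DEMO_MAX_MODIFY_NON_ROOT;

        /* Mirrors the ENOTSUP check above: reject once the tally
         * reaches the table's limit. */
        return rw_act_num < limit;
}
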
 
@@ -7103,7 +7154,7 @@ flow_dv_tag_resource_register
                cache_resource = container_of
                        (entry, struct mlx5_flow_dv_tag_resource, entry);
                rte_atomic32_inc(&cache_resource->refcnt);
-               dev_flow->handle->dvh.tag_resource = cache_resource->idx;
+               dev_flow->handle->dvh.rix_tag = cache_resource->idx;
                dev_flow->dv.tag_resource = cache_resource;
                DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
                        (void *)cache_resource,
@@ -7112,7 +7163,7 @@ flow_dv_tag_resource_register
        }
        /* Register new resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
-                                      &dev_flow->handle->dvh.tag_resource);
+                                      &dev_flow->handle->dvh.rix_tag);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -7277,18 +7328,20 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
  *
  * @param[in] dev_flow
  *   Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ *   Pointer to the mlx5_flow_rss_desc.
  */
 static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+                      struct mlx5_flow_rss_desc *rss_desc)
 {
-       struct rte_flow *flow = dev_flow->flow;
        uint64_t items = dev_flow->handle->layers;
        int rss_inner = 0;
-       uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+       uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
 
        dev_flow->hash_fields = 0;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       if (flow->rss.level >= 2) {
+       if (rss_desc->level >= 2) {
                dev_flow->hash_fields |= IBV_RX_HASH_INNER;
                rss_inner = 1;
        }
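
From here on the RSS parameters are read from a mlx5_flow_rss_desc working descriptor instead of struct rte_flow; the translate and apply hunks below pick it from priv->rss_desc with [!!priv->flow_nested_idx], which I read as slot 1 for internal nested sub-flows and slot 0 for the user flow (an assumption, the patch does not spell it out). The fields the patch touches suggest roughly the following shape; the real definition in mlx5_flow.h may differ:

#include <stdint.h>

#define DEMO_RSS_KEY_LEN   40 /* MLX5_RSS_HASH_KEY_LEN, assumed value */
#define DEMO_RSS_QUEUE_MAX 64 /* the real struct ends in a flexible array */

/* Approximate shape of the working RSS descriptor used by this patch. */
struct demo_rss_desc {
        uint32_t level;                     /* >= 2 selects inner headers */
        uint64_t types;                     /* requested RSS hash types */
        uint8_t  key[DEMO_RSS_KEY_LEN];     /* hash key, default if unset */
        uint32_t queue_num;                 /* number of Rx queues */
        uint16_t queue[DEMO_RSS_QUEUE_MAX]; /* destination queue indices */
};
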
@@ -7373,6 +7426,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
        struct mlx5_dev_config *dev_conf = &priv->config;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5_flow_handle *handle = dev_flow->handle;
+       struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+                                             priv->rss_desc)
+                                             [!!priv->flow_nested_idx];
        uint64_t item_flags = 0;
        uint64_t last_item = 0;
        uint64_t action_flags = 0;
@@ -7428,6 +7484,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                struct mlx5_flow_dv_port_id_action_resource port_id_resource;
                int action_type = actions->type;
                const struct rte_flow_action *found_action = NULL;
+               struct mlx5_flow_meter *fm = NULL;
 
                switch (action_type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
@@ -7441,7 +7498,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        if (flow_dv_port_id_action_resource_register
                            (dev, &port_id_resource, dev_flow, error))
                                return -rte_errno;
-                       MLX5_ASSERT(!handle->port_id_action);
+                       MLX5_ASSERT(!handle->rix_port_id_action);
                        dev_flow->dv.actions[actions_n++] =
                                        dev_flow->dv.port_id_action->action;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
@@ -7468,7 +7525,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                         * right now. So the pointer to the tag resource must be
                         * zero before the register process.
                         */
-                       MLX5_ASSERT(!handle->dvh.tag_resource);
+                       MLX5_ASSERT(!handle->dvh.rix_tag);
                        if (flow_dv_tag_resource_register(dev, tag_be,
                                                          dev_flow, error))
                                return -rte_errno;
@@ -7497,7 +7554,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        tag_be = mlx5_flow_mark_set
                              (((const struct rte_flow_action_mark *)
                               (actions->conf))->id);
-                       MLX5_ASSERT(!handle->dvh.tag_resource);
+                       MLX5_ASSERT(!handle->dvh.rix_tag);
                        if (flow_dv_tag_resource_register(dev, tag_be,
                                                          dev_flow, error))
                                return -rte_errno;
@@ -7526,23 +7583,20 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       MLX5_ASSERT(flow->rss.queue);
                        queue = actions->conf;
-                       flow->rss.queue_num = 1;
-                       (*flow->rss.queue)[0] = queue->index;
+                       rss_desc->queue_num = 1;
+                       rss_desc->queue[0] = queue->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
-                       MLX5_ASSERT(flow->rss.queue);
                        rss = actions->conf;
-                       if (flow->rss.queue)
-                               memcpy((*flow->rss.queue), rss->queue,
-                                      rss->queue_num * sizeof(uint16_t));
-                       flow->rss.queue_num = rss->queue_num;
+                       memcpy(rss_desc->queue, rss->queue,
+                              rss->queue_num * sizeof(uint16_t));
+                       rss_desc->queue_num = rss->queue_num;
                        /* NULL RSS key indicates default RSS key. */
                        rss_key = !rss->key ? rss_hash_default_key : rss->key;
-                       memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+                       memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
                        /*
                         * rss->level and rss.types should be set in advance
                         * when expanding items for RSS.
@@ -7804,20 +7858,31 @@ cnt_err:
                case RTE_FLOW_ACTION_TYPE_METER:
                        mtr = actions->conf;
                        if (!flow->meter) {
-                               flow->meter = mlx5_flow_meter_attach(priv,
-                                                       mtr->mtr_id, attr,
-                                                       error);
-                               if (!flow->meter)
+                               fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
+                                                           attr, error);
+                               if (!fm)
                                        return rte_flow_error_set(error,
                                                rte_errno,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "meter not found "
                                                "or invalid parameters");
+                               flow->meter = fm->idx;
                        }
                        /* Set the meter action. */
+                       if (!fm) {
+                               fm = mlx5_ipool_get(priv->sh->ipool
+                                               [MLX5_IPOOL_MTR], flow->meter);
+                               if (!fm)
+                                       return rte_flow_error_set(error,
+                                               rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ACTION,
+                                               NULL,
+                                               "meter not found "
+                                               "or invalid parameters");
+                       }
                        dev_flow->dv.actions[actions_n++] =
-                               flow->meter->mfts->meter_action;
+                               fm->mfts->meter_action;
                        action_flags |= MLX5_FLOW_ACTION_METER;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
@@ -7942,7 +8007,7 @@ cnt_err:
                case RTE_FLOW_ITEM_TYPE_GRE:
                        flow_dv_translate_item_gre(match_mask, match_value,
                                                   items, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
@@ -7954,14 +8019,14 @@ cnt_err:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        flow_dv_translate_item_nvgre(match_mask, match_value,
                                                     items, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        flow_dv_translate_item_vxlan(match_mask, match_value,
                                                     items, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
@@ -7969,21 +8034,21 @@ cnt_err:
                        flow_dv_translate_item_vxlan_gpe(match_mask,
                                                         match_value, items,
                                                         tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        flow_dv_translate_item_geneve(match_mask, match_value,
                                                      items, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_GENEVE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        flow_dv_translate_item_mpls(match_mask, match_value,
                                                    items, last_item, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
@@ -8026,7 +8091,7 @@ cnt_err:
                case RTE_FLOW_ITEM_TYPE_GTP:
                        flow_dv_translate_item_gtp(match_mask, match_value,
                                                   items, tunnel);
-                       matcher.priority = flow->rss.level >= 2 ?
+                       matcher.priority = rss_desc->level >= 2 ?
                                    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
                        last_item = MLX5_FLOW_LAYER_GTP;
                        break;
@@ -8058,7 +8123,7 @@ cnt_err:
         */
        handle->layers |= item_flags;
        if (action_flags & MLX5_FLOW_ACTION_RSS)
-               flow_dv_hashfields_set(dev_flow);
+               flow_dv_hashfields_set(dev_flow, rss_desc);
        /* Register matcher. */
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
@@ -8127,26 +8192,29 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                 * the special index to hrxq to mark the queue
                                 * has been allocated.
                                 */
-                               dh->hrxq = UINT32_MAX;
+                               dh->rix_hrxq = UINT32_MAX;
                                dv->actions[n++] = drop_hrxq->action;
                        }
                } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;
+                       struct mlx5_flow_rss_desc *rss_desc =
+                               &((struct mlx5_flow_rss_desc *)priv->rss_desc)
+                               [!!priv->flow_nested_idx];
 
-                       MLX5_ASSERT(flow->rss.queue);
-                       hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+                       MLX5_ASSERT(rss_desc->queue_num);
+                       hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
                                                 MLX5_RSS_HASH_KEY_LEN,
                                                 dev_flow->hash_fields,
-                                                (*flow->rss.queue),
-                                                flow->rss.queue_num);
+                                                rss_desc->queue,
+                                                rss_desc->queue_num);
                        if (!hrxq_idx) {
                                hrxq_idx = mlx5_hrxq_new
-                                               (dev, flow->rss.key,
+                                               (dev, rss_desc->key,
                                                MLX5_RSS_HASH_KEY_LEN,
                                                dev_flow->hash_fields,
-                                               (*flow->rss.queue),
-                                               flow->rss.queue_num,
+                                               rss_desc->queue,
+                                               rss_desc->queue_num,
                                                !!(dh->layers &
                                                MLX5_FLOW_LAYER_TUNNEL));
                        }
@@ -8159,7 +8227,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                         "cannot get hash queue");
                                goto error;
                        }
-                       dh->hrxq = hrxq_idx;
+                       dh->rix_hrxq = hrxq_idx;
                        dv->actions[n++] = hrxq->action;
                }
                dh->ib_flow =
@@ -8190,13 +8258,13 @@ error:
        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dh, next) {
                /* hrxq is union, don't clear it if the flag is not set. */
-               if (dh->hrxq) {
+               if (dh->rix_hrxq) {
                        if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                                mlx5_hrxq_drop_release(dev);
-                               dh->hrxq = 0;
+                               dh->rix_hrxq = 0;
                        } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
-                               mlx5_hrxq_release(dev, dh->hrxq);
-                               dh->hrxq = 0;
+                               mlx5_hrxq_release(dev, dh->rix_hrxq);
+                               dh->rix_hrxq = 0;
                        }
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
@@ -8257,7 +8325,7 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     struct mlx5_flow_handle *handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t idx = handle->dvh.encap_decap;
+       uint32_t idx = handle->dvh.rix_encap_decap;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 
        cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
@@ -8302,7 +8370,7 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
        struct mlx5_flow_tbl_data_entry *tbl_data;
 
        tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
-                            handle->jump);
+                            handle->rix_jump);
        if (!tbl_data)
                return 0;
        cache_resource = &tbl_data->jump;
@@ -8370,7 +8438,7 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_port_id_action_resource *cache_resource;
-       uint32_t idx = handle->port_id_action;
+       uint32_t idx = handle->rix_port_id_action;
 
        cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
                                        idx);
@@ -8410,7 +8478,7 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
                                          struct mlx5_flow_handle *handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t idx = handle->dvh.push_vlan_res;
+       uint32_t idx = handle->dvh.rix_push_vlan;
        struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
 
        cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
@@ -8447,19 +8515,19 @@ static void
 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
                               struct mlx5_flow_handle *handle)
 {
-       if (!handle->fate_idx)
+       if (!handle->rix_fate)
                return;
        if (handle->fate_action == MLX5_FLOW_FATE_DROP)
                mlx5_hrxq_drop_release(dev);
        else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
-               mlx5_hrxq_release(dev, handle->hrxq);
+               mlx5_hrxq_release(dev, handle->rix_hrxq);
        else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
                flow_dv_jump_tbl_resource_release(dev, handle);
        else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
                flow_dv_port_id_action_resource_release(dev, handle);
        else
                DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
-       handle->fate_idx = 0;
+       handle->rix_fate = 0;
 }
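
flow_dv_fate_resource_release() also shows why several of the renamed fields can share storage: a handle keeps a single 32-bit fate index whose meaning follows fate_action (the error-path comment "hrxq is union, don't clear it if the flag is not set" points the same way). A sketch of that union as implied by the patch; the actual layout in mlx5_flow.h may differ:

#include <stdint.h>

/* Assumed layout, for illustration only. */
union demo_fate_index {
        uint32_t rix_hrxq;           /* MLX5_FLOW_FATE_QUEUE and DROP marker */
        uint32_t rix_jump;           /* MLX5_FLOW_FATE_JUMP */
        uint32_t rix_port_id_action; /* MLX5_FLOW_FATE_PORT_ID */
        uint32_t rix_fate;           /* generic view used to test and clear */
};
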
 
 /**
@@ -8522,8 +8590,13 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                flow->counter = 0;
        }
        if (flow->meter) {
-               mlx5_flow_meter_detach(flow->meter);
-               flow->meter = NULL;
+               struct mlx5_flow_meter *fm;
+
+               fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
+                                   flow->meter);
+               if (fm)
+                       mlx5_flow_meter_detach(fm);
+               flow->meter = 0;
        }
        while (flow->dev_handles) {
                uint32_t tmp_idx = flow->dev_handles;
@@ -8535,16 +8608,16 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                flow->dev_handles = dev_handle->next.next;
                if (dev_handle->dvh.matcher)
                        flow_dv_matcher_release(dev, dev_handle);
-               if (dev_handle->dvh.encap_decap)
+               if (dev_handle->dvh.rix_encap_decap)
                        flow_dv_encap_decap_resource_release(dev, dev_handle);
                if (dev_handle->dvh.modify_hdr)
                        flow_dv_modify_hdr_resource_release(dev_handle);
-               if (dev_handle->dvh.push_vlan_res)
+               if (dev_handle->dvh.rix_push_vlan)
                        flow_dv_push_vlan_action_resource_release(dev,
                                                                  dev_handle);
-               if (dev_handle->dvh.tag_resource)
+               if (dev_handle->dvh.rix_tag)
                        flow_dv_tag_release(dev,
-                                           dev_handle->dvh.tag_resource);
+                                           dev_handle->dvh.rix_tag);
                flow_dv_fate_resource_release(dev, dev_handle);
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                           tmp_idx);
@@ -8677,11 +8750,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->egress.any_matcher));
        if (mtd->egress.tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->egress.tbl));
+               flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
        if (mtd->egress.sfx_tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->egress.sfx_tbl));
+               flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
        if (mtd->ingress.color_matcher)
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->ingress.color_matcher));
@@ -8689,11 +8760,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->ingress.any_matcher));
        if (mtd->ingress.tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->ingress.tbl));
+               flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
        if (mtd->ingress.sfx_tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->ingress.sfx_tbl));
+               flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
        if (mtd->transfer.color_matcher)
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->transfer.color_matcher));
@@ -8701,11 +8770,9 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->transfer.any_matcher));
        if (mtd->transfer.tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->transfer.tbl));
+               flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
        if (mtd->transfer.sfx_tbl)
-               claim_zero(flow_dv_tbl_resource_release(dev,
-                                                       mtd->transfer.sfx_tbl));
+               flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
        if (mtd->drop_actn)
                claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
        rte_free(mtd);
@@ -8988,7 +9055,7 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
                                       rte_col_2_mlx5_col(i), UINT8_MAX);
                if (mtb->count_actns[i])
                        actions[j++] = mtb->count_actns[i];
-               if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
+               if (fm->action[i] == MTR_POLICER_ACTION_DROP)
                        actions[j++] = mtb->drop_actn;
                else
                        actions[j++] = dtb->jump_actn;