diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f47d526..d3a3f23 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -80,6 +80,10 @@ static int
 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                      uint32_t encap_decap_idx);
 
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+                                       uint32_t port_id);
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -1422,7 +1426,7 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_mark),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1498,7 +1502,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
 }
 
@@ -1551,7 +1555,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_tag),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (mask->index != 0xff)
@@ -1622,7 +1626,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
-                                error);
+                                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!spec)
@@ -1695,7 +1699,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
@@ -1782,11 +1786,240 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Match is supported for GTP"
                                          " flags only");
-       return mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_gtp),
-                error);
+       return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+                                        (const uint8_t *)&nic_mask,
+                                        sizeof(struct rte_flow_item_gtp),
+                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+}
+
+/**
+ * Validate IPV4 item.
+ * Use existing validation function mlx5_flow_validate_item_ipv4(), and
+ * add specific validation of fragment_offset field.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the ethernet layer header (including dot1q).
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
+                          uint64_t item_flags,
+                          uint64_t last_item,
+                          uint16_t ether_type,
+                          struct rte_flow_error *error)
+{
+       int ret;
+       const struct rte_flow_item_ipv4 *spec = item->spec;
+       const struct rte_flow_item_ipv4 *last = item->last;
+       const struct rte_flow_item_ipv4 *mask = item->mask;
+       rte_be16_t fragment_offset_spec = 0;
+       rte_be16_t fragment_offset_last = 0;
+       const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+               .hdr = {
+                       .src_addr = RTE_BE32(0xffffffff),
+                       .dst_addr = RTE_BE32(0xffffffff),
+                       .type_of_service = 0xff,
+                       .fragment_offset = RTE_BE16(0xffff),
+                       .next_proto_id = 0xff,
+                       .time_to_live = 0xff,
+               },
+       };
+
+       ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
+                                          ether_type, &nic_ipv4_mask,
+                                          MLX5_ITEM_RANGE_ACCEPTED, error);
+       if (ret < 0)
+               return ret;
+       if (spec && mask)
+               fragment_offset_spec = spec->hdr.fragment_offset &
+                                      mask->hdr.fragment_offset;
+       if (!fragment_offset_spec)
+               return 0;
+       /*
+        * spec and mask are valid, enforce using full mask to make sure the
+        * complete value is used correctly.
+        */
+       if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+                       != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+                                         item, "must use full mask for"
+                                         " fragment_offset");
+       /*
+        * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
+        * indicating this is the first fragment of a fragmented packet.
+        * This is not yet supported in MLX5; return an appropriate error.
+        */
+       if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "match on first fragment not "
+                                         "supported");
+       if (fragment_offset_spec && !last)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "specified value not supported");
+       /* spec and last are valid, validate the specified range. */
+       fragment_offset_last = last->hdr.fragment_offset &
+                              mask->hdr.fragment_offset;
+       /*
+        * Match on fragment_offset spec 0x2001 and last 0x3fff
+        * means MF is 1 and frag-offset is > 0.
+        * This matches the second fragment onward, excluding the last.
+        * This is not yet supported in MLX5; return an appropriate
+        * error message.
+        */
+       if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
+           fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                         last, "match on following "
+                                         "fragments not supported");
+       /*
+        * Match on fragment_offset spec 0x0001 and last 0x1fff
+        * means MF is 0 and frag-offset is > 0.
+        * This matches the last fragment of a fragmented packet.
+        * This is not yet supported in MLX5; return an appropriate
+        * error message.
+        */
+       if (fragment_offset_spec == RTE_BE16(1) &&
+           fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                         last, "match on last "
+                                         "fragment not supported");
+       /*
+        * Match on fragment_offset spec 0x0001 and last 0x3fff
+        * means MF and/or frag-offset is not 0.
+        * This is a fragmented packet.
+        * Other range values are invalid and rejected.
+        */
+       if (!(fragment_offset_spec == RTE_BE16(1) &&
+             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+                                         "specified range not supported");
+       return 0;
+}
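+
+/*
+ * Illustrative sketch (not part of the driver): given the checks above,
+ * the only accepted fragment_offset range is spec 0x0001 with last 0x3fff
+ * under a full mask, i.e. "any fragment" (MF and/or frag-offset non-zero).
+ * An application pattern item expressing that could look as follows:
+ *
+ *     static const struct rte_flow_item_ipv4 frag_spec = {
+ *             .hdr = { .fragment_offset = RTE_BE16(1) },
+ *     };
+ *     static const struct rte_flow_item_ipv4 frag_last = {
+ *             .hdr = { .fragment_offset = RTE_BE16(0x3fff) },
+ *     };
+ *     static const struct rte_flow_item_ipv4 frag_mask = {
+ *             .hdr = { .fragment_offset = RTE_BE16(0xffff) },
+ *     };
+ *     const struct rte_flow_item item = {
+ *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *             .spec = &frag_spec,
+ *             .last = &frag_last,
+ *             .mask = &frag_mask,
+ *     };
+ */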
+
+/**
+ * Validate IPV6 fragment extension item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
+                                   uint64_t item_flags,
+                                   struct rte_flow_error *error)
+{
+       const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
+       const struct rte_flow_item_ipv6_frag_ext *last = item->last;
+       const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
+       rte_be16_t frag_data_spec = 0;
+       rte_be16_t frag_data_last = 0;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                     MLX5_FLOW_LAYER_OUTER_L4;
+       int ret = 0;
+       struct rte_flow_item_ipv6_frag_ext nic_mask = {
+               .hdr = {
+                       .next_header = 0xff,
+                       .frag_data = RTE_BE16(0xffff),
+               },
+       };
+
+       if (item_flags & l4m)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "ipv6 fragment extension item cannot "
+                                         "follow L4 item.");
+       if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+           (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "ipv6 fragment extension item must "
+                                         "follow ipv6 item");
+       if (spec && mask)
+               frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
+       if (!frag_data_spec)
+               return 0;
+       /*
+        * spec and mask are valid, enforce using full mask to make sure the
+        * complete value is used correctly.
+        */
+       if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
+                               RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+                                         item, "must use full mask for"
+                                         " frag_data");
+       /*
+        * Match on frag_data 0x0001 means the M flag is 1 and frag-offset
+        * is 0. This is the first fragment of a fragmented packet.
+        */
+       if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "match on first fragment not "
+                                         "supported");
+       if (frag_data_spec && !last)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "specified value not supported");
+       ret = mlx5_flow_item_acceptable
+                               (item, (const uint8_t *)mask,
+                                (const uint8_t *)&nic_mask,
+                                sizeof(struct rte_flow_item_ipv6_frag_ext),
+                                MLX5_ITEM_RANGE_ACCEPTED, error);
+       if (ret)
+               return ret;
+       /* spec and last are valid, validate the specified range. */
+       frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
+       /*
+        * Match on frag_data spec 0x0009 and last 0xfff9
+        * means M is 1 and frag-offset is > 0.
+        * This matches the second fragment onward, excluding the last.
+        * This is not yet supported in MLX5; return an appropriate
+        * error message.
+        */
+       if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
+                                      RTE_IPV6_EHDR_MF_MASK) &&
+           frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                         last, "match on following "
+                                         "fragments not supported");
+       /*
+        * Match on frag_data spec 0x0008 and last 0xfff8
+        * means M is 0 and frag-offset is > 0.
+        * This matches the last fragment of a fragmented packet.
+        * This is not yet supported in MLX5; return an appropriate
+        * error message.
+        */
+       if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
+           frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                         last, "match on last "
+                                         "fragment not supported");
+       /* Other range values are invalid and rejected. */
+       return rte_flow_error_set(error, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+                                 "specified range not supported");
 }
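+
+/*
+ * Illustrative sketch (not part of the driver): with the checks above,
+ * the practical use of this item is matching the mere presence of the
+ * IPv6 fragment extension header (optionally with next_header); every
+ * non-zero frag_data spec/last combination is rejected:
+ *
+ *     const struct rte_flow_item item = {
+ *             .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+ *             .spec = NULL,
+ *             .last = NULL,
+ *             .mask = NULL,
+ *     };
+ */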
 
 /**
@@ -3946,14 +4179,14 @@ flow_dv_validate_action_age(uint64_t action_flags,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
-       if (age->timeout >= UINT16_MAX / 2 / 10)
-               return rte_flow_error_set(error, ENOTSUP,
+       if (!(age->timeout))
+               return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                         "Max age time: 3275 seconds");
+                                         "invalid timeout value 0");
        if (action_flags & MLX5_FLOW_ACTION_AGE)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-                                         "Duplicate age ctions set");
+                                         "duplicate age actions set");
        return 0;
 }
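+
+/*
+ * Illustrative sketch (not part of the driver): with the relaxed check
+ * above, any non-zero timeout (in seconds) is accepted; the former
+ * "Max age time: 3275 seconds" bound no longer applies:
+ *
+ *     const struct rte_flow_action_age age = {
+ *             .timeout = 7200,  // two hours, beyond the old limit
+ *             .context = NULL,  // NULL: PMD falls back to the flow index
+ *     };
+ */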
 
@@ -4693,6 +4926,7 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
        TAILQ_INIT(&pool->counters[1]);
        TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
        pool->index = n_valid;
+       pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
        cont->pools[n_valid] = pool;
        if (!batch) {
                int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
@@ -4815,7 +5049,7 @@ retry:
                               (MLX5_CNT_CONTAINER
                               (priv->sh, batch, (age ^ 0x1)), dcs->id);
                        /*
-                        * Pool eixsts, counter will be added to the other
+                        * Pool exists, counter will be added to the other
                         * container, need to reallocate it later.
                         */
                        if (pool) {
@@ -5096,13 +5330,13 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
        struct mlx5_age_info *age_info;
        struct mlx5_age_param *age_param;
        struct mlx5_priv *priv = dev->data->dev_private;
+       uint16_t expected = AGE_CANDIDATE;
 
        age_info = GET_PORT_AGE_INFO(priv);
        age_param = flow_dv_counter_idx_get_age(dev, counter);
-       if (rte_atomic16_cmpset((volatile uint16_t *)
-                       &age_param->state,
-                       AGE_CANDIDATE, AGE_FREE)
-                       != AGE_CANDIDATE) {
+       if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+                                        AGE_FREE, false, __ATOMIC_RELAXED,
+                                        __ATOMIC_RELAXED)) {
                /**
                 * We need the lock even if it is an age timeout,
                 * since the counter may still be in process.
@@ -5110,9 +5344,10 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
                rte_spinlock_lock(&age_info->aged_sl);
                TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
                rte_spinlock_unlock(&age_info->aged_sl);
+               __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
        }
-       rte_atomic16_set(&age_param->state, AGE_FREE);
 }
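+
+/*
+ * Note on the __atomic builtin replacing rte_atomic16_cmpset() above
+ * (illustrative, not part of the driver): the expected value is passed
+ * by pointer and the call returns a boolean; on failure it writes back
+ * the value actually observed:
+ *
+ *     uint16_t expected = AGE_CANDIDATE;
+ *     bool swapped = __atomic_compare_exchange_n(&age_param->state,
+ *                             &expected, AGE_FREE, false,
+ *                             __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ *     // if (!swapped), 'expected' holds the state actually observed
+ */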
+
 /**
  * Release a flow counter.
  *
@@ -5286,15 +5521,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        .dst_port = RTE_BE16(UINT16_MAX),
                }
        };
-       const struct rte_flow_item_ipv4 nic_ipv4_mask = {
-               .hdr = {
-                       .src_addr = RTE_BE32(0xffffffff),
-                       .dst_addr = RTE_BE32(0xffffffff),
-                       .type_of_service = 0xff,
-                       .next_proto_id = 0xff,
-                       .time_to_live = 0xff,
-               },
-       };
        const struct rte_flow_item_ipv6 nic_ipv6_mask = {
                .hdr = {
                        .src_addr =
@@ -5307,6 +5533,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
+               .has_frag_ext = 1,
        };
        const struct rte_flow_item_ecpri nic_ecpri_mask = {
                .hdr = {
@@ -5394,11 +5621,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        mlx5_flow_tunnel_ip_check(items, next_protocol,
                                                  &item_flags, &tunnel);
-                       ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-                                                          last_item,
-                                                          ether_type,
-                                                          &nic_ipv4_mask,
-                                                          error);
+                       ret = flow_dv_validate_item_ipv4(items, item_flags,
+                                                        last_item, ether_type,
+                                                        error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -5446,6 +5671,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                next_protocol = 0xff;
                        }
                        break;
+               case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                       ret = flow_dv_validate_item_ipv6_frag_ext(items,
+                                                                 item_flags,
+                                                                 error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = tunnel ?
+                                       MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+                       if (items->mask != NULL &&
+                           ((const struct rte_flow_item_ipv6_frag_ext *)
+                            items->mask)->hdr.next_header) {
+                               next_protocol =
+                               ((const struct rte_flow_item_ipv6_frag_ext *)
+                                items->spec)->hdr.next_header;
+                               next_protocol &=
+                               ((const struct rte_flow_item_ipv6_frag_ext *)
+                                items->mask)->hdr.next_header;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
+                       break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
@@ -6507,6 +6755,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
                 ipv4_m->hdr.time_to_live);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
                 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+                !!(ipv4_m->hdr.fragment_offset));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+                !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
 }
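+
+/*
+ * Illustrative note (not part of the driver): the "frag" bit programmed
+ * above only classifies fragmented vs. non-fragmented at the HW level.
+ * E.g. spec fragment_offset = 0 with mask RTE_BE16(0x3fff) yields
+ * frag m=1, v=0 (match non-fragmented packets only), while any non-zero
+ * masked spec yields frag m=1, v=1 (match fragmented packets only).
+ */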
 
 /**
@@ -6622,6 +6874,61 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
                 ipv6_m->hdr.hop_limits);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
                 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+                !!(ipv6_m->has_frag_ext));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+                !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
+}
+
+/**
+ * Add IPV6 fragment extension item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
+                                    const struct rte_flow_item *item,
+                                    int inner)
+{
+       const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
+       const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
+       const struct rte_flow_item_ipv6_frag_ext nic_mask = {
+               .hdr = {
+                       .next_header = 0xff,
+                       .frag_data = RTE_BE16(0xffff),
+               },
+       };
+       void *headers_m;
+       void *headers_v;
+
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       /* IPv6 fragment extension item exists, so packet is IP fragment. */
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
+       if (!ipv6_frag_ext_v)
+               return;
+       if (!ipv6_frag_ext_m)
+               ipv6_frag_ext_m = &nic_mask;
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+                ipv6_frag_ext_m->hdr.next_header);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+                ipv6_frag_ext_v->hdr.next_header &
+                ipv6_frag_ext_m->hdr.next_header);
 }
 
 /**
@@ -7556,12 +7863,6 @@ flow_dv_translate_item_icmp6(void *matcher, void *key,
                return;
        if (!icmp6_m)
                icmp6_m = &rte_flow_item_icmp6_mask;
-       /*
-        * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
-        * If only the protocol is specified, no need to match the frag.
-        */
-       MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
-       MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
        MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
        MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
                 icmp6_v->type & icmp6_m->type);
@@ -7611,12 +7912,6 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
                return;
        if (!icmp_m)
                icmp_m = &rte_flow_item_icmp_mask;
-       /*
-        * Force flow only to match the non-fragmented IPv4 ICMP packets.
-        * If only the protocol is specified, no need to match the frag.
-        */
-       MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
-       MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
        MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
                 icmp_m->hdr.icmp_type);
        MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
@@ -8239,22 +8534,12 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
        if (!counter || age == NULL)
                return counter;
        age_param  = flow_dv_counter_idx_get_age(dev, counter);
-       /*
-        * The counter age accuracy may have a bit delay. Have 3/4
-        * second bias on the timeount in order to let it age in time.
-        */
        age_param->context = age->context ? age->context :
                (void *)(uintptr_t)(dev_flow->flow_idx);
-       /*
-        * The counter age accuracy may have a bit delay. Have 3/4
-        * second bias on the timeount in order to let it age in time.
-        */
-       age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
-       /* Set expire time in unit of 0.1 sec. */
+       age_param->timeout = age->timeout;
        age_param->port_id = dev->data->port_id;
-       age_param->expire = age_param->timeout +
-                       rte_rdtsc() / (rte_get_tsc_hz() / 10);
-       rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
+       __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
        return counter;
 }
 /**
@@ -8390,9 +8675,9 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
  */
 static struct mlx5_hrxq *
 flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
-                         struct mlx5_flow *dev_flow,
-                         struct mlx5_flow_rss_desc *rss_desc,
-                         uint32_t *hrxq_idx)
+                       struct mlx5_flow *dev_flow,
+                       struct mlx5_flow_rss_desc *rss_desc,
+                       uint32_t *hrxq_idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_handle *dh = dev_flow->handle;
@@ -8400,19 +8685,19 @@ flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
 
        MLX5_ASSERT(rss_desc->queue_num);
        *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-                                MLX5_RSS_HASH_KEY_LEN,
-                                dev_flow->hash_fields,
-                                rss_desc->queue,
-                                rss_desc->queue_num);
+                                 MLX5_RSS_HASH_KEY_LEN,
+                                 dev_flow->hash_fields,
+                                 rss_desc->queue,
+                                 rss_desc->queue_num);
        if (!*hrxq_idx) {
                *hrxq_idx = mlx5_hrxq_new
                                (dev, rss_desc->key,
-                               MLX5_RSS_HASH_KEY_LEN,
-                               dev_flow->hash_fields,
-                               rss_desc->queue,
-                               rss_desc->queue_num,
-                               !!(dh->layers &
-                               MLX5_FLOW_LAYER_TUNNEL));
+                                MLX5_RSS_HASH_KEY_LEN,
+                                dev_flow->hash_fields,
+                                rss_desc->queue,
+                                rss_desc->queue_num,
+                                !!(dh->layers &
+                                MLX5_FLOW_LAYER_TUNNEL));
                if (!*hrxq_idx)
                        return NULL;
        }
@@ -8566,6 +8851,154 @@ error:
        return -rte_errno;
 }
 
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[in] resource
+ *   Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        struct mlx5_flow_dv_dest_array_resource *resource,
+                        struct mlx5_flow *dev_flow,
+                        struct rte_flow_error *error)
+{
+       struct mlx5_flow_dv_dest_array_resource *cache_resource;
+       struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+       struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_flow_sub_actions_list *sample_act;
+       struct mlx5dv_dr_domain *domain;
+       uint32_t idx = 0;
+
+       /* Lookup a matching resource from cache. */
+       ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                     sh->dest_array_list,
+                     idx, cache_resource, next) {
+               if (resource->num_of_dest == cache_resource->num_of_dest &&
+                   resource->ft_type == cache_resource->ft_type &&
+                   !memcmp((void *)cache_resource->sample_act,
+                           (void *)resource->sample_act,
+                          (resource->num_of_dest *
+                          sizeof(struct mlx5_flow_sub_actions_list)))) {
+                       DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
+                               (void *)cache_resource,
+                               __atomic_load_n(&cache_resource->refcnt,
+                                               __ATOMIC_RELAXED));
+                       __atomic_fetch_add(&cache_resource->refcnt, 1,
+                                          __ATOMIC_RELAXED);
+                       dev_flow->handle->dvh.rix_dest_array = idx;
+                       dev_flow->dv.dest_array_res = cache_resource;
+                       return 0;
+               }
+       }
+       /* Register new destination array resource. */
+       cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                                      &dev_flow->handle->dvh.rix_dest_array);
+       if (!cache_resource)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "cannot allocate resource memory");
+       *cache_resource = *resource;
+       if (attr->transfer)
+               domain = sh->fdb_domain;
+       else if (attr->ingress)
+               domain = sh->rx_domain;
+       else
+               domain = sh->tx_domain;
+       for (idx = 0; idx < resource->num_of_dest; idx++) {
+               dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+                                mlx5_malloc(MLX5_MEM_ZERO,
+                                sizeof(struct mlx5dv_dr_action_dest_attr),
+                                0, SOCKET_ID_ANY);
+               if (!dest_attr[idx]) {
+                       rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL,
+                                          "cannot allocate resource memory");
+                       goto error;
+               }
+               dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+               sample_act = &resource->sample_act[idx];
+               if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+                       dest_attr[idx]->dest = sample_act->dr_queue_action;
+               } else if (sample_act->action_flags ==
+                         (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+                       dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+                       dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+                       dest_attr[idx]->dest_reformat->reformat =
+                                       sample_act->dr_encap_action;
+                       dest_attr[idx]->dest_reformat->dest =
+                                       sample_act->dr_port_id_action;
+               } else if (sample_act->action_flags ==
+                          MLX5_FLOW_ACTION_PORT_ID) {
+                       dest_attr[idx]->dest = sample_act->dr_port_id_action;
+               }
+       }
+       /* Create a dest array action. */
+       cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+                                               (domain,
+                                                cache_resource->num_of_dest,
+                                                dest_attr);
+       if (!cache_resource->action) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL,
+                                  "cannot create destination array action");
+               goto error;
+       }
+       __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+       ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                    &sh->dest_array_list,
+                    dev_flow->handle->dvh.rix_dest_array, cache_resource,
+                    next);
+       dev_flow->dv.dest_array_res = cache_resource;
+       DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
+               (void *)cache_resource,
+               __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+       for (idx = 0; idx < resource->num_of_dest; idx++)
+               mlx5_free(dest_attr[idx]);
+       return 0;
+error:
+       for (idx = 0; idx < resource->num_of_dest; idx++) {
+               struct mlx5_flow_sub_actions_idx *act_res =
+                                       &cache_resource->sample_idx[idx];
+               if (act_res->rix_hrxq &&
+                   !mlx5_hrxq_release(dev,
+                               act_res->rix_hrxq))
+                       act_res->rix_hrxq = 0;
+               if (act_res->rix_encap_decap &&
+                       !flow_dv_encap_decap_resource_release(dev,
+                               act_res->rix_encap_decap))
+                       act_res->rix_encap_decap = 0;
+               if (act_res->rix_port_id_action &&
+                       !flow_dv_port_id_action_resource_release(dev,
+                               act_res->rix_port_id_action))
+                       act_res->rix_port_id_action = 0;
+               if (dest_attr[idx])
+                       mlx5_free(dest_attr[idx]);
+       }
+
+       mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                               dev_flow->handle->dvh.rix_dest_array);
+       dev_flow->handle->dvh.rix_dest_array = 0;
+       return -rte_errno;
+}
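+
+/*
+ * Illustrative usage (not part of the driver; field values are
+ * placeholders): callers fill a resource template and register it; a
+ * cache hit only bumps the refcnt, otherwise a new DR dest array action
+ * is created with refcnt 1:
+ *
+ *     struct mlx5_flow_dv_dest_array_resource mdest_res = {
+ *             .num_of_dest = 2,
+ *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB,
+ *     };
+ *     // sample_act[] filled with the sampler and normal-path actions
+ *     if (flow_dv_dest_array_resource_register(dev, attr, &mdest_res,
+ *                                              dev_flow, error))
+ *             return -rte_errno;
+ */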
+
 /**
  * Convert Sample action to DV specification.
  *
@@ -8577,6 +9010,8 @@ error:
  *   Pointer to the mlx5_flow.
  * @param[in] attr
  *   Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ *   Pointer to the number of destinations.
  * @param[in, out] sample_actions
  *   Pointer to sample actions list.
  * @param[in, out] res
@@ -8592,6 +9027,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
+                               uint32_t *num_of_dest,
                                void **sample_actions,
                                struct mlx5_flow_dv_sample_resource *res,
                                struct rte_flow_error *error)
@@ -8637,6 +9073,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                        sample_idx->rix_hrxq = hrxq_idx;
                        sample_actions[sample_act->actions_num++] =
                                                hrxq->action;
+                       (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
@@ -8649,6 +9086,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                        uint32_t tag_be = mlx5_flow_mark_set
                                (((const struct rte_flow_action_mark *)
                                (sub_actions->conf))->id);
+
                        dev_flow->handle->mark = 1;
                        pre_rix = dev_flow->handle->dvh.rix_tag;
                        /* Save the mark resource before sample */
@@ -8691,6 +9129,56 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_PORT_ID:
+               {
+                       struct mlx5_flow_dv_port_id_action_resource
+                                       port_id_resource;
+                       uint32_t port_id = 0;
+
+                       memset(&port_id_resource, 0, sizeof(port_id_resource));
+                       /* Save the port id resource before sample */
+                       pre_rix = dev_flow->handle->rix_port_id_action;
+                       pre_r = dev_flow->dv.port_id_action;
+                       if (flow_dv_translate_action_port_id(dev, sub_actions,
+                                                            &port_id, error))
+                               return -rte_errno;
+                       port_id_resource.port_id = port_id;
+                       if (flow_dv_port_id_action_resource_register
+                           (dev, &port_id_resource, dev_flow, error))
+                               return -rte_errno;
+                       sample_act->dr_port_id_action =
+                               dev_flow->dv.port_id_action->action;
+                       sample_idx->rix_port_id_action =
+                               dev_flow->handle->rix_port_id_action;
+                       sample_actions[sample_act->actions_num++] =
+                                               sample_act->dr_port_id_action;
+                       /* Recover the port id resource after sample */
+                       dev_flow->dv.port_id_action = pre_r;
+                       dev_flow->handle->rix_port_id_action = pre_rix;
+                       (*num_of_dest)++;
+                       action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+                       /* Save the encap resource before sample */
+                       pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+                       pre_r = dev_flow->dv.encap_decap;
+                       if (flow_dv_create_action_l2_encap(dev, sub_actions,
+                                                          dev_flow,
+                                                          attr->transfer,
+                                                          error))
+                               return -rte_errno;
+                       sample_act->dr_encap_action =
+                               dev_flow->dv.encap_decap->action;
+                       sample_idx->rix_encap_decap =
+                               dev_flow->handle->dvh.rix_encap_decap;
+                       sample_actions[sample_act->actions_num++] =
+                                               sample_act->dr_encap_action;
+                       /* Recover the encap resource after sample */
+                       dev_flow->dv.encap_decap = pre_r;
+                       dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+                       action_flags |= MLX5_FLOW_ACTION_ENCAP;
+                       break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -8729,10 +9217,16 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
  *   Pointer to the mlx5_flow.
  * @param[in] attr
  *   Pointer to the flow attributes.
+ * @param[in] num_of_dest
+ *   The number of destinations.
  * @param[in, out] res
  *   Pointer to sample resource.
+ * @param[in, out] mdest_res
+ *   Pointer to destination array resource.
  * @param[in] sample_actions
  *   Pointer to sample path actions list.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
  * @param[out] error
  *   Pointer to the error structure.
  *
@@ -8741,17 +9235,81 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
  */
 static int
 flow_dv_create_action_sample(struct rte_eth_dev *dev,
-                               struct mlx5_flow *dev_flow,
-                               const struct rte_flow_attr *attr,
-                               struct mlx5_flow_dv_sample_resource *res,
-                               void **sample_actions,
-                               struct rte_flow_error *error)
+                            struct mlx5_flow *dev_flow,
+                            const struct rte_flow_attr *attr,
+                            uint32_t num_of_dest,
+                            struct mlx5_flow_dv_sample_resource *res,
+                            struct mlx5_flow_dv_dest_array_resource *mdest_res,
+                            void **sample_actions,
+                            uint64_t action_flags,
+                            struct rte_flow_error *error)
 {
-       if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
-                                               sample_actions, error))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         NULL, "can't create sample action");
+       struct mlx5_priv *priv = dev->data->dev_private;
+       /* Update normal path action resource into last index of array. */
+       uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+       struct mlx5_flow_sub_actions_list *sample_act =
+                                       &mdest_res->sample_act[dest_index];
+       struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+                                             priv->rss_desc)
+                                             [!!priv->flow_nested_idx];
+       uint32_t normal_idx = 0;
+       struct mlx5_hrxq *hrxq;
+       uint32_t hrxq_idx;
+
+       if (num_of_dest > 1) {
+               if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+                       /* Handle QP action for mirroring */
+                       hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+                                                      rss_desc, &hrxq_idx);
+                       if (!hrxq)
+                               return rte_flow_error_set
+                                    (error, rte_errno,
+                                     RTE_FLOW_ERROR_TYPE_ACTION,
+                                     NULL,
+                                     "cannot create rx queue");
+                       normal_idx++;
+                       mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+                       sample_act->dr_queue_action = hrxq->action;
+                       if (action_flags & MLX5_FLOW_ACTION_MARK)
+                               dev_flow->handle->rix_hrxq = hrxq_idx;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+               }
+               if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+                       normal_idx++;
+                       mdest_res->sample_idx[dest_index].rix_encap_decap =
+                               dev_flow->handle->dvh.rix_encap_decap;
+                       sample_act->dr_encap_action =
+                               dev_flow->dv.encap_decap->action;
+               }
+               if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+                       normal_idx++;
+                       mdest_res->sample_idx[dest_index].rix_port_id_action =
+                               dev_flow->handle->rix_port_id_action;
+                       sample_act->dr_port_id_action =
+                               dev_flow->dv.port_id_action->action;
+               }
+               sample_act->actions_num = normal_idx;
+               /* Update sample action resource into first index of array. */
+               mdest_res->ft_type = res->ft_type;
+               memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+                               sizeof(struct mlx5_flow_sub_actions_idx));
+               memcpy(&mdest_res->sample_act[0], &res->sample_act,
+                               sizeof(struct mlx5_flow_sub_actions_list));
+               mdest_res->num_of_dest = num_of_dest;
+               if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+                                                        dev_flow, error))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL, "can't create sample "
+                                                 "action");
+       } else {
+               if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
+                                                    sample_actions, error))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL,
+                                                 "can't create sample action");
+       }
        return 0;
 }
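+
+/*
+ * Illustrative sketch (not part of the driver; mirror_actions and
+ * port_conf are placeholders): a sample action with ratio 1 combined
+ * with a port_id fate yields num_of_dest == 2 (sampler path + normal
+ * path), so the dest array branch above is taken and both destinations
+ * are programmed as a single DR action:
+ *
+ *     struct rte_flow_action_sample sample_conf = {
+ *             .ratio = 1,        // mirror every packet
+ *             .actions = mirror_actions,
+ *     };
+ *     const struct rte_flow_action actions[] = {
+ *             { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_conf },
+ *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port_conf },
+ *             { .type = RTE_FLOW_ACTION_TYPE_END },
+ *     };
+ */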
 
@@ -8819,15 +9377,22 @@ __flow_dv_translate(struct rte_eth_dev *dev,
        void *match_value = dev_flow->dv.value.buf;
        uint8_t next_protocol = 0xff;
        struct rte_vlan_hdr vlan = { 0 };
+       struct mlx5_flow_dv_dest_array_resource mdest_res;
        struct mlx5_flow_dv_sample_resource sample_res;
        void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+       struct mlx5_flow_sub_actions_list *sample_act;
        uint32_t sample_act_pos = UINT32_MAX;
+       uint32_t num_of_dest = 0;
+       int tmp_actions_n = 0;
        uint32_t table;
        int ret = 0;
 
+       memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
        memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
        mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+       /* Update normal path action resource into last index of array. */
+       sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
        ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
                                       !!priv->fdb_def_rule, &table, error);
        if (ret)
@@ -8874,6 +9439,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                        dev_flow->dv.port_id_action->action;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+                       sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+                       num_of_dest++;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
@@ -8959,6 +9526,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        rss_desc->queue[0] = queue->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+                       sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+                       num_of_dest++;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rss = actions->conf;
@@ -9046,6 +9615,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        dev_flow->dv.actions[actions_n++] =
                                        dev_flow->dv.encap_decap->action;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
+                       if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+                               sample_act->action_flags |=
+                                                       MLX5_FLOW_ACTION_ENCAP;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
@@ -9075,6 +9647,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                        dev_flow->dv.encap_decap->action;
                        }
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
+                       if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+                               sample_act->action_flags |=
+                                                       MLX5_FLOW_ACTION_ENCAP;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
@@ -9267,6 +9842,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        ret = flow_dv_translate_action_sample(dev,
                                                              actions,
                                                              dev_flow, attr,
+                                                             &num_of_dest,
                                                              sample_actions,
                                                              &sample_res,
                                                              error);
@@ -9274,6 +9850,11 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                return ret;
                        actions_n++;
                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+                       /* Put encap action into group if it works with port id. */
+                       if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+                           (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+                               sample_act->action_flags |=
+                                                       MLX5_FLOW_ACTION_ENCAP;
                        break;
                case RTE_FLOW_ACTION_TYPE_END:
                        actions_end = true;
@@ -9297,15 +9878,19 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                                NULL,
                                                "cannot create counter"
                                                " object.");
-                               dev_flow->dv.actions[actions_n++] =
+                               dev_flow->dv.actions[actions_n] =
                                          (flow_dv_counter_get_by_idx(dev,
                                          flow->counter, NULL))->action;
+                               actions_n++;
                        }
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
                                ret = flow_dv_create_action_sample(dev,
                                                          dev_flow, attr,
+                                                         num_of_dest,
                                                          &sample_res,
+                                                         &mdest_res,
                                                          sample_actions,
+                                                         action_flags,
                                                          error);
                                if (ret < 0)
                                        return rte_flow_error_set
@@ -9313,8 +9898,13 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "cannot create sample action");
-                               dev_flow->dv.actions[sample_act_pos] =
+                               if (num_of_dest > 1) {
+                                       dev_flow->dv.actions[sample_act_pos] =
+                                       dev_flow->dv.dest_array_res->action;
+                               } else {
+                                       dev_flow->dv.actions[sample_act_pos] =
                                        dev_flow->dv.sample_res->verbs_action;
+                               }
                        }
                        break;
                default:
@@ -9324,6 +9914,31 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                    modify_action_position == UINT32_MAX)
                        modify_action_position = actions_n++;
        }
+       /*
+        * For multiple destinations (sample action with ratio=1), the encap
+        * action and port id action will be combined into a group action.
+        * So we need to remove these original actions from the flow and
+        * use the sample action instead.
+        */
+       if (num_of_dest > 1 && sample_act->dr_port_id_action) {
+               int i;
+               void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+               for (i = 0; i < actions_n; i++) {
+                       if ((sample_act->dr_encap_action &&
+                               sample_act->dr_encap_action ==
+                               dev_flow->dv.actions[i]) ||
+                               (sample_act->dr_port_id_action &&
+                               sample_act->dr_port_id_action ==
+                               dev_flow->dv.actions[i]))
+                               continue;
+                       temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+               }
+               memcpy((void *)dev_flow->dv.actions,
+                               (void *)temp_actions,
+                               tmp_actions_n * sizeof(void *));
+               actions_n = tmp_actions_n;
+       }
        dev_flow->dv.actions_n = actions_n;
        dev_flow->act_flags = action_flags;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
@@ -9409,6 +10024,27 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                next_protocol = 0xff;
                        }
                        break;
+               case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                       flow_dv_translate_item_ipv6_frag_ext(match_mask,
+                                                            match_value,
+                                                            items, tunnel);
+                       last_item = tunnel ?
+                                       MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+                       if (items->mask != NULL &&
+                           ((const struct rte_flow_item_ipv6_frag_ext *)
+                            items->mask)->hdr.next_header) {
+                               next_protocol =
+                               ((const struct rte_flow_item_ipv6_frag_ext *)
+                                items->spec)->hdr.next_header;
+                               next_protocol &=
+                               ((const struct rte_flow_item_ipv6_frag_ext *)
+                                items->mask)->hdr.next_header;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
+                       break;
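
As with the other L3 items handled in this loop, the fragment extension case trusts only the masked bits of the next header field and falls back to 0xff when no mask is given. A small sketch of that derivation, with hypothetical parameter names:

#include <stdint.h>

/* Only bits covered by the mask are significant; without a mask the
 * protocol is reset to 0xff, i.e. undetermined for the inner layer. */
static uint8_t
derive_next_protocol(const uint8_t *spec_nh, const uint8_t *mask_nh)
{
	if (mask_nh != NULL && *mask_nh != 0)
		return *spec_nh & *mask_nh;
	return 0xff;
}
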
                case RTE_FLOW_ITEM_TYPE_TCP:
                        flow_dv_translate_item_tcp(match_mask, match_value,
                                                   items, tunnel);
@@ -9628,7 +10264,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                dv->actions[n++] = drop_hrxq->action;
                        }
                } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-                          !dv_h->rix_sample) {
+                          !dv_h->rix_sample && !dv_h->rix_dest_array) {
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;
                        struct mlx5_flow_rss_desc *rss_desc =
@@ -9914,11 +10550,11 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
  */
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
-                                       struct mlx5_flow_handle *handle)
+                                       uint32_t port_id)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_port_id_action_resource *cache_resource;
-       uint32_t idx = handle->rix_port_id_action;
+       uint32_t idx = port_id;
 
        cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
                                        idx);
@@ -10008,7 +10644,8 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
                flow_dv_jump_tbl_resource_release(dev, handle);
                break;
        case MLX5_FLOW_FATE_PORT_ID:
-               flow_dv_port_id_action_resource_release(dev, handle);
+               flow_dv_port_id_action_resource_release(dev,
+                               handle->rix_port_id_action);
                break;
        case MLX5_FLOW_FATE_DEFAULT_MISS:
                flow_dv_default_miss_resource_release(dev);
@@ -10086,6 +10723,74 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
        return 1;
 }
 
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+                                    struct mlx5_flow_handle *handle)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_dv_dest_array_resource *cache_resource;
+       struct mlx5_flow_sub_actions_idx *mdest_act_res;
+       uint32_t idx = handle->dvh.rix_dest_array;
+       uint32_t i = 0;
+
+       cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                        idx);
+       if (!cache_resource)
+               return 0;
+       MLX5_ASSERT(cache_resource->action);
+       DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
+               (void *)cache_resource,
+               __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+       if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+                              __ATOMIC_RELAXED) == 0) {
+               if (cache_resource->action)
+                       claim_zero(mlx5_glue->destroy_flow_action
+                                               (cache_resource->action));
+               for (; i < cache_resource->num_of_dest; i++) {
+                       mdest_act_res = &cache_resource->sample_idx[i];
+                       if (mdest_act_res->rix_hrxq) {
+                               mlx5_hrxq_release(dev,
+                                       mdest_act_res->rix_hrxq);
+                               mdest_act_res->rix_hrxq = 0;
+                       }
+                       if (mdest_act_res->rix_encap_decap) {
+                               flow_dv_encap_decap_resource_release(dev,
+                                       mdest_act_res->rix_encap_decap);
+                               mdest_act_res->rix_encap_decap = 0;
+                       }
+                       if (mdest_act_res->rix_port_id_action) {
+                               flow_dv_port_id_action_resource_release(dev,
+                                       mdest_act_res->rix_port_id_action);
+                               mdest_act_res->rix_port_id_action = 0;
+                       }
+                       if (mdest_act_res->rix_tag) {
+                               flow_dv_tag_release(dev,
+                                       mdest_act_res->rix_tag);
+                               mdest_act_res->rix_tag = 0;
+                       }
+               }
+               ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+                            &priv->sh->dest_array_list, idx,
+                            cache_resource, next);
+               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
+               DRV_LOG(DEBUG, "destination array resource %p: removed",
+                       (void *)cache_resource);
+               return 0;
+       }
+       return 1;
+}
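
The release above follows the same atomic reference-count pattern as the other resource release helpers in this file: decrement with relaxed ordering and let the holder of the last reference do the teardown. A generic sketch of the pattern (hypothetical resource type and destructor, mirroring the driver's relaxed ordering):

#include <stdatomic.h>

struct resource {
	atomic_uint refcnt;
};

/* Returns 1 while other references remain, 0 once freed, matching the
 * convention documented for the release helpers here. */
static int
resource_release(struct resource *res, void (*destroy)(struct resource *))
{
	/* atomic_fetch_sub returns the previous value, so 1 means this
	 * caller just dropped the last reference. */
	if (atomic_fetch_sub_explicit(&res->refcnt, 1,
				      memory_order_relaxed) == 1) {
		destroy(res);
		return 0;
	}
	return 1;
}
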
+
 /**
  * Remove the flow from the NIC but keeps it in memory.
  * Lock free, (mutex should be acquired by caller).
@@ -10167,6 +10872,8 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                        flow_dv_matcher_release(dev, dev_handle);
                if (dev_handle->dvh.rix_sample)
                        flow_dv_sample_resource_release(dev, dev_handle);
+               if (dev_handle->dvh.rix_dest_array)
+                       flow_dv_dest_array_resource_release(dev, dev_handle);
                if (dev_handle->dvh.rix_encap_decap)
                        flow_dv_encap_decap_resource_release(dev,
                                dev_handle->dvh.rix_encap_decap);
@@ -10240,6 +10947,52 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
                                  "counters are not available");
 }
 
+/**
+ * Query a flow rule AGE action for aging information.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Pointer to the sub flow.
+ * @param[out] data
+ *   Data retrieved by the query.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
+                 void *data, struct rte_flow_error *error)
+{
+       struct rte_flow_query_age *resp = data;
+
+       if (flow->counter) {
+               struct mlx5_age_param *age_param =
+                               flow_dv_counter_idx_get_age(dev, flow->counter);
+
+               if (!age_param || !age_param->timeout)
+                       return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                        NULL, "cannot read age data");
+               resp->aged = __atomic_load_n(&age_param->state,
+                                            __ATOMIC_RELAXED) ==
+                                                       AGE_TMOUT ? 1 : 0;
+               resp->sec_since_last_hit_valid = !resp->aged;
+               if (resp->sec_since_last_hit_valid)
+                       resp->sec_since_last_hit =
+                               __atomic_load_n(&age_param->sec_since_last_hit,
+                                               __ATOMIC_RELAXED);
+               return 0;
+       }
+       return rte_flow_error_set(error, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "age data not available");
+}
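
From the application side, this query path is reached through the generic rte_flow API by passing an AGE action to rte_flow_query(). A short usage sketch (error handling trimmed):

#include <stdio.h>
#include <rte_flow.h>

static void
print_flow_age(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
	};
	struct rte_flow_query_age resp;
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, &action, &resp, &error)) {
		printf("query failed: %s\n",
		       error.message ? error.message : "(no message)");
		return;
	}
	if (resp.aged)
		printf("flow aged out\n");
	else if (resp.sec_since_last_hit_valid)
		printf("last hit %u seconds ago\n",
		       (unsigned int)resp.sec_since_last_hit);
}
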
+
 /**
  * Query a flow.
  *
@@ -10262,6 +11015,9 @@ flow_dv_query(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_dv_query_count(dev, flow, data, error);
                        break;
+               case RTE_FLOW_ACTION_TYPE_AGE:
+                       ret = flow_dv_query_age(dev, flow, data, error);
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,