net/mlx5: enlarge maximal flow priority
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cd58135..0197a07 100644
@@ -12,7 +12,7 @@
 
 #include <rte_common.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_eal_paging.h>
 #include <rte_flow.h>
 #include <rte_cycles.h>
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+struct tunnel_default_miss_ctx {
+       uint16_t *queue;
+       __extension__
+       union {
+               struct rte_flow_action_rss action_rss;
+               struct rte_flow_action_queue miss_queue;
+               struct rte_flow_action_jump miss_jump;
+               uint8_t raw[0];
+       };
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+                            struct rte_flow *flow,
+                            const struct rte_flow_attr *attr,
+                            const struct rte_flow_action *app_actions,
+                            uint32_t flow_idx,
+                            struct tunnel_default_miss_ctx *ctx,
+                            struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-                    const struct rte_flow_tunnel *app_tunnel,
-                    struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+                               const struct mlx5_flow_tunnel *tunnel,
+                               uint32_t group, uint32_t *table,
+                               struct rte_flow_error *error);
+
+static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
+static void mlx5_flow_pop_thread_workspace(void);
 
 
 /** Device flow drivers. */
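
The tunnel_default_miss_ctx introduced above lets the default-miss helper carry
whichever fate-action configuration it ends up building: the anonymous union
overlays the RSS, queue and jump layouts, and raw[0] exposes the same storage
as bytes. A minimal usage sketch, assuming caller-provided queues (the names
n_queues and queues are illustrative, not from this patch):

    struct tunnel_default_miss_ctx ctx = { .queue = queues };

    /* Build an RSS default-miss action in place; the other union members
     * would be filled instead for queue or jump fates. */
    ctx.action_rss.queue_num = n_queues;
    ctx.action_rss.queue = ctx.queue;
    ctx.action_rss.types = 0; /* default hash fields */
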
@@ -52,7 +73,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
 
 const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 #endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
@@ -191,6 +212,8 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
        return ret;
 }
 
+#define MLX5_RSS_EXP_ELT_N 8
+
 /**
  * Expand RSS flows into several possible flows according to the RSS hash
  * fields requested and the driver capabilities.
@@ -221,13 +244,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                     const struct mlx5_flow_expand_node graph[],
                     int graph_root_index)
 {
-       const int elt_n = 8;
        const struct rte_flow_item *item;
        const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
-       const int *stack[elt_n];
+       const int *stack[MLX5_RSS_EXP_ELT_N];
        int stack_pos = 0;
-       struct rte_flow_item flow_items[elt_n];
+       struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
@@ -240,10 +262,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 
        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
-               elt_n * sizeof(buf->entry[0]);
+               MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
-               buf->entry[0].pattern = (void *)&buf->entry[elt_n];
+               buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
@@ -346,7 +368,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
-                       if (stack_pos++ == elt_n) {
+                       if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
@@ -377,8 +399,6 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
-                       addr = (void *)(((uintptr_t)addr) +
-                                       elt * sizeof(*item));
                }
        }
        return lsize;
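
Promoting the old local elt_n to MLX5_RSS_EXP_ELT_N keeps the three users of
the same bound (the traversal stack, the scratch item array, and the sizing of
buf->entry[]) in sync; the dropped trailing addr advance was a dead store after
the final rte_memcpy(). A sketch of the sizing logic under that bound,
mirroring the function body above:

    /* Header plus MLX5_RSS_EXP_ELT_N entries must fit in the caller's
     * buffer; expanded patterns are then written right past the array. */
    size_t lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
                   MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
    if (lsize <= size)
            addr = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
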
@@ -580,162 +600,32 @@ static int mlx5_shared_action_query
                                 const struct rte_flow_shared_action *action,
                                 void *data,
                                 struct rte_flow_error *error);
-static inline bool
-mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
-                         struct rte_flow_tunnel *tunnel,
-                         const char *err_msg)
-{
-       err_msg = NULL;
-       if (!is_tunnel_offload_active(dev)) {
-               err_msg = "tunnel offload was not activated";
-               goto out;
-       } else if (!tunnel) {
-               err_msg = "no application tunnel";
-               goto out;
-       }
-
-       switch (tunnel->type) {
-       default:
-               err_msg = "unsupported tunnel type";
-               goto out;
-       case RTE_FLOW_ITEM_TYPE_VXLAN:
-               break;
-       }
-
-out:
-       return !err_msg;
-}
-
-
 static int
 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    struct rte_flow_tunnel *app_tunnel,
                    struct rte_flow_action **actions,
                    uint32_t *num_of_actions,
-                   struct rte_flow_error *error)
-{
-       int ret;
-       struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-                                         err_msg);
-       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-       if (ret < 0) {
-               return rte_flow_error_set(error, ret,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-                                         "failed to initialize pmd tunnel");
-       }
-       *actions = &tunnel->action;
-       *num_of_actions = 1;
-       return 0;
-}
-
+                   struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       struct rte_flow_tunnel *app_tunnel,
                       struct rte_flow_item **items,
                       uint32_t *num_of_items,
-                      struct rte_flow_error *error)
-{
-       int ret;
-       struct mlx5_flow_tunnel *tunnel;
-       const char *err_msg = NULL;
-       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-       if (!verdict)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         err_msg);
-       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-       if (ret < 0) {
-               return rte_flow_error_set(error, ret,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         "failed to initialize pmd tunnel");
-       }
-       *items = &tunnel->item;
-       *num_of_items = 1;
-       return 0;
-}
-
+                      struct rte_flow_error *error);
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-                      struct rte_flow_item *pmd_items,
-                      uint32_t num_items, struct rte_flow_error *err)
-{
-       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-       struct mlx5_flow_tunnel *tun;
-
-       LIST_FOREACH(tun, &thub->tunnels, chain) {
-               if (&tun->item == pmd_items)
-                       break;
-       }
-       if (!tun || num_items != 1)
-               return rte_flow_error_set(err, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         "invalid argument");
-       if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-               mlx5_flow_tunnel_free(dev, tun);
-       return 0;
-}
-
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+                             struct rte_flow_item *pmd_items,
+                             uint32_t num_items, struct rte_flow_error *err);
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-                        struct rte_flow_action *pmd_actions,
-                        uint32_t num_actions, struct rte_flow_error *err)
-{
-       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-       struct mlx5_flow_tunnel *tun;
-
-       LIST_FOREACH(tun, &thub->tunnels, chain) {
-               if (&tun->action == pmd_actions)
-                       break;
-       }
-       if (!tun || num_actions != 1)
-               return rte_flow_error_set(err, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                         "invalid argument");
-       if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-               mlx5_flow_tunnel_free(dev, tun);
-
-       return 0;
-}
-
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+                               struct rte_flow_action *pmd_actions,
+                               uint32_t num_actions,
+                               struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
-                                 struct rte_flow_error *err)
-{
-       uint64_t ol_flags = m->ol_flags;
-       const struct mlx5_flow_tbl_data_entry *tble;
-       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
-       if ((ol_flags & mask) != mask)
-               goto err;
-       tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
-       if (!tble) {
-               DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
-                       dev->data->port_id, m->hash.fdir.hi);
-               goto err;
-       }
-       MLX5_ASSERT(tble->tunnel);
-       memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
-       info->group_id = tble->group_id;
-       info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
-                     RTE_FLOW_RESTORE_INFO_GROUP_ID |
-                     RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
-       return 0;
-
-err:
-       return rte_flow_error_set(err, EINVAL,
-                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                 "failed to get restore info");
-}
+                                 struct rte_flow_error *err);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
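
Worth noting about the block removed above: mlx5_flow_tunnel_validate() took
err_msg by value, so the message it assigned never reached its callers, which
then passed their still-NULL local to rte_flow_error_set(). A sketch of the
output-parameter form the same checks would need (a hypothetical helper, not
the code this patch adds):

    static bool
    tunnel_validate(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel,
                    const char **err_msg) /* by reference: caller sees it */
    {
            if (!is_tunnel_offload_active(dev)) {
                    *err_msg = "tunnel offload was not activated";
                    return false;
            }
            if (!tunnel) {
                    *err_msg = "no application tunnel";
                    return false;
            }
            if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                    *err_msg = "unsupported tunnel type";
                    return false;
            }
            return true;
    }
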
@@ -752,37 +642,11 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .shared_action_query = mlx5_shared_action_query,
        .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
        .tunnel_match = mlx5_flow_tunnel_match,
-       .tunnel_action_decap_release = mlx5_flow_action_release,
-       .tunnel_item_release = mlx5_flow_item_release,
+       .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+       .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
-/* Convert FDIR request to Generic flow. */
-struct mlx5_fdir {
-       struct rte_flow_attr attr;
-       struct rte_flow_item items[4];
-       struct rte_flow_item_eth l2;
-       struct rte_flow_item_eth l2_mask;
-       union {
-               struct rte_flow_item_ipv4 ipv4;
-               struct rte_flow_item_ipv6 ipv6;
-       } l3;
-       union {
-               struct rte_flow_item_ipv4 ipv4;
-               struct rte_flow_item_ipv6 ipv6;
-       } l3_mask;
-       union {
-               struct rte_flow_item_udp udp;
-               struct rte_flow_item_tcp tcp;
-       } l4;
-       union {
-               struct rte_flow_item_udp udp;
-               struct rte_flow_item_tcp tcp;
-       } l4_mask;
-       struct rte_flow_action actions[2];
-       struct rte_flow_action_queue queue;
-};
-
 /* Tunnel information. */
 struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
@@ -832,11 +696,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
        },
 };
 
-/* Key of thread specific flow workspace data. */
-static pthread_key_t key_workspace;
-
-/* Thread specific flow workspace data once initialization data. */
-static pthread_once_t key_workspace_init;
 
 
 /**
@@ -914,6 +773,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
+       case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
@@ -933,7 +793,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
-               if (id > (REG_C_7 - start_reg))
+               if (id > (uint32_t)(REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
@@ -949,7 +809,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
-                       if (id >= (REG_C_7 - start_reg))
+                       if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
@@ -995,6 +855,58 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
        return config->flow_mreg_c[2] != REG_NON;
 }
 
+/**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ *   Pointer to device flow rule attributes.
+ *
+ * @return
+ *   The value of lowest priority of flow.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (!attr->group && !attr->transfer)
+               return priv->config.flow_prio - 2;
+       return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate matcher priority of the flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ * @return
+ *   The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         uint32_t subpriority)
+{
+       uint16_t priority = (uint16_t)attr->priority;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       if (!attr->group && !attr->transfer) {
+               if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+                       priority = priv->config.flow_prio - 1;
+               return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+       }
+       if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+               priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
+       return priority * 3 + subpriority;
+}
+
 /**
  * Verify the @p item specifications (spec, last, mask) are compatible with the
  * NIC capabilities.
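
These two helpers carry the priority enlargement this patch is named for: root
tables still clamp to the Verbs priority range and defer to
mlx5_os_flow_adjust_priority(), while on non-root tables every user priority
now spans three matcher sub-priorities. A worked example (the attribute values
are illustrative):

    /* Non-root table (group != 0): user priority 2 with item-derived
     * subpriority 1 yields matcher priority 2 * 3 + 1 = 7. The lowest-
     * priority indicator is first mapped to MLX5_NON_ROOT_FLOW_MAX_PRIO. */
    struct rte_flow_attr attr = { .group = 1, .priority = 2 };
    uint16_t prio = mlx5_get_matcher_priority(dev, &attr, 1); /* == 7 */
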
@@ -1137,17 +1049,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
@@ -1219,18 +1143,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
-       struct mlx5_hrxq *hrxq;
+       struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;
 
-       if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
-               return;
-       hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+       if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+               struct mlx5_hrxq *hrxq;
+
+               hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
-       if (!hrxq)
+               if (hrxq)
+                       ind_tbl = hrxq->ind_table;
+       } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+               struct mlx5_shared_action_rss *shared_rss;
+
+               shared_rss = mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                        dev_handle->rix_srss);
+               if (shared_rss)
+                       ind_tbl = shared_rss->ind_tbl;
+       }
+       if (!ind_tbl)
                return;
        MLX5_ASSERT(dev->data->dev_started);
-       for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
-               int idx = hrxq->ind_table->queues[i];
+       for (i = 0; i != ind_tbl->queues_n; ++i) {
+               int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);
@@ -1548,6 +1484,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
+       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
        unsigned int i;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
@@ -1613,6 +1550,8 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
        for (i = 0; i != rss->queue_num; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+
                if (rss->queue[i] >= priv->rxqs_n)
                        return rte_flow_error_set
                                (error, EINVAL,
@@ -1622,6 +1561,15 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                 &rss->queue[i], "queue is not configured");
+               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
+                                       struct mlx5_rxq_ctrl, rxq);
+               if (i == 0)
+                       rxq_type = rxq_ctrl->type;
+               if (rxq_type != rxq_ctrl->type)
+                       return rte_flow_error_set
+                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                &rss->queue[i],
+                                "combining hairpin and regular RSS queues is not supported");
        }
        return 0;
 }
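
The new loop records the queue type of the first RSS queue and rejects any
later queue of a different type, since a single RX hash object cannot span
hairpin and regular queues. An action that would now fail with ENOTSUP,
assuming queue 4 was configured as hairpin while 0 and 1 are regular (queue
numbers are illustrative):

    uint16_t queues[] = { 0, 1, 4 };
    struct rte_flow_action_rss rss = {
            .queue_num = RTE_DIM(queues),
            .queue = queues, /* mixed types: validation fails on queue 4 */
    };
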
@@ -1773,7 +1721,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL, "groups is not supported");
-       if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+       if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -2726,6 +2674,149 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        return 0;
 }
 
+/**
+ * Validate Geneve TLV option item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] geneve_item
+ *   Previous GENEVE item specification.
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
+                                  uint64_t last_item,
+                                  const struct rte_flow_item *geneve_item,
+                                  struct rte_eth_dev *dev,
+                                  struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
+       struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+       uint8_t data_max_supported =
+                       hca_attr->max_geneve_tlv_option_data_len * 4;
+       struct mlx5_dev_config *config = &priv->config;
+       const struct rte_flow_item_geneve *geneve_spec;
+       const struct rte_flow_item_geneve *geneve_mask;
+       const struct rte_flow_item_geneve_opt *spec = item->spec;
+       const struct rte_flow_item_geneve_opt *mask = item->mask;
+       unsigned int i;
+       unsigned int data_len;
+       uint8_t tlv_option_len;
+       uint16_t optlen_m, optlen_v;
+       const struct rte_flow_item_geneve_opt full_mask = {
+               .option_class = RTE_BE16(0xffff),
+               .option_type = 0xff,
+               .option_len = 0x1f,
+       };
+
+       if (!mask)
+               mask = &rte_flow_item_geneve_opt_mask;
+       if (!spec)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt class/type/length must be specified");
+       if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt length exceeds the limit (31)");
+       /* Check if class type and length masks are full. */
+       if (full_mask.option_class != mask->option_class ||
+           full_mask.option_type != mask->option_type ||
+           full_mask.option_len != (mask->option_len & full_mask.option_len))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt class/type/length masks must be full");
+       /* Check if length is supported */
+       if ((uint32_t)spec->option_len >
+                       config->hca_attr.max_geneve_tlv_option_data_len)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt length not supported");
+       if (config->hca_attr.max_geneve_tlv_options > 1)
+               DRV_LOG(DEBUG,
+                       "max_geneve_tlv_options supports more than 1 option");
+       /* Check GENEVE item preceding. */
+       if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve opt item must be preceded by a Geneve item");
+       geneve_spec = geneve_item->spec;
+       geneve_mask = geneve_item->mask ? geneve_item->mask :
+                                         &rte_flow_item_geneve_mask;
+       /* Check if GENEVE TLV option size doesn't exceed option length */
+       if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
+                           geneve_spec->ver_opt_len_o_c_rsvd0)) {
+               tlv_option_len = spec->option_len & mask->option_len;
+               optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
+               optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
+               optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
+               optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
+               if ((optlen_v & optlen_m) <= tlv_option_len)
+                       return rte_flow_error_set
+                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                "GENEVE TLV option length exceeds optlen");
+       }
+       /* Check if length is 0 or data is 0. */
+       if (spec->data == NULL || spec->option_len == 0)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt with zero data/length not supported");
+       /* Check not all data & mask are 0. */
+       data_len = spec->option_len * 4;
+       if (mask->data == NULL) {
+               for (i = 0; i < data_len; i++)
+                       if (spec->data[i])
+                               break;
+               if (i == data_len)
+                       return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Can't match on Geneve option data 0");
+       } else {
+               for (i = 0; i < data_len; i++)
+                       if (spec->data[i] & mask->data[i])
+                               break;
+               if (i == data_len)
+                       return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Can't match on Geneve option data and mask 0");
+               /* Check data mask supported. */
+               for (i = data_max_supported; i < data_len ; i++)
+                       if (mask->data[i])
+                               return rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Data mask is of unsupported size");
+       }
+       /* Check GENEVE option is supported in NIC. */
+       if (!config->hca_attr.geneve_tlv_opt)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Geneve TLV opt not supported");
+       /* Check if we already have geneve option with different type/class. */
+       rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+       geneve_opt_resource = sh->geneve_tlv_option_resource;
+       if (geneve_opt_resource != NULL)
+               if (geneve_opt_resource->option_class != spec->option_class ||
+                   geneve_opt_resource->option_type != spec->option_type ||
+                   geneve_opt_resource->length != spec->option_len) {
+                       rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+                       return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Only one Geneve TLV option supported");
+               }
+       rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+       return 0;
+}
+
 /**
  * Validate MPLS item.
  *
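
Two different units meet in the validation above: option_len counts 4-byte
words (protocol limit 31, i.e. MLX5_GENEVE_OPTLEN_MASK), while
max_geneve_tlv_option_data_len * 4 is the HCA's matchable data size in bytes.
A spec that passes the length checks, assuming the HCA reports
max_geneve_tlv_option_data_len >= 5 (class/type values are illustrative):

    uint32_t opt_data[5] = { 0x1, 0, 0, 0, 0 }; /* at least one set bit */
    struct rte_flow_item_geneve_opt spec = {
            .option_class = RTE_BE16(0x0107),
            .option_type = 0x42,
            .option_len = 5, /* 5 words = 20 bytes of option data */
            .data = opt_data,
    };
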
@@ -2764,7 +2855,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        /* MPLS over IP, UDP, GRE is allowed */
        if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
                            MLX5_FLOW_LAYER_OUTER_L4_UDP |
-                           MLX5_FLOW_LAYER_GRE)))
+                           MLX5_FLOW_LAYER_GRE |
+                           MLX5_FLOW_LAYER_GRE_KEY)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
@@ -2888,17 +2980,20 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
                                        MLX5_FLOW_LAYER_OUTER_VLAN);
        struct rte_flow_item_ecpri mask_lo;
 
+       if (!(last_item & outer_l2_vlan) &&
+           last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "eCPRI can only follow L2/VLAN layer or UDP layer");
        if ((last_item & outer_l2_vlan) && ether_type &&
            ether_type != RTE_ETHER_TYPE_ECPRI)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "eCPRI cannot follow L2/VLAN layer "
-                                         "which ether type is not 0xAEFE.");
+                                         "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "eCPRI with tunnel is not supported "
-                                         "right now.");
+                                         "eCPRI with tunnel is not supported right now");
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2906,13 +3001,12 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
        else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "eCPRI cannot follow a TCP layer.");
+                                         "eCPRI cannot coexist with a TCP layer");
        /* In specification, eCPRI could be over UDP layer. */
        else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "eCPRI over UDP layer is not yet "
-                                         "supported right now.");
+                                         "eCPRI over UDP layer is not supported right now");
        /* Mask for type field in common header could be zero. */
        if (!mask)
                mask = &rte_flow_item_ecpri_mask;
@@ -2921,13 +3015,11 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
        if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
-                                         "partial mask is not supported "
-                                         "for protocol");
+                                         "partial mask is not supported for protocol");
        else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
-                                         "message header mask must be after "
-                                         "a type mask");
+                                         "message header mask must be after a type mask");
        return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                         acc_mask ? (const uint8_t *)acc_mask
                                                  : (const uint8_t *)&nic_mask,
@@ -3239,10 +3331,9 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 }
 
 /**
- * Flow driver remove API. This abstracts calling driver specific functions.
+ * Flow driver destroy API. This abstracts calling driver specific functions.
  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
- * on device. All the resources of the flow should be freed by calling
- * flow_drv_destroy().
+ * on device and releases resources of the flow.
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
@@ -3250,40 +3341,19 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
  *   Pointer to flow structure.
  */
 static inline void
-flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
+       flow_mreg_split_qrss_release(dev, flow);
        MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
-       fops->remove(dev, flow);
+       fops->destroy(dev, flow);
 }
 
 /**
- * Flow driver destroy API. This abstracts calling driver specific functions.
- * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
- * on device and releases resources of the flow.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in, out] flow
- *   Pointer to flow structure.
- */
-static inline void
-flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
-       const struct mlx5_flow_driver_ops *fops;
-       enum mlx5_flow_drv_type type = flow->drv_type;
-
-       flow_mreg_split_qrss_release(dev, flow);
-       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
-       fops = flow_get_drv_ops(type);
-       fops->destroy(dev, flow);
-}
-
-/**
- * Get RSS action from the action list.
+ * Get RSS action from the action list.
  *
  * @param[in] actions
  *   Pointer to the list of actions.
@@ -3294,16 +3364,51 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 static const struct rte_flow_action_rss*
 flow_get_rss_action(const struct rte_flow_action actions[])
 {
+       const struct rte_flow_action_rss *rss = NULL;
+
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_RSS:
-                       return (const struct rte_flow_action_rss *)
-                              actions->conf;
+                       rss = actions->conf;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_SAMPLE:
+               {
+                       const struct rte_flow_action_sample *sample =
+                                                               actions->conf;
+                       const struct rte_flow_action *act = sample->actions;
+                       for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
+                               if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
+                                       rss = act->conf;
+                       break;
+               }
                default:
                        break;
                }
        }
-       return NULL;
+       return rss;
+}
+
+/**
+ * Get ASO age action by index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ *   Index to the ASO age action.
+ *
+ * @return
+ *   The specified ASO age action.
+ */
+struct mlx5_aso_age_action*
+flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+       uint16_t pool_idx = age_idx & UINT16_MAX;
+       uint16_t offset = (age_idx >> 16) & UINT16_MAX;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+       struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+
+       return &pool->actions[offset - 1];
 }
 
 /* Maps a shared action to its translated non-shared counterpart in the actions array. */
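
The arithmetic in flow_aso_age_get_by_idx() relies on a packed 32-bit handle:
the pool index sits in the low 16 bits and a 1-based offset into the pool's
action array in the high 16 bits. A sketch of the matching encode (a
hypothetical helper, not part of this patch):

    static inline uint32_t
    aso_age_idx_pack(uint16_t pool_idx, uint16_t offset_1based)
    {
            /* flow_aso_age_get_by_idx() undoes this and subtracts one. */
            return ((uint32_t)offset_1based << 16) | pool_idx;
    }
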
@@ -3320,6 +3425,8 @@ struct mlx5_translated_shared_action {
  * action handling should be performed on *shared* actions list returned
  * from this call.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] actions
  *   List of actions to translate.
  * @param[out] shared
@@ -3337,12 +3444,14 @@ struct mlx5_translated_shared_action {
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_shared_actions_translate(const struct rte_flow_action actions[],
-       struct mlx5_translated_shared_action *shared,
-       int *shared_n,
-       struct rte_flow_action **translated_actions,
-       struct rte_flow_error *error)
+flow_shared_actions_translate(struct rte_eth_dev *dev,
+                             const struct rte_flow_action actions[],
+                             struct mlx5_translated_shared_action *shared,
+                             int *shared_n,
+                             struct rte_flow_action **translated_actions,
+                             struct rte_flow_error *error)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_action *translated = NULL;
        size_t actions_size;
        int n;
@@ -3374,16 +3483,31 @@ flow_shared_actions_translate(const struct rte_flow_action actions[],
        }
        memcpy(translated, actions, actions_size);
        for (shared_end = shared + copied_n; shared < shared_end; shared++) {
-               const struct rte_flow_shared_action *shared_action;
-
-               shared_action = shared->action;
-               switch (shared_action->type) {
-               case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+               struct mlx5_shared_action_rss *shared_rss;
+               uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
+               uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+               uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET)
+                                                                          - 1);
+
+               switch (type) {
+               case MLX5_SHARED_ACTION_TYPE_RSS:
+                       shared_rss = mlx5_ipool_get
+                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
                        translated[shared->index].type =
                                RTE_FLOW_ACTION_TYPE_RSS;
                        translated[shared->index].conf =
-                               &shared_action->rss.origin;
+                               &shared_rss->origin;
                        break;
+               case MLX5_SHARED_ACTION_TYPE_AGE:
+                       if (priv->sh->flow_hit_aso_en) {
+                               translated[shared->index].type =
+                                       (enum rte_flow_action_type)
+                                       MLX5_RTE_FLOW_ACTION_TYPE_AGE;
+                               translated[shared->index].conf =
+                                                        (void *)(uintptr_t)idx;
+                               break;
+                       }
+                       /* Fall-through */
                default:
                        mlx5_free(translated);
                        return rte_flow_error_set
@@ -3398,44 +3522,44 @@ flow_shared_actions_translate(const struct rte_flow_action actions[],
 /**
  * Get Shared RSS action from the action list.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] shared
  *   Pointer to the list of actions.
  * @param[in] shared_n
  *   Actions list length.
  *
  * @return
- *   Pointer to the MLX5 RSS action if exists, otherwise return NULL.
+ *   The MLX5 RSS action ID if it exists, otherwise 0.
  */
-static struct mlx5_shared_action_rss *
-flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
+static uint32_t
+flow_get_shared_rss_action(struct rte_eth_dev *dev,
+                          struct mlx5_translated_shared_action *shared,
                           int shared_n)
 {
        struct mlx5_translated_shared_action *shared_end;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_shared_action_rss *shared_rss;
 
-       for (shared_end = shared + shared_n; shared < shared_end; shared++) {
-               struct rte_flow_shared_action *shared_action;
 
-               shared_action = shared->action;
-               switch (shared_action->type) {
-               case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
-                       __atomic_add_fetch(&shared_action->refcnt, 1,
+       for (shared_end = shared + shared_n; shared < shared_end; shared++) {
+               uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
+               uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+               uint32_t idx = act_idx &
+                                  ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+               switch (type) {
+               case MLX5_SHARED_ACTION_TYPE_RSS:
+                       shared_rss = mlx5_ipool_get
+                               (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                                                                          idx);
+                       __atomic_add_fetch(&shared_rss->refcnt, 1,
                                           __ATOMIC_RELAXED);
-                       return &shared_action->rss;
+                       return idx;
                default:
                        break;
                }
        }
-       return NULL;
-}
-
-struct rte_flow_shared_action *
-mlx5_flow_get_shared_rss(struct rte_flow *flow)
-{
-       if (flow->shared_rss)
-               return container_of(flow->shared_rss,
-                                   struct rte_flow_shared_action, rss);
-       else
-               return NULL;
+       return 0;
 }
 
 static unsigned int
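
Both functions above decode the same packed handle: a shared action is no
longer a pointer but an integer that stores the action type at
MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool index in the bits below it.
Distilled from the patch:

    uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
    uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
    uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
    /* 'type' selects the ipool (RSS, AGE, ...); 'idx' addresses the entry. */
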
@@ -3626,7 +3750,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        if (queue == NULL)
                                return 0;
                        conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
-                       if (conf != NULL && !!conf->tx_explicit)
+                       if (conf == NULL || conf->tx_explicit != 0)
                                return 0;
                        queue_action = 1;
                        action_n++;
@@ -3636,7 +3760,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        if (rss == NULL || rss->queue_num == 0)
                                return 0;
                        conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
-                       if (conf != NULL && !!conf->tx_explicit)
+                       if (conf == NULL || conf->tx_explicit != 0)
                                return 0;
                        queue_action = 1;
                        action_n++;
@@ -3651,9 +3775,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap = actions->conf;
-                       if (raw_encap->size >
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4)))
+                       if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
                                split++;
                        action_n++;
                        break;
@@ -3679,36 +3801,29 @@ static void
 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
                  uint32_t flow_idx);
 
-/**
- * Add a flow of copying flow metadata registers in RX_CP_TBL.
- *
- * As mark_id is unique, if there's already a registered flow for the mark_id,
- * return by increasing the reference counter of the resource. Otherwise, create
- * the resource (mcp_res) and flow.
- *
- * Flow looks like,
- *   - If ingress port is ANY and reg_c[1] is mark_id,
- *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * For default flow (zero mark_id), flow is like,
- *   - If ingress port is ANY,
- *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param mark_id
- *   ID of MARK action, zero means default flow for META.
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- *
- * @return
- *   Associated resource on success, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_mreg_copy_resource *
-flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
-                         struct rte_flow_error *error)
+int
+flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
+                     struct mlx5_hlist_entry *entry,
+                     uint64_t key, void *cb_ctx __rte_unused)
+{
+       struct mlx5_flow_mreg_copy_resource *mcp_res =
+               container_of(entry, typeof(*mcp_res), hlist_ent);
+
+       return mcp_res->mark_id != key;
+}
+
+struct mlx5_hlist_entry *
+flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
+                      void *cb_ctx)
 {
+       struct rte_eth_dev *dev = list->ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct rte_flow_error *error = ctx->error;
+       uint32_t idx = 0;
+       int ret;
+       uint32_t mark_id = key;
        struct rte_flow_attr attr = {
                .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
                .ingress = 1,
@@ -3732,9 +3847,6 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
        struct rte_flow_action actions[] = {
                [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
        };
-       struct mlx5_flow_mreg_copy_resource *mcp_res;
-       uint32_t idx = 0;
-       int ret;
 
        /* Fill the register fields in the flow. */
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
@@ -3745,17 +3857,6 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
        if (ret < 0)
                return NULL;
        cp_mreg.src = ret;
-       /* Check if already registered. */
-       MLX5_ASSERT(priv->mreg_cp_tbl);
-       mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
-       if (mcp_res) {
-               /* For non-default rule. */
-               if (mark_id != MLX5_DEFAULT_COPY_ID)
-                       mcp_res->refcnt++;
-               MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
-                           mcp_res->refcnt == 1);
-               return mcp_res;
-       }
        /* Provide the full width of FLAG specific value. */
        if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
                tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
@@ -3788,7 +3889,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                };
        } else {
                /* Default rule, wildcard match. */
-               attr.priority = MLX5_FLOW_PRIO_RSVD;
+               attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
                items[0] = (struct rte_flow_item){
                        .type = RTE_FLOW_ITEM_TYPE_END,
                };
@@ -3812,6 +3913,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                return NULL;
        }
        mcp_res->idx = idx;
+       mcp_res->mark_id = mark_id;
        /*
         * The copy Flows are not included in any list. They
         * are referenced from other Flows and can not
@@ -3820,113 +3922,73 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
         */
        mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
                                         actions, false, error);
-       if (!mcp_res->rix_flow)
-               goto error;
-       mcp_res->refcnt++;
-       mcp_res->hlist_ent.key = mark_id;
-       ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
-                               &mcp_res->hlist_ent);
-       MLX5_ASSERT(!ret);
-       if (ret)
-               goto error;
-       return mcp_res;
-error:
-       if (mcp_res->rix_flow)
-               flow_list_destroy(dev, NULL, mcp_res->rix_flow);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
-       return NULL;
+       if (!mcp_res->rix_flow) {
+               mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
+               return NULL;
+       }
+       return &mcp_res->hlist_ent;
 }
 
 /**
- * Release flow in RX_CP_TBL.
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise, create
+ * the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ *   - If ingress port is ANY and reg_c[1] is mark_id,
+ *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For default flow (zero mark_id), flow is like,
+ *   - If ingress port is ANY,
+ *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @flow
- *   Parent flow for which copying is provided.
+ * @param mark_id
+ *   ID of MARK action, zero means default flow for META.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   Associated resource on success, NULL otherwise and rte_errno is set.
  */
-static void
-flow_mreg_del_copy_action(struct rte_eth_dev *dev,
-                         struct rte_flow *flow)
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+                         struct rte_flow_error *error)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res;
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hlist_entry *entry;
+       struct mlx5_flow_cb_ctx ctx = {
+               .dev = dev,
+               .error = error,
+       };
 
-       if (!flow->rix_mreg_copy)
-               return;
-       mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
-                                flow->rix_mreg_copy);
-       if (!mcp_res || !priv->mreg_cp_tbl)
-               return;
-       if (flow->copy_applied) {
-               MLX5_ASSERT(mcp_res->appcnt);
-               flow->copy_applied = 0;
-               --mcp_res->appcnt;
-               if (!mcp_res->appcnt) {
-                       struct rte_flow *mcp_flow = mlx5_ipool_get
-                                       (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
-                                       mcp_res->rix_flow);
-
-                       if (mcp_flow)
-                               flow_drv_remove(dev, mcp_flow);
-               }
-       }
-       /*
-        * We do not check availability of metadata registers here,
-        * because copy resources are not allocated in this case.
-        */
-       if (--mcp_res->refcnt)
-               return;
-       MLX5_ASSERT(mcp_res->rix_flow);
-       flow_list_destroy(dev, NULL, mcp_res->rix_flow);
-       mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
-       flow->rix_mreg_copy = 0;
+       /* Check if already registered. */
+       MLX5_ASSERT(priv->mreg_cp_tbl);
+       entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
+       if (!entry)
+               return NULL;
+       return container_of(entry, struct mlx5_flow_mreg_copy_resource,
+                           hlist_ent);
 }
 
-/**
- * Start flow in RX_CP_TBL.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @flow
- *   Parent flow for which copying is provided.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_mreg_start_copy_action(struct rte_eth_dev *dev,
-                           struct rte_flow *flow)
+void
+flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct mlx5_flow_mreg_copy_resource *mcp_res =
+               container_of(entry, typeof(*mcp_res), hlist_ent);
+       struct rte_eth_dev *dev = list->ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
-       int ret;
 
-       if (!flow->rix_mreg_copy || flow->copy_applied)
-               return 0;
-       mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
-                                flow->rix_mreg_copy);
-       if (!mcp_res)
-               return 0;
-       if (!mcp_res->appcnt) {
-               struct rte_flow *mcp_flow = mlx5_ipool_get
-                               (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
-                               mcp_res->rix_flow);
-
-               if (mcp_flow) {
-                       ret = flow_drv_apply(dev, mcp_flow, NULL);
-                       if (ret)
-                               return ret;
-               }
-       }
-       ++mcp_res->appcnt;
-       flow->copy_applied = 1;
-       return 0;
+       MLX5_ASSERT(mcp_res->rix_flow);
+       flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 }
 
 /**
- * Stop flow in RX_CP_TBL.
+ * Release flow in RX_CP_TBL.
  *
  * @param dev
  *   Pointer to Ethernet device.
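
The refactor in this area replaces the open-coded lookup/insert/refcount logic
with the generic hash list: mlx5_hlist_register() looks the key up through
flow_dv_mreg_match_cb() (which returns 0 on a match), creates a missing entry
through flow_dv_mreg_create_cb(), and takes a reference either way, while
mlx5_hlist_unregister() drops the reference and calls flow_dv_mreg_remove_cb()
on the last release. The caller's side under that contract, as inferred from
this patch:

    struct mlx5_flow_cb_ctx ctx = { .dev = dev, .error = error };
    struct mlx5_hlist_entry *e =
            mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
    struct mlx5_flow_mreg_copy_resource *res = e ?
            container_of(e, struct mlx5_flow_mreg_copy_resource, hlist_ent) :
            NULL; /* on failure rte_errno is set by the create callback */
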
@@ -3934,59 +3996,54 @@ flow_mreg_start_copy_action(struct rte_eth_dev *dev,
  *   Parent flow for which copying is provided.
  */
 static void
-flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
-                          struct rte_flow *flow)
+flow_mreg_del_copy_action(struct rte_eth_dev *dev,
+                         struct rte_flow *flow)
 {
        struct mlx5_flow_mreg_copy_resource *mcp_res;
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (!flow->rix_mreg_copy || !flow->copy_applied)
+       if (!flow->rix_mreg_copy)
                return;
        mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
                                 flow->rix_mreg_copy);
-       if (!mcp_res)
+       if (!mcp_res || !priv->mreg_cp_tbl)
                return;
-       MLX5_ASSERT(mcp_res->appcnt);
-       --mcp_res->appcnt;
-       flow->copy_applied = 0;
-       if (!mcp_res->appcnt) {
-               struct rte_flow *mcp_flow = mlx5_ipool_get
-                               (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
-                               mcp_res->rix_flow);
-
-               if (mcp_flow)
-                       flow_drv_remove(dev, mcp_flow);
-       }
+       MLX5_ASSERT(mcp_res->rix_flow);
+       mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+       flow->rix_mreg_copy = 0;
 }
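
This rework moves reference counting out of the flow code and into the hashed list itself: mlx5_hlist_register() takes a reference (running the create callback on first use) and mlx5_hlist_unregister() drops one, invoking a remove callback such as flow_dv_mreg_remove_cb() above when the count reaches zero. A reduced model of that contract, with hypothetical names:

/* Hypothetical reduced model of the register/unregister contract. */
struct entry {
	unsigned int refcnt;
	void (*remove_cb)(struct entry *e); /* frees the resource */
};

static struct entry *
hlist_register_model(struct entry *e)
{
	/* Lookup and creation omitted; each register takes a reference. */
	e->refcnt++;
	return e;
}

static void
hlist_unregister_model(struct entry *e)
{
	/* The last unregister triggers the remove callback. */
	if (--e->refcnt == 0)
		e->remove_cb(e);
}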
 
 /**
  * Remove the default copy action from RX_CP_TBL.
  *
+ * This function is called in mlx5_dev_start(); thread safety is not
+ * guaranteed.
+ *
  * @param dev
  *   Pointer to Ethernet device.
  */
 static void
 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
 {
-       struct mlx5_flow_mreg_copy_resource *mcp_res;
+       struct mlx5_hlist_entry *entry;
        struct mlx5_priv *priv = dev->data->dev_private;
 
        /* Check if default flow is registered. */
        if (!priv->mreg_cp_tbl)
                return;
-       mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
-                                           MLX5_DEFAULT_COPY_ID);
-       if (!mcp_res)
+       entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
+                                 MLX5_DEFAULT_COPY_ID, NULL);
+       if (!entry)
                return;
-       MLX5_ASSERT(mcp_res->rix_flow);
-       flow_list_destroy(dev, NULL, mcp_res->rix_flow);
-       mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+       mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
 }
 
 /**
 * Add the default copy action in RX_CP_TBL.
  *
+ * This function is called in mlx5_dev_start(); thread safety is not
+ * guaranteed.
+ *
  * @param dev
  *   Pointer to Ethernet device.
  * @param[out] error
@@ -4008,6 +4065,12 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
            !mlx5_flow_ext_mreg_supported(dev) ||
            !priv->sh->dv_regc0_mask)
                return 0;
+       /*
+        * The default mreg copy flow may be added multiple times, but
+        * it is removed only once at stop. Avoid registering it twice.
+        */
+       if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+               return 0;
        mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
        if (!mcp_res)
                return -rte_errno;
@@ -4074,10 +4137,6 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                        if (!mcp_res)
                                return -rte_errno;
                        flow->rix_mreg_copy = mcp_res->idx;
-                       if (dev->data->dev_started) {
-                               mcp_res->appcnt++;
-                               flow->copy_applied = 1;
-                       }
                        return 0;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark = (const struct rte_flow_action_mark *)
@@ -4087,10 +4146,6 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev,
                        if (!mcp_res)
                                return -rte_errno;
                        flow->rix_mreg_copy = mcp_res->idx;
-                       if (dev->data->dev_started) {
-                               mcp_res->appcnt++;
-                               flow->copy_applied = 1;
-                       }
                        return 0;
                default:
                        break;
@@ -4167,9 +4222,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap = actions->conf;
-                       if (raw_encap->size >
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4))) {
+                       if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
                                memcpy(actions_tx, actions,
                                       sizeof(struct rte_flow_action));
                                actions_tx++;
@@ -4182,9 +4235,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        raw_decap = actions->conf;
-                       if (raw_decap->size <
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4))) {
+                       if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
                                memcpy(actions_tx, actions,
                                       sizeof(struct rte_flow_action));
                                actions_tx++;
@@ -4235,191 +4286,48 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        return 0;
 }
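
Judging from the open-coded sums it replaces in this hunk, MLX5_ENCAPSULATION_DECISION_SIZE is presumably defined along these lines; check mlx5_flow.h for the authoritative definition:

#include <rte_flow.h>

/* Presumed definition, reconstructed from the replaced expression. */
#define MLX5_ENCAPSULATION_DECISION_SIZE \
	(sizeof(struct rte_flow_item_eth) + sizeof(struct rte_flow_item_ipv4))

Naming the threshold makes the raw encap/decap split decision self-documenting at both call sites.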
 
-__extension__
-union tunnel_offload_mark {
-       uint32_t val;
-       struct {
-               uint32_t app_reserve:8;
-               uint32_t table_id:15;
-               uint32_t transfer:1;
-               uint32_t _unused_:8;
-       };
-};
-
-struct tunnel_default_miss_ctx {
-       uint16_t *queue;
-       __extension__
-       union {
-               struct rte_flow_action_rss action_rss;
-               struct rte_flow_action_queue miss_queue;
-               struct rte_flow_action_jump miss_jump;
-               uint8_t raw[0];
-       };
-};
-
+/**
+ * The last stage of splitting chain, just creates the subflow
+ * without any modification.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ *   Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
 static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
-                            struct rte_flow *flow,
-                            const struct rte_flow_attr *attr,
-                            const struct rte_flow_action *app_actions,
-                            uint32_t flow_idx,
-                            struct tunnel_default_miss_ctx *ctx,
-                            struct rte_flow_error *error)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flow *dev_flow;
-       struct rte_flow_attr miss_attr = *attr;
-       const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
-       const struct rte_flow_item miss_items[2] = {
-               {
-                       .type = RTE_FLOW_ITEM_TYPE_ETH,
-                       .spec = NULL,
-                       .last = NULL,
-                       .mask = NULL
-               },
-               {
-                       .type = RTE_FLOW_ITEM_TYPE_END,
-                       .spec = NULL,
-                       .last = NULL,
-                       .mask = NULL
-               }
-       };
-       union tunnel_offload_mark mark_id;
-       struct rte_flow_action_mark miss_mark;
-       struct rte_flow_action miss_actions[3] = {
-               [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
-               [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
-       };
-       const struct rte_flow_action_jump *jump_data;
-       uint32_t i, flow_table = 0; /* prevent compilation warning */
-       struct flow_grp_info grp_info = {
-               .external = 1,
-               .transfer = attr->transfer,
-               .fdb_def_rule = !!priv->fdb_def_rule,
-               .std_tbl_fix = 0,
-       };
-       int ret;
-
-       if (!attr->transfer) {
-               uint32_t q_size;
-
-               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-               q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
-               ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
-                                        0, SOCKET_ID_ANY);
-               if (!ctx->queue)
-                       return rte_flow_error_set
-                               (error, ENOMEM,
-                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                               NULL, "invalid default miss RSS");
-               ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
-               ctx->action_rss.level = 0,
-               ctx->action_rss.types = priv->rss_conf.rss_hf,
-               ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
-               ctx->action_rss.queue_num = priv->reta_idx_n,
-               ctx->action_rss.key = priv->rss_conf.rss_key,
-               ctx->action_rss.queue = ctx->queue;
-               if (!priv->reta_idx_n || !priv->rxqs_n)
-                       return rte_flow_error_set
-                               (error, EINVAL,
-                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                               NULL, "invalid port configuration");
-               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
-                       ctx->action_rss.types = 0;
-               for (i = 0; i != priv->reta_idx_n; ++i)
-                       ctx->queue[i] = (*priv->reta_idx)[i];
-       } else {
-               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
-               ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
-       }
-       miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
-       for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
-       jump_data = app_actions->conf;
-       miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
-       miss_attr.group = jump_data->group;
-       ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-                                      &flow_table, grp_info, error);
-       if (ret)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                         NULL, "invalid tunnel id");
-       mark_id.app_reserve = 0;
-       mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
-       mark_id.transfer = !!attr->transfer;
-       mark_id._unused_ = 0;
-       miss_mark.id = mark_id.val;
-       dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
-                                   miss_items, miss_actions, flow_idx, error);
-       if (!dev_flow)
-               return -rte_errno;
-       dev_flow->flow = flow;
-       dev_flow->external = true;
-       dev_flow->tunnel = tunnel;
-       /* Subflow object was created, we must include one in the list. */
-       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
-                     dev_flow->handle, next);
-       DRV_LOG(DEBUG,
-               "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
-               dev->data->port_id, tunnel->app_tunnel.type,
-               tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
-       ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
-                                 miss_actions, error);
-       if (!ret)
-               ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
-                                                 error);
-
-       return ret;
-}
-
-/**
- * The last stage of splitting chain, just creates the subflow
- * without any modification.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] flow
- *   Parent flow structure pointer.
- * @param[in, out] sub_flow
- *   Pointer to return the created subflow, may be NULL.
- * @param[in] prefix_layers
- *   Prefix subflow layers, may be 0.
- * @param[in] prefix_mark
- *   Prefix subflow mark flag, may be 0.
- * @param[in] attr
- *   Flow rule attributes.
- * @param[in] items
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
- * @param[in] external
- *   This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- *   This memory pool index to the flow.
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- * @return
- *   0 on success, negative value otherwise
- */
-static int
-flow_create_split_inner(struct rte_eth_dev *dev,
-                       struct rte_flow *flow,
-                       struct mlx5_flow **sub_flow,
-                       uint64_t prefix_layers,
-                       uint32_t prefix_mark,
-                       const struct rte_flow_attr *attr,
-                       const struct rte_flow_item items[],
-                       const struct rte_flow_action actions[],
-                       bool external, uint32_t flow_idx,
-                       struct rte_flow_error *error)
+flow_create_split_inner(struct rte_eth_dev *dev,
+                       struct rte_flow *flow,
+                       struct mlx5_flow **sub_flow,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item items[],
+                       const struct rte_flow_action actions[],
+                       struct mlx5_flow_split_info *flow_split_info,
+                       struct rte_flow_error *error)
 {
        struct mlx5_flow *dev_flow;
 
        dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
-               flow_idx, error);
+                                   flow_split_info->flow_idx, error);
        if (!dev_flow)
                return -rte_errno;
        dev_flow->flow = flow;
-       dev_flow->external = external;
+       dev_flow->external = flow_split_info->external;
+       dev_flow->skip_scale = flow_split_info->skip_scale;
        /* Subflow object was created, we must include one in the list. */
        SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
                      dev_flow->handle, next);
@@ -4428,9 +4336,9 @@ flow_create_split_inner(struct rte_eth_dev *dev,
         * flow may need some user defined item layer flags, and pass the
         * metadata rxq mark flag to suffix flow as well.
         */
-       if (prefix_layers)
-               dev_flow->handle->layers = prefix_layers;
-       if (prefix_mark)
+       if (flow_split_info->prefix_layers)
+               dev_flow->handle->layers = flow_split_info->prefix_layers;
+       if (flow_split_info->prefix_mark)
                dev_flow->handle->mark = 1;
        if (sub_flow)
                *sub_flow = dev_flow;
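
flow_create_split_inner() and the later split stages now receive one mlx5_flow_split_info instead of the growing prefix_layers/prefix_mark/external/flow_idx parameter list. Reconstructed from the initializer in flow_list_create() further below, the structure carries at least these fields (a sketch, not the verbatim mlx5_flow.h definition):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the split-info bundle; see mlx5_flow.h for the real one. */
struct mlx5_flow_split_info {
	bool external;          /* rule requested by the application */
	uint8_t skip_scale;     /* bits suppressing group scaling */
	uint32_t flow_idx;      /* memory pool index of the flow */
	uint32_t prefix_mark;   /* mark flag of the prefix subflow */
	uint64_t prefix_layers; /* layer flags of the prefix subflow */
};

Bundling the state also lets intermediate stages update a field (prefix_layers, prefix_mark, skip_scale) once instead of threading conditional expressions through every call.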
@@ -4784,6 +4692,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
  *   Pointer to the position of the matched action if exists, otherwise is -1.
  * @param[out] qrss_action_pos
  *   Pointer to the position of the Queue/RSS action if exists, otherwise is -1.
+ * @param[out] modify_after_mirror
+ *   Pointer to the flag of modify action after FDB mirroring.
  *
  * @return
  *   > 0 the total number of actions.
@@ -4793,14 +4703,15 @@ static int
 flow_check_match_action(const struct rte_flow_action actions[],
                        const struct rte_flow_attr *attr,
                        enum rte_flow_action_type action,
-                       int *match_action_pos, int *qrss_action_pos)
+                       int *match_action_pos, int *qrss_action_pos,
+                       int *modify_after_mirror)
 {
        const struct rte_flow_action_sample *sample;
        int actions_n = 0;
-       int jump_flag = 0;
        uint32_t ratio = 0;
        int sub_type = 0;
        int flag = 0;
+       int fdb_mirror = 0;
 
        *match_action_pos = -1;
        *qrss_action_pos = -1;
@@ -4809,27 +4720,53 @@ flow_check_match_action(const struct rte_flow_action actions[],
                        flag = 1;
                        *match_action_pos = actions_n;
                }
-               if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
-                   actions->type == RTE_FLOW_ACTION_TYPE_RSS)
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+               case RTE_FLOW_ACTION_TYPE_RSS:
                        *qrss_action_pos = actions_n;
-               if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
-                       jump_flag = 1;
-               if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+                       break;
+               case RTE_FLOW_ACTION_TYPE_SAMPLE:
                        sample = actions->conf;
                        ratio = sample->ratio;
                        sub_type = ((const struct rte_flow_action *)
                                        (sample->actions))->type;
+                       if (ratio == 1 && attr->transfer)
+                               fdb_mirror = 1;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+               case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+               case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+               case RTE_FLOW_ACTION_TYPE_SET_TTL:
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+               case RTE_FLOW_ACTION_TYPE_FLAG:
+               case RTE_FLOW_ACTION_TYPE_MARK:
+               case RTE_FLOW_ACTION_TYPE_SET_META:
+               case RTE_FLOW_ACTION_TYPE_SET_TAG:
+                       if (fdb_mirror)
+                               *modify_after_mirror = 1;
+                       break;
+               default:
+                       break;
                }
                actions_n++;
        }
-       if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
-               if (ratio == 1) {
-                       /* JUMP Action not support for Mirroring;
-                        * Mirroring support multi-destination;
-                        */
-                       if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
-                               flag = 0;
-               }
+       if (flag && fdb_mirror && !*modify_after_mirror) {
+               /* FDB mirroring is implemented with the destination array
+                * instead of the FLOW_SAMPLER object.
+                */
+               if (sub_type != RTE_FLOW_ACTION_TYPE_END)
+                       flag = 0;
        }
        /* Count RTE_FLOW_ACTION_TYPE_END. */
        return flag ? actions_n + 1 : 0;
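
The switch above classifies everything in one pass over the END-terminated action list. A self-contained sketch of the same scan, reduced to a single modify-header action type for brevity:

#include <rte_flow.h>

/* Count the actions in an END-terminated list and flag any packet
 * modification that follows a mirror (sample with ratio == 1). */
static int
count_actions(const struct rte_flow_action *actions,
	      int *modify_after_mirror)
{
	const struct rte_flow_action_sample *sample;
	int mirror_seen = 0;
	int n = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_SAMPLE:
			sample = actions->conf;
			if (sample->ratio == 1)
				mirror_seen = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			if (mirror_seen)
				*modify_after_mirror = 1;
			break;
		default:
			break;
		}
		n++;
	}
	return n + 1; /* include the END action itself */
}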
@@ -4848,8 +4785,8 @@ flow_check_match_action(const struct rte_flow_action actions[],
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] fdb_tx
- *   FDB egress flow flag.
+ * @param[in] add_tag
+ *   Add extra tag action flag.
  * @param[out] sfx_items
  *   Suffix flow match items (list terminated by the END pattern item).
  * @param[in] actions
@@ -4864,6 +4801,8 @@ flow_check_match_action(const struct rte_flow_action actions[],
  *   The sample action position.
  * @param[in] qrss_action_pos
  *   The Queue/RSS action position.
+ * @param[in] jump_table
+ *   Add extra jump action flag.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  *
@@ -4873,7 +4812,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
  */
 static int
 flow_sample_split_prep(struct rte_eth_dev *dev,
-                      uint32_t fdb_tx,
+                      int add_tag,
                       struct rte_flow_item sfx_items[],
                       const struct rte_flow_action actions[],
                       struct rte_flow_action actions_sfx[],
@@ -4881,14 +4820,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
                       int actions_n,
                       int sample_action_pos,
                       int qrss_action_pos,
+                      int jump_table,
                       struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rte_flow_action_set_tag *set_tag;
        struct mlx5_rte_flow_item_tag *tag_spec;
        struct mlx5_rte_flow_item_tag *tag_mask;
+       struct rte_flow_action_jump *jump_action;
        uint32_t tag_id = 0;
        int index;
+       int append_index = 0;
        int ret;
 
        if (sample_action_pos < 0)
@@ -4896,9 +4838,37 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid position of sample "
                                          "action in list");
-       if (!fdb_tx) {
+       /* Prepare the actions for prefix and suffix flow. */
+       if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
+               index = qrss_action_pos;
+               /* Put the preceding the Queue/RSS action into prefix flow. */
+               if (index != 0)
+                       memcpy(actions_pre, actions,
+                              sizeof(struct rte_flow_action) * index);
+               /* Put others preceding the sample action into prefix flow. */
+               if (sample_action_pos > index + 1)
+                       memcpy(actions_pre + index, actions + index + 1,
+                              sizeof(struct rte_flow_action) *
+                              (sample_action_pos - index - 1));
+               index = sample_action_pos - 1;
+               /* Put Queue/RSS action into Suffix flow. */
+               memcpy(actions_sfx, actions + qrss_action_pos,
+                      sizeof(struct rte_flow_action));
+               actions_sfx++;
+       } else {
+               index = sample_action_pos;
+               if (index != 0)
+                       memcpy(actions_pre, actions,
+                              sizeof(struct rte_flow_action) * index);
+       }
+       /* On CX5, add an extra tag action for NIC-RX and E-Switch ingress.
+        * On CX6DX and above, metadata registers Cx preserve their value,
+        * so the extra tag action also covers NIC-RX and the whole E-Switch
+        * domain.
+        */
+       if (add_tag) {
                /* Prepare the prefix tag action. */
-               set_tag = (void *)(actions_pre + actions_n + 1);
+               append_index++;
+               set_tag = (void *)(actions_pre + actions_n + append_index);
                ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
                if (ret < 0)
                        return ret;
@@ -4923,32 +4893,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
                        .type = (enum rte_flow_item_type)
                                RTE_FLOW_ITEM_TYPE_END,
                };
-       }
-       /* Prepare the actions for prefix and suffix flow. */
-       if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
-               index = qrss_action_pos;
-               /* Put the preceding the Queue/RSS action into prefix flow. */
-               if (index != 0)
-                       memcpy(actions_pre, actions,
-                              sizeof(struct rte_flow_action) * index);
-               /* Put others preceding the sample action into prefix flow. */
-               if (sample_action_pos > index + 1)
-                       memcpy(actions_pre + index, actions + index + 1,
-                              sizeof(struct rte_flow_action) *
-                              (sample_action_pos - index - 1));
-               index = sample_action_pos - 1;
-               /* Put Queue/RSS action into Suffix flow. */
-               memcpy(actions_sfx, actions + qrss_action_pos,
-                      sizeof(struct rte_flow_action));
-               actions_sfx++;
-       } else {
-               index = sample_action_pos;
-               if (index != 0)
-                       memcpy(actions_pre, actions,
-                              sizeof(struct rte_flow_action) * index);
-       }
-       /* Add the extra tag action for NIC-RX and E-Switch ingress. */
-       if (!fdb_tx) {
+               /* Prepare the tag action in prefix subflow. */
                actions_pre[index++] =
                        (struct rte_flow_action){
                        .type = (enum rte_flow_action_type)
@@ -4959,6 +4904,22 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
        memcpy(actions_pre + index, actions + sample_action_pos,
               sizeof(struct rte_flow_action));
        index += 1;
+       /* For the modify action after the sample action in E-Switch mirroring,
+        * Add the extra jump action in prefix subflow and jump into the next
+        * table, then do the modify action in the new table.
+        */
+       if (jump_table) {
+               /* Prepare the prefix jump action. */
+               append_index++;
+               jump_action = (void *)(actions_pre + actions_n + append_index);
+               jump_action->group = jump_table;
+               actions_pre[index++] =
+                       (struct rte_flow_action){
+                       .type = (enum rte_flow_action_type)
+                               RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = jump_action,
+               };
+       }
        actions_pre[index] = (struct rte_flow_action){
                .type = (enum rte_flow_action_type)
                        RTE_FLOW_ACTION_TYPE_END,
@@ -4984,20 +4945,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
- * @param[in] prefix_layers
- *   Prefix flow layer flags.
- * @param[in] prefix_mark
- *   Prefix subflow mark flag, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
- * @param[in] external
- *   This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- *   This memory pool index to the flow.
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @return
@@ -5006,12 +4961,10 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
 static int
 flow_create_split_metadata(struct rte_eth_dev *dev,
                           struct rte_flow *flow,
-                          uint64_t prefix_layers,
-                          uint32_t prefix_mark,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item items[],
                           const struct rte_flow_action actions[],
-                          bool external, uint32_t flow_idx,
+                          struct mlx5_flow_split_info *flow_split_info,
                           struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -5030,10 +4983,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
        if (!config->dv_flow_en ||
            config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
            !mlx5_flow_ext_mreg_supported(dev))
-               return flow_create_split_inner(dev, flow, NULL, prefix_layers,
-                                              prefix_mark, attr, items,
-                                              actions, external, flow_idx,
-                                              error);
+               return flow_create_split_inner(dev, flow, NULL, attr, items,
+                                              actions, flow_split_info, error);
        actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
                                                           &encap_idx);
        if (qrss) {
@@ -5118,10 +5069,9 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                        goto exit;
        }
        /* Add the unmodified original or prefix subflow. */
-       ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
-                                     prefix_mark, attr,
+       ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
                                      items, ext_actions ? ext_actions :
-                                     actions, external, flow_idx, error);
+                                     actions, flow_split_info, error);
        if (ret < 0)
                goto exit;
        MLX5_ASSERT(dev_flow);
@@ -5182,10 +5132,12 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                }
                dev_flow = NULL;
                /* Add suffix subflow to execute Q/RSS. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
+               flow_split_info->prefix_layers = layers;
+               flow_split_info->prefix_mark = 0;
+               ret = flow_create_split_inner(dev, flow, &dev_flow,
                                              &q_attr, mtr_sfx ? items :
                                              q_items, q_actions,
-                                             external, flow_idx, error);
+                                             flow_split_info, error);
                if (ret < 0)
                        goto exit;
                /* qrss ID should be freed if failed. */
@@ -5219,20 +5171,14 @@ exit:
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
- * @param[in] prefix_layers
- *   Prefix subflow layers, may be 0.
- * @param[in] prefix_mark
- *   Prefix subflow mark flag, may be 0.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] items
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
- * @param[in] external
- *   This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- *   This memory pool index to the flow.
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @return
@@ -5241,12 +5187,10 @@ exit:
 static int
 flow_create_split_meter(struct rte_eth_dev *dev,
                        struct rte_flow *flow,
-                       uint64_t prefix_layers,
-                       uint32_t prefix_mark,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
-                       bool external, uint32_t flow_idx,
+                       struct mlx5_flow_split_info *flow_split_info,
                        struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -5290,11 +5234,10 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                        goto exit;
                }
                /* Add the prefix subflow. */
+               flow_split_info->prefix_mark = 0;
                ret = flow_create_split_inner(dev, flow, &dev_flow,
-                                             prefix_layers, 0,
-                                             attr, items,
-                                             pre_actions, external,
-                                             flow_idx, error);
+                                             attr, items, pre_actions,
+                                             flow_split_info, error);
                if (ret) {
                        ret = -rte_errno;
                        goto exit;
@@ -5304,16 +5247,16 @@ flow_create_split_meter(struct rte_eth_dev *dev,
                sfx_attr.group = sfx_attr.transfer ?
                                (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
                                 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+               flow_split_info->prefix_layers =
+                               flow_get_prefix_layer_flags(dev_flow);
+               flow_split_info->prefix_mark = dev_flow->handle->mark;
        }
        /* Add the prefix subflow. */
-       ret = flow_create_split_metadata(dev, flow, dev_flow ?
-                                        flow_get_prefix_layer_flags(dev_flow) :
-                                        prefix_layers, dev_flow ?
-                                        dev_flow->handle->mark : prefix_mark,
+       ret = flow_create_split_metadata(dev, flow,
                                         &sfx_attr, sfx_items ?
                                         sfx_items : items,
                                         sfx_actions ? sfx_actions : actions,
-                                        external, flow_idx, error);
+                                        flow_split_info, error);
 exit:
        if (sfx_actions)
                mlx5_free(sfx_actions);
@@ -5345,10 +5288,8 @@ exit:
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
- * @param[in] external
- *   This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- *   This memory pool index to the flow.
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @return
@@ -5360,7 +5301,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item items[],
                         const struct rte_flow_action actions[],
-                        bool external, uint32_t flow_idx,
+                        struct mlx5_flow_split_info *flow_split_info,
                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -5373,7 +5314,6 @@ flow_create_split_sample(struct rte_eth_dev *dev,
        struct mlx5_flow_dv_sample_resource *sample_res;
        struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
        struct mlx5_flow_tbl_resource *sfx_tbl;
-       union mlx5_flow_tbl_key sfx_table_key;
 #endif
        size_t act_size;
        size_t item_size;
@@ -5382,12 +5322,17 @@ flow_create_split_sample(struct rte_eth_dev *dev,
        int actions_n = 0;
        int sample_action_pos;
        int qrss_action_pos;
+       int add_tag = 0;
+       int modify_after_mirror = 0;
+       uint16_t jump_table = 0;
+       const uint32_t next_ft_step = 1;
        int ret = 0;
 
        if (priv->sampler_en)
                actions_n = flow_check_match_action(actions, attr,
                                        RTE_FLOW_ACTION_TYPE_SAMPLE,
-                                       &sample_action_pos, &qrss_action_pos);
+                                       &sample_action_pos, &qrss_action_pos,
+                                       &modify_after_mirror);
        if (actions_n) {
                /* The prefix actions must include sample, tag, and end. */
                act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
@@ -5403,49 +5348,72 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                                                  "sample flow");
                /* The representor_id is -1 for uplink. */
                fdb_tx = (attr->transfer && priv->representor_id != -1);
-               if (!fdb_tx)
+               /*
+                * When reg_c_preserve is set, metadata registers Cx preserve
+                * their value even through packet duplication.
+                */
+               add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+               if (add_tag)
                        sfx_items = (struct rte_flow_item *)((char *)sfx_actions
                                        + act_size);
+               if (modify_after_mirror)
+                       jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
+                                    next_ft_step;
                pre_actions = sfx_actions + actions_n;
-               tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
+               tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
                                                actions, sfx_actions,
                                                pre_actions, actions_n,
                                                sample_action_pos,
-                                               qrss_action_pos, error);
-               if (tag_id < 0 || (!fdb_tx && !tag_id)) {
+                                               qrss_action_pos, jump_table,
+                                               error);
+               if (tag_id < 0 || (add_tag && !tag_id)) {
                        ret = -rte_errno;
                        goto exit;
                }
+               if (modify_after_mirror)
+                       flow_split_info->skip_scale =
+                                       1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
                /* Add the prefix subflow. */
-               ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
-                                             items, pre_actions, external,
-                                             flow_idx, error);
+               ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
+                                             items, pre_actions,
+                                             flow_split_info, error);
                if (ret) {
                        ret = -rte_errno;
                        goto exit;
                }
                dev_flow->handle->split_flow_id = tag_id;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-               /* Set the sfx group attr. */
-               sample_res = (struct mlx5_flow_dv_sample_resource *)
-                                       dev_flow->dv.sample_res;
-               sfx_tbl = (struct mlx5_flow_tbl_resource *)
-                                       sample_res->normal_path_tbl;
-               sfx_tbl_data = container_of(sfx_tbl,
-                                       struct mlx5_flow_tbl_data_entry, tbl);
-               sfx_table_key.v64 = sfx_tbl_data->entry.key;
-               sfx_attr.group = sfx_attr.transfer ?
-                                       (sfx_table_key.table_id - 1) :
-                                        sfx_table_key.table_id;
+               if (!modify_after_mirror) {
+                       /* Set the sfx group attr. */
+                       sample_res = (struct mlx5_flow_dv_sample_resource *)
+                                               dev_flow->dv.sample_res;
+                       sfx_tbl = (struct mlx5_flow_tbl_resource *)
+                                               sample_res->normal_path_tbl;
+                       sfx_tbl_data = container_of(sfx_tbl,
+                                               struct mlx5_flow_tbl_data_entry,
+                                               tbl);
+                       sfx_attr.group = sfx_attr.transfer ?
+                                               (sfx_tbl_data->table_id - 1) :
+                                               sfx_tbl_data->table_id;
+               } else {
+                       MLX5_ASSERT(attr->transfer);
+                       sfx_attr.group = jump_table;
+               }
+               flow_split_info->prefix_layers =
+                               flow_get_prefix_layer_flags(dev_flow);
+               flow_split_info->prefix_mark = dev_flow->handle->mark;
+               /* The suffix group level is already scaled with the factor;
+                * set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid
+                * scaling again in translation.
+                */
+               flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
 #endif
        }
        /* Add the suffix subflow. */
-       ret = flow_create_split_meter(dev, flow, dev_flow ?
-                                flow_get_prefix_layer_flags(dev_flow) : 0,
-                                dev_flow ? dev_flow->handle->mark : 0,
-                                &sfx_attr, sfx_items ? sfx_items : items,
-                                sfx_actions ? sfx_actions : actions,
-                                external, flow_idx, error);
+       ret = flow_create_split_meter(dev, flow, &sfx_attr,
+                                     sfx_items ? sfx_items : items,
+                                     sfx_actions ? sfx_actions : actions,
+                                     flow_split_info, error);
 exit:
        if (sfx_actions)
                mlx5_free(sfx_actions);
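
skip_scale carries two bits: one marks the subflow's own group as already multiplied by MLX5_FLOW_TABLE_FACTOR, the other marks the group referenced by a JUMP action. A sketch of how translation might consult them, assuming bit positions 0 and 1 (verify against mlx5_flow.h):

#include <stdint.h>

/* Assumed bit positions; verify against mlx5_flow.h. */
#define MLX5_SCALE_FLOW_GROUP_BIT      0
#define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1

/* Scale a group by the table factor unless the corresponding
 * skip_scale bit says the splitter already did it. */
static uint32_t
scale_group(uint32_t group, uint8_t skip_scale, int is_jump,
	    uint32_t factor)
{
	uint8_t bit = is_jump ? MLX5_SCALE_JUMP_FLOW_GROUP_BIT :
				MLX5_SCALE_FLOW_GROUP_BIT;

	return (skip_scale & (1u << bit)) ? group : group * factor;
}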
@@ -5480,10 +5448,8 @@ exit:
  *   Pattern specification (list terminated by the END pattern item).
  * @param[in] actions
  *   Associated actions (list terminated by the END action).
- * @param[in] external
- *   This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- *   This memory pool index to the flow.
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
  * @param[out] error
  *   Perform verbose error reporting if not NULL.
  * @return
@@ -5495,13 +5461,13 @@ flow_create_split_outer(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item items[],
                        const struct rte_flow_action actions[],
-                       bool external, uint32_t flow_idx,
+                       struct mlx5_flow_split_info *flow_split_info,
                        struct rte_flow_error *error)
 {
        int ret;
 
        ret = flow_create_split_sample(dev, flow, attr, items,
-                                      actions, external, flow_idx, error);
+                                      actions, flow_split_info, error);
        MLX5_ASSERT(ret <= 0);
        return ret;
 }
@@ -5545,17 +5511,15 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
                          struct mlx5_flow_rss_desc *rss_desc,
                          uint32_t nrssq_num)
 {
-       bool fidx = !!wks->flow_idx;
-
-       if (likely(nrssq_num <= wks->rssq_num[fidx]))
+       if (likely(nrssq_num <= wks->rssq_num))
                return 0;
        rss_desc->queue = realloc(rss_desc->queue,
-                         sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
+                         sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
        if (!rss_desc->queue) {
                rte_errno = ENOMEM;
                return -1;
        }
-       wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
+       wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
        return 0;
 }
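
The workspace now keeps a single grow-only RSS queue array, rounded up to an even length so repeated one-queue increases do not trigger a realloc each time. A standalone sketch of the same policy; unlike the code above, it preserves the old buffer when realloc fails instead of overwriting the only pointer to it:

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_CEIL(n, a) (((n) + (a) - 1) / (a) * (a))

/* Grow the queue array to hold at least num entries; never shrink. */
static int
rssq_adjust(uint16_t **queue, uint32_t *cap, uint32_t num)
{
	uint16_t *p;

	if (num <= *cap)
		return 0;
	p = realloc(*queue, sizeof(**queue) * ALIGN_CEIL(num, 2));
	if (!p)
		return -1; /* *queue is still valid and owned by the caller */
	*queue = p;
	*cap = ALIGN_CEIL(num, 2);
	return 0;
}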
 
@@ -5593,7 +5557,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        struct mlx5_flow *dev_flow;
-       const struct rte_flow_action_rss *rss;
+       const struct rte_flow_action_rss *rss = NULL;
        struct mlx5_translated_shared_action
                shared_actions[MLX5_MAX_SHARED_ACTIONS];
        int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
@@ -5620,18 +5584,23 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        uint32_t idx = 0;
        int hairpin_flow;
        struct rte_flow_attr attr_tx = { .priority = 0 };
-       struct rte_flow_attr attr_factor = {0};
        const struct rte_flow_action *actions;
        struct rte_flow_action *translated_actions = NULL;
        struct mlx5_flow_tunnel *tunnel;
        struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
-       struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
-       bool fidx = !!wks->flow_idx;
+       struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
+       struct mlx5_flow_split_info flow_split_info = {
+               .external = !!external,
+               .skip_scale = 0,
+               .flow_idx = 0,
+               .prefix_mark = 0,
+               .prefix_layers = 0
+       };
        int ret;
 
        MLX5_ASSERT(wks);
-       rss_desc = &wks->rss_desc[fidx];
-       ret = flow_shared_actions_translate(original_actions,
+       rss_desc = &wks->rss_desc;
+       ret = flow_shared_actions_translate(dev, original_actions,
                                            shared_actions,
                                            &shared_actions_n,
                                            &translated_actions, error);
@@ -5640,10 +5609,9 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                return 0;
        }
        actions = translated_actions ? translated_actions : original_actions;
-       memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
        p_actions_rx = actions;
-       hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
-       ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
+       hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+       ret = flow_drv_validate(dev, attr, items, p_actions_rx,
                                external, hairpin_flow, error);
        if (ret < 0)
                goto error_before_hairpin_split;
@@ -5662,11 +5630,14 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                                   idx);
                p_actions_rx = actions_rx.actions;
        }
-       flow->drv_type = flow_get_drv_type(dev, &attr_factor);
+       flow_split_info.flow_idx = idx;
+       flow->drv_type = flow_get_drv_type(dev, attr);
        MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
                    flow->drv_type < MLX5_FLOW_TYPE_MAX);
        memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
-       rss = flow_get_rss_action(p_actions_rx);
+       /* The RSS action only works in the NIC Rx domain. */
+       if (attr->ingress && !attr->transfer)
+               rss = flow_get_rss_action(p_actions_rx);
        if (rss) {
                if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
                        return 0;
@@ -5692,26 +5663,21 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                buf->entries = 1;
                buf->entry[0].pattern = (void *)(uintptr_t)items;
        }
-       flow->shared_rss = flow_get_shared_rss_action(shared_actions,
+       rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
                                                      shared_actions_n);
-       /*
-        * Record the start index when there is a nested call. All sub-flows
-        * need to be translated before another calling.
-        * No need to use ping-pong buffer to save memory here.
-        */
-       if (fidx) {
-               MLX5_ASSERT(!wks->flow_nested_idx);
-               wks->flow_nested_idx = fidx;
-       }
        for (i = 0; i < buf->entries; ++i) {
+               /* Initialize flow split data. */
+               flow_split_info.prefix_layers = 0;
+               flow_split_info.prefix_mark = 0;
+               flow_split_info.skip_scale = 0;
                /*
                 * The splitter may create multiple dev_flows,
                 * depending on configuration. In the simplest
                 * case it just creates unmodified original flow.
                 */
-               ret = flow_create_split_outer(dev, flow, &attr_factor,
+               ret = flow_create_split_outer(dev, flow, attr,
                                              buf->entry[i].pattern,
-                                             p_actions_rx, external, idx,
+                                             p_actions_rx, &flow_split_info,
                                              error);
                if (ret < 0)
                        goto error;
@@ -5759,30 +5725,31 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
         * the egress Flows belong to the different device and
         * copy table should be updated in peer NIC Rx domain.
         */
-       if (attr_factor.ingress &&
-           (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+       if (attr->ingress &&
+           (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
                ret = flow_mreg_update_copy_table(dev, flow, actions, error);
                if (ret)
                        goto error;
        }
        /*
-        * If the flow is external (from application) OR device is started, then
-        * the flow will be applied immediately.
+        * If the flow is external (from application) OR device is started,
+        * OR it is an mreg discovery flow, then apply it immediately.
         */
-       if (external || dev->data->dev_started) {
+       if (external || dev->data->dev_started ||
+           (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
+            attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
                ret = flow_drv_apply(dev, flow, error);
                if (ret < 0)
                        goto error;
        }
-       if (list)
+       if (list) {
+               rte_spinlock_lock(&priv->flow_list_lock);
                ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
                             flow, next);
+               rte_spinlock_unlock(&priv->flow_list_lock);
+       }
        flow_rxq_flags_set(dev, flow);
        rte_free(translated_actions);
-       /* Nested flow creation index recovery. */
-       wks->flow_idx = wks->flow_nested_idx;
-       if (wks->flow_nested_idx)
-               wks->flow_nested_idx = 0;
        tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
        if (tunnel) {
                flow->tunnel = 1;
@@ -5790,19 +5757,23 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
                mlx5_free(default_miss_ctx.queue);
        }
+       mlx5_flow_pop_thread_workspace();
        return idx;
 error:
        MLX5_ASSERT(flow);
        ret = rte_errno; /* Save rte_errno before cleanup. */
        flow_mreg_del_copy_action(dev, flow);
        flow_drv_destroy(dev, flow);
+       if (rss_desc->shared_rss)
+               __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
+                       mlx5_ipool_get
+                       (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                       rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
        rte_errno = ret; /* Restore rte_errno. */
-       wks->flow_idx = wks->flow_nested_idx;
-       if (wks->flow_nested_idx)
-               wks->flow_nested_idx = 0;
+       mlx5_flow_pop_thread_workspace();
 error_before_hairpin_split:
        rte_free(translated_actions);
        return 0;
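
Flow list insertion and removal are now serialized with priv->flow_list_lock, since flows may be created and destroyed concurrently from several threads. The same pattern reduced to a toy list guarded by an rte_spinlock:

#include <rte_spinlock.h>

struct flow_node {
	struct flow_node *next;
};

struct flow_list {
	rte_spinlock_t lock; /* initialize with rte_spinlock_init() */
	struct flow_node *head;
};

/* Keep the critical section to the link update only; the flow
 * itself is fully built before it becomes visible on the list. */
static void
flow_list_insert(struct flow_list *l, struct flow_node *n)
{
	rte_spinlock_lock(&l->lock);
	n->next = l->head;
	l->head = n;
	rte_spinlock_unlock(&l->lock);
}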
@@ -5877,7 +5848,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
        int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
        const struct rte_flow_action *actions;
        struct rte_flow_action *translated_actions = NULL;
-       int ret = flow_shared_actions_translate(original_actions,
+       int ret = flow_shared_actions_translate(dev, original_actions,
                                                shared_actions,
                                                &shared_actions_n,
                                                &translated_actions, error);
@@ -5944,7 +5915,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
                  uint32_t flow_idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
        struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
                                               [MLX5_IPOOL_RTE_FLOW], flow_idx);
 
@@ -5957,29 +5927,22 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
        if (dev->data->dev_started)
                flow_rxq_flags_trim(dev, flow);
        flow_drv_destroy(dev, flow);
-       if (list)
+       if (list) {
+               rte_spinlock_lock(&priv->flow_list_lock);
                ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
                             flow_idx, flow, next);
-       flow_mreg_del_copy_action(dev, flow);
-       if (flow->fdir) {
-               LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
-                       if (priv_fdir_flow->rix_flow == flow_idx)
-                               break;
-               }
-               if (priv_fdir_flow) {
-                       LIST_REMOVE(priv_fdir_flow, next);
-                       mlx5_free(priv_fdir_flow->fdir);
-                       mlx5_free(priv_fdir_flow);
-               }
+               rte_spinlock_unlock(&priv->flow_list_lock);
        }
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
        if (flow->tunnel) {
                struct mlx5_flow_tunnel *tunnel;
+
                tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
                RTE_VERIFY(tunnel);
                if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
                        mlx5_flow_tunnel_free(dev, tunnel);
        }
+       flow_mreg_del_copy_action(dev, flow);
+       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 }
 
 /**
@@ -6008,172 +5971,157 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
 }
 
 /**
- * Remove all flows.
+ * Stop all default actions for flows.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list.
  */
 void
-mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list)
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow *flow = NULL;
-       uint32_t idx;
-
-       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
-                     flow, next) {
-               flow_drv_remove(dev, flow);
-               flow_mreg_stop_copy_action(dev, flow);
-       }
        flow_mreg_del_default_copy_action(dev);
        flow_rxq_flags_clear(dev);
 }
 
 /**
- * Add all flows.
+ * Start all default actions for flows.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list.
- *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list)
+mlx5_flow_start_default(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow *flow = NULL;
        struct rte_flow_error error;
-       uint32_t idx;
-       int ret = 0;
 
        /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
-       ret = flow_mreg_add_default_copy_action(dev, &error);
-       if (ret < 0)
-               return -rte_errno;
-       /* Apply Flows created by application. */
-       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
-                     flow, next) {
-               ret = flow_mreg_start_copy_action(dev, flow);
-               if (ret < 0)
-                       goto error;
-               ret = flow_drv_apply(dev, flow, &error);
-               if (ret < 0)
-                       goto error;
-               flow_rxq_flags_set(dev, flow);
-       }
-       return 0;
-error:
-       ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_stop(dev, list);
-       rte_errno = ret; /* Restore rte_errno. */
-       return -rte_errno;
+       return flow_mreg_add_default_copy_action(dev, &error);
 }
 
 /**
- * Stop all default actions for flows.
- *
- * @param dev
- *   Pointer to Ethernet device.
+ * Release key of thread specific flow workspace data.
  */
 void
-mlx5_flow_stop_default(struct rte_eth_dev *dev)
+flow_release_workspace(void *data)
 {
-       flow_mreg_del_default_copy_action(dev);
-       flow_rxq_flags_clear(dev);
+       struct mlx5_flow_workspace *wks = data;
+       struct mlx5_flow_workspace *next;
+
+       while (wks) {
+               next = wks->next;
+               free(wks->rss_desc.queue);
+               free(wks);
+               wks = next;
+       }
 }
 
 /**
- * Start all default actions for flows.
+ * Get thread specific current flow workspace.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ * @return pointer to thread specific flow workspace data, NULL on error.
  */
-int
-mlx5_flow_start_default(struct rte_eth_dev *dev)
+struct mlx5_flow_workspace*
+mlx5_flow_get_thread_workspace(void)
 {
-       struct rte_flow_error error;
+       struct mlx5_flow_workspace *data;
 
-       /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
-       return flow_mreg_add_default_copy_action(dev, &error);
+       data = mlx5_flow_os_get_specific_workspace();
+       MLX5_ASSERT(data && data->inuse);
+       if (!data || !data->inuse)
+               DRV_LOG(ERR, "flow workspace not initialized.");
+       return data;
 }
 
 /**
- * Release key of thread specific flow workspace data.
+ * Allocate and init new flow workspace.
+ *
+ * @return pointer to flow workspace data, NULL on error.
  */
-static void
-flow_release_workspace(void *data)
+static struct mlx5_flow_workspace*
+flow_alloc_thread_workspace(void)
 {
-       struct mlx5_flow_workspace *wks = data;
+       struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
 
-       if (!wks)
-               return;
-       free(wks->rss_desc[0].queue);
-       free(wks->rss_desc[1].queue);
-       free(wks);
+       if (!data) {
+               DRV_LOG(ERR, "Failed to allocate flow workspace "
+                       "memory.");
+               return NULL;
+       }
+       data->rss_desc.queue = calloc(1,
+                       sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
+       if (!data->rss_desc.queue)
+               goto err;
+       data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+       return data;
+err:
+       if (data->rss_desc.queue)
+               free(data->rss_desc.queue);
+       free(data);
+       return NULL;
 }
 
 /**
- * Initialize key of thread specific flow workspace data.
+ * Get new thread specific flow workspace.
+ *
+ * If the current workspace is in use, take the next free one or allocate
+ * a new one, and set it as current.
+ *
+ * @return pointer to thread specific flow workspace data, NULL on error.
  */
-static void
-flow_alloc_workspace(void)
+static struct mlx5_flow_workspace*
+mlx5_flow_push_thread_workspace(void)
 {
-       if (pthread_key_create(&key_workspace, flow_release_workspace))
-               DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+       struct mlx5_flow_workspace *curr;
+       struct mlx5_flow_workspace *data;
+
+       curr = mlx5_flow_os_get_specific_workspace();
+       if (!curr) {
+               data = flow_alloc_thread_workspace();
+               if (!data)
+                       return NULL;
+       } else if (!curr->inuse) {
+               data = curr;
+       } else if (curr->next) {
+               data = curr->next;
+       } else {
+               data = flow_alloc_thread_workspace();
+               if (!data)
+                       return NULL;
+               curr->next = data;
+               data->prev = curr;
+       }
+       data->inuse = 1;
+       data->flow_idx = 0;
+       /* Set as current workspace. */
+       if (mlx5_flow_os_set_specific_workspace(data))
+               DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+       return data;
 }
 
 /**
- * Get thread specific flow workspace.
+ * Close current thread specific flow workspace.
+ *
+ * If a previous workspace is available, restore it as the current one.
  *
- * @return pointer to thread specific flowworkspace data, NULL on error.
  */
-struct mlx5_flow_workspace*
-mlx5_flow_get_thread_workspace(void)
+static void
+mlx5_flow_pop_thread_workspace(void)
 {
-       struct mlx5_flow_workspace *data;
+       struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
 
-       if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
-               DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
-               return NULL;
-       }
-       data = pthread_getspecific(key_workspace);
-       if (!data) {
-               data = calloc(1, sizeof(*data));
-               if (!data) {
-                       DRV_LOG(ERR, "Failed to allocate flow workspace "
-                               "memory.");
-                       return NULL;
-               }
-               data->rss_desc[0].queue = calloc(1,
-                               sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-               if (!data->rss_desc[0].queue)
-                       goto err;
-               data->rss_desc[1].queue = calloc(1,
-                               sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-               if (!data->rss_desc[1].queue)
-                       goto err;
-               data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
-               data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
-               if (pthread_setspecific(key_workspace, data)) {
-                       DRV_LOG(ERR, "Failed to set flow workspace to thread.");
-                       goto err;
-               }
+       if (!data)
+               return;
+       if (!data->inuse) {
+               DRV_LOG(ERR, "Failed to close unused flow workspace.");
+               return;
        }
-       return data;
-err:
-       if (data->rss_desc[0].queue)
-               free(data->rss_desc[0].queue);
-       if (data->rss_desc[1].queue)
-               free(data->rss_desc[1].queue);
-       free(data);
-       return NULL;
+       data->inuse = 0;
+       if (!data->prev)
+               return;
+       if (mlx5_flow_os_set_specific_workspace(data->prev))
+               DRV_LOG(ERR, "Failed to set flow workspace to thread.");
 }
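
Together, push and pop implement a per-thread workspace stack: push reuses
the current entry when it is free, advances to ->next when it is busy, and
allocates only when the chain is exhausted; pop marks the top entry unused
and restores the previous one. A hypothetical nested-call pattern, assuming
the helpers above (error checks omitted):

	static void
	nested_flow_creation_sketch(void)
	{
		struct mlx5_flow_workspace *outer;
		struct mlx5_flow_workspace *inner;

		outer = mlx5_flow_push_thread_workspace(); /* allocate or reuse */
		/* A nested flow creation starts on the same thread. */
		inner = mlx5_flow_push_thread_workspace(); /* takes outer->next */
		/* ... nested translation uses inner ... */
		mlx5_flow_pop_thread_workspace(); /* outer is current again */
		mlx5_flow_pop_thread_workspace(); /* stack fully unwound */
		RTE_SET_USED(outer);
		RTE_SET_USED(inner);
	}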
 
 /**
@@ -6289,7 +6237,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .ingress = 1,
-               .priority = MLX5_FLOW_PRIO_RSVD,
+               .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
        };
        struct rte_flow_item items[] = {
                {
@@ -6478,9 +6426,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
        }
        priv->isolated = !!enable;
        if (enable)
-               dev->dev_ops = &mlx5_os_dev_ops_isolate;
+               dev->dev_ops = &mlx5_dev_ops_isolate;
        else
-               dev->dev_ops = &mlx5_os_dev_ops;
+               dev->dev_ops = &mlx5_dev_ops;
 
        dev->rx_descriptor_status = mlx5_rx_descriptor_status;
        dev->tx_descriptor_status = mlx5_tx_descriptor_status;
@@ -6544,620 +6492,142 @@ mlx5_flow_query(struct rte_eth_dev *dev,
 }
 
 /**
- * Convert a flow director filter to a generic flow.
+ * Manage filter operations.
  *
  * @param dev
- *   Pointer to Ethernet device.
- * @param fdir_filter
- *   Flow director filter to add.
- * @param attributes
- *   Generic flow parameters structure.
+ *   Pointer to Ethernet device structure.
+ * @param filter_type
+ *   Filter type.
+ * @param filter_op
+ *   Operation to perform.
+ * @param arg
+ *   Pointer to operation-specific structure.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_fdir_filter_convert(struct rte_eth_dev *dev,
-                        const struct rte_eth_fdir_filter *fdir_filter,
-                        struct mlx5_fdir *attributes)
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       const struct rte_eth_fdir_input *input = &fdir_filter->input;
-       const struct rte_eth_fdir_masks *mask =
-               &dev->data->dev_conf.fdir_conf.mask;
-
-       /* Validate queue number. */
-       if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
-               DRV_LOG(ERR, "port %u invalid queue number %d",
-                       dev->data->port_id, fdir_filter->action.rx_queue);
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
-       attributes->attr.ingress = 1;
-       attributes->items[0] = (struct rte_flow_item) {
-               .type = RTE_FLOW_ITEM_TYPE_ETH,
-               .spec = &attributes->l2,
-               .mask = &attributes->l2_mask,
-       };
-       switch (fdir_filter->action.behavior) {
-       case RTE_ETH_FDIR_ACCEPT:
-               attributes->actions[0] = (struct rte_flow_action){
-                       .type = RTE_FLOW_ACTION_TYPE_QUEUE,
-                       .conf = &attributes->queue,
-               };
-               break;
-       case RTE_ETH_FDIR_REJECT:
-               attributes->actions[0] = (struct rte_flow_action){
-                       .type = RTE_FLOW_ACTION_TYPE_DROP,
-               };
-               break;
-       default:
-               DRV_LOG(ERR, "port %u invalid behavior %d",
-                       dev->data->port_id,
-                       fdir_filter->action.behavior);
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       }
-       attributes->queue.index = fdir_filter->action.rx_queue;
-       /* Handle L3. */
-       switch (fdir_filter->input.flow_type) {
-       case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-       case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-               attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
-                       .src_addr = input->flow.ip4_flow.src_ip,
-                       .dst_addr = input->flow.ip4_flow.dst_ip,
-                       .time_to_live = input->flow.ip4_flow.ttl,
-                       .type_of_service = input->flow.ip4_flow.tos,
-               };
-               attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
-                       .src_addr = mask->ipv4_mask.src_ip,
-                       .dst_addr = mask->ipv4_mask.dst_ip,
-                       .time_to_live = mask->ipv4_mask.ttl,
-                       .type_of_service = mask->ipv4_mask.tos,
-                       .next_proto_id = mask->ipv4_mask.proto,
-               };
-               attributes->items[1] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_IPV4,
-                       .spec = &attributes->l3,
-                       .mask = &attributes->l3_mask,
-               };
-               break;
-       case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-       case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-               attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
-                       .hop_limits = input->flow.ipv6_flow.hop_limits,
-                       .proto = input->flow.ipv6_flow.proto,
-               };
-
-               memcpy(attributes->l3.ipv6.hdr.src_addr,
-                      input->flow.ipv6_flow.src_ip,
-                      RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-               memcpy(attributes->l3.ipv6.hdr.dst_addr,
-                      input->flow.ipv6_flow.dst_ip,
-                      RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-               memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
-                      mask->ipv6_mask.src_ip,
-                      RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
-               memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
-                      mask->ipv6_mask.dst_ip,
-                      RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
-               attributes->items[1] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_IPV6,
-                       .spec = &attributes->l3,
-                       .mask = &attributes->l3_mask,
-               };
-               break;
-       default:
-               DRV_LOG(ERR, "port %u invalid flow type%d",
-                       dev->data->port_id, fdir_filter->input.flow_type);
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       }
-       /* Handle L4. */
-       switch (fdir_filter->input.flow_type) {
-       case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-               attributes->l4.udp.hdr = (struct rte_udp_hdr){
-                       .src_port = input->flow.udp4_flow.src_port,
-                       .dst_port = input->flow.udp4_flow.dst_port,
-               };
-               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
-                       .src_port = mask->src_port_mask,
-                       .dst_port = mask->dst_port_mask,
-               };
-               attributes->items[2] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_UDP,
-                       .spec = &attributes->l4,
-                       .mask = &attributes->l4_mask,
-               };
-               break;
-       case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
-                       .src_port = input->flow.tcp4_flow.src_port,
-                       .dst_port = input->flow.tcp4_flow.dst_port,
-               };
-               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
-                       .src_port = mask->src_port_mask,
-                       .dst_port = mask->dst_port_mask,
-               };
-               attributes->items[2] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_TCP,
-                       .spec = &attributes->l4,
-                       .mask = &attributes->l4_mask,
-               };
-               break;
-       case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-               attributes->l4.udp.hdr = (struct rte_udp_hdr){
-                       .src_port = input->flow.udp6_flow.src_port,
-                       .dst_port = input->flow.udp6_flow.dst_port,
-               };
-               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
-                       .src_port = mask->src_port_mask,
-                       .dst_port = mask->dst_port_mask,
-               };
-               attributes->items[2] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_UDP,
-                       .spec = &attributes->l4,
-                       .mask = &attributes->l4_mask,
-               };
-               break;
-       case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
-                       .src_port = input->flow.tcp6_flow.src_port,
-                       .dst_port = input->flow.tcp6_flow.dst_port,
-               };
-               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
-                       .src_port = mask->src_port_mask,
-                       .dst_port = mask->dst_port_mask,
-               };
-               attributes->items[2] = (struct rte_flow_item){
-                       .type = RTE_FLOW_ITEM_TYPE_TCP,
-                       .spec = &attributes->l4,
-                       .mask = &attributes->l4_mask,
-               };
-               break;
-       case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-       case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-               break;
+       switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET) {
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
+               *(const void **)arg = &mlx5_flow_ops;
+               return 0;
        default:
-               DRV_LOG(ERR, "port %u invalid flow type%d",
-                       dev->data->port_id, fdir_filter->input.flow_type);
+               DRV_LOG(ERR, "port %u filter type (%d) not supported",
+                       dev->data->port_id, filter_type);
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
        return 0;
 }
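
After the flow director removal, the only job left here is handing out the
generic rte_flow ops, which is how the ethdev layer resolves them. An
application-side sketch, assuming the legacy rte_eth_dev_filter_ctrl() API
still present in this DPDK release:

	#include <rte_ethdev.h>
	#include <rte_flow_driver.h>

	static const struct rte_flow_ops *
	get_flow_ops(uint16_t port_id)
	{
		const struct rte_flow_ops *ops = NULL;

		if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
					    RTE_ETH_FILTER_GET, &ops) != 0)
			return NULL;
		return ops; /* rte_flow requests are serviced through these ops */
	}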
 
-#define FLOW_FDIR_CMP(f1, f2, fld) \
-       memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
-
 /**
- * Compare two FDIR flows. If items and actions are identical, the two flows are
- * regarded as same.
+ * Create the needed meter and suffix tables.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
- * @param f1
- *   FDIR flow to compare.
- * @param f2
- *   FDIR flow to compare.
+ * @param[in] fm
+ *   Pointer to the flow meter.
  *
  * @return
- *   Zero on match, 1 otherwise.
+ *   Pointer to table set on success, NULL otherwise.
  */
-static int
-flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
-{
-       if (FLOW_FDIR_CMP(f1, f2, attr) ||
-           FLOW_FDIR_CMP(f1, f2, l2) ||
-           FLOW_FDIR_CMP(f1, f2, l2_mask) ||
-           FLOW_FDIR_CMP(f1, f2, l3) ||
-           FLOW_FDIR_CMP(f1, f2, l3_mask) ||
-           FLOW_FDIR_CMP(f1, f2, l4) ||
-           FLOW_FDIR_CMP(f1, f2, l4_mask) ||
-           FLOW_FDIR_CMP(f1, f2, actions[0].type))
-               return 1;
-       if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
-           FLOW_FDIR_CMP(f1, f2, queue))
-               return 1;
-       return 0;
+struct mlx5_meter_domains_infos *
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
+                         const struct mlx5_flow_meter *fm)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+       return fops->create_mtr_tbls(dev, fm);
 }
 
 /**
- * Search device flow list to find out a matched FDIR flow.
+ * Destroy the meter table set.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
- * @param fdir_flow
- *   FDIR flow to lookup.
+ * @param[in] tbl
+ *   Pointer to the meter table set.
  *
  * @return
- *   Index of flow if found, 0 otherwise.
+ *   0 on success.
  */
-static uint32_t
-flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
+int
+mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+                          struct mlx5_meter_domains_infos *tbls)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t flow_idx = 0;
-       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-
-       MLX5_ASSERT(fdir_flow);
-       LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
-               if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
-                       DRV_LOG(DEBUG, "port %u found FDIR flow %u",
-                               dev->data->port_id, flow_idx);
-                       flow_idx = priv_fdir_flow->rix_flow;
-                       break;
-               }
-       }
-       return flow_idx;
+       const struct mlx5_flow_driver_ops *fops;
+
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+       return fops->destroy_mtr_tbls(dev, tbls);
 }
 
 /**
- * Add new flow director filter and store it in list.
+ * Create policer rules.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
- * @param fdir_filter
- *   Flow director filter to add.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] attr
+ *   Pointer to flow attributes.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   0 on success, -1 otherwise.
  */
-static int
-flow_fdir_filter_add(struct rte_eth_dev *dev,
-                    const struct rte_eth_fdir_filter *fdir_filter)
+int
+mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
+                              struct mlx5_flow_meter *fm,
+                              const struct rte_flow_attr *attr)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_fdir *fdir_flow;
-       struct rte_flow *flow;
-       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-       uint32_t flow_idx;
-       int ret;
+       const struct mlx5_flow_driver_ops *fops;
 
-       fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
-                               SOCKET_ID_ANY);
-       if (!fdir_flow) {
-               rte_errno = ENOMEM;
-               return -rte_errno;
-       }
-       ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
-       if (ret)
-               goto error;
-       flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
-       if (flow_idx) {
-               rte_errno = EEXIST;
-               goto error;
-       }
-       priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
-                                    sizeof(struct mlx5_fdir_flow),
-                                    0, SOCKET_ID_ANY);
-       if (!priv_fdir_flow) {
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
-                                   fdir_flow->items, fdir_flow->actions, true,
-                                   NULL);
-       flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
-       if (!flow)
-               goto error;
-       flow->fdir = 1;
-       priv_fdir_flow->fdir = fdir_flow;
-       priv_fdir_flow->rix_flow = flow_idx;
-       LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
-       DRV_LOG(DEBUG, "port %u created FDIR flow %p",
-               dev->data->port_id, (void *)flow);
-       return 0;
-error:
-       mlx5_free(priv_fdir_flow);
-       mlx5_free(fdir_flow);
-       return -rte_errno;
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+       return fops->create_policer_rules(dev, fm, attr);
 }
 
 /**
- * Delete specific filter.
+ * Destroy policer rules.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param fdir_filter
- *   Filter to be deleted.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] attr
+ *   Pointer to flow attributes.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   0 on success, -1 otherwise.
  */
-static int
-flow_fdir_filter_delete(struct rte_eth_dev *dev,
-                       const struct rte_eth_fdir_filter *fdir_filter)
+int
+mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
+                               struct mlx5_flow_meter *fm,
+                               const struct rte_flow_attr *attr)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       uint32_t flow_idx;
-       struct mlx5_fdir fdir_flow = {
-               .attr.group = 0,
-       };
-       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-       int ret;
+       const struct mlx5_flow_driver_ops *fops;
 
-       ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
-       if (ret)
-               return -rte_errno;
-       LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
-               /* Find the fdir in priv list */
-               if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
-                       break;
-       }
-       if (!priv_fdir_flow)
-               return 0;
-       LIST_REMOVE(priv_fdir_flow, next);
-       flow_idx = priv_fdir_flow->rix_flow;
-       flow_list_destroy(dev, &priv->flows, flow_idx);
-       mlx5_free(priv_fdir_flow->fdir);
-       mlx5_free(priv_fdir_flow);
-       DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
-               dev->data->port_id, flow_idx);
-       return 0;
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+       return fops->destroy_policer_rules(dev, fm, attr);
 }
 
 /**
- * Update queue for specific filter.
+ * Allocate a counter.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param fdir_filter
- *   Filter to be updated.
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   Index of the allocated counter on success, 0 otherwise.
  */
-static int
-flow_fdir_filter_update(struct rte_eth_dev *dev,
-                       const struct rte_eth_fdir_filter *fdir_filter)
-{
-       int ret;
-
-       ret = flow_fdir_filter_delete(dev, fdir_filter);
-       if (ret)
-               return ret;
-       return flow_fdir_filter_add(dev, fdir_filter);
-}
-
-/**
- * Flush all filters.
- *
- * @param dev
- *   Pointer to Ethernet device.
- */
-static void
-flow_fdir_filter_flush(struct rte_eth_dev *dev)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_fdir_flow *priv_fdir_flow = NULL;
-
-       while (!LIST_EMPTY(&priv->fdir_flows)) {
-               priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
-               LIST_REMOVE(priv_fdir_flow, next);
-               flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
-               mlx5_free(priv_fdir_flow->fdir);
-               mlx5_free(priv_fdir_flow);
-       }
-}
-
-/**
- * Get flow director information.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param[out] fdir_info
- *   Resulting flow director information.
- */
-static void
-flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
-{
-       struct rte_eth_fdir_masks *mask =
-               &dev->data->dev_conf.fdir_conf.mask;
-
-       fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
-       fdir_info->guarant_spc = 0;
-       rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
-       fdir_info->max_flexpayload = 0;
-       fdir_info->flow_types_mask[0] = 0;
-       fdir_info->flex_payload_unit = 0;
-       fdir_info->max_flex_payload_segment_num = 0;
-       fdir_info->flex_payload_limit = 0;
-       memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
-}
-
-/**
- * Deal with flow director operations.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param filter_op
- *   Operation to perform.
- * @param arg
- *   Pointer to operation-specific structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
-                   void *arg)
-{
-       enum rte_fdir_mode fdir_mode =
-               dev->data->dev_conf.fdir_conf.mode;
-
-       if (filter_op == RTE_ETH_FILTER_NOP)
-               return 0;
-       if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
-           fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
-               DRV_LOG(ERR, "port %u flow director mode %d not supported",
-                       dev->data->port_id, fdir_mode);
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
-       switch (filter_op) {
-       case RTE_ETH_FILTER_ADD:
-               return flow_fdir_filter_add(dev, arg);
-       case RTE_ETH_FILTER_UPDATE:
-               return flow_fdir_filter_update(dev, arg);
-       case RTE_ETH_FILTER_DELETE:
-               return flow_fdir_filter_delete(dev, arg);
-       case RTE_ETH_FILTER_FLUSH:
-               flow_fdir_filter_flush(dev);
-               break;
-       case RTE_ETH_FILTER_INFO:
-               flow_fdir_info_get(dev, arg);
-               break;
-       default:
-               DRV_LOG(DEBUG, "port %u unknown operation %u",
-                       dev->data->port_id, filter_op);
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
-       return 0;
-}
-
-/**
- * Manage filter operations.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param filter_type
- *   Filter type.
- * @param filter_op
- *   Operation to perform.
- * @param arg
- *   Pointer to operation-specific structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
-{
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET) {
-                       rte_errno = EINVAL;
-                       return -rte_errno;
-               }
-               *(const void **)arg = &mlx5_flow_ops;
-               return 0;
-       case RTE_ETH_FILTER_FDIR:
-               return flow_fdir_ctrl_func(dev, filter_op, arg);
-       default:
-               DRV_LOG(ERR, "port %u filter type (%d) not supported",
-                       dev->data->port_id, filter_type);
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       }
-       return 0;
-}
-
-/**
- * Create the needed meter and suffix tables.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] fm
- *   Pointer to the flow meter.
- *
- * @return
- *   Pointer to table set on success, NULL otherwise.
- */
-struct mlx5_meter_domains_infos *
-mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
-                         const struct mlx5_flow_meter *fm)
-{
-       const struct mlx5_flow_driver_ops *fops;
-
-       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-       return fops->create_mtr_tbls(dev, fm);
-}
-
-/**
- * Destroy the meter table set.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] tbl
- *   Pointer to the meter table set.
- *
- * @return
- *   0 on success.
- */
-int
-mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
-                          struct mlx5_meter_domains_infos *tbls)
-{
-       const struct mlx5_flow_driver_ops *fops;
-
-       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-       return fops->destroy_mtr_tbls(dev, tbls);
-}
-
-/**
- * Create policer rules.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] fm
- *   Pointer to flow meter structure.
- * @param[in] attr
- *   Pointer to flow attributes.
- *
- * @return
- *   0 on success, -1 otherwise.
- */
-int
-mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
-                              struct mlx5_flow_meter *fm,
-                              const struct rte_flow_attr *attr)
-{
-       const struct mlx5_flow_driver_ops *fops;
-
-       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-       return fops->create_policer_rules(dev, fm, attr);
-}
-
-/**
- * Destroy policer rules.
- *
- * @param[in] fm
- *   Pointer to flow meter structure.
- * @param[in] attr
- *   Pointer to flow attributes.
- *
- * @return
- *   0 on success, -1 otherwise.
- */
-int
-mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
-                               struct mlx5_flow_meter *fm,
-                               const struct rte_flow_attr *attr)
-{
-       const struct mlx5_flow_driver_ops *fops;
-
-       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-       return fops->destroy_policer_rules(dev, fm, attr);
-}
-
-/**
- * Allocate a counter.
- *
- * @param[in] dev
- *   Pointer to Ethernet device structure.
- *
- * @return
- *   Index to allocated counter  on success, 0 otherwise.
- */
-uint32_t
-mlx5_counter_alloc(struct rte_eth_dev *dev)
+uint32_t
+mlx5_counter_alloc(struct rte_eth_dev *dev)
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr attr = { .transfer = 0 };
@@ -7267,7 +6737,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        }
        mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
        size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-       mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+       mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
                                                 IBV_ACCESS_LOCAL_WRITE);
        if (!mem_mng->umem) {
                rte_errno = errno;
@@ -7282,10 +6752,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        mkey_attr.pg_access = 0;
        mkey_attr.klm_array = NULL;
        mkey_attr.klm_num = 0;
-       mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+       mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
+       mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
        mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
        if (!mem_mng->dm) {
-               mlx5_glue->devx_umem_dereg(mem_mng->umem);
+               mlx5_os_umem_dereg(mem_mng->umem);
                rte_errno = errno;
                mlx5_free(mem);
                return -rte_errno;
@@ -7482,16 +6953,7 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
                }
                rte_spinlock_unlock(&age_info->aged_sl);
        }
-       for (i = 0; i < sh->max_port; i++) {
-               age_info = &sh->port[i].age_info;
-               if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
-                       continue;
-               if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
-                       rte_eth_dev_callback_process
-                               (&rte_eth_devices[sh->port[i].devx_ih_port_id],
-                               RTE_ETH_EVENT_FLOW_AGED, NULL);
-               age_info->flags = 0;
-       }
+       mlx5_age_event_prepare(sh);
 }
 
 /**
@@ -7541,84 +7003,13 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
        sh->cmng.pending_queries--;
 }
 
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_ctx_shared *sh = priv->sh;
-       struct mlx5_hlist_entry *he;
-       union tunnel_offload_mark mbits = { .val = mark };
-       union mlx5_flow_tbl_key table_key = {
-               {
-                       .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
-                       .reserved = 0,
-                       .domain = !!mbits.transfer,
-                       .direction = 0,
-               }
-       };
-       he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-       return he ?
-              container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
-                               const struct mlx5_flow_tunnel *tunnel,
-                               uint32_t group, uint32_t *table,
-                               struct rte_flow_error *error)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_hlist_entry *he;
-       struct tunnel_tbl_entry *tte;
-       union tunnel_tbl_key key = {
-               .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
-               .group = group
-       };
-       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-       struct mlx5_hlist *group_hash;
-
-       group_hash = tunnel ? tunnel->groups : thub->groups;
-       he = mlx5_hlist_lookup(group_hash, key.val);
-       if (!he) {
-               tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
-                                 sizeof(*tte), 0,
-                                 SOCKET_ID_ANY);
-               if (!tte)
-                       goto err;
-               tte->hash.key = key.val;
-               mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-                                 &tte->flow_table);
-               if (tte->flow_table >= MLX5_MAX_TABLES) {
-                       DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
-                               tte->flow_table);
-                       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-                                       tte->flow_table);
-                       goto err;
-               } else if (!tte->flow_table) {
-                       goto err;
-               }
-               tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
-               mlx5_hlist_insert(group_hash, &tte->hash);
-       } else {
-               tte = container_of(he, typeof(*tte), hash);
-       }
-       *table = tte->flow_table;
-       DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
-               dev->data->port_id, key.tunnel_id, group, *table);
-       return 0;
-
-err:
-       if (tte)
-               mlx5_free(tte);
-       return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-                                 NULL, "tunnel group index not supported");
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
-                   struct flow_grp_info grp_info, struct rte_flow_error *error)
+                   const struct flow_grp_info *grp_info,
+                   struct rte_flow_error *error)
 {
-       if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+       if (grp_info->transfer && grp_info->external &&
+           grp_info->fdb_def_rule) {
                if (group == UINT32_MAX)
                        return rte_flow_error_set
                                                (error, EINVAL,
@@ -7675,24 +7066,25 @@ int
 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
                         const struct mlx5_flow_tunnel *tunnel,
                         uint32_t group, uint32_t *table,
-                        struct flow_grp_info grp_info,
+                        const struct flow_grp_info *grp_info,
                         struct rte_flow_error *error)
 {
        int ret;
        bool standard_translation;
 
-       if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
+       if (!grp_info->skip_scale && grp_info->external &&
+           group < MLX5_MAX_TABLES_EXTERNAL)
                group *= MLX5_FLOW_TABLE_FACTOR;
        if (is_tunnel_offload_active(dev)) {
-               standard_translation = !grp_info.external ||
-                                       grp_info.std_tbl_fix;
+               standard_translation = !grp_info->external ||
+                                       grp_info->std_tbl_fix;
        } else {
                standard_translation = true;
        }
        DRV_LOG(DEBUG,
-               "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
-               dev->data->port_id, group, grp_info.transfer,
-               grp_info.external, grp_info.fdb_def_rule,
+               "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
+               dev->data->port_id, group, grp_info->transfer,
+               grp_info->external, grp_info->fdb_def_rule,
                standard_translation ? "STANDARD" : "TUNNEL");
        if (standard_translation)
                ret = flow_group_to_table(dev->data->port_id, group, table,
@@ -7730,7 +7122,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
        for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
                struct rte_flow_attr attr = {
                        .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
-                       .priority = MLX5_FLOW_PRIO_RSVD,
+                       .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
                        .ingress = 1,
                };
                struct rte_flow_item items[] = {
@@ -7770,8 +7162,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
                                      flow_idx);
                if (!flow)
                        continue;
-               if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
-                       config->flow_mreg_c[n++] = idx;
+               config->flow_mreg_c[n++] = idx;
                flow_list_destroy(dev, NULL, flow_idx);
        }
        for (; n < MLX5_MREG_C_NUM; ++n)
@@ -7919,6 +7310,25 @@ flow_drv_action_update(struct rte_eth_dev *dev,
        return fops->action_update(dev, action, action_conf, error);
 }
 
+/* Wrapper for driver action_query op callback. */
+static int
+flow_drv_action_query(struct rte_eth_dev *dev,
+                     const struct rte_flow_shared_action *action,
+                     void *data,
+                     const struct mlx5_flow_driver_ops *fops,
+                     struct rte_flow_error *error)
+{
+       static const char err_msg[] = "shared action query unsupported";
+
+       if (!fops->action_query) {
+               DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+               rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+                                  NULL, err_msg);
+               return -rte_errno;
+       }
+       return fops->action_query(dev, action, data, error);
+}
+
 /**
  * Create shared action for reuse in multiple flow rules.
  *
@@ -7986,25 +7396,11 @@ mlx5_shared_action_update(struct rte_eth_dev *dev,
                        flow_get_drv_ops(flow_get_drv_type(dev, &attr));
        int ret;
 
-       switch (shared_action->type) {
-       case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
-               if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
-                       return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ACTION,
-                                                 NULL,
-                                                 "update action type invalid");
-               }
-               ret = flow_drv_action_validate(dev, NULL, action, fops, error);
-               if (ret)
-                       return ret;
-               return flow_drv_action_update(dev, shared_action, action->conf,
-                                             fops, error);
-       default:
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         NULL,
-                                         "action type not supported");
-       }
+       ret = flow_drv_action_validate(dev, NULL, action, fops, error);
+       if (ret)
+               return ret;
+       return flow_drv_action_update(dev, shared_action, action->conf, fops,
+                                     error);
 }
 
 /**
@@ -8035,18 +7431,11 @@ mlx5_shared_action_query(struct rte_eth_dev *dev,
                         void *data,
                         struct rte_flow_error *error)
 {
-       (void)dev;
-       switch (action->type) {
-       case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
-               __atomic_load(&action->refcnt, (uint32_t *)data,
-                             __ATOMIC_RELAXED);
-               return 0;
-       default:
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         NULL,
-                                         "action type not supported");
-       }
+       struct rte_flow_attr attr = { .transfer = 0 };
+       const struct mlx5_flow_driver_ops *fops =
+                       flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+       return flow_drv_action_query(dev, action, data, fops, error);
 }
 
 /**
@@ -8063,44 +7452,368 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 {
        struct rte_flow_error error;
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow_shared_action *action;
+       struct mlx5_shared_action_rss *action;
        int ret = 0;
+       uint32_t idx;
+
+       ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+                     priv->rss_shared_actions, idx, action, next) {
+               ret |= mlx5_shared_action_destroy(dev,
+                      (struct rte_flow_shared_action *)(uintptr_t)idx, &error);
+       }
+       return ret;
+}
+
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+       (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct mlx5_flow_driver_ops *fops;
+       int ret;
+       struct rte_flow_attr attr = { .transfer = 0 };
+
+       fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+       ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+       if (ret > 0)
+               ret = -ret;
+       return ret;
+}
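
A usage sketch for the new sync entry point, assuming the MLX5_DOMAIN_BIT_*
flags exported by rte_pmd_mlx5.h:

	#include <rte_pmd_mlx5.h>

	/* Flush pending steering writes on both NIC domains, e.g. before
	 * reading flow counters that must observe all inserted rules. */
	static int
	sync_nic_domains(uint16_t port_id)
	{
		return rte_pmd_mlx5_sync_flow(port_id,
					      MLX5_DOMAIN_BIT_NIC_RX |
					      MLX5_DOMAIN_BIT_NIC_TX);
	}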
+
+/**
+ * Tunnel offload functionality is defined for the DV environment only.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+       uint32_t val;
+       struct {
+               uint32_t app_reserve:8;
+               uint32_t table_id:15;
+               uint32_t transfer:1;
+               uint32_t _unused_:8;
+       };
+};
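
The union carves the 32-bit MARK value into 8 application-reserved bits, a
15-bit table id, a transfer bit and 8 unused bits. A small worked example of
the encode side (bit positions assume the usual little-endian bitfield
layout); tunnel_mark_decode() below performs the reverse through .val:

	static uint32_t
	encode_tunnel_mark(uint16_t tbl_id, int transfer)
	{
		union tunnel_offload_mark m = { .val = 0 };

		m.table_id = tbl_id & 0x7fff; /* bits 8..22 */
		m.transfer = !!transfer;      /* bit 23     */
		return m.val;                 /* bits 0..7 remain application owned */
	}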
+
+static bool
+mlx5_access_tunnel_offload_db
+       (struct rte_eth_dev *dev,
+        bool (*match)(struct rte_eth_dev *,
+                      struct mlx5_flow_tunnel *, const void *),
+        void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+        void (*miss)(struct rte_eth_dev *, void *),
+        void *ctx, bool lock_op);
 
-       while (!LIST_EMPTY(&priv->shared_actions)) {
-               action = LIST_FIRST(&priv->shared_actions);
-               ret = mlx5_shared_action_destroy(dev, action, &error);
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+                            struct rte_flow *flow,
+                            const struct rte_flow_attr *attr,
+                            const struct rte_flow_action *app_actions,
+                            uint32_t flow_idx,
+                            struct tunnel_default_miss_ctx *ctx,
+                            struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow *dev_flow;
+       struct rte_flow_attr miss_attr = *attr;
+       const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+       const struct rte_flow_item miss_items[2] = {
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_ETH,
+                       .spec = NULL,
+                       .last = NULL,
+                       .mask = NULL
+               },
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_END,
+                       .spec = NULL,
+                       .last = NULL,
+                       .mask = NULL
+               }
+       };
+       union tunnel_offload_mark mark_id;
+       struct rte_flow_action_mark miss_mark;
+       struct rte_flow_action miss_actions[3] = {
+               [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+               [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+       };
+       const struct rte_flow_action_jump *jump_data;
+       uint32_t i, flow_table = 0; /* prevent compilation warning */
+       struct flow_grp_info grp_info = {
+               .external = 1,
+               .transfer = attr->transfer,
+               .fdb_def_rule = !!priv->fdb_def_rule,
+               .std_tbl_fix = 0,
+       };
+       int ret;
+
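+       /* A non-transfer miss rule spreads packets over the port RSS queues;
+        * a transfer rule jumps to the FDB miss group instead. */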
+       if (!attr->transfer) {
+               uint32_t q_size;
+
+               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+               q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+               ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+                                        0, SOCKET_ID_ANY);
+               if (!ctx->queue)
+                       return rte_flow_error_set
+                               (error, ENOMEM,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                               NULL, "invalid default miss RSS");
+               ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+               ctx->action_rss.level = 0;
+               ctx->action_rss.types = priv->rss_conf.rss_hf;
+               ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
+               ctx->action_rss.queue_num = priv->reta_idx_n;
+               ctx->action_rss.key = priv->rss_conf.rss_key;
+               ctx->action_rss.queue = ctx->queue;
+               if (!priv->reta_idx_n || !priv->rxqs_n)
+                       return rte_flow_error_set
+                               (error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                               NULL, "invalid port configuration");
+               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+                       ctx->action_rss.types = 0;
+               for (i = 0; i != priv->reta_idx_n; ++i)
+                       ctx->queue[i] = (*priv->reta_idx)[i];
+       } else {
+               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+               ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
        }
+       miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+       for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+       jump_data = app_actions->conf;
+       miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+       miss_attr.group = jump_data->group;
+       ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+                                      &flow_table, &grp_info, error);
+       if (ret)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         NULL, "invalid tunnel id");
+       mark_id.app_reserve = 0;
+       mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+       mark_id.transfer = !!attr->transfer;
+       mark_id._unused_ = 0;
+       miss_mark.id = mark_id.val;
+       dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+                                   miss_items, miss_actions, flow_idx, error);
+       if (!dev_flow)
+               return -rte_errno;
+       dev_flow->flow = flow;
+       dev_flow->external = true;
+       dev_flow->tunnel = tunnel;
+       /* Subflow object was created, we must include it in the list. */
+       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                     dev_flow->handle, next);
+       DRV_LOG(DEBUG,
+               "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+               dev->data->port_id, tunnel->app_tunnel.type,
+               tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+       ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+                                 miss_actions, error);
+       if (!ret)
+               ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+                                                 error);
+
        return ret;
 }
 
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist_entry *he;
+       union tunnel_offload_mark mbits = { .val = mark };
+       union mlx5_flow_tbl_key table_key = {
+               {
+                       .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+                       .dummy = 0,
+                       .domain = !!mbits.transfer,
+                       .direction = 0,
+               }
+       };
+       he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+       return he ?
+              container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+                                  struct mlx5_hlist_entry *entry)
+{
+       struct mlx5_dev_ctx_shared *sh = list->ctx;
+       struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+       mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+                       tunnel_flow_tbl_to_id(tte->flow_table));
+       mlx5_free(tte);
+}
+
+static int
+mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+                                 struct mlx5_hlist_entry *entry,
+                                 uint64_t key, void *cb_ctx __rte_unused)
+{
+       union tunnel_tbl_key tbl = {
+               .val = key,
+       };
+       struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
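+       /* Hash list convention: a zero return means the entry matches. */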
+       return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
+                                  void *ctx __rte_unused)
+{
+       struct mlx5_dev_ctx_shared *sh = list->ctx;
+       struct tunnel_tbl_entry *tte;
+       union tunnel_tbl_key tbl = {
+               .val = key,
+       };
+
+       tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+                         sizeof(*tte), 0,
+                         SOCKET_ID_ANY);
+       if (!tte)
+               goto err;
+       mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+                         &tte->flow_table);
+       if (tte->flow_table >= MLX5_MAX_TABLES) {
+               DRV_LOG(ERR, "Tunnel TBL ID %d exceeds max limit.",
+                       tte->flow_table);
+               mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+                               tte->flow_table);
+               goto err;
+       } else if (!tte->flow_table) {
+               goto err;
+       }
+       tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+       tte->tunnel_id = tbl.tunnel_id;
+       tte->group = tbl.group;
+       return &tte->hash;
+err:
+       if (tte)
+               mlx5_free(tte);
+       return NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+                               const struct mlx5_flow_tunnel *tunnel,
+                               uint32_t group, uint32_t *table,
+                               struct rte_flow_error *error)
+{
+       struct mlx5_hlist_entry *he;
+       struct tunnel_tbl_entry *tte;
+       union tunnel_tbl_key key = {
+               .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+               .group = group
+       };
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_hlist *group_hash;
+
+       group_hash = tunnel ? tunnel->groups : thub->groups;
+       he = mlx5_hlist_register(group_hash, key.val, NULL);
+       if (!he)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                         NULL,
+                                         "tunnel group index not supported");
+       tte = container_of(he, typeof(*tte), hash);
+       *table = tte->flow_table;
+       DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+               dev->data->port_id, key.tunnel_id, group, *table);
+       return 0;
+}
+
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
                      struct mlx5_flow_tunnel *tunnel)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool;
 
        DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
                dev->data->port_id, tunnel->tunnel_id);
-       RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
        LIST_REMOVE(tunnel, chain);
-       mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-                       tunnel->tunnel_id);
-       mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
-       mlx5_free(tunnel);
+       mlx5_hlist_destroy(tunnel->groups);
+       ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+       mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+       (struct rte_eth_dev *dev,
+        bool (*match)(struct rte_eth_dev *,
+                      struct mlx5_flow_tunnel *, const void *),
+        void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+        void (*miss)(struct rte_eth_dev *, void *),
+        void *ctx, bool lock_op)
 {
+       bool verdict = false;
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-       struct mlx5_flow_tunnel *tun;
+       struct mlx5_flow_tunnel *tunnel;
 
-       LIST_FOREACH(tun, &thub->tunnels, chain) {
-               if (tun->tunnel_id == id)
+       rte_spinlock_lock(&thub->sl);
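+       /* The match callback always runs under the tunnel hub spinlock;
+        * hit/miss callbacks run locked only when lock_op is set. */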
+       LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+               verdict = match(dev, tunnel, (const void *)ctx);
+               if (verdict)
                        break;
        }
+       if (!lock_op)
+               rte_spinlock_unlock(&thub->sl);
+       if (verdict && hit)
+               hit(dev, tunnel, ctx);
+       if (!verdict && miss)
+               miss(dev, ctx);
+       if (lock_op)
+               rte_spinlock_unlock(&thub->sl);
+
+       return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+       uint32_t tunnel_id;
+       struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+                    struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+       const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+                  struct mlx5_flow_tunnel *tunnel, void *x)
+{
+       struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       ctx->tunnel = tunnel;
+}
 
-       return tun;
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+       struct tunnel_db_find_tunnel_id_ctx ctx = {
+               .tunnel_id = id,
+       };
+
+       mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+                                     find_tunnel_id_hit, NULL, &ctx, true);
+
+       return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -8108,38 +7821,28 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
                          const struct rte_flow_tunnel *app_tunnel)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_indexed_pool *ipool;
        struct mlx5_flow_tunnel *tunnel;
        uint32_t id;
 
-       mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-                         &id);
+       ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+       tunnel = mlx5_ipool_zmalloc(ipool, &id);
+       if (!tunnel)
+               return NULL;
        if (id >= MLX5_MAX_TUNNELS) {
-               mlx5_ipool_free(priv->sh->ipool
-                               [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+               mlx5_ipool_free(ipool, id);
-               DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+               DRV_LOG(ERR, "Tunnel ID %u exceeds the maximum limit.", id);
                return NULL;
-       } else if (!id) {
-               return NULL;
-       }
-       /**
-        * mlx5 flow tunnel is an auxlilary data structure
-        * It's not part of IO. No need to allocate it from
-        * huge pages pools dedicated for IO
-        */
-       tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-                            0, SOCKET_ID_ANY);
-       if (!tunnel) {
-               mlx5_ipool_free(priv->sh->ipool
-                               [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-               return NULL;
        }
-       tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
+       tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+                                          mlx5_flow_tunnel_grp2tbl_create_cb,
+                                          mlx5_flow_tunnel_grp2tbl_match_cb,
+                                          mlx5_flow_tunnel_grp2tbl_remove_cb);
        if (!tunnel->groups) {
-               mlx5_ipool_free(priv->sh->ipool
-                               [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-               mlx5_free(tunnel);
+               mlx5_ipool_free(ipool, id);
                return NULL;
        }
+       tunnel->groups->ctx = priv->sh;
        /* initiate new PMD tunnel */
        memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
        tunnel->tunnel_id = id;
@@ -8158,36 +7861,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
        return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+       const struct rte_flow_tunnel *app_tunnel;
+       struct mlx5_flow_tunnel *tunnel;
+};
+
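+/*
+ * Get-or-create callbacks: a tunnel matches when the application tunnel
+ * description is byte-identical; a hit takes a new reference, a miss
+ * allocates the PMD tunnel object and links it into the hub list.
+ */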
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+                            struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+       const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+                      sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+                          struct mlx5_flow_tunnel *tunnel, void *x)
+{
+       /* called under tunnel spinlock protection */
+       struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       /* Keep the counter update atomic: the release path decrements it
+        * with __atomic_sub_fetch() after the hub lock is dropped.
+        */
+       __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+       ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+       /* called under tunnel spinlock protection */
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+       /* Drop the hub lock around the allocation, then re-take it to
+        * publish the new tunnel; take the initial reference only when
+        * the allocation succeeded.
+        */
+       rte_spinlock_unlock(&thub->sl);
+       ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+       rte_spinlock_lock(&thub->sl);
+       if (ctx->tunnel) {
+               ctx->tunnel->refctn = 1;
+               LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+       }
+}
+
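+/**
+ * Return the PMD tunnel object matching @p app_tunnel, creating it on
+ * first use. On success *tunnel holds a new reference that is dropped
+ * through the tunnel element release path; returns 0 or -ENOMEM.
+ */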
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
                     const struct rte_flow_tunnel *app_tunnel,
                     struct mlx5_flow_tunnel **tunnel)
 {
-       int ret;
-       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-       struct mlx5_flow_tunnel *tun;
-
-       LIST_FOREACH(tun, &thub->tunnels, chain) {
-               if (!memcmp(app_tunnel, &tun->app_tunnel,
-                           sizeof(*app_tunnel))) {
-                       *tunnel = tun;
-                       ret = 0;
-                       break;
-               }
-       }
-       if (!tun) {
-               tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-               if (tun) {
-                       LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-                       *tunnel = tun;
-               } else {
-                       ret = -ENOMEM;
-               }
-       }
-       if (tun)
-               __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+       struct tunnel_db_get_tunnel_ctx ctx = {
+               .app_tunnel = app_tunnel,
+       };
 
-       return ret;
+       mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+                                     get_tunnel_miss, &ctx, true);
+       *tunnel = ctx.tunnel;
+       return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -8198,7 +7925,7 @@ void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
                return;
        if (!LIST_EMPTY(&thub->tunnels))
-               DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
+               DRV_LOG(WARNING, "port %u tunnels present", port_id);
-       mlx5_hlist_destroy(thub->groups, NULL, NULL);
+       mlx5_hlist_destroy(thub->groups);
        mlx5_free(thub);
 }
 
@@ -8212,40 +7939,318 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
        if (!thub)
                return -ENOMEM;
        LIST_INIT(&thub->tunnels);
-       thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
+       rte_spinlock_init(&thub->sl);
+       thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
+                                        0, mlx5_flow_tunnel_grp2tbl_create_cb,
+                                        mlx5_flow_tunnel_grp2tbl_match_cb,
+                                        mlx5_flow_tunnel_grp2tbl_remove_cb);
        if (!thub->groups) {
                err = -rte_errno;
                goto err;
        }
+       thub->groups->ctx = sh;
        sh->tunnel_hub = thub;
 
        return 0;
 
 err:
        if (thub->groups)
-               mlx5_hlist_destroy(thub->groups, NULL, NULL);
+               mlx5_hlist_destroy(thub->groups);
        if (thub)
                mlx5_free(thub);
        return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-       (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
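+/*
+ * Validate an application tunnel request. Only the VXLAN tunnel type is
+ * accepted for now; on failure *err_msg points to a static reason string.
+ */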
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+                         struct rte_flow_tunnel *tunnel,
+                         const char **err_msg)
+{
+       *err_msg = NULL;
+       if (!is_tunnel_offload_active(dev)) {
+               *err_msg = "tunnel offload was not activated";
+               goto out;
+       } else if (!tunnel) {
+               *err_msg = "no application tunnel";
+               goto out;
+       }
 
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+       switch (tunnel->type) {
+       default:
+               *err_msg = "unsupported tunnel type";
+               goto out;
+       case RTE_FLOW_ITEM_TYPE_VXLAN:
+               break;
+       }
+
+out:
+       return !*err_msg;
+}
+
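+/*
+ * rte_flow_tunnel_decap_set() PMD callback: resolve the PMD tunnel object
+ * for the application tunnel and hand back its single pre-built tunnel-set
+ * action. A minimal application-side sketch (local names are illustrative):
+ *
+ *     struct rte_flow_action *pmd_actions;
+ *     uint32_t n_pmd_actions;
+ *
+ *     rte_flow_tunnel_decap_set(port_id, &app_tunnel,
+ *                               &pmd_actions, &n_pmd_actions, &error);
+ *     // on success, prepend pmd_actions to the flow's action array
+ */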
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+                          struct rte_flow_tunnel *app_tunnel,
+                          struct rte_flow_action **actions,
+                          uint32_t *num_of_actions,
+                          struct rte_flow_error *error)
 {
-       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-       const struct mlx5_flow_driver_ops *fops;
        int ret;
-       struct rte_flow_attr attr = { .transfer = 0 };
+       struct mlx5_flow_tunnel *tunnel;
+       const char *err_msg = NULL;
+       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
 
-       fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-       ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-       if (ret > 0)
-               ret = -ret;
-       return ret;
+       if (!verdict)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         err_msg);
+       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+       if (ret < 0) {
+               return rte_flow_error_set(error, ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "failed to initialize pmd tunnel");
+       }
+       *actions = &tunnel->action;
+       *num_of_actions = 1;
+       return 0;
+}
+
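+/*
+ * rte_flow_tunnel_match() PMD callback: same resolution as decap_set above,
+ * but hands back the tunnel's pre-built flow item for the application to
+ * prepend to its pattern.
+ */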
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+                      struct rte_flow_tunnel *app_tunnel,
+                      struct rte_flow_item **items,
+                      uint32_t *num_of_items,
+                      struct rte_flow_error *error)
+{
+       int ret;
+       struct mlx5_flow_tunnel *tunnel;
+       const char *err_msg = NULL;
+       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+       if (!verdict)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         err_msg);
+       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+       if (ret < 0) {
+               return rte_flow_error_set(error, ret,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "failed to initialize pmd tunnel");
+       }
+       *items = &tunnel->item;
+       *num_of_items = 1;
+       return 0;
+}
+
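+/*
+ * Release callbacks: a PMD item/action handed out above is recognized by
+ * pointer identity, the hit drops the tunnel reference (freeing the tunnel
+ * when it reaches zero) and a miss reports EINVAL to the caller.
+ */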
+struct tunnel_db_element_release_ctx {
+       struct rte_flow_item *items;
+       struct rte_flow_action *actions;
+       uint32_t num_elements;
+       struct rte_flow_error *error;
+       int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+                            struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+       const struct tunnel_db_element_release_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       if (ctx->num_elements != 1)
+               return false;
+       else if (ctx->items)
+               return ctx->items == &tunnel->item;
+       else if (ctx->actions)
+               return ctx->actions == &tunnel->action;
+
+       return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+                          struct mlx5_flow_tunnel *tunnel, void *x)
+{
+       struct tunnel_db_element_release_ctx *ctx = x;
+
+       ctx->ret = 0;
+       if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+               mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+       struct tunnel_db_element_release_ctx *ctx = x;
+
+       RTE_SET_USED(dev);
+       ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+                                     RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                     "invalid argument");
+}
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+                             struct rte_flow_item *pmd_items,
+                             uint32_t num_items, struct rte_flow_error *err)
+{
+       struct tunnel_db_element_release_ctx ctx = {
+               .items = pmd_items,
+               .actions = NULL,
+               .num_elements = num_items,
+               .error = err,
+       };
+
+       mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+                                     tunnel_element_release_hit,
+                                     tunnel_element_release_miss, &ctx, false);
+
+       return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+                               struct rte_flow_action *pmd_actions,
+                               uint32_t num_actions, struct rte_flow_error *err)
+{
+       struct tunnel_db_element_release_ctx ctx = {
+               .items = NULL,
+               .actions = pmd_actions,
+               .num_elements = num_actions,
+               .error = err,
+       };
+
+       mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+                                     tunnel_element_release_hit,
+                                     tunnel_element_release_miss, &ctx, false);
+
+       return ctx.ret;
 }
+
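+/*
+ * rte_flow_get_restore_info() PMD callback: a packet that missed on the
+ * tunnel offload group carries the tunnel mark in its FDIR mbuf fields;
+ * decode the mark back into the application tunnel description and the
+ * group the packet came from.
+ */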
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+                                 struct rte_mbuf *m,
+                                 struct rte_flow_restore_info *info,
+                                 struct rte_flow_error *err)
+{
+       uint64_t ol_flags = m->ol_flags;
+       const struct mlx5_flow_tbl_data_entry *tble;
+       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+       if (!is_tunnel_offload_active(dev)) {
+               info->flags = 0;
+               return 0;
+       }
+
+       if ((ol_flags & mask) != mask)
+               goto err;
+       tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+       if (!tble) {
+               DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+                       dev->data->port_id, m->hash.fdir.hi);
+               goto err;
+       }
+       MLX5_ASSERT(tble->tunnel);
+       memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+       info->group_id = tble->group_id;
+       info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+                     RTE_FLOW_RESTORE_INFO_GROUP_ID |
+                     RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+       return 0;
+
+err:
+       return rte_flow_error_set(err, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                 "failed to get restore info");
+}
+
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
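+/*
+ * Tunnel offload depends on DV flow support; when it is compiled out, the
+ * callbacks below replace the real implementations and reject the API
+ * with ENOTSUP.
+ */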
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+                          __rte_unused struct rte_flow_tunnel *app_tunnel,
+                          __rte_unused struct rte_flow_action **actions,
+                          __rte_unused uint32_t *num_of_actions,
+                          __rte_unused struct rte_flow_error *error)
+{
+       return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+                      __rte_unused struct rte_flow_tunnel *app_tunnel,
+                      __rte_unused struct rte_flow_item **items,
+                      __rte_unused uint32_t *num_of_items,
+                      __rte_unused struct rte_flow_error *error)
+{
+       return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+                             __rte_unused struct rte_flow_item *pmd_items,
+                             __rte_unused uint32_t num_items,
+                             __rte_unused struct rte_flow_error *err)
+{
+       return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+                               __rte_unused struct rte_flow_action *pmd_action,
+                               __rte_unused uint32_t num_actions,
+                               __rte_unused struct rte_flow_error *err)
+{
+       return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+                                 __rte_unused struct rte_mbuf *m,
+                                 __rte_unused struct rte_flow_restore_info *i,
+                                 __rte_unused struct rte_flow_error *err)
+{
+       return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+                            __rte_unused struct rte_flow *flow,
+                            __rte_unused const struct rte_flow_attr *attr,
+                            __rte_unused const struct rte_flow_action *actions,
+                            __rte_unused uint32_t flow_idx,
+                            __rte_unused struct tunnel_default_miss_ctx *ctx,
+                            __rte_unused struct rte_flow_error *error)
+{
+       return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+                   __rte_unused uint32_t id)
+{
+       return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+                     __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+                               __rte_unused const struct mlx5_flow_tunnel *t,
+                               __rte_unused uint32_t group,
+                               __rte_unused uint32_t *table,
+                               struct rte_flow_error *error)
+{
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                 "tunnel offload requires DV support");
+}
+
+void
+mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
+                       __rte_unused uint16_t port_id)
+{
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+