net/mlx5: add flow sync API
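
Flow rule creation may return before the rule is actually committed to
the HW steering tables. Expose a PMD-private API that lets an
application force a synchronization of the SW and HW flow caches for
the selected steering domains, so that subsequent packets hit the
newly inserted rules.

A minimal usage sketch (assuming the MLX5_DOMAIN_BIT_* selectors are
exported by rte_pmd_mlx5.h alongside the new function):

    #include <rte_pmd_mlx5.h>

    /* Make sure NIC Rx and FDB rules are committed to HW. */
    int ret = rte_pmd_mlx5_sync_flow(port_id,
                                     MLX5_DOMAIN_BIT_NIC_RX |
                                     MLX5_DOMAIN_BIT_FDB);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "flow sync failed: %d\n", ret);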
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 6077685..e2af7a9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_common_os.h"
+#include "rte_pmd_mlx5.h"
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+                    const struct rte_flow_tunnel *app_tunnel,
+                    struct mlx5_flow_tunnel **tunnel);
 
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -567,6 +580,162 @@ static int mlx5_shared_action_query
                                 const struct rte_flow_shared_action *action,
                                 void *data,
                                 struct rte_flow_error *error);
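+
+/*
+ * Check that a tunnel offload request can be served.
+ * On failure, *err_msg is set to a static string naming the reason.
+ */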
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+                         struct rte_flow_tunnel *tunnel,
+                         const char **err_msg)
+{
+       *err_msg = NULL;
+       if (!is_tunnel_offload_active(dev)) {
+               *err_msg = "tunnel offload was not activated";
+               goto out;
+       } else if (!tunnel) {
+               *err_msg = "no application tunnel";
+               goto out;
+       }
+
+       switch (tunnel->type) {
+       default:
+               *err_msg = "unsupported tunnel type";
+               goto out;
+       case RTE_FLOW_ITEM_TYPE_VXLAN:
+               break;
+       }
+
+out:
+       return !*err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+                          struct rte_flow_tunnel *app_tunnel,
+                          struct rte_flow_action **actions,
+                          uint32_t *num_of_actions,
+                          struct rte_flow_error *error)
+{
+       int ret;
+       struct mlx5_flow_tunnel *tunnel;
+       const char *err_msg = NULL;
+       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+       if (!verdict)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         err_msg);
+       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+       if (ret < 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "failed to initialize pmd tunnel");
+       }
+       *actions = &tunnel->action;
+       *num_of_actions = 1;
+       return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+                      struct rte_flow_tunnel *app_tunnel,
+                      struct rte_flow_item **items,
+                      uint32_t *num_of_items,
+                      struct rte_flow_error *error)
+{
+       int ret;
+       struct mlx5_flow_tunnel *tunnel;
+       const char *err_msg = NULL;
+       bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+       if (!verdict)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         err_msg);
+       ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+       if (ret < 0) {
+               return rte_flow_error_set(error, -ret,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "failed to initialize pmd tunnel");
+       }
+       *items = &tunnel->item;
+       *num_of_items = 1;
+       return 0;
+}
+
+static int
+mlx5_flow_item_release(struct rte_eth_dev *dev,
+                      struct rte_flow_item *pmd_items,
+                      uint32_t num_items, struct rte_flow_error *err)
+{
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_tunnel *tun;
+
+       LIST_FOREACH(tun, &thub->tunnels, chain) {
+               if (&tun->item == pmd_items)
+                       break;
+       }
+       if (!tun || num_items != 1)
+               return rte_flow_error_set(err, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "invalid argument");
+       if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+               mlx5_flow_tunnel_free(dev, tun);
+       return 0;
+}
+
+static int
+mlx5_flow_action_release(struct rte_eth_dev *dev,
+                        struct rte_flow_action *pmd_actions,
+                        uint32_t num_actions, struct rte_flow_error *err)
+{
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_tunnel *tun;
+
+       LIST_FOREACH(tun, &thub->tunnels, chain) {
+               if (&tun->action == pmd_actions)
+                       break;
+       }
+       if (!tun || num_actions != 1)
+               return rte_flow_error_set(err, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "invalid argument");
+       if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+               mlx5_flow_tunnel_free(dev, tun);
+
+       return 0;
+}
+
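+/*
+ * Restore tunnel info for a packet that missed the tunnel offload
+ * rules in HW: the default miss rule marked the packet, so the
+ * PKT_RX_FDIR | PKT_RX_FDIR_ID flags and hash.fdir.hi carry the
+ * encoded table id that identifies the originating tunnel.
+ */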
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+                                 struct rte_mbuf *m,
+                                 struct rte_flow_restore_info *info,
+                                 struct rte_flow_error *err)
+{
+       uint64_t ol_flags = m->ol_flags;
+       const struct mlx5_flow_tbl_data_entry *tble;
+       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+       if ((ol_flags & mask) != mask)
+               goto err;
+       tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+       if (!tble) {
+               DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+                       dev->data->port_id, m->hash.fdir.hi);
+               goto err;
+       }
+       MLX5_ASSERT(tble->tunnel);
+       memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+       info->group_id = tble->group_id;
+       info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+                     RTE_FLOW_RESTORE_INFO_GROUP_ID |
+                     RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+       return 0;
+
+err:
+       return rte_flow_error_set(err, EINVAL,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                 "failed to get restore info");
+}
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -581,6 +750,11 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .shared_action_destroy = mlx5_shared_action_destroy,
        .shared_action_update = mlx5_shared_action_update,
        .shared_action_query = mlx5_shared_action_query,
+       .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
+       .tunnel_match = mlx5_flow_tunnel_match,
+       .tunnel_action_decap_release = mlx5_flow_action_release,
+       .tunnel_item_release = mlx5_flow_item_release,
+       .get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
 /* Convert FDIR request to Generic flow. */
@@ -1620,6 +1794,8 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] ext_vlan_sup
+ *   Whether extended VLAN features are supported or not.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1740,7 +1916,7 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
  */
 int
 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
-                           uint64_t item_flags,
+                           uint64_t item_flags, bool ext_vlan_sup,
                            struct rte_flow_error *error)
 {
        const struct rte_flow_item_eth *mask = item->mask;
@@ -1748,6 +1924,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
+               .has_vlan = ext_vlan_sup ? 1 : 0,
        };
        int ret;
        int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -2866,6 +3043,14 @@ flow_null_query(struct rte_eth_dev *dev __rte_unused,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
 
+static int
+flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
+                     uint32_t domains __rte_unused,
+                     uint32_t flags __rte_unused)
+{
+       return 0;
+}
+
 /* Void driver to protect from null pointer reference. */
 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
        .validate = flow_null_validate,
@@ -2875,6 +3060,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
        .remove = flow_null_remove,
        .destroy = flow_null_destroy,
        .query = flow_null_query,
+       .sync_domain = flow_null_sync_domain,
 };
 
 /**
@@ -3444,6 +3630,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        const struct rte_flow_action_raw_encap *raw_encap;
+       const struct rte_eth_hairpin_conf *conf;
 
        if (!attr->ingress)
                return 0;
@@ -3453,8 +3640,8 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        queue = actions->conf;
                        if (queue == NULL)
                                return 0;
-                       if (mlx5_rxq_get_type(dev, queue->index) !=
-                           MLX5_RXQ_TYPE_HAIRPIN)
+                       conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
+                       if (conf != NULL && !!conf->tx_explicit)
                                return 0;
                        queue_action = 1;
                        action_n++;
@@ -3463,8 +3650,8 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        rss = actions->conf;
                        if (rss == NULL || rss->queue_num == 0)
                                return 0;
-                       if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
-                           MLX5_RXQ_TYPE_HAIRPIN)
+                       conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
+                       if (conf != NULL && !!conf->tx_explicit)
                                return 0;
                        queue_action = 1;
                        action_n++;
@@ -4065,6 +4252,142 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        return 0;
 }
 
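+/*
+ * Layout of the flow mark set by a tunnel offload default miss rule.
+ * The mark reaches SW in the mbuf hash.fdir.hi field and is decoded
+ * back into a flow table entry by tunnel_mark_decode().
+ */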
+__extension__
+union tunnel_offload_mark {
+       uint32_t val;
+       struct {
+               uint32_t app_reserve:8;
+               uint32_t table_id:15;
+               uint32_t transfer:1;
+               uint32_t _unused_:8;
+       };
+};
+
+struct tunnel_default_miss_ctx {
+       uint16_t *queue;
+       __extension__
+       union {
+               struct rte_flow_action_rss action_rss;
+               struct rte_flow_action_queue miss_queue;
+               struct rte_flow_action_jump miss_jump;
+               uint8_t raw[0];
+       };
+};
+
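+/*
+ * Attach a default miss subflow to a tunnel-set rule: mark the packet
+ * with the encoded target table id, then steer it to the SW path -
+ * RSS over the configured Rx queues for NIC rules, or a JUMP to the
+ * dedicated miss group for FDB (transfer) rules.
+ */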
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+                            struct rte_flow *flow,
+                            const struct rte_flow_attr *attr,
+                            const struct rte_flow_action *app_actions,
+                            uint32_t flow_idx,
+                            struct tunnel_default_miss_ctx *ctx,
+                            struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow *dev_flow;
+       struct rte_flow_attr miss_attr = *attr;
+       const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+       const struct rte_flow_item miss_items[2] = {
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_ETH,
+                       .spec = NULL,
+                       .last = NULL,
+                       .mask = NULL
+               },
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_END,
+                       .spec = NULL,
+                       .last = NULL,
+                       .mask = NULL
+               }
+       };
+       union tunnel_offload_mark mark_id;
+       struct rte_flow_action_mark miss_mark;
+       struct rte_flow_action miss_actions[3] = {
+               [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+               [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+       };
+       const struct rte_flow_action_jump *jump_data;
+       uint32_t i, flow_table = 0; /* prevent compilation warning */
+       struct flow_grp_info grp_info = {
+               .external = 1,
+               .transfer = attr->transfer,
+               .fdb_def_rule = !!priv->fdb_def_rule,
+               .std_tbl_fix = 0,
+       };
+       int ret;
+
+       if (!attr->transfer) {
+               uint32_t q_size;
+
+               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+               q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+               ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+                                        0, SOCKET_ID_ANY);
+               if (!ctx->queue)
+                       return rte_flow_error_set
+                               (error, ENOMEM,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                               NULL, "invalid default miss RSS");
+               ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+               ctx->action_rss.level = 0;
+               ctx->action_rss.types = priv->rss_conf.rss_hf;
+               ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
+               ctx->action_rss.queue_num = priv->reta_idx_n;
+               ctx->action_rss.key = priv->rss_conf.rss_key;
+               ctx->action_rss.queue = ctx->queue;
+               if (!priv->reta_idx_n || !priv->rxqs_n)
+                       return rte_flow_error_set
+                               (error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                               NULL, "invalid port configuration");
+               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+                       ctx->action_rss.types = 0;
+               for (i = 0; i != priv->reta_idx_n; ++i)
+                       ctx->queue[i] = (*priv->reta_idx)[i];
+       } else {
+               miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+               ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+       }
+       miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+       for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+       jump_data = app_actions->conf;
+       miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+       miss_attr.group = jump_data->group;
+       ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+                                      &flow_table, grp_info, error);
+       if (ret)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         NULL, "invalid tunnel id");
+       mark_id.app_reserve = 0;
+       mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+       mark_id.transfer = !!attr->transfer;
+       mark_id._unused_ = 0;
+       miss_mark.id = mark_id.val;
+       dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+                                   miss_items, miss_actions, flow_idx, error);
+       if (!dev_flow)
+               return -rte_errno;
+       dev_flow->flow = flow;
+       dev_flow->external = true;
+       dev_flow->tunnel = tunnel;
+       /* Subflow object was created, we must include it in the list. */
+       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+                     dev_flow->handle, next);
+       DRV_LOG(DEBUG,
+               "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+               dev->data->port_id, tunnel->app_tunnel.type,
+               tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+       ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+                                 miss_actions, error);
+       if (!ret)
+               ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+                                                 error);
+
+       return ret;
+}
+
 /**
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
@@ -5187,6 +5510,27 @@ flow_create_split_outer(struct rte_eth_dev *dev,
        return ret;
 }
 
+static struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(struct rte_eth_dev *dev,
+                     const struct rte_flow_attr *attr,
+                     const struct rte_flow_item items[],
+                     const struct rte_flow_action actions[])
+{
+       struct mlx5_flow_tunnel *tunnel;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+       if (is_flow_tunnel_match_rule(dev, attr, items, actions))
+               tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
+       else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
+               tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
+       else
+               tunnel = NULL;
+#pragma GCC diagnostic pop
+
+       return tunnel;
+}
+
 /**
  * Create a flow and add it to @p list.
  *
@@ -5253,6 +5597,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        struct rte_flow_attr attr_factor = {0};
        const struct rte_flow_action *actions;
        struct rte_flow_action *translated_actions = NULL;
+       struct mlx5_flow_tunnel *tunnel;
+       struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
        int ret = flow_shared_actions_translate(original_actions,
                                                shared_actions,
                                                &shared_actions_n,
@@ -5264,8 +5610,6 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        }
        actions = translated_actions ? translated_actions : original_actions;
        memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
-       if (external)
-               attr_factor.group *= MLX5_FLOW_TABLE_FACTOR;
        p_actions_rx = actions;
        hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
        ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
@@ -5340,6 +5684,19 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
                                              error);
                if (ret < 0)
                        goto error;
+               if (is_flow_tunnel_steer_rule(dev, attr,
+                                             buf->entry[i].pattern,
+                                             p_actions_rx)) {
+                       ret = flow_tunnel_add_default_miss(dev, flow, attr,
+                                                          p_actions_rx,
+                                                          idx,
+                                                          &default_miss_ctx,
+                                                          error);
+                       if (ret < 0) {
+                               mlx5_free(default_miss_ctx.queue);
+                               goto error;
+                       }
+               }
        }
        /* Create the tx flow. */
        if (hairpin_flow) {
@@ -5395,6 +5752,13 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
        priv->flow_idx = priv->flow_nested_idx;
        if (priv->flow_nested_idx)
                priv->flow_nested_idx = 0;
+       tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+       if (tunnel) {
+               flow->tunnel = 1;
+               flow->tunnel_id = tunnel->tunnel_id;
+               __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+               mlx5_free(default_miss_ctx.queue);
+       }
        return idx;
 error:
        MLX5_ASSERT(flow);
@@ -5584,6 +5949,13 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
                }
        }
+       if (flow->tunnel) {
+               struct mlx5_flow_tunnel *tunnel;
+
+               tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
+               RTE_VERIFY(tunnel);
+               if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+                       mlx5_flow_tunnel_free(dev, tunnel);
+       }
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 }
 
 /**
@@ -7118,19 +7490,122 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
        sh->cmng.pending_queries--;
 }
 
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hlist_entry *he;
+       union tunnel_offload_mark mbits = { .val = mark };
+       union mlx5_flow_tbl_key table_key = {
+               {
+                       .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+                       .reserved = 0,
+                       .domain = !!mbits.transfer,
+                       .direction = 0,
+               }
+       };
+       he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+       return he ?
+              container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
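+/*
+ * Map a tunnel/group pair to a flow table id, allocating a fresh id
+ * from the hub table id pool on first use.
+ */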
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+                               const struct mlx5_flow_tunnel *tunnel,
+                               uint32_t group, uint32_t *table,
+                               struct rte_flow_error *error)
+{
+       struct mlx5_hlist_entry *he;
+       struct tunnel_tbl_entry *tte;
+       union tunnel_tbl_key key = {
+               .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+               .group = group
+       };
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_hlist *group_hash;
+
+       group_hash = tunnel ? tunnel->groups : thub->groups;
+       he = mlx5_hlist_lookup(group_hash, key.val);
+       if (!he) {
+               int ret;
+               tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+                                 sizeof(*tte), 0,
+                                 SOCKET_ID_ANY);
+               if (!tte)
+                       goto err;
+               tte->hash.key = key.val;
+               ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table);
+               if (ret) {
+                       mlx5_free(tte);
+                       goto err;
+               }
+               tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+               mlx5_hlist_insert(group_hash, &tte->hash);
+       } else {
+               tte = container_of(he, typeof(*tte), hash);
+       }
+       *table = tte->flow_table;
+       DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+               dev->data->port_id, key.tunnel_id, group, *table);
+       return 0;
+
+err:
+       return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                 NULL, "tunnel group index not supported");
+}
+
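+/*
+ * Standard group to table translation: with the FDB default rule an
+ * external transfer group is shifted by one, otherwise the group id
+ * is used as the table id directly.
+ */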
+static int
+flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
+                   struct flow_grp_info grp_info, struct rte_flow_error *error)
+{
+       if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+               if (group == UINT32_MAX)
+                       return rte_flow_error_set
+                                               (error, EINVAL,
+                                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                                NULL,
+                                                "group index not supported");
+               *table = group + 1;
+       } else {
+               *table = group;
+       }
+       DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
+       return 0;
+}
+
 /**
  * Translate the rte_flow group index to HW table value.
  *
- * @param[in] attributes
- *   Pointer to flow attributes
- * @param[in] external
- *   Value is part of flow rule created by request external to PMD.
+ * If tunnel offload is disabled, all group ids are converted to flow
+ * table ids using the standard method.
+ * If tunnel offload is enabled, a group id can be converted using either
+ * the standard or the tunnel conversion method. The method selection
+ * depends on the flags in the `grp_info` parameter:
+ * - Internal (grp_info.external == 0) groups are converted with the
+ *   standard method.
+ * - Group ids in a JUMP action are converted with the tunnel method.
+ * - A group id in a rule attribute is converted depending on the rule
+ *   type and on the group id value:
+ *   ** a non-zero group attribute is converted with the tunnel method;
+ *   ** a zero group attribute in a non-tunnel rule is converted with
+ *      the standard method - there is only one root table;
+ *   ** a zero group attribute in a steer tunnel rule is converted with
+ *      the standard method - single root table;
+ *   ** a zero group attribute in a match tunnel rule is a special OvS
+ *      case: that value is used for portability reasons, and it is
+ *      converted with the tunnel method.
+ *
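+ * For example (a sketch, assuming tunnel offload is active): the group
+ * of a JUMP action in an external rule is mapped through the tunnel
+ * group hash table to an allocated HW table id, while the same value
+ * used as the group attribute of an internal rule takes the standard
+ * translation path.
+ *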
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] tunnel
+ *   PMD tunnel offload object
  * @param[in] group
  *   rte_flow group index value.
- * @param[out] fdb_def_rule
- *   Whether fdb jump to table 1 is configured.
  * @param[out] table
  *   HW table value.
+ * @param[in] grp_info
+ *   Flags used for conversion.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -7138,22 +7613,36 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
-                        uint32_t group, bool fdb_def_rule, uint32_t *table,
+mlx5_flow_group_to_table(struct rte_eth_dev *dev,
+                        const struct mlx5_flow_tunnel *tunnel,
+                        uint32_t group, uint32_t *table,
+                        struct flow_grp_info grp_info,
                         struct rte_flow_error *error)
 {
-       if (attributes->transfer && external && fdb_def_rule) {
-               if (group == UINT32_MAX)
-                       return rte_flow_error_set
-                                               (error, EINVAL,
-                                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-                                                NULL,
-                                                "group index not supported");
-               *table = group + 1;
+       int ret;
+       bool standard_translation;
+
+       if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
+               group *= MLX5_FLOW_TABLE_FACTOR;
+       if (is_tunnel_offload_active(dev)) {
+               standard_translation = !grp_info.external ||
+                                       grp_info.std_tbl_fix;
        } else {
-               *table = group;
+               standard_translation = true;
        }
-       return 0;
+       DRV_LOG(DEBUG,
+               "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+               dev->data->port_id, group, grp_info.transfer,
+               grp_info.external, grp_info.fdb_def_rule,
+               standard_translation ? "STANDARD" : "TUNNEL");
+       if (standard_translation)
+               ret = flow_group_to_table(dev->data->port_id, group, table,
+                                         grp_info, error);
+       else
+               ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
+                                                     table, error);
+
+       return ret;
 }
 
 /**
@@ -7524,3 +8013,189 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
        }
        return ret;
 }
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+                     struct mlx5_flow_tunnel *tunnel)
+{
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+
+       DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+               dev->data->port_id, tunnel->tunnel_id);
+       RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
+       LIST_REMOVE(tunnel, chain);
+       mlx5_flow_id_release(id_pool, tunnel->tunnel_id);
+       mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
+       mlx5_free(tunnel);
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_tunnel *tun;
+
+       LIST_FOREACH(tun, &thub->tunnels, chain) {
+               if (tun->tunnel_id == id)
+                       break;
+       }
+
+       return tun;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+                         const struct rte_flow_tunnel *app_tunnel)
+{
+       int ret;
+       struct mlx5_flow_tunnel *tunnel;
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+       uint32_t id;
+
+       ret = mlx5_flow_id_get(id_pool, &id);
+       if (ret)
+               return NULL;
+       /*
+        * The mlx5 flow tunnel is an auxiliary data structure. It is
+        * not part of the IO path, so there is no need to allocate it
+        * from the huge page pools dedicated to IO.
+        */
+       tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
+                            0, SOCKET_ID_ANY);
+       if (!tunnel) {
+               mlx5_flow_id_release(id_pool, id);
+               return NULL;
+       }
+       tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
+       if (!tunnel->groups) {
+               mlx5_flow_id_release(id_pool, id);
+               mlx5_free(tunnel);
+               return NULL;
+       }
+       /* Initialize the new PMD tunnel. */
+       memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
+       tunnel->tunnel_id = id;
+       tunnel->action.type = (typeof(tunnel->action.type))
+                             MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
+       tunnel->action.conf = tunnel;
+       tunnel->item.type = (typeof(tunnel->item.type))
+                           MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
+       tunnel->item.spec = tunnel;
+       tunnel->item.last = NULL;
+       tunnel->item.mask = NULL;
+
+       DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
+               dev->data->port_id, tunnel->tunnel_id);
+
+       return tunnel;
+}
+
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+                    const struct rte_flow_tunnel *app_tunnel,
+                    struct mlx5_flow_tunnel **tunnel)
+{
+       int ret = 0;
+       struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+       struct mlx5_flow_tunnel *tun;
+
+       LIST_FOREACH(tun, &thub->tunnels, chain) {
+               if (!memcmp(app_tunnel, &tun->app_tunnel,
+                           sizeof(*app_tunnel))) {
+                       *tunnel = tun;
+                       ret = 0;
+                       break;
+               }
+       }
+       if (!tun) {
+               tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
+               if (tun) {
+                       LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
+                       *tunnel = tun;
+               } else {
+                       ret = -ENOMEM;
+               }
+       }
+       if (tun)
+               __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+
+       return ret;
+}
+
+void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
+{
+       struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
+
+       if (!thub)
+               return;
+       if (!LIST_EMPTY(&thub->tunnels))
+               DRV_LOG(WARNING, "port %u tunnels present", port_id);
+       mlx5_flow_id_pool_release(thub->tunnel_ids);
+       mlx5_flow_id_pool_release(thub->table_ids);
+       mlx5_hlist_destroy(thub->groups, NULL, NULL);
+       mlx5_free(thub);
+}
+
+int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
+{
+       int err;
+       struct mlx5_flow_tunnel_hub *thub;
+
+       thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
+                          0, SOCKET_ID_ANY);
+       if (!thub)
+               return -ENOMEM;
+       LIST_INIT(&thub->tunnels);
+       thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS);
+       if (!thub->tunnel_ids) {
+               err = -rte_errno;
+               goto err;
+       }
+       thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES);
+       if (!thub->table_ids) {
+               err = -rte_errno;
+               goto err;
+       }
+       thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
+       if (!thub->groups) {
+               err = -rte_errno;
+               goto err;
+       }
+       sh->tunnel_hub = thub;
+
+       return 0;
+
+err:
+       if (thub->groups)
+               mlx5_hlist_destroy(thub->groups, NULL, NULL);
+       if (thub->table_ids)
+               mlx5_flow_id_pool_release(thub->table_ids);
+       if (thub->tunnel_ids)
+               mlx5_flow_id_pool_release(thub->tunnel_ids);
+       mlx5_free(thub);
+       return err;
+}
+
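+/*
+ * Without DR support the numeric fallback values below mirror the
+ * rdma-core MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW/HW definitions.
+ */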
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+       (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct mlx5_flow_driver_ops *fops;
+       int ret;
+       struct rte_flow_attr attr = { .transfer = 0 };
+
+       fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+       ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+       if (ret > 0)
+               ret = -ret;
+       return ret;
+}