X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=0197a07209ffbcf1c083de725e2aa66761422ec8;hb=641dbe4fb053cbf300bceb991afaee5207ad90d9;hp=b08ee30f17f7c292dc2c1e34ef315cd5d938d4bb;hpb=4a42ac1f1ccda89450c0bbb8ee5816246033b785;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index b08ee30f17..0197a07209 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include #include @@ -33,16 +33,37 @@ #include "mlx5_common_os.h" #include "rte_pmd_mlx5.h" +struct tunnel_default_miss_ctx { + uint16_t *queue; + __extension__ + union { + struct rte_flow_action_rss action_rss; + struct rte_flow_action_queue miss_queue; + struct rte_flow_action_jump miss_jump; + uint8_t raw[0]; + }; +}; + +static int +flow_tunnel_add_default_miss(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_action *app_actions, + uint32_t flow_idx, + struct tunnel_default_miss_ctx *ctx, + struct rte_flow_error *error); static struct mlx5_flow_tunnel * mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id); static void mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel); -static const struct mlx5_flow_tbl_data_entry * -tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark); -static int -mlx5_get_flow_tunnel(struct rte_eth_dev *dev, - const struct rte_flow_tunnel *app_tunnel, - struct mlx5_flow_tunnel **tunnel); +static uint32_t +tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group, uint32_t *table, + struct rte_flow_error *error); + +static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); +static void mlx5_flow_pop_thread_workspace(void); /** Device flow drivers. */ @@ -52,7 +73,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; const struct mlx5_flow_driver_ops *flow_drv_ops[] = { [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, -#ifdef HAVE_IBV_FLOW_DV_SUPPORT +#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, #endif [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, @@ -191,6 +212,8 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) return ret; } +#define MLX5_RSS_EXP_ELT_N 8 + /** * Expand RSS flows into several possible flows according to the RSS hash * fields requested and the driver capabilities. 
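The tunnel_default_miss_ctx structure added in the hunk above overlays the possible default-miss action configurations (RSS, queue, jump) in an anonymous union and exposes the same storage through a GNU zero-length `raw` member, so one generic pointer can be handed to the miss action regardless of which variant was filled in. Below is a minimal standalone sketch of that idiom; the nested struct members are simplified stand-ins for the rte_flow action structs, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

struct miss_ctx {
	uint16_t *queue;
	__extension__
	union {
		struct { uint32_t level; } rss;  /* stand-in for rte_flow_action_rss */
		struct { uint16_t index; } q;    /* stand-in for rte_flow_action_queue */
		struct { uint32_t group; } jump; /* stand-in for rte_flow_action_jump */
		uint8_t raw[0];                  /* generic view of the active member */
	};
};

int main(void)
{
	struct miss_ctx ctx = { .jump = { .group = 5 } };
	/* One generic pointer serves whichever action variant was filled in. */
	const void *generic_conf = ctx.raw;

	printf("raw aliases the active member: %d, jump group=%u\n",
	       generic_conf == (const void *)&ctx.jump, ctx.jump.group);
	return 0;
}

(The zero-length array and __extension__ mirror the GNU C usage in the patch, so the sketch assumes GCC or Clang.)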
@@ -221,13 +244,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, const struct mlx5_flow_expand_node graph[], int graph_root_index) { - const int elt_n = 8; const struct rte_flow_item *item; const struct mlx5_flow_expand_node *node = &graph[graph_root_index]; const int *next_node; - const int *stack[elt_n]; + const int *stack[MLX5_RSS_EXP_ELT_N]; int stack_pos = 0; - struct rte_flow_item flow_items[elt_n]; + struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; unsigned int i; size_t lsize; size_t user_pattern_size = 0; @@ -240,10 +262,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, memset(&missed_item, 0, sizeof(missed_item)); lsize = offsetof(struct mlx5_flow_expand_rss, entry) + - elt_n * sizeof(buf->entry[0]); + MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]); if (lsize <= size) { buf->entry[0].priority = 0; - buf->entry[0].pattern = (void *)&buf->entry[elt_n]; + buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; buf->entries = 0; addr = buf->entry[0].pattern; } @@ -346,7 +368,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, /* Go deeper. */ if (node->next) { next_node = node->next; - if (stack_pos++ == elt_n) { + if (stack_pos++ == MLX5_RSS_EXP_ELT_N) { rte_errno = E2BIG; return -rte_errno; } @@ -377,8 +399,6 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, user_pattern_size); addr = (void *)(((uintptr_t)addr) + user_pattern_size); rte_memcpy(addr, flow_items, elt * sizeof(*item)); - addr = (void *)(((uintptr_t)addr) + - elt * sizeof(*item)); } } return lsize; @@ -580,170 +600,32 @@ static int mlx5_shared_action_query const struct rte_flow_shared_action *action, void *data, struct rte_flow_error *error); -static inline bool -mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, - struct rte_flow_tunnel *tunnel, - const char *err_msg) -{ - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application tunnel"; - goto out; - } - - switch (tunnel->type) { - default: - err_msg = "unsupported tunnel type"; - goto out; - case RTE_FLOW_ITEM_TYPE_VXLAN: - break; - } - -out: - return !err_msg; -} - - static int mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, struct rte_flow_tunnel *app_tunnel, struct rte_flow_action **actions, uint32_t *num_of_actions, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - "failed to initialize pmd tunnel"); - } - *actions = &tunnel->action; - *num_of_actions = 1; - return 0; -} - + struct rte_flow_error *error); static int mlx5_flow_tunnel_match(struct rte_eth_dev *dev, struct rte_flow_tunnel *app_tunnel, struct rte_flow_item **items, uint32_t *num_of_items, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - 
RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "failed to initialize pmd tunnel"); - } - *items = &tunnel->item; - *num_of_items = 1; - return 0; -} - + struct rte_flow_error *error); static int -mlx5_flow_item_release(struct rte_eth_dev *dev, - struct rte_flow_item *pmd_items, - uint32_t num_items, struct rte_flow_error *err) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->item == pmd_items) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_items != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); - return 0; -} - +mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, + struct rte_flow_item *pmd_items, + uint32_t num_items, struct rte_flow_error *err); static int -mlx5_flow_action_release(struct rte_eth_dev *dev, - struct rte_flow_action *pmd_actions, - uint32_t num_actions, struct rte_flow_error *err) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->action == pmd_actions) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_actions != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); - - return 0; -} - +mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, + struct rte_flow_action *pmd_actions, + uint32_t num_actions, + struct rte_flow_error *err); static int mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, - struct rte_flow_error *err) -{ - uint64_t ol_flags = m->ol_flags; - const struct mlx5_flow_tbl_data_entry *tble; - const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID; - - if ((ol_flags & mask) != mask) - goto err; - tble = tunnel_mark_decode(dev, m->hash.fdir.hi); - if (!tble) { - DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", - dev->data->port_id, m->hash.fdir.hi); - goto err; - } - MLX5_ASSERT(tble->tunnel); - memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel)); - info->group_id = tble->group_id; - info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | - RTE_FLOW_RESTORE_INFO_GROUP_ID | - RTE_FLOW_RESTORE_INFO_ENCAPSULATED; - - return 0; - -err: - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "failed to get restore info"); -} + struct rte_flow_error *err); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -760,8 +642,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .shared_action_query = mlx5_shared_action_query, .tunnel_decap_set = mlx5_flow_tunnel_decap_set, .tunnel_match = mlx5_flow_tunnel_match, - .tunnel_action_decap_release = mlx5_flow_action_release, - .tunnel_item_release = mlx5_flow_item_release, + .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, + .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, }; @@ -814,11 +696,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { }, }; -/* Key of thread specific flow workspace data. 
*/ -static pthread_key_t key_workspace; - -/* Thread specific flow workspace data once initialization data. */ -static pthread_once_t key_workspace_init; /** @@ -896,6 +773,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; case MLX5_MTR_COLOR: + case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; case MLX5_COPY_MARK: @@ -915,7 +793,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : (priv->mtr_reg_share ? REG_C_3 : REG_C_4); skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); - if (id > (REG_C_7 - start_reg)) + if (id > (uint32_t)(REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); @@ -931,7 +809,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, */ if (skip_mtr_reg && config->flow_mreg_c [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { - if (id >= (REG_C_7 - start_reg)) + if (id >= (uint32_t)(REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); @@ -977,6 +855,58 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) return config->flow_mreg_c[2] != REG_NON; } +/** + * Get the lowest priority. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attributes + * Pointer to device flow rule attributes. + * + * @return + * The value of lowest priority of flow. + */ +uint32_t +mlx5_get_lowest_priority(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!attr->group && !attr->transfer) + return priv->config.flow_prio - 2; + return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1; +} + +/** + * Calculate matcher priority of the flow. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Pointer to device flow rule attributes. + * @param[in] subpriority + * The priority based on the items. + * @return + * The matcher priority of the flow. + */ +uint16_t +mlx5_get_matcher_priority(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint32_t subpriority) +{ + uint16_t priority = (uint16_t)attr->priority; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!attr->group && !attr->transfer) { + if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) + priority = priv->config.flow_prio - 1; + return mlx5_os_flow_adjust_priority(dev, priority, subpriority); + } + if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) + priority = MLX5_NON_ROOT_FLOW_MAX_PRIO; + return priority * 3 + subpriority; +} + /** * Verify the @p item specifications (spec, last, mask) are compatible with the * NIC capabilities. 
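The new mlx5_get_matcher_priority() above keeps root-table rules on the OS priority-adjust path and, for non-root tables, spreads each user priority over three matcher priorities (priority * 3 + subpriority), which leaves room for the item-based sub-priority within one rule priority. A standalone sketch of the non-root arithmetic follows; the two constants are illustrative stand-ins for MLX5_FLOW_LOWEST_PRIO_INDICATOR and MLX5_NON_ROOT_FLOW_MAX_PRIO, whose real values come from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver's own constants live in mlx5_flow.h. */
#define LOWEST_PRIO_INDICATOR UINT32_MAX /* stand-in for MLX5_FLOW_LOWEST_PRIO_INDICATOR */
#define NON_ROOT_MAX_PRIO     16         /* stand-in for MLX5_NON_ROOT_FLOW_MAX_PRIO */

/* Non-root (group > 0 or transfer) branch of the matcher priority math. */
static uint16_t
non_root_matcher_priority(uint32_t user_prio, uint32_t subpriority)
{
	uint16_t priority = (uint16_t)user_prio;

	if (user_prio == LOWEST_PRIO_INDICATOR)
		priority = NON_ROOT_MAX_PRIO;
	/* Three matcher slots per user priority keep item sub-priorities ordered. */
	return priority * 3 + subpriority;
}

int main(void)
{
	printf("user prio 0, subprio 1 -> matcher prio %u\n",
	       non_root_matcher_priority(0, 1));
	printf("lowest prio, subprio 2 -> matcher prio %u\n",
	       non_root_matcher_priority(LOWEST_PRIO_INDICATOR, 2));
	return 0;
}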
@@ -1119,17 +1049,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); - struct mlx5_hrxq *hrxq; + struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; - if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) - return; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], dev_handle->rix_hrxq); - if (!hrxq) + if (hrxq) + ind_tbl = hrxq->ind_table; + } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + struct mlx5_shared_action_rss *shared_rss; + + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + dev_handle->rix_srss); + if (shared_rss) + ind_tbl = shared_rss->ind_tbl; + } + if (!ind_tbl) return; - for (i = 0; i != hrxq->ind_table->queues_n; ++i) { - int idx = hrxq->ind_table->queues[i]; + for (i = 0; i != ind_tbl->queues_n; ++i) { + int idx = ind_tbl->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -1201,18 +1143,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); - struct mlx5_hrxq *hrxq; + struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; - if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) - return; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], dev_handle->rix_hrxq); - if (!hrxq) + if (hrxq) + ind_tbl = hrxq->ind_table; + } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + struct mlx5_shared_action_rss *shared_rss; + + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + dev_handle->rix_srss); + if (shared_rss) + ind_tbl = shared_rss->ind_tbl; + } + if (!ind_tbl) return; MLX5_ASSERT(dev->data->dev_started); - for (i = 0; i != hrxq->ind_table->queues_n; ++i) { - int idx = hrxq->ind_table->queues[i]; + for (i = 0; i != ind_tbl->queues_n; ++i) { + int idx = ind_tbl->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -1530,6 +1484,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; + enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; unsigned int i; if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && @@ -1595,6 +1550,8 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "No queues configured"); for (i = 0; i != rss->queue_num; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + if (rss->queue[i] >= priv->rxqs_n) return rte_flow_error_set (error, EINVAL, @@ -1604,6 +1561,15 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->queue[i], "queue is not configured"); + rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], + struct mlx5_rxq_ctrl, rxq); + if (i == 0) + rxq_type = rxq_ctrl->type; + if (rxq_type != rxq_ctrl->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], + "combining hairpin and regular RSS 
queues is not supported"); } return 0; } @@ -1755,7 +1721,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, "groups is not supported"); - if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR && attributes->priority >= priority_max) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, @@ -2708,6 +2674,149 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, return 0; } +/** + * Validate Geneve TLV option item. + * + * @param[in] item + * Item specification. + * @param[in] last_item + * Previous validated item in the pattern items. + * @param[in] geneve_item + * Previous GENEVE item specification. + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, + uint64_t last_item, + const struct rte_flow_item *geneve_item, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; + struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr; + uint8_t data_max_supported = + hca_attr->max_geneve_tlv_option_data_len * 4; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_item_geneve *geneve_spec; + const struct rte_flow_item_geneve *geneve_mask; + const struct rte_flow_item_geneve_opt *spec = item->spec; + const struct rte_flow_item_geneve_opt *mask = item->mask; + unsigned int i; + unsigned int data_len; + uint8_t tlv_option_len; + uint16_t optlen_m, optlen_v; + const struct rte_flow_item_geneve_opt full_mask = { + .option_class = RTE_BE16(0xffff), + .option_type = 0xff, + .option_len = 0x1f, + }; + + if (!mask) + mask = &rte_flow_item_geneve_opt_mask; + if (!spec) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt class/type/length must be specified"); + if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt length exceeeds the limit (31)"); + /* Check if class type and length masks are full. */ + if (full_mask.option_class != mask->option_class || + full_mask.option_type != mask->option_type || + full_mask.option_len != (mask->option_len & full_mask.option_len)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt class/type/length masks must be full"); + /* Check if length is supported */ + if ((uint32_t)spec->option_len > + config->hca_attr.max_geneve_tlv_option_data_len) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt length not supported"); + if (config->hca_attr.max_geneve_tlv_options > 1) + DRV_LOG(DEBUG, + "max_geneve_tlv_options supports more than 1 option"); + /* Check GENEVE item preceding. */ + if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve opt item must be preceded with Geneve item"); + geneve_spec = geneve_item->spec; + geneve_mask = geneve_item->mask ? 
geneve_item->mask : + &rte_flow_item_geneve_mask; + /* Check if GENEVE TLV option size doesn't exceed option length */ + if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 || + geneve_spec->ver_opt_len_o_c_rsvd0)) { + tlv_option_len = spec->option_len & mask->option_len; + optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0); + optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v); + optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0); + optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m); + if ((optlen_v & optlen_m) <= tlv_option_len) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "GENEVE TLV option length exceeds optlen"); + } + /* Check if length is 0 or data is 0. */ + if (spec->data == NULL || spec->option_len == 0) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt with zero data/length not supported"); + /* Check not all data & mask are 0. */ + data_len = spec->option_len * 4; + if (mask->data == NULL) { + for (i = 0; i < data_len; i++) + if (spec->data[i]) + break; + if (i == data_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't match on Geneve option data 0"); + } else { + for (i = 0; i < data_len; i++) + if (spec->data[i] & mask->data[i]) + break; + if (i == data_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't match on Geneve option data and mask 0"); + /* Check data mask supported. */ + for (i = data_max_supported; i < data_len ; i++) + if (mask->data[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Data mask is of unsupported size"); + } + /* Check GENEVE option is supported in NIC. */ + if (!config->hca_attr.geneve_tlv_opt) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt not supported"); + /* Check if we already have geneve option with different type/class. */ + rte_spinlock_lock(&sh->geneve_tlv_opt_sl); + geneve_opt_resource = sh->geneve_tlv_option_resource; + if (geneve_opt_resource != NULL) + if (geneve_opt_resource->option_class != spec->option_class || + geneve_opt_resource->option_type != spec->option_type || + geneve_opt_resource->length != spec->option_len) { + rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Only one Geneve TLV option supported"); + } + rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); + return 0; +} + /** * Validate MPLS item. 
* @@ -2746,7 +2855,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, /* MPLS over IP, UDP, GRE is allowed */ if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4_UDP | - MLX5_FLOW_LAYER_GRE))) + MLX5_FLOW_LAYER_GRE | + MLX5_FLOW_LAYER_GRE_KEY))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" @@ -2870,17 +2980,20 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, MLX5_FLOW_LAYER_OUTER_VLAN); struct rte_flow_item_ecpri mask_lo; + if (!(last_item & outer_l2_vlan) && + last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "eCPRI can only follow L2/VLAN layer or UDP layer"); if ((last_item & outer_l2_vlan) && ether_type && ether_type != RTE_ETHER_TYPE_ECPRI) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "eCPRI cannot follow L2/VLAN layer " - "which ether type is not 0xAEFE."); + "eCPRI cannot follow L2/VLAN layer which ether type is not 0xAEFE"); if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "eCPRI with tunnel is not supported " - "right now."); + "eCPRI with tunnel is not supported right now"); if (item_flags & MLX5_FLOW_LAYER_OUTER_L3) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2888,13 +3001,12 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "eCPRI cannot follow a TCP layer."); + "eCPRI cannot coexist with a TCP layer"); /* In specification, eCPRI could be over UDP layer. */ else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "eCPRI over UDP layer is not yet " - "supported right now."); + "eCPRI over UDP layer is not yet supported right now"); /* Mask for type field in common header could be zero. */ if (!mask) mask = &rte_flow_item_ecpri_mask; @@ -2903,13 +3015,11 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, - "partial mask is not supported " - "for protocol"); + "partial mask is not supported for protocol"); else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, - "message header mask must be after " - "a type mask"); + "message header mask must be after a type mask"); return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, acc_mask ? 
(const uint8_t *)acc_mask : (const uint8_t *)&nic_mask, @@ -3254,20 +3364,55 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) static const struct rte_flow_action_rss* flow_get_rss_action(const struct rte_flow_action actions[]) { + const struct rte_flow_action_rss *rss = NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_RSS: - return (const struct rte_flow_action_rss *) - actions->conf; + rss = actions->conf; + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: + { + const struct rte_flow_action_sample *sample = + actions->conf; + const struct rte_flow_action *act = sample->actions; + for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) + if (act->type == RTE_FLOW_ACTION_TYPE_RSS) + rss = act->conf; + break; + } default: break; } } - return NULL; + return rss; } -/* maps shared action to translated non shared in some actions array */ -struct mlx5_translated_shared_action { +/** + * Get ASO age action by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] age_idx + * Index to the ASO age action. + * + * @return + * The specified ASO age action. + */ +struct mlx5_aso_age_action* +flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) +{ + uint16_t pool_idx = age_idx & UINT16_MAX; + uint16_t offset = (age_idx >> 16) & UINT16_MAX; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + struct mlx5_aso_age_pool *pool = mng->pools[pool_idx]; + + return &pool->actions[offset - 1]; +} + +/* maps shared action to translated non shared in some actions array */ +struct mlx5_translated_shared_action { struct rte_flow_shared_action *action; /**< Shared action */ int index; /**< Index in related array of rte_flow_action */ }; @@ -3353,6 +3498,16 @@ flow_shared_actions_translate(struct rte_eth_dev *dev, translated[shared->index].conf = &shared_rss->origin; break; + case MLX5_SHARED_ACTION_TYPE_AGE: + if (priv->sh->flow_hit_aso_en) { + translated[shared->index].type = + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_AGE; + translated[shared->index].conf = + (void *)(uintptr_t)idx; + break; + } + /* Fall-through */ default: mlx5_free(translated); return rte_flow_error_set @@ -3595,7 +3750,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, if (queue == NULL) return 0; conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); - if (conf != NULL && !!conf->tx_explicit) + if (conf == NULL || conf->tx_explicit != 0) return 0; queue_action = 1; action_n++; @@ -3605,7 +3760,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, if (rss == NULL || rss->queue_num == 0) return 0; conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); - if (conf != NULL && !!conf->tx_explicit) + if (conf == NULL || conf->tx_explicit != 0) return 0; queue_action = 1; action_n++; @@ -3620,9 +3775,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) split++; action_n++; break; @@ -3648,6 +3801,17 @@ static void flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, uint32_t flow_idx); +int +flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key, void *cb_ctx __rte_unused) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = + container_of(entry, typeof(*mcp_res), 
hlist_ent); + + return mcp_res->mark_id != key; +} + struct mlx5_hlist_entry * flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, void *cb_ctx) @@ -3725,7 +3889,7 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, }; } else { /* Default rule, wildcard match. */ - attr.priority = MLX5_FLOW_PRIO_RSVD; + attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR; items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END, }; @@ -3749,6 +3913,7 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, return NULL; } mcp_res->idx = idx; + mcp_res->mark_id = mark_id; /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not @@ -4057,9 +4222,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) { memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; @@ -4072,9 +4235,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: raw_decap = actions->conf; - if (raw_decap->size < - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { + if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) { memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; @@ -4125,142 +4286,6 @@ flow_hairpin_split(struct rte_eth_dev *dev, return 0; } -__extension__ -union tunnel_offload_mark { - uint32_t val; - struct { - uint32_t app_reserve:8; - uint32_t table_id:15; - uint32_t transfer:1; - uint32_t _unused_:8; - }; -}; - -struct tunnel_default_miss_ctx { - uint16_t *queue; - __extension__ - union { - struct rte_flow_action_rss action_rss; - struct rte_flow_action_queue miss_queue; - struct rte_flow_action_jump miss_jump; - uint8_t raw[0]; - }; -}; - -static int -flow_tunnel_add_default_miss(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_attr *attr, - const struct rte_flow_action *app_actions, - uint32_t flow_idx, - struct tunnel_default_miss_ctx *ctx, - struct rte_flow_error *error) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow *dev_flow; - struct rte_flow_attr miss_attr = *attr; - const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf; - const struct rte_flow_item miss_items[2] = { - { - .type = RTE_FLOW_ITEM_TYPE_ETH, - .spec = NULL, - .last = NULL, - .mask = NULL - }, - { - .type = RTE_FLOW_ITEM_TYPE_END, - .spec = NULL, - .last = NULL, - .mask = NULL - } - }; - union tunnel_offload_mark mark_id; - struct rte_flow_action_mark miss_mark; - struct rte_flow_action miss_actions[3] = { - [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark }, - [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL } - }; - const struct rte_flow_action_jump *jump_data; - uint32_t i, flow_table = 0; /* prevent compilation warning */ - struct flow_grp_info grp_info = { - .external = 1, - .transfer = attr->transfer, - .fdb_def_rule = !!priv->fdb_def_rule, - .std_tbl_fix = 0, - }; - int ret; - - if (!attr->transfer) { - uint32_t q_size; - - miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS; - q_size = priv->reta_idx_n * sizeof(ctx->queue[0]); - ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size, - 0, SOCKET_ID_ANY); - if (!ctx->queue) - return rte_flow_error_set - (error, ENOMEM, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid default miss RSS"); - ctx->action_rss.func = 
RTE_ETH_HASH_FUNCTION_DEFAULT, - ctx->action_rss.level = 0, - ctx->action_rss.types = priv->rss_conf.rss_hf, - ctx->action_rss.key_len = priv->rss_conf.rss_key_len, - ctx->action_rss.queue_num = priv->reta_idx_n, - ctx->action_rss.key = priv->rss_conf.rss_key, - ctx->action_rss.queue = ctx->queue; - if (!priv->reta_idx_n || !priv->rxqs_n) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid port configuration"); - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) - ctx->action_rss.types = 0; - for (i = 0; i != priv->reta_idx_n; ++i) - ctx->queue[i] = (*priv->reta_idx)[i]; - } else { - miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP; - ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP; - } - miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw; - for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++); - jump_data = app_actions->conf; - miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY; - miss_attr.group = jump_data->group; - ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group, - &flow_table, grp_info, error); - if (ret) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid tunnel id"); - mark_id.app_reserve = 0; - mark_id.table_id = tunnel_flow_tbl_to_id(flow_table); - mark_id.transfer = !!attr->transfer; - mark_id._unused_ = 0; - miss_mark.id = mark_id.val; - dev_flow = flow_drv_prepare(dev, flow, &miss_attr, - miss_items, miss_actions, flow_idx, error); - if (!dev_flow) - return -rte_errno; - dev_flow->flow = flow; - dev_flow->external = true; - dev_flow->tunnel = tunnel; - /* Subflow object was created, we must include one in the list. */ - SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, - dev_flow->handle, next); - DRV_LOG(DEBUG, - "port %u tunnel type=%d id=%u miss rule priority=%u group=%u", - dev->data->port_id, tunnel->app_tunnel.type, - tunnel->tunnel_id, miss_attr.priority, miss_attr.group); - ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items, - miss_actions, error); - if (!ret) - ret = flow_mreg_update_copy_table(dev, flow, miss_actions, - error); - - return ret; -} - /** * The last stage of splitting chain, just creates the subflow * without any modification. @@ -4271,20 +4296,14 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev, * Parent flow structure pointer. * @param[in, out] sub_flow * Pointer to return the created subflow, may be NULL. - * @param[in] prefix_layers - * Prefix subflow layers, may be 0. - * @param[in] prefix_mark - * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). - * @param[in] external - * This flow rule is created by request external to PMD. - * @param[in] flow_idx - * This memory pool index to the flow. + * @param[in] flow_split_info + * Pointer to flow split info structure. * @param[out] error * Perform verbose error reporting if not NULL. 
* @return @@ -4294,22 +4313,21 @@ static int flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow *flow, struct mlx5_flow **sub_flow, - uint64_t prefix_layers, - uint32_t prefix_mark, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, uint32_t flow_idx, + struct mlx5_flow_split_info *flow_split_info, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, - flow_idx, error); + flow_split_info->flow_idx, error); if (!dev_flow) return -rte_errno; dev_flow->flow = flow; - dev_flow->external = external; + dev_flow->external = flow_split_info->external; + dev_flow->skip_scale = flow_split_info->skip_scale; /* Subflow object was created, we must include one in the list. */ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, dev_flow->handle, next); @@ -4318,9 +4336,9 @@ flow_create_split_inner(struct rte_eth_dev *dev, * flow may need some user defined item layer flags, and pass the * Metadate rxq mark flag to suffix flow as well. */ - if (prefix_layers) - dev_flow->handle->layers = prefix_layers; - if (prefix_mark) + if (flow_split_info->prefix_layers) + dev_flow->handle->layers = flow_split_info->prefix_layers; + if (flow_split_info->prefix_mark) dev_flow->handle->mark = 1; if (sub_flow) *sub_flow = dev_flow; @@ -4674,6 +4692,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Pointer to the position of the matched action if exists, otherwise is -1. * @param[out] qrss_action_pos * Pointer to the position of the Queue/RSS action if exists, otherwise is -1. + * @param[out] modify_after_mirror + * Pointer to the flag of modify action after FDB mirroring. * * @return * > 0 the total number of actions. 
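From this point on the split helpers take a single flow_split_info pointer instead of separate prefix_layers/prefix_mark/external/flow_idx arguments, so each split stage can hand its prefix state to the next stage through one object. The sketch below mirrors the field set visible in the initializer later in this patch (external, skip_scale, flow_idx, prefix_mark, prefix_layers); field types and names outside that list are inferred for illustration, not copied from the driver header.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Field set inferred from the flow_split_info initializer used in this patch. */
struct split_info {
	bool external;          /* rule requested by the application */
	uint32_t skip_scale;    /* bit-mask of group-scale steps already done */
	uint32_t flow_idx;      /* parent flow pool index */
	uint32_t prefix_mark;   /* prefix subflow carries a MARK action */
	uint64_t prefix_layers; /* layer flags produced by the prefix subflow */
};

static void
create_suffix_subflow(const struct split_info *info)
{
	printf("suffix inherits layers=0x%" PRIx64 " mark=%u\n",
	       info->prefix_layers, info->prefix_mark);
}

int main(void)
{
	struct split_info info = { .external = true, .flow_idx = 1 };

	/* The prefix stage records what it matched and marked... */
	info.prefix_layers = 0x3;
	info.prefix_mark = 1;
	/* ...and every later split stage reads it from the same struct. */
	create_suffix_subflow(&info);
	return 0;
}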
@@ -4683,14 +4703,15 @@ static int flow_check_match_action(const struct rte_flow_action actions[], const struct rte_flow_attr *attr, enum rte_flow_action_type action, - int *match_action_pos, int *qrss_action_pos) + int *match_action_pos, int *qrss_action_pos, + int *modify_after_mirror) { const struct rte_flow_action_sample *sample; int actions_n = 0; - int jump_flag = 0; uint32_t ratio = 0; int sub_type = 0; int flag = 0; + int fdb_mirror = 0; *match_action_pos = -1; *qrss_action_pos = -1; @@ -4699,27 +4720,53 @@ flow_check_match_action(const struct rte_flow_action actions[], flag = 1; *match_action_pos = actions_n; } - if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE || - actions->type == RTE_FLOW_ACTION_TYPE_RSS) + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + case RTE_FLOW_ACTION_TYPE_RSS: *qrss_action_pos = actions_n; - if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP) - jump_flag = 1; - if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) { + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: sample = actions->conf; ratio = sample->ratio; sub_type = ((const struct rte_flow_action *) (sample->actions))->type; + if (ratio == 1 && attr->transfer) + fdb_mirror = 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + case RTE_FLOW_ACTION_TYPE_SET_TTL: + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + case RTE_FLOW_ACTION_TYPE_FLAG: + case RTE_FLOW_ACTION_TYPE_MARK: + case RTE_FLOW_ACTION_TYPE_SET_META: + case RTE_FLOW_ACTION_TYPE_SET_TAG: + if (fdb_mirror) + *modify_after_mirror = 1; + break; + default: + break; } actions_n++; } - if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) { - if (ratio == 1) { - /* JUMP Action not support for Mirroring; - * Mirroring support multi-destination; - */ - if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END) - flag = 0; - } + if (flag && fdb_mirror && !*modify_after_mirror) { + /* FDB mirroring uses the destination array to implement + * instead of FLOW_SAMPLER object. + */ + if (sub_type != RTE_FLOW_ACTION_TYPE_END) + flag = 0; } /* Count RTE_FLOW_ACTION_TYPE_END. */ return flag ? actions_n + 1 : 0; @@ -4738,8 +4785,8 @@ flow_check_match_action(const struct rte_flow_action actions[], * * @param dev * Pointer to Ethernet device. - * @param[in] fdb_tx - * FDB egress flow flag. + * @param[in] add_tag + * Add extra tag action flag. * @param[out] sfx_items * Suffix flow match items (list terminated by the END pattern item). * @param[in] actions @@ -4754,6 +4801,8 @@ flow_check_match_action(const struct rte_flow_action actions[], * The sample action position. * @param[in] qrss_action_pos * The Queue/RSS action position. + * @param[in] jump_table + * Add extra jump action flag. * @param[out] error * Perform verbose error reporting if not NULL. 
* @@ -4763,7 +4812,7 @@ flow_check_match_action(const struct rte_flow_action actions[], */ static int flow_sample_split_prep(struct rte_eth_dev *dev, - uint32_t fdb_tx, + int add_tag, struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], @@ -4771,14 +4820,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, int actions_n, int sample_action_pos, int qrss_action_pos, + int jump_table, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; struct mlx5_rte_flow_item_tag *tag_spec; struct mlx5_rte_flow_item_tag *tag_mask; + struct rte_flow_action_jump *jump_action; uint32_t tag_id = 0; int index; + int append_index = 0; int ret; if (sample_action_pos < 0) @@ -4786,9 +4838,37 @@ flow_sample_split_prep(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "invalid position of sample " "action in list"); - if (!fdb_tx) { + /* Prepare the actions for prefix and suffix flow. */ + if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { + index = qrss_action_pos; + /* Put the preceding the Queue/RSS action into prefix flow. */ + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + /* Put others preceding the sample action into prefix flow. */ + if (sample_action_pos > index + 1) + memcpy(actions_pre + index, actions + index + 1, + sizeof(struct rte_flow_action) * + (sample_action_pos - index - 1)); + index = sample_action_pos - 1; + /* Put Queue/RSS action into Suffix flow. */ + memcpy(actions_sfx, actions + qrss_action_pos, + sizeof(struct rte_flow_action)); + actions_sfx++; + } else { + index = sample_action_pos; + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + } + /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress. + * For CX6DX and above, metadata registers Cx preserve their value, + * add an extra tag action for NIC-RX and E-Switch Domain. + */ + if (add_tag) { /* Prepare the prefix tag action. */ - set_tag = (void *)(actions_pre + actions_n + 1); + append_index++; + set_tag = (void *)(actions_pre + actions_n + append_index); ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); if (ret < 0) return ret; @@ -4813,32 +4893,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, .type = (enum rte_flow_item_type) RTE_FLOW_ITEM_TYPE_END, }; - } - /* Prepare the actions for prefix and suffix flow. */ - if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { - index = qrss_action_pos; - /* Put the preceding the Queue/RSS action into prefix flow. */ - if (index != 0) - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action) * index); - /* Put others preceding the sample action into prefix flow. */ - if (sample_action_pos > index + 1) - memcpy(actions_pre + index, actions + index + 1, - sizeof(struct rte_flow_action) * - (sample_action_pos - index - 1)); - index = sample_action_pos - 1; - /* Put Queue/RSS action into Suffix flow. */ - memcpy(actions_sfx, actions + qrss_action_pos, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - index = sample_action_pos; - if (index != 0) - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action) * index); - } - /* Add the extra tag action for NIC-RX and E-Switch ingress. */ - if (!fdb_tx) { + /* Prepare the tag action in prefix subflow. 
*/ actions_pre[index++] = (struct rte_flow_action){ .type = (enum rte_flow_action_type) @@ -4849,6 +4904,22 @@ flow_sample_split_prep(struct rte_eth_dev *dev, memcpy(actions_pre + index, actions + sample_action_pos, sizeof(struct rte_flow_action)); index += 1; + /* For the modify action after the sample action in E-Switch mirroring, + * Add the extra jump action in prefix subflow and jump into the next + * table, then do the modify action in the new table. + */ + if (jump_table) { + /* Prepare the prefix jump action. */ + append_index++; + jump_action = (void *)(actions_pre + actions_n + append_index); + jump_action->group = jump_table; + actions_pre[index++] = + (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + RTE_FLOW_ACTION_TYPE_JUMP, + .conf = jump_action, + }; + } actions_pre[index] = (struct rte_flow_action){ .type = (enum rte_flow_action_type) RTE_FLOW_ACTION_TYPE_END, @@ -4874,20 +4945,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. - * @param[in] prefix_layers - * Prefix flow layer flags. - * @param[in] prefix_mark - * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). - * @param[in] external - * This flow rule is created by request external to PMD. - * @param[in] flow_idx - * This memory pool index to the flow. + * @param[in] flow_split_info + * Pointer to flow split info structure. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -4896,12 +4961,10 @@ flow_sample_split_prep(struct rte_eth_dev *dev, static int flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow *flow, - uint64_t prefix_layers, - uint32_t prefix_mark, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, uint32_t flow_idx, + struct mlx5_flow_split_info *flow_split_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -4920,10 +4983,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, if (!config->dv_flow_en || config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev)) - return flow_create_split_inner(dev, flow, NULL, prefix_layers, - prefix_mark, attr, items, - actions, external, flow_idx, - error); + return flow_create_split_inner(dev, flow, NULL, attr, items, + actions, flow_split_info, error); actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, &encap_idx); if (qrss) { @@ -5008,10 +5069,9 @@ flow_create_split_metadata(struct rte_eth_dev *dev, goto exit; } /* Add the unmodified original or prefix subflow. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, - prefix_mark, attr, + ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, ext_actions ? ext_actions : - actions, external, flow_idx, error); + actions, flow_split_info, error); if (ret < 0) goto exit; MLX5_ASSERT(dev_flow); @@ -5072,10 +5132,12 @@ flow_create_split_metadata(struct rte_eth_dev *dev, } dev_flow = NULL; /* Add suffix subflow to execute Q/RSS. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0, + flow_split_info->prefix_layers = layers; + flow_split_info->prefix_mark = 0; + ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? 
items : q_items, q_actions, - external, flow_idx, error); + flow_split_info, error); if (ret < 0) goto exit; /* qrss ID should be freed if failed. */ @@ -5109,20 +5171,14 @@ exit: * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. - * @param[in] prefix_layers - * Prefix subflow layers, may be 0. - * @param[in] prefix_mark - * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). - * @param[in] external - * This flow rule is created by request external to PMD. - * @param[in] flow_idx - * This memory pool index to the flow. + * @param[in] flow_split_info + * Pointer to flow split info structure. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -5131,12 +5187,10 @@ exit: static int flow_create_split_meter(struct rte_eth_dev *dev, struct rte_flow *flow, - uint64_t prefix_layers, - uint32_t prefix_mark, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, uint32_t flow_idx, + struct mlx5_flow_split_info *flow_split_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -5180,11 +5234,10 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ + flow_split_info->prefix_mark = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, - prefix_layers, 0, - attr, items, - pre_actions, external, - flow_idx, error); + attr, items, pre_actions, + flow_split_info, error); if (ret) { ret = -rte_errno; goto exit; @@ -5194,16 +5247,16 @@ flow_create_split_meter(struct rte_eth_dev *dev, sfx_attr.group = sfx_attr.transfer ? (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : MLX5_FLOW_TABLE_LEVEL_SUFFIX; + flow_split_info->prefix_layers = + flow_get_prefix_layer_flags(dev_flow); + flow_split_info->prefix_mark = dev_flow->handle->mark; } /* Add the prefix subflow. */ - ret = flow_create_split_metadata(dev, flow, dev_flow ? - flow_get_prefix_layer_flags(dev_flow) : - prefix_layers, dev_flow ? - dev_flow->handle->mark : prefix_mark, + ret = flow_create_split_metadata(dev, flow, &sfx_attr, sfx_items ? sfx_items : items, sfx_actions ? sfx_actions : actions, - external, flow_idx, error); + flow_split_info, error); exit: if (sfx_actions) mlx5_free(sfx_actions); @@ -5235,10 +5288,8 @@ exit: * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). - * @param[in] external - * This flow rule is created by request external to PMD. - * @param[in] flow_idx - * This memory pool index to the flow. + * @param[in] flow_split_info + * Pointer to flow split info structure. * @param[out] error * Perform verbose error reporting if not NULL. 
* @return @@ -5250,7 +5301,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, uint32_t flow_idx, + struct mlx5_flow_split_info *flow_split_info, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -5263,7 +5314,6 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; - union mlx5_flow_tbl_key sfx_table_key; #endif size_t act_size; size_t item_size; @@ -5272,12 +5322,17 @@ flow_create_split_sample(struct rte_eth_dev *dev, int actions_n = 0; int sample_action_pos; int qrss_action_pos; + int add_tag = 0; + int modify_after_mirror = 0; + uint16_t jump_table = 0; + const uint32_t next_ft_step = 1; int ret = 0; if (priv->sampler_en) actions_n = flow_check_match_action(actions, attr, RTE_FLOW_ACTION_TYPE_SAMPLE, - &sample_action_pos, &qrss_action_pos); + &sample_action_pos, &qrss_action_pos, + &modify_after_mirror); if (actions_n) { /* The prefix actions must includes sample, tag, end. */ act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1) @@ -5293,49 +5348,72 @@ flow_create_split_sample(struct rte_eth_dev *dev, "sample flow"); /* The representor_id is -1 for uplink. */ fdb_tx = (attr->transfer && priv->representor_id != -1); - if (!fdb_tx) + /* + * When reg_c_preserve is set, metadata registers Cx preserve + * their value even through packet duplication. + */ + add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve); + if (add_tag) sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); + if (modify_after_mirror) + jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR + + next_ft_step; pre_actions = sfx_actions + actions_n; - tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items, + tag_id = flow_sample_split_prep(dev, add_tag, sfx_items, actions, sfx_actions, pre_actions, actions_n, sample_action_pos, - qrss_action_pos, error); - if (tag_id < 0 || (!fdb_tx && !tag_id)) { + qrss_action_pos, jump_table, + error); + if (tag_id < 0 || (add_tag && !tag_id)) { ret = -rte_errno; goto exit; } + if (modify_after_mirror) + flow_split_info->skip_scale = + 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; /* Add the prefix subflow. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr, - items, pre_actions, external, - flow_idx, error); + ret = flow_create_split_inner(dev, flow, &dev_flow, attr, + items, pre_actions, + flow_split_info, error); if (ret) { ret = -rte_errno; goto exit; } dev_flow->handle->split_flow_id = tag_id; #ifdef HAVE_IBV_FLOW_DV_SUPPORT - /* Set the sfx group attr. */ - sample_res = (struct mlx5_flow_dv_sample_resource *) - dev_flow->dv.sample_res; - sfx_tbl = (struct mlx5_flow_tbl_resource *) - sample_res->normal_path_tbl; - sfx_tbl_data = container_of(sfx_tbl, - struct mlx5_flow_tbl_data_entry, tbl); - sfx_table_key.v64 = sfx_tbl_data->entry.key; - sfx_attr.group = sfx_attr.transfer ? - (sfx_table_key.table_id - 1) : - sfx_table_key.table_id; + if (!modify_after_mirror) { + /* Set the sfx group attr. */ + sample_res = (struct mlx5_flow_dv_sample_resource *) + dev_flow->dv.sample_res; + sfx_tbl = (struct mlx5_flow_tbl_resource *) + sample_res->normal_path_tbl; + sfx_tbl_data = container_of(sfx_tbl, + struct mlx5_flow_tbl_data_entry, + tbl); + sfx_attr.group = sfx_attr.transfer ? 
+ (sfx_tbl_data->table_id - 1) : + sfx_tbl_data->table_id; + } else { + MLX5_ASSERT(attr->transfer); + sfx_attr.group = jump_table; + } + flow_split_info->prefix_layers = + flow_get_prefix_layer_flags(dev_flow); + flow_split_info->prefix_mark = dev_flow->handle->mark; + /* Suffix group level already be scaled with factor, set + * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale + * again in translation. + */ + flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT; #endif } /* Add the suffix subflow. */ - ret = flow_create_split_meter(dev, flow, dev_flow ? - flow_get_prefix_layer_flags(dev_flow) : 0, - dev_flow ? dev_flow->handle->mark : 0, - &sfx_attr, sfx_items ? sfx_items : items, - sfx_actions ? sfx_actions : actions, - external, flow_idx, error); + ret = flow_create_split_meter(dev, flow, &sfx_attr, + sfx_items ? sfx_items : items, + sfx_actions ? sfx_actions : actions, + flow_split_info, error); exit: if (sfx_actions) mlx5_free(sfx_actions); @@ -5370,10 +5448,8 @@ exit: * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). - * @param[in] external - * This flow rule is created by request external to PMD. - * @param[in] flow_idx - * This memory pool index to the flow. + * @param[in] flow_split_info + * Pointer to flow split info structure. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -5385,13 +5461,13 @@ flow_create_split_outer(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, uint32_t flow_idx, + struct mlx5_flow_split_info *flow_split_info, struct rte_flow_error *error) { int ret; ret = flow_create_split_sample(dev, flow, attr, items, - actions, external, flow_idx, error); + actions, flow_split_info, error); MLX5_ASSERT(ret <= 0); return ret; } @@ -5435,17 +5511,15 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks, struct mlx5_flow_rss_desc *rss_desc, uint32_t nrssq_num) { - bool fidx = !!wks->flow_idx; - - if (likely(nrssq_num <= wks->rssq_num[fidx])) + if (likely(nrssq_num <= wks->rssq_num)) return 0; rss_desc->queue = realloc(rss_desc->queue, - sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2)); + sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2)); if (!rss_desc->queue) { rte_errno = ENOMEM; return -1; } - wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2); + wks->rssq_num = RTE_ALIGN(nrssq_num, 2); return 0; } @@ -5483,7 +5557,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; struct mlx5_flow *dev_flow; - const struct rte_flow_action_rss *rss; + const struct rte_flow_action_rss *rss = NULL; struct mlx5_translated_shared_action shared_actions[MLX5_MAX_SHARED_ACTIONS]; int shared_actions_n = MLX5_MAX_SHARED_ACTIONS; @@ -5510,17 +5584,22 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, uint32_t idx = 0; int hairpin_flow; struct rte_flow_attr attr_tx = { .priority = 0 }; - struct rte_flow_attr attr_factor = {0}; const struct rte_flow_action *actions; struct rte_flow_action *translated_actions = NULL; struct mlx5_flow_tunnel *tunnel; struct tunnel_default_miss_ctx default_miss_ctx = { 0, }; - struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); - bool fidx = !!wks->flow_idx; + struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace(); + struct mlx5_flow_split_info flow_split_info = { + .external = 
!!external, + .skip_scale = 0, + .flow_idx = 0, + .prefix_mark = 0, + .prefix_layers = 0 + }; int ret; MLX5_ASSERT(wks); - rss_desc = &wks->rss_desc[fidx]; + rss_desc = &wks->rss_desc; ret = flow_shared_actions_translate(dev, original_actions, shared_actions, &shared_actions_n, @@ -5530,10 +5609,9 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, return 0; } actions = translated_actions ? translated_actions : original_actions; - memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr)); p_actions_rx = actions; - hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions); - ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx, + hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, hairpin_flow, error); if (ret < 0) goto error_before_hairpin_split; @@ -5552,11 +5630,14 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, idx); p_actions_rx = actions_rx.actions; } - flow->drv_type = flow_get_drv_type(dev, &attr_factor); + flow_split_info.flow_idx = idx; + flow->drv_type = flow_get_drv_type(dev, attr); MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); - rss = flow_get_rss_action(p_actions_rx); + /* RSS Action only works on NIC RX domain */ + if (attr->ingress && !attr->transfer) + rss = flow_get_rss_action(p_actions_rx); if (rss) { if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) return 0; @@ -5582,26 +5663,21 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; } - flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions, + rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions, shared_actions_n); - /* - * Record the start index when there is a nested call. All sub-flows - * need to be translated before another calling. - * No need to use ping-pong buffer to save memory here. - */ - if (fidx) { - MLX5_ASSERT(!wks->flow_nested_idx); - wks->flow_nested_idx = fidx; - } for (i = 0; i < buf->entries; ++i) { + /* Initialize flow split data. */ + flow_split_info.prefix_layers = 0; + flow_split_info.prefix_mark = 0; + flow_split_info.skip_scale = 0; /* * The splitter may create multiple dev_flows, * depending on configuration. In the simplest * case it just creates unmodified original flow. */ - ret = flow_create_split_outer(dev, flow, &attr_factor, + ret = flow_create_split_outer(dev, flow, attr, buf->entry[i].pattern, - p_actions_rx, external, idx, + p_actions_rx, &flow_split_info, error); if (ret < 0) goto error; @@ -5649,17 +5725,19 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, * the egress Flows belong to the different device and * copy table should be updated in peer NIC Rx domain. */ - if (attr_factor.ingress && - (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { + if (attr->ingress && + (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { ret = flow_mreg_update_copy_table(dev, flow, actions, error); if (ret) goto error; } /* - * If the flow is external (from application) OR device is started, then - * the flow will be applied immediately. + * If the flow is external (from application) OR device is started, + * OR mreg discover, then apply immediately. 
*/ - if (external || dev->data->dev_started) { + if (external || dev->data->dev_started || + (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP && + attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) goto error; @@ -5672,10 +5750,6 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, } flow_rxq_flags_set(dev, flow); rte_free(translated_actions); - /* Nested flow creation index recovery. */ - wks->flow_idx = wks->flow_nested_idx; - if (wks->flow_nested_idx) - wks->flow_nested_idx = 0; tunnel = flow_tunnel_from_rule(dev, attr, items, actions); if (tunnel) { flow->tunnel = 1; @@ -5683,19 +5757,23 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED); mlx5_free(default_miss_ctx.queue); } + mlx5_flow_pop_thread_workspace(); return idx; error: MLX5_ASSERT(flow); ret = rte_errno; /* Save rte_errno before cleanup. */ flow_mreg_del_copy_action(dev, flow); flow_drv_destroy(dev, flow); + if (rss_desc->shared_rss) + __atomic_sub_fetch(&((struct mlx5_shared_action_rss *) + mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); rte_errno = ret; /* Restore rte_errno. */ ret = rte_errno; rte_errno = ret; - wks->flow_idx = wks->flow_nested_idx; - if (wks->flow_nested_idx) - wks->flow_nested_idx = 0; + mlx5_flow_pop_thread_workspace(); error_before_hairpin_split: rte_free(translated_actions); return 0; @@ -5855,19 +5933,16 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, flow_idx, flow, next); rte_spinlock_unlock(&priv->flow_list_lock); } - flow_mreg_del_copy_action(dev, flow); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; - rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl); tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id); RTE_VERIFY(tunnel); - LIST_REMOVE(tunnel, chain); - rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl); if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) mlx5_flow_tunnel_free(dev, tunnel); } + flow_mreg_del_copy_action(dev, flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); } /** @@ -5928,75 +6003,127 @@ mlx5_flow_start_default(struct rte_eth_dev *dev) /** * Release key of thread specific flow workspace data. */ -static void +void flow_release_workspace(void *data) { struct mlx5_flow_workspace *wks = data; + struct mlx5_flow_workspace *next; - if (!wks) - return; - free(wks->rss_desc[0].queue); - free(wks->rss_desc[1].queue); - free(wks); + while (wks) { + next = wks->next; + free(wks->rss_desc.queue); + free(wks); + wks = next; + } } /** - * Initialize key of thread specific flow workspace data. + * Get thread specific current flow workspace. + * + * @return pointer to thread specific flow workspace data, NULL on error. */ -static void -flow_alloc_workspace(void) +struct mlx5_flow_workspace* +mlx5_flow_get_thread_workspace(void) { - if (pthread_key_create(&key_workspace, flow_release_workspace)) - DRV_LOG(ERR, "Can't create flow workspace data thread key."); + struct mlx5_flow_workspace *data; + + data = mlx5_flow_os_get_specific_workspace(); + MLX5_ASSERT(data && data->inuse); + if (!data || !data->inuse) + DRV_LOG(ERR, "flow workspace not initialized."); + return data; } /** - * Get thread specific flow workspace. + * Allocate and init new flow workspace. 
* - * @return pointer to thread specific flowworkspace data, NULL on error. + * @return pointer to flow workspace data, NULL on error. */ -struct mlx5_flow_workspace* -mlx5_flow_get_thread_workspace(void) +static struct mlx5_flow_workspace* +flow_alloc_thread_workspace(void) { - struct mlx5_flow_workspace *data; + struct mlx5_flow_workspace *data = calloc(1, sizeof(*data)); - if (pthread_once(&key_workspace_init, flow_alloc_workspace)) { - DRV_LOG(ERR, "Failed to init flow workspace data thread key."); - return NULL; - } - data = pthread_getspecific(key_workspace); if (!data) { - data = calloc(1, sizeof(*data)); - if (!data) { - DRV_LOG(ERR, "Failed to allocate flow workspace " - "memory."); - return NULL; - } - data->rss_desc[0].queue = calloc(1, - sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); - if (!data->rss_desc[0].queue) - goto err; - data->rss_desc[1].queue = calloc(1, - sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); - if (!data->rss_desc[1].queue) - goto err; - data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM; - data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM; - if (pthread_setspecific(key_workspace, data)) { - DRV_LOG(ERR, "Failed to set flow workspace to thread."); - goto err; - } + DRV_LOG(ERR, "Failed to allocate flow workspace " + "memory."); + return NULL; } + data->rss_desc.queue = calloc(1, + sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); + if (!data->rss_desc.queue) + goto err; + data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; return data; err: - if (data->rss_desc[0].queue) - free(data->rss_desc[0].queue); - if (data->rss_desc[1].queue) - free(data->rss_desc[1].queue); + if (data->rss_desc.queue) + free(data->rss_desc.queue); free(data); return NULL; } +/** + * Get new thread specific flow workspace. + * + * If current workspace inuse, create new one and set as current. + * + * @return pointer to thread specific flow workspace data, NULL on error. + */ +static struct mlx5_flow_workspace* +mlx5_flow_push_thread_workspace(void) +{ + struct mlx5_flow_workspace *curr; + struct mlx5_flow_workspace *data; + + curr = mlx5_flow_os_get_specific_workspace(); + if (!curr) { + data = flow_alloc_thread_workspace(); + if (!data) + return NULL; + } else if (!curr->inuse) { + data = curr; + } else if (curr->next) { + data = curr->next; + } else { + data = flow_alloc_thread_workspace(); + if (!data) + return NULL; + curr->next = data; + data->prev = curr; + } + data->inuse = 1; + data->flow_idx = 0; + /* Set as current workspace */ + if (mlx5_flow_os_set_specific_workspace(data)) + DRV_LOG(ERR, "Failed to set flow workspace to thread."); + return data; +} + +/** + * Close current thread specific flow workspace. + * + * If previous workspace available, set it as current. + * + * @return pointer to thread specific flow workspace data, NULL on error. 
+ */ +static void +mlx5_flow_pop_thread_workspace(void) +{ + struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace(); + + if (!data) + return; + if (!data->inuse) { + DRV_LOG(ERR, "Failed to close unused flow workspace."); + return; + } + data->inuse = 0; + if (!data->prev) + return; + if (mlx5_flow_os_set_specific_workspace(data->prev)) + DRV_LOG(ERR, "Failed to set flow workspace to thread."); +} + /** * Verify the flow list is empty * @@ -6110,7 +6237,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_FLOW_PRIO_RSVD, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, }; struct rte_flow_item items[] = { { @@ -6299,9 +6426,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, } priv->isolated = !!enable; if (enable) - dev->dev_ops = &mlx5_os_dev_ops_isolate; + dev->dev_ops = &mlx5_dev_ops_isolate; else - dev->dev_ops = &mlx5_os_dev_ops; + dev->dev_ops = &mlx5_dev_ops; dev->rx_descriptor_status = mlx5_rx_descriptor_status; dev->tx_descriptor_status = mlx5_tx_descriptor_status; @@ -6610,7 +6737,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, + mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size, IBV_ACCESS_LOCAL_WRITE); if (!mem_mng->umem) { rte_errno = errno; @@ -6625,10 +6752,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) mkey_attr.pg_access = 0; mkey_attr.klm_array = NULL; mkey_attr.klm_num = 0; - mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering; + mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; + mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); if (!mem_mng->dm) { - mlx5_glue->devx_umem_dereg(mem_mng->umem); + mlx5_os_umem_dereg(mem_mng->umem); rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -6875,104 +7003,13 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh, sh->cmng.pending_queries--; } -static const struct mlx5_flow_tbl_data_entry * -tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_hlist_entry *he; - union tunnel_offload_mark mbits = { .val = mark }; - union mlx5_flow_tbl_key table_key = { - { - .table_id = tunnel_id_to_flow_tbl(mbits.table_id), - .dummy = 0, - .domain = !!mbits.transfer, - .direction = 0, - } - }; - he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - return he ? 
- container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; -} - -static void -mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, - struct mlx5_hlist_entry *entry) -{ - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); - - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tunnel_flow_tbl_to_id(tte->flow_table)); - mlx5_free(tte); -} - -static struct mlx5_hlist_entry * -mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, - uint64_t key __rte_unused, - void *ctx __rte_unused) -{ - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte; - - tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, - sizeof(*tte), 0, - SOCKET_ID_ANY); - if (!tte) - goto err; - mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - &tte->flow_table); - if (tte->flow_table >= MLX5_MAX_TABLES) { - DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", - tte->flow_table); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tte->flow_table); - goto err; - } else if (!tte->flow_table) { - goto err; - } - tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); - return &tte->hash; -err: - if (tte) - mlx5_free(tte); - return NULL; -} - -static uint32_t -tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, - const struct mlx5_flow_tunnel *tunnel, - uint32_t group, uint32_t *table, - struct rte_flow_error *error) -{ - struct mlx5_hlist_entry *he; - struct tunnel_tbl_entry *tte; - union tunnel_tbl_key key = { - .tunnel_id = tunnel ? tunnel->tunnel_id : 0, - .group = group - }; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_hlist *group_hash; - - group_hash = tunnel ? tunnel->groups : thub->groups; - he = mlx5_hlist_register(group_hash, key.val, NULL); - if (!he) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "tunnel group index not supported"); - tte = container_of(he, typeof(*tte), hash); - *table = tte->flow_table; - DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", - dev->data->port_id, key.tunnel_id, group, *table); - return 0; -} - static int flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table, - struct flow_grp_info grp_info, struct rte_flow_error *error) + const struct flow_grp_info *grp_info, + struct rte_flow_error *error) { - if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) { + if (grp_info->transfer && grp_info->external && + grp_info->fdb_def_rule) { if (group == UINT32_MAX) return rte_flow_error_set (error, EINVAL, @@ -7029,24 +7066,25 @@ int mlx5_flow_group_to_table(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, uint32_t group, uint32_t *table, - struct flow_grp_info grp_info, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { int ret; bool standard_translation; - if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL) + if (!grp_info->skip_scale && grp_info->external && + group < MLX5_MAX_TABLES_EXTERNAL) group *= MLX5_FLOW_TABLE_FACTOR; if (is_tunnel_offload_active(dev)) { - standard_translation = !grp_info.external || - grp_info.std_tbl_fix; + standard_translation = !grp_info->external || + grp_info->std_tbl_fix; } else { standard_translation = true; } DRV_LOG(DEBUG, - "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s", - dev->data->port_id, group, grp_info.transfer, - grp_info.external, grp_info.fdb_def_rule, + "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s", + dev->data->port_id, group, 
grp_info->transfer, + grp_info->external, grp_info->fdb_def_rule, standard_translation ? "STANDARD" : "TUNNEL"); if (standard_translation) ret = flow_group_to_table(dev->data->port_id, group, table, @@ -7084,7 +7122,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) for (idx = REG_C_2; idx <= REG_C_7; ++idx) { struct rte_flow_attr attr = { .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, - .priority = MLX5_FLOW_PRIO_RSVD, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, .ingress = 1, }; struct rte_flow_item items[] = { @@ -7124,8 +7162,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) flow_idx); if (!flow) continue; - if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) - config->flow_mreg_c[n++] = idx; + config->flow_mreg_c[n++] = idx; flow_list_destroy(dev, NULL, flow_idx); } for (; n < MLX5_MREG_C_NUM; ++n) @@ -7273,6 +7310,25 @@ flow_drv_action_update(struct rte_eth_dev *dev, return fops->action_update(dev, action, action_conf, error); } +/* Wrapper for driver action_destroy op callback */ +static int +flow_drv_action_query(struct rte_eth_dev *dev, + const struct rte_flow_shared_action *action, + void *data, + const struct mlx5_flow_driver_ops *fops, + struct rte_flow_error *error) +{ + static const char err_msg[] = "shared action query unsupported"; + + if (!fops->action_query) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return -rte_errno; + } + return fops->action_query(dev, action, data, error); +} + /** * Create shared action for reuse in multiple flow rules. * @@ -7375,11 +7431,11 @@ mlx5_shared_action_query(struct rte_eth_dev *dev, void *data, struct rte_flow_error *error) { - (void)dev; - (void)action; - (void)data; - return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "action type query not supported"); + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + return flow_drv_action_query(dev, action, data, fops, error); } /** @@ -7408,73 +7464,382 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev) return ret; } -static void -mlx5_flow_tunnel_free(struct rte_eth_dev *dev, - struct mlx5_flow_tunnel *tunnel) +#ifndef HAVE_MLX5DV_DR +#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1)) +#else +#define MLX5_DOMAIN_SYNC_FLOW \ + (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW) +#endif + +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) { - struct mlx5_priv *priv = dev->data->dev_private; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct mlx5_flow_driver_ops *fops; + int ret; + struct rte_flow_attr attr = { .transfer = 0 }; - DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", - dev->data->port_id, tunnel->tunnel_id); - RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], - tunnel->tunnel_id); - mlx5_hlist_destroy(tunnel->groups); - mlx5_free(tunnel); + fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW); + if (ret > 0) + ret = -ret; + return ret; } -static struct mlx5_flow_tunnel * -mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (tun->tunnel_id == id) - break; - } +/** + * tunnel offload functionalilty is defined for 
DV environment only + */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT +__extension__ +union tunnel_offload_mark { + uint32_t val; + struct { + uint32_t app_reserve:8; + uint32_t table_id:15; + uint32_t transfer:1; + uint32_t _unused_:8; + }; +}; - return tun; -} +static bool +mlx5_access_tunnel_offload_db + (struct rte_eth_dev *dev, + bool (*match)(struct rte_eth_dev *, + struct mlx5_flow_tunnel *, const void *), + void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), + void (*miss)(struct rte_eth_dev *, void *), + void *ctx, bool lock_op); -static struct mlx5_flow_tunnel * -mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, - const struct rte_flow_tunnel *app_tunnel) +static int +flow_tunnel_add_default_miss(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_action *app_actions, + uint32_t flow_idx, + struct tunnel_default_miss_ctx *ctx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_tunnel *tunnel; - uint32_t id; + struct mlx5_flow *dev_flow; + struct rte_flow_attr miss_attr = *attr; + const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf; + const struct rte_flow_item miss_items[2] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = NULL, + .last = NULL, + .mask = NULL + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + .spec = NULL, + .last = NULL, + .mask = NULL + } + }; + union tunnel_offload_mark mark_id; + struct rte_flow_action_mark miss_mark; + struct rte_flow_action miss_actions[3] = { + [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark }, + [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL } + }; + const struct rte_flow_action_jump *jump_data; + uint32_t i, flow_table = 0; /* prevent compilation warning */ + struct flow_grp_info grp_info = { + .external = 1, + .transfer = attr->transfer, + .fdb_def_rule = !!priv->fdb_def_rule, + .std_tbl_fix = 0, + }; + int ret; - mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], - &id); - if (id >= MLX5_MAX_TUNNELS) { - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); - DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); - return NULL; - } else if (!id) { - return NULL; + if (!attr->transfer) { + uint32_t q_size; + + miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS; + q_size = priv->reta_idx_n * sizeof(ctx->queue[0]); + ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size, + 0, SOCKET_ID_ANY); + if (!ctx->queue) + return rte_flow_error_set + (error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid default miss RSS"); + ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT, + ctx->action_rss.level = 0, + ctx->action_rss.types = priv->rss_conf.rss_hf, + ctx->action_rss.key_len = priv->rss_conf.rss_key_len, + ctx->action_rss.queue_num = priv->reta_idx_n, + ctx->action_rss.key = priv->rss_conf.rss_key, + ctx->action_rss.queue = ctx->queue; + if (!priv->reta_idx_n || !priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid port configuration"); + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + ctx->action_rss.types = 0; + for (i = 0; i != priv->reta_idx_n; ++i) + ctx->queue[i] = (*priv->reta_idx)[i]; + } else { + miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP; + ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP; } - /** - * mlx5 flow tunnel is an auxlilary data structure - * It's not part of IO. 
No need to allocate it from - * huge pages pools dedicated for IO - */ - tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel), - 0, SOCKET_ID_ANY); - if (!tunnel) { - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); + miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw; + for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++); + jump_data = app_actions->conf; + miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY; + miss_attr.group = jump_data->group; + ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group, + &flow_table, &grp_info, error); + if (ret) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid tunnel id"); + mark_id.app_reserve = 0; + mark_id.table_id = tunnel_flow_tbl_to_id(flow_table); + mark_id.transfer = !!attr->transfer; + mark_id._unused_ = 0; + miss_mark.id = mark_id.val; + dev_flow = flow_drv_prepare(dev, flow, &miss_attr, + miss_items, miss_actions, flow_idx, error); + if (!dev_flow) + return -rte_errno; + dev_flow->flow = flow; + dev_flow->external = true; + dev_flow->tunnel = tunnel; + /* Subflow object was created, we must include one in the list. */ + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); + DRV_LOG(DEBUG, + "port %u tunnel type=%d id=%u miss rule priority=%u group=%u", + dev->data->port_id, tunnel->app_tunnel.type, + tunnel->tunnel_id, miss_attr.priority, miss_attr.group); + ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items, + miss_actions, error); + if (!ret) + ret = flow_mreg_update_copy_table(dev, flow, miss_actions, + error); + + return ret; +} + +static const struct mlx5_flow_tbl_data_entry * +tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_hlist_entry *he; + union tunnel_offload_mark mbits = { .val = mark }; + union mlx5_flow_tbl_key table_key = { + { + .table_id = tunnel_id_to_flow_tbl(mbits.table_id), + .dummy = 0, + .domain = !!mbits.transfer, + .direction = 0, + } + }; + he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); + return he ? 
+ container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; +} + +static void +mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); + + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tunnel_flow_tbl_to_id(tte->flow_table)); + mlx5_free(tte); +} + +static int +mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key, void *cb_ctx __rte_unused) +{ + union tunnel_tbl_key tbl = { + .val = key, + }; + struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); + + return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group; +} + +static struct mlx5_hlist_entry * +mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key, + void *ctx __rte_unused) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct tunnel_tbl_entry *tte; + union tunnel_tbl_key tbl = { + .val = key, + }; + + tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, + sizeof(*tte), 0, + SOCKET_ID_ANY); + if (!tte) + goto err; + mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + &tte->flow_table); + if (tte->flow_table >= MLX5_MAX_TABLES) { + DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", + tte->flow_table); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tte->flow_table); + goto err; + } else if (!tte->flow_table) { + goto err; + } + tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); + tte->tunnel_id = tbl.tunnel_id; + tte->group = tbl.group; + return &tte->hash; +err: + if (tte) + mlx5_free(tte); + return NULL; +} + +static uint32_t +tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group, uint32_t *table, + struct rte_flow_error *error) +{ + struct mlx5_hlist_entry *he; + struct tunnel_tbl_entry *tte; + union tunnel_tbl_key key = { + .tunnel_id = tunnel ? tunnel->tunnel_id : 0, + .group = group + }; + struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); + struct mlx5_hlist *group_hash; + + group_hash = tunnel ? 
tunnel->groups : thub->groups; + he = mlx5_hlist_register(group_hash, key.val, NULL); + if (!he) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "tunnel group index not supported"); + tte = container_of(he, typeof(*tte), hash); + *table = tte->flow_table; + DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", + dev->data->port_id, key.tunnel_id, group, *table); + return 0; +} + +static void +mlx5_flow_tunnel_free(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool; + + DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", + dev->data->port_id, tunnel->tunnel_id); + LIST_REMOVE(tunnel, chain); + mlx5_hlist_destroy(tunnel->groups); + ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID]; + mlx5_ipool_free(ipool, tunnel->tunnel_id); +} + +static bool +mlx5_access_tunnel_offload_db + (struct rte_eth_dev *dev, + bool (*match)(struct rte_eth_dev *, + struct mlx5_flow_tunnel *, const void *), + void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), + void (*miss)(struct rte_eth_dev *, void *), + void *ctx, bool lock_op) +{ + bool verdict = false; + struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); + struct mlx5_flow_tunnel *tunnel; + + rte_spinlock_lock(&thub->sl); + LIST_FOREACH(tunnel, &thub->tunnels, chain) { + verdict = match(dev, tunnel, (const void *)ctx); + if (verdict) + break; + } + if (!lock_op) + rte_spinlock_unlock(&thub->sl); + if (verdict && hit) + hit(dev, tunnel, ctx); + if (!verdict && miss) + miss(dev, ctx); + if (lock_op) + rte_spinlock_unlock(&thub->sl); + + return verdict; +} + +struct tunnel_db_find_tunnel_id_ctx { + uint32_t tunnel_id; + struct mlx5_flow_tunnel *tunnel; +}; + +static bool +find_tunnel_id_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_find_tunnel_id_ctx *ctx = x; + + RTE_SET_USED(dev); + return tunnel->tunnel_id == ctx->tunnel_id; +} + +static void +find_tunnel_id_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + struct tunnel_db_find_tunnel_id_ctx *ctx = x; + RTE_SET_USED(dev); + ctx->tunnel = tunnel; +} + +static struct mlx5_flow_tunnel * +mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) +{ + struct tunnel_db_find_tunnel_id_ctx ctx = { + .tunnel_id = id, + }; + + mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match, + find_tunnel_id_hit, NULL, &ctx, true); + + return ctx.tunnel; +} + +static struct mlx5_flow_tunnel * +mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, + const struct rte_flow_tunnel *app_tunnel) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool; + struct mlx5_flow_tunnel *tunnel; + uint32_t id; + + ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID]; + tunnel = mlx5_ipool_zmalloc(ipool, &id); + if (!tunnel) + return NULL; + if (id >= MLX5_MAX_TUNNELS) { + mlx5_ipool_free(ipool, id); + DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); return NULL; } tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0, mlx5_flow_tunnel_grp2tbl_create_cb, - NULL, + mlx5_flow_tunnel_grp2tbl_match_cb, mlx5_flow_tunnel_grp2tbl_remove_cb); if (!tunnel->groups) { - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); - mlx5_free(tunnel); + mlx5_ipool_free(ipool, id); return NULL; } tunnel->groups->ctx = priv->sh; @@ -7496,38 +7861,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, return tunnel; } +struct tunnel_db_get_tunnel_ctx { + const 
struct rte_flow_tunnel *app_tunnel; + struct mlx5_flow_tunnel *tunnel; +}; + +static bool get_tunnel_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_get_tunnel_ctx *ctx = x; + + RTE_SET_USED(dev); + return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel, + sizeof(*ctx->app_tunnel)); +} + +static void get_tunnel_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + /* called under tunnel spinlock protection */ + struct tunnel_db_get_tunnel_ctx *ctx = x; + + RTE_SET_USED(dev); + tunnel->refctn++; + ctx->tunnel = tunnel; +} + +static void get_tunnel_miss(struct rte_eth_dev *dev, void *x) +{ + /* called under tunnel spinlock protection */ + struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); + struct tunnel_db_get_tunnel_ctx *ctx = x; + + rte_spinlock_unlock(&thub->sl); + ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel); + ctx->tunnel->refctn = 1; + rte_spinlock_lock(&thub->sl); + if (ctx->tunnel) + LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain); +} + + static int mlx5_get_flow_tunnel(struct rte_eth_dev *dev, const struct rte_flow_tunnel *app_tunnel, struct mlx5_flow_tunnel **tunnel) { - int ret; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (!memcmp(app_tunnel, &tun->app_tunnel, - sizeof(*app_tunnel))) { - *tunnel = tun; - ret = 0; - break; - } - } - if (!tun) { - tun = mlx5_flow_tunnel_allocate(dev, app_tunnel); - if (tun) { - LIST_INSERT_HEAD(&thub->tunnels, tun, chain); - *tunnel = tun; - } else { - ret = -ENOMEM; - } - } - rte_spinlock_unlock(&thub->sl); - if (tun) - __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED); + struct tunnel_db_get_tunnel_ctx ctx = { + .app_tunnel = app_tunnel, + }; - return ret; + mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit, + get_tunnel_miss, &ctx, true); + *tunnel = ctx.tunnel; + return ctx.tunnel ? 
0 : -ENOMEM; } void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) @@ -7555,7 +7942,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) rte_spinlock_init(&thub->sl); thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0, 0, mlx5_flow_tunnel_grp2tbl_create_cb, - NULL, + mlx5_flow_tunnel_grp2tbl_match_cb, mlx5_flow_tunnel_grp2tbl_remove_cb); if (!thub->groups) { err = -rte_errno; @@ -7574,23 +7961,296 @@ err: return err; } -#ifndef HAVE_MLX5DV_DR -#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1)) -#else -#define MLX5_DOMAIN_SYNC_FLOW \ - (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW) -#endif +static inline bool +mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, + struct rte_flow_tunnel *tunnel, + const char *err_msg) +{ + err_msg = NULL; + if (!is_tunnel_offload_active(dev)) { + err_msg = "tunnel offload was not activated"; + goto out; + } else if (!tunnel) { + err_msg = "no application tunnel"; + goto out; + } -int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) + switch (tunnel->type) { + default: + err_msg = "unsupported tunnel type"; + goto out; + case RTE_FLOW_ITEM_TYPE_VXLAN: + break; + } + +out: + return !err_msg; +} + +static int +mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, + struct rte_flow_tunnel *app_tunnel, + struct rte_flow_action **actions, + uint32_t *num_of_actions, + struct rte_flow_error *error) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - const struct mlx5_flow_driver_ops *fops; int ret; - struct rte_flow_attr attr = { .transfer = 0 }; + struct mlx5_flow_tunnel *tunnel; + const char *err_msg = NULL; + bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); - ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW); - if (ret > 0) - ret = -ret; - return ret; + if (!verdict) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + err_msg); + ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); + if (ret < 0) { + return rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "failed to initialize pmd tunnel"); + } + *actions = &tunnel->action; + *num_of_actions = 1; + return 0; +} + +static int +mlx5_flow_tunnel_match(struct rte_eth_dev *dev, + struct rte_flow_tunnel *app_tunnel, + struct rte_flow_item **items, + uint32_t *num_of_items, + struct rte_flow_error *error) +{ + int ret; + struct mlx5_flow_tunnel *tunnel; + const char *err_msg = NULL; + bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + + if (!verdict) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + err_msg); + ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); + if (ret < 0) { + return rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "failed to initialize pmd tunnel"); + } + *items = &tunnel->item; + *num_of_items = 1; + return 0; +} + +struct tunnel_db_element_release_ctx { + struct rte_flow_item *items; + struct rte_flow_action *actions; + uint32_t num_elements; + struct rte_flow_error *error; + int ret; +}; + +static bool +tunnel_element_release_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_element_release_ctx *ctx = x; + + RTE_SET_USED(dev); + if (ctx->num_elements != 1) + return false; + else if (ctx->items) + return ctx->items == &tunnel->item; + else if (ctx->actions) + return ctx->actions == &tunnel->action; + + return false; +} + +static void 
+tunnel_element_release_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + struct tunnel_db_element_release_ctx *ctx = x; + ctx->ret = 0; + if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) + mlx5_flow_tunnel_free(dev, tunnel); +} + +static void +tunnel_element_release_miss(struct rte_eth_dev *dev, void *x) +{ + struct tunnel_db_element_release_ctx *ctx = x; + RTE_SET_USED(dev); + ctx->ret = rte_flow_error_set(ctx->error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "invalid argument"); +} + +static int +mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, + struct rte_flow_item *pmd_items, + uint32_t num_items, struct rte_flow_error *err) +{ + struct tunnel_db_element_release_ctx ctx = { + .items = pmd_items, + .actions = NULL, + .num_elements = num_items, + .error = err, + }; + + mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, + tunnel_element_release_hit, + tunnel_element_release_miss, &ctx, false); + + return ctx.ret; } + +static int +mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, + struct rte_flow_action *pmd_actions, + uint32_t num_actions, struct rte_flow_error *err) +{ + struct tunnel_db_element_release_ctx ctx = { + .items = NULL, + .actions = pmd_actions, + .num_elements = num_actions, + .error = err, + }; + + mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, + tunnel_element_release_hit, + tunnel_element_release_miss, &ctx, false); + + return ctx.ret; +} + +static int +mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, + struct rte_mbuf *m, + struct rte_flow_restore_info *info, + struct rte_flow_error *err) +{ + uint64_t ol_flags = m->ol_flags; + const struct mlx5_flow_tbl_data_entry *tble; + const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID; + + if (!is_tunnel_offload_active(dev)) { + info->flags = 0; + return 0; + } + + if ((ol_flags & mask) != mask) + goto err; + tble = tunnel_mark_decode(dev, m->hash.fdir.hi); + if (!tble) { + DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", + dev->data->port_id, m->hash.fdir.hi); + goto err; + } + MLX5_ASSERT(tble->tunnel); + memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel)); + info->group_id = tble->group_id; + info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | + RTE_FLOW_RESTORE_INFO_GROUP_ID | + RTE_FLOW_RESTORE_INFO_ENCAPSULATED; + + return 0; + +err: + return rte_flow_error_set(err, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to get restore info"); +} + +#else /* HAVE_IBV_FLOW_DV_SUPPORT */ +static int +mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_tunnel *app_tunnel, + __rte_unused struct rte_flow_action **actions, + __rte_unused uint32_t *num_of_actions, + __rte_unused struct rte_flow_error *error) +{ + return -ENOTSUP; +} + +static int +mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_tunnel *app_tunnel, + __rte_unused struct rte_flow_item **items, + __rte_unused uint32_t *num_of_items, + __rte_unused struct rte_flow_error *error) +{ + return -ENOTSUP; +} + +static int +mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_item *pmd_items, + __rte_unused uint32_t num_items, + __rte_unused struct rte_flow_error *err) +{ + return -ENOTSUP; +} + +static int +mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_action *pmd_action, + __rte_unused uint32_t num_actions, + __rte_unused struct rte_flow_error *err) +{ + return 
-ENOTSUP; +} + +static int +mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_mbuf *m, + __rte_unused struct rte_flow_restore_info *i, + __rte_unused struct rte_flow_error *err) +{ + return -ENOTSUP; +} + +static int +flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow *flow, + __rte_unused const struct rte_flow_attr *attr, + __rte_unused const struct rte_flow_action *actions, + __rte_unused uint32_t flow_idx, + __rte_unused struct tunnel_default_miss_ctx *ctx, + __rte_unused struct rte_flow_error *error) +{ + return -ENOTSUP; +} + +static struct mlx5_flow_tunnel * +mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint32_t id) +{ + return NULL; +} + +static void +mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct mlx5_flow_tunnel *tunnel) +{ +} + +static uint32_t +tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev, + __rte_unused const struct mlx5_flow_tunnel *t, + __rte_unused uint32_t group, + __rte_unused uint32_t *table, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "tunnel offload requires DV support"); +} + +void +mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, + __rte_unused uint16_t port_id) +{ +} +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +
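
The workspace rework in this patch drops the two-slot rss_desc/rssq_num ping-pong and the flow_nested_idx bookkeeping in favour of a per-thread chain of workspaces: flow_list_create() pushes a workspace on entry and pops it on every exit path, so nested flow creation (for example the mreg copy flows) simply pushes a second node. Below is a minimal, self-contained sketch of that push/pop scheme; the struct layout, __thread storage and main() driver are stand-ins for the driver's mlx5_flow_workspace and mlx5_flow_os_{get,set}_specific_workspace(), not the actual implementation.

/* Simplified illustration of the per-thread workspace chain (not driver code). */
#include <stdlib.h>
#include <stdio.h>

struct workspace {
	struct workspace *prev;
	struct workspace *next;
	int inuse;
	/* per-flow scratch data (rss_desc, flow_idx, ...) would live here */
};

static __thread struct workspace *ws_current;

static struct workspace *
workspace_push(void)
{
	struct workspace *curr = ws_current;
	struct workspace *data;

	if (curr && !curr->inuse) {
		data = curr;              /* current node is free: reuse it */
	} else if (curr && curr->next) {
		data = curr->next;        /* reuse a previously allocated node */
	} else {
		data = calloc(1, sizeof(*data));
		if (!data)
			return NULL;
		if (curr) {               /* link the new node after the current one */
			curr->next = data;
			data->prev = curr;
		}
	}
	data->inuse = 1;
	ws_current = data;
	return data;
}

static void
workspace_pop(void)
{
	struct workspace *data = ws_current;

	if (!data || !data->inuse)
		return;
	data->inuse = 0;
	if (data->prev)
		ws_current = data->prev;  /* resume the outer (nested) flow */
}

int main(void)
{
	struct workspace *outer = workspace_push();  /* e.g. application flow */
	struct workspace *inner = workspace_push();  /* e.g. nested copy flow */

	printf("outer=%p inner=%p distinct=%d\n",
	       (void *)outer, (void *)inner, outer != inner);
	workspace_pop();  /* back to the outer flow's workspace */
	workspace_pop();
	return 0;
}

Reusing an already-linked node on push mirrors the patch: nodes are only marked unused on pop, never freed, and flow_release_workspace() walks the whole chain when the thread key is destroyed.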
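
mlx5_flow_group_to_table() now takes grp_info by pointer and honours a new skip_scale flag, which the meter split sets (via MLX5_SCALE_FLOW_GROUP_BIT in flow_split_info) so a suffix group that was already scaled is not multiplied by MLX5_FLOW_TABLE_FACTOR a second time. A hedged sketch of that scaling rule follows; the constant values below are illustrative assumptions, the real ones live in the driver headers.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MLX5_MAX_TABLES_EXTERNAL 15   /* illustrative value, not the driver's */
#define MLX5_FLOW_TABLE_FACTOR   4    /* illustrative value, not the driver's */

/* External groups below the external-table limit are scaled unless the
 * caller asks to skip scaling (already-scaled suffix subflows). */
static uint32_t
scale_group(uint32_t group, bool external, bool skip_scale)
{
	if (!skip_scale && external && group < MLX5_MAX_TABLES_EXTERNAL)
		group *= MLX5_FLOW_TABLE_FACTOR;
	return group;
}

int main(void)
{
	printf("group 3 external            -> table %u\n", scale_group(3, true, false));
	printf("group 3 external, skip_scale -> table %u\n", scale_group(3, true, true));
	return 0;
}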
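
The tunnel offload code consolidated under HAVE_IBV_FLOW_DV_SUPPORT keys everything off a 32-bit MARK value split into app_reserve/table_id/transfer bitfields: the default-miss rule encodes the tunnel flow table into the mark, and tunnel_mark_decode() later recovers the table from m->hash.fdir.hi. The standalone program below only illustrates that encode/decode round trip; the union mirrors the patch, while the main() harness and the example values are hypothetical.

#include <stdint.h>
#include <stdio.h>

__extension__
union tunnel_offload_mark {
	uint32_t val;
	struct {
		uint32_t app_reserve:8;  /* bits left to the application */
		uint32_t table_id:15;    /* PMD tunnel flow table id */
		uint32_t transfer:1;     /* FDB (transfer) domain flag */
		uint32_t _unused_:8;
	};
};

int main(void)
{
	union tunnel_offload_mark mark = { .val = 0 };

	/* Encode: what the default-miss rule programs into the MARK action. */
	mark.app_reserve = 0;
	mark.table_id = 0x3a;   /* example table id */
	mark.transfer = 1;
	printf("encoded mark: %#x\n", mark.val);

	/* Decode: what tunnel_mark_decode() recovers from m->hash.fdir.hi. */
	union tunnel_offload_mark decoded = { .val = mark.val };
	printf("table_id=%u transfer=%u\n", decoded.table_id, decoded.transfer);
	return 0;
}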
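
mlx5_access_tunnel_offload_db() centralises every walk of the tunnel list behind the hub spinlock and hands the outcome to match/hit/miss callbacks, which is how mlx5_find_tunnel_id(), mlx5_get_flow_tunnel() and the element-release paths are now expressed. The sketch below shows the same callback shape with simplified, stand-in types; it always runs the callbacks under the lock, whereas the driver's lock_op flag lets the release paths run hit/miss after unlocking (and get_tunnel_miss even drops the lock around allocation).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

struct tunnel {
	LIST_ENTRY(tunnel) chain;
	unsigned int id;
};

struct tunnel_hub {
	pthread_spinlock_t sl;
	LIST_HEAD(, tunnel) tunnels;
};

/* Walk the tunnel list under the lock; delegate found/not-found handling. */
static bool
access_tunnel_db(struct tunnel_hub *hub,
		 bool (*match)(struct tunnel *, const void *),
		 void (*hit)(struct tunnel *, void *),
		 void (*miss)(void *),
		 void *ctx)
{
	struct tunnel *tun;
	bool found = false;

	pthread_spin_lock(&hub->sl);
	LIST_FOREACH(tun, &hub->tunnels, chain) {
		found = match(tun, ctx);
		if (found)
			break;
	}
	if (found && hit)
		hit(tun, ctx);   /* simplified: always under the lock */
	if (!found && miss)
		miss(ctx);
	pthread_spin_unlock(&hub->sl);
	return found;
}

struct find_ctx {
	unsigned int id;
	struct tunnel *out;
};

static bool match_id(struct tunnel *t, const void *x)
{
	return t->id == ((const struct find_ctx *)x)->id;
}

static void hit_id(struct tunnel *t, void *x)
{
	((struct find_ctx *)x)->out = t;
}

int main(void)
{
	struct tunnel_hub hub;
	struct tunnel t1 = { .id = 7 };
	struct find_ctx ctx = { .id = 7, .out = NULL };

	pthread_spin_init(&hub.sl, PTHREAD_PROCESS_PRIVATE);
	LIST_INIT(&hub.tunnels);
	LIST_INSERT_HEAD(&hub.tunnels, &t1, chain);
	printf("found=%d\n", access_tunnel_db(&hub, match_id, hit_id, NULL, &ctx));
	return 0;
}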