diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 324349ed19..0197a07209 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -12,7 +12,7 @@
 
 #include <rte_common.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_eal_paging.h>
 #include <rte_flow.h>
 #include <rte_cycles.h>
@@ -33,16 +33,35 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+struct tunnel_default_miss_ctx {
+	uint16_t *queue;
+	__extension__
+	union {
+		struct rte_flow_action_rss action_rss;
+		struct rte_flow_action_queue miss_queue;
+		struct rte_flow_action_jump miss_jump;
+		uint8_t raw[0];
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error);
+
 static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 static void mlx5_flow_pop_thread_workspace(void);
 
@@ -54,7 +73,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
 
 const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
 #endif
 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
@@ -193,6 +212,8 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 	return ret;
 }
 
+#define MLX5_RSS_EXP_ELT_N 8
+
 /**
  * Expand RSS flows into several possible flows according to the RSS hash
  * fields requested and the driver capabilities.
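/*
 * Illustrative sketch (not part of the patch): mlx5_flow_expand_rss()
 * below walks the expansion graph depth-first with a fixed-size stack,
 * which is why the bare "8" is promoted to the named constant
 * MLX5_RSS_EXP_ELT_N. A minimal standalone model of that bounded walk;
 * the node/graph types here are hypothetical stand-ins, not the PMD's own.
 */
#include <errno.h>
#include <stdio.h>

#define EXP_ELT_N 8 /* mirrors MLX5_RSS_EXP_ELT_N */

struct exp_node {
	const char *name;
	const int *next; /* child indices into the graph, -1 terminated */
};

static int
expand_walk(const struct exp_node graph[], int root)
{
	const int *stack[EXP_ELT_N];
	int stack_pos = 0;
	const int *cur = graph[root].next;

	while (cur) {
		while (cur && *cur >= 0) {
			const struct exp_node *node = &graph[*cur];

			printf("expanded pattern tail: %s\n", node->name);
			if (node->next) {
				/* Go deeper; refuse paths deeper than the
				 * stack, like the E2BIG check below.
				 */
				if (stack_pos == EXP_ELT_N)
					return -E2BIG;
				stack[stack_pos++] = cur + 1;
				cur = node->next;
			} else {
				cur++;
			}
		}
		cur = stack_pos ? stack[--stack_pos] : NULL;
	}
	return 0;
}

int
main(void)
{
	static const int eth_next[] = { 1, 2, -1 };
	static const struct exp_node graph[] = {
		{ "ETH",  eth_next },
		{ "IPV4", NULL },
		{ "IPV6", NULL },
	};

	/* Expands the ETH root into its IPV4 and IPV6 variants. */
	return expand_walk(graph, 0) ? 1 : 0;
}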
@@ -223,13 +244,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, const struct mlx5_flow_expand_node graph[], int graph_root_index) { - const int elt_n = 8; const struct rte_flow_item *item; const struct mlx5_flow_expand_node *node = &graph[graph_root_index]; const int *next_node; - const int *stack[elt_n]; + const int *stack[MLX5_RSS_EXP_ELT_N]; int stack_pos = 0; - struct rte_flow_item flow_items[elt_n]; + struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; unsigned int i; size_t lsize; size_t user_pattern_size = 0; @@ -242,10 +262,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, memset(&missed_item, 0, sizeof(missed_item)); lsize = offsetof(struct mlx5_flow_expand_rss, entry) + - elt_n * sizeof(buf->entry[0]); + MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]); if (lsize <= size) { buf->entry[0].priority = 0; - buf->entry[0].pattern = (void *)&buf->entry[elt_n]; + buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; buf->entries = 0; addr = buf->entry[0].pattern; } @@ -348,7 +368,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, /* Go deeper. */ if (node->next) { next_node = node->next; - if (stack_pos++ == elt_n) { + if (stack_pos++ == MLX5_RSS_EXP_ELT_N) { rte_errno = E2BIG; return -rte_errno; } @@ -580,170 +600,32 @@ static int mlx5_shared_action_query const struct rte_flow_shared_action *action, void *data, struct rte_flow_error *error); -static inline bool -mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, - struct rte_flow_tunnel *tunnel, - const char *err_msg) -{ - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application tunnel"; - goto out; - } - - switch (tunnel->type) { - default: - err_msg = "unsupported tunnel type"; - goto out; - case RTE_FLOW_ITEM_TYPE_VXLAN: - break; - } - -out: - return !err_msg; -} - - static int mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, struct rte_flow_tunnel *app_tunnel, struct rte_flow_action **actions, uint32_t *num_of_actions, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - "failed to initialize pmd tunnel"); - } - *actions = &tunnel->action; - *num_of_actions = 1; - return 0; -} - + struct rte_flow_error *error); static int mlx5_flow_tunnel_match(struct rte_eth_dev *dev, struct rte_flow_tunnel *app_tunnel, struct rte_flow_item **items, uint32_t *num_of_items, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "failed to initialize pmd tunnel"); - } - *items = &tunnel->item; - *num_of_items = 1; - return 0; -} - + struct rte_flow_error *error); static int -mlx5_flow_item_release(struct rte_eth_dev *dev, - struct rte_flow_item *pmd_items, - uint32_t num_items, struct 
rte_flow_error *err) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->item == pmd_items) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_items != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); - return 0; -} - +mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, + struct rte_flow_item *pmd_items, + uint32_t num_items, struct rte_flow_error *err); static int -mlx5_flow_action_release(struct rte_eth_dev *dev, - struct rte_flow_action *pmd_actions, - uint32_t num_actions, struct rte_flow_error *err) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->action == pmd_actions) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_actions != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); - - return 0; -} - +mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, + struct rte_flow_action *pmd_actions, + uint32_t num_actions, + struct rte_flow_error *err); static int mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, - struct rte_flow_error *err) -{ - uint64_t ol_flags = m->ol_flags; - const struct mlx5_flow_tbl_data_entry *tble; - const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID; - - if ((ol_flags & mask) != mask) - goto err; - tble = tunnel_mark_decode(dev, m->hash.fdir.hi); - if (!tble) { - DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", - dev->data->port_id, m->hash.fdir.hi); - goto err; - } - MLX5_ASSERT(tble->tunnel); - memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel)); - info->group_id = tble->group_id; - info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | - RTE_FLOW_RESTORE_INFO_GROUP_ID | - RTE_FLOW_RESTORE_INFO_ENCAPSULATED; - - return 0; - -err: - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "failed to get restore info"); -} + struct rte_flow_error *err); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -760,8 +642,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .shared_action_query = mlx5_shared_action_query, .tunnel_decap_set = mlx5_flow_tunnel_decap_set, .tunnel_match = mlx5_flow_tunnel_match, - .tunnel_action_decap_release = mlx5_flow_action_release, - .tunnel_item_release = mlx5_flow_item_release, + .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, + .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, }; @@ -814,11 +696,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { }, }; -/* Key of thread specific flow workspace data. */ -static pthread_key_t key_workspace; - -/* Thread specific flow workspace data once initialization data. */ -static pthread_once_t key_workspace_init; /** @@ -896,6 +773,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return priv->mtr_color_reg != REG_C_2 ? 
REG_C_2 : REG_C_3;
 	case MLX5_MTR_COLOR:
+	case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
 		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
 		return priv->mtr_color_reg;
 	case MLX5_COPY_MARK:
@@ -915,7 +793,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
 			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
 		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
-		if (id > (REG_C_7 - start_reg))
+		if (id > (uint32_t)(REG_C_7 - start_reg))
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  NULL, "invalid tag id");
@@ -931,7 +809,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		 */
 		if (skip_mtr_reg && config->flow_mreg_c
 		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
-			if (id >= (REG_C_7 - start_reg))
+			if (id >= (uint32_t)(REG_C_7 - start_reg))
 				return rte_flow_error_set(error, EINVAL,
 							RTE_FLOW_ERROR_TYPE_ITEM,
 							NULL, "invalid tag id");
@@ -977,6 +855,58 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
 	return config->flow_mreg_c[2] != REG_NON;
 }
 
+/**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Pointer to device flow rule attributes.
+ *
+ * @return
+ *   The value of the lowest priority of the flow.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!attr->group && !attr->transfer)
+		return priv->config.flow_prio - 2;
+	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate the matcher priority of the flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+			  const struct rte_flow_attr *attr,
+			  uint32_t subpriority)
+{
+	uint16_t priority = (uint16_t)attr->priority;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!attr->group && !attr->transfer) {
+		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+			priority = priv->config.flow_prio - 1;
+		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+	}
+	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
+	return priority * 3 + subpriority;
+}
+
 /**
  * Verify the @p item specifications (spec, last, mask) are compatible with the
  * NIC capabilities.
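/*
 * Illustrative sketch (not part of the patch): how the two helpers above
 * combine. Only the root table (group 0, non-transfer) goes through the
 * OS-specific priority adjuster; every other table uses the plain
 * arithmetic "attr->priority * 3 + subpriority". The indicator and
 * maximum values below are demo assumptions standing in for
 * MLX5_FLOW_LOWEST_PRIO_INDICATOR and MLX5_NON_ROOT_FLOW_MAX_PRIO.
 */
#include <stdio.h>

#define DEMO_LOWEST_PRIO_INDICATOR 0xffffffffu
#define DEMO_NON_ROOT_MAX_PRIO     1024u

static unsigned int
demo_matcher_priority(unsigned int group, unsigned int prio,
		      unsigned int subprio)
{
	if (group == 0)
		return prio; /* root: left to the OS-specific adjuster */
	if (prio == DEMO_LOWEST_PRIO_INDICATOR)
		prio = DEMO_NON_ROOT_MAX_PRIO;
	return prio * 3 + subprio;
}

int
main(void)
{
	unsigned int p;

	/* Three rules in group 1, priorities 0..2, same item subpriority. */
	for (p = 0; p < 3; p++)
		printf("group 1 prio %u -> matcher prio %u\n",
		       p, demo_matcher_priority(1, p, 2));
	return 0;
}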
@@ -1119,17 +1049,29 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); - struct mlx5_hrxq *hrxq; + struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; - if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) - return; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], dev_handle->rix_hrxq); - if (!hrxq) + if (hrxq) + ind_tbl = hrxq->ind_table; + } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + struct mlx5_shared_action_rss *shared_rss; + + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + dev_handle->rix_srss); + if (shared_rss) + ind_tbl = shared_rss->ind_tbl; + } + if (!ind_tbl) return; - for (i = 0; i != hrxq->ind_table->queues_n; ++i) { - int idx = hrxq->ind_table->queues[i]; + for (i = 0; i != ind_tbl->queues_n; ++i) { + int idx = ind_tbl->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -1201,18 +1143,30 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); - struct mlx5_hrxq *hrxq; + struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; - if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) - return; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], dev_handle->rix_hrxq); - if (!hrxq) + if (hrxq) + ind_tbl = hrxq->ind_table; + } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { + struct mlx5_shared_action_rss *shared_rss; + + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], + dev_handle->rix_srss); + if (shared_rss) + ind_tbl = shared_rss->ind_tbl; + } + if (!ind_tbl) return; MLX5_ASSERT(dev->data->dev_started); - for (i = 0; i != hrxq->ind_table->queues_n; ++i) { - int idx = hrxq->ind_table->queues[i]; + for (i = 0; i != ind_tbl->queues_n; ++i) { + int idx = ind_tbl->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -1530,6 +1484,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; + enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; unsigned int i; if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && @@ -1595,6 +1550,8 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "No queues configured"); for (i = 0; i != rss->queue_num; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + if (rss->queue[i] >= priv->rxqs_n) return rte_flow_error_set (error, EINVAL, @@ -1604,6 +1561,15 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->queue[i], "queue is not configured"); + rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], + struct mlx5_rxq_ctrl, rxq); + if (i == 0) + rxq_type = rxq_ctrl->type; + if (rxq_type != rxq_ctrl->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], + "combining hairpin and regular RSS 
queues is not supported");
 	}
 	return 0;
 }
@@ -1755,7 +1721,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
 					  NULL, "groups is not supported");
-	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
 	    attributes->priority >= priority_max)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -2708,6 +2674,149 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
 	return 0;
 }
 
+/**
+ * Validate Geneve TLV option item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] geneve_item
+ *   Previous GENEVE item specification.
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
+				   uint64_t last_item,
+				   const struct rte_flow_item *geneve_item,
+				   struct rte_eth_dev *dev,
+				   struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
+	struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+	uint8_t data_max_supported =
+			hca_attr->max_geneve_tlv_option_data_len * 4;
+	struct mlx5_dev_config *config = &priv->config;
+	const struct rte_flow_item_geneve *geneve_spec;
+	const struct rte_flow_item_geneve *geneve_mask;
+	const struct rte_flow_item_geneve_opt *spec = item->spec;
+	const struct rte_flow_item_geneve_opt *mask = item->mask;
+	unsigned int i;
+	unsigned int data_len;
+	uint8_t tlv_option_len;
+	uint16_t optlen_m, optlen_v;
+	const struct rte_flow_item_geneve_opt full_mask = {
+		.option_class = RTE_BE16(0xffff),
+		.option_type = 0xff,
+		.option_len = 0x1f,
+	};
+
+	if (!mask)
+		mask = &rte_flow_item_geneve_opt_mask;
+	if (!spec)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Geneve TLV opt class/type/length must be specified");
+	if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Geneve TLV opt length exceeds the limit (31)");
+	/* Check if class, type and length masks are full. */
+	if (full_mask.option_class != mask->option_class ||
+	    full_mask.option_type != mask->option_type ||
+	    full_mask.option_len != (mask->option_len & full_mask.option_len))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Geneve TLV opt class/type/length masks must be full");
+	/* Check if length is supported. */
+	if ((uint32_t)spec->option_len >
+			config->hca_attr.max_geneve_tlv_option_data_len)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Geneve TLV opt length not supported");
+	if (config->hca_attr.max_geneve_tlv_options > 1)
+		DRV_LOG(DEBUG,
+			"max_geneve_tlv_options supports more than 1 option");
+	/* Check GENEVE item preceding. */
+	if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Geneve opt item must be preceded by a Geneve item");
+	geneve_spec = geneve_item->spec;
+	geneve_mask = geneve_item->mask ?
geneve_item->mask : + &rte_flow_item_geneve_mask; + /* Check if GENEVE TLV option size doesn't exceed option length */ + if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 || + geneve_spec->ver_opt_len_o_c_rsvd0)) { + tlv_option_len = spec->option_len & mask->option_len; + optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0); + optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v); + optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0); + optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m); + if ((optlen_v & optlen_m) <= tlv_option_len) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "GENEVE TLV option length exceeds optlen"); + } + /* Check if length is 0 or data is 0. */ + if (spec->data == NULL || spec->option_len == 0) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt with zero data/length not supported"); + /* Check not all data & mask are 0. */ + data_len = spec->option_len * 4; + if (mask->data == NULL) { + for (i = 0; i < data_len; i++) + if (spec->data[i]) + break; + if (i == data_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't match on Geneve option data 0"); + } else { + for (i = 0; i < data_len; i++) + if (spec->data[i] & mask->data[i]) + break; + if (i == data_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't match on Geneve option data and mask 0"); + /* Check data mask supported. */ + for (i = data_max_supported; i < data_len ; i++) + if (mask->data[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Data mask is of unsupported size"); + } + /* Check GENEVE option is supported in NIC. */ + if (!config->hca_attr.geneve_tlv_opt) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve TLV opt not supported"); + /* Check if we already have geneve option with different type/class. */ + rte_spinlock_lock(&sh->geneve_tlv_opt_sl); + geneve_opt_resource = sh->geneve_tlv_option_resource; + if (geneve_opt_resource != NULL) + if (geneve_opt_resource->option_class != spec->option_class || + geneve_opt_resource->option_type != spec->option_type || + geneve_opt_resource->length != spec->option_len) { + rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Only one Geneve TLV option supported"); + } + rte_spinlock_unlock(&sh->geneve_tlv_opt_sl); + return 0; +} + /** * Validate MPLS item. 
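/*
 * Illustrative sketch (not part of the patch): the optlen comparison in
 * the GENEVE TLV option validator above. The first 16 bits of the GENEVE
 * header are Ver(2) | OptLen(6) | O(1) | C(1) | Rsvd(6); OptLen counts
 * 4-byte words of all options, and a single option of L data words
 * occupies L + 1 words once its own 4-byte TLV header is included, so
 * OptLen must be strictly greater than L. The bit extraction below is an
 * assumption mirroring what MLX5_GENEVE_OPTLEN_VAL presumably does.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int
geneve_hdr_optlen(uint16_t ver_opt_len_o_c_rsvd0_host)
{
	return (ver_opt_len_o_c_rsvd0_host >> 8) & 0x3f; /* 4-byte words */
}

int
main(void)
{
	/* Ver 0, header OptLen 2 words; option claims 2 data words. */
	uint16_t first_word = (0u << 14) | (2u << 8);
	unsigned int tlv_option_len = 2;

	if (geneve_hdr_optlen(first_word) <= tlv_option_len)
		printf("reject: TLV option length exceeds optlen\n");
	else
		printf("accept\n");
	return 0;
}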
* @@ -3255,16 +3364,28 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) static const struct rte_flow_action_rss* flow_get_rss_action(const struct rte_flow_action actions[]) { + const struct rte_flow_action_rss *rss = NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_RSS: - return (const struct rte_flow_action_rss *) - actions->conf; + rss = actions->conf; + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: + { + const struct rte_flow_action_sample *sample = + actions->conf; + const struct rte_flow_action *act = sample->actions; + for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) + if (act->type == RTE_FLOW_ACTION_TYPE_RSS) + rss = act->conf; + break; + } default: break; } } - return NULL; + return rss; } /** @@ -3629,7 +3750,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, if (queue == NULL) return 0; conf = mlx5_rxq_get_hairpin_conf(dev, queue->index); - if (conf != NULL && !!conf->tx_explicit) + if (conf == NULL || conf->tx_explicit != 0) return 0; queue_action = 1; action_n++; @@ -3639,7 +3760,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, if (rss == NULL || rss->queue_num == 0) return 0; conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]); - if (conf != NULL && !!conf->tx_explicit) + if (conf == NULL || conf->tx_explicit != 0) return 0; queue_action = 1; action_n++; @@ -3654,9 +3775,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) split++; action_n++; break; @@ -3682,9 +3801,20 @@ static void flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, uint32_t flow_idx); -struct mlx5_hlist_entry * -flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, - void *cb_ctx) +int +flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key, void *cb_ctx __rte_unused) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = + container_of(entry, typeof(*mcp_res), hlist_ent); + + return mcp_res->mark_id != key; +} + +struct mlx5_hlist_entry * +flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, + void *cb_ctx) { struct rte_eth_dev *dev = list->ctx; struct mlx5_priv *priv = dev->data->dev_private; @@ -3759,7 +3889,7 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, }; } else { /* Default rule, wildcard match. */ - attr.priority = MLX5_FLOW_PRIO_RSVD; + attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR; items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END, }; @@ -3783,6 +3913,7 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, return NULL; } mcp_res->idx = idx; + mcp_res->mark_id = mark_id; /* * The copy Flows are not included in any list. 
There * ones are referenced from other Flows and can not @@ -4091,9 +4222,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) { memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; @@ -4106,9 +4235,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: raw_decap = actions->conf; - if (raw_decap->size < - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { + if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) { memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; @@ -4159,142 +4286,6 @@ flow_hairpin_split(struct rte_eth_dev *dev, return 0; } -__extension__ -union tunnel_offload_mark { - uint32_t val; - struct { - uint32_t app_reserve:8; - uint32_t table_id:15; - uint32_t transfer:1; - uint32_t _unused_:8; - }; -}; - -struct tunnel_default_miss_ctx { - uint16_t *queue; - __extension__ - union { - struct rte_flow_action_rss action_rss; - struct rte_flow_action_queue miss_queue; - struct rte_flow_action_jump miss_jump; - uint8_t raw[0]; - }; -}; - -static int -flow_tunnel_add_default_miss(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_attr *attr, - const struct rte_flow_action *app_actions, - uint32_t flow_idx, - struct tunnel_default_miss_ctx *ctx, - struct rte_flow_error *error) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow *dev_flow; - struct rte_flow_attr miss_attr = *attr; - const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf; - const struct rte_flow_item miss_items[2] = { - { - .type = RTE_FLOW_ITEM_TYPE_ETH, - .spec = NULL, - .last = NULL, - .mask = NULL - }, - { - .type = RTE_FLOW_ITEM_TYPE_END, - .spec = NULL, - .last = NULL, - .mask = NULL - } - }; - union tunnel_offload_mark mark_id; - struct rte_flow_action_mark miss_mark; - struct rte_flow_action miss_actions[3] = { - [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark }, - [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL } - }; - const struct rte_flow_action_jump *jump_data; - uint32_t i, flow_table = 0; /* prevent compilation warning */ - struct flow_grp_info grp_info = { - .external = 1, - .transfer = attr->transfer, - .fdb_def_rule = !!priv->fdb_def_rule, - .std_tbl_fix = 0, - }; - int ret; - - if (!attr->transfer) { - uint32_t q_size; - - miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS; - q_size = priv->reta_idx_n * sizeof(ctx->queue[0]); - ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size, - 0, SOCKET_ID_ANY); - if (!ctx->queue) - return rte_flow_error_set - (error, ENOMEM, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid default miss RSS"); - ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT, - ctx->action_rss.level = 0, - ctx->action_rss.types = priv->rss_conf.rss_hf, - ctx->action_rss.key_len = priv->rss_conf.rss_key_len, - ctx->action_rss.queue_num = priv->reta_idx_n, - ctx->action_rss.key = priv->rss_conf.rss_key, - ctx->action_rss.queue = ctx->queue; - if (!priv->reta_idx_n || !priv->rxqs_n) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid port configuration"); - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) - ctx->action_rss.types = 0; - for (i = 0; i != priv->reta_idx_n; ++i) - ctx->queue[i] = (*priv->reta_idx)[i]; - } else { 
- miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP; - ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP; - } - miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw; - for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++); - jump_data = app_actions->conf; - miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY; - miss_attr.group = jump_data->group; - ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group, - &flow_table, grp_info, error); - if (ret) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid tunnel id"); - mark_id.app_reserve = 0; - mark_id.table_id = tunnel_flow_tbl_to_id(flow_table); - mark_id.transfer = !!attr->transfer; - mark_id._unused_ = 0; - miss_mark.id = mark_id.val; - dev_flow = flow_drv_prepare(dev, flow, &miss_attr, - miss_items, miss_actions, flow_idx, error); - if (!dev_flow) - return -rte_errno; - dev_flow->flow = flow; - dev_flow->external = true; - dev_flow->tunnel = tunnel; - /* Subflow object was created, we must include one in the list. */ - SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, - dev_flow->handle, next); - DRV_LOG(DEBUG, - "port %u tunnel type=%d id=%u miss rule priority=%u group=%u", - dev->data->port_id, tunnel->app_tunnel.type, - tunnel->tunnel_id, miss_attr.priority, miss_attr.group); - ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items, - miss_actions, error); - if (!ret) - ret = flow_mreg_update_copy_table(dev, flow, miss_actions, - error); - - return ret; -} - /** * The last stage of splitting chain, just creates the subflow * without any modification. @@ -4701,6 +4692,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Pointer to the position of the matched action if exists, otherwise is -1. * @param[out] qrss_action_pos * Pointer to the position of the Queue/RSS action if exists, otherwise is -1. + * @param[out] modify_after_mirror + * Pointer to the flag of modify action after FDB mirroring. * * @return * > 0 the total number of actions. 
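/*
 * Illustrative sketch (not part of the patch): the scan performed by
 * flow_check_match_action() below. On a transfer rule, a SAMPLE action
 * with ratio == 1 is a pure FDB mirror, and any packet-modifying action
 * that follows it has to be pushed past the mirror into a suffix flow.
 * A simplified standalone model of that ordering check:
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_act { ACT_MIRROR, ACT_SET_IPV4_SRC, ACT_QUEUE, ACT_END };

static bool
demo_modify_after_mirror(const enum demo_act *acts)
{
	bool mirror_seen = false;

	for (; *acts != ACT_END; acts++) {
		if (*acts == ACT_MIRROR)
			mirror_seen = true;
		else if (*acts == ACT_SET_IPV4_SRC && mirror_seen)
			return true; /* needs the jump/suffix split */
	}
	return false;
}

int
main(void)
{
	static const enum demo_act acts[] = {
		ACT_MIRROR, ACT_SET_IPV4_SRC, ACT_END
	};

	printf("modify after mirror: %s\n",
	       demo_modify_after_mirror(acts) ? "yes" : "no");
	return 0;
}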
@@ -4710,14 +4703,15 @@ static int flow_check_match_action(const struct rte_flow_action actions[], const struct rte_flow_attr *attr, enum rte_flow_action_type action, - int *match_action_pos, int *qrss_action_pos) + int *match_action_pos, int *qrss_action_pos, + int *modify_after_mirror) { const struct rte_flow_action_sample *sample; int actions_n = 0; - int jump_flag = 0; uint32_t ratio = 0; int sub_type = 0; int flag = 0; + int fdb_mirror = 0; *match_action_pos = -1; *qrss_action_pos = -1; @@ -4726,27 +4720,53 @@ flow_check_match_action(const struct rte_flow_action actions[], flag = 1; *match_action_pos = actions_n; } - if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE || - actions->type == RTE_FLOW_ACTION_TYPE_RSS) + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + case RTE_FLOW_ACTION_TYPE_RSS: *qrss_action_pos = actions_n; - if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP) - jump_flag = 1; - if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) { + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: sample = actions->conf; ratio = sample->ratio; sub_type = ((const struct rte_flow_action *) (sample->actions))->type; + if (ratio == 1 && attr->transfer) + fdb_mirror = 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + case RTE_FLOW_ACTION_TYPE_SET_TTL: + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + case RTE_FLOW_ACTION_TYPE_FLAG: + case RTE_FLOW_ACTION_TYPE_MARK: + case RTE_FLOW_ACTION_TYPE_SET_META: + case RTE_FLOW_ACTION_TYPE_SET_TAG: + if (fdb_mirror) + *modify_after_mirror = 1; + break; + default: + break; } actions_n++; } - if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) { - if (ratio == 1) { - /* JUMP Action not support for Mirroring; - * Mirroring support multi-destination; - */ - if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END) - flag = 0; - } + if (flag && fdb_mirror && !*modify_after_mirror) { + /* FDB mirroring uses the destination array to implement + * instead of FLOW_SAMPLER object. + */ + if (sub_type != RTE_FLOW_ACTION_TYPE_END) + flag = 0; } /* Count RTE_FLOW_ACTION_TYPE_END. */ return flag ? actions_n + 1 : 0; @@ -4765,8 +4785,8 @@ flow_check_match_action(const struct rte_flow_action actions[], * * @param dev * Pointer to Ethernet device. - * @param[in] fdb_tx - * FDB egress flow flag. + * @param[in] add_tag + * Add extra tag action flag. * @param[out] sfx_items * Suffix flow match items (list terminated by the END pattern item). * @param[in] actions @@ -4781,6 +4801,8 @@ flow_check_match_action(const struct rte_flow_action actions[], * The sample action position. * @param[in] qrss_action_pos * The Queue/RSS action position. + * @param[in] jump_table + * Add extra jump action flag. * @param[out] error * Perform verbose error reporting if not NULL. 
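/*
 * Illustrative sketch (not part of the patch): the copy layout produced
 * by flow_sample_split_prep() below. Actions before SAMPLE land in the
 * prefix flow, a Queue/RSS action that precedes SAMPLE is moved to the
 * suffix flow, and SAMPLE terminates the prefix. The extra tag/jump
 * actions the real code appends are omitted from this simplified model.
 */
#include <stdio.h>

static void
demo_split(const char *const acts[], int n, int sample_pos, int qrss_pos)
{
	int i;

	printf("prefix:");
	for (i = 0; i < n; i++)
		if (i != qrss_pos && i <= sample_pos)
			printf(" %s", acts[i]);
	printf("\nsuffix:");
	for (i = 0; i < n; i++)
		if (i == qrss_pos || i > sample_pos)
			printf(" %s", acts[i]);
	printf("\n");
}

int
main(void)
{
	static const char *const acts[] = { "mark", "queue", "sample" };

	/* Queue at 1 precedes SAMPLE at 2: queue moves to the suffix. */
	demo_split(acts, 3, 2, 1);
	return 0;
}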
* @@ -4790,7 +4812,7 @@ flow_check_match_action(const struct rte_flow_action actions[], */ static int flow_sample_split_prep(struct rte_eth_dev *dev, - uint32_t fdb_tx, + int add_tag, struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], @@ -4798,14 +4820,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, int actions_n, int sample_action_pos, int qrss_action_pos, + int jump_table, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; struct mlx5_rte_flow_item_tag *tag_spec; struct mlx5_rte_flow_item_tag *tag_mask; + struct rte_flow_action_jump *jump_action; uint32_t tag_id = 0; int index; + int append_index = 0; int ret; if (sample_action_pos < 0) @@ -4813,9 +4838,37 @@ flow_sample_split_prep(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "invalid position of sample " "action in list"); - if (!fdb_tx) { + /* Prepare the actions for prefix and suffix flow. */ + if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { + index = qrss_action_pos; + /* Put the preceding the Queue/RSS action into prefix flow. */ + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + /* Put others preceding the sample action into prefix flow. */ + if (sample_action_pos > index + 1) + memcpy(actions_pre + index, actions + index + 1, + sizeof(struct rte_flow_action) * + (sample_action_pos - index - 1)); + index = sample_action_pos - 1; + /* Put Queue/RSS action into Suffix flow. */ + memcpy(actions_sfx, actions + qrss_action_pos, + sizeof(struct rte_flow_action)); + actions_sfx++; + } else { + index = sample_action_pos; + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + } + /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress. + * For CX6DX and above, metadata registers Cx preserve their value, + * add an extra tag action for NIC-RX and E-Switch Domain. + */ + if (add_tag) { /* Prepare the prefix tag action. */ - set_tag = (void *)(actions_pre + actions_n + 1); + append_index++; + set_tag = (void *)(actions_pre + actions_n + append_index); ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); if (ret < 0) return ret; @@ -4840,32 +4893,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, .type = (enum rte_flow_item_type) RTE_FLOW_ITEM_TYPE_END, }; - } - /* Prepare the actions for prefix and suffix flow. */ - if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { - index = qrss_action_pos; - /* Put the preceding the Queue/RSS action into prefix flow. */ - if (index != 0) - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action) * index); - /* Put others preceding the sample action into prefix flow. */ - if (sample_action_pos > index + 1) - memcpy(actions_pre + index, actions + index + 1, - sizeof(struct rte_flow_action) * - (sample_action_pos - index - 1)); - index = sample_action_pos - 1; - /* Put Queue/RSS action into Suffix flow. */ - memcpy(actions_sfx, actions + qrss_action_pos, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - index = sample_action_pos; - if (index != 0) - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action) * index); - } - /* Add the extra tag action for NIC-RX and E-Switch ingress. */ - if (!fdb_tx) { + /* Prepare the tag action in prefix subflow. 
*/ actions_pre[index++] = (struct rte_flow_action){ .type = (enum rte_flow_action_type) @@ -4876,6 +4904,22 @@ flow_sample_split_prep(struct rte_eth_dev *dev, memcpy(actions_pre + index, actions + sample_action_pos, sizeof(struct rte_flow_action)); index += 1; + /* For the modify action after the sample action in E-Switch mirroring, + * Add the extra jump action in prefix subflow and jump into the next + * table, then do the modify action in the new table. + */ + if (jump_table) { + /* Prepare the prefix jump action. */ + append_index++; + jump_action = (void *)(actions_pre + actions_n + append_index); + jump_action->group = jump_table; + actions_pre[index++] = + (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + RTE_FLOW_ACTION_TYPE_JUMP, + .conf = jump_action, + }; + } actions_pre[index] = (struct rte_flow_action){ .type = (enum rte_flow_action_type) RTE_FLOW_ACTION_TYPE_END, @@ -5270,7 +5314,6 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; - union mlx5_flow_tbl_key sfx_table_key; #endif size_t act_size; size_t item_size; @@ -5279,12 +5322,17 @@ flow_create_split_sample(struct rte_eth_dev *dev, int actions_n = 0; int sample_action_pos; int qrss_action_pos; + int add_tag = 0; + int modify_after_mirror = 0; + uint16_t jump_table = 0; + const uint32_t next_ft_step = 1; int ret = 0; if (priv->sampler_en) actions_n = flow_check_match_action(actions, attr, RTE_FLOW_ACTION_TYPE_SAMPLE, - &sample_action_pos, &qrss_action_pos); + &sample_action_pos, &qrss_action_pos, + &modify_after_mirror); if (actions_n) { /* The prefix actions must includes sample, tag, end. */ act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1) @@ -5300,19 +5348,31 @@ flow_create_split_sample(struct rte_eth_dev *dev, "sample flow"); /* The representor_id is -1 for uplink. */ fdb_tx = (attr->transfer && priv->representor_id != -1); - if (!fdb_tx) + /* + * When reg_c_preserve is set, metadata registers Cx preserve + * their value even through packet duplication. + */ + add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve); + if (add_tag) sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); + if (modify_after_mirror) + jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR + + next_ft_step; pre_actions = sfx_actions + actions_n; - tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items, + tag_id = flow_sample_split_prep(dev, add_tag, sfx_items, actions, sfx_actions, pre_actions, actions_n, sample_action_pos, - qrss_action_pos, error); - if (tag_id < 0 || (!fdb_tx && !tag_id)) { + qrss_action_pos, jump_table, + error); + if (tag_id < 0 || (add_tag && !tag_id)) { ret = -rte_errno; goto exit; } + if (modify_after_mirror) + flow_split_info->skip_scale = + 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; /* Add the prefix subflow. */ ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, pre_actions, @@ -5323,24 +5383,30 @@ flow_create_split_sample(struct rte_eth_dev *dev, } dev_flow->handle->split_flow_id = tag_id; #ifdef HAVE_IBV_FLOW_DV_SUPPORT - /* Set the sfx group attr. */ - sample_res = (struct mlx5_flow_dv_sample_resource *) - dev_flow->dv.sample_res; - sfx_tbl = (struct mlx5_flow_tbl_resource *) - sample_res->normal_path_tbl; - sfx_tbl_data = container_of(sfx_tbl, - struct mlx5_flow_tbl_data_entry, tbl); - sfx_table_key.v64 = sfx_tbl_data->entry.key; - sfx_attr.group = sfx_attr.transfer ? 
- (sfx_table_key.table_id - 1) : - sfx_table_key.table_id; + if (!modify_after_mirror) { + /* Set the sfx group attr. */ + sample_res = (struct mlx5_flow_dv_sample_resource *) + dev_flow->dv.sample_res; + sfx_tbl = (struct mlx5_flow_tbl_resource *) + sample_res->normal_path_tbl; + sfx_tbl_data = container_of(sfx_tbl, + struct mlx5_flow_tbl_data_entry, + tbl); + sfx_attr.group = sfx_attr.transfer ? + (sfx_tbl_data->table_id - 1) : + sfx_tbl_data->table_id; + } else { + MLX5_ASSERT(attr->transfer); + sfx_attr.group = jump_table; + } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); flow_split_info->prefix_mark = dev_flow->handle->mark; /* Suffix group level already be scaled with factor, set - * skip_scale to 1 to avoid scale again in translation. + * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale + * again in translation. */ - flow_split_info->skip_scale = 1; + flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT; #endif } /* Add the suffix subflow. */ @@ -5491,7 +5557,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; struct mlx5_flow *dev_flow; - const struct rte_flow_action_rss *rss; + const struct rte_flow_action_rss *rss = NULL; struct mlx5_translated_shared_action shared_actions[MLX5_MAX_SHARED_ACTIONS]; int shared_actions_n = MLX5_MAX_SHARED_ACTIONS; @@ -5569,7 +5635,9 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); - rss = flow_get_rss_action(p_actions_rx); + /* RSS Action only works on NIC RX domain */ + if (attr->ingress && !attr->transfer) + rss = flow_get_rss_action(p_actions_rx); if (rss) { if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) return 0; @@ -5669,7 +5737,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, */ if (external || dev->data->dev_started || (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP && - attr->priority == MLX5_FLOW_PRIO_RSVD)) { + attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) goto error; @@ -5868,11 +5936,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; - rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl); tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id); RTE_VERIFY(tunnel); - LIST_REMOVE(tunnel, chain); - rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl); if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) mlx5_flow_tunnel_free(dev, tunnel); } @@ -5938,7 +6003,7 @@ mlx5_flow_start_default(struct rte_eth_dev *dev) /** * Release key of thread specific flow workspace data. */ -static void +void flow_release_workspace(void *data) { struct mlx5_flow_workspace *wks = data; @@ -5952,16 +6017,6 @@ flow_release_workspace(void *data) } } -/** - * Initialize key of thread specific flow workspace data. - */ -static void -flow_alloc_workspace(void) -{ - if (pthread_key_create(&key_workspace, flow_release_workspace)) - DRV_LOG(ERR, "Can't create flow workspace data thread key."); -} - /** * Get thread specific current flow workspace. 
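/*
 * Illustrative sketch (not part of the patch): the push/pop protocol the
 * workspace helpers below implement, with the OS-specific TLS accessors
 * (mlx5_flow_os_{get,set}_specific_workspace) modeled by a plain C11
 * _Thread_local pointer:
 */
#include <stdio.h>

struct demo_wks {
	struct demo_wks *prev;
	int inuse;
};

static _Thread_local struct demo_wks *demo_cur;

static void
demo_push(struct demo_wks *wks)
{
	wks->prev = demo_cur;
	wks->inuse = 1;
	demo_cur = wks; /* stands in for ..._set_specific_workspace() */
}

static void
demo_pop(void)
{
	struct demo_wks *wks = demo_cur;

	wks->inuse = 0;
	demo_cur = wks->prev;
}

int
main(void)
{
	struct demo_wks outer = { 0 }, inner = { 0 };

	demo_push(&outer);
	demo_push(&inner); /* nested flow creation stacks a workspace */
	demo_pop();
	printf("current is outer: %d\n", demo_cur == &outer);
	demo_pop();
	return 0;
}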
* @@ -5972,7 +6027,7 @@ mlx5_flow_get_thread_workspace(void) { struct mlx5_flow_workspace *data; - data = pthread_getspecific(key_workspace); + data = mlx5_flow_os_get_specific_workspace(); MLX5_ASSERT(data && data->inuse); if (!data || !data->inuse) DRV_LOG(ERR, "flow workspace not initialized."); @@ -6020,11 +6075,7 @@ mlx5_flow_push_thread_workspace(void) struct mlx5_flow_workspace *curr; struct mlx5_flow_workspace *data; - if (pthread_once(&key_workspace_init, flow_alloc_workspace)) { - DRV_LOG(ERR, "Failed to init flow workspace data thread key."); - return NULL; - } - curr = pthread_getspecific(key_workspace); + curr = mlx5_flow_os_get_specific_workspace(); if (!curr) { data = flow_alloc_thread_workspace(); if (!data) @@ -6043,7 +6094,7 @@ mlx5_flow_push_thread_workspace(void) data->inuse = 1; data->flow_idx = 0; /* Set as current workspace */ - if (pthread_setspecific(key_workspace, data)) + if (mlx5_flow_os_set_specific_workspace(data)) DRV_LOG(ERR, "Failed to set flow workspace to thread."); return data; } @@ -6069,7 +6120,7 @@ mlx5_flow_pop_thread_workspace(void) data->inuse = 0; if (!data->prev) return; - if (pthread_setspecific(key_workspace, data->prev)) + if (mlx5_flow_os_set_specific_workspace(data->prev)) DRV_LOG(ERR, "Failed to set flow workspace to thread."); } @@ -6186,7 +6237,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_FLOW_PRIO_RSVD, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, }; struct rte_flow_item items[] = { { @@ -6375,9 +6426,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, } priv->isolated = !!enable; if (enable) - dev->dev_ops = &mlx5_os_dev_ops_isolate; + dev->dev_ops = &mlx5_dev_ops_isolate; else - dev->dev_ops = &mlx5_os_dev_ops; + dev->dev_ops = &mlx5_dev_ops; dev->rx_descriptor_status = mlx5_rx_descriptor_status; dev->tx_descriptor_status = mlx5_tx_descriptor_status; @@ -6686,7 +6737,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, + mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size, IBV_ACCESS_LOCAL_WRITE); if (!mem_mng->umem) { rte_errno = errno; @@ -6705,7 +6756,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); if (!mem_mng->dm) { - mlx5_glue->devx_umem_dereg(mem_mng->umem); + mlx5_os_umem_dereg(mem_mng->umem); rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -6952,104 +7003,13 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh, sh->cmng.pending_queries--; } -static const struct mlx5_flow_tbl_data_entry * -tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_hlist_entry *he; - union tunnel_offload_mark mbits = { .val = mark }; - union mlx5_flow_tbl_key table_key = { - { - .table_id = tunnel_id_to_flow_tbl(mbits.table_id), - .dummy = 0, - .domain = !!mbits.transfer, - .direction = 0, - } - }; - he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - return he ? 
- container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; -} - -static void -mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, - struct mlx5_hlist_entry *entry) +static int +flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table, + const struct flow_grp_info *grp_info, + struct rte_flow_error *error) { - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); - - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tunnel_flow_tbl_to_id(tte->flow_table)); - mlx5_free(tte); -} - -static struct mlx5_hlist_entry * -mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, - uint64_t key __rte_unused, - void *ctx __rte_unused) -{ - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte; - - tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, - sizeof(*tte), 0, - SOCKET_ID_ANY); - if (!tte) - goto err; - mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - &tte->flow_table); - if (tte->flow_table >= MLX5_MAX_TABLES) { - DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", - tte->flow_table); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tte->flow_table); - goto err; - } else if (!tte->flow_table) { - goto err; - } - tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); - return &tte->hash; -err: - if (tte) - mlx5_free(tte); - return NULL; -} - -static uint32_t -tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, - const struct mlx5_flow_tunnel *tunnel, - uint32_t group, uint32_t *table, - struct rte_flow_error *error) -{ - struct mlx5_hlist_entry *he; - struct tunnel_tbl_entry *tte; - union tunnel_tbl_key key = { - .tunnel_id = tunnel ? tunnel->tunnel_id : 0, - .group = group - }; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_hlist *group_hash; - - group_hash = tunnel ? 
tunnel->groups : thub->groups; - he = mlx5_hlist_register(group_hash, key.val, NULL); - if (!he) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "tunnel group index not supported"); - tte = container_of(he, typeof(*tte), hash); - *table = tte->flow_table; - DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", - dev->data->port_id, key.tunnel_id, group, *table); - return 0; -} - -static int -flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table, - struct flow_grp_info grp_info, struct rte_flow_error *error) -{ - if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) { + if (grp_info->transfer && grp_info->external && + grp_info->fdb_def_rule) { if (group == UINT32_MAX) return rte_flow_error_set (error, EINVAL, @@ -7106,25 +7066,25 @@ int mlx5_flow_group_to_table(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, uint32_t group, uint32_t *table, - struct flow_grp_info grp_info, + const struct flow_grp_info *grp_info, struct rte_flow_error *error) { int ret; bool standard_translation; - if (!grp_info.skip_scale && grp_info.external && + if (!grp_info->skip_scale && grp_info->external && group < MLX5_MAX_TABLES_EXTERNAL) group *= MLX5_FLOW_TABLE_FACTOR; if (is_tunnel_offload_active(dev)) { - standard_translation = !grp_info.external || - grp_info.std_tbl_fix; + standard_translation = !grp_info->external || + grp_info->std_tbl_fix; } else { standard_translation = true; } DRV_LOG(DEBUG, - "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s", - dev->data->port_id, group, grp_info.transfer, - grp_info.external, grp_info.fdb_def_rule, + "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s", + dev->data->port_id, group, grp_info->transfer, + grp_info->external, grp_info->fdb_def_rule, standard_translation ? 
"STANDARD" : "TUNNEL"); if (standard_translation) ret = flow_group_to_table(dev->data->port_id, group, table, @@ -7162,7 +7122,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) for (idx = REG_C_2; idx <= REG_C_7; ++idx) { struct rte_flow_attr attr = { .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, - .priority = MLX5_FLOW_PRIO_RSVD, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, .ingress = 1, }; struct rte_flow_item items[] = { @@ -7504,172 +7464,6 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev) return ret; } -static void -mlx5_flow_tunnel_free(struct rte_eth_dev *dev, - struct mlx5_flow_tunnel *tunnel) -{ - struct mlx5_priv *priv = dev->data->dev_private; - - DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", - dev->data->port_id, tunnel->tunnel_id); - RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], - tunnel->tunnel_id); - mlx5_hlist_destroy(tunnel->groups); - mlx5_free(tunnel); -} - -static struct mlx5_flow_tunnel * -mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) -{ - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (tun->tunnel_id == id) - break; - } - - return tun; -} - -static struct mlx5_flow_tunnel * -mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, - const struct rte_flow_tunnel *app_tunnel) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_tunnel *tunnel; - uint32_t id; - - mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], - &id); - if (id >= MLX5_MAX_TUNNELS) { - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); - DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); - return NULL; - } else if (!id) { - return NULL; - } - /** - * mlx5 flow tunnel is an auxlilary data structure - * It's not part of IO. 
No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
-	}
-	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
-					   mlx5_flow_tunnel_grp2tbl_create_cb,
-					   NULL,
-					   mlx5_flow_tunnel_grp2tbl_remove_cb);
-	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
-		return NULL;
-	}
-	tunnel->groups->ctx = priv->sh;
-	/* initiate new PMD tunnel */
-	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
-	tunnel->tunnel_id = id;
-	tunnel->action.type = (typeof(tunnel->action.type))
-			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
-	tunnel->action.conf = tunnel;
-	tunnel->item.type = (typeof(tunnel->item.type))
-			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
-	tunnel->item.spec = tunnel;
-	tunnel->item.last = NULL;
-	tunnel->item.mask = NULL;
-
-	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-
-	return tunnel;
-}
-
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel)
-{
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
-
-	return ret;
-}
-
-void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
-{
-	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
-
-	if (!thub)
-		return;
-	if (!LIST_EMPTY(&thub->tunnels))
-		DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
-	mlx5_hlist_destroy(thub->groups);
-	mlx5_free(thub);
-}
-
-int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
-{
-	int err;
-	struct mlx5_flow_tunnel_hub *thub;
-
-	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
-			   0, SOCKET_ID_ANY);
-	if (!thub)
-		return -ENOMEM;
-	LIST_INIT(&thub->tunnels);
-	rte_spinlock_init(&thub->sl);
-	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
-					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
-					 NULL,
-					 mlx5_flow_tunnel_grp2tbl_remove_cb);
-	if (!thub->groups) {
-		err = -rte_errno;
-		goto err;
-	}
-	thub->groups->ctx = sh;
-	sh->tunnel_hub = thub;
-
-	return 0;
-
-err:
-	if (thub->groups)
-		mlx5_hlist_destroy(thub->groups);
-	if (thub)
-		mlx5_free(thub);
-	return err;
-}
-
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -7690,3 +7484,773 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 		ret = -ret;
 	return ret;
 }
+
+/**
+ * tunnel offload functionality is defined for DV environment only
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct
rte_eth_dev *, struct mlx5_flow_tunnel *, void *), + void (*miss)(struct rte_eth_dev *, void *), + void *ctx, bool lock_op); + +static int +flow_tunnel_add_default_miss(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_action *app_actions, + uint32_t flow_idx, + struct tunnel_default_miss_ctx *ctx, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow *dev_flow; + struct rte_flow_attr miss_attr = *attr; + const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf; + const struct rte_flow_item miss_items[2] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = NULL, + .last = NULL, + .mask = NULL + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + .spec = NULL, + .last = NULL, + .mask = NULL + } + }; + union tunnel_offload_mark mark_id; + struct rte_flow_action_mark miss_mark; + struct rte_flow_action miss_actions[3] = { + [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark }, + [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL } + }; + const struct rte_flow_action_jump *jump_data; + uint32_t i, flow_table = 0; /* prevent compilation warning */ + struct flow_grp_info grp_info = { + .external = 1, + .transfer = attr->transfer, + .fdb_def_rule = !!priv->fdb_def_rule, + .std_tbl_fix = 0, + }; + int ret; + + if (!attr->transfer) { + uint32_t q_size; + + miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS; + q_size = priv->reta_idx_n * sizeof(ctx->queue[0]); + ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size, + 0, SOCKET_ID_ANY); + if (!ctx->queue) + return rte_flow_error_set + (error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid default miss RSS"); + ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT, + ctx->action_rss.level = 0, + ctx->action_rss.types = priv->rss_conf.rss_hf, + ctx->action_rss.key_len = priv->rss_conf.rss_key_len, + ctx->action_rss.queue_num = priv->reta_idx_n, + ctx->action_rss.key = priv->rss_conf.rss_key, + ctx->action_rss.queue = ctx->queue; + if (!priv->reta_idx_n || !priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid port configuration"); + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + ctx->action_rss.types = 0; + for (i = 0; i != priv->reta_idx_n; ++i) + ctx->queue[i] = (*priv->reta_idx)[i]; + } else { + miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP; + ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP; + } + miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw; + for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++); + jump_data = app_actions->conf; + miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY; + miss_attr.group = jump_data->group; + ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group, + &flow_table, &grp_info, error); + if (ret) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "invalid tunnel id"); + mark_id.app_reserve = 0; + mark_id.table_id = tunnel_flow_tbl_to_id(flow_table); + mark_id.transfer = !!attr->transfer; + mark_id._unused_ = 0; + miss_mark.id = mark_id.val; + dev_flow = flow_drv_prepare(dev, flow, &miss_attr, + miss_items, miss_actions, flow_idx, error); + if (!dev_flow) + return -rte_errno; + dev_flow->flow = flow; + dev_flow->external = true; + dev_flow->tunnel = tunnel; + /* Subflow object was created, we must include one in the list. 
+
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+				   struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			tunnel_flow_tbl_to_id(tte->flow_table));
+	mlx5_free(tte);
+}
+
+static int
+mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+				  struct mlx5_hlist_entry *entry,
+				  uint64_t key, void *cb_ctx __rte_unused)
+{
+	union tunnel_tbl_key tbl = {
+		.val = key,
+	};
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
+				   void *ctx __rte_unused)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key tbl = {
+		.val = key,
+	};
+
+	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+			  sizeof(*tte), 0,
+			  SOCKET_ID_ANY);
+	if (!tte)
+		goto err;
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			  &tte->flow_table);
+	if (tte->flow_table >= MLX5_MAX_TABLES) {
+		DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the max limit.",
+			tte->flow_table);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+				tte->flow_table);
+		goto err;
+	} else if (!tte->flow_table) {
+		goto err;
+	}
+	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+	tte->tunnel_id = tbl.tunnel_id;
+	tte->group = tbl.group;
+	return &tte->hash;
+err:
+	if (tte)
+		mlx5_free(tte);
+	return NULL;
+}
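+
+/*
+ * Each (tunnel id, application group) pair is lazily assigned a free
+ * hardware flow table id from the MLX5_IPOOL_TNL_TBL_ID pool by the
+ * create callback above; the match callback keeps one table per pair
+ * by comparing both fields against the 64-bit hash key.
+ */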
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_register(group_hash, key.val, NULL);
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+		dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	LIST_REMOVE(tunnel, chain);
+	mlx5_hlist_destroy(tunnel->groups);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
+}
+
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
+{
+	bool verdict = false;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tunnel;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
+			break;
+	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
+
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
+}
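+
+/*
+ * All tunnel database lookups funnel through the match/hit/miss helper
+ * above. With lock_op set, the hit and miss callbacks run while the hub
+ * spinlock is still held, so they may update reference counts or the
+ * tunnel list atomically; without it the lock only protects the list
+ * walk itself.
+ */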
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+			  const struct rte_flow_tunnel *app_tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
+	struct mlx5_flow_tunnel *tunnel;
+	uint32_t id;
+
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
+	if (id >= MLX5_MAX_TUNNELS) {
+		mlx5_ipool_free(ipool, id);
+		DRV_LOG(ERR, "Tunnel ID %d exceeds the max limit.", id);
+		return NULL;
+	}
+	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+					   mlx5_flow_tunnel_grp2tbl_create_cb,
+					   mlx5_flow_tunnel_grp2tbl_match_cb,
+					   mlx5_flow_tunnel_grp2tbl_remove_cb);
+	if (!tunnel->groups) {
+		mlx5_ipool_free(ipool, id);
+		return NULL;
+	}
+	tunnel->groups->ctx = priv->sh;
+	/* initiate new PMD tunnel */
+	memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
+	tunnel->tunnel_id = id;
+	tunnel->action.type = (typeof(tunnel->action.type))
+			      MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
+	tunnel->action.conf = tunnel;
+	tunnel->item.type = (typeof(tunnel->item.type))
+			    MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
+	tunnel->item.spec = tunnel;
+	tunnel->item.last = NULL;
+	tunnel->item.mask = NULL;
+
+	DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+
+	return tunnel;
+}
+
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel) {
+		ctx->tunnel->refctn = 1;
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+	}
+}
+
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+		     const struct rte_flow_tunnel *app_tunnel,
+		     struct mlx5_flow_tunnel **tunnel)
+{
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
+}
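+
+/*
+ * Tunnel reference counting: a tunnel is born with refctn 1 in the miss
+ * callback, each additional decap-set or match request that hits an
+ * existing tunnel bumps it, and the release helpers below drop it,
+ * freeing the tunnel when the count reaches zero.
+ */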
+
+void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
+{
+	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
+
+	if (!thub)
+		return;
+	if (!LIST_EMPTY(&thub->tunnels))
+		DRV_LOG(WARNING, "port %u tunnels present", port_id);
+	mlx5_hlist_destroy(thub->groups);
+	mlx5_free(thub);
+}
+
+int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
+{
+	int err;
+	struct mlx5_flow_tunnel_hub *thub;
+
+	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
+			   0, SOCKET_ID_ANY);
+	if (!thub)
+		return -ENOMEM;
+	LIST_INIT(&thub->tunnels);
+	rte_spinlock_init(&thub->sl);
+	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
+					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
+					 mlx5_flow_tunnel_grp2tbl_match_cb,
+					 mlx5_flow_tunnel_grp2tbl_remove_cb);
+	if (!thub->groups) {
+		err = -rte_errno;
+		goto err;
+	}
+	thub->groups->ctx = sh;
+	sh->tunnel_hub = thub;
+
+	return 0;
+
+err:
+	if (thub->groups)
+		mlx5_hlist_destroy(thub->groups);
+	mlx5_free(thub);
+	return err;
+}
+
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  const char **err_msg)
+{
+	*err_msg = NULL;
+	if (!is_tunnel_offload_active(dev)) {
+		*err_msg = "tunnel offload was not activated";
+		goto out;
+	} else if (!tunnel) {
+		*err_msg = "no application tunnel";
+		goto out;
+	}
+
+	switch (tunnel->type) {
+	default:
+		*err_msg = "unsupported tunnel type";
+		goto out;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		break;
+	}
+
+out:
+	return !*err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+			   struct rte_flow_tunnel *app_tunnel,
+			   struct rte_flow_action **actions,
+			   uint32_t *num_of_actions,
+			   struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, -ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, -ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
+}
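+
+/*
+ * The action and item returned by the two helpers above are owned by
+ * the PMD and point back into the tunnel object itself; the release
+ * callbacks below recognize them by address, which is why a release
+ * request with more than one element is rejected.
+ */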
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if (!is_tunnel_offload_active(dev)) {
+		info->flags = 0;
+		return 0;
+	}
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
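+
+/*
+ * Everything below provides ENOTSUP stubs so that the generic flow
+ * layer still links when the build has no DV support.
+ */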
+
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
+}
+
+void
+mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
+			__rte_unused uint16_t port_id)
+{
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
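
For reference, the PMD callbacks above back the generic rte_flow tunnel
offload entry points (rte_flow_tunnel_decap_set(), rte_flow_tunnel_match(),
rte_flow_get_restore_info() and the release helpers). The snippet below is a
minimal application-side sketch, not part of the patch: it assumes a started
port with tunnel offload active (e.g. dv_xmeta_en=3 on mlx5), uses group 1 as
a hypothetical target group for decapsulated traffic, and trims error
handling for brevity.

#include <stdint.h>
#include <rte_flow.h>

/* Offload VXLAN decap: fetch the PMD's private tunnel-set action,
 * prepend it to the application actions of a group 0 rule, then
 * release it; with the implementation above the rule creation path
 * takes its own tunnel reference, so the release here is safe.
 */
static struct rte_flow *
vxlan_decap_rule(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	};
	struct rte_flow_action *pmd_actions = NULL;
	uint32_t n_pmd_actions = 0;
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_jump jump = { .group = 1 };
	struct rte_flow_action actions[3];
	struct rte_flow *flow;

	if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
				      &n_pmd_actions, &err))
		return NULL;
	/* The mlx5 implementation above returns exactly one action. */
	actions[0] = pmd_actions[0];
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = &jump,
	};
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
					     n_pmd_actions, &err);
	return flow;
}

On the Rx path, a packet that missed in the tunnel group comes back with the
mark described earlier in its FDIR hash field, and rte_flow_get_restore_info()
recovers the original rte_flow_tunnel from it via tunnel_mark_decode().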