X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_hw.c;h=12498794a51771c526941c059271552d4088e96a;hb=7158e46cb94be6f6071ccdabfc1fa48256955906;hp=74f8ee1d6a24da850f825971390d9249530e789d;hpb=08dfff78f200bb9f077b0006ef05ab59dbc40ac8;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c index 74f8ee1d6a..12498794a5 100644 --- a/drivers/net/mlx5/mlx5_flow_hw.c +++ b/drivers/net/mlx5/mlx5_flow_hw.c @@ -7,6 +7,7 @@ #include #include "mlx5_defs.h" #include "mlx5_flow.h" +#include "mlx5_rx.h" #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) @@ -36,18 +37,480 @@ static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX] }, }; +/** + * Set rxq flag. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] enable + * Flag to enable or not. + */ +static void +flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + if ((!priv->mark_enabled && !enable) || + (priv->mark_enabled && enable)) + return; + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i); + + rxq_ctrl->rxq.mark = enable; + } + priv->mark_enabled = enable; +} + +/** + * Generate the pattern item flags. + * Will be used for shared RSS action. + * + * @param[in] items + * Pointer to the list of items. + * + * @return + * Item flags. + */ +static uint64_t +flow_hw_rss_item_flags_get(const struct rte_flow_item items[]) +{ + uint64_t item_flags = 0; + uint64_t last_item = 0; + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + last_item = MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + last_item = MLX5_FLOW_LAYER_GENEVE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + last_item = MLX5_FLOW_LAYER_MPLS; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + last_item = MLX5_FLOW_LAYER_GTP; + break; + default: + break; + } + item_flags |= last_item; + } + return item_flags; +} + +/** + * Register destination table DR jump action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] table_attr + * Pointer to the flow attributes. + * @param[in] dest_group + * The destination group ID. + * @param[out] error + * Pointer to error structure. + * + * @return + * Table on success, NULL otherwise and rte_errno is set. 
+ */ +static struct mlx5_hw_jump_action * +flow_hw_jump_action_register(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint32_t dest_group, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_attr jattr = *attr; + struct mlx5_flow_group *grp; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = &jattr, + }; + struct mlx5_list_entry *ge; + + jattr.group = dest_group; + ge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx); + if (!ge) + return NULL; + grp = container_of(ge, struct mlx5_flow_group, entry); + return &grp->jump; +} + +/** + * Release jump action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] jump + * Pointer to the jump action. + */ + +static void +flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_group *grp; + + grp = container_of + (jump, struct mlx5_flow_group, jump); + mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry); +} + +/** + * Register queue/RSS action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] hws_flags + * DR action flags. + * @param[in] action + * rte flow action. + * + * @return + * Table on success, NULL otherwise and rte_errno is set. + */ +static inline struct mlx5_hrxq* +flow_hw_tir_action_register(struct rte_eth_dev *dev, + uint32_t hws_flags, + const struct rte_flow_action *action) +{ + struct mlx5_flow_rss_desc rss_desc = { + .hws_flags = hws_flags, + }; + struct mlx5_hrxq *hrxq; + + if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *queue = action->conf; + + rss_desc.const_q = &queue->index; + rss_desc.queue_num = 1; + } else { + const struct rte_flow_action_rss *rss = action->conf; + + rss_desc.queue_num = rss->queue_num; + rss_desc.const_q = rss->queue; + memcpy(rss_desc.key, + !rss->key ? rss_hash_default_key : rss->key, + MLX5_RSS_HASH_KEY_LEN); + rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types; + flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields); + flow_dv_action_rss_l34_hash_adjust(rss->types, + &rss_desc.hash_fields); + if (rss->level > 1) { + rss_desc.hash_fields |= IBV_RX_HASH_INNER; + rss_desc.tunnel = 1; + } + } + hrxq = mlx5_hrxq_get(dev, &rss_desc); + return hrxq; +} + /** * Destroy DR actions created by action template. * * For DR actions created during table creation's action translate. * Need to destroy the DR action when destroying the table. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. * @param[in] acts * Pointer to the template HW steering DR actions. */ static void -__flow_hw_action_template_destroy(struct mlx5_hw_actions *acts __rte_unused) +__flow_hw_action_template_destroy(struct rte_eth_dev *dev, + struct mlx5_hw_actions *acts) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (acts->jump) { + struct mlx5_flow_group *grp; + + grp = container_of + (acts->jump, struct mlx5_flow_group, jump); + mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry); + acts->jump = NULL; + } +} + +/** + * Append dynamic action to the dynamic action list. + * + * @param[in] priv + * Pointer to the port private data structure. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] type + * Action type. + * @param[in] action_src + * Offset of source rte flow action. 
+ * @param[in] action_dst + * Offset of destination DR action. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline struct mlx5_action_construct_data * +__flow_hw_act_data_alloc(struct mlx5_priv *priv, + enum rte_flow_action_type type, + uint16_t action_src, + uint16_t action_dst) +{ + struct mlx5_action_construct_data *act_data; + uint32_t idx = 0; + + act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx); + if (!act_data) + return NULL; + act_data->idx = idx; + act_data->type = type; + act_data->action_src = action_src; + act_data->action_dst = action_dst; + return act_data; +} + +/** + * Append dynamic action to the dynamic action list. + * + * @param[in] priv + * Pointer to the port private data structure. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] type + * Action type. + * @param[in] action_src + * Offset of source rte flow action. + * @param[in] action_dst + * Offset of destination DR action. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline int +__flow_hw_act_data_general_append(struct mlx5_priv *priv, + struct mlx5_hw_actions *acts, + enum rte_flow_action_type type, + uint16_t action_src, + uint16_t action_dst) +{ struct mlx5_action_construct_data *act_data; + + act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst); + if (!act_data) + return -1; + LIST_INSERT_HEAD(&acts->act_list, act_data, next); + return 0; +} + +/** + * Append dynamic encap action to the dynamic action list. + * + * @param[in] priv + * Pointer to the port private data structure. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] type + * Action type. + * @param[in] action_src + * Offset of source rte flow action. + * @param[in] action_dst + * Offset of destination DR action. + * @param[in] encap_src + * Offset of source encap raw data. + * @param[in] encap_dst + * Offset of destination encap raw data. + * @param[in] len + * Length of the data to be updated. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline int +__flow_hw_act_data_encap_append(struct mlx5_priv *priv, + struct mlx5_hw_actions *acts, + enum rte_flow_action_type type, + uint16_t action_src, + uint16_t action_dst, + uint16_t encap_src, + uint16_t encap_dst, + uint16_t len) +{ struct mlx5_action_construct_data *act_data; + + act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst); + if (!act_data) + return -1; + act_data->encap.src = encap_src; + act_data->encap.dst = encap_dst; + act_data->encap.len = len; + LIST_INSERT_HEAD(&acts->act_list, act_data, next); + return 0; +} + +/** + * Append shared RSS action to the dynamic action list. + * + * @param[in] priv + * Pointer to the port private data structure. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] type + * Action type. + * @param[in] action_src + * Offset of source rte flow action. + * @param[in] action_dst + * Offset of destination DR action. + * @param[in] idx + * Shared RSS index. + * @param[in] rss + * Pointer to the shared RSS info. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. 
+ */ +static __rte_always_inline int +__flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv, + struct mlx5_hw_actions *acts, + enum rte_flow_action_type type, + uint16_t action_src, + uint16_t action_dst, + uint32_t idx, + struct mlx5_shared_action_rss *rss) +{ struct mlx5_action_construct_data *act_data; + + act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst); + if (!act_data) + return -1; + act_data->shared_rss.level = rss->origin.level; + act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP : + rss->origin.types; + act_data->shared_rss.idx = idx; + LIST_INSERT_HEAD(&acts->act_list, act_data, next); + return 0; +} + +/** + * Translate shared indirect action. + * + * @param[in] dev + * Pointer to the rte_eth_dev data structure. + * @param[in] action + * Pointer to the shared indirect rte_flow action. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] action_src + * Offset of source rte flow action. + * @param[in] action_dst + * Offset of destination DR action. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline int +flow_hw_shared_action_translate(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct mlx5_hw_actions *acts, + uint16_t action_src, + uint16_t action_dst) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_shared_action_rss *shared_rss; + uint32_t act_idx = (uint32_t)(uintptr_t)action->conf; + uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & + ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); + + switch (type) { + case MLX5_INDIRECT_ACTION_TYPE_RSS: + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + if (!shared_rss || __flow_hw_act_data_shared_rss_append + (priv, acts, + (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS, + action_src, action_dst, idx, shared_rss)) + return -1; + break; + default: + DRV_LOG(WARNING, "Unsupported shared action type:%d", type); + break; + } + return 0; +} + +/** + * Translate encap items to encapsulation list. + * + * @param[in] dev + * Pointer to the rte_eth_dev data structure. + * @param[in] acts + * Pointer to the template HW steering DR actions. + * @param[in] type + * Action type. + * @param[in] action_src + * Offset of source rte flow action. + * @param[in] action_dst + * Offset of destination DR action. + * @param[in] items + * Encap item pattern. + * @param[in] items_m + * Encap item mask indicates which part are constant and dynamic. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. 
+ */ +static __rte_always_inline int +flow_hw_encap_item_translate(struct rte_eth_dev *dev, + struct mlx5_hw_actions *acts, + enum rte_flow_action_type type, + uint16_t action_src, + uint16_t action_dst, + const struct rte_flow_item *items, + const struct rte_flow_item *items_m) +{ + struct mlx5_priv *priv = dev->data->dev_private; + size_t len, total_len = 0; + uint32_t i = 0; + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++, items_m++, i++) { + len = flow_dv_get_item_hdr_len(items->type); + if ((!items_m->spec || + memcmp(items_m->spec, items->spec, len)) && + __flow_hw_act_data_encap_append(priv, acts, type, + action_src, action_dst, i, + total_len, len)) + return -1; + total_len += len; + } + return 0; } /** @@ -80,14 +543,22 @@ flow_hw_actions_translate(struct rte_eth_dev *dev, const struct rte_flow_template_table_attr *table_attr, struct mlx5_hw_actions *acts, struct rte_flow_actions_template *at, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr *attr = &table_attr->flow_attr; struct rte_flow_action *actions = at->actions; + struct rte_flow_action *action_start = actions; struct rte_flow_action *masks = at->masks; + enum mlx5dr_action_reformat_type refmt_type = 0; + const struct rte_flow_action_raw_encap *raw_encap_data; + const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL; + uint16_t reformat_pos = MLX5_HW_MAX_ACTS, reformat_src = 0; + uint8_t *encap_data = NULL; + size_t data_size = 0; bool actions_end = false; - uint32_t type; + uint32_t type, i; + int err; if (attr->transfer) type = MLX5DR_TABLE_TYPE_FDB; @@ -95,14 +566,146 @@ flow_hw_actions_translate(struct rte_eth_dev *dev, type = MLX5DR_TABLE_TYPE_NIC_TX; else type = MLX5DR_TABLE_TYPE_NIC_RX; - for (; !actions_end; actions++, masks++) { + for (i = 0; !actions_end; actions++, masks++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_INDIRECT: + if (!attr->group) { + DRV_LOG(ERR, "Indirect action is not supported in root table."); + goto err; + } + if (actions->conf && masks->conf) { + if (flow_hw_shared_action_translate + (dev, actions, acts, actions - action_start, i)) + goto err; + } else if (__flow_hw_act_data_general_append + (priv, acts, actions->type, + actions - action_start, i)){ + goto err; + } + i++; break; case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_DROP: - acts->drop = priv->hw_drop[!!attr->group][type]; + acts->rule_acts[i++].action = + priv->hw_drop[!!attr->group][type]; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + acts->mark = true; + if (masks->conf) + acts->rule_acts[i].tag.value = + mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (masks->conf))->id); + else if (__flow_hw_act_data_general_append(priv, acts, + actions->type, actions - action_start, i)) + goto err; + acts->rule_acts[i++].action = + priv->hw_tag[!!attr->group]; + flow_hw_rxq_flag_set(dev, true); + break; + case RTE_FLOW_ACTION_TYPE_JUMP: + if (masks->conf) { + uint32_t jump_group = + ((const struct rte_flow_action_jump *) + actions->conf)->group; + acts->jump = flow_hw_jump_action_register + (dev, attr, jump_group, error); + if (!acts->jump) + goto err; + acts->rule_acts[i].action = (!!attr->group) ? 
+ acts->jump->hws_action : + acts->jump->root_action; + } else if (__flow_hw_act_data_general_append + (priv, acts, actions->type, + actions - action_start, i)){ + goto err; + } + i++; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + if (masks->conf) { + acts->tir = flow_hw_tir_action_register + (dev, + mlx5_hw_act_flag[!!attr->group][type], + actions); + if (!acts->tir) + goto err; + acts->rule_acts[i].action = + acts->tir->action; + } else if (__flow_hw_act_data_general_append + (priv, acts, actions->type, + actions - action_start, i)) { + goto err; + } + i++; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + if (masks->conf) { + acts->tir = flow_hw_tir_action_register + (dev, + mlx5_hw_act_flag[!!attr->group][type], + actions); + if (!acts->tir) + goto err; + acts->rule_acts[i].action = + acts->tir->action; + } else if (__flow_hw_act_data_general_append + (priv, acts, actions->type, + actions - action_start, i)) { + goto err; + } + i++; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS); + enc_item = ((const struct rte_flow_action_vxlan_encap *) + actions->conf)->definition; + enc_item_m = + ((const struct rte_flow_action_vxlan_encap *) + masks->conf)->definition; + reformat_pos = i++; + reformat_src = actions - action_start; + refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2; + break; + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS); + enc_item = ((const struct rte_flow_action_nvgre_encap *) + actions->conf)->definition; + enc_item_m = + ((const struct rte_flow_action_nvgre_encap *) + actions->conf)->definition; + reformat_pos = i++; + reformat_src = actions - action_start; + refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + MLX5_ASSERT(reformat_pos == MLX5_HW_MAX_ACTS); + reformat_pos = i++; + refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap_data = + (const struct rte_flow_action_raw_encap *) + actions->conf; + encap_data = raw_encap_data->data; + data_size = raw_encap_data->size; + if (reformat_pos != MLX5_HW_MAX_ACTS) { + refmt_type = data_size < + MLX5_ENCAPSULATION_DECISION_SIZE ? 
+ MLX5DR_ACTION_REFORMAT_TYPE_TNL_L3_TO_L2 : + MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L3; + } else { + reformat_pos = i++; + refmt_type = + MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2; + } + reformat_src = actions - action_start; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + reformat_pos = i++; + refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2; break; case RTE_FLOW_ACTION_TYPE_END: actions_end = true; @@ -111,6 +714,161 @@ flow_hw_actions_translate(struct rte_eth_dev *dev, break; } } + if (reformat_pos != MLX5_HW_MAX_ACTS) { + uint8_t buf[MLX5_ENCAP_MAX_LEN]; + + if (enc_item) { + MLX5_ASSERT(!encap_data); + if (flow_dv_convert_encap_data + (enc_item, buf, &data_size, error) || + flow_hw_encap_item_translate + (dev, acts, (action_start + reformat_src)->type, + reformat_src, reformat_pos, + enc_item, enc_item_m)) + goto err; + encap_data = buf; + } else if (encap_data && __flow_hw_act_data_encap_append + (priv, acts, + (action_start + reformat_src)->type, + reformat_src, reformat_pos, 0, 0, data_size)) { + goto err; + } + acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*acts->encap_decap) + data_size, + 0, SOCKET_ID_ANY); + if (!acts->encap_decap) + goto err; + if (data_size) { + acts->encap_decap->data_size = data_size; + memcpy(acts->encap_decap->data, encap_data, data_size); + } + acts->encap_decap->action = mlx5dr_action_create_reformat + (priv->dr_ctx, refmt_type, + data_size, encap_data, + rte_log2_u32(table_attr->nb_flows), + mlx5_hw_act_flag[!!attr->group][type]); + if (!acts->encap_decap->action) + goto err; + acts->rule_acts[reformat_pos].action = + acts->encap_decap->action; + acts->encap_decap_pos = reformat_pos; + } + acts->acts_num = i; + return 0; +err: + err = rte_errno; + __flow_hw_action_template_destroy(dev, acts); + return rte_flow_error_set(error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "fail to create rte table"); +} + +/** + * Get shared indirect action. + * + * @param[in] dev + * Pointer to the rte_eth_dev data structure. + * @param[in] act_data + * Pointer to the recorded action construct data. + * @param[in] item_flags + * The matcher itme_flags used for RSS lookup. + * @param[in] rule_act + * Pointer to the shared action's destination rule DR action. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline int +flow_hw_shared_action_get(struct rte_eth_dev *dev, + struct mlx5_action_construct_data *act_data, + const uint64_t item_flags, + struct mlx5dr_rule_action *rule_act) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_rss_desc rss_desc = { 0 }; + uint64_t hash_fields = 0; + uint32_t hrxq_idx = 0; + struct mlx5_hrxq *hrxq = NULL; + int act_type = act_data->type; + + switch (act_type) { + case MLX5_RTE_FLOW_ACTION_TYPE_RSS: + rss_desc.level = act_data->shared_rss.level; + rss_desc.types = act_data->shared_rss.types; + flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields); + hrxq_idx = flow_dv_action_rss_hrxq_lookup + (dev, act_data->shared_rss.idx, hash_fields); + if (hrxq_idx) + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); + if (hrxq) { + rule_act->action = hrxq->action; + return 0; + } + break; + default: + DRV_LOG(WARNING, "Unsupported shared action type:%d", + act_data->type); + break; + } + return -1; +} + +/** + * Construct shared indirect action. + * + * @param[in] dev + * Pointer to the rte_eth_dev data structure. + * @param[in] action + * Pointer to the shared indirect rte_flow action. 
+ * @param[in] table + * Pointer to the flow table. + * @param[in] it_idx + * Item template index the action template refer to. + * @param[in] rule_act + * Pointer to the shared action's destination rule DR action. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static __rte_always_inline int +flow_hw_shared_action_construct(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow_template_table *table, + const uint8_t it_idx, + struct mlx5dr_rule_action *rule_act) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_action_construct_data act_data; + struct mlx5_shared_action_rss *shared_rss; + uint32_t act_idx = (uint32_t)(uintptr_t)action->conf; + uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & + ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); + uint64_t item_flags; + + memset(&act_data, 0, sizeof(act_data)); + switch (type) { + case MLX5_INDIRECT_ACTION_TYPE_RSS: + act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS; + shared_rss = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx); + if (!shared_rss) + return -1; + act_data.shared_rss.idx = idx; + act_data.shared_rss.level = shared_rss->origin.level; + act_data.shared_rss.types = !shared_rss->origin.types ? + RTE_ETH_RSS_IP : + shared_rss->origin.types; + item_flags = table->its[it_idx]->item_flags; + if (flow_hw_shared_action_get + (dev, &act_data, item_flags, rule_act)) + return -1; + break; + default: + DRV_LOG(WARNING, "Unsupported shared action type:%d", type); + break; + } return 0; } @@ -120,8 +878,14 @@ flow_hw_actions_translate(struct rte_eth_dev *dev, * For action template contains dynamic actions, these actions need to * be updated according to the rte_flow action during flow creation. * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] job + * Pointer to job descriptor. * @param[in] hw_acts * Pointer to translated actions from template. + * @param[in] it_idx + * Item template index the action template refer to. * @param[in] actions * Array of rte_flow action need to be checked. * @param[in] rule_acts @@ -133,31 +897,131 @@ flow_hw_actions_translate(struct rte_eth_dev *dev, * 0 on success, negative value otherwise and rte_errno is set. 
*/ static __rte_always_inline int -flow_hw_actions_construct(struct mlx5_hw_actions *hw_acts, +flow_hw_actions_construct(struct rte_eth_dev *dev, + struct mlx5_hw_q_job *job, + const struct mlx5_hw_actions *hw_acts, + const uint8_t it_idx, const struct rte_flow_action actions[], struct mlx5dr_rule_action *rule_acts, uint32_t *acts_num) { - bool actions_end = false; - uint32_t i; + struct rte_flow_template_table *table = job->flow->table; + struct mlx5_action_construct_data *act_data; + const struct rte_flow_action *action; + const struct rte_flow_action_raw_encap *raw_encap_data; + const struct rte_flow_item *enc_item = NULL; + uint8_t *buf = job->encap_data; + struct rte_flow_attr attr = { + .ingress = 1, + }; + uint32_t ft_flag; - for (i = 0; !actions_end || (i >= MLX5_HW_MAX_ACTS); actions++) { - switch (actions->type) { + memcpy(rule_acts, hw_acts->rule_acts, + sizeof(*rule_acts) * hw_acts->acts_num); + *acts_num = hw_acts->acts_num; + if (LIST_EMPTY(&hw_acts->act_list)) + return 0; + attr.group = table->grp->group_id; + ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type]; + if (table->type == MLX5DR_TABLE_TYPE_FDB) { + attr.transfer = 1; + attr.ingress = 1; + } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) { + attr.egress = 1; + attr.ingress = 0; + } else { + attr.ingress = 1; + } + if (hw_acts->encap_decap && hw_acts->encap_decap->data_size) + memcpy(buf, hw_acts->encap_decap->data, + hw_acts->encap_decap->data_size); + LIST_FOREACH(act_data, &hw_acts->act_list, next) { + uint32_t jump_group; + uint32_t tag; + uint64_t item_flags; + struct mlx5_hw_jump_action *jump; + struct mlx5_hrxq *hrxq; + + action = &actions[act_data->action_src]; + MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT || + (int)action->type == act_data->type); + switch (act_data->type) { case RTE_FLOW_ACTION_TYPE_INDIRECT: + if (flow_hw_shared_action_construct + (dev, action, table, it_idx, + &rule_acts[act_data->action_dst])) + return -1; break; case RTE_FLOW_ACTION_TYPE_VOID: break; - case RTE_FLOW_ACTION_TYPE_DROP: - rule_acts[i++].action = hw_acts->drop; + case RTE_FLOW_ACTION_TYPE_MARK: + tag = mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (action->conf))->id); + rule_acts[act_data->action_dst].tag.value = tag; break; - case RTE_FLOW_ACTION_TYPE_END: - actions_end = true; + case RTE_FLOW_ACTION_TYPE_JUMP: + jump_group = ((const struct rte_flow_action_jump *) + action->conf)->group; + jump = flow_hw_jump_action_register + (dev, &attr, jump_group, NULL); + if (!jump) + return -1; + rule_acts[act_data->action_dst].action = + (!!attr.group) ? 
jump->hws_action : jump->root_action; + job->flow->jump = jump; + job->flow->fate_type = MLX5_FLOW_FATE_JUMP; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + case RTE_FLOW_ACTION_TYPE_QUEUE: + hrxq = flow_hw_tir_action_register(dev, + ft_flag, + action); + if (!hrxq) + return -1; + rule_acts[act_data->action_dst].action = hrxq->action; + job->flow->hrxq = hrxq; + job->flow->fate_type = MLX5_FLOW_FATE_QUEUE; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_RSS: + item_flags = table->its[it_idx]->item_flags; + if (flow_hw_shared_action_get + (dev, act_data, item_flags, + &rule_acts[act_data->action_dst])) + return -1; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + enc_item = ((const struct rte_flow_action_vxlan_encap *) + action->conf)->definition; + rte_memcpy((void *)&buf[act_data->encap.dst], + enc_item[act_data->encap.src].spec, + act_data->encap.len); + break; + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + enc_item = ((const struct rte_flow_action_nvgre_encap *) + action->conf)->definition; + rte_memcpy((void *)&buf[act_data->encap.dst], + enc_item[act_data->encap.src].spec, + act_data->encap.len); + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap_data = + (const struct rte_flow_action_raw_encap *) + action->conf; + rte_memcpy((void *)&buf[act_data->encap.dst], + raw_encap_data->data, act_data->encap.len); + MLX5_ASSERT(raw_encap_data->size == + act_data->encap.len); break; default: break; } } - *acts_num = i; + if (hw_acts->encap_decap) { + rule_acts[hw_acts->encap_decap_pos].reformat.offset = + job->flow->idx - 1; + rule_acts[hw_acts->encap_decap_pos].reformat.data = buf; + } return 0; } @@ -239,7 +1103,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, rule_attr.user_data = job; hw_acts = &table->ats[action_template_index].acts; /* Construct the flow action array based on the input actions.*/ - flow_hw_actions_construct(hw_acts, actions, rule_acts, &acts_num); + flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index, + actions, rule_acts, &acts_num); ret = mlx5dr_rule_create(table->matcher, pattern_template_index, items, rule_acts, acts_num, @@ -356,8 +1221,13 @@ flow_hw_pull(struct rte_eth_dev *dev, job = (struct mlx5_hw_q_job *)res[i].user_data; /* Restore user data. 
*/ res[i].user_data = job->user_data; - if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) + if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) { + if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP) + flow_hw_jump_release(dev, job->flow->jump); + else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE) + mlx5_hrxq_obj_release(dev, job->flow->hrxq); mlx5_ipool_free(job->flow->table->flow, job->flow->idx); + } priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job; } return ret; @@ -642,6 +1512,7 @@ flow_hw_table_create(struct rte_eth_dev *dev, rte_errno = EINVAL; goto at_error; } + LIST_INIT(&tbl->ats[i].acts.act_list); err = flow_hw_actions_translate(dev, attr, &tbl->ats[i].acts, action_templates[i], error); @@ -659,7 +1530,7 @@ flow_hw_table_create(struct rte_eth_dev *dev, return tbl; at_error: while (i--) { - __flow_hw_action_template_destroy(&tbl->ats[i].acts); + __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts); __atomic_sub_fetch(&action_templates[i]->refcnt, 1, __ATOMIC_RELAXED); } @@ -718,7 +1589,9 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, __atomic_sub_fetch(&table->its[i]->refcnt, 1, __ATOMIC_RELAXED); for (i = 0; i < table->nb_action_templates; i++) { - __flow_hw_action_template_destroy(&table->ats[i].acts); + if (table->ats[i].acts.mark) + flow_hw_rxq_flag_set(dev, false); + __flow_hw_action_template_destroy(dev, &table->ats[i].acts); __atomic_sub_fetch(&table->ats[i].action_template->refcnt, 1, __ATOMIC_RELAXED); } @@ -880,6 +1753,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, "cannot create match template"); return NULL; } + it->item_flags = flow_hw_rss_item_flags_get(items); __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED); LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next); return it; @@ -1169,6 +2043,15 @@ flow_hw_configure(struct rte_eth_dev *dev, struct mlx5_hw_q *hw_q; struct mlx5_hw_q_job *job = NULL; uint32_t mem_size, i, j; + struct mlx5_indexed_pool_config cfg = { + .size = sizeof(struct mlx5_action_construct_data), + .trunk_size = 4096, + .need_lock = 1, + .release_mem_en = !!priv->sh->config.reclaim_mode, + .malloc = mlx5_malloc, + .free = mlx5_free, + .type = "mlx5_hw_action_construct_data", + }; if (!port_attr || !nb_queue || !queue_attr) { rte_errno = EINVAL; @@ -1187,6 +2070,9 @@ flow_hw_configure(struct rte_eth_dev *dev, } flow_hw_resource_release(dev); } + priv->acts_ipool = mlx5_ipool_create(&cfg); + if (!priv->acts_ipool) + goto err; /* Allocate the queue job descriptor LIFO. 
*/ mem_size = sizeof(priv->hw_q[0]) * nb_queue; for (i = 0; i < nb_queue; i++) { @@ -1199,6 +2085,7 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } mem_size += (sizeof(struct mlx5_hw_q_job *) + + sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + sizeof(struct mlx5_hw_q_job)) * queue_attr[0]->size; } @@ -1209,6 +2096,8 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } for (i = 0; i < nb_queue; i++) { + uint8_t *encap = NULL; + priv->hw_q[i].job_idx = queue_attr[i]->size; priv->hw_q[i].size = queue_attr[i]->size; if (i == 0) @@ -1219,8 +2108,11 @@ flow_hw_configure(struct rte_eth_dev *dev, &job[queue_attr[i - 1]->size]; job = (struct mlx5_hw_q_job *) &priv->hw_q[i].job[queue_attr[i]->size]; - for (j = 0; j < queue_attr[i]->size; j++) + encap = (uint8_t *)&job[queue_attr[i]->size]; + for (j = 0; j < queue_attr[i]->size; j++) { + job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN]; priv->hw_q[i].job[j] = &job[j]; + } } dr_ctx_attr.pd = priv->sh->cdev->pd; dr_ctx_attr.queues = nb_queue; @@ -1240,20 +2132,29 @@ flow_hw_configure(struct rte_eth_dev *dev, if (!priv->hw_drop[i][j]) goto err; } + priv->hw_tag[i] = mlx5dr_action_create_tag + (priv->dr_ctx, mlx5_hw_act_flag[i][0]); + if (!priv->hw_tag[i]) + goto err; } return 0; err: for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) { - if (!priv->hw_drop[i][j]) - continue; - mlx5dr_action_destroy(priv->hw_drop[i][j]); + if (priv->hw_drop[i][j]) + mlx5dr_action_destroy(priv->hw_drop[i][j]); } + if (priv->hw_tag[i]) + mlx5dr_action_destroy(priv->hw_tag[i]); } if (dr_ctx) claim_zero(mlx5dr_context_close(dr_ctx)); mlx5_free(priv->hw_q); priv->hw_q = NULL; + if (priv->acts_ipool) { + mlx5_ipool_destroy(priv->acts_ipool); + priv->acts_ipool = NULL; + } return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "fail to configure port"); @@ -1290,10 +2191,15 @@ flow_hw_resource_release(struct rte_eth_dev *dev) } for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) { - if (!priv->hw_drop[i][j]) - continue; - mlx5dr_action_destroy(priv->hw_drop[i][j]); + if (priv->hw_drop[i][j]) + mlx5dr_action_destroy(priv->hw_drop[i][j]); } + if (priv->hw_tag[i]) + mlx5dr_action_destroy(priv->hw_tag[i]); + } + if (priv->acts_ipool) { + mlx5_ipool_destroy(priv->acts_ipool); + priv->acts_ipool = NULL; } mlx5_free(priv->hw_q); priv->hw_q = NULL; @@ -1302,6 +2208,109 @@ flow_hw_resource_release(struct rte_eth_dev *dev) priv->nb_queue = 0; } +/** + * Create shared action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue + * Which queue to be used.. + * @param[in] attr + * Operation attribute. + * @param[in] conf + * Indirect action configuration. + * @param[in] action + * rte_flow action detail. + * @param[in] user_data + * Pointer to the user_data. + * @param[out] error + * Pointer to error structure. + * + * @return + * Action handle on success, NULL otherwise and rte_errno is set. + */ +static struct rte_flow_action_handle * +flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + const struct rte_flow_indir_action_conf *conf, + const struct rte_flow_action *action, + void *user_data, + struct rte_flow_error *error) +{ + RTE_SET_USED(queue); + RTE_SET_USED(attr); + RTE_SET_USED(user_data); + return flow_dv_action_create(dev, conf, action, error); +} + +/** + * Update shared action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. 
+ * @param[in] queue + * Which queue to be used.. + * @param[in] attr + * Operation attribute. + * @param[in] handle + * Action handle to be updated. + * @param[in] update + * Update value. + * @param[in] user_data + * Pointer to the user_data. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static int +flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_action_handle *handle, + const void *update, + void *user_data, + struct rte_flow_error *error) +{ + RTE_SET_USED(queue); + RTE_SET_USED(attr); + RTE_SET_USED(user_data); + return flow_dv_action_update(dev, handle, update, error); +} + +/** + * Destroy shared action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] queue + * Which queue to be used.. + * @param[in] attr + * Operation attribute. + * @param[in] handle + * Action handle to be destroyed. + * @param[in] user_data + * Pointer to the user_data. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. + */ +static int +flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_action_handle *handle, + void *user_data, + struct rte_flow_error *error) +{ + RTE_SET_USED(queue); + RTE_SET_USED(attr); + RTE_SET_USED(user_data); + return flow_dv_action_destroy(dev, handle, error); +} + + const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { .info_get = flow_hw_info_get, .configure = flow_hw_configure, @@ -1315,6 +2324,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { .async_flow_destroy = flow_hw_async_flow_destroy, .pull = flow_hw_pull, .push = flow_hw_push, + .async_action_create = flow_hw_action_handle_create, + .async_action_destroy = flow_hw_action_handle_destroy, + .async_action_update = flow_hw_action_handle_update, + .action_validate = flow_dv_action_validate, + .action_create = flow_dv_action_create, + .action_destroy = flow_dv_action_destroy, + .action_update = flow_dv_action_update, + .action_query = flow_dv_action_query, }; #endif
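
The actions-template translation above keys off the per-action masks: an action whose mask conf is non-NULL is resolved once in flow_hw_actions_translate() and written straight into acts->rule_acts[], while an action with a NULL mask conf is appended to acts->act_list and completed per flow by flow_hw_actions_construct(). Below is a minimal application-side sketch of the two cases (not part of the patch); it assumes the rte_flow template API of this release, and the queue index, mark usage and helper name are illustrative placeholders.

#include <stdint.h>
#include <rte_flow.h>

/* QUEUE is fully masked: the Rx queue is fixed for every flow using this
 * template, so the TIR is registered once at table creation time. */
static const struct rte_flow_action_queue queue_conf = { .index = 3 };
static const struct rte_flow_action_queue queue_mask = { .index = UINT16_MAX };

/* MARK has a NULL mask conf: the mark id is a per-flow parameter taken
 * from the actions passed to each enqueued flow creation. */
static const struct rte_flow_action tmpl_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action tmpl_masks[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

/* Hypothetical helper: just wraps rte_flow_actions_template_create(). */
static struct rte_flow_actions_template *
make_actions_template(uint16_t port_id,
		      const struct rte_flow_actions_template_attr *attr,
		      struct rte_flow_error *error)
{
	return rte_flow_actions_template_create(port_id, attr,
						tmpl_actions, tmpl_masks,
						error);
}

With such a template, the QUEUE destination becomes a static DR action shared by the whole table, while the MARK id is read from the per-flow action list when each flow is enqueued.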
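
flow_hw_async_flow_create(), flow_hw_push() and flow_hw_pull() above provide the enqueue, doorbell and completion-poll halves of the asynchronous flow API, and the pull path now also releases the per-flow jump or TIR fate resource once a destroy completes. A rough application-side usage sketch, assuming the asynchronous rte_flow API introduced alongside this code and an already created template table; port_id, queue, pattern and actions are caller-supplied placeholders, not values defined by this patch:

#include <rte_flow.h>

static int
insert_one_flow(uint16_t port_id, uint32_t queue,
		struct rte_flow_template_table *table,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	struct rte_flow_op_result res[1];
	struct rte_flow *flow;
	int n;

	/* Enqueue the creation; pattern and actions follow template index 0. */
	flow = rte_flow_async_create(port_id, queue, &op_attr, table,
				     pattern, 0, actions, 0,
				     NULL /* user_data */, error);
	if (!flow)
		return -1;
	/* Ring the queue doorbell, then poll until the operation completes. */
	if (rte_flow_push(port_id, queue, error))
		return -1;
	do {
		n = rte_flow_pull(port_id, queue, res, 1, error);
	} while (n == 0);
	return (n == 1 && res[0].status == RTE_FLOW_OP_SUCCESS) ? 0 : -1;
}

In practice an application batches many enqueued operations per push and pulls completions in bulk; the one-at-a-time loop here only illustrates the call ordering.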
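
The flow_hw_action_handle_create()/update()/destroy() wrappers at the end reuse the existing DV shared-action implementation, so the handle they return is the same encoded value (action type in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET, ipool index below it) that flow_hw_shared_action_translate() and flow_hw_shared_action_construct() decode. A hedged sketch of creating a shared RSS action through the asynchronous API and referencing it as an INDIRECT action; the queue list, RSS types and helper name are illustrative assumptions, not taken from the patch:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

/* Hypothetical helper: enqueue creation of a shared (indirect) RSS action. */
static struct rte_flow_action_handle *
make_shared_rss(uint16_t port_id, uint32_t queue, struct rte_flow_error *error)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_IP,
		.queue_num = RTE_DIM(rss_queues),
		.queue = rss_queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};

	return rte_flow_async_action_handle_create(port_id, queue, &op_attr,
						   &conf, &action,
						   NULL /* user_data */, error);
}

/* The returned handle is then used as the conf of an INDIRECT action in an
 * actions template or per-flow action list:
 *	{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle }
 */

The enqueued handle creation completes through the same rte_flow_push()/rte_flow_pull() sequence shown above.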