X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=2bbb5f578bb034c922e0f63920427bfa58847ee0;hb=b4edeaf3efd51e677e5c10cc30a07c250a739316;hp=7a6d6f8675d605308803c0edfaaba0c6525f1a07;hpb=e6100c7b62263a1da7569371a846ef2ceb091207;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 7a6d6f8675..2bbb5f578b 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -50,6 +50,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_action *app_actions, uint32_t flow_idx, + const struct mlx5_flow_tunnel *tunnel, struct tunnel_default_miss_ctx *ctx, struct rte_flow_error *error); static struct mlx5_flow_tunnel * @@ -99,6 +100,8 @@ struct mlx5_flow_expand_node { * RSS types bit-field associated with this node * (see ETH_RSS_* definitions). */ + uint8_t optional; + /**< optional expand field. Default 0 to expand, 1 not go deeper. */ }; /** Object returned by mlx5_flow_expand_rss(). */ @@ -111,6 +114,31 @@ struct mlx5_flow_expand_rss { } entry[]; }; +static void +mlx5_dbg__print_pattern(const struct rte_flow_item *item); + +static bool +mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) +{ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + case RTE_FLOW_ITEM_TYPE_VLAN: + case RTE_FLOW_ITEM_TYPE_IPV4: + case RTE_FLOW_ITEM_TYPE_IPV6: + case RTE_FLOW_ITEM_TYPE_UDP: + case RTE_FLOW_ITEM_TYPE_TCP: + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_NVGRE: + case RTE_FLOW_ITEM_TYPE_GRE: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_MPLS: + return true; + default: + break; + } + return false; +} + static enum rte_flow_item_type mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) { @@ -212,7 +240,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) return ret; } -#define MLX5_RSS_EXP_ELT_N 8 +#define MLX5_RSS_EXP_ELT_N 16 /** * Expand RSS flows into several possible flows according to the RSS hash @@ -237,6 +265,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) * set, the following errors are defined: * * -E2BIG: graph-depth @p graph is too deep. + * -EINVAL: @p size has not enough space for expanded pattern. */ static int mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, @@ -263,15 +292,18 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, memset(&missed_item, 0, sizeof(missed_item)); lsize = offsetof(struct mlx5_flow_expand_rss, entry) + MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]); - if (lsize <= size) { - buf->entry[0].priority = 0; - buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; - buf->entries = 0; - addr = buf->entry[0].pattern; - } + if (lsize > size) + return -EINVAL; + buf->entry[0].priority = 0; + buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; + buf->entries = 0; + addr = buf->entry[0].pattern; for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - if (item->type != RTE_FLOW_ITEM_TYPE_VOID) - last_item = item; + if (!mlx5_flow_is_rss_expandable_item(item)) { + user_pattern_size += sizeof(*item); + continue; + } + last_item = item; for (i = 0; node->next && node->next[i]; ++i) { next = &graph[node->next[i]]; if (next->type == item->type) @@ -283,12 +315,12 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, } user_pattern_size += sizeof(*item); /* Handle END item. 
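	 *
	 * The expansion buffer keeps the MLX5_RSS_EXP_ELT_N entry
	 * descriptors first and stores the expanded item arrays right
	 * behind them, which is why entry[0].pattern was pointed at
	 * &buf->entry[MLX5_RSS_EXP_ELT_N] above. A minimal caller-side
	 * sizing sketch (union and variable names are illustrative only,
	 * mirroring the flow_list_create() usage further down):
	 *
	 *	union {
	 *		struct mlx5_flow_expand_rss buf;
	 *		uint8_t buffer[2048];
	 *	} expand_buffer;
	 *	int ret = mlx5_flow_expand_rss(&expand_buffer.buf,
	 *				       sizeof(expand_buffer.buffer),
	 *				       items, rss->types,
	 *				       mlx5_support_expansion,
	 *				       graph_root);
	 *	// ret < 0 means -E2BIG (graph too deep) or -EINVAL
	 *	// (the buffer cannot hold the expanded patterns).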
*/ lsize += user_pattern_size; + if (lsize > size) + return -EINVAL; /* Copy the user pattern in the first entry of the buffer. */ - if (lsize <= size) { - rte_memcpy(addr, pattern, user_pattern_size); - addr = (void *)(((uintptr_t)addr) + user_pattern_size); - buf->entries = 1; - } + rte_memcpy(addr, pattern, user_pattern_size); + addr = (void *)(((uintptr_t)addr) + user_pattern_size); + buf->entries = 1; /* Start expanding. */ memset(flow_items, 0, sizeof(flow_items)); user_pattern_size -= sizeof(*item); @@ -318,7 +350,9 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, elt = 2; /* missed item + item end. */ node = next; lsize += elt * sizeof(*item) + user_pattern_size; - if ((node->rss_types & types) && lsize <= size) { + if (lsize > size) + return -EINVAL; + if (node->rss_types & types) { buf->entry[buf->entries].priority = 1; buf->entry[buf->entries].pattern = addr; buf->entries++; @@ -337,6 +371,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, while (node) { flow_items[stack_pos].type = node->type; if (node->rss_types & types) { + size_t n; /* * compute the number of items to copy from the * expansion and copy it. @@ -346,27 +381,26 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, elt = stack_pos + 2; flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END; lsize += elt * sizeof(*item) + user_pattern_size; - if (lsize <= size) { - size_t n = elt * sizeof(*item); - - buf->entry[buf->entries].priority = - stack_pos + 1 + missed; - buf->entry[buf->entries].pattern = addr; - buf->entries++; - rte_memcpy(addr, buf->entry[0].pattern, - user_pattern_size); - addr = (void *)(((uintptr_t)addr) + - user_pattern_size); - rte_memcpy(addr, &missed_item, - missed * sizeof(*item)); - addr = (void *)(((uintptr_t)addr) + - missed * sizeof(*item)); - rte_memcpy(addr, flow_items, n); - addr = (void *)(((uintptr_t)addr) + n); - } + if (lsize > size) + return -EINVAL; + n = elt * sizeof(*item); + buf->entry[buf->entries].priority = + stack_pos + 1 + missed; + buf->entry[buf->entries].pattern = addr; + buf->entries++; + rte_memcpy(addr, buf->entry[0].pattern, + user_pattern_size); + addr = (void *)(((uintptr_t)addr) + + user_pattern_size); + rte_memcpy(addr, &missed_item, + missed * sizeof(*item)); + addr = (void *)(((uintptr_t)addr) + + missed * sizeof(*item)); + rte_memcpy(addr, flow_items, n); + addr = (void *)(((uintptr_t)addr) + n); } /* Go deeper. 
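	 *
	 * The loop below is an iterative DFS over the expansion graph:
	 * next_node walks a node's children while the per-level stack
	 * records where to resume the siblings once a branch is
	 * exhausted; exceeding MLX5_RSS_EXP_ELT_N levels fails with
	 * E2BIG. A node flagged .optional (e.g. MLX5_EXPANSION_GRE_KEY
	 * below) may be matched when the user pattern supplies it
	 * explicitly, but the automatic expansion stops at it rather
	 * than descending through it.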
*/ - if (node->next) { + if (!node->optional && node->next) { next_node = node->next; if (stack_pos++ == MLX5_RSS_EXP_ELT_N) { rte_errno = E2BIG; @@ -405,6 +439,8 @@ enum mlx5_expansion { MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_GRE, + MLX5_EXPANSION_NVGRE, + MLX5_EXPANSION_GRE_KEY, MLX5_EXPANSION_MPLS, MLX5_EXPANSION_ETH, MLX5_EXPANSION_ETH_VLAN, @@ -442,8 +478,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { }, [MLX5_EXPANSION_OUTER_ETH] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6, - MLX5_EXPANSION_MPLS), + MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_ETH, .rss_types = 0, }, @@ -462,6 +497,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { (MLX5_EXPANSION_OUTER_IPV4_UDP, MLX5_EXPANSION_OUTER_IPV4_TCP, MLX5_EXPANSION_GRE, + MLX5_EXPANSION_NVGRE, MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_IPV4, @@ -470,7 +506,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { }, [MLX5_EXPANSION_OUTER_IPV4_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, }, @@ -484,14 +521,16 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_OUTER_IPV6_TCP, MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, - MLX5_EXPANSION_GRE), + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_NVGRE), .type = RTE_FLOW_ITEM_TYPE_IPV6, .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_OUTER_IPV6_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, }, @@ -513,12 +552,26 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { }, [MLX5_EXPANSION_GRE] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_GRE_KEY, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_GRE, }, + [MLX5_EXPANSION_GRE_KEY] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_MPLS), + .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, + .optional = 1, + }, + [MLX5_EXPANSION_NVGRE] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + }, [MLX5_EXPANSION_MPLS] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_ETH), .type = RTE_FLOW_ITEM_TYPE_MPLS, }, [MLX5_EXPANSION_ETH] = { @@ -759,7 +812,9 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; case MLX5_MTR_COLOR: - case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */ + case MLX5_ASO_FLOW_HIT: + case MLX5_ASO_CONNTRACK: + /* All features use the same REG_C. */ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; case MLX5_COPY_MARK: @@ -1028,7 +1083,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) * @param[in] dev_handle * Pointer to device flow handle structure. 
*/ -static void +void flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { @@ -1619,6 +1674,13 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "RSS on eCPRI is not supported now"); } + if ((item_flags & MLX5_FLOW_LAYER_MPLS) && + !(item_flags & + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) && + rss->level > 1) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern"); return 0; } @@ -1685,6 +1747,37 @@ mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, return 0; } +/* + * Validate the ASO CT action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] conntrack + * Pointer to the CT action profile. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_validate_action_ct(struct rte_eth_dev *dev, + const struct rte_flow_action_conntrack *conntrack, + struct rte_flow_error *error) +{ + RTE_SET_USED(dev); + + if (conntrack->state > RTE_FLOW_CONNTRACK_STATE_TIME_WAIT) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Invalid CT state"); + if (conntrack->last_index > RTE_FLOW_CONNTRACK_FLAG_RST) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Invalid last TCP packet flag"); + return 0; +} + /** * Verify the @p attributes will be correctly understood by the NIC and store * them in the @p flow if everything is correct. @@ -2046,7 +2139,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, RTE_FLOW_ERROR_TYPE_ITEM, item, "IPv4 cannot follow L2/VLAN layer " "which ether type is not IPv4"); - if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { if (mask && spec) next_proto = mask->hdr.next_proto_id & spec->hdr.next_proto_id; @@ -2154,7 +2247,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, "which ether type is not IPv6"); if (mask && mask->hdr.proto == UINT8_MAX && spec) next_proto = spec->hdr.proto; - if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2317,12 +2410,14 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, /** * Validate VXLAN item. * + * @param[in] dev + * Pointer to the Ethernet device structure. * @param[in] item * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. - * @param[in] target_protocol - * The next protocol in the previous item. + * @param[in] attr + * Flow rule attributes. * @param[out] error * Pointer to error structure. * @@ -2330,24 +2425,32 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, * 0 on success, a negative errno value otherwise and rte_errno is set. 
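 *
 * With misc5 matching (FDB or non-zero-group NIC rules) or with
 * tunnel_header_0_1 support on group 0, the mask may additionally
 * cover the reserved byte that follows the VNI, which is what the
 * wider nic_mask below expresses. An illustrative caller-side item
 * (the values are assumptions, not part of this patch):
 *
 * @code
 * struct rte_flow_item_vxlan spec = { .vni = "\x12\x34\x56" };
 * struct rte_flow_item_vxlan mask = { .vni = "\xff\xff\xff",
 *				       .rsvd1 = 0xff };
 * struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *	.spec = &spec,
 *	.mask = &mask,
 * };
 * @endcode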
*/ int -mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, +mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev, + const struct rte_flow_item *item, uint64_t item_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; int ret; + struct mlx5_priv *priv = dev->data->dev_private; union vni { uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - + const struct rte_flow_item_vxlan nic_mask = { + .vni = "\xff\xff\xff", + .rsvd1 = 0xff, + }; + const struct rte_flow_item_vxlan *valid_mask; if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple tunnel layers not" " supported"); + valid_mask = &rte_flow_item_vxlan_mask; /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 @@ -2358,9 +2461,15 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, "no outer UDP layer found"); if (!mask) mask = &rte_flow_item_vxlan_mask; + /* FDB domain & NIC domain non-zero group */ + if ((attr->transfer || attr->group) && priv->sh->misc5_cap) + valid_mask = &nic_mask; + /* Group zero in NIC domain */ + if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1) + valid_mask = &nic_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_vxlan_mask, + (const uint8_t *)valid_mask, sizeof(struct rte_flow_item_vxlan), MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) @@ -2842,9 +2951,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, "MPLS not supported or" " disabled in firmware" " configuration."); - /* MPLS over IP, UDP, GRE is allowed */ - if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | - MLX5_FLOW_LAYER_OUTER_L4_UDP | + /* MPLS over UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_GRE_KEY))) return rte_flow_error_set(error, EINVAL, @@ -3017,31 +3125,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, MLX5_ITEM_RANGE_NOT_ACCEPTED, error); } -/** - * Release resource related QUEUE/RSS action split. - * - * @param dev - * Pointer to Ethernet device. - * @param flow - * Flow to release id's from. - */ -static void -flow_mreg_split_qrss_release(struct rte_eth_dev *dev, - struct rte_flow *flow) -{ - struct mlx5_priv *priv = dev->data->dev_private; - uint32_t handle_idx; - struct mlx5_flow_handle *dev_handle; - - SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, - handle_idx, dev_handle, next) - if (dev_handle->split_flow_id && - !dev_handle->is_meter_flow_id) - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], - dev_handle->split_flow_id); -} - static int flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, @@ -3337,24 +3420,92 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; - flow_mreg_split_qrss_release(dev, flow); MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); fops->destroy(dev, flow); } +/** + * Flow driver find RSS policy tbl API. This abstracts calling driver + * specific functions. Parent flow (rte_flow) should have driver + * type (drv_type). It will find the RSS policy table that has the rss_desc. + * + * @param[in] dev + * Pointer to Ethernet device. 
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] policy
+ *   Pointer to meter policy table.
+ * @param[in] rss_desc
+ *   Pointer to rss_desc.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct mlx5_flow_meter_policy *policy,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+	const struct mlx5_flow_driver_ops *fops;
+	enum mlx5_flow_drv_type type = flow->drv_type;
+
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	fops = flow_get_drv_ops(type);
+	return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc);
+}
+
+/**
+ * Flow driver color tag rule API. This abstracts calling driver
+ * specific functions. Parent flow (rte_flow) should have driver
+ * type (drv_type). It will create the color tag rules in hierarchy meter.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] src_port
+ *   The src port this extra rule should use.
+ * @param[in] item
+ *   The src port id match item.
+ * @param[out] error
+ *   Pointer to error structure.
+ */
+static int
+flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct mlx5_flow_meter_info *fm,
+		int32_t src_port,
+		const struct rte_flow_item *item,
+		struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+	enum mlx5_flow_drv_type type = flow->drv_type;
+
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	fops = flow_get_drv_ops(type);
+	return fops->meter_hierarchy_rule_create(dev, fm,
+						 src_port, item, error);
+}
+
 /**
  * Get RSS action from the action list.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] actions
  *   Pointer to the list of actions.
  *
  * @return
  *   Pointer to the RSS action if exists, else return NULL.
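 *
 * The RSS configuration does not have to appear directly in the list:
 * a METER action whose (possibly hierarchical) non-default policy
 * terminates in RSS also yields that policy's green-color RSS
 * configuration. Illustrative only:
 *
 * @code
 * const struct rte_flow_action_meter mtr = { .mtr_id = 1 };
 * const struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &mtr },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // flow_get_rss_action(dev, actions) then returns
 * // policy->act_cnt[RTE_COLOR_GREEN].rss->conf for meter 1.
 * @endcode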
*/ static const struct rte_flow_action_rss* -flow_get_rss_action(const struct rte_flow_action actions[]) +flow_get_rss_action(struct rte_eth_dev *dev, + const struct rte_flow_action actions[]) { + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = NULL; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -3372,6 +3523,31 @@ flow_get_rss_action(const struct rte_flow_action actions[]) rss = act->conf; break; } + case RTE_FLOW_ACTION_TYPE_METER: + { + uint32_t mtr_idx; + struct mlx5_flow_meter_info *fm; + struct mlx5_flow_meter_policy *policy; + const struct rte_flow_action_meter *mtr = actions->conf; + + fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx); + if (fm && !fm->def_policy) { + policy = mlx5_flow_meter_policy_find(dev, + fm->policy_id, NULL); + MLX5_ASSERT(policy); + if (policy->is_hierarchy) { + policy = + mlx5_flow_meter_hierarchy_get_final_policy(dev, + policy); + if (!policy) + return NULL; + } + if (policy->is_rss) + rss = + policy->act_cnt[RTE_COLOR_GREEN].rss->conf; + } + break; + } default: break; } @@ -3489,6 +3665,12 @@ flow_action_handles_translate(struct rte_eth_dev *dev, translated[handle->index].conf = &shared_rss->origin; break; + case MLX5_INDIRECT_ACTION_TYPE_COUNT: + translated[handle->index].type = + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COUNT; + translated[handle->index].conf = (void *)(uintptr_t)idx; + break; case MLX5_INDIRECT_ACTION_TYPE_AGE: if (priv->sh->flow_hit_aso_en) { translated[handle->index].type = @@ -3499,6 +3681,15 @@ flow_action_handles_translate(struct rte_eth_dev *dev, break; } /* Fall-through */ + case MLX5_INDIRECT_ACTION_TYPE_CT: + if (priv->sh->ct_aso_en) { + translated[handle->index].type = + RTE_FLOW_ACTION_TYPE_CONNTRACK; + translated[handle->index].conf = + (void *)(uintptr_t)idx; + break; + } + /* Fall-through */ default: mlx5_free(translated); return rte_flow_error_set @@ -3670,13 +3861,75 @@ flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], return actions_n + 1; } +/** + * Check if the action will change packet. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] type + * action type. + * + * @return + * true if action will change packet, false otherwise. 
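+ *
+ * FLAG and MARK are the borderline cases: they only count as a packet
+ * change when extended metadata is enabled (dv_xmeta_en !=
+ * MLX5_XMETA_MODE_LEGACY), because they are then implemented through
+ * metadata register writes rather than the legacy flow tag.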
+ */ +static bool flow_check_modify_action_type(struct rte_eth_dev *dev, + enum rte_flow_action_type type) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + switch (type) { + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + case RTE_FLOW_ACTION_TYPE_SET_TTL: + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + case RTE_FLOW_ACTION_TYPE_SET_META: + case RTE_FLOW_ACTION_TYPE_SET_TAG: + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: + return true; + case RTE_FLOW_ACTION_TYPE_FLAG: + case RTE_FLOW_ACTION_TYPE_MARK: + if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + return true; + else + return false; + default: + return false; + } +} + /** * Check meter action from the action list. * + * @param dev + * Pointer to Ethernet device. * @param[in] actions * Pointer to the list of actions. * @param[out] has_mtr * Pointer to the meter exist flag. + * @param[out] has_modify + * Pointer to the flag showing there's packet change action. * @param[out] meter_id * Pointer to the meter id. * @@ -3684,9 +3937,9 @@ flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], * Total number of actions. */ static int -flow_check_meter_action(const struct rte_flow_action actions[], - bool *has_mtr, - uint32_t *meter_id) +flow_check_meter_action(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + bool *has_mtr, bool *has_modify, uint32_t *meter_id) { const struct rte_flow_action_meter *mtr = NULL; int actions_n = 0; @@ -3703,6 +3956,9 @@ flow_check_meter_action(const struct rte_flow_action actions[], default: break; } + if (!*has_mtr) + *has_modify |= flow_check_modify_action_type(dev, + actions->type); actions_n++; } /* Count RTE_FLOW_ACTION_TYPE_END. */ @@ -3789,14 +4045,14 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, /* Declare flow create/destroy prototype in advance. */ static uint32_t -flow_list_create(struct rte_eth_dev *dev, uint32_t *list, +flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], bool external, struct rte_flow_error *error); static void -flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, +flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, uint32_t flow_idx); int @@ -3918,8 +4174,8 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, * be applied, removed, deleted in ardbitrary order * by list traversing. 
*/ - mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, - actions, false, error); + mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, + &attr, items, actions, false, error); if (!mcp_res->rix_flow) { mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx); return NULL; @@ -3981,7 +4237,7 @@ flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry) struct mlx5_priv *priv = dev->data->dev_private; MLX5_ASSERT(mcp_res->rix_flow); - flow_list_destroy(dev, NULL, mcp_res->rix_flow); + flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); } @@ -4342,9 +4598,126 @@ flow_create_split_inner(struct rte_eth_dev *dev, dev_flow->handle->mark = 1; if (sub_flow) *sub_flow = dev_flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + dev_flow->dv.table_id = flow_split_info->table_id; +#endif return flow_drv_translate(dev, dev_flow, attr, items, actions, error); } +/** + * Get the sub policy of a meter. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param wks + * Pointer to thread flow work space. + * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Pointer to the meter sub policy, NULL otherwise and rte_errno is set. + */ +static struct mlx5_flow_meter_sub_policy * +get_meter_sub_policy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct mlx5_flow_workspace *wks, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error) +{ + struct mlx5_flow_meter_policy *policy; + struct mlx5_flow_meter_policy *final_policy; + struct mlx5_flow_meter_sub_policy *sub_policy = NULL; + + policy = wks->policy; + final_policy = policy->is_hierarchy ? wks->final_policy : policy; + if (final_policy->is_rss || final_policy->is_queue) { + struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS]; + struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0}; + uint32_t i; + + /** + * This is a tmp dev_flow, + * no need to register any matcher for it in translate. + */ + wks->skip_matcher_reg = 1; + for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { + struct mlx5_flow dev_flow = {0}; + struct mlx5_flow_handle dev_handle = { {0} }; + + if (final_policy->is_rss) { + const void *rss_act = + final_policy->act_cnt[i].rss->conf; + struct rte_flow_action rss_actions[2] = { + [0] = { + .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = rss_act + }, + [1] = { + .type = RTE_FLOW_ACTION_TYPE_END, + .conf = NULL + } + }; + + dev_flow.handle = &dev_handle; + dev_flow.ingress = attr->ingress; + dev_flow.flow = flow; + dev_flow.external = 0; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + dev_flow.dv.transfer = attr->transfer; +#endif + /** + * Translate RSS action to get rss hash fields. + */ + if (flow_drv_translate(dev, &dev_flow, attr, + items, rss_actions, error)) + goto exit; + rss_desc_v[i] = wks->rss_desc; + rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc_v[i].hash_fields = + dev_flow.hash_fields; + rss_desc_v[i].queue_num = + rss_desc_v[i].hash_fields ? + rss_desc_v[i].queue_num : 1; + rss_desc_v[i].tunnel = + !!(dev_flow.handle->layers & + MLX5_FLOW_LAYER_TUNNEL); + } else { + /* This is queue action. 
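+				 * A queue policy is folded into the same
+				 * rss_desc shape: no hash key or fields and
+				 * a single queue, so the sub-policy prepare
+				 * call below serves RSS and queue policies
+				 * through one path.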
*/ + rss_desc_v[i] = wks->rss_desc; + rss_desc_v[i].key_len = 0; + rss_desc_v[i].hash_fields = 0; + rss_desc_v[i].queue = + &final_policy->act_cnt[i].queue; + rss_desc_v[i].queue_num = 1; + } + rss_desc[i] = &rss_desc_v[i]; + } + sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev, + flow, policy, rss_desc); + } else { + enum mlx5_meter_domain mtr_domain = + attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : + attr->egress ? MLX5_MTR_DOMAIN_EGRESS : + MLX5_MTR_DOMAIN_INGRESS; + sub_policy = policy->sub_policys[mtr_domain][0]; + } + if (!sub_policy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to get meter sub-policy."); + goto exit; + } +exit: + return sub_policy; +} + /** * Split the meter flow. * @@ -4361,8 +4734,10 @@ flow_create_split_inner(struct rte_eth_dev *dev, * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. - * @param[in] fm - * Pointer to flow meter structure. + * @param wks + * Pointer to thread flow work space. + * @param[in] attr + * Flow rule attributes. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[out] sfx_items @@ -4373,24 +4748,29 @@ flow_create_split_inner(struct rte_eth_dev *dev, * Suffix flow actions. * @param[out] actions_pre * Prefix flow actions. + * @param[out] mtr_flow_id + * Pointer to meter flow id. * @param[out] error * Perform verbose error reporting if not NULL. * * @return - * The flow id, 0 otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static uint32_t +static int flow_meter_split_prep(struct rte_eth_dev *dev, struct rte_flow *flow, - struct mlx5_flow_meter_info *fm, + struct mlx5_flow_workspace *wks, + const struct rte_flow_attr *attr, const struct rte_flow_item items[], struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], struct rte_flow_action actions_pre[], + uint32_t *mtr_flow_id, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_info *fm = wks->fm; struct rte_flow_action *tag_action = NULL; struct rte_flow_item *tag_item; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -4399,7 +4779,12 @@ flow_meter_split_prep(struct rte_eth_dev *dev, struct mlx5_rte_flow_item_tag *tag_item_spec; struct mlx5_rte_flow_item_tag *tag_item_mask; uint32_t tag_id = 0; - bool copy_vlan = false; + struct rte_flow_item *vlan_item_dst = NULL; + const struct rte_flow_item *vlan_item_src = NULL; + struct rte_flow_action *hw_mtr_action; + struct rte_flow_action *action_pre_head = NULL; + int32_t flow_src_port = priv->representor_id; + bool mtr_first; uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0; uint8_t mtr_reg_bits = priv->mtr_reg_share ? MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS; @@ -4408,92 +4793,168 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint8_t flow_id_bits = 0; int shift; + /* Prepare the suffix subflow items. 
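+	 *
+	 * The meter split yields two subflows: the prefix keeps the
+	 * original pattern and runs the meter (plus a tag write and, for
+	 * ASO meters, an explicit jump added below), while the suffix
+	 * matches the tag register and carries the remaining fate
+	 * actions. Port-id and VLAN items are replicated into the suffix
+	 * pattern here so it still matches the right source port and
+	 * VLAN context.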
*/ + tag_item = sfx_items++; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + struct mlx5_priv *port_priv; + const struct rte_flow_item_port_id *pid_v; + int item_type = items->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_PORT_ID: + pid_v = items->spec; + MLX5_ASSERT(pid_v); + port_priv = mlx5_port_to_eswitch_info(pid_v->id, false); + if (!port_priv) + return rte_flow_error_set(error, + rte_errno, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + pid_v, + "Failed to get port info."); + flow_src_port = port_priv->representor_id; + if (!fm->def_policy && wks->policy->is_hierarchy && + flow_src_port != priv->representor_id) { + if (flow_drv_mtr_hierarchy_rule_create(dev, + flow, fm, + flow_src_port, + items, + error)) + return -rte_errno; + } + memcpy(sfx_items, items, sizeof(*sfx_items)); + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + /* Determine if copy vlan item below. */ + vlan_item_src = items; + vlan_item_dst = sfx_items++; + vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; + break; + default: + break; + } + } + sfx_items->type = RTE_FLOW_ITEM_TYPE_END; + sfx_items++; + mtr_first = priv->sh->meter_aso_en && + (attr->egress || (attr->transfer && flow_src_port != UINT16_MAX)); + /* For ASO meter, meter must be before tag in TX direction. */ + if (mtr_first) { + action_pre_head = actions_pre++; + /* Leave space for tag action. */ + tag_action = actions_pre++; + } /* Prepare the actions for prefix and suffix flow. */ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - struct rte_flow_action **action_cur = NULL; + struct rte_flow_action *action_cur = NULL; switch (actions->type) { case RTE_FLOW_ACTION_TYPE_METER: - /* Add the extra tag action first. */ - tag_action = actions_pre++; - action_cur = &actions_pre; + if (mtr_first) { + action_cur = action_pre_head; + } else { + /* Leave space for tag action. */ + tag_action = actions_pre++; + action_cur = actions_pre++; + } break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - action_cur = &actions_pre; + action_cur = actions_pre++; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) - action_cur = &actions_pre; + action_cur = actions_pre++; break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: raw_decap = actions->conf; if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) - action_cur = &actions_pre; + action_cur = actions_pre++; break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: - copy_vlan = true; + if (vlan_item_dst && vlan_item_src) { + memcpy(vlan_item_dst, vlan_item_src, + sizeof(*vlan_item_dst)); + /* + * Convert to internal match item, it is used + * for vlan push and set vid. + */ + vlan_item_dst->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_VLAN; + } break; default: break; } if (!action_cur) - action_cur = &actions_sfx; - memcpy(*action_cur, actions, sizeof(struct rte_flow_action)); - (*action_cur)++; + action_cur = (fm->def_policy) ? + actions_sfx++ : actions_pre++; + memcpy(action_cur, actions, sizeof(struct rte_flow_action)); } /* Add end action to the actions. */ actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; + if (priv->sh->meter_aso_en) { + /** + * For ASO meter, need to add an extra jump action explicitly, + * to jump from meter to policer table. 
+ */ + struct mlx5_flow_meter_sub_policy *sub_policy; + struct mlx5_flow_tbl_data_entry *tbl_data; + + if (!fm->def_policy) { + sub_policy = get_meter_sub_policy(dev, flow, wks, + attr, items, error); + if (!sub_policy) + return -rte_errno; + } else { + enum mlx5_meter_domain mtr_domain = + attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : + attr->egress ? MLX5_MTR_DOMAIN_EGRESS : + MLX5_MTR_DOMAIN_INGRESS; + + sub_policy = + &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy; + } + tbl_data = container_of(sub_policy->tbl_rsc, + struct mlx5_flow_tbl_data_entry, tbl); + hw_mtr_action = actions_pre++; + hw_mtr_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_JUMP; + hw_mtr_action->conf = tbl_data->jump.action; + } actions_pre->type = RTE_FLOW_ACTION_TYPE_END; actions_pre++; - mlx5_ipool_malloc(fm->flow_ipool, &tag_id); - if (!tag_id) + if (!tag_action) return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to allocate meter flow id."); - flow_id = tag_id - 1; - flow_id_bits = MLX5_REG_BITS - __builtin_clz(flow_id); - flow_id_bits = flow_id_bits ? flow_id_bits : 1; - if ((flow_id_bits + priv->max_mtr_bits) > mtr_reg_bits) { - mlx5_ipool_free(fm->flow_ipool, tag_id); - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Meter flow id exceeds max limit."); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No tag action space."); + if (!mtr_flow_id) { + tag_action->type = RTE_FLOW_ACTION_TYPE_VOID; + goto exit; } - if (flow_id_bits > priv->max_mtr_flow_bits) - priv->max_mtr_flow_bits = flow_id_bits; - /* Prepare the suffix subflow items. */ - tag_item = sfx_items++; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - int item_type = items->type; - - switch (item_type) { - case RTE_FLOW_ITEM_TYPE_PORT_ID: - memcpy(sfx_items, items, sizeof(*sfx_items)); - sfx_items++; - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - if (copy_vlan) { - memcpy(sfx_items, items, sizeof(*sfx_items)); - /* - * Convert to internal match item, it is used - * for vlan push and set vid. - */ - sfx_items->type = (enum rte_flow_item_type) - MLX5_RTE_FLOW_ITEM_TYPE_VLAN; - sfx_items++; - } - break; - default: - break; + /* Only default-policy Meter creates mtr flow id. */ + if (fm->def_policy) { + mlx5_ipool_malloc(fm->flow_ipool, &tag_id); + if (!tag_id) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to allocate meter flow id."); + flow_id = tag_id - 1; + flow_id_bits = (!flow_id) ? 1 : + (MLX5_REG_BITS - __builtin_clz(flow_id)); + if ((flow_id_bits + priv->sh->mtrmng->max_mtr_bits) > + mtr_reg_bits) { + mlx5_ipool_free(fm->flow_ipool, tag_id); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter flow id exceeds max limit."); } + if (flow_id_bits > priv->sh->mtrmng->max_mtr_flow_bits) + priv->sh->mtrmng->max_mtr_flow_bits = flow_id_bits; } - sfx_items->type = RTE_FLOW_ITEM_TYPE_END; - sfx_items++; /* Build tag actions and items for meter_id/meter flow_id. 
*/ - assert(tag_action); set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre; tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; tag_item_mask = tag_item_spec + 1; @@ -4511,8 +4972,9 @@ flow_meter_split_prep(struct rte_eth_dev *dev, */ for (shift = 0; shift < flow_id_bits; shift++) flow_id_reversed = (flow_id_reversed << 1) | - ((flow_id >> shift) & 0x1); - set_tag->data |= flow_id_reversed << (mtr_reg_bits - flow_id_bits); + ((flow_id >> shift) & 0x1); + set_tag->data |= + flow_id_reversed << (mtr_reg_bits - flow_id_bits); tag_item_spec->id = set_tag->id; tag_item_spec->data = set_tag->data << mtr_id_offset; tag_item_mask->data = UINT32_MAX << mtr_id_offset; @@ -4524,7 +4986,10 @@ flow_meter_split_prep(struct rte_eth_dev *dev, tag_item->spec = tag_item_spec; tag_item->last = NULL; tag_item->mask = tag_item_mask; - return tag_id; +exit: + if (mtr_flow_id) + *mtr_flow_id = tag_id; + return 0; } /** @@ -4790,6 +5255,7 @@ flow_check_match_action(const struct rte_flow_action actions[], case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: case RTE_FLOW_ACTION_TYPE_RAW_DECAP: case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: + case RTE_FLOW_ACTION_TYPE_METER: if (fdb_mirror) *modify_after_mirror = 1; break; @@ -5047,8 +5513,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, if (qrss) { /* Check if it is in meter suffix table. */ mtr_sfx = attr->group == (attr->transfer ? - (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : - MLX5_FLOW_TABLE_LEVEL_SUFFIX); + (MLX5_FLOW_TABLE_LEVEL_METER - 1) : + MLX5_FLOW_TABLE_LEVEL_METER); /* * Q/RSS action on NIC Rx should be split in order to pass by * the mreg copy table (RX_CP_TBL) and then it jumps to the @@ -5196,6 +5662,57 @@ exit: return ret; } +/** + * Create meter internal drop flow with the original pattern. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[in] flow_split_info + * Pointer to flow split info structure. + * @param[in] fm + * Pointer to flow meter structure. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @return + * 0 on success, negative value otherwise + */ +static uint32_t +flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + struct mlx5_flow_split_info *flow_split_info, + struct mlx5_flow_meter_info *fm, + struct rte_flow_error *error) +{ + struct mlx5_flow *dev_flow = NULL; + struct rte_flow_attr drop_attr = *attr; + struct rte_flow_action drop_actions[3]; + struct mlx5_flow_split_info drop_split_info = *flow_split_info; + + MLX5_ASSERT(fm->drop_cnt); + drop_actions[0].type = + (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_COUNT; + drop_actions[0].conf = (void *)(uintptr_t)fm->drop_cnt; + drop_actions[1].type = RTE_FLOW_ACTION_TYPE_DROP; + drop_actions[1].conf = NULL; + drop_actions[2].type = RTE_FLOW_ACTION_TYPE_END; + drop_actions[2].conf = NULL; + drop_split_info.external = false; + drop_split_info.skip_scale |= 1 << MLX5_SCALE_FLOW_GROUP_BIT; + drop_split_info.table_id = MLX5_MTR_TABLE_ID_DROP; + drop_attr.group = MLX5_FLOW_TABLE_LEVEL_METER; + return flow_create_split_inner(dev, flow, &dev_flow, + &drop_attr, items, drop_actions, + &drop_split_info, error); +} + /** * The splitting for meter feature. 
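 *
 * For meters with a non-default policy the suffix flow can be elided
 * entirely: unless some action modifies the packet, a drop counter is
 * attached, or the policy is hierarchical, the meter id no longer has
 * to be carried in a register past the policy tables (see set_mtr_reg
 * below).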
* @@ -5240,18 +5757,22 @@ flow_create_split_meter(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow = NULL; struct rte_flow_attr sfx_attr = *attr; struct mlx5_flow_meter_info *fm = NULL; + uint8_t skip_scale_restore; bool has_mtr = false; - uint32_t meter_id; + bool has_modify = false; + bool set_mtr_reg = true; + bool is_mtr_hierarchy = false; + uint32_t meter_id = 0; uint32_t mtr_idx = 0; - uint32_t mtr_tag_id = 0; + uint32_t mtr_flow_id = 0; size_t act_size; size_t item_size; int actions_n = 0; int ret = 0; if (priv->mtr_en) - actions_n = flow_check_meter_action(actions, &has_mtr, - &meter_id); + actions_n = flow_check_meter_action(dev, actions, &has_mtr, + &has_modify, &meter_id); if (has_mtr) { if (flow->meter) { fm = flow_dv_meter_find_by_idx(priv, flow->meter); @@ -5271,11 +5792,40 @@ flow_create_split_meter(struct rte_eth_dev *dev, return -rte_errno; flow->meter = mtr_idx; } + MLX5_ASSERT(wks); wks->fm = fm; - /* The prefix actions: meter, decap, encap, tag, end. */ - act_size = sizeof(struct rte_flow_action) * (actions_n + 5) + + if (!fm->def_policy) { + wks->policy = mlx5_flow_meter_policy_find(dev, + fm->policy_id, + NULL); + MLX5_ASSERT(wks->policy); + if (wks->policy->is_hierarchy) { + wks->final_policy = + mlx5_flow_meter_hierarchy_get_final_policy(dev, + wks->policy); + if (!wks->final_policy) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Failed to find terminal policy of hierarchy."); + is_mtr_hierarchy = true; + } + } + /* + * If it isn't default-policy Meter, and + * 1. There's no action in flow to change + * packet (modify/encap/decap etc.), OR + * 2. No drop count needed for this meter. + * 3. It's not meter hierarchy. + * Then no need to use regC to save meter id anymore. + */ + if (!fm->def_policy && !is_mtr_hierarchy && + (!has_modify || !fm->drop_cnt)) + set_mtr_reg = false; + /* Prefix actions: meter, decap, encap, tag, jump, end. */ + act_size = sizeof(struct rte_flow_action) * (actions_n + 6) + sizeof(struct mlx5_rte_flow_action_set_tag); - /* The suffix items: tag, vlan, port id, end. */ + /* Suffix items: tag, vlan, port id, end. */ #define METER_SUFFIX_ITEM 4 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + sizeof(struct mlx5_rte_flow_item_tag) * 2; @@ -5288,34 +5838,56 @@ flow_create_split_meter(struct rte_eth_dev *dev, "meter flow"); sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); - pre_actions = sfx_actions + actions_n; - mtr_tag_id = flow_meter_split_prep(dev, flow, fm, items, - sfx_items, actions, - sfx_actions, pre_actions, - error); - if (!mtr_tag_id) { + /* There's no suffix flow for meter of non-default policy. */ + if (!fm->def_policy) + pre_actions = sfx_actions + 1; + else + pre_actions = sfx_actions + actions_n; + ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr, + items, sfx_items, actions, + sfx_actions, pre_actions, + (set_mtr_reg ? &mtr_flow_id : NULL), + error); + if (ret) { ret = -rte_errno; goto exit; } /* Add the prefix subflow. 
*/
 	flow_split_info->prefix_mark = 0;
+	skip_scale_restore = flow_split_info->skip_scale;
+	flow_split_info->skip_scale |=
+		1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
 	ret = flow_create_split_inner(dev, flow, &dev_flow,
 				      attr, items, pre_actions,
 				      flow_split_info, error);
+	flow_split_info->skip_scale = skip_scale_restore;
 	if (ret) {
-		mlx5_ipool_free(fm->flow_ipool, mtr_tag_id);
+		if (mtr_flow_id)
+			mlx5_ipool_free(fm->flow_ipool, mtr_flow_id);
 		ret = -rte_errno;
 		goto exit;
 	}
-	dev_flow->handle->split_flow_id = mtr_tag_id;
-	dev_flow->handle->is_meter_flow_id = 1;
+	if (mtr_flow_id) {
+		dev_flow->handle->split_flow_id = mtr_flow_id;
+		dev_flow->handle->is_meter_flow_id = 1;
+	}
+	if (!fm->def_policy) {
+		if (!set_mtr_reg && fm->drop_cnt)
+			ret =
+			flow_meter_create_drop_flow_with_org_pattern(dev, flow,
+					&sfx_attr, items,
+					flow_split_info,
+					fm, error);
+		goto exit;
+	}
 	/* Setting the sfx group attr. */
 	sfx_attr.group = sfx_attr.transfer ?
-			 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
-			  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+			 (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
+			  MLX5_FLOW_TABLE_LEVEL_METER;
 	flow_split_info->prefix_layers =
 			flow_get_prefix_layer_flags(dev_flow);
 	flow_split_info->prefix_mark = dev_flow->handle->mark;
+	flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
 	}
 	/* Add the prefix subflow. */
 	ret = flow_create_split_metadata(dev, flow,
@@ -5412,8 +5984,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL, "no memory to split "
 					  "sample flow");
-	/* The representor_id is -1 for uplink. */
-	fdb_tx = (attr->transfer && priv->representor_id != -1);
+	/* The representor_id is UINT16_MAX for uplink. */
+	fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX);
 	/*
 	 * When reg_c_preserve is set, metadata registers Cx preserve
 	 * their value even through packet duplication.
@@ -5459,8 +6031,7 @@
 				      struct mlx5_flow_tbl_data_entry, tbl);
 		sfx_attr.group = sfx_attr.transfer ?
-					(sfx_tbl_data->table_id - 1) :
-					 sfx_tbl_data->table_id;
+				 (sfx_tbl_data->level - 1) : sfx_tbl_data->level;
 	} else {
 		MLX5_ASSERT(attr->transfer);
 		sfx_attr.group = jump_table;
@@ -5538,22 +6109,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,
 	return ret;
 }
 
-static struct mlx5_flow_tunnel *
-flow_tunnel_from_rule(struct rte_eth_dev *dev,
-		      const struct rte_flow_attr *attr,
-		      const struct rte_flow_item items[],
-		      const struct rte_flow_action actions[])
+static inline struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(const struct mlx5_flow *flow)
 {
 	struct mlx5_flow_tunnel *tunnel;
 
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wcast-qual"
-	if (is_flow_tunnel_match_rule(dev, attr, items, actions))
-		tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
-	else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
-		tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
-	else
-		tunnel = NULL;
+	tunnel = (typeof(tunnel))flow->tunnel;
 #pragma GCC diagnostic pop
 
 	return tunnel;
@@ -5614,7 +6177,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
  *   A flow index on success, 0 otherwise and rte_errno is set.
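 *
 * Note that flows are no longer linked on a per-port list: the
 * returned index identifies the flow inside the per-type ipool
 * (priv->flows[type], e.g. MLX5_FLOW_TYPE_GEN or MLX5_FLOW_TYPE_CTL),
 * which is also how flow_list_destroy() and mlx5_flow_list_flush()
 * look it up.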
*/ static uint32_t -flow_list_create(struct rte_eth_dev *dev, uint32_t *list, +flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action original_actions[], @@ -5660,7 +6223,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, .skip_scale = 0, .flow_idx = 0, .prefix_mark = 0, - .prefix_layers = 0 + .prefix_layers = 0, + .table_id = 0 }; int ret; @@ -5681,7 +6245,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, external, hairpin_flow, error); if (ret < 0) goto error_before_hairpin_split; - flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); + flow = mlx5_ipool_zmalloc(priv->flows[type], &idx); if (!flow) { rte_errno = ENOMEM; goto error_before_hairpin_split; @@ -5703,7 +6267,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); /* RSS Action only works on NIC RX domain */ if (attr->ingress && !attr->transfer) - rss = flow_get_rss_action(p_actions_rx); + rss = flow_get_rss_action(dev, p_actions_rx); if (rss) { if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) return 0; @@ -5725,6 +6289,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, mlx5_support_expansion, graph_root); MLX5_ASSERT(ret > 0 && (unsigned int)ret < sizeof(expand_buffer.buffer)); + if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) { + for (i = 0; i < buf->entries; ++i) + mlx5_dbg__print_pattern(buf->entry[i].pattern); + } } else { buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; @@ -5747,12 +6315,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, error); if (ret < 0) goto error; - if (is_flow_tunnel_steer_rule(dev, attr, - buf->entry[i].pattern, - p_actions_rx)) { + if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) { ret = flow_tunnel_add_default_miss(dev, flow, attr, p_actions_rx, idx, + wks->flows[0].tunnel, &default_miss_ctx, error); if (ret < 0) { @@ -5808,15 +6375,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, if (ret < 0) goto error; } - if (list) { - rte_spinlock_lock(&priv->flow_list_lock); - ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, - flow, next); - rte_spinlock_unlock(&priv->flow_list_lock); - } + flow->type = type; flow_rxq_flags_set(dev, flow); rte_free(translated_actions); - tunnel = flow_tunnel_from_rule(dev, attr, items, actions); + tunnel = flow_tunnel_from_rule(wks->flows); if (tunnel) { flow->tunnel = 1; flow->tunnel_id = tunnel->tunnel_id; @@ -5835,7 +6397,7 @@ error: mlx5_ipool_get (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); + mlx5_ipool_free(priv->flows[type], idx); rte_errno = ret; /* Restore rte_errno. 
*/
 	ret = rte_errno;
 	rte_errno = ret;
@@ -5887,10 +6449,9 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
-	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
 
-	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
 						   &attr, &pattern,
 						   actions, false, &error);
 }
 
@@ -5942,8 +6503,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-
 	/*
 	 * If the device is not started yet, it is not allowed to create a
 	 * flow from application. PMD default flows and traffic control flows
@@ -5959,8 +6518,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		return NULL;
 	}
 
-	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
-				  attr, items, actions, true, error);
+	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
+						   attr, items, actions,
+						   true, error);
 }
 
 /**
@@ -5968,24 +6528,19 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list. If this parameter NULL,
- *   there is no flow removal from the list. Be noted that as
- *   flow is add to the indexed list, memory of the indexed
- *   list points to maybe changed as flow destroyed.
 * @param[in] flow_idx
 *   Index of flow to destroy.
 */
 static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		  uint32_t flow_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
-					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
+	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
 
 	if (!flow)
 		return;
+	MLX5_ASSERT(flow->type == type);
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
@@ -5993,12 +6548,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
 	flow_drv_destroy(dev, flow);
-	if (list) {
-		rte_spinlock_lock(&priv->flow_list_lock);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
-			     flow_idx, flow, next);
-		rte_spinlock_unlock(&priv->flow_list_lock);
-	}
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
 	flow_mreg_del_copy_action(dev, flow);
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
+	mlx5_ipool_free(priv->flows[type], flow_idx);
 }
 
 /**
@@ -6016,18 +6565,21 @@
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list.
+ * @param type
+ *   Flow type to be flushed.
  * @param active
  *   If flushing is called actively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+		     bool active)
 {
-	uint32_t num_flushed = 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t num_flushed = 0, fidx = 1;
+	struct rte_flow *flow;
 
-	while (*list) {
-		flow_list_destroy(dev, list, *list);
+	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
+		flow_list_destroy(dev, type, fidx);
 		num_flushed++;
 	}
 	if (active) {
@@ -6199,18 +6751,19 @@ mlx5_flow_pop_thread_workspace(void)
  * @return the number of flows not released.
*/ int -mlx5_flow_verify(struct rte_eth_dev *dev) +mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; - uint32_t idx; - int ret = 0; + uint32_t idx = 0; + int ret = 0, i; - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, - flow, next) { - DRV_LOG(DEBUG, "port %u flow %p still referenced", - dev->data->port_id, (void *)flow); - ++ret; + for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { + MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) { + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); + ret++; + } } return ret; } @@ -6230,7 +6783,6 @@ int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue) { - struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .egress = 1, .priority = 0, @@ -6263,8 +6815,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; actions[0].conf = &jump; actions[1].type = RTE_FLOW_ACTION_TYPE_END; - flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, false, &error); if (!flow_idx) { DRV_LOG(DEBUG, "Failed to create ctrl flow: rte_errno(%d)," @@ -6353,8 +6905,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, false, &error); if (!flow_idx) return -rte_errno; return 0; @@ -6395,7 +6947,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_lacp_miss(struct rte_eth_dev *dev) { - struct mlx5_priv *priv = dev->data->dev_private; /* * The LACP matching is done by only using ether type since using * a multicast dst mac causes kernel to give low priority to this flow. @@ -6429,8 +6980,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev) }, }; struct rte_flow_error error; - uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, + false, &error); if (!flow_idx) return -rte_errno; @@ -6448,9 +7000,8 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { - struct mlx5_priv *priv = dev->data->dev_private; - - flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); + flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, + (uintptr_t)(void *)flow); return 0; } @@ -6464,9 +7015,7 @@ int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error __rte_unused) { - struct mlx5_priv *priv = dev->data->dev_private; - - mlx5_flow_list_flush(dev, &priv->flows, false); + mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false); return 0; } @@ -6517,8 +7066,7 @@ flow_drv_query(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct mlx5_flow_driver_ops *fops; - struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_RTE_FLOW], + struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], flow_idx); enum mlx5_flow_drv_type ftype; @@ -6576,21 +7124,42 @@ mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused, } /** - * Create the needed meter and suffix tables. + * Validate meter policy actions. 
+ * Dispatcher for action type specific validation. * * @param[in] dev - * Pointer to Ethernet device. + * Pointer to the Ethernet device structure. + * @param[in] action + * The meter policy action object to validate. + * @param[in] attr + * Attributes of flow to determine steering domain. + * @param[out] is_rss + * Is RSS or not. + * @param[out] domain_bitmap + * Domain bitmap. + * @param[out] is_def_policy + * Is default policy or not. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. * * @return - * Pointer to table set on success, NULL otherwise. + * 0 on success, otherwise negative errno value. */ -struct mlx5_meter_domains_infos * -mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev) +int +mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev, + const struct rte_flow_action *actions[RTE_COLORS], + struct rte_flow_attr *attr, + bool *is_rss, + uint8_t *domain_bitmap, + bool *is_def_policy, + struct rte_mtr_error *error) { const struct mlx5_flow_driver_ops *fops; fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); - return fops->create_mtr_tbls(dev); + return fops->validate_mtr_acts(dev, actions, attr, + is_rss, domain_bitmap, is_def_policy, error); } /** @@ -6598,66 +7167,195 @@ mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev) * * @param[in] dev * Pointer to Ethernet device. - * @param[in] tbl - * Pointer to the meter table set. + * @param[in] mtr_policy + * Meter policy struct. + */ +void +mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + fops->destroy_mtr_acts(dev, mtr_policy); +} + +/** + * Create policy action, lock free, + * (mutex should be acquired by caller). + * Dispatcher for action type specific call. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] mtr_policy + * Meter policy struct. + * @param[in] action + * Action specification used to create meter actions. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. * * @return - * 0 on success. + * 0 on success, otherwise negative errno value. */ int -mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, - struct mlx5_meter_domains_infos *tbls) +mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy, + const struct rte_flow_action *actions[RTE_COLORS], + struct rte_mtr_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->create_mtr_acts(dev, mtr_policy, actions, error); +} + +/** + * Create policy rules, lock free, + * (mutex should be acquired by caller). + * Dispatcher for action type specific call. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] mtr_policy + * Meter policy struct. + * + * @return + * 0 on success, -1 otherwise. + */ +int +mlx5_flow_create_policy_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->create_policy_rules(dev, mtr_policy); +} + +/** + * Destroy policy rules, lock free, + * (mutex should be acquired by caller). + * Dispatcher for action type specific call. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] mtr_policy + * Meter policy struct. 
+ */
+void
+mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
+			     struct mlx5_flow_meter_policy *mtr_policy)
 {
 	const struct mlx5_flow_driver_ops *fops;
 
 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-	return fops->destroy_mtr_tbls(dev, tbls);
+	fops->destroy_policy_rules(dev, mtr_policy);
 }
 
 /**
- * Prepare policer rules.
+ * Destroy the default policy table set.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+	fops->destroy_def_policy(dev);
+}
+
+/**
+ * Create the default policy table set.
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
- * @param[in] fm
- *   Pointer to flow meter structure.
- * @param[in] attr
- *   Pointer to flow attributes.
  *
  * @return
  *   0 on success, -1 otherwise.
  */
 int
-mlx5_flow_prepare_policer_rules(struct rte_eth_dev *dev,
-				struct mlx5_flow_meter_info *fm,
-				const struct rte_flow_attr *attr)
+mlx5_flow_create_def_policy(struct rte_eth_dev *dev)
 {
 	const struct mlx5_flow_driver_ops *fops;
 
 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-	return fops->prepare_policer_rules(dev, fm, attr);
+	return fops->create_def_policy(dev);
 }
 
 /**
- * Destroy policer rules.
+ * Create the needed meter and suffix tables.
  *
- * @param[in] fm
- *   Pointer to flow meter structure.
- * @param[in] attr
- *   Pointer to flow attributes.
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ * @param[in] mtr_idx
+ *   Meter index.
+ * @param[in] domain_bitmap
+ *   Domain bitmap.
  *
  * @return
  *   0 on success, -1 otherwise.
  */
 int
-mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
-				struct mlx5_flow_meter_info *fm,
-				const struct rte_flow_attr *attr)
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
+			struct mlx5_flow_meter_info *fm,
+			uint32_t mtr_idx,
+			uint8_t domain_bitmap)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+	return fops->create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap);
+}
+
+/**
+ * Destroy the meter table set.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to flow meter structure.
+ */
+void
+mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+			   struct mlx5_flow_meter_info *fm)
 {
 	const struct mlx5_flow_driver_ops *fops;
 
 	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
-	return fops->destroy_policer_rules(dev, fm, attr);
+	fops->destroy_mtr_tbls(dev, fm);
+}
+
+/**
+ * Destroy the global meter drop table.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+	fops->destroy_mtr_drop_tbls(dev);
+}
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
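+ *
+ * (Editorial note: sub-policy tables cache per-queue RSS objects, so
+ * this hook is expected to run when the Rx queue set changes and the
+ * cached sub-policies must be dropped and rebuilt.)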
+ */ +void +mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + fops->destroy_sub_policy_with_rxq(dev, mtr_policy); } /** @@ -6825,14 +7523,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) mlx5_free(mem); return -rte_errno; } + memset(&mkey_attr, 0, sizeof(mkey_attr)); mkey_attr.addr = (uintptr_t)mem; mkey_attr.size = size; mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); mkey_attr.pd = sh->pdn; - mkey_attr.log_entity_size = 0; - mkey_attr.pg_access = 0; - mkey_attr.klm_array = NULL; - mkey_attr.klm_num = 0; mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); @@ -7237,20 +7932,171 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) if (!config->dv_flow_en) break; /* Create internal flow, validation skips copy action. */ - flow_idx = flow_list_create(dev, NULL, &attr, items, - actions, false, &error); - flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, + items, actions, false, &error); + flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], flow_idx); if (!flow) continue; config->flow_mreg_c[n++] = idx; - flow_list_destroy(dev, NULL, flow_idx); + flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); } for (; n < MLX5_MREG_C_NUM; ++n) config->flow_mreg_c[n] = REG_NON; return 0; } +int +save_dump_file(const uint8_t *data, uint32_t size, + uint32_t type, uint32_t id, void *arg, FILE *file) +{ + char line[BUF_SIZE]; + uint32_t out = 0; + uint32_t k; + uint32_t actions_num; + struct rte_flow_query_count *count; + + memset(line, 0, BUF_SIZE); + switch (type) { + case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: + actions_num = *(uint32_t *)(arg); + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,", + type, id, actions_num); + break; + case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,", + type, id); + break; + case DR_DUMP_REC_TYPE_PMD_COUNTER: + count = (struct rte_flow_query_count *)arg; + fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type, + id, count->hits, count->bytes); + return 0; + default: + return -1; + } + + for (k = 0; k < size; k++) { + /* Make sure we do not overrun the line buffer length. 
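+		 * Each dumped byte takes two hex characters, and
+		 * snprintf() adds a terminating NUL, hence the 4-byte
+		 * headroom before the record is truncated.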
*/
+		if (out >= BUF_SIZE - 4) {
+			line[out] = '\0';
+			break;
+		}
+		out += snprintf(line + out, BUF_SIZE - out, "%02x",
+				(data[k]) & 0xff);
+	}
+	fprintf(file, "%s\n", line);
+	return 0;
+}
+
+int
+mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
+	struct rte_flow_query_count *count, struct rte_flow_error *error)
+{
+	struct rte_flow_action action[2];
+	enum mlx5_flow_drv_type ftype;
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (!flow) {
+		return rte_flow_error_set(error, ENOENT,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"invalid flow handle");
+	}
+	action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
+	action[1].type = RTE_FLOW_ACTION_TYPE_END;
+	if (flow->counter) {
+		memset(count, 0, sizeof(struct rte_flow_query_count));
+		ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
+		MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
+						ftype < MLX5_FLOW_TYPE_MAX);
+		fops = flow_get_drv_ops(ftype);
+		return fops->query(dev, flow, action, count, error);
+	}
+	return -1;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Dump flow ipool data to file
+ *
+ * @param[in] dev
+ *   The pointer to Ethernet device.
+ * @param[in] flow
+ *   The pointer to the flow.
+ * @param[in] file
+ *   A pointer to a file for output.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
+	struct rte_flow *flow, FILE *file,
+	struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+	uint32_t handle_idx;
+	struct mlx5_flow_handle *dh;
+	struct rte_flow_query_count count;
+	uint32_t actions_num;
+	const uint8_t *data;
+	size_t size;
+	uint32_t id;
+	uint32_t type;
+
+	if (!flow) {
+		return rte_flow_error_set(error, ENOENT,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"invalid flow handle");
+	}
+	handle_idx = flow->dev_handles;
+	while (handle_idx) {
+		dh = mlx5_ipool_get(priv->sh->ipool
+				[MLX5_IPOOL_MLX5_FLOW], handle_idx);
+		/* A missing handle cannot be skipped over, stop the dump. */
+		if (!dh)
+			break;
+		handle_idx = dh->next.next;
+		id = (uint32_t)(uintptr_t)dh->drv_flow;
+
+		/* query counter */
+		type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+		if (!mlx5_flow_query_counter(dev, flow, &count, error))
+			save_dump_file(NULL, 0, type,
+					id, (void *)&count, file);
+
+		/* Get modify_hdr and encap_decap buf from ipools.
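+		 * The encap/decap buffer is stored behind an ipool index
+		 * and must be translated back to a pointer; a resource
+		 * that is absent simply produces no dump record.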
*/
+		encap_decap = NULL;
+		modify_hdr = dh->dvh.modify_hdr;
+
+		if (dh->dvh.rix_encap_decap) {
+			encap_decap = mlx5_ipool_get(priv->sh->ipool
+						[MLX5_IPOOL_DECAP_ENCAP],
+						dh->dvh.rix_encap_decap);
+		}
+		if (modify_hdr) {
+			data = (const uint8_t *)modify_hdr->actions;
+			size = (size_t)(modify_hdr->actions_num) * 8;
+			actions_num = modify_hdr->actions_num;
+			type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+			save_dump_file(data, size, type, id,
+					(void *)(&actions_num), file);
+		}
+		if (encap_decap) {
+			data = encap_decap->buf;
+			size = encap_decap->size;
+			type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+			save_dump_file(data, size, type,
+						id, NULL, file);
+		}
+	}
+	return 0;
+}
+#endif
+
 /**
  * Dump flow raw hw data to file
  *
@@ -7275,6 +8121,9 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	int ret;
 	struct mlx5_flow_handle *dh;
 	struct rte_flow *flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint32_t idx;
+#endif
 
 	if (!priv->config.dv_flow_en) {
 		if (fputs("device dv flow disabled\n", file) <= 0)
@@ -7283,16 +8132,24 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	}
 
 	/* dump all */
-	if (!flow_idx)
+	if (!flow_idx) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
+			mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
 					sh->rx_domain,
 					sh->tx_domain, file);
+	}
 	/* dump one */
-	flow = mlx5_ipool_get(priv->sh->ipool
-			[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
+			(uintptr_t)(void *)flow_idx);
 	if (!flow)
 		return -ENOENT;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
 	handle_idx = flow->dev_handles;
 	while (handle_idx) {
 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
@@ -7599,6 +8456,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 	return ret;
 }
 
+const struct mlx5_flow_tunnel *
+mlx5_get_tof(const struct rte_flow_item *item,
+	     const struct rte_flow_action *action,
+	     enum mlx5_tof_rule_type *rule_type)
+{
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == (typeof(item->type))
+				  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
+			*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
+			return flow_items_to_tunnel(item);
+		}
+	}
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		if (action->type == (typeof(action->type))
+				    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
+			*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
+			return flow_actions_to_tunnel(action);
+		}
+	}
+	return NULL;
+}
+
 /**
  * tunnel offload functionality is defined for DV environment only
  */
@@ -7629,13 +8508,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_action *app_actions,
 			uint32_t flow_idx,
+			const struct mlx5_flow_tunnel *tunnel,
 			struct tunnel_default_miss_ctx *ctx,
 			struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow *dev_flow;
 	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
 	const struct rte_flow_item miss_items[2] = {
 		{
 			.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -7721,6 +8600,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	dev_flow->flow = flow;
 	dev_flow->external = true;
 	dev_flow->tunnel = tunnel;
+	dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
 	/* Subflow object was created, we must include one in the list.
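 	 * Keeping the handle on flow->dev_handles lets the regular
 	 * flow destroy path release this implicit miss rule as well.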
*/ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, dev_flow->handle, next); @@ -7746,10 +8626,12 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) union tunnel_offload_mark mbits = { .val = mark }; union mlx5_flow_tbl_key table_key = { { - .table_id = tunnel_id_to_flow_tbl(mbits.table_id), + .level = tunnel_id_to_flow_tbl(mbits.table_id), + .id = 0, + .reserved = 0, .dummy = 0, - .domain = !!mbits.transfer, - .direction = 0, + .is_fdb = !!mbits.transfer, + .is_egress = 0, } }; he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); @@ -8332,6 +9214,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev, __rte_unused const struct rte_flow_attr *attr, __rte_unused const struct rte_flow_action *actions, __rte_unused uint32_t flow_idx, + __rte_unused const struct mlx5_flow_tunnel *tunnel, __rte_unused struct tunnel_default_miss_ctx *ctx, __rte_unused struct rte_flow_error *error) { @@ -8369,3 +9252,22 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, { } #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ + +static void +mlx5_dbg__print_pattern(const struct rte_flow_item *item) +{ + int ret; + struct rte_flow_error error; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + char *item_name; + ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name, + sizeof(item_name), + (void *)(uintptr_t)item->type, &error); + if (ret > 0) + printf("%s ", item_name); + else + printf("%d\n", (int)item->type); + } + printf("END\n"); +}
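
/*
 * Editorial usage sketch (not part of the patch above): the debug helper
 * mlx5_dbg__print_pattern() walks an item array until
 * RTE_FLOW_ITEM_TYPE_END and resolves each name via rte_flow_conv(), so
 * a minimal caller would look like:
 *
 *	static void
 *	dump_sample_pattern(void)
 *	{
 *		const struct rte_flow_item items[] = {
 *			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *			{ .type = RTE_FLOW_ITEM_TYPE_END },
 *		};
 *
 *		mlx5_dbg__print_pattern(items);
 *	}
 *
 * Expected stdout: "eth ipv4 udp END".
 */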