diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c914a7120c..cdb40c0756 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
@@ -98,7 +99,7 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
 	 */
 	uint64_t node_flags;
 	/**<
@@ -132,6 +133,12 @@ struct mlx5_flow_expand_rss {
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
 
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+		unsigned int item_idx,
+		const struct mlx5_flow_expand_node graph[],
+		const struct mlx5_flow_expand_node *node);
+
 static bool
 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 {
@@ -158,105 +165,143 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 	return false;
 }
 
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case RTE_VXLAN_GPE_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_VXLAN_GPE_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	case RTE_VXLAN_GPE_TYPE_ETH:
+		type = RTE_FLOW_ITEM_TYPE_ETH;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case IPPROTO_UDP:
+		type = RTE_FLOW_ITEM_TYPE_UDP;
+		break;
+	case IPPROTO_TCP:
+		type = RTE_FLOW_ITEM_TYPE_TCP;
+		break;
+	case IPPROTO_IP:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case IPPROTO_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+			    rte_be16_t type_mask, bool is_tunnel)
+{
+	enum rte_flow_item_type type;
+
+	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+	case RTE_ETHER_TYPE_TEB:
+		type = is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_VLAN:
+		type = !is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_ETHER_TYPE_IPV4: + type = RTE_FLOW_ITEM_TYPE_IPV4; + break; + case RTE_ETHER_TYPE_IPV6: + type = RTE_FLOW_ITEM_TYPE_IPV6; + break; + default: + type = RTE_FLOW_ITEM_TYPE_END; + } + return type; +} + static enum rte_flow_item_type mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) { - enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID; - uint16_t ether_type = 0; - uint16_t ether_type_m; - uint8_t ip_next_proto = 0; - uint8_t ip_next_proto_m; +#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \ + do { \ + const void *m = item->mask; \ + const void *s = item->spec; \ + mask = m ? \ + ((const struct rte_flow_item_##type *)m)->fld : \ + rte_flow_item_##type##_mask.fld; \ + spec = ((const struct rte_flow_item_##type *)s)->fld; \ + } while (0) + + enum rte_flow_item_type ret; + uint16_t spec, mask; if (item == NULL || item->spec == NULL) - return ret; + return RTE_FLOW_ITEM_TYPE_VOID; switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_eth *) - (item->mask))->type; - else - ether_type_m = rte_flow_item_eth_mask.type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_eth *) - (item->spec))->type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(eth, type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_vlan *) - (item->mask))->inner_type; - else - ether_type_m = rte_flow_item_vlan_mask.inner_type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_vlan *) - (item->spec))->inner_type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) - (item->mask))->hdr.next_proto_id; - else - ip_next_proto_m = - rte_flow_item_ipv4_mask.hdr.next_proto_id; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv4 *) - (item->spec))->hdr.next_proto_id; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) - (item->mask))->hdr.proto; - else - 
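/*
 * [Editor's illustration -- not part of the patch] For readers tracing the
 * refactor above: MLX5_XSET_ITEM_MASK_SPEC(eth, type) expands, after
 * preprocessing, to roughly the block below, i.e. the same "use the item's
 * mask if given, else the default item mask" selection that the removed
 * if/else ladders open-coded separately for every item type.
 *
 *	do {
 *		const void *m = item->mask;
 *		const void *s = item->spec;
 *		mask = m ?
 *		       ((const struct rte_flow_item_eth *)m)->type :
 *		       rte_flow_item_eth_mask.type;
 *		spec = ((const struct rte_flow_item_eth *)s)->type;
 *	} while (0);
 */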
ip_next_proto_m = - rte_flow_item_ipv6_mask.hdr.proto; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv6 *) - (item->spec))->hdr.proto; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol); + ret = mlx5_nsh_proto_to_item_type(spec, mask); break; default: ret = RTE_FLOW_ITEM_TYPE_VOID; break; } return ret; +#undef MLX5_XSET_ITEM_MASK_SPEC } static const int * @@ -292,7 +337,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], * @param[in] pattern * User flow pattern. * @param[in] types - * RSS types to expand (see ETH_RSS_* definitions). + * RSS types to expand (see RTE_ETH_RSS_* definitions). * @param[in] graph * Input graph to expand @p pattern according to @p types. * @param[in] graph_root_index @@ -318,7 +363,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, const int *stack[MLX5_RSS_EXP_ELT_N]; int stack_pos = 0; struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; - unsigned int i; + unsigned int i, item_idx, last_expand_item_idx = 0; size_t lsize; size_t user_pattern_size = 0; void *addr = NULL; @@ -326,7 +371,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, struct rte_flow_item missed_item; int missed = 0; int elt = 0; - const struct rte_flow_item *last_item = NULL; + const struct rte_flow_item *last_expand_item = NULL; memset(&missed_item, 0, sizeof(missed_item)); lsize = offsetof(struct mlx5_flow_expand_rss, entry) + @@ -337,12 +382,15 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; buf->entries = 0; addr = buf->entry[0].pattern; - for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + for (item = pattern, item_idx = 0; + item->type != RTE_FLOW_ITEM_TYPE_END; + item++, item_idx++) { if (!mlx5_flow_is_rss_expandable_item(item)) { user_pattern_size += sizeof(*item); continue; } - last_item = item; + last_expand_item = item; + last_expand_item_idx = item_idx; i = 0; while (node->next && node->next[i]) { next = &graph[node->next[i]]; @@ -374,7 +422,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, * Check if the last valid item has spec set, need complete pattern, * and the pattern can be used for expansion. */ - missed_item.type = mlx5_flow_expand_rss_item_complete(last_item); + missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item); if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) { /* Item type END indicates expansion is not required. 
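/*
 * [Editor's illustration -- not part of the patch] A minimal caller sketch
 * for the reworked mlx5_flow_expand_rss_item_complete(): given a fully
 * masked ether type of RTE_ETHER_TYPE_IPV4, the helper chain reports IPv4
 * as the next header to append during RSS expansion.
 *
 *	const struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	const struct rte_flow_item_eth eth_mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 *	mlx5_flow_expand_rss_item_complete(&item) == RTE_FLOW_ITEM_TYPE_IPV4
 */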
*/ return lsize; @@ -382,13 +430,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { next = NULL; missed = 1; - for (i = 0; node->next && node->next[i]; ++i) { + i = 0; + while (node->next && node->next[i]) { next = &graph[node->next[i]]; if (next->type == missed_item.type) { flow_items[0].type = missed_item.type; flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; break; } + if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { + node = next; + i = 0; + } else { + ++i; + } next = NULL; } } @@ -409,6 +464,9 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, addr = (void *)(((uintptr_t)addr) + elt * sizeof(*item)); } + } else if (last_expand_item != NULL) { + node = mlx5_flow_expand_rss_adjust_node(pattern, + last_expand_item_idx, graph, node); } memset(flow_items, 0, sizeof(flow_items)); next_node = mlx5_flow_expand_rss_skip_explicit(graph, @@ -495,6 +553,8 @@ enum mlx5_expansion { MLX5_EXPANSION_OUTER_IPV6_UDP, MLX5_EXPANSION_OUTER_IPV6_TCP, MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_STD_VXLAN, + MLX5_EXPANSION_L3_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_GRE, MLX5_EXPANSION_NVGRE, @@ -509,7 +569,8 @@ enum mlx5_expansion { MLX5_EXPANSION_IPV6_UDP, MLX5_EXPANSION_IPV6_TCP, MLX5_EXPANSION_IPV6_FRAG_EXT, - MLX5_EXPANSION_GTP + MLX5_EXPANSION_GTP, + MLX5_EXPANSION_GENEVE, }; /** Supported expansion of items. */ @@ -546,20 +607,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_IPV4, - .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER, + .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER, }, [MLX5_EXPANSION_OUTER_IPV4_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, }, [MLX5_EXPANSION_OUTER_IPV4_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_OUTER_IPV6] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT @@ -570,20 +632,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_GRE, MLX5_EXPANSION_NVGRE), .type = RTE_FLOW_ITEM_TYPE_IPV6, - .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER, + .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_OUTER_IPV6_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, }, [MLX5_EXPANSION_OUTER_IPV6_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, }, [MLX5_EXPANSION_VXLAN] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, @@ -591,6 +654,15 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VXLAN, }, + [MLX5_EXPANSION_STD_VXLAN] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, + [MLX5_EXPANSION_L3_VXLAN] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + 
MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, [MLX5_EXPANSION_VXLAN_GPE] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, MLX5_EXPANSION_IPV4, @@ -598,7 +670,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, [MLX5_EXPANSION_GRE] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, MLX5_EXPANSION_GRE_KEY, MLX5_EXPANSION_MPLS), @@ -636,32 +709,32 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, MLX5_EXPANSION_IPV4_TCP), .type = RTE_FLOW_ITEM_TYPE_IPV4, - .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER, + .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER, }, [MLX5_EXPANSION_IPV4_UDP] = { .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, }, [MLX5_EXPANSION_IPV4_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_IPV6] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, MLX5_EXPANSION_IPV6_TCP, MLX5_EXPANSION_IPV6_FRAG_EXT), .type = RTE_FLOW_ITEM_TYPE_IPV6, - .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER, + .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_IPV6_UDP] = { .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, }, [MLX5_EXPANSION_IPV6_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, }, [MLX5_EXPANSION_IPV6_FRAG_EXT] = { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, @@ -671,6 +744,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_GTP, }, + [MLX5_EXPANSION_GENEVE] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + }, }; static struct rte_flow_action_handle * @@ -718,6 +797,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, struct rte_flow_error *err); +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error); +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -737,6 +824,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, + .flex_item_create = mlx5_flow_flex_item_create, + .flex_item_release = mlx5_flow_flex_item_release, }; /* Tunnel information. 
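/*
 * [Editor's illustration -- not part of the patch] Reading the table
 * above: each node lists the item types that RSS expansion may append
 * after it. With MLX5_EXPANSION_GENEVE now reachable from both
 * MLX5_EXPANSION_OUTER_IPV4_UDP and MLX5_EXPANSION_OUTER_IPV6_UDP, a user
 * pattern such as
 *
 *	eth / ipv4 / udp / geneve / end
 *
 * combined with an RSS action hashing on inner headers can expand into
 * subflows like
 *
 *	eth / ipv4 / udp / geneve / eth / ipv4 / end
 *	eth / ipv4 / udp / geneve / eth / ipv6 / end
 *
 * following the .next edges of the graph, just as VXLAN patterns always
 * could.
 */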
*/ @@ -813,7 +902,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; enum modify_reg start_reg; bool skip_mtr_reg = false; @@ -867,6 +956,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, case MLX5_MTR_COLOR: case MLX5_ASO_FLOW_HIT: case MLX5_ASO_CONNTRACK: + case MLX5_SAMPLE_ID: /* All features use the same REG_C. */ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; @@ -891,7 +981,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); - if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON) + if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); @@ -901,21 +991,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, * If the available index REG_C_y >= REG_C_x, skip the * color register. */ - if (skip_mtr_reg && config->flow_mreg_c + if (skip_mtr_reg && priv->sh->flow_mreg_c [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { if (id >= (uint32_t)(REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); - if (config->flow_mreg_c + if (priv->sh->flow_mreg_c [id + 1 + start_reg - REG_C_0] != REG_NON) - return config->flow_mreg_c + return priv->sh->flow_mreg_c [id + 1 + start_reg - REG_C_0]; return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); } - return config->flow_mreg_c[id + start_reg - REG_C_0]; + return priv->sh->flow_mreg_c[id + start_reg - REG_C_0]; } MLX5_ASSERT(false); return rte_flow_error_set(error, EINVAL, @@ -936,7 +1026,6 @@ bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; /* * Having available reg_c can be regarded inclusively as supporting @@ -946,7 +1035,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) * - reg_c's are preserved across different domain (FDB and NIC) on * packet loopback by flow lookup miss. */ - return config->flow_mreg_c[2] != REG_NON; + return priv->sh->flow_mreg_c[2] != REG_NON; } /** @@ -967,7 +1056,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; if (!attr->group && !attr->transfer) - return priv->config.flow_prio - 2; + return priv->sh->flow_max_priority - 2; return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1; } @@ -980,21 +1069,26 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev, * Pointer to device flow rule attributes. * @param[in] subpriority * The priority based on the items. + * @param[in] external + * Flow is user flow. * @return * The matcher priority of the flow. 
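/*
 * [Editor's illustration -- not part of the patch] The lookups above now
 * read priv->sh->flow_mreg_c[], the REG_C availability map probed once per
 * device (see mlx5_flow_discover_mreg_c() further below) and kept in the
 * shared context, so all ports of one device agree on the register layout.
 * A condensed, hypothetical sketch of the tag lookup -- the helper name is
 * invented, error handling and the meter-color special cases are elided,
 * and REG_C_2 is taken as the base register purely for illustration:
 *
 *	static int example_reg_for_tag(struct mlx5_priv *priv, uint32_t id)
 *	{
 *		enum modify_reg start_reg = REG_C_2;
 *
 *		if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
 *			return -ENOTSUP;
 *		return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
 *	}
 */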
*/ uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - uint32_t subpriority) + uint32_t subpriority, bool external) { uint16_t priority = (uint16_t)attr->priority; struct mlx5_priv *priv = dev->data->dev_private; if (!attr->group && !attr->transfer) { if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) - priority = priv->config.flow_prio - 1; + priority = priv->sh->flow_max_priority - 1; return mlx5_os_flow_adjust_priority(dev, priority, subpriority); + } else if (!external && attr->transfer && attr->group == 0 && + attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) { + return (priv->sh->flow_max_priority - 1) * 3; } if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) priority = MLX5_NON_ROOT_FLOW_MAX_PRIO; @@ -1072,7 +1166,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * @param[in] tunnel * 1 when the hash field is for a tunnel item. * @param[in] layer_types - * ETH_RSS_* types. + * RTE_ETH_RSS_* types. * @param[in] hash_fields * Item hash fields. * @@ -1128,7 +1222,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device * flow. * * @param[in] dev @@ -1141,7 +1235,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1166,24 +1259,16 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, return; for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; /* * To support metadata register copy on Tx loopback, * this must be always enabled (metadata may arive * from other port - not from local flows only. 
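/*
 * [Editor's illustration -- not part of the patch] Worked example for the
 * FDB branch added above, assuming 16 flow priorities were discovered at
 * start-up (flow_max_priority == 16): an internal (non-user) transfer rule
 * in group 0 that requests the lowest priority now gets matcher priority
 * (16 - 1) * 3 = 45. The factor of 3 corresponds to the subpriority slots
 * reserved per priority level in the root table, so these internal rules
 * sort behind anything a user rule there can request.
 */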
*/ - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n++; - } if (tunnel) { unsigned int j; @@ -1201,6 +1286,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, } } +static void +flow_rxq_mark_flag_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (priv->mark_enabled) + return; + LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { + rxq_ctrl->rxq.mark = 1; + } + priv->mark_enabled = 1; +} + /** * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow * @@ -1215,7 +1314,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) struct mlx5_priv *priv = dev->data->dev_private; uint32_t handle_idx; struct mlx5_flow_handle *dev_handle; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); + if (wks->mark) + flow_rxq_mark_flag_set(dev); SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dev_handle, next) flow_drv_rxq_flags_set(dev, dev_handle); @@ -1235,7 +1338,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1261,19 +1363,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(dev->data->dev_started); for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); - - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->flow_mark_n--; - rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; - } + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; if (tunnel) { unsigned int j; @@ -1325,19 +1419,17 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); unsigned int j; - if (!(*priv->rxqs)[i]) + if (rxq == NULL || rxq->ctrl == NULL) continue; - rxq_ctrl = container_of((*priv->rxqs)[i], - struct mlx5_rxq_ctrl, rxq); - rxq_ctrl->flow_mark_n = 0; - rxq_ctrl->rxq.mark = 0; + rxq->ctrl->rxq.mark = 0; for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) - rxq_ctrl->flow_tunnels_n[j] = 0; - rxq_ctrl->rxq.tunnel = 0; + rxq->ctrl->flow_tunnels_n[j] = 0; + rxq->ctrl->rxq.tunnel = 0; } + priv->mark_enabled = 0; } /** @@ -1350,13 +1442,15 @@ void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *data; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - if (!(*priv->rxqs)[i]) + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); + struct mlx5_rxq_data *data; + + if (rxq == NULL || rxq->ctrl == NULL) continue; - data = (*priv->rxqs)[i]; + data = &rxq->ctrl->rxq; if (!rte_flow_dynf_metadata_avail()) { data->dynf_meta = 0; data->flow_meta_mask = 0; @@ -1547,7 +1641,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 
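/*
 * [Editor's illustration -- not part of the patch] The flow_mark_n
 * per-queue reference counting removed above is replaced by a one-way,
 * per-port latch (priv->mark_enabled). A sketch of the new life cycle:
 *
 *	// first rule carrying a MARK/FLAG action is created
 *	if (wks->mark)
 *		flow_rxq_mark_flag_set(dev);	// rxq.mark = 1 on all queues
 *
 *	// mark stays on for later rules, no refcount to maintain
 *
 *	// device stop
 *	flow_rxq_flags_clear(dev);	// rxq.mark = 0, mark_enabled = 0
 */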
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &queue->index,
 					  "queue index out of range");
-	if (!(*priv->rxqs)[queue->index])
+	if (mlx5_rxq_get(dev, queue->index) == NULL)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &queue->index,
@@ -1560,6 +1654,57 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
 	return 0;
 }
 
+/**
+ * Validate queue numbers for device RSS.
+ *
+ * @param[in] dev
+ *   Configured device.
+ * @param[in] queues
+ *   Array of queue numbers.
+ * @param[in] queues_n
+ *   Size of the @p queues array.
+ * @param[out] error
+ *   On error, filled with a textual error description.
+ * @param[out] queue_idx
+ *   On error, filled with an offending queue index in @p queues array.
+ *
+ * @return
+ *   0 on success, a negative errno code on error.
+ */
+static int
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
+			 const uint16_t *queues, uint32_t queues_n,
+			 const char **error, uint32_t *queue_idx)
+{
+	const struct mlx5_priv *priv = dev->data->dev_private;
+	enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
+	uint32_t i;
+
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev,
+								   queues[i]);
+
+		if (queues[i] >= priv->rxqs_n) {
+			*error = "queue index out of range";
+			*queue_idx = i;
+			return -EINVAL;
+		}
+		if (rxq_ctrl == NULL) {
+			*error = "queue is not configured";
+			*queue_idx = i;
+			return -EINVAL;
+		}
+		if (i == 0)
+			rxq_type = rxq_ctrl->type;
+		if (rxq_type != rxq_ctrl->type) {
+			*error = "combining hairpin and regular RSS queues is not supported";
+			*queue_idx = i;
+			return -ENOTSUP;
+		}
+	}
+	return 0;
+}
+
 /*
  * Validate the rss action.
  *
@@ -1580,8 +1725,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_rss *rss = action->conf;
-	enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
-	unsigned int i;
+	int ret;
+	const char *message;
+	uint32_t queue_idx;
 
 	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
 	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
@@ -1614,7 +1760,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->key_len,
 					  "RSS hash key too large");
-	if (rss->queue_num > priv->config.ind_table_max_size)
+	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->queue_num,
@@ -1625,14 +1771,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
-	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-	    !(rss->types & ETH_RSS_IP))
+	if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & RTE_ETH_RSS_IP))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L3 partial RSS requested but L3 RSS"
 					  " type not specified");
-	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+	if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "L4 partial RSS requested but L4 RSS"
@@ -1645,27 +1791,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "No queues configured");
-	for (i = 0; i != rss->queue_num; ++i) {
-		struct mlx5_rxq_ctrl *rxq_ctrl;
-
-		if (rss->queue[i] >= priv->rxqs_n)
-			
return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue index out of range"); - if (!(*priv->rxqs)[rss->queue[i]]) - return rte_flow_error_set - (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue is not configured"); - rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], - struct mlx5_rxq_ctrl, rxq); - if (i == 0) - rxq_type = rxq_ctrl->type; - if (rxq_type != rxq_ctrl->type) - return rte_flow_error_set - (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], - "combining hairpin and regular RSS queues is not supported"); + ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num, + &message, &queue_idx); + if (ret != 0) { + return rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[queue_idx], message); } return 0; } @@ -1849,7 +1980,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - uint32_t priority_max = priv->config.flow_prio - 1; + uint32_t priority_max = priv->sh->flow_max_priority - 1; if (attributes->group) return rte_flow_error_set(error, ENOTSUP, @@ -1864,7 +1995,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); - if (attributes->transfer && !priv->config.dv_esw_en) + if (attributes->transfer && !priv->sh->config.dv_esw_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, "transfer is not supported"); @@ -2581,7 +2712,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint8_t vni[4]; } id = { .vlan_id = 0, }; - if (!priv->config.l3_vxlan_en) + if (!priv->sh->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN is not enabled by device" @@ -2776,7 +2907,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, const struct rte_flow_item_geneve *mask = item->mask; int ret; uint16_t gbhdr; - uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ? + uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ? 
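/*
 * [Editor's illustration -- not part of the patch] The helper factored out
 * above is deliberately rte_flow-agnostic: it reports failures as a plain
 * (message, queue index) pair so each caller can wrap them in its own
 * reporting. mlx5_validate_action_rss() turns them into an rte_flow_error
 * as shown above, and mlx5_action_handle_attach() further below logs them
 * with DRV_LOG() instead:
 *
 *	ret = mlx5_validate_rss_queues(dev, queues, queues_n,
 *				       &message, &queue_idx);
 *	if (ret != 0)
 *		return rte_flow_error_set(error, -ret,
 *					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 *					  &queues[queue_idx], message);
 */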
MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; const struct rte_flow_item_geneve nic_mask = { .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), @@ -2784,7 +2915,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; - if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) + if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" @@ -2864,10 +2995,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; - struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr; + struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; uint8_t data_max_supported = hca_attr->max_geneve_tlv_option_data_len * 4; - struct mlx5_dev_config *config = &priv->config; const struct rte_flow_item_geneve *geneve_spec; const struct rte_flow_item_geneve *geneve_mask; const struct rte_flow_item_geneve_opt *spec = item->spec; @@ -2891,7 +3021,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Geneve TLV opt length exceeeds the limit (31)"); + "Geneve TLV opt length exceeds the limit (31)"); /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || @@ -2901,11 +3031,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Geneve TLV opt class/type/length masks must be full"); /* Check if length is supported */ if ((uint32_t)spec->option_len > - config->hca_attr.max_geneve_tlv_option_data_len) + hca_attr->max_geneve_tlv_option_data_len) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt length not supported"); - if (config->hca_attr.max_geneve_tlv_options > 1) + if (hca_attr->max_geneve_tlv_options > 1) DRV_LOG(DEBUG, "max_geneve_tlv_options supports more than 1 option"); /* Check GENEVE item preceding. */ @@ -2960,7 +3090,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Data mask is of unsupported size"); } /* Check GENEVE option is supported in NIC. */ - if (!config->hca_attr.geneve_tlv_opt) + if (!hca_attr->geneve_tlv_opt) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt not supported"); @@ -3009,7 +3139,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!priv->config.mpls_en) + if (!priv->sh->dev_cap.mpls_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS not supported or" @@ -3300,11 +3430,11 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) if (type != MLX5_FLOW_TYPE_MAX) return type; /* If no OS specific type - continue with DV/VERBS selection */ - if (attr->transfer && priv->config.dv_esw_en) + if (attr->transfer && priv->sh->config.dv_esw_en) type = MLX5_FLOW_TYPE_DV; if (!attr->transfer) - type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : - MLX5_FLOW_TYPE_VERBS; + type = priv->sh->config.dv_flow_en ? 
MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; return type; } @@ -3571,6 +3701,8 @@ flow_get_rss_action(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = NULL; + struct mlx5_meter_policy_action_container *acg; + struct mlx5_meter_policy_action_container *acy; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -3606,9 +3738,18 @@ flow_get_rss_action(struct rte_eth_dev *dev, if (!policy) return NULL; } - if (policy->is_rss) - rss = - policy->act_cnt[RTE_COLOR_GREEN].rss->conf; + if (policy->is_rss) { + acg = + &policy->act_cnt[RTE_COLOR_GREEN]; + acy = + &policy->act_cnt[RTE_COLOR_YELLOW]; + if (acg->fate_action == + MLX5_FLOW_FATE_SHARED_RSS) + rss = acg->rss->conf; + else if (acy->fate_action == + MLX5_FLOW_FATE_SHARED_RSS) + rss = acy->rss->conf; + } } break; } @@ -3637,8 +3778,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) uint16_t offset = (age_idx >> 16) & UINT16_MAX; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; - struct mlx5_aso_age_pool *pool = mng->pools[pool_idx]; + struct mlx5_aso_age_pool *pool; + rte_rwlock_read_lock(&mng->resize_rwl); + pool = mng->pools[pool_idx]; + rte_rwlock_read_unlock(&mng->resize_rwl); return &pool->actions[offset - 1]; } @@ -3826,7 +3970,7 @@ find_graph_root(uint32_t rss_level) * subflow. * * @param[in] dev_flow - * Pointer the created preifx subflow. + * Pointer the created prefix subflow. * * @return * The layers get from prefix subflow. @@ -3962,7 +4106,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev, return true; case RTE_FLOW_ACTION_TYPE_FLAG: case RTE_FLOW_ACTION_TYPE_MARK: - if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) return true; else return false; @@ -4153,7 +4297,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - /* Fill the register fileds in the flow. */ + /* Fill the register fields in the flow. */ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; @@ -4222,7 +4366,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not - * be applied, removed, deleted in ardbitrary order + * be applied, removed, deleted in arbitrary order * by list traversing. */ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, @@ -4401,8 +4545,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, uint32_t mark_id; /* Check whether extensive metadata feature is engaged. 
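/*
 * [Editor's illustration -- not part of the patch] On the lock taken in
 * flow_aso_age_get_by_idx() above: mng->pools is an array that may be
 * reallocated when the ASO age pool set is resized, so only the array
 * dereference is protected by resize_rwl; the pool object itself is stable
 * once published and may be used after the unlock:
 *
 *	rte_rwlock_read_lock(&mng->resize_rwl);
 *	pool = mng->pools[pool_idx];	// array may move under resize
 *	rte_rwlock_read_unlock(&mng->resize_rwl);
 *	return &pool->actions[offset - 1];	// pool itself is stable
 */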
*/ - if (!priv->config.dv_flow_en || - priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + if (!priv->sh->config.dv_flow_en || + priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; @@ -4461,7 +4605,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; struct mlx5_flow_mreg_copy_resource *mcp_res; const struct rte_flow_action_mark *mark; @@ -4665,6 +4809,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, flow_split_info->flow_idx, error); @@ -4679,12 +4824,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the - * Metadate rxq mark flag to suffix flow as well. + * Metadata rxq mark flag to suffix flow as well. */ if (flow_split_info->prefix_layers) dev_flow->handle->layers = flow_split_info->prefix_layers; - if (flow_split_info->prefix_mark) - dev_flow->handle->mark = 1; + if (flow_split_info->prefix_mark) { + MLX5_ASSERT(wks); + wks->mark = 1; + } if (sub_flow) *sub_flow = dev_flow; #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -4875,6 +5022,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t tag_id = 0; struct rte_flow_item *vlan_item_dst = NULL; const struct rte_flow_item *vlan_item_src = NULL; + const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; int32_t flow_src_port = priv->representor_id; @@ -4999,7 +5147,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, if (!fm->def_policy) { sub_policy = get_meter_sub_policy(dev, flow, wks, - attr, items, error); + attr, orig_items, + error); if (!sub_policy) return -rte_errno; } else { @@ -5120,6 +5269,8 @@ exit: * Pointer to the Q/RSS action. * @param[in] actions_n * Number of original actions. + * @param[in] mtr_sfx + * Check if it is in meter suffix table. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -5132,7 +5283,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, struct rte_flow_action *split_actions, const struct rte_flow_action *actions, const struct rte_flow_action *qrss, - int actions_n, struct rte_flow_error *error) + int actions_n, int mtr_sfx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -5147,15 +5299,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * - Add jump to mreg CP_TBL. * As a result, there will be one more action. */ - ++actions_n; memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); + /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. */ + ++actions_n; set_tag = (void *)(split_actions + actions_n); /* - * If tag action is not set to void(it means we are not the meter - * suffix flow), add the tag action. Since meter suffix flow already - * has the tag added. + * If we are not the meter suffix flow, add the tag action. + * Since meter suffix flow already has the tag added. */ - if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { + if (!mtr_sfx) { /* * Allocate the new subflow ID. 
This one is unique within * device and not shared with representors. Otherwise, @@ -5188,6 +5340,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; + } else { + /* + * If we are the suffix flow of meter, tag already exist. + * Set the QUEUE/RSS action to void. + */ + split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; } /* JUMP action to jump to mreg copy table (CP_TBL). */ jump = (void *)(set_tag + 1); @@ -5219,7 +5377,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx - * The encap action inndex. + * The encap action index. * * @return * 0 on success, negative value otherwise @@ -5484,7 +5642,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); - ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); + ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool @@ -5585,7 +5743,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action *qrss = NULL; struct rte_flow_action *ext_actions = NULL; struct mlx5_flow *dev_flow = NULL; @@ -5642,17 +5800,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "metadata flow"); - /* - * If we are the suffix flow of meter, tag already exist. - * Set the tag action to void. - */ - if (mtr_sfx) - ext_actions[qrss - actions].type = - RTE_FLOW_ACTION_TYPE_VOID; - else - ext_actions[qrss - actions].type = - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action * and appended set tag and jump to register copy table @@ -5660,7 +5807,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * in advance, because it is needed for set tag action. */ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, - qrss, actions_n, error); + qrss, actions_n, + mtr_sfx, error); if (!mtr_sfx && !qrss_id) { ret = -rte_errno; goto exit; @@ -5751,6 +5899,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add suffix subflow to execute Q/RSS. */ flow_split_info->prefix_layers = layers; flow_split_info->prefix_mark = 0; + flow_split_info->table_id = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? items : q_items, q_actions, @@ -5965,7 +6114,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ - flow_split_info->prefix_mark = 0; skip_scale_restore = flow_split_info->skip_scale; flow_split_info->skip_scale |= 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; @@ -5998,7 +6146,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= wks->mark; flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. 
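/*
 * [Editor's illustration -- not part of the patch] Net effect of the
 * reworked flow_mreg_split_qrss_prep() on the action list: the original
 * actions are copied verbatim, the Q/RSS slot is rewritten into the
 * internal TAG action carrying the unique subflow id (or into VOID when
 * this is a meter suffix flow that already carries the tag), and a JUMP to
 * the mreg copy table (CP_TBL) is appended, giving one more action in
 * total. The actual Q/RSS fate is then executed by the suffix subflow
 * installed in CP_TBL.
 */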
*/ @@ -6064,6 +6212,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); #endif size_t act_size; size_t item_size; @@ -6102,7 +6251,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, * When reg_c_preserve is set, metadata registers Cx preserve * their value even through packet duplication. */ - add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve); + add_tag = (!fdb_tx || + priv->sh->cdev->config.hca_attr.reg_c_preserve); if (add_tag) sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); @@ -6150,7 +6300,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + MLX5_ASSERT(wks); + flow_split_info->prefix_mark |= wks->mark; /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. @@ -6388,8 +6539,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, * mlx5_flow_hashfields_adjust() in advance. */ rss_desc->level = rss->level; - /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ - rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; + /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */ + rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types; } flow->dev_handles = 0; if (rss && rss->types) { @@ -6568,6 +6719,80 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) actions, false, &error); } +/** + * Create a dedicated flow rule on e-switch table 1, matches ESW manager + * and sq number, directs all packets to peer vport. + * + * @param dev + * Pointer to Ethernet device. + * @param txq + * Txq index. + * + * @return + * Flow ID on success, 0 otherwise and rte_errno is set. + */ +uint32_t +mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq) +{ + struct rte_flow_attr attr = { + .group = 0, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, + .ingress = 1, + .egress = 0, + .transfer = 1, + }; + struct rte_flow_item_port_id port_spec = { + .id = MLX5_PORT_ESW_MGR, + }; + struct mlx5_rte_flow_item_tx_queue txq_spec = { + .queue = txq, + }; + struct rte_flow_item pattern[] = { + { + .type = RTE_FLOW_ITEM_TYPE_PORT_ID, + .spec = &port_spec, + }, + { + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + .spec = &txq_spec, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action_jump jump = { + .group = 1, + }; + struct rte_flow_action_port_id port = { + .id = dev->data->port_id, + }; + struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_flow_error error; + + /* + * Creates group 0, highest priority jump flow. + * Matches txq to bypass kernel packets. + */ + if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions, + false, &error) == 0) + return 0; + /* Create group 1, lowest priority redirect flow for txq. */ + attr.group = 1; + actions[0].conf = &port; + actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID; + return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, + actions, false, &error); +} + /** * Validate a flow supported by the NIC. 
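/*
 * [Editor's illustration -- not part of the patch] A caller sketch for the
 * new mlx5_flow_create_devx_sq_miss_flow() helper, assuming a representor
 * setup where every Tx queue needs its SQ-miss rule; the loop is
 * illustrative, not the actual call site:
 *
 *	for (i = 0; i != priv->txqs_n; ++i)
 *		if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0)
 *			goto error;	// returns 0 on failure, flow ID else
 */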
* @@ -6615,6 +6840,15 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q creation not supported"); + return NULL; + } /* * If the device is not started yet, it is not allowed to created a * flow from application. PMD default flows and traffic control flows @@ -6680,7 +6914,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. * @param active - * If flushing is called avtively. + * If flushing is called actively. */ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, @@ -6786,8 +7020,7 @@ flow_alloc_thread_workspace(void) data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; return data; err: - if (data->rss_desc.queue) - free(data->rss_desc.queue); + free(data->rss_desc.queue); free(data); return NULL; } @@ -7013,7 +7246,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, if (!priv->reta_idx_n || !priv->rxqs_n) { return 0; } - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; @@ -7112,6 +7345,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q destruction not supported"); flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, (uintptr_t)(void *)flow); return 0; @@ -7209,7 +7449,13 @@ mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; + struct mlx5_priv *priv = dev->data->dev_private; + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q query not supported"); ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, error); if (ret < 0) @@ -7576,14 +7822,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) */ int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, - bool clear, uint64_t *pkts, uint64_t *bytes) + bool clear, uint64_t *pkts, uint64_t *bytes, void **action) { const struct mlx5_flow_driver_ops *fops; struct rte_flow_attr attr = { .transfer = 0 }; if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); - return fops->counter_query(dev, cnt, clear, pkts, bytes); + return fops->counter_query(dev, cnt, clear, pkts, + bytes, action); } DRV_LOG(ERR, "port %u counter query is not supported.", @@ -7604,7 +7851,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, static int mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_devx_mkey_attr mkey_attr; struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; @@ -7614,6 +7860,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) sizeof(struct mlx5_counter_stats_mem_mng); size_t pgsize = rte_mem_page_size(); uint8_t *mem; + int ret; int i; if (pgsize == (size_t)-1) { @@ -7628,23 +7875,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared 
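/*
 * [Editor's illustration -- not part of the patch] The extra out parameter
 * added to mlx5_counter_query() above exposes the underlying DR action
 * handle, which the dump code below uses as a stable 64-bit object id. A
 * usage sketch (cnt_id is illustrative):
 *
 *	uint64_t hits = 0, bytes = 0;
 *	void *dr_action = NULL;
 *
 *	if (mlx5_counter_query(dev, cnt_id, false, &hits, &bytes,
 *			       &dr_action) == 0 && dr_action != NULL)
 *		printf("counter %#x: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
 *		       cnt_id, hits, bytes);
 */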
*sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - mlx5_free(mem); - return -rte_errno; - } - memset(&mkey_attr, 0, sizeof(mkey_attr)); - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->pdn; - mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; - mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_os_umem_dereg(mem_mng->umem); + ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, + sh->cdev->pdn, mem, size, + &mem_mng->wm); + if (ret) { rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -7763,7 +7997,7 @@ mlx5_flow_query_alarm(void *arg) ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, MLX5_COUNTERS_PER_POOL, NULL, NULL, - pool->raw_hw->mem_mng->dm->id, + pool->raw_hw->mem_mng->wm.lkey, (void *)(uintptr_t) pool->raw_hw->data, sh->devx_comp, @@ -7999,13 +8233,12 @@ int mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; enum modify_reg idx; int n = 0; /* reg_c[0] and reg_c[1] are reserved. */ - config->flow_mreg_c[n++] = REG_C_0; - config->flow_mreg_c[n++] = REG_C_1; + priv->sh->flow_mreg_c[n++] = REG_C_0; + priv->sh->flow_mreg_c[n++] = REG_C_1; /* Discover availability of other reg_c's. */ for (idx = REG_C_2; idx <= REG_C_7; ++idx) { struct rte_flow_attr attr = { @@ -8041,7 +8274,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) struct rte_flow *flow; struct rte_flow_error error; - if (!config->dv_flow_en) + if (!priv->sh->config.dv_flow_en) break; /* Create internal flow, validation skips copy action. 
*/ flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, @@ -8050,17 +8283,18 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) flow_idx); if (!flow) continue; - config->flow_mreg_c[n++] = idx; + priv->sh->flow_mreg_c[n++] = idx; flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); } for (; n < MLX5_MREG_C_NUM; ++n) - config->flow_mreg_c[n] = REG_NON; + priv->sh->flow_mreg_c[n] = REG_NON; + priv->sh->metadata_regc_check_flag = 1; return 0; } int save_dump_file(const uint8_t *data, uint32_t size, - uint32_t type, uint32_t id, void *arg, FILE *file) + uint32_t type, uint64_t id, void *arg, FILE *file) { char line[BUF_SIZE]; uint32_t out = 0; @@ -8072,17 +8306,18 @@ save_dump_file(const uint8_t *data, uint32_t size, switch (type) { case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: actions_num = *(uint32_t *)(arg); - out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,", + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,", type, id, actions_num); break; case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: - out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,", + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",", type, id); break; case DR_DUMP_REC_TYPE_PMD_COUNTER: count = (struct rte_flow_query_count *)arg; - fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type, - id, count->hits, count->bytes); + fprintf(file, + "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n", + type, id, count->hits, count->bytes); return 0; default: return -1; @@ -8156,29 +8391,33 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, uint32_t actions_num; const uint8_t *data; size_t size; - uint32_t id; + uint64_t id; uint32_t type; + void *action = NULL; if (!flow) { return rte_flow_error_set(error, ENOENT, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "invalid flow handle"); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "invalid flow handle"); } handle_idx = flow->dev_handles; + /* query counter */ + if (flow->counter && + (!mlx5_counter_query(dev, flow->counter, false, + &count.hits, &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); + } + while (handle_idx) { dh = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_MLX5_FLOW], handle_idx); + [MLX5_IPOOL_MLX5_FLOW], handle_idx); if (!dh) continue; handle_idx = dh->next.next; - id = (uint32_t)(uintptr_t)dh->drv_flow; - - /* query counter */ - type = DR_DUMP_REC_TYPE_PMD_COUNTER; - if (!mlx5_flow_query_counter(dev, flow, &count, error)) - save_dump_file(NULL, 0, type, - id, (void *)&count, file); /* Get modify_hdr and encap_decap buf from ipools. */ encap_decap = NULL; @@ -8192,14 +8431,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, if (modify_hdr) { data = (const uint8_t *)modify_hdr->actions; size = (size_t)(modify_hdr->actions_num) * 8; + id = (uint64_t)(uintptr_t)modify_hdr->action; actions_num = modify_hdr->actions_num; type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; save_dump_file(data, size, type, id, - (void *)(&actions_num), file); + (void *)(&actions_num), file); } if (encap_decap) { data = encap_decap->buf; size = encap_decap->size; + id = (uint64_t)(uintptr_t)encap_decap->action; type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; save_dump_file(data, size, type, id, NULL, file); @@ -8207,6 +8448,117 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, } return 0; } + +/** + * Dump all flow's encap_decap/modify_hdr/counter data to file + * + * @param[in] dev + * The pointer to Ethernet device. 
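/*
 * [Editor's illustration -- not part of the patch] With the record id
 * widened to 64 bits, save_dump_file() emits one text record per object;
 * the shapes below follow directly from the format strings above, where
 * <id> is now the DR object pointer value, unique across the dump:
 *
 *	modify header:   <type>,0x<id>,<actions_num>,<action data...>
 *	packet reformat: <type>,0x<id>,<buffer data...>
 *	counter:         <type>,0x<id>,<hits>,<bytes>
 */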
+ * @param[in] file + * A pointer to a file for output. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * @return + * 0 on success, a negative value otherwise. + */ +static int +mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, + FILE *file, struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_hlist *h; + struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; + struct mlx5_flow_dv_encap_decap_resource *encap_decap; + struct rte_flow_query_count count; + uint32_t actions_num; + const uint8_t *data; + size_t size; + uint64_t id; + uint32_t type; + uint32_t i; + uint32_t j; + struct mlx5_list_inconst *l_inconst; + struct mlx5_list_entry *e; + int lcore_index; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + uint32_t max; + void *action; + + /* encap_decap hlist is lcore_share, get global core cache. */ + i = MLX5_LIST_GLOBAL; + h = sh->encaps_decaps; + if (h) { + for (j = 0; j <= h->mask; j++) { + l_inconst = &h->buckets[j].l; + if (!l_inconst || !l_inconst->cache[i]) + continue; + + e = LIST_FIRST(&l_inconst->cache[i]->h); + while (e) { + encap_decap = + (struct mlx5_flow_dv_encap_decap_resource *)e; + data = encap_decap->buf; + size = encap_decap->size; + id = (uint64_t)(uintptr_t)encap_decap->action; + type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; + save_dump_file(data, size, type, + id, NULL, file); + e = LIST_NEXT(e, next); + } + } + } + + /* get modify_hdr */ + h = sh->modify_cmds; + if (h) { + lcore_index = rte_lcore_index(rte_lcore_id()); + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + rte_spinlock_lock(&h->l_const.lcore_lock); + } + i = lcore_index; + + for (j = 0; j <= h->mask; j++) { + l_inconst = &h->buckets[j].l; + if (!l_inconst || !l_inconst->cache[i]) + continue; + + e = LIST_FIRST(&l_inconst->cache[i]->h); + while (e) { + modify_hdr = + (struct mlx5_flow_dv_modify_hdr_resource *)e; + data = (const uint8_t *)modify_hdr->actions; + size = (size_t)(modify_hdr->actions_num) * 8; + actions_num = modify_hdr->actions_num; + id = (uint64_t)(uintptr_t)modify_hdr->action; + type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; + save_dump_file(data, size, type, id, + (void *)(&actions_num), file); + e = LIST_NEXT(e, next); + } + } + + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&h->l_const.lcore_lock); + } + + /* get counter */ + MLX5_ASSERT(cmng->n_valid <= cmng->n); + max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; + for (j = 1; j <= max; j++) { + action = NULL; + if ((!mlx5_counter_query(dev, j, false, &count.hits, + &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); + } + } + return 0; +} #endif /** @@ -8220,7 +8572,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. * @return - * 0 on success, a nagative value otherwise. + * 0 on success, a negative value otherwise. 
 */
 int
 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
@@ -8233,11 +8585,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	int ret;
 	struct mlx5_flow_handle *dh;
 	struct rte_flow *flow;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	uint32_t idx;
-#endif
 
-	if (!priv->config.dv_flow_en) {
+	if (!sh->config.dv_flow_en) {
 		if (fputs("device dv flow disabled\n", file) <= 0)
 			return -errno;
 		return -ENOTSUP;
@@ -8246,8 +8595,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	/* dump all */
 	if (!flow_idx) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		MLX5_IPOOL_FOREACH(priv->flows[MLX5_FLOW_TYPE_GEN], idx, flow)
-			mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+		if (mlx5_flow_dev_dump_sh_all(dev, file, error))
+			return -EINVAL;
#endif
 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
 					sh->rx_domain,
@@ -8257,7 +8606,7 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
 			(uintptr_t)(void *)flow_idx);
 	if (!flow)
-		return -ENOENT;
+		return -EINVAL;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	mlx5_flow_dev_dump_ipool(dev, flow, file, error);
@@ -8547,6 +8896,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/**
+ * Validate existing indirect actions against current device configuration
+ * and attach them to device resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_attach(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool =
+			priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+	struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+	int ret = 0;
+	uint32_t idx;
+
+	ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+		struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+		const char *message;
+		uint32_t queue_idx;
+
+		ret = mlx5_validate_rss_queues(dev, ind_tbl->queues,
+					       ind_tbl->queues_n,
+					       &message, &queue_idx);
+		if (ret != 0) {
+			DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s",
+				dev->data->port_id, ind_tbl->queues[queue_idx],
+				message);
+			break;
+		}
+	}
+	if (ret != 0)
+		return ret;
+	ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+		struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+		ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
+		if (ret != 0) {
+			DRV_LOG(ERR, "Port %u could not attach "
+				"indirection table obj %p",
+				dev->data->port_id, (void *)ind_tbl);
+			goto error;
+		}
+	}
+	return 0;
+error:
+	shared_rss_last = shared_rss;
+	ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+		struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+		if (shared_rss == shared_rss_last)
+			break;
+		if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
+			DRV_LOG(CRIT, "Port %u could not detach "
+				"indirection table obj %p on rollback",
+				dev->data->port_id, (void *)ind_tbl);
+	}
+	return ret;
+}
+
+/**
+ * Detach indirect actions of the device from its resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_action_handle_detach(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool =
+			priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
+	struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
+	int ret = 0;
+	uint32_t idx;
+
+	ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+		struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+		ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
+		if (ret != 0) {
+			DRV_LOG(ERR, "Port %u could not detach "
+				"indirection table obj %p",
+				dev->data->port_id, (void *)ind_tbl);
+			goto error;
+		}
+	}
+	return 0;
+error:
+	shared_rss_last = shared_rss;
+	ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
+		struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+
+		if (shared_rss == shared_rss_last)
+			break;
+		if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
+			DRV_LOG(CRIT, "Port %u could not attach "
+				"indirection table obj %p on rollback",
+				dev->data->port_id, (void *)ind_tbl);
+	}
+	return ret;
+}
+
 #ifndef HAVE_MLX5DV_DR
 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
 #else
@@ -8591,7 +9050,7 @@ mlx5_get_tof(const struct rte_flow_item *item,
 }
 
 /**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
 */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 __extension__
@@ -8681,7 +9140,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 			NULL, "invalid port configuration");
-	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+	if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
 		ctx->action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		ctx->queue[i] = (*priv->reta_idx)[i];
@@ -9104,30 +9563,37 @@ err:
 	return err;
 }
 
-static inline bool
+static inline int
 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 			  struct rte_flow_tunnel *tunnel,
-			  const char *err_msg)
+			  struct rte_flow_error *error)
 {
-	err_msg = NULL;
-	if (!is_tunnel_offload_active(dev)) {
-		err_msg = "tunnel offload was not activated";
-		goto out;
-	} else if (!tunnel) {
-		err_msg = "no application tunnel";
-		goto out;
-	}
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!priv->sh->config.dv_flow_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "flow DV interface is off");
+	if (!is_tunnel_offload_active(dev))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "tunnel offload was not activated");
+	if (!tunnel)
+		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "no application tunnel");
 	switch (tunnel->type) {
 	default:
-		err_msg = "unsupported tunnel type";
-		goto out;
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "unsupported tunnel type");
 	case RTE_FLOW_ITEM_TYPE_VXLAN:
+	case RTE_FLOW_ITEM_TYPE_GRE:
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
 		break;
 	}
-
-out:
-	return !err_msg;
+	return 0;
 }
 
 static int
@@ -9137,15 +9603,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
 		      uint32_t *num_of_actions,
 		      struct rte_flow_error *error)
 {
-	int ret;
 	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
+	if (ret)
+		return ret;
 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
 	if (ret < 0) {
 		return rte_flow_error_set(error, ret,
@@ -9164,15 +9626,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       uint32_t *num_of_items,
 		       struct rte_flow_error *error)
 {
-	int ret;
 	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+	int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error);
 
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
+	if (ret)
+		return ret;
 	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
 	if (ret < 0) {
 		return rte_flow_error_set(error, ret,
@@ -9275,7 +9733,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 {
 	uint64_t ol_flags = m->ol_flags;
 	const struct mlx5_flow_tbl_data_entry *tble;
-	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+	const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 
 	if (!is_tunnel_offload_active(dev)) {
 		info->flags = 0;
@@ -9398,6 +9856,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item creation unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_create) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return NULL;
+	}
+	return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item release unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_release) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return -rte_errno;
+	}
+	return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
@@ -9416,3 +9913,143 @@ mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 	}
 	printf("END\n");
 }
+
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+	const struct rte_flow_item_udp *spec = udp_item->spec;
+	const struct rte_flow_item_udp *mask = udp_item->mask;
+	uint16_t udp_dport = 0;
+
+	if (spec != NULL) {
+		if (!mask)
+			mask = &rte_flow_item_udp_mask;
+		udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+					     mask->hdr.dst_port);
+	}
+	return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
+
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+		unsigned int item_idx,
+		const struct mlx5_flow_expand_node graph[],
+		const struct mlx5_flow_expand_node *node)
+{
+	const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN &&
+	    node != NULL &&
+	    node->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+		/*
+		 * The expansion node is VXLAN and it is also the last
+		 * expandable item in the pattern, so we need to continue
+		 * expansion of the inner tunnel.
+		 */
+		MLX5_ASSERT(item_idx > 0);
+		prev_item = pattern + item_idx - 1;
+		MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+		if (mlx5_flow_is_std_vxlan_port(prev_item))
+			return &graph[MLX5_EXPANSION_STD_VXLAN];
+		return &graph[MLX5_EXPANSION_L3_VXLAN];
+	}
+	return node;
+}
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+	{ 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/**
+ * Discover the number of available flow priorities.
+ *
+ * @param dev
+ *   Ethernet device.
+ *
+ * @return
+ *   On success, number of available flow priorities.
+ *   On failure, a negative errno-style code and rte_errno is set.
+ */
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
+{
+	static const uint16_t vprio[] = {8, 16};
+	const struct mlx5_priv *priv = dev->data->dev_private;
+	const struct mlx5_flow_driver_ops *fops;
+	enum mlx5_flow_drv_type type;
+	int ret;
+
+	type = mlx5_flow_os_get_type();
+	if (type == MLX5_FLOW_TYPE_MAX) {
+		type = MLX5_FLOW_TYPE_VERBS;
+		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
+			type = MLX5_FLOW_TYPE_DV;
+	}
+	fops = flow_get_drv_ops(type);
+	if (fops->discover_priorities == NULL) {
+		DRV_LOG(ERR, "Priority discovery not supported");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio));
+	if (ret < 0)
+		return ret;
+	switch (ret) {
+	case 8:
+		ret = RTE_DIM(priority_map_3);
+		break;
+	case 16:
+		ret = RTE_DIM(priority_map_5);
+		break;
+	default:
+		rte_errno = ENOTSUP;
+		DRV_LOG(ERR,
+			"port %u maximum priority: %d expected 8/16",
+			dev->data->port_id, ret);
+		return -rte_errno;
+	}
+	DRV_LOG(INFO, "port %u supported flow priorities:"
+		" 0-%d for ingress or egress root table,"
+		" 0-%d for non-root table or transfer root table.",
+		dev->data->port_id, ret - 2,
+		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
+	return ret;
+}
+
+/**
+ * Adjust flow priority based on the highest layer and the request priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ *
+ * @return
+ *   The new priority.
+ */
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+			  uint32_t subpriority)
+{
+	uint32_t res = 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	switch (priv->sh->flow_max_priority) {
+	case RTE_DIM(priority_map_3):
+		res = priority_map_3[priority][subpriority];
+		break;
+	case RTE_DIM(priority_map_5):
+		res = priority_map_5[priority][subpriority];
+		break;
+	}
+	return res;
+}
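
A note on the save_dump_file() changes in the hunks above: the id parameter widens from uint32_t to uint64_t because record ids are now taken from action object pointers (id = (uint64_t)(uintptr_t)action), which do not fit in 32 bits on 64-bit hosts, so the format strings switch to PRIx64. A minimal stand-alone sketch of the same idiom; dump_record() and its arguments are illustrative, not part of the PMD:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Why the dump id must be 64-bit and printed with PRIx64 once it
 * carries a pointer value. Record layout mirrors the counter case
 * above: type,id,hits,bytes. */
static void
dump_record(FILE *file, uint32_t type, const void *action,
	    uint64_t hits, uint64_t bytes)
{
	uint64_t id = (uint64_t)(uintptr_t)action;

	fprintf(file, "%" PRIu32 ",0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n",
		type, id, hits, bytes);
}

int
main(void)
{
	int dummy_action; /* any object address works for the demo */

	dump_record(stdout, 4, &dummy_action, 100, 6400);
	return 0;
}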
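
mlx5_action_handle_attach() above uses a validate-then-attach scheme with rollback: all RSS queues are validated first, then indirection tables are attached one by one, and on failure everything attached before the failing element is detached again (shared_rss_last marks where the rollback walk must stop). A reduced model of that rollback shape; attach() and detach() are hypothetical stand-ins for mlx5_ind_table_obj_attach()/_detach():

#include <stdio.h>

#define NB_OBJS 4

static int attach(int idx) { return idx == 2 ? -1 : 0; } /* fail on 3rd */
static void detach(int idx) { printf("rollback: detach %d\n", idx); }

int
main(void)
{
	int i;
	int failed = -1;

	for (i = 0; i < NB_OBJS; i++) {
		if (attach(i) != 0) {
			failed = i; /* plays the role of shared_rss_last */
			break;
		}
	}
	if (failed < 0)
		return 0;
	/*
	 * Second pass from the head of the list, stopping at the element
	 * that failed: exactly the objects attached so far get detached.
	 */
	for (i = 0; i < NB_OBJS; i++) {
		if (i == failed)
			break;
		detach(i);
	}
	return 1;
}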
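
The rework of mlx5_flow_tunnel_validate() above also fixes a real defect visible in the removed lines: the old signature took const char *err_msg by value, so the assignments inside the function never reached the callers, and the message later handed to rte_flow_error_set() was always the caller's stale NULL. The new version writes straight into the caller-provided struct rte_flow_error instead. The underlying C pitfall in miniature; names are illustrative:

#include <stdio.h>

/* A pointer parameter is passed by value, so assigning to it inside
 * the callee is invisible to the caller. */
static int
validate_old(const char *err_msg)
{
	err_msg = "tunnel offload was not activated"; /* lost on return */
	return -1;
}

static int
validate_new(const char **err_msg)
{
	*err_msg = "tunnel offload was not activated"; /* reaches caller */
	return -1;
}

int
main(void)
{
	const char *msg = NULL;

	validate_old(msg);
	printf("old style: %s\n", msg ? msg : "(null)"); /* prints (null) */
	validate_new(&msg);
	printf("new style: %s\n", msg); /* prints the message */
	return 0;
}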
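
mlx5_flow_is_std_vxlan_port() above masks the UDP destination port before comparing, and a port masked down to zero (or a missing spec) counts as standard. The same logic in stand-alone C, assuming htons()/ntohs() as stand-ins for rte_cpu_to_be_16()/rte_be_to_cpu_16() and 4789 (the IANA-assigned VXLAN port) for MLX5_UDP_PORT_VXLAN:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define UDP_PORT_VXLAN 4789 /* assumed value behind MLX5_UDP_PORT_VXLAN */

/* Spec-and-mask test: "standard VXLAN" if the port is wildcarded or
 * equals 4789 after masking. */
static int
is_std_vxlan_port(const uint16_t *dport_spec_be, const uint16_t *dport_mask_be)
{
	uint16_t dport = 0;

	if (dport_spec_be != NULL) {
		/* default mask is all-ones, as in rte_flow_item_udp_mask */
		uint16_t mask_be = dport_mask_be ? *dport_mask_be : 0xffff;

		dport = ntohs((uint16_t)(*dport_spec_be & mask_be));
	}
	return dport == 0 || dport == UDP_PORT_VXLAN;
}

int
main(void)
{
	uint16_t std = htons(4789);
	uint16_t other = htons(4790);

	printf("%d %d %d\n",
	       is_std_vxlan_port(NULL, NULL),   /* 1: no spec -> wildcard */
	       is_std_vxlan_port(&std, NULL),   /* 1: 4789 */
	       is_std_vxlan_port(&other, NULL)); /* 0: non-default port */
	return 0;
}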
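
Finally, the priority_map_3/priority_map_5 tables added above drive mlx5_flow_adjust_priority(): with 8 Verbs priorities the PMD exposes 3 flow priorities (RTE_DIM(priority_map_3)), with 16 it exposes 5, and each row carries MLX5_PRIORITY_MAP_MAX = 3 sub-priority slots derived from the matched layers. For example, priority 1 with subpriority 2 resolves to 4 on the small map and 5 on the large one. A stand-alone check of that arithmetic, using local copies of the tables:

#include <assert.h>
#include <stdio.h>

#define MLX5_PRIORITY_MAP_MAX 3 /* matches the 3-column rows above */

static const unsigned int priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};
static const unsigned int priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

int
main(void)
{
	/* priority 1, subpriority 2 */
	assert(priority_map_3[1][2] == 4);
	assert(priority_map_5[1][2] == 5);
	printf("8 Verbs priorities  -> %zu flow priorities\n",
	       sizeof(priority_map_3) / sizeof(priority_map_3[0]));
	printf("16 Verbs priorities -> %zu flow priorities\n",
	       sizeof(priority_map_5) / sizeof(priority_map_5[0]));
	return 0;
}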