diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index dc41e8b1b7..673e0ec55f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+	[MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
 	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
 	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
@@ -98,12 +99,27 @@ struct mlx5_flow_expand_node {
 	uint64_t rss_types;
 	/**<
 	 * RSS types bit-field associated with this node
-	 * (see ETH_RSS_* definitions).
+	 * (see RTE_ETH_RSS_* definitions).
+	 */
+	uint64_t node_flags;
+	/**<
+	 * Bit-fields that define how the node is used in the expansion.
+	 * (see MLX5_EXPANSION_NODE_* definitions).
 	 */
-	uint8_t optional;
-	/**< optional expand field. Default 0 to expand, 1 not go deeper. */
 };
 
+/* Optional expand field. The expansion alg will not go deeper. */
+#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
+
+/* The node is not added implicitly as expansion to the flow pattern.
+ * If the node type does not match the flow pattern item type, the
+ * expansion alg will go deeper to its next items.
+ * In the current implementation, the list of next nodes indexes can
+ * have up to one node with this flag set and it has to be the last
+ * node index (before the list terminator).
+ */
+#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
+
 /** Object returned by mlx5_flow_expand_rss(). */
 struct mlx5_flow_expand_rss {
 	uint32_t entries;
@@ -117,6 +133,12 @@ struct mlx5_flow_expand_rss {
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
 
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+		unsigned int item_idx,
+		const struct mlx5_flow_expand_node graph[],
+		const struct mlx5_flow_expand_node *node);
+
 static bool
 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 {
@@ -131,6 +153,11 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 	case RTE_FLOW_ITEM_TYPE_NVGRE:
 	case RTE_FLOW_ITEM_TYPE_GRE:
 	case RTE_FLOW_ITEM_TYPE_GENEVE:
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+	case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+	case RTE_FLOW_ITEM_TYPE_GTP:
 		return true;
 	default:
 		break;
@@ -138,105 +165,163 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 	return false;
 }
 
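The MLX5_EXPANSION_NODE_EXPLICIT flag defined above changes how the expansion walk follows next-node lists: explicit nodes are bypassed unless the user pattern itself named them. The following is a minimal, compilable sketch of that skip step. The flag values match the definitions above and the loop mirrors mlx5_flow_expand_rss_skip_explicit() added further down in this patch; the reduced expand_node struct, the toy three-node graph (ETH -> VLAN -> IPV4) and its index values are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Flag values as defined in the hunk above. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)

/* Reduced stand-in for struct mlx5_flow_expand_node: only the fields
 * needed to show how next-node lists and node_flags interact.
 */
struct expand_node {
	const int *next;      /* zero-terminated list of next node indexes */
	uint64_t node_flags;
};

/* Same skip logic as mlx5_flow_expand_rss_skip_explicit() in this patch:
 * nodes flagged EXPLICIT are never added implicitly, so the walk moves
 * on to their own next lists instead.
 */
static const int *
skip_explicit(const struct expand_node graph[], const int *next)
{
	while (next && *next &&
	       (graph[*next].node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
		next = graph[*next].next;
	return next;
}

int
main(void)
{
	/* Toy graph: 1 = ETH, 2 = VLAN (explicit), 3 = IPV4. */
	static const int eth_next[] = { 2, 0 };
	static const int vlan_next[] = { 3, 0 };
	static const struct expand_node graph[] = {
		[1] = { .next = eth_next, .node_flags = 0 },
		[2] = { .next = vlan_next,
			.node_flags = MLX5_EXPANSION_NODE_EXPLICIT },
		[3] = { .next = NULL, .node_flags = 0 },
	};
	const int *next = skip_explicit(graph, graph[1].next);

	/* VLAN (2) is skipped; implicit expansion lands on IPV4 (3). */
	printf("first implicit expansion from ETH: node %d\n",
	       next && *next ? *next : 0);
	return 0;
}

With VLAN marked explicit, expanding from ETH reaches IPV4 directly unless the original flow pattern contains a VLAN item, which is exactly the behavior the two MLX5_EXPANSION_VLAN table entries below rely on.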
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case RTE_VXLAN_GPE_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_VXLAN_GPE_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	case RTE_VXLAN_GPE_TYPE_ETH:
+		type = RTE_FLOW_ITEM_TYPE_ETH;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+	enum rte_flow_item_type type;
+
+	switch (proto_mask & proto_spec) {
+	case IPPROTO_UDP:
+		type = RTE_FLOW_ITEM_TYPE_UDP;
+		break;
+	case IPPROTO_TCP:
+		type = RTE_FLOW_ITEM_TYPE_TCP;
+		break;
+	case IPPROTO_IP:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case IPPROTO_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+			    rte_be16_t type_mask, bool is_tunnel)
+{
+	enum rte_flow_item_type type;
+
+	switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+	case RTE_ETHER_TYPE_TEB:
+		type = is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_VLAN:
+		type = !is_tunnel ?
+		       RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+		break;
+	case RTE_ETHER_TYPE_IPV4:
+		type = RTE_FLOW_ITEM_TYPE_IPV4;
+		break;
+	case RTE_ETHER_TYPE_IPV6:
+		type = RTE_FLOW_ITEM_TYPE_IPV6;
+		break;
+	default:
+		type = RTE_FLOW_ITEM_TYPE_END;
+	}
+	return type;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
-	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-	uint16_t ether_type = 0;
-	uint16_t ether_type_m;
-	uint8_t ip_next_proto = 0;
-	uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+	do { \
+		const void *m = item->mask; \
+		const void *s = item->spec; \
+		mask = m ?
\ + ((const struct rte_flow_item_##type *)m)->fld : \ + rte_flow_item_##type##_mask.fld; \ + spec = ((const struct rte_flow_item_##type *)s)->fld; \ + } while (0) + + enum rte_flow_item_type ret; + uint16_t spec, mask; if (item == NULL || item->spec == NULL) - return ret; + return RTE_FLOW_ITEM_TYPE_VOID; switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_eth *) - (item->mask))->type; - else - ether_type_m = rte_flow_item_eth_mask.type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_eth *) - (item->spec))->type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(eth, type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_VLAN: - if (item->mask) - ether_type_m = ((const struct rte_flow_item_vlan *) - (item->mask))->inner_type; - else - ether_type_m = rte_flow_item_vlan_mask.inner_type; - if (ether_type_m != RTE_BE16(0xFFFF)) - break; - ether_type = ((const struct rte_flow_item_vlan *) - (item->spec))->inner_type; - if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) - ret = RTE_FLOW_ITEM_TYPE_VLAN; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_ethertype_to_item_type(spec, mask, false); break; case RTE_FLOW_ITEM_TYPE_IPV4: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) - (item->mask))->hdr.next_proto_id; - else - ip_next_proto_m = - rte_flow_item_ipv4_mask.hdr.next_proto_id; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv4 *) - (item->spec))->hdr.next_proto_id; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); break; case RTE_FLOW_ITEM_TYPE_IPV6: - if (item->mask) - ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) - (item->mask))->hdr.proto; - else - ip_next_proto_m = - rte_flow_item_ipv6_mask.hdr.proto; - if (ip_next_proto_m != 0xFF) - break; - ip_next_proto = ((const struct rte_flow_item_ipv6 *) - (item->spec))->hdr.proto; - if (ip_next_proto == IPPROTO_UDP) - ret = RTE_FLOW_ITEM_TYPE_UDP; - else if (ip_next_proto == IPPROTO_TCP) - ret = RTE_FLOW_ITEM_TYPE_TCP; - else if (ip_next_proto == IPPROTO_IP) - ret = RTE_FLOW_ITEM_TYPE_IPV4; - else if (ip_next_proto == IPPROTO_IPV6) - ret = RTE_FLOW_ITEM_TYPE_IPV6; - else - ret = RTE_FLOW_ITEM_TYPE_END; + MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto); + if (!mask) + return RTE_FLOW_ITEM_TYPE_VOID; + ret = mlx5_inet_proto_to_item_type(spec, mask); + break; + case 
RTE_FLOW_ITEM_TYPE_GENEVE: + MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + MLX5_XSET_ITEM_MASK_SPEC(gre, protocol); + ret = mlx5_ethertype_to_item_type(spec, mask, true); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol); + ret = mlx5_nsh_proto_to_item_type(spec, mask); break; default: ret = RTE_FLOW_ITEM_TYPE_VOID; break; } return ret; +#undef MLX5_XSET_ITEM_MASK_SPEC +} + +static const int * +mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], + const int *next_node) +{ + const struct mlx5_flow_expand_node *node = NULL; + const int *next = next_node; + + while (next && *next) { + /* + * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT + * flag set, because they were not found in the flow pattern. + */ + node = &graph[*next]; + if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT)) + break; + next = node->next; + } + return next; } #define MLX5_RSS_EXP_ELT_N 16 @@ -252,7 +337,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) * @param[in] pattern * User flow pattern. * @param[in] types - * RSS types to expand (see ETH_RSS_* definitions). + * RSS types to expand (see RTE_ETH_RSS_* definitions). * @param[in] graph * Input graph to expand @p pattern according to @p types. * @param[in] graph_root_index @@ -278,7 +363,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, const int *stack[MLX5_RSS_EXP_ELT_N]; int stack_pos = 0; struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N]; - unsigned int i; + unsigned int i, item_idx, last_expand_item_idx = 0; size_t lsize; size_t user_pattern_size = 0; void *addr = NULL; @@ -286,7 +371,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, struct rte_flow_item missed_item; int missed = 0; int elt = 0; - const struct rte_flow_item *last_item = NULL; + const struct rte_flow_item *last_expand_item = NULL; memset(&missed_item, 0, sizeof(missed_item)); lsize = offsetof(struct mlx5_flow_expand_rss, entry) + @@ -297,16 +382,26 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N]; buf->entries = 0; addr = buf->entry[0].pattern; - for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + for (item = pattern, item_idx = 0; + item->type != RTE_FLOW_ITEM_TYPE_END; + item++, item_idx++) { if (!mlx5_flow_is_rss_expandable_item(item)) { user_pattern_size += sizeof(*item); continue; } - last_item = item; - for (i = 0; node->next && node->next[i]; ++i) { + last_expand_item = item; + last_expand_item_idx = item_idx; + i = 0; + while (node->next && node->next[i]) { next = &graph[node->next[i]]; if (next->type == item->type) break; + if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { + node = next; + i = 0; + } else { + ++i; + } } if (next) node = next; @@ -327,7 +422,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, * Check if the last valid item has spec set, need complete pattern, * and the pattern can be used for expansion. */ - missed_item.type = mlx5_flow_expand_rss_item_complete(last_item); + missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item); if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) { /* Item type END indicates expansion is not required. 
*/ return lsize; @@ -335,13 +430,20 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { next = NULL; missed = 1; - for (i = 0; node->next && node->next[i]; ++i) { + i = 0; + while (node->next && node->next[i]) { next = &graph[node->next[i]]; if (next->type == missed_item.type) { flow_items[0].type = missed_item.type; flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; break; } + if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) { + node = next; + i = 0; + } else { + ++i; + } next = NULL; } } @@ -362,9 +464,13 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, addr = (void *)(((uintptr_t)addr) + elt * sizeof(*item)); } + } else if (last_expand_item != NULL) { + node = mlx5_flow_expand_rss_adjust_node(pattern, + last_expand_item_idx, graph, node); } memset(flow_items, 0, sizeof(flow_items)); - next_node = node->next; + next_node = mlx5_flow_expand_rss_skip_explicit(graph, + node->next); stack[stack_pos] = next_node; node = next_node ? &graph[*next_node] : NULL; while (node) { @@ -399,8 +505,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, addr = (void *)(((uintptr_t)addr) + n); } /* Go deeper. */ - if (!node->optional && node->next) { - next_node = node->next; + if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) && + node->next) { + next_node = mlx5_flow_expand_rss_skip_explicit(graph, + node->next); if (stack_pos++ == MLX5_RSS_EXP_ELT_N) { rte_errno = E2BIG; return -rte_errno; @@ -408,15 +516,27 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, stack[stack_pos] = next_node; } else if (*(next_node + 1)) { /* Follow up with the next possibility. */ + next_node = mlx5_flow_expand_rss_skip_explicit(graph, + ++next_node); + } else if (!stack_pos) { + /* + * Completing the traverse over the different paths. + * The next_node is advanced to the terminator. + */ ++next_node; } else { /* Move to the next path. */ - if (stack_pos) + while (stack_pos) { next_node = stack[--stack_pos]; - next_node++; + next_node++; + if (*next_node) + break; + } + next_node = mlx5_flow_expand_rss_skip_explicit(graph, + next_node); stack[stack_pos] = next_node; } - node = *next_node ? &graph[*next_node] : NULL; + node = next_node && *next_node ? &graph[*next_node] : NULL; }; return lsize; } @@ -424,10 +544,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, enum mlx5_expansion { MLX5_EXPANSION_ROOT, MLX5_EXPANSION_ROOT_OUTER, - MLX5_EXPANSION_ROOT_ETH_VLAN, - MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, MLX5_EXPANSION_OUTER_ETH, - MLX5_EXPANSION_OUTER_ETH_VLAN, MLX5_EXPANSION_OUTER_VLAN, MLX5_EXPANSION_OUTER_IPV4, MLX5_EXPANSION_OUTER_IPV4_UDP, @@ -436,13 +553,14 @@ enum mlx5_expansion { MLX5_EXPANSION_OUTER_IPV6_UDP, MLX5_EXPANSION_OUTER_IPV6_TCP, MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_STD_VXLAN, + MLX5_EXPANSION_L3_VXLAN, MLX5_EXPANSION_VXLAN_GPE, MLX5_EXPANSION_GRE, MLX5_EXPANSION_NVGRE, MLX5_EXPANSION_GRE_KEY, MLX5_EXPANSION_MPLS, MLX5_EXPANSION_ETH, - MLX5_EXPANSION_ETH_VLAN, MLX5_EXPANSION_VLAN, MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV4_UDP, @@ -450,6 +568,9 @@ enum mlx5_expansion { MLX5_EXPANSION_IPV6, MLX5_EXPANSION_IPV6_UDP, MLX5_EXPANSION_IPV6_TCP, + MLX5_EXPANSION_IPV6_FRAG_EXT, + MLX5_EXPANSION_GTP, + MLX5_EXPANSION_GENEVE, }; /** Supported expansion of items. 
*/ @@ -466,23 +587,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_END, }, - [MLX5_EXPANSION_ROOT_ETH_VLAN] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), - .type = RTE_FLOW_ITEM_TYPE_END, - }, - [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT - (MLX5_EXPANSION_OUTER_ETH_VLAN), - .type = RTE_FLOW_ITEM_TYPE_END, - }, [MLX5_EXPANSION_OUTER_ETH] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6, - MLX5_EXPANSION_MPLS), - .type = RTE_FLOW_ITEM_TYPE_ETH, - .rss_types = 0, - }, - [MLX5_EXPANSION_OUTER_ETH_VLAN] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), .type = RTE_FLOW_ITEM_TYPE_ETH, .rss_types = 0, @@ -491,6 +596,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_VLAN, + .node_flags = MLX5_EXPANSION_NODE_EXPLICIT, }, [MLX5_EXPANSION_OUTER_IPV4] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT @@ -501,18 +607,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_IPV4, - .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER, + .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER, }, [MLX5_EXPANSION_OUTER_IPV4_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, + MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, }, [MLX5_EXPANSION_OUTER_IPV4_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_OUTER_IPV6] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT @@ -523,18 +632,21 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_GRE, MLX5_EXPANSION_NVGRE), .type = RTE_FLOW_ITEM_TYPE_IPV6, - .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER, + .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_OUTER_IPV6_UDP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_GENEVE, + MLX5_EXPANSION_GTP), .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, }, [MLX5_EXPANSION_OUTER_IPV6_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, }, [MLX5_EXPANSION_VXLAN] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, @@ -542,6 +654,15 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VXLAN, }, + [MLX5_EXPANSION_STD_VXLAN] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, + [MLX5_EXPANSION_L3_VXLAN] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, [MLX5_EXPANSION_VXLAN_GPE] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, MLX5_EXPANSION_IPV4, @@ -549,16 +670,19 @@ static const struct 
mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, [MLX5_EXPANSION_GRE] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, - MLX5_EXPANSION_GRE_KEY), + MLX5_EXPANSION_GRE_KEY, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_GRE, }, [MLX5_EXPANSION_GRE_KEY] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, - .optional = 1, + .node_flags = MLX5_EXPANSION_NODE_OPTIONAL, }, [MLX5_EXPANSION_NVGRE] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), @@ -566,15 +690,12 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { }, [MLX5_EXPANSION_MPLS] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_ETH), .type = RTE_FLOW_ITEM_TYPE_MPLS, + .node_flags = MLX5_EXPANSION_NODE_OPTIONAL, }, [MLX5_EXPANSION_ETH] = { - .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), - .type = RTE_FLOW_ITEM_TYPE_ETH, - }, - [MLX5_EXPANSION_ETH_VLAN] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), .type = RTE_FLOW_ITEM_TYPE_ETH, }, @@ -582,36 +703,52 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VLAN, + .node_flags = MLX5_EXPANSION_NODE_EXPLICIT, }, [MLX5_EXPANSION_IPV4] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, MLX5_EXPANSION_IPV4_TCP), .type = RTE_FLOW_ITEM_TYPE_IPV4, - .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER, + .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER, }, [MLX5_EXPANSION_IPV4_UDP] = { .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP, }, [MLX5_EXPANSION_IPV4_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_IPV6] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, - MLX5_EXPANSION_IPV6_TCP), + MLX5_EXPANSION_IPV6_TCP, + MLX5_EXPANSION_IPV6_FRAG_EXT), .type = RTE_FLOW_ITEM_TYPE_IPV6, - .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER, + .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_IPV6_UDP] = { .type = RTE_FLOW_ITEM_TYPE_UDP, - .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP, }, [MLX5_EXPANSION_IPV6_TCP] = { .type = RTE_FLOW_ITEM_TYPE_TCP, - .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, + }, + [MLX5_EXPANSION_IPV6_FRAG_EXT] = { + .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, + }, + [MLX5_EXPANSION_GTP] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_GTP, + }, + [MLX5_EXPANSION_GENEVE] = { + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_GENEVE, }, }; @@ -660,6 +797,36 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, struct rte_flow_error *err); +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + 
const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error); +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error); +static int +mlx5_flow_info_get(struct rte_eth_dev *dev, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error); +static int +mlx5_flow_port_configure(struct rte_eth_dev *dev, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *err); + +static struct rte_flow_pattern_template * +mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error); + +static int +mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *template, + struct rte_flow_error *error); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -679,6 +846,12 @@ static const struct rte_flow_ops mlx5_flow_ops = { .tunnel_action_decap_release = mlx5_flow_tunnel_action_release, .tunnel_item_release = mlx5_flow_tunnel_item_release, .get_restore_info = mlx5_flow_tunnel_get_restore_info, + .flex_item_create = mlx5_flow_flex_item_create, + .flex_item_release = mlx5_flow_flex_item_release, + .info_get = mlx5_flow_info_get, + .configure = mlx5_flow_port_configure, + .pattern_template_create = mlx5_flow_pattern_template_create, + .pattern_template_destroy = mlx5_flow_pattern_template_destroy, }; /* Tunnel information. */ @@ -755,7 +928,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; enum modify_reg start_reg; bool skip_mtr_reg = false; @@ -809,6 +982,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, case MLX5_MTR_COLOR: case MLX5_ASO_FLOW_HIT: case MLX5_ASO_CONNTRACK: + case MLX5_SAMPLE_ID: /* All features use the same REG_C. */ MLX5_ASSERT(priv->mtr_color_reg != REG_NON); return priv->mtr_color_reg; @@ -833,7 +1007,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); - if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON) + if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); @@ -843,21 +1017,21 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, * If the available index REG_C_y >= REG_C_x, skip the * color register. 
*/ - if (skip_mtr_reg && config->flow_mreg_c + if (skip_mtr_reg && priv->sh->flow_mreg_c [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { if (id >= (uint32_t)(REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); - if (config->flow_mreg_c + if (priv->sh->flow_mreg_c [id + 1 + start_reg - REG_C_0] != REG_NON) - return config->flow_mreg_c + return priv->sh->flow_mreg_c [id + 1 + start_reg - REG_C_0]; return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); } - return config->flow_mreg_c[id + start_reg - REG_C_0]; + return priv->sh->flow_mreg_c[id + start_reg - REG_C_0]; } MLX5_ASSERT(false); return rte_flow_error_set(error, EINVAL, @@ -878,7 +1052,6 @@ bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; /* * Having available reg_c can be regarded inclusively as supporting @@ -888,7 +1061,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) * - reg_c's are preserved across different domain (FDB and NIC) on * packet loopback by flow lookup miss. */ - return config->flow_mreg_c[2] != REG_NON; + return priv->sh->flow_mreg_c[2] != REG_NON; } /** @@ -909,7 +1082,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; if (!attr->group && !attr->transfer) - return priv->config.flow_prio - 2; + return priv->sh->flow_max_priority - 2; return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1; } @@ -922,21 +1095,26 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev, * Pointer to device flow rule attributes. * @param[in] subpriority * The priority based on the items. + * @param[in] external + * Flow is user flow. * @return * The matcher priority of the flow. */ uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - uint32_t subpriority) + uint32_t subpriority, bool external) { uint16_t priority = (uint16_t)attr->priority; struct mlx5_priv *priv = dev->data->dev_private; if (!attr->group && !attr->transfer) { if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) - priority = priv->config.flow_prio - 1; + priority = priv->sh->flow_max_priority - 1; return mlx5_os_flow_adjust_priority(dev, priority, subpriority); + } else if (!external && attr->transfer && attr->group == 0 && + attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) { + return (priv->sh->flow_max_priority - 1) * 3; } if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) priority = MLX5_NON_ROOT_FLOW_MAX_PRIO; @@ -1014,7 +1192,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * @param[in] tunnel * 1 when the hash field is for a tunnel item. * @param[in] layer_types - * ETH_RSS_* types. + * RTE_ETH_RSS_* types. * @param[in] hash_fields * Item hash fields. * @@ -1070,7 +1248,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device * flow. 
* * @param[in] dev @@ -1083,7 +1261,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1108,24 +1285,16 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, return; for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; /* * To support metadata register copy on Tx loopback, * this must be always enabled (metadata may arive * from other port - not from local flows only. */ - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n++; - } if (tunnel) { unsigned int j; @@ -1143,6 +1312,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, } } +static void +flow_rxq_mark_flag_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (priv->mark_enabled) + return; + LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { + rxq_ctrl->rxq.mark = 1; + } + priv->mark_enabled = 1; +} + /** * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow * @@ -1157,7 +1340,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) struct mlx5_priv *priv = dev->data->dev_private; uint32_t handle_idx; struct mlx5_flow_handle *dev_handle; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); + if (wks->mark) + flow_rxq_mark_flag_set(dev); SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dev_handle, next) flow_drv_rxq_flags_set(dev, dev_handle); @@ -1177,7 +1364,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - const int mark = dev_handle->mark; const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; @@ -1203,19 +1389,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(dev->data->dev_started); for (i = 0; i != ind_tbl->queues_n; ++i) { int idx = ind_tbl->queues[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); - - if (priv->config.dv_flow_en && - priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && - mlx5_flow_ext_mreg_supported(dev)) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n = 1; - } else if (mark) { - rxq_ctrl->flow_mark_n--; - rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; - } + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + + MLX5_ASSERT(rxq_ctrl != NULL); + if (rxq_ctrl == NULL) + continue; if (tunnel) { unsigned int j; @@ -1267,19 +1445,17 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); unsigned int j; - if (!(*priv->rxqs)[i]) + if (rxq == NULL || rxq->ctrl == NULL) continue; - rxq_ctrl = container_of((*priv->rxqs)[i], - struct mlx5_rxq_ctrl, rxq); - rxq_ctrl->flow_mark_n = 
0; - rxq_ctrl->rxq.mark = 0; + rxq->ctrl->rxq.mark = 0; for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) - rxq_ctrl->flow_tunnels_n[j] = 0; - rxq_ctrl->rxq.tunnel = 0; + rxq->ctrl->flow_tunnels_n[j] = 0; + rxq->ctrl->rxq.tunnel = 0; } + priv->mark_enabled = 0; } /** @@ -1292,13 +1468,15 @@ void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *data; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) { - if (!(*priv->rxqs)[i]) + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); + struct mlx5_rxq_data *data; + + if (rxq == NULL || rxq->ctrl == NULL) continue; - data = (*priv->rxqs)[i]; + data = &rxq->ctrl->rxq; if (!rte_flow_dynf_metadata_avail()) { data->dynf_meta = 0; data->flow_meta_mask = 0; @@ -1308,9 +1486,7 @@ mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) data->dynf_meta = 1; data->flow_meta_mask = rte_flow_dynf_metadata_mask; data->flow_meta_offset = rte_flow_dynf_metadata_offs; - data->flow_meta_port_mask = (uint32_t)~0; - if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16) - data->flow_meta_port_mask >>= 16; + data->flow_meta_port_mask = priv->sh->dv_meta_mask; } } } @@ -1491,7 +1667,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, "queue index out of range"); - if (!(*priv->rxqs)[queue->index]) + if (mlx5_rxq_get(dev, queue->index) == NULL) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &queue->index, @@ -1504,6 +1680,57 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action, return 0; } +/** + * Validate queue numbers for device RSS. + * + * @param[in] dev + * Configured device. + * @param[in] queues + * Array of queue numbers. + * @param[in] queues_n + * Size of the @p queues array. + * @param[out] error + * On error, filled with a textual error description. + * @param[out] queue + * On error, filled with an offending queue index in @p queues array. + * + * @return + * 0 on success, a negative errno code on error. + */ +static int +mlx5_validate_rss_queues(struct rte_eth_dev *dev, + const uint16_t *queues, uint32_t queues_n, + const char **error, uint32_t *queue_idx) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; + uint32_t i; + + for (i = 0; i != queues_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, + queues[i]); + + if (queues[i] >= priv->rxqs_n) { + *error = "queue index out of range"; + *queue_idx = i; + return -EINVAL; + } + if (rxq_ctrl == NULL) { + *error = "queue is not configured"; + *queue_idx = i; + return -EINVAL; + } + if (i == 0) + rxq_type = rxq_ctrl->type; + if (rxq_type != rxq_ctrl->type) { + *error = "combining hairpin and regular RSS queues is not supported"; + *queue_idx = i; + return -ENOTSUP; + } + } + return 0; +} + /* * Validate the rss action. 
* @@ -1524,8 +1751,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = action->conf; - enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED; - unsigned int i; + int ret; + const char *message; + uint32_t queue_idx; if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) @@ -1558,7 +1786,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->key_len, "RSS hash key too large"); - if (rss->queue_num > priv->config.ind_table_max_size) + if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->queue_num, @@ -1569,14 +1797,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, &rss->types, "some RSS protocols are not" " supported"); - if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && - !(rss->types & ETH_RSS_IP)) + if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) && + !(rss->types & RTE_ETH_RSS_IP)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "L3 partial RSS requested but L3 RSS" " type not specified"); - if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && - !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) + if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) && + !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "L4 partial RSS requested but L4 RSS" @@ -1589,27 +1817,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "No queues configured"); - for (i = 0; i != rss->queue_num; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; - - if (rss->queue[i] >= priv->rxqs_n) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue index out of range"); - if (!(*priv->rxqs)[rss->queue[i]]) - return rte_flow_error_set - (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], "queue is not configured"); - rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]], - struct mlx5_rxq_ctrl, rxq); - if (i == 0) - rxq_type = rxq_ctrl->type; - if (rxq_type != rxq_ctrl->type) - return rte_flow_error_set - (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], - "combining hairpin and regular RSS queues is not supported"); + ret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num, + &message, &queue_idx); + if (ret != 0) { + return rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[queue_idx], message); } return 0; } @@ -1669,6 +1882,13 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "RSS on eCPRI is not supported now"); } + if ((item_flags & MLX5_FLOW_LAYER_MPLS) && + !(item_flags & + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3)) && + rss->level > 1) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "MPLS inner RSS needs to specify inner L2/L3 items after MPLS in pattern"); return 0; } @@ -1786,7 +2006,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - uint32_t priority_max = priv->config.flow_prio - 1; + uint32_t priority_max = priv->sh->flow_max_priority - 1; if (attributes->group) return rte_flow_error_set(error, 
ENOTSUP, @@ -1801,7 +2021,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); - if (attributes->transfer && !priv->config.dv_esw_en) + if (attributes->transfer && !priv->sh->config.dv_esw_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, "transfer is not supported"); @@ -1972,6 +2192,10 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L2 layer should not follow VLAN"); + if (item_flags & MLX5_FLOW_LAYER_GTP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer should not follow GTP"); if (!mask) mask = &rte_flow_item_eth_mask; ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, @@ -2398,12 +2622,16 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, /** * Validate VXLAN item. * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] udp_dport + * UDP destination port * @param[in] item * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. - * @param[in] target_protocol - * The next protocol in the previous item. + * @param[in] attr + * Flow rule attributes. * @param[out] error * Pointer to error structure. * @@ -2411,24 +2639,33 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, +mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev, + uint16_t udp_dport, + const struct rte_flow_item *item, uint64_t item_flags, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; int ret; + struct mlx5_priv *priv = dev->data->dev_private; union vni { uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - + const struct rte_flow_item_vxlan nic_mask = { + .vni = "\xff\xff\xff", + .rsvd1 = 0xff, + }; + const struct rte_flow_item_vxlan *valid_mask; if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple tunnel layers not" " supported"); + valid_mask = &rte_flow_item_vxlan_mask; /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 @@ -2439,9 +2676,21 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, "no outer UDP layer found"); if (!mask) mask = &rte_flow_item_vxlan_mask; + + if (priv->sh->steering_format_version != + MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 || + !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) { + /* FDB domain & NIC domain non-zero group */ + if ((attr->transfer || attr->group) && priv->sh->misc5_cap) + valid_mask = &nic_mask; + /* Group zero in NIC domain */ + if (!attr->group && !attr->transfer && + priv->sh->tunnel_header_0_1) + valid_mask = &nic_mask; + } ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_vxlan_mask, + (const uint8_t *)valid_mask, sizeof(struct rte_flow_item_vxlan), MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) @@ -2489,7 +2738,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint8_t vni[4]; } id = { .vlan_id = 0, }; - if (!priv->config.l3_vxlan_en) + if (!priv->sh->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, 
RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN is not enabled by device" @@ -2684,7 +2933,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, const struct rte_flow_item_geneve *mask = item->mask; int ret; uint16_t gbhdr; - uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ? + uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ? MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; const struct rte_flow_item_geneve nic_mask = { .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), @@ -2692,7 +2941,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; - if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) + if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" @@ -2772,10 +3021,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_geneve_tlv_option_resource *geneve_opt_resource; - struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr; + struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; uint8_t data_max_supported = hca_attr->max_geneve_tlv_option_data_len * 4; - struct mlx5_dev_config *config = &priv->config; const struct rte_flow_item_geneve *geneve_spec; const struct rte_flow_item_geneve *geneve_mask; const struct rte_flow_item_geneve_opt *spec = item->spec; @@ -2799,7 +3047,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Geneve TLV opt length exceeeds the limit (31)"); + "Geneve TLV opt length exceeds the limit (31)"); /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || @@ -2809,11 +3057,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Geneve TLV opt class/type/length masks must be full"); /* Check if length is supported */ if ((uint32_t)spec->option_len > - config->hca_attr.max_geneve_tlv_option_data_len) + hca_attr->max_geneve_tlv_option_data_len) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt length not supported"); - if (config->hca_attr.max_geneve_tlv_options > 1) + if (hca_attr->max_geneve_tlv_options > 1) DRV_LOG(DEBUG, "max_geneve_tlv_options supports more than 1 option"); /* Check GENEVE item preceding. */ @@ -2868,7 +3116,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, "Data mask is of unsupported size"); } /* Check GENEVE option is supported in NIC. */ - if (!config->hca_attr.geneve_tlv_opt) + if (!hca_attr->geneve_tlv_opt) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Geneve TLV opt not supported"); @@ -2917,7 +3165,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!priv->config.mpls_en) + if (!priv->sh->dev_cap.mpls_en) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS not supported or" @@ -3097,31 +3345,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, MLX5_ITEM_RANGE_NOT_ACCEPTED, error); } -/** - * Release resource related QUEUE/RSS action split. - * - * @param dev - * Pointer to Ethernet device. 
- * @param flow - * Flow to release id's from. - */ -static void -flow_mreg_split_qrss_release(struct rte_eth_dev *dev, - struct rte_flow *flow) -{ - struct mlx5_priv *priv = dev->data->dev_private; - uint32_t handle_idx; - struct mlx5_flow_handle *dev_handle; - - SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, - handle_idx, dev_handle, next) - if (dev_handle->split_flow_id && - !dev_handle->is_meter_flow_id) - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], - dev_handle->split_flow_id); -} - static int flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, @@ -3232,12 +3455,18 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) if (type != MLX5_FLOW_TYPE_MAX) return type; + /* + * Currently when dv_flow_en == 2, only HW steering engine is + * supported. New engines can also be chosen here if ready. + */ + if (priv->sh->config.dv_flow_en == 2) + return MLX5_FLOW_TYPE_HW; /* If no OS specific type - continue with DV/VERBS selection */ - if (attr->transfer && priv->config.dv_esw_en) + if (attr->transfer && priv->sh->config.dv_esw_en) type = MLX5_FLOW_TYPE_DV; if (!attr->transfer) - type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : - MLX5_FLOW_TYPE_VERBS; + type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; return type; } @@ -3417,7 +3646,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; - flow_mreg_split_qrss_release(dev, flow); MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); fops->destroy(dev, flow); @@ -3451,6 +3679,41 @@ flow_drv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev, return fops->meter_sub_policy_rss_prepare(dev, policy, rss_desc); } +/** + * Flow driver color tag rule API. This abstracts calling driver + * specific functions. Parent flow (rte_flow) should have driver + * type (drv_type). It will create the color tag rules in hierarchy meter. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] src_port + * The src port this extra rule should use. + * @param[in] item + * The src port id match item. + * @param[out] error + * Pointer to error structure. + */ +static int +flow_drv_mtr_hierarchy_rule_create(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct mlx5_flow_meter_info *fm, + int32_t src_port, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->meter_hierarchy_rule_create(dev, fm, + src_port, item, error); +} + /** * Get RSS action from the action list. 
* @@ -3470,6 +3733,8 @@ flow_get_rss_action(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_rss *rss = NULL; + struct mlx5_meter_policy_action_container *acg; + struct mlx5_meter_policy_action_container *acy; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -3494,12 +3759,29 @@ flow_get_rss_action(struct rte_eth_dev *dev, const struct rte_flow_action_meter *mtr = actions->conf; fm = mlx5_flow_meter_find(priv, mtr->mtr_id, &mtr_idx); - if (fm) { + if (fm && !fm->def_policy) { policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL); - if (policy && policy->is_rss) - rss = - policy->act_cnt[RTE_COLOR_GREEN].rss->conf; + MLX5_ASSERT(policy); + if (policy->is_hierarchy) { + policy = + mlx5_flow_meter_hierarchy_get_final_policy(dev, + policy); + if (!policy) + return NULL; + } + if (policy->is_rss) { + acg = + &policy->act_cnt[RTE_COLOR_GREEN]; + acy = + &policy->act_cnt[RTE_COLOR_YELLOW]; + if (acg->fate_action == + MLX5_FLOW_FATE_SHARED_RSS) + rss = acg->rss->conf; + else if (acy->fate_action == + MLX5_FLOW_FATE_SHARED_RSS) + rss = acy->rss->conf; + } } break; } @@ -3528,8 +3810,11 @@ flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx) uint16_t offset = (age_idx >> 16) & UINT16_MAX; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; - struct mlx5_aso_age_pool *pool = mng->pools[pool_idx]; + struct mlx5_aso_age_pool *pool; + rte_rwlock_read_lock(&mng->resize_rwl); + pool = mng->pools[pool_idx]; + rte_rwlock_read_unlock(&mng->resize_rwl); return &pool->actions[offset - 1]; } @@ -3700,20 +3985,8 @@ flow_get_shared_rss_action(struct rte_eth_dev *dev, } static unsigned int -find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +find_graph_root(uint32_t rss_level) { - const struct rte_flow_item *item; - unsigned int has_vlan = 0; - - for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - has_vlan = 1; - break; - } - } - if (has_vlan) - return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : - MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; return rss_level < 2 ? MLX5_EXPANSION_ROOT : MLX5_EXPANSION_ROOT_OUTER; } @@ -3729,7 +4002,7 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) * subflow. * * @param[in] dev_flow - * Pointer the created preifx subflow. + * Pointer the created prefix subflow. * * @return * The layers get from prefix subflow. @@ -3865,7 +4138,7 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev, return true; case RTE_FLOW_ACTION_TYPE_FLAG: case RTE_FLOW_ACTION_TYPE_MARK: - if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) + if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) return true; else return false; @@ -4000,39 +4273,38 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, /* Declare flow create/destroy prototype in advance. 
*/ static uint32_t -flow_list_create(struct rte_eth_dev *dev, uint32_t *list, +flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], bool external, struct rte_flow_error *error); static void -flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, +flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, uint32_t flow_idx); int -flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused, - struct mlx5_hlist_entry *entry, - uint64_t key, void *cb_ctx __rte_unused) +flow_dv_mreg_match_cb(void *tool_ctx __rte_unused, + struct mlx5_list_entry *entry, void *cb_ctx) { + struct mlx5_flow_cb_ctx *ctx = cb_ctx; struct mlx5_flow_mreg_copy_resource *mcp_res = - container_of(entry, typeof(*mcp_res), hlist_ent); + container_of(entry, typeof(*mcp_res), hlist_ent); - return mcp_res->mark_id != key; + return mcp_res->mark_id != *(uint32_t *)(ctx->data); } -struct mlx5_hlist_entry * -flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, - void *cb_ctx) +struct mlx5_list_entry * +flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) { - struct rte_eth_dev *dev = list->ctx; + struct rte_eth_dev *dev = tool_ctx; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_cb_ctx *ctx = cb_ctx; struct mlx5_flow_mreg_copy_resource *mcp_res; struct rte_flow_error *error = ctx->error; uint32_t idx = 0; int ret; - uint32_t mark_id = key; + uint32_t mark_id = *(uint32_t *)(ctx->data); struct rte_flow_attr attr = { .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, .ingress = 1, @@ -4057,7 +4329,7 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - /* Fill the register fileds in the flow. */ + /* Fill the register fields in the flow. */ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; @@ -4126,11 +4398,11 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not - * be applied, removed, deleted in ardbitrary order + * be applied, removed, deleted in arbitrary order * by list traversing. */ - mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, - actions, false, error); + mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, + &attr, items, actions, false, error); if (!mcp_res->rix_flow) { mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx); return NULL; @@ -4138,26 +4410,56 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, return &mcp_res->hlist_ent; } -/** - * Add a flow of copying flow metadata registers in RX_CP_TBL. - * - * As mark_id is unique, if there's already a registered flow for the mark_id, - * return by increasing the reference counter of the resource. Otherwise, create - * the resource (mcp_res) and flow. - * - * Flow looks like, - * - If ingress port is ANY and reg_c[1] is mark_id, - * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. - * - * For default flow (zero mark_id), flow is like, - * - If ingress port is ANY, - * reg_b := reg_c[0] and jump to RX_ACT_TBL. - * - * @param dev - * Pointer to Ethernet device. - * @param mark_id - * ID of MARK action, zero means default flow for META. 
- * @param[out] error +struct mlx5_list_entry * +flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry, + void *cb_ctx __rte_unused) +{ + struct rte_eth_dev *dev = tool_ctx; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_mreg_copy_resource *mcp_res; + uint32_t idx = 0; + + mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); + if (!mcp_res) { + rte_errno = ENOMEM; + return NULL; + } + memcpy(mcp_res, oentry, sizeof(*mcp_res)); + mcp_res->idx = idx; + return &mcp_res->hlist_ent; +} + +void +flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = + container_of(entry, typeof(*mcp_res), hlist_ent); + struct rte_eth_dev *dev = tool_ctx; + struct mlx5_priv *priv = dev->data->dev_private; + + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); +} + +/** + * Add a flow of copying flow metadata registers in RX_CP_TBL. + * + * As mark_id is unique, if there's already a registered flow for the mark_id, + * return by increasing the reference counter of the resource. Otherwise, create + * the resource (mcp_res) and flow. + * + * Flow looks like, + * - If ingress port is ANY and reg_c[1] is mark_id, + * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * For default flow (zero mark_id), flow is like, + * - If ingress port is ANY, + * reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param mark_id + * ID of MARK action, zero means default flow for META. + * @param[out] error * Perform verbose error reporting if not NULL. * * @return @@ -4168,10 +4470,11 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hlist_entry *entry; + struct mlx5_list_entry *entry; struct mlx5_flow_cb_ctx ctx = { .dev = dev, .error = error, + .data = &mark_id, }; /* Check if already registered. */ @@ -4184,15 +4487,15 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, } void -flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry) +flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) { struct mlx5_flow_mreg_copy_resource *mcp_res = - container_of(entry, typeof(*mcp_res), hlist_ent); - struct rte_eth_dev *dev = list->ctx; + container_of(entry, typeof(*mcp_res), hlist_ent); + struct rte_eth_dev *dev = tool_ctx; struct mlx5_priv *priv = dev->data->dev_private; MLX5_ASSERT(mcp_res->rix_flow); - flow_list_destroy(dev, NULL, mcp_res->rix_flow); + flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); } @@ -4234,14 +4537,17 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev, static void flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) { - struct mlx5_hlist_entry *entry; + struct mlx5_list_entry *entry; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_cb_ctx ctx; + uint32_t mark_id; /* Check if default flow is registered. 
*/ if (!priv->mreg_cp_tbl) return; - entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, - MLX5_DEFAULT_COPY_ID, NULL); + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; + entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx); if (!entry) return; mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); @@ -4267,10 +4573,12 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_flow_cb_ctx ctx; + uint32_t mark_id; /* Check whether extensive metadata feature is engaged. */ - if (!priv->config.dv_flow_en || - priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + if (!priv->sh->config.dv_flow_en || + priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; @@ -4278,9 +4586,11 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, * Add default mreg copy flow may be called multiple time, but * only be called once in stop. Avoid register it twice. */ - if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL)) + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; + if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx)) return 0; - mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); + mcp_res = flow_mreg_add_copy_action(dev, mark_id, error); if (!mcp_res) return -rte_errno; return 0; @@ -4327,7 +4637,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; struct mlx5_flow_mreg_copy_resource *mcp_res; const struct rte_flow_action_mark *mark; @@ -4531,6 +4841,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, flow_split_info->flow_idx, error); @@ -4545,12 +4856,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the - * Metadate rxq mark flag to suffix flow as well. + * Metadata rxq mark flag to suffix flow as well. */ if (flow_split_info->prefix_layers) dev_flow->handle->layers = flow_split_info->prefix_layers; - if (flow_split_info->prefix_mark) - dev_flow->handle->mark = 1; + if (flow_split_info->prefix_mark) { + MLX5_ASSERT(wks); + wks->mark = 1; + } if (sub_flow) *sub_flow = dev_flow; #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -4566,8 +4879,8 @@ flow_create_split_inner(struct rte_eth_dev *dev, * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. - * @param[in] policy_id; - * Meter Policy id. + * @param wks + * Pointer to thread flow work space. * @param[in] attr * Flow rule attributes. 
* @param[in] items @@ -4581,32 +4894,23 @@ flow_create_split_inner(struct rte_eth_dev *dev, static struct mlx5_flow_meter_sub_policy * get_meter_sub_policy(struct rte_eth_dev *dev, struct rte_flow *flow, - uint32_t policy_id, + struct mlx5_flow_workspace *wks, const struct rte_flow_attr *attr, const struct rte_flow_item items[], struct rte_flow_error *error) { struct mlx5_flow_meter_policy *policy; + struct mlx5_flow_meter_policy *final_policy; struct mlx5_flow_meter_sub_policy *sub_policy = NULL; - policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL); - if (!policy) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to find Meter Policy."); - goto exit; - } - if (policy->is_rss || - (policy->is_queue && - !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) { - struct mlx5_flow_workspace *wks = - mlx5_flow_get_thread_workspace(); + policy = wks->policy; + final_policy = policy->is_hierarchy ? wks->final_policy : policy; + if (final_policy->is_rss || final_policy->is_queue) { struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS]; struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0}; uint32_t i; - MLX5_ASSERT(wks); - /** + /* * This is a tmp dev_flow, * no need to register any matcher for it in translate. */ @@ -4614,18 +4918,19 @@ get_meter_sub_policy(struct rte_eth_dev *dev, for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { struct mlx5_flow dev_flow = {0}; struct mlx5_flow_handle dev_handle = { {0} }; + uint8_t fate = final_policy->act_cnt[i].fate_action; - if (policy->is_rss) { - const void *rss_act = - policy->act_cnt[i].rss->conf; + if (fate == MLX5_FLOW_FATE_SHARED_RSS) { + const struct rte_flow_action_rss *rss_act = + final_policy->act_cnt[i].rss->conf; struct rte_flow_action rss_actions[2] = { [0] = { .type = RTE_FLOW_ACTION_TYPE_RSS, - .conf = rss_act + .conf = rss_act, }, [1] = { .type = RTE_FLOW_ACTION_TYPE_END, - .conf = NULL + .conf = NULL, } }; @@ -4650,34 +4955,38 @@ get_meter_sub_policy(struct rte_eth_dev *dev, rss_desc_v[i].hash_fields ? rss_desc_v[i].queue_num : 1; rss_desc_v[i].tunnel = - !!(dev_flow.handle->layers & - MLX5_FLOW_LAYER_TUNNEL); - } else { + !!(dev_flow.handle->layers & + MLX5_FLOW_LAYER_TUNNEL); + /* Use the RSS queues in the containers. */ + rss_desc_v[i].queue = + (uint16_t *)(uintptr_t)rss_act->queue; + rss_desc[i] = &rss_desc_v[i]; + } else if (fate == MLX5_FLOW_FATE_QUEUE) { /* This is queue action. */ rss_desc_v[i] = wks->rss_desc; rss_desc_v[i].key_len = 0; rss_desc_v[i].hash_fields = 0; rss_desc_v[i].queue = - &policy->act_cnt[i].queue; + &final_policy->act_cnt[i].queue; rss_desc_v[i].queue_num = 1; + rss_desc[i] = &rss_desc_v[i]; + } else { + rss_desc[i] = NULL; } - rss_desc[i] = &rss_desc_v[i]; } sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev, flow, policy, rss_desc); } else { enum mlx5_meter_domain mtr_domain = attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : - attr->egress ? MLX5_MTR_DOMAIN_EGRESS : - MLX5_MTR_DOMAIN_INGRESS; + (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : + MLX5_MTR_DOMAIN_INGRESS); sub_policy = policy->sub_policys[mtr_domain][0]; } - if (!sub_policy) { + if (!sub_policy) rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to get meter sub-policy."); - goto exit; - } + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to get meter sub-policy."); exit: return sub_policy; } @@ -4698,8 +5007,8 @@ exit: * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. - * @param[in] fm - * Pointer to flow meter structure. 
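get_meter_sub_policy above now dispatches on the terminal policy's per-color fate action instead of the old policy-wide is_rss/is_queue split, and a color with neither fate leaves rss_desc[i] NULL. A reduced model of that dispatch; the color count, fate names, and descriptor fields are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

enum fate { FATE_NONE, FATE_QUEUE, FATE_SHARED_RSS };
#define COLORS 2 /* mirrors MLX5_MTR_RTE_COLORS */

struct rss_desc { int valid; uint16_t queue_num; };

int main(void)
{
	enum fate act[COLORS] = { FATE_SHARED_RSS, FATE_QUEUE };
	struct rss_desc desc_v[COLORS];
	struct rss_desc *desc[COLORS] = { NULL };
	unsigned int i;

	for (i = 0; i < COLORS; i++) {
		switch (act[i]) {
		case FATE_SHARED_RSS: /* fan out to the policy's RSS queues */
			desc_v[i] = (struct rss_desc){ .valid = 1, .queue_num = 4 };
			desc[i] = &desc_v[i];
			break;
		case FATE_QUEUE: /* single queue action */
			desc_v[i] = (struct rss_desc){ .valid = 1, .queue_num = 1 };
			desc[i] = &desc_v[i];
			break;
		default:
			desc[i] = NULL; /* color has no fan-out fate */
			break;
		}
	}
	for (i = 0; i < COLORS; i++)
		printf("color %u: %s\n", i, desc[i] ? "has desc" : "none");
	return 0;
}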
+ * @param wks + * Pointer to thread flow work space. * @param[in] attr * Flow rule attributes. * @param[in] items @@ -4723,7 +5032,7 @@ exit: static int flow_meter_split_prep(struct rte_eth_dev *dev, struct rte_flow *flow, - struct mlx5_flow_meter_info *fm, + struct mlx5_flow_workspace *wks, const struct rte_flow_attr *attr, const struct rte_flow_item items[], struct rte_flow_item sfx_items[], @@ -4734,6 +5043,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_info *fm = wks->fm; struct rte_flow_action *tag_action = NULL; struct rte_flow_item *tag_item; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -4744,6 +5054,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t tag_id = 0; struct rte_flow_item *vlan_item_dst = NULL; const struct rte_flow_item *vlan_item_src = NULL; + const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; int32_t flow_src_port = priv->representor_id; @@ -4775,6 +5086,15 @@ flow_meter_split_prep(struct rte_eth_dev *dev, pid_v, "Failed to get port info."); flow_src_port = port_priv->representor_id; + if (!fm->def_policy && wks->policy->is_hierarchy && + flow_src_port != priv->representor_id) { + if (flow_drv_mtr_hierarchy_rule_create(dev, + flow, fm, + flow_src_port, + items, + error)) + return -rte_errno; + } memcpy(sfx_items, items, sizeof(*sfx_items)); sfx_items++; break; @@ -4858,16 +5178,16 @@ flow_meter_split_prep(struct rte_eth_dev *dev, struct mlx5_flow_tbl_data_entry *tbl_data; if (!fm->def_policy) { - sub_policy = get_meter_sub_policy(dev, flow, - fm->policy_id, attr, - items, error); + sub_policy = get_meter_sub_policy(dev, flow, wks, + attr, orig_items, + error); if (!sub_policy) return -rte_errno; } else { enum mlx5_meter_domain mtr_domain = attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER : - attr->egress ? MLX5_MTR_DOMAIN_EGRESS : - MLX5_MTR_DOMAIN_INGRESS; + (attr->egress ? MLX5_MTR_DOMAIN_EGRESS : + MLX5_MTR_DOMAIN_INGRESS); sub_policy = &priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy; @@ -4883,8 +5203,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, actions_pre++; if (!tag_action) return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "No tag action space."); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No tag action space."); if (!mtr_flow_id) { tag_action->type = RTE_FLOW_ACTION_TYPE_VOID; goto exit; @@ -4981,6 +5301,8 @@ exit: * Pointer to the Q/RSS action. * @param[in] actions_n * Number of original actions. + * @param[in] mtr_sfx + * Check if it is in meter suffix table. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -4993,7 +5315,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, struct rte_flow_action *split_actions, const struct rte_flow_action *actions, const struct rte_flow_action *qrss, - int actions_n, struct rte_flow_error *error) + int actions_n, int mtr_sfx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -5008,15 +5331,15 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * - Add jump to mreg CP_TBL. * As a result, there will be one more action. */ - ++actions_n; memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); + /* Count MLX5_RTE_FLOW_ACTION_TYPE_TAG. 
*/ + ++actions_n; set_tag = (void *)(split_actions + actions_n); /* - * If tag action is not set to void(it means we are not the meter - * suffix flow), add the tag action. Since meter suffix flow already - * has the tag added. + * If we are not the meter suffix flow, add the tag action. + * Since meter suffix flow already has the tag added. */ - if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { + if (!mtr_sfx) { /* * Allocate the new subflow ID. This one is unique within * device and not shared with representors. Otherwise, @@ -5049,6 +5372,12 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; + } else { + /* + * If we are the suffix flow of meter, tag already exist. + * Set the QUEUE/RSS action to void. + */ + split_actions[qrss_idx].type = RTE_FLOW_ACTION_TYPE_VOID; } /* JUMP action to jump to mreg copy table (CP_TBL). */ jump = (void *)(set_tag + 1); @@ -5080,7 +5409,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx - * The encap action inndex. + * The encap action index. * * @return * 0 on success, negative value otherwise @@ -5156,6 +5485,7 @@ flow_check_match_action(const struct rte_flow_action actions[], int *modify_after_mirror) { const struct rte_flow_action_sample *sample; + const struct rte_flow_action_raw_decap *decap; int actions_n = 0; uint32_t ratio = 0; int sub_type = 0; @@ -5208,12 +5538,29 @@ flow_check_match_action(const struct rte_flow_action actions[], case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - case RTE_FLOW_ACTION_TYPE_RAW_DECAP: case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: case RTE_FLOW_ACTION_TYPE_METER: if (fdb_mirror) *modify_after_mirror = 1; break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + decap = actions->conf; + while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; + actions_n++; + if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + const struct rte_flow_action_raw_encap *encap = + actions->conf; + if (decap->size <= + MLX5_ENCAPSULATION_DECISION_SIZE && + encap->size > + MLX5_ENCAPSULATION_DECISION_SIZE) + /* L3 encap. */ + break; + } + if (fdb_mirror) + *modify_after_mirror = 1; + break; default: break; } @@ -5327,7 +5674,7 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); - ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); + ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool @@ -5428,7 +5775,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; + struct mlx5_sh_config *config = &priv->sh->config; const struct rte_flow_action *qrss = NULL; struct rte_flow_action *ext_actions = NULL; struct mlx5_flow *dev_flow = NULL; @@ -5485,17 +5832,6 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "metadata flow"); - /* - * If we are the suffix flow of meter, tag already exist. - * Set the tag action to void. 
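The RAW_DECAP arm above looks ahead past VOID actions for a paired RAW_ENCAP and classifies the pair as L3 encapsulation when the decap strips no more than MLX5_ENCAPSULATION_DECISION_SIZE bytes while the encap adds more than that; only then is the modify-after-mirror flag skipped. The size test in isolation; the threshold value below is a made-up placeholder, not the driver's constant:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Placeholder threshold; the real MLX5_ENCAPSULATION_DECISION_SIZE is
 * defined by the PMD and is not reproduced here. */
#define ENCAP_DECISION_SIZE 14

static bool is_l3_encap(size_t decap_size, size_t encap_size)
{
	return decap_size <= ENCAP_DECISION_SIZE &&
	       encap_size > ENCAP_DECISION_SIZE;
}

int main(void)
{
	printf("%d\n", is_l3_encap(14, 50)); /* 1: header rewrite, L3 encap */
	printf("%d\n", is_l3_encap(64, 50)); /* 0: strips a full tunnel */
	return 0;
}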
- */ - if (mtr_sfx) - ext_actions[qrss - actions].type = - RTE_FLOW_ACTION_TYPE_VOID; - else - ext_actions[qrss - actions].type = - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action * and appended set tag and jump to register copy table @@ -5503,7 +5839,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * in advance, because it is needed for set tag action. */ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, - qrss, actions_n, error); + qrss, actions_n, + mtr_sfx, error); if (!mtr_sfx && !qrss_id) { ret = -rte_errno; goto exit; @@ -5594,6 +5931,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Add suffix subflow to execute Q/RSS. */ flow_split_info->prefix_layers = layers; flow_split_info->prefix_mark = 0; + flow_split_info->table_id = 0; ret = flow_create_split_inner(dev, flow, &dev_flow, &q_attr, mtr_sfx ? items : q_items, q_actions, @@ -5716,6 +6054,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, bool has_mtr = false; bool has_modify = false; bool set_mtr_reg = true; + bool is_mtr_hierarchy = false; uint32_t meter_id = 0; uint32_t mtr_idx = 0; uint32_t mtr_flow_id = 0; @@ -5748,14 +6087,33 @@ flow_create_split_meter(struct rte_eth_dev *dev, } MLX5_ASSERT(wks); wks->fm = fm; + if (!fm->def_policy) { + wks->policy = mlx5_flow_meter_policy_find(dev, + fm->policy_id, + NULL); + MLX5_ASSERT(wks->policy); + if (wks->policy->is_hierarchy) { + wks->final_policy = + mlx5_flow_meter_hierarchy_get_final_policy(dev, + wks->policy); + if (!wks->final_policy) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Failed to find terminal policy of hierarchy."); + is_mtr_hierarchy = true; + } + } /* * If it isn't default-policy Meter, and * 1. There's no action in flow to change * packet (modify/encap/decap etc.), OR * 2. No drop count needed for this meter. - * no need to use regC to save meter id anymore. + * 3. It's not meter hierarchy. + * Then no need to use regC to save meter id anymore. */ - if (!fm->def_policy && (!has_modify || !fm->drop_cnt)) + if (!fm->def_policy && !is_mtr_hierarchy && + (!has_modify || !fm->drop_cnt)) set_mtr_reg = false; /* Prefix actions: meter, decap, encap, tag, jump, end. */ act_size = sizeof(struct rte_flow_action) * (actions_n + 6) + @@ -5778,7 +6136,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, pre_actions = sfx_actions + 1; else pre_actions = sfx_actions + actions_n; - ret = flow_meter_split_prep(dev, flow, fm, &sfx_attr, + ret = flow_meter_split_prep(dev, flow, wks, &sfx_attr, items, sfx_items, actions, sfx_actions, pre_actions, (set_mtr_reg ? &mtr_flow_id : NULL), @@ -5788,7 +6146,6 @@ flow_create_split_meter(struct rte_eth_dev *dev, goto exit; } /* Add the prefix subflow. */ - flow_split_info->prefix_mark = 0; skip_scale_restore = flow_split_info->skip_scale; flow_split_info->skip_scale |= 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT; @@ -5821,7 +6178,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + flow_split_info->prefix_mark |= wks->mark; flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. 
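The regC save decision above gains a hierarchy clause: the meter id must still be carried in a register when the policy is hierarchical, even if nothing modifies the packet and no drop counter is needed. Condensed into a predicate, with descriptive names standing in for the flags computed above:

#include <stdbool.h>
#include <stdio.h>

/* Keep regC unless: non-default policy, not a hierarchy, and either no
 * packet-modifying action or no drop counter. */
static bool need_mtr_reg(bool def_policy, bool is_hierarchy,
			 bool has_modify, bool drop_cnt)
{
	if (def_policy)
		return true;
	if (is_hierarchy)
		return true;
	return has_modify && drop_cnt;
}

int main(void)
{
	printf("%d\n", need_mtr_reg(false, true, false, false)); /* 1 */
	printf("%d\n", need_mtr_reg(false, false, true, false)); /* 0 */
	printf("%d\n", need_mtr_reg(false, false, true, true));  /* 1 */
	return 0;
}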
*/ @@ -5887,6 +6244,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); #endif size_t act_size; size_t item_size; @@ -5925,7 +6283,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, * When reg_c_preserve is set, metadata registers Cx preserve * their value even through packet duplication. */ - add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve); + add_tag = (!fdb_tx || + priv->sh->cdev->config.hca_attr.reg_c_preserve); if (add_tag) sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); @@ -5973,7 +6332,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); - flow_split_info->prefix_mark = dev_flow->handle->mark; + MLX5_ASSERT(wks); + flow_split_info->prefix_mark |= wks->mark; /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. @@ -6112,7 +6472,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks, * A flow index on success, 0 otherwise and rte_errno is set. */ static uint32_t -flow_list_create(struct rte_eth_dev *dev, uint32_t *list, +flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action original_actions[], @@ -6127,7 +6487,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS; union { struct mlx5_flow_expand_rss buf; - uint8_t buffer[2048]; + uint8_t buffer[4096]; } expand_buffer; union { struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; @@ -6180,7 +6540,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, external, hairpin_flow, error); if (ret < 0) goto error_before_hairpin_split; - flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); + flow = mlx5_ipool_zmalloc(priv->flows[type], &idx); if (!flow) { rte_errno = ENOMEM; goto error_before_hairpin_split; @@ -6211,14 +6571,14 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, * mlx5_flow_hashfields_adjust() in advance. */ rss_desc->level = rss->level; - /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ - rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; + /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */ + rss_desc->types = !rss->types ? 
RTE_ETH_RSS_IP : rss->types; } flow->dev_handles = 0; if (rss && rss->types) { unsigned int graph_root; - graph_root = find_graph_root(items, rss->level); + graph_root = find_graph_root(rss->level); ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer), items, rss->types, mlx5_support_expansion, graph_root); @@ -6310,12 +6670,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, if (ret < 0) goto error; } - if (list) { - rte_spinlock_lock(&priv->flow_list_lock); - ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, - flow, next); - rte_spinlock_unlock(&priv->flow_list_lock); - } + flow->type = type; flow_rxq_flags_set(dev, flow); rte_free(translated_actions); tunnel = flow_tunnel_from_rule(wks->flows); @@ -6337,7 +6692,7 @@ error: mlx5_ipool_get (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); + mlx5_ipool_free(priv->flows[type], idx); rte_errno = ret; /* Restore rte_errno. */ ret = rte_errno; rte_errno = ret; @@ -6389,14 +6744,87 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_error error; - return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows, + return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, &pattern, actions, false, &error); } +/** + * Create a dedicated flow rule on e-switch table 1, matches ESW manager + * and sq number, directs all packets to peer vport. + * + * @param dev + * Pointer to Ethernet device. + * @param txq + * Txq index. + * + * @return + * Flow ID on success, 0 otherwise and rte_errno is set. + */ +uint32_t +mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq) +{ + struct rte_flow_attr attr = { + .group = 0, + .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR, + .ingress = 1, + .egress = 0, + .transfer = 1, + }; + struct rte_flow_item_port_id port_spec = { + .id = MLX5_PORT_ESW_MGR, + }; + struct mlx5_rte_flow_item_tx_queue txq_spec = { + .queue = txq, + }; + struct rte_flow_item pattern[] = { + { + .type = RTE_FLOW_ITEM_TYPE_PORT_ID, + .spec = &port_spec, + }, + { + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + .spec = &txq_spec, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action_jump jump = { + .group = 1, + }; + struct rte_flow_action_port_id port = { + .id = dev->data->port_id, + }; + struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_flow_error error; + + /* + * Creates group 0, highest priority jump flow. + * Matches txq to bypass kernel packets. + */ + if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions, + false, &error) == 0) + return 0; + /* Create group 1, lowest priority redirect flow for txq. */ + attr.group = 1; + actions[0].conf = &port; + actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID; + return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, + actions, false, &error); +} + /** * Validate a flow supported by the NIC. 
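flow_list_create and flow_list_destroy now draw flows from per-type indexed pools (priv->flows[type]) and record the type in the flow itself, replacing the caller-supplied ILIST. A toy model of the typed pools and the destroy-time type assertion, with a fixed-size array standing in for the ipool:

#include <assert.h>
#include <stdint.h>

enum flow_type { TYPE_CTL, TYPE_GEN, TYPE_MAX };

struct flow {
	enum flow_type type;
	int in_use;
};

static struct flow pools[TYPE_MAX][8]; /* one indexed pool per class */

static uint32_t flow_create(enum flow_type t)
{
	uint32_t i;

	for (i = 0; i < 8; i++) {
		if (!pools[t][i].in_use) {
			pools[t][i].in_use = 1;
			pools[t][i].type = t; /* like flow->type = type */
			return i + 1;         /* 0 means failure */
		}
	}
	return 0;
}

static void flow_destroy(enum flow_type t, uint32_t idx)
{
	struct flow *f = &pools[t][idx - 1];

	assert(f->type == t); /* like MLX5_ASSERT(flow->type == type) */
	f->in_use = 0;
}

int main(void)
{
	uint32_t idx = flow_create(TYPE_GEN);

	if (idx)
		flow_destroy(TYPE_GEN, idx);
	return 0;
}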
* @@ -6446,6 +6874,13 @@ mlx5_flow_create(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; + if (priv->sh->config.dv_flow_en == 2) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q creation not supported"); + return NULL; + } /* * If the device is not started yet, it is not allowed to created a * flow from application. PMD default flows and traffic control flows @@ -6461,8 +6896,9 @@ mlx5_flow_create(struct rte_eth_dev *dev, return NULL; } - return (void *)(uintptr_t)flow_list_create(dev, &priv->flows, - attr, items, actions, true, error); + return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN, + attr, items, actions, + true, error); } /** @@ -6470,24 +6906,19 @@ mlx5_flow_create(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. - * @param list - * Pointer to the Indexed flow list. If this parameter NULL, - * there is no flow removal from the list. Be noted that as - * flow is add to the indexed list, memory of the indexed - * list points to maybe changed as flow destroyed. * @param[in] flow_idx * Index of flow to destroy. */ static void -flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, +flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, uint32_t flow_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_RTE_FLOW], flow_idx); + struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx); if (!flow) return; + MLX5_ASSERT(flow->type == type); /* * Update RX queue flags only if port is started, otherwise it is * already clean. @@ -6495,12 +6926,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, if (dev->data->dev_started) flow_rxq_flags_trim(dev, flow); flow_drv_destroy(dev, flow); - if (list) { - rte_spinlock_lock(&priv->flow_list_lock); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, - flow_idx, flow, next); - rte_spinlock_unlock(&priv->flow_list_lock); - } if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; @@ -6510,7 +6935,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, mlx5_flow_tunnel_free(dev, tunnel); } flow_mreg_del_copy_action(dev, flow); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); + mlx5_ipool_free(priv->flows[type], flow_idx); } /** @@ -6518,18 +6943,21 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, * * @param dev * Pointer to Ethernet device. - * @param list - * Pointer to the Indexed flow list. + * @param type + * Flow type to be flushed. * @param active - * If flushing is called avtively. + * If flushing is called actively. */ void -mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active) +mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, + bool active) { - uint32_t num_flushed = 0; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t num_flushed = 0, fidx = 1; + struct rte_flow *flow; - while (*list) { - flow_list_destroy(dev, list, *list); + MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) { + flow_list_destroy(dev, type, fidx); num_flushed++; } if (active) { @@ -6624,8 +7052,7 @@ flow_alloc_thread_workspace(void) data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; return data; err: - if (data->rss_desc.queue) - free(data->rss_desc.queue); + free(data->rss_desc.queue); free(data); return NULL; } @@ -6701,18 +7128,19 @@ mlx5_flow_pop_thread_workspace(void) * @return the number of flows not released. 
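The flush above walks the pool by index (MLX5_IPOOL_FOREACH), so each entry can be destroyed during traversal; there is no embedded next pointer to corrupt, unlike the old ILIST loop. A compact model with an array standing in for the indexed pool:

#include <stdio.h>

#define POOL_SZ 8
static int pool[POOL_SZ]; /* non-zero = live entry */

static int pool_next(int from) /* next live index, or -1 when done */
{
	int i;

	for (i = from; i < POOL_SZ; i++)
		if (pool[i])
			return i;
	return -1;
}

int main(void)
{
	int idx, flushed = 0;

	pool[1] = pool[3] = pool[6] = 1;
	for (idx = pool_next(0); idx >= 0; idx = pool_next(idx + 1)) {
		pool[idx] = 0; /* like flow_list_destroy(dev, type, idx) */
		flushed++;
	}
	printf("flushed %d flows\n", flushed);
	return 0;
}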
*/ int -mlx5_flow_verify(struct rte_eth_dev *dev) +mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; - uint32_t idx; - int ret = 0; + uint32_t idx = 0; + int ret = 0, i; - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, - flow, next) { - DRV_LOG(DEBUG, "port %u flow %p still referenced", - dev->data->port_id, (void *)flow); - ++ret; + for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { + MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) { + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); + ret++; + } } return ret; } @@ -6732,7 +7160,6 @@ int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue) { - struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .egress = 1, .priority = 0, @@ -6765,8 +7192,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; actions[0].conf = &jump; actions[1].type = RTE_FLOW_ACTION_TYPE_END; - flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, false, &error); if (!flow_idx) { DRV_LOG(DEBUG, "Failed to create ctrl flow: rte_errno(%d)," @@ -6851,12 +7278,12 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, if (!priv->reta_idx_n || !priv->rxqs_n) { return 0; } - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, false, &error); if (!flow_idx) return -rte_errno; return 0; @@ -6897,7 +7324,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_lacp_miss(struct rte_eth_dev *dev) { - struct mlx5_priv *priv = dev->data->dev_private; /* * The LACP matching is done by only using ether type since using * a multicast dst mac causes kernel to give low priority to this flow. 
@@ -6931,8 +7357,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev) }, }; struct rte_flow_error error; - uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL, + &attr, items, actions, + false, &error); if (!flow_idx) return -rte_errno; @@ -6952,7 +7379,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q destruction not supported"); + flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, + (uintptr_t)(void *)flow); return 0; } @@ -6966,9 +7399,7 @@ int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error __rte_unused) { - struct mlx5_priv *priv = dev->data->dev_private; - - mlx5_flow_list_flush(dev, &priv->flows, false); + mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false); return 0; } @@ -7019,8 +7450,7 @@ flow_drv_query(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; const struct mlx5_flow_driver_ops *fops; - struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_RTE_FLOW], + struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], flow_idx); enum mlx5_flow_drv_type ftype; @@ -7051,7 +7481,13 @@ mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; + struct mlx5_priv *priv = dev->data->dev_private; + if (priv->sh->config.dv_flow_en == 2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow non-Q query not supported"); ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, error); if (ret < 0) @@ -7106,14 +7542,14 @@ mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev, struct rte_flow_attr *attr, bool *is_rss, uint8_t *domain_bitmap, - bool *is_def_policy, + uint8_t *policy_mode, struct rte_mtr_error *error) { const struct mlx5_flow_driver_ops *fops; fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); - return fops->validate_mtr_acts(dev, actions, attr, - is_rss, domain_bitmap, is_def_policy, error); + return fops->validate_mtr_acts(dev, actions, attr, is_rss, + domain_bitmap, policy_mode, error); } /** @@ -7418,14 +7854,15 @@ mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) */ int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, - bool clear, uint64_t *pkts, uint64_t *bytes) + bool clear, uint64_t *pkts, uint64_t *bytes, void **action) { const struct mlx5_flow_driver_ops *fops; struct rte_flow_attr attr = { .transfer = 0 }; if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); - return fops->counter_query(dev, cnt, clear, pkts, bytes); + return fops->counter_query(dev, cnt, clear, pkts, + bytes, action); } DRV_LOG(ERR, "port %u counter query is not supported.", @@ -7433,6 +7870,136 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, return -ENOTSUP; } +/** + * Get information about HWS pre-configurable resources. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[out] port_info + * Pointer to port information. + * @param[out] queue_info + * Pointer to queue information. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
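mlx5_counter_query grows a void **action out-parameter so dump code can recover the counter's DevX action handle together with the hit/byte counts. A stub with the same out-parameter shape; all values below are fabricated for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int counter_query(uint32_t cnt, int clear, uint64_t *pkts,
			 uint64_t *bytes, void **action)
{
	static int dummy_handle; /* placeholder for the DevX action */

	(void)cnt;
	(void)clear;
	*pkts = 100;
	*bytes = 6400;
	if (action != NULL)
		*action = &dummy_handle;
	return 0;
}

int main(void)
{
	uint64_t pkts, bytes;
	void *action = NULL;

	if (counter_query(1, 0, &pkts, &bytes, &action) == 0 && action)
		printf("action=%p pkts=%" PRIu64 " bytes=%" PRIu64 "\n",
		       action, pkts, bytes);
	return 0;
}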
+ */ +static int +mlx5_flow_info_get(struct rte_eth_dev *dev, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "info get with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->info_get(dev, port_info, queue_info, error); +} + +/** + * Configure port HWS resources. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] port_attr + * Port configuration attributes. + * @param[in] nb_queue + * Number of queue. + * @param[in] queue_attr + * Array that holds attributes for each flow queue. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_port_configure(struct rte_eth_dev *dev, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port configure with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->configure(dev, port_attr, nb_queue, queue_attr, error); +} + +/** + * Create flow item template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the item template attributes. + * @param[in] items + * The template item pattern. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static struct rte_flow_pattern_template * +mlx5_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *attr, + const struct rte_flow_item items[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "pattern create with incorrect steering mode"); + return NULL; + } + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->pattern_template_create(dev, attr, items, error); +} + +/** + * Destroy flow item template. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] template + * Pointer to the item template to be destroyed. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *template, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + + if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "pattern destroy with incorrect steering mode"); + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW); + return fops->pattern_template_destroy(dev, template, error); +} + /** * Allocate a new memory for the counter values wrapped by all the needed * management. 
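Each new template-API entry point above repeats the same gate: resolve the driver type, refuse anything but MLX5_FLOW_TYPE_HW with ENOTSUP, then dispatch through the HW ops table. The shape of that gate, reduced to a standalone model:

#include <errno.h>
#include <stdio.h>

enum drv_type { DRV_VERBS, DRV_DV, DRV_HW };

struct drv_ops { int (*info_get)(void); };

static int hw_info_get(void) { return 0; }
static const struct drv_ops hw_ops = { .info_get = hw_info_get };

static int info_get(enum drv_type active)
{
	if (active != DRV_HW)
		return -ENOTSUP; /* "info get with incorrect steering mode" */
	return hw_ops.info_get();
}

int main(void)
{
	printf("dv: %d, hw: %d\n", info_get(DRV_DV), info_get(DRV_HW));
	return 0;
}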
@@ -7446,7 +8013,6 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, static int mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_devx_mkey_attr mkey_attr; struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; @@ -7456,6 +8022,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) sizeof(struct mlx5_counter_stats_mem_mng); size_t pgsize = rte_mem_page_size(); uint8_t *mem; + int ret; int i; if (pgsize == (size_t)-1) { @@ -7470,23 +8037,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) } mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; - mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size, - IBV_ACCESS_LOCAL_WRITE); - if (!mem_mng->umem) { - rte_errno = errno; - mlx5_free(mem); - return -rte_errno; - } - memset(&mkey_attr, 0, sizeof(mkey_attr)); - mkey_attr.addr = (uintptr_t)mem; - mkey_attr.size = size; - mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); - mkey_attr.pd = sh->pdn; - mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write; - mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read; - mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); - if (!mem_mng->dm) { - mlx5_os_umem_dereg(mem_mng->umem); + ret = mlx5_os_wrapped_mkey_create(sh->cdev->ctx, sh->cdev->pd, + sh->cdev->pdn, mem, size, + &mem_mng->wm); + if (ret) { rte_errno = errno; mlx5_free(mem); return -rte_errno; @@ -7605,7 +8159,7 @@ mlx5_flow_query_alarm(void *arg) ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, MLX5_COUNTERS_PER_POOL, NULL, NULL, - pool->raw_hw->mem_mng->dm->id, + pool->raw_hw->mem_mng->wm.lkey, (void *)(uintptr_t) pool->raw_hw->data, sh->devx_comp, @@ -7841,13 +8395,12 @@ int mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; enum modify_reg idx; int n = 0; /* reg_c[0] and reg_c[1] are reserved. */ - config->flow_mreg_c[n++] = REG_C_0; - config->flow_mreg_c[n++] = REG_C_1; + priv->sh->flow_mreg_c[n++] = REG_C_0; + priv->sh->flow_mreg_c[n++] = REG_C_1; /* Discover availability of other reg_c's. */ for (idx = REG_C_2; idx <= REG_C_7; ++idx) { struct rte_flow_attr attr = { @@ -7883,26 +8436,27 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) struct rte_flow *flow; struct rte_flow_error error; - if (!config->dv_flow_en) + if (!priv->sh->config.dv_flow_en) break; /* Create internal flow, validation skips copy action. 
*/ - flow_idx = flow_list_create(dev, NULL, &attr, items, - actions, false, &error); - flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, + items, actions, false, &error); + flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], flow_idx); if (!flow) continue; - config->flow_mreg_c[n++] = idx; - flow_list_destroy(dev, NULL, flow_idx); + priv->sh->flow_mreg_c[n++] = idx; + flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx); } for (; n < MLX5_MREG_C_NUM; ++n) - config->flow_mreg_c[n] = REG_NON; + priv->sh->flow_mreg_c[n] = REG_NON; + priv->sh->metadata_regc_check_flag = 1; return 0; } int save_dump_file(const uint8_t *data, uint32_t size, - uint32_t type, uint32_t id, void *arg, FILE *file) + uint32_t type, uint64_t id, void *arg, FILE *file) { char line[BUF_SIZE]; uint32_t out = 0; @@ -7914,17 +8468,18 @@ save_dump_file(const uint8_t *data, uint32_t size, switch (type) { case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR: actions_num = *(uint32_t *)(arg); - out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,", + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",%d,", type, id, actions_num); break; case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT: - out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,", + out += snprintf(line + out, BUF_SIZE - out, "%d,0x%" PRIx64 ",", type, id); break; case DR_DUMP_REC_TYPE_PMD_COUNTER: count = (struct rte_flow_query_count *)arg; - fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type, - id, count->hits, count->bytes); + fprintf(file, + "%d,0x%" PRIx64 ",%" PRIu64 ",%" PRIu64 "\n", + type, id, count->hits, count->bytes); return 0; default: return -1; @@ -7998,29 +8553,33 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, uint32_t actions_num; const uint8_t *data; size_t size; - uint32_t id; + uint64_t id; uint32_t type; + void *action = NULL; if (!flow) { return rte_flow_error_set(error, ENOENT, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "invalid flow handle"); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "invalid flow handle"); } handle_idx = flow->dev_handles; + /* query counter */ + if (flow->counter && + (!mlx5_counter_query(dev, flow->counter, false, + &count.hits, &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); + } + while (handle_idx) { dh = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_MLX5_FLOW], handle_idx); + [MLX5_IPOOL_MLX5_FLOW], handle_idx); if (!dh) continue; handle_idx = dh->next.next; - id = (uint32_t)(uintptr_t)dh->drv_flow; - - /* query counter */ - type = DR_DUMP_REC_TYPE_PMD_COUNTER; - if (!mlx5_flow_query_counter(dev, flow, &count, error)) - save_dump_file(NULL, 0, type, - id, (void *)&count, file); /* Get modify_hdr and encap_decap buf from ipools. 
*/ encap_decap = NULL; @@ -8034,14 +8593,16 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, if (modify_hdr) { data = (const uint8_t *)modify_hdr->actions; size = (size_t)(modify_hdr->actions_num) * 8; + id = (uint64_t)(uintptr_t)modify_hdr->action; actions_num = modify_hdr->actions_num; type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; save_dump_file(data, size, type, id, - (void *)(&actions_num), file); + (void *)(&actions_num), file); } if (encap_decap) { data = encap_decap->buf; size = encap_decap->size; + id = (uint64_t)(uintptr_t)encap_decap->action; type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; save_dump_file(data, size, type, id, NULL, file); @@ -8049,6 +8610,117 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, } return 0; } + +/** + * Dump all flow's encap_decap/modify_hdr/counter data to file + * + * @param[in] dev + * The pointer to Ethernet device. + * @param[in] file + * A pointer to a file for output. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * @return + * 0 on success, a negative value otherwise. + */ +static int +mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, + FILE *file, struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_hlist *h; + struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; + struct mlx5_flow_dv_encap_decap_resource *encap_decap; + struct rte_flow_query_count count; + uint32_t actions_num; + const uint8_t *data; + size_t size; + uint64_t id; + uint32_t type; + uint32_t i; + uint32_t j; + struct mlx5_list_inconst *l_inconst; + struct mlx5_list_entry *e; + int lcore_index; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + uint32_t max; + void *action; + + /* encap_decap hlist is lcore_share, get global core cache. 
*/ + i = MLX5_LIST_GLOBAL; + h = sh->encaps_decaps; + if (h) { + for (j = 0; j <= h->mask; j++) { + l_inconst = &h->buckets[j].l; + if (!l_inconst || !l_inconst->cache[i]) + continue; + + e = LIST_FIRST(&l_inconst->cache[i]->h); + while (e) { + encap_decap = + (struct mlx5_flow_dv_encap_decap_resource *)e; + data = encap_decap->buf; + size = encap_decap->size; + id = (uint64_t)(uintptr_t)encap_decap->action; + type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT; + save_dump_file(data, size, type, + id, NULL, file); + e = LIST_NEXT(e, next); + } + } + } + + /* get modify_hdr */ + h = sh->modify_cmds; + if (h) { + lcore_index = rte_lcore_index(rte_lcore_id()); + if (unlikely(lcore_index == -1)) { + lcore_index = MLX5_LIST_NLCORE; + rte_spinlock_lock(&h->l_const.lcore_lock); + } + i = lcore_index; + + for (j = 0; j <= h->mask; j++) { + l_inconst = &h->buckets[j].l; + if (!l_inconst || !l_inconst->cache[i]) + continue; + + e = LIST_FIRST(&l_inconst->cache[i]->h); + while (e) { + modify_hdr = + (struct mlx5_flow_dv_modify_hdr_resource *)e; + data = (const uint8_t *)modify_hdr->actions; + size = (size_t)(modify_hdr->actions_num) * 8; + actions_num = modify_hdr->actions_num; + id = (uint64_t)(uintptr_t)modify_hdr->action; + type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; + save_dump_file(data, size, type, id, + (void *)(&actions_num), file); + e = LIST_NEXT(e, next); + } + } + + if (unlikely(lcore_index == MLX5_LIST_NLCORE)) + rte_spinlock_unlock(&h->l_const.lcore_lock); + } + + /* get counter */ + MLX5_ASSERT(cmng->n_valid <= cmng->n); + max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; + for (j = 1; j <= max; j++) { + action = NULL; + if ((!mlx5_counter_query(dev, j, false, &count.hits, + &count.bytes, &action)) && action) { + id = (uint64_t)(uintptr_t)action; + type = DR_DUMP_REC_TYPE_PMD_COUNTER; + save_dump_file(NULL, 0, type, + id, (void *)&count, file); + } + } + return 0; +} #endif /** @@ -8062,7 +8734,7 @@ mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. * @return - * 0 on success, a nagative value otherwise. + * 0 on success, a negative value otherwise. 
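The modify-header walk above must pick a per-lcore cache slot: rte_lcore_index() for an EAL thread, otherwise the shared MLX5_LIST_NLCORE slot, which may only be touched under the list's lcore lock. The locking pattern in isolation, with a pthread mutex standing in for rte_spinlock and a stubbed lcore lookup:

#include <pthread.h>
#include <stdio.h>

#define NLCORE 4 /* shared overflow slot, like MLX5_LIST_NLCORE */

static pthread_mutex_t lcore_lock = PTHREAD_MUTEX_INITIALIZER;

static int lcore_index(void)
{
	return -1; /* pretend the caller is a non-EAL thread */
}

int main(void)
{
	int idx = lcore_index();
	int locked = 0;

	if (idx == -1) { /* non-EAL thread: use the shared slot */
		idx = NLCORE;
		pthread_mutex_lock(&lcore_lock);
		locked = 1;
	}
	printf("walking cache slot %d\n", idx);
	/* ... iterate buckets[j].l.cache[idx] here ... */
	if (locked)
		pthread_mutex_unlock(&lcore_lock);
	return 0;
}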
*/ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, @@ -8075,11 +8747,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, int ret; struct mlx5_flow_handle *dh; struct rte_flow *flow; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - uint32_t idx; -#endif - if (!priv->config.dv_flow_en) { + if (!sh->config.dv_flow_en) { if (fputs("device dv flow disabled\n", file) <= 0) return -errno; return -ENOTSUP; @@ -8088,19 +8757,18 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, /* dump all */ if (!flow_idx) { #ifdef HAVE_IBV_FLOW_DV_SUPPORT - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], - priv->flows, idx, flow, next) - mlx5_flow_dev_dump_ipool(dev, flow, file, error); + if (mlx5_flow_dev_dump_sh_all(dev, file, error)) + return -EINVAL; #endif return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain, sh->tx_domain, file); } /* dump one */ - flow = mlx5_ipool_get(priv->sh->ipool - [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx); + flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], + (uintptr_t)(void *)flow_idx); if (!flow) - return -ENOENT; + return -EINVAL; #ifdef HAVE_IBV_FLOW_DV_SUPPORT mlx5_flow_dev_dump_ipool(dev, flow, file, error); @@ -8390,6 +9058,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev) return ret; } +/** + * Validate existing indirect actions against current device configuration + * and attach them to device resources. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_action_handle_attach(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool = + priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; + struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; + int ret = 0; + uint32_t idx; + + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + const char *message; + uint32_t queue_idx; + + ret = mlx5_validate_rss_queues(dev, ind_tbl->queues, + ind_tbl->queues_n, + &message, &queue_idx); + if (ret != 0) { + DRV_LOG(ERR, "Port %u cannot use queue %u in RSS: %s", + dev->data->port_id, ind_tbl->queues[queue_idx], + message); + break; + } + } + if (ret != 0) + return ret; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + ret = mlx5_ind_table_obj_attach(dev, ind_tbl); + if (ret != 0) { + DRV_LOG(ERR, "Port %u could not attach " + "indirection table obj %p", + dev->data->port_id, (void *)ind_tbl); + goto error; + } + } + return 0; +error: + shared_rss_last = shared_rss; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + if (shared_rss == shared_rss_last) + break; + if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0) + DRV_LOG(CRIT, "Port %u could not detach " + "indirection table obj %p on rollback", + dev->data->port_id, (void *)ind_tbl); + } + return ret; +} + +/** + * Detach indirect actions of the device from its resources. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
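mlx5_action_handle_attach above is two-phase: validate every shared-RSS indirection table first, then attach them one by one, and on failure walk the list again, detaching everything attached before the failing element. The same rollback skeleton on a plain array:

#include <stdio.h>

#define N 4

static int attach(int i)  { return i == 2 ? -1 : 0; } /* fail on the 3rd */
static void detach(int i) { printf("detach %d (rollback)\n", i); }

int main(void)
{
	int i, failed = -1;

	for (i = 0; i < N; i++) {
		if (attach(i) != 0) {
			failed = i;
			break;
		}
	}
	if (failed < 0)
		return 0;
	/* Roll back: walk again, stopping at the element that failed. */
	for (i = 0; i < failed; i++)
		detach(i);
	return 1;
}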
+ */ +int +mlx5_action_handle_detach(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool = + priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS]; + struct mlx5_shared_action_rss *shared_rss, *shared_rss_last; + int ret = 0; + uint32_t idx; + + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + ret = mlx5_ind_table_obj_detach(dev, ind_tbl); + if (ret != 0) { + DRV_LOG(ERR, "Port %u could not detach " + "indirection table obj %p", + dev->data->port_id, (void *)ind_tbl); + goto error; + } + } + return 0; +error: + shared_rss_last = shared_rss; + ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) { + struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl; + + if (shared_rss == shared_rss_last) + break; + if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0) + DRV_LOG(CRIT, "Port %u could not attach " + "indirection table obj %p on rollback", + dev->data->port_id, (void *)ind_tbl); + } + return ret; +} + #ifndef HAVE_MLX5DV_DR #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1)) #else @@ -8434,7 +9212,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** - * tunnel offload functionalilty is defined for DV environment only + * tunnel offload functionality is defined for DV environment only */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ @@ -8524,7 +9302,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev, (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "invalid port configuration"); - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) ctx->action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) ctx->queue[i] = (*priv->reta_idx)[i]; @@ -8577,7 +9355,7 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_hlist_entry *he; + struct mlx5_list_entry *he; union tunnel_offload_mark mbits = { .val = mark }; union mlx5_flow_tbl_key table_key = { { @@ -8589,16 +9367,20 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) .is_egress = 0, } }; - he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); + struct mlx5_flow_cb_ctx ctx = { + .data = &table_key.v64, + }; + + he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx); return he ? 
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; } static void -mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, - struct mlx5_hlist_entry *entry) +mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx, + struct mlx5_list_entry *entry) { - struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_dev_ctx_shared *sh = tool_ctx; struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], @@ -8607,26 +9389,26 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, } static int -mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused, - struct mlx5_hlist_entry *entry, - uint64_t key, void *cb_ctx __rte_unused) +mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused, + struct mlx5_list_entry *entry, void *cb_ctx) { + struct mlx5_flow_cb_ctx *ctx = cb_ctx; union tunnel_tbl_key tbl = { - .val = key, + .val = *(uint64_t *)(ctx->data), }; struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group; } -static struct mlx5_hlist_entry * -mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key, - void *ctx __rte_unused) +static struct mlx5_list_entry * +mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx) { - struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_dev_ctx_shared *sh = tool_ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; struct tunnel_tbl_entry *tte; union tunnel_tbl_key tbl = { - .val = key, + .val = *(uint64_t *)(ctx->data), }; tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, @@ -8655,13 +9437,36 @@ err: return NULL; } +static struct mlx5_list_entry * +mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused, + struct mlx5_list_entry *oentry, + void *cb_ctx __rte_unused) +{ + struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte), + 0, SOCKET_ID_ANY); + + if (!tte) + return NULL; + memcpy(tte, oentry, sizeof(*tte)); + return &tte->hash; +} + +static void +mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused, + struct mlx5_list_entry *entry) +{ + struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); + + mlx5_free(tte); +} + static uint32_t tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, uint32_t group, uint32_t *table, struct rte_flow_error *error) { - struct mlx5_hlist_entry *he; + struct mlx5_list_entry *he; struct tunnel_tbl_entry *tte; union tunnel_tbl_key key = { .tunnel_id = tunnel ? tunnel->tunnel_id : 0, @@ -8669,9 +9474,12 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, }; struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_hlist *group_hash; + struct mlx5_flow_cb_ctx ctx = { + .data = &key.val, + }; group_hash = tunnel ? 
tunnel->groups : thub->groups; - he = mlx5_hlist_register(group_hash, key.val, NULL); + he = mlx5_hlist_register(group_hash, key.val, &ctx); if (!he) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -8785,15 +9593,17 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); return NULL; } - tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0, + tunnel->groups = mlx5_hlist_create("tunnel groups", 64, false, true, + priv->sh, mlx5_flow_tunnel_grp2tbl_create_cb, mlx5_flow_tunnel_grp2tbl_match_cb, - mlx5_flow_tunnel_grp2tbl_remove_cb); + mlx5_flow_tunnel_grp2tbl_remove_cb, + mlx5_flow_tunnel_grp2tbl_clone_cb, + mlx5_flow_tunnel_grp2tbl_clone_free_cb); if (!tunnel->groups) { mlx5_ipool_free(ipool, id); return NULL; } - tunnel->groups->ctx = priv->sh; /* initiate new PMD tunnel */ memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel)); tunnel->tunnel_id = id; @@ -8892,16 +9702,17 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) return -ENOMEM; LIST_INIT(&thub->tunnels); rte_spinlock_init(&thub->sl); - thub->groups = mlx5_hlist_create("flow groups", - rte_align32pow2(MLX5_MAX_TABLES), 0, - 0, mlx5_flow_tunnel_grp2tbl_create_cb, + thub->groups = mlx5_hlist_create("flow groups", 64, + false, true, sh, + mlx5_flow_tunnel_grp2tbl_create_cb, mlx5_flow_tunnel_grp2tbl_match_cb, - mlx5_flow_tunnel_grp2tbl_remove_cb); + mlx5_flow_tunnel_grp2tbl_remove_cb, + mlx5_flow_tunnel_grp2tbl_clone_cb, + mlx5_flow_tunnel_grp2tbl_clone_free_cb); if (!thub->groups) { err = -rte_errno; goto err; } - thub->groups->ctx = sh; sh->tunnel_hub = thub; return 0; @@ -8914,30 +9725,37 @@ err: return err; } -static inline bool +static inline int mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel, - const char *err_msg) + struct rte_flow_error *error) { - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application tunnel"; - goto out; - } + struct mlx5_priv *priv = dev->data->dev_private; + if (!priv->sh->config.dv_flow_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "flow DV interface is off"); + if (!is_tunnel_offload_active(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "tunnel offload was not activated"); + if (!tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "no application tunnel"); switch (tunnel->type) { default: - err_msg = "unsupported tunnel type"; - goto out; + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "unsupported tunnel type"); case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_GRE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + case RTE_FLOW_ITEM_TYPE_GENEVE: break; } - -out: - return !err_msg; + return 0; } static int @@ -8947,15 +9765,11 @@ mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, uint32_t *num_of_actions, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -8974,15 
+9788,11 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev, uint32_t *num_of_items, struct rte_flow_error *error) { - int ret; struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); + int ret = mlx5_flow_tunnel_validate(dev, app_tunnel, error); - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); + if (ret) + return ret; ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); if (ret < 0) { return rte_flow_error_set(error, ret, @@ -9085,7 +9895,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, { uint64_t ol_flags = m->ol_flags; const struct mlx5_flow_tbl_data_entry *tble; - const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID; + const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; if (!is_tunnel_offload_active(dev)) { info->flags = 0; @@ -9208,6 +10018,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh, } #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +/* Flex flow item API */ +static struct rte_flow_item_flex_handle * +mlx5_flow_flex_item_create(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_conf *conf, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item creation unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_create) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return NULL; + } + return fops->item_create(dev, conf, error); +} + +static int +mlx5_flow_flex_item_release(struct rte_eth_dev *dev, + const struct rte_flow_item_flex_handle *handle, + struct rte_flow_error *error) +{ + static const char err_msg[] = "flex item release unsupported"; + struct rte_flow_attr attr = { .transfer = 0 }; + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + + if (!fops->item_release) { + DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, err_msg); + return -rte_errno; + } + return fops->item_release(dev, handle, error); +} + static void mlx5_dbg__print_pattern(const struct rte_flow_item *item) { @@ -9226,3 +10075,143 @@ mlx5_dbg__print_pattern(const struct rte_flow_item *item) } printf("END\n"); } + +static int +mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item) +{ + const struct rte_flow_item_udp *spec = udp_item->spec; + const struct rte_flow_item_udp *mask = udp_item->mask; + uint16_t udp_dport = 0; + + if (spec != NULL) { + if (!mask) + mask = &rte_flow_item_udp_mask; + udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port & + mask->hdr.dst_port); + } + return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN); +} + +static const struct mlx5_flow_expand_node * +mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern, + unsigned int item_idx, + const struct mlx5_flow_expand_node graph[], + const struct mlx5_flow_expand_node *node) +{ + const struct rte_flow_item *item = pattern + item_idx, *prev_item; + + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN && + node != NULL && + node->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + /* + * The expansion node is VXLAN and it is also the last + * expandable item in the pattern, so need to continue + * expansion of the inner tunnel. 
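mlx5_flow_is_std_vxlan_port above treats an absent or zero masked UDP destination port as "standard" alongside 4789 proper, which steers expansion to the STD_VXLAN graph node. The check in isolation; 4789 is the IANA VXLAN port, and the byte-order helpers from arpa/inet.h stand in for rte_be_to_cpu_16:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define UDP_PORT_VXLAN 4789

static int is_std_vxlan_port(uint16_t dport_spec_be, uint16_t dport_mask_be)
{
	uint16_t dport = ntohs(dport_spec_be & dport_mask_be);

	return dport == 0 || dport == UDP_PORT_VXLAN;
}

int main(void)
{
	printf("%d\n", is_std_vxlan_port(htons(4789), 0xffff)); /* 1 */
	printf("%d\n", is_std_vxlan_port(htons(4790), 0xffff)); /* 0 */
	printf("%d\n", is_std_vxlan_port(0, 0));                /* 1: no spec */
	return 0;
}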
+ */ + MLX5_ASSERT(item_idx > 0); + prev_item = pattern + item_idx - 1; + MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP); + if (mlx5_flow_is_std_vxlan_port(prev_item)) + return &graph[MLX5_EXPANSION_STD_VXLAN]; + return &graph[MLX5_EXPANSION_L3_VXLAN]; + } + return node; +} + +/* Map of Verbs to Flow priority with 8 Verbs priorities. */ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; + +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/** + * Discover the number of available flow priorities. + * + * @param dev + * Ethernet device. + * + * @return + * On success, number of available flow priorities. + * On failure, a negative errno-style code and rte_errno is set. + */ +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) +{ + static const uint16_t vprio[] = {8, 16}; + const struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type; + int ret; + + type = mlx5_flow_os_get_type(); + if (type == MLX5_FLOW_TYPE_MAX) { + type = MLX5_FLOW_TYPE_VERBS; + if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en) + type = MLX5_FLOW_TYPE_DV; + } + fops = flow_get_drv_ops(type); + if (fops->discover_priorities == NULL) { + DRV_LOG(ERR, "Priority discovery not supported"); + rte_errno = ENOTSUP; + return -rte_errno; + } + ret = fops->discover_priorities(dev, vprio, RTE_DIM(vprio)); + if (ret < 0) + return ret; + switch (ret) { + case 8: + ret = RTE_DIM(priority_map_3); + break; + case 16: + ret = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u maximum priority: %d expected 8/16", + dev->data->port_id, ret); + return -rte_errno; + } + DRV_LOG(INFO, "port %u supported flow priorities:" + " 0-%d for ingress or egress root table," + " 0-%d for non-root table or transfer root table.", + dev->data->port_id, ret - 2, + MLX5_NON_ROOT_FLOW_MAX_PRIO - 1); + return ret; +} + +/** + * Adjust flow priority based on the highest layer and the request priority. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] priority + * The rule base priority. + * @param[in] subpriority + * The priority based on the items. + * + * @return + * The new priority. + */ +uint32_t +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority) +{ + uint32_t res = 0; + struct mlx5_priv *priv = dev->data->dev_private; + + switch (priv->sh->flow_max_priority) { + case RTE_DIM(priority_map_3): + res = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + res = priority_map_5[priority][subpriority]; + break; + } + return res; +}
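mlx5_flow_adjust_priority above is a pure table lookup; which table applies depends on how many Verbs priorities the probe in mlx5_flow_discover_priorities found (8 maps to 3 rule priorities, 16 to 5). A self-contained replica of the lookup using the same tables:

#include <stdint.h>
#include <stdio.h>

#define PRIORITY_MAP_MAX 3

static const uint32_t priority_map_3[][PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};
static const uint32_t priority_map_5[][PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

static uint32_t adjust_priority(unsigned int max_prio, int32_t priority,
				uint32_t subpriority)
{
	if (max_prio == 3) /* RTE_DIM(priority_map_3) */
		return priority_map_3[priority][subpriority];
	if (max_prio == 5) /* RTE_DIM(priority_map_5) */
		return priority_map_5[priority][subpriority];
	return 0;
}

int main(void)
{
	/* Rule priority 1, item-derived subpriority 2, 16 Verbs prios. */
	printf("verbs priority: %u\n", adjust_priority(5, 1, 2)); /* 5 */
	return 0;
}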