X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=d7243a878bab2ac41ce8eca342571b6732afc3c5;hb=c1d4e9d37abdc6c07a05f7d96928e624fea9ebb5;hp=d677de817ded348a9a11b41165406329dd6fa522;hpb=cec19a342f45ba5a9c8be9c4dc2a0faa89981b41;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index d677de817d..d7243a878b 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -8,40 +8,31 @@ #include #include #include - -/* Verbs header. */ -/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-Wpedantic" -#endif -#include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-Wpedantic" -#endif +#include #include #include #include +#include #include +#include #include #include #include -#include "mlx5.h" +#include +#include +#include +#include + #include "mlx5_defs.h" +#include "mlx5.h" #include "mlx5_flow.h" -#include "mlx5_glue.h" -#include "mlx5_prm.h" +#include "mlx5_flow_os.h" #include "mlx5_rxtx.h" - -/* Dev ops structure defined in mlx5.c */ -extern const struct eth_dev_ops mlx5_dev_ops; -extern const struct eth_dev_ops mlx5_dev_ops_isolate; +#include "mlx5_common_os.h" /** Device flow drivers. */ -#ifdef HAVE_IBV_FLOW_DV_SUPPORT -extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; -#endif extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; @@ -55,6 +46,331 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = { [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops }; +/** Helper macro to build input graph for mlx5_flow_expand_rss(). */ +#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \ + (const int []){ \ + __VA_ARGS__, 0, \ + } + +/** Node object of input graph for mlx5_flow_expand_rss(). */ +struct mlx5_flow_expand_node { + const int *const next; + /**< + * List of next node indexes. Index 0 is interpreted as a terminator. + */ + const enum rte_flow_item_type type; + /**< Pattern item type of current node. */ + uint64_t rss_types; + /**< + * RSS types bit-field associated with this node + * (see ETH_RSS_* definitions). + */ +}; + +/** Object returned by mlx5_flow_expand_rss(). */ +struct mlx5_flow_expand_rss { + uint32_t entries; + /**< Number of entries @p patterns and @p priorities. */ + struct { + struct rte_flow_item *pattern; /**< Expanded pattern array. */ + uint32_t priority; /**< Priority offset for each expansion. 
*/ + } entry[]; +}; + +static enum rte_flow_item_type +mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item) +{ + enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID; + uint16_t ether_type = 0; + uint16_t ether_type_m; + uint8_t ip_next_proto = 0; + uint8_t ip_next_proto_m; + + if (item == NULL || item->spec == NULL) + return ret; + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (item->mask) + ether_type_m = ((const struct rte_flow_item_eth *) + (item->mask))->type; + else + ether_type_m = rte_flow_item_eth_mask.type; + if (ether_type_m != RTE_BE16(0xFFFF)) + break; + ether_type = ((const struct rte_flow_item_eth *) + (item->spec))->type; + if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) + ret = RTE_FLOW_ITEM_TYPE_VLAN; + else + ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + if (item->mask) + ether_type_m = ((const struct rte_flow_item_vlan *) + (item->mask))->inner_type; + else + ether_type_m = rte_flow_item_vlan_mask.inner_type; + if (ether_type_m != RTE_BE16(0xFFFF)) + break; + ether_type = ((const struct rte_flow_item_vlan *) + (item->spec))->inner_type; + if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) + ret = RTE_FLOW_ITEM_TYPE_VLAN; + else + ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + if (item->mask) + ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) + (item->mask))->hdr.next_proto_id; + else + ip_next_proto_m = + rte_flow_item_ipv4_mask.hdr.next_proto_id; + if (ip_next_proto_m != 0xFF) + break; + ip_next_proto = ((const struct rte_flow_item_ipv4 *) + (item->spec))->hdr.next_proto_id; + if (ip_next_proto == IPPROTO_UDP) + ret = RTE_FLOW_ITEM_TYPE_UDP; + else if (ip_next_proto == IPPROTO_TCP) + ret = RTE_FLOW_ITEM_TYPE_TCP; + else if (ip_next_proto == IPPROTO_IP) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (ip_next_proto == IPPROTO_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else + ret = RTE_FLOW_ITEM_TYPE_END; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + if (item->mask) + ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) + (item->mask))->hdr.proto; + else + ip_next_proto_m = + rte_flow_item_ipv6_mask.hdr.proto; + if (ip_next_proto_m != 0xFF) + break; + ip_next_proto = ((const struct rte_flow_item_ipv6 *) + (item->spec))->hdr.proto; + if (ip_next_proto == IPPROTO_UDP) + ret = RTE_FLOW_ITEM_TYPE_UDP; + else if (ip_next_proto == IPPROTO_TCP) + ret = RTE_FLOW_ITEM_TYPE_TCP; + else if (ip_next_proto == IPPROTO_IP) + ret = RTE_FLOW_ITEM_TYPE_IPV4; + else if (ip_next_proto == IPPROTO_IPV6) + ret = RTE_FLOW_ITEM_TYPE_IPV6; + else + ret = RTE_FLOW_ITEM_TYPE_END; + break; + default: + ret = RTE_FLOW_ITEM_TYPE_VOID; + break; + } + return ret; +} + +/** + * Expand RSS flows into several possible flows according to the RSS hash + * fields requested and the driver capabilities. + * + * @param[out] buf + * Buffer to store the result expansion. + * @param[in] size + * Buffer size in bytes. If 0, @p buf can be NULL. + * @param[in] pattern + * User flow pattern. + * @param[in] types + * RSS types to expand (see ETH_RSS_* definitions). + * @param[in] graph + * Input graph to expand @p pattern according to @p types. 
+ * @param[in] graph_root_index + * Index of root node in @p graph, typically 0. + * + * @return + * A positive value representing the size of @p buf in bytes regardless of + * @p size on success, a negative errno value otherwise and rte_errno is + * set, the following errors are defined: + * + * -E2BIG: graph-depth @p graph is too deep. + */ +static int +mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size, + const struct rte_flow_item *pattern, uint64_t types, + const struct mlx5_flow_expand_node graph[], + int graph_root_index) +{ + const int elt_n = 8; + const struct rte_flow_item *item; + const struct mlx5_flow_expand_node *node = &graph[graph_root_index]; + const int *next_node; + const int *stack[elt_n]; + int stack_pos = 0; + struct rte_flow_item flow_items[elt_n]; + unsigned int i; + size_t lsize; + size_t user_pattern_size = 0; + void *addr = NULL; + const struct mlx5_flow_expand_node *next = NULL; + struct rte_flow_item missed_item; + int missed = 0; + int elt = 0; + const struct rte_flow_item *last_item = NULL; + + memset(&missed_item, 0, sizeof(missed_item)); + lsize = offsetof(struct mlx5_flow_expand_rss, entry) + + elt_n * sizeof(buf->entry[0]); + if (lsize <= size) { + buf->entry[0].priority = 0; + buf->entry[0].pattern = (void *)&buf->entry[elt_n]; + buf->entries = 0; + addr = buf->entry[0].pattern; + } + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + last_item = item; + for (i = 0; node->next && node->next[i]; ++i) { + next = &graph[node->next[i]]; + if (next->type == item->type) + break; + } + if (next) + node = next; + user_pattern_size += sizeof(*item); + } + user_pattern_size += sizeof(*item); /* Handle END item. */ + lsize += user_pattern_size; + /* Copy the user pattern in the first entry of the buffer. */ + if (lsize <= size) { + rte_memcpy(addr, pattern, user_pattern_size); + addr = (void *)(((uintptr_t)addr) + user_pattern_size); + buf->entries = 1; + } + /* Start expanding. */ + memset(flow_items, 0, sizeof(flow_items)); + user_pattern_size -= sizeof(*item); + /* + * Check if the last valid item has spec set, need complete pattern, + * and the pattern can be used for expansion. + */ + missed_item.type = mlx5_flow_expand_rss_item_complete(last_item); + if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) { + /* Item type END indicates expansion is not required. */ + return lsize; + } + if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { + next = NULL; + missed = 1; + for (i = 0; node->next && node->next[i]; ++i) { + next = &graph[node->next[i]]; + if (next->type == missed_item.type) { + flow_items[0].type = missed_item.type; + flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; + break; + } + next = NULL; + } + } + if (next && missed) { + elt = 2; /* missed item + item end. */ + node = next; + lsize += elt * sizeof(*item) + user_pattern_size; + if ((node->rss_types & types) && lsize <= size) { + buf->entry[buf->entries].priority = 1; + buf->entry[buf->entries].pattern = addr; + buf->entries++; + rte_memcpy(addr, buf->entry[0].pattern, + user_pattern_size); + addr = (void *)(((uintptr_t)addr) + user_pattern_size); + rte_memcpy(addr, flow_items, elt * sizeof(*item)); + addr = (void *)(((uintptr_t)addr) + + elt * sizeof(*item)); + } + } + memset(flow_items, 0, sizeof(flow_items)); + next_node = node->next; + stack[stack_pos] = next_node; + node = next_node ? 
&graph[*next_node] : NULL; + while (node) { + flow_items[stack_pos].type = node->type; + if (node->rss_types & types) { + /* + * compute the number of items to copy from the + * expansion and copy it. + * When the stack_pos is 0, there are 1 element in it, + * plus the addition END item. + */ + elt = stack_pos + 2; + flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END; + lsize += elt * sizeof(*item) + user_pattern_size; + if (lsize <= size) { + size_t n = elt * sizeof(*item); + + buf->entry[buf->entries].priority = + stack_pos + 1 + missed; + buf->entry[buf->entries].pattern = addr; + buf->entries++; + rte_memcpy(addr, buf->entry[0].pattern, + user_pattern_size); + addr = (void *)(((uintptr_t)addr) + + user_pattern_size); + rte_memcpy(addr, &missed_item, + missed * sizeof(*item)); + addr = (void *)(((uintptr_t)addr) + + missed * sizeof(*item)); + rte_memcpy(addr, flow_items, n); + addr = (void *)(((uintptr_t)addr) + n); + } + } + /* Go deeper. */ + if (node->next) { + next_node = node->next; + if (stack_pos++ == elt_n) { + rte_errno = E2BIG; + return -rte_errno; + } + stack[stack_pos] = next_node; + } else if (*(next_node + 1)) { + /* Follow up with the next possibility. */ + ++next_node; + } else { + /* Move to the next path. */ + if (stack_pos) + next_node = stack[--stack_pos]; + next_node++; + stack[stack_pos] = next_node; + } + node = *next_node ? &graph[*next_node] : NULL; + }; + /* no expanded flows but we have missed item, create one rule for it */ + if (buf->entries == 1 && missed != 0) { + elt = 2; + lsize += elt * sizeof(*item) + user_pattern_size; + if (lsize <= size) { + buf->entry[buf->entries].priority = 1; + buf->entry[buf->entries].pattern = addr; + buf->entries++; + flow_items[0].type = missed_item.type; + flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; + rte_memcpy(addr, buf->entry[0].pattern, + user_pattern_size); + addr = (void *)(((uintptr_t)addr) + user_pattern_size); + rte_memcpy(addr, flow_items, elt * sizeof(*item)); + addr = (void *)(((uintptr_t)addr) + + elt * sizeof(*item)); + } + } + return lsize; +} + enum mlx5_expansion { MLX5_EXPANSION_ROOT, MLX5_EXPANSION_ROOT_OUTER, @@ -85,46 +401,47 @@ enum mlx5_expansion { }; /** Supported expansion of items. 
*/ -static const struct rte_flow_expand_node mlx5_support_expansion[] = { +static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { [MLX5_EXPANSION_ROOT] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, - MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_END, }, [MLX5_EXPANSION_ROOT_OUTER] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, - MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_END, }, [MLX5_EXPANSION_ROOT_ETH_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), .type = RTE_FLOW_ITEM_TYPE_END, }, [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .next = MLX5_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_ETH_VLAN), .type = RTE_FLOW_ITEM_TYPE_END, }, [MLX5_EXPANSION_OUTER_ETH] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6, - MLX5_EXPANSION_MPLS), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_ETH, .rss_types = 0, }, [MLX5_EXPANSION_OUTER_ETH_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), .type = RTE_FLOW_ITEM_TYPE_ETH, .rss_types = 0, }, [MLX5_EXPANSION_OUTER_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_VLAN, }, [MLX5_EXPANSION_OUTER_IPV4] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT + .next = MLX5_FLOW_EXPAND_RSS_NEXT (MLX5_EXPANSION_OUTER_IPV4_UDP, MLX5_EXPANSION_OUTER_IPV4_TCP, MLX5_EXPANSION_GRE, @@ -135,8 +452,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { ETH_RSS_NONFRAG_IPV4_OTHER, }, [MLX5_EXPANSION_OUTER_IPV4_UDP] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, }, @@ -145,7 +462,7 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_OUTER_IPV6] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT + .next = MLX5_FLOW_EXPAND_RSS_NEXT (MLX5_EXPANSION_OUTER_IPV6_UDP, MLX5_EXPANSION_OUTER_IPV6_TCP, MLX5_EXPANSION_IPV4, @@ -155,8 +472,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { ETH_RSS_NONFRAG_IPV6_OTHER, }, [MLX5_EXPANSION_OUTER_IPV6_UDP] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, - MLX5_EXPANSION_VXLAN_GPE), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), .type = RTE_FLOW_ITEM_TYPE_UDP, .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, }, @@ -165,41 +482,43 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, [MLX5_EXPANSION_VXLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = 
RTE_FLOW_ITEM_TYPE_VXLAN, }, [MLX5_EXPANSION_VXLAN_GPE] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, - MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, [MLX5_EXPANSION_GRE] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), .type = RTE_FLOW_ITEM_TYPE_GRE, }, [MLX5_EXPANSION_MPLS] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_MPLS, }, [MLX5_EXPANSION_ETH] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_ETH, }, [MLX5_EXPANSION_ETH_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), .type = RTE_FLOW_ITEM_TYPE_ETH, }, [MLX5_EXPANSION_VLAN] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, - MLX5_EXPANSION_IPV6), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VLAN, }, [MLX5_EXPANSION_IPV4] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, - MLX5_EXPANSION_IPV4_TCP), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP), .type = RTE_FLOW_ITEM_TYPE_IPV4, .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER, @@ -213,8 +532,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, }, [MLX5_EXPANSION_IPV6] = { - .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, - MLX5_EXPANSION_IPV6_TCP), + .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP), .type = RTE_FLOW_ITEM_TYPE_IPV6, .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER, @@ -236,6 +555,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .flush = mlx5_flow_flush, .isolate = mlx5_flow_isolate, .query = mlx5_flow_query, + .dev_dump = mlx5_flow_dev_dump, + .get_aged_flows = mlx5_flow_get_aged_flows, }; /* Convert FDIR request to Generic flow. */ @@ -264,17 +585,6 @@ struct mlx5_fdir { struct rte_flow_action_queue queue; }; -/* Map of Verbs to Flow priority with 8 Verbs priorities. */ -static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { - { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, -}; - -/* Map of Verbs to Flow priority with 16 Verbs priorities. */ -static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { - { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, - { 9, 10, 11 }, { 12, 13, 14 }, -}; - /* Tunnel information. */ struct mlx5_flow_tunnel_info { uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ @@ -318,6 +628,10 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP, .ptype = RTE_PTYPE_TUNNEL_IP, }, + { + .tunnel = MLX5_FLOW_LAYER_GTP, + .ptype = RTE_PTYPE_TUNNEL_GTPU, + }, }; /** @@ -336,7 +650,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { * The request register on success, a negative errno * value otherwise and rte_errno is set. 
*/ -enum modify_reg +int mlx5_flow_get_reg_id(struct rte_eth_dev *dev, enum mlx5_feature_name feature, uint32_t id, @@ -345,6 +659,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; enum modify_reg start_reg; + bool skip_mtr_reg = false; switch (feature) { case MLX5_HAIRPIN_RX: @@ -364,45 +679,60 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, case MLX5_METADATA_TX: return REG_A; case MLX5_METADATA_FDB: - return REG_C_0; + switch (config->dv_xmeta_en) { + case MLX5_XMETA_MODE_LEGACY: + return REG_NON; + case MLX5_XMETA_MODE_META16: + return REG_C_0; + case MLX5_XMETA_MODE_META32: + return REG_C_1; + } + break; case MLX5_FLOW_MARK: switch (config->dv_xmeta_en) { case MLX5_XMETA_MODE_LEGACY: - return REG_NONE; + return REG_NON; case MLX5_XMETA_MODE_META16: return REG_C_1; case MLX5_XMETA_MODE_META32: return REG_C_0; } break; - case MLX5_COPY_MARK: case MLX5_MTR_SFX: + /* + * If meter color and flow match share one register, flow match + * should use the meter color register for match. + */ + if (priv->mtr_reg_share) + return priv->mtr_color_reg; + else + return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + REG_C_3; + case MLX5_MTR_COLOR: + MLX5_ASSERT(priv->mtr_color_reg != REG_NON); + return priv->mtr_color_reg; + case MLX5_COPY_MARK: /* * Metadata COPY_MARK register using is in meter suffix sub * flow while with meter. It's safe to share the same register. */ return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; - case MLX5_MTR_COLOR: - RTE_ASSERT(priv->mtr_color_reg != REG_NONE); - return priv->mtr_color_reg; case MLX5_APP_TAG: /* - * If meter is enable, it will engage two registers for color + * If meter is enable, it will engage the register for color * match and flow match. If meter color match is not using the * REG_C_2, need to skip the REG_C_x be used by meter color * match. * If meter is disable, free to use all available registers. */ - if (priv->mtr_color_reg != REG_NONE) - start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_3 : - REG_C_4; - else - start_reg = REG_C_2; + start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + (priv->mtr_reg_share ? REG_C_3 : REG_C_4); + skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); if (id > (REG_C_7 - start_reg)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "invalid tag id"); - if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE) + if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); @@ -412,19 +742,23 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev, * If the available index REG_C_y >= REG_C_x, skip the * color register. 
*/ - if (start_reg == REG_C_3 && config->flow_mreg_c - [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) { - if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] != - REG_NONE) + if (skip_mtr_reg && config->flow_mreg_c + [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { + if (id >= (REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); + if (config->flow_mreg_c + [id + 1 + start_reg - REG_C_0] != REG_NON) return config->flow_mreg_c - [id + 1 + REG_C_3 - REG_C_0]; + [id + 1 + start_reg - REG_C_0]; return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported tag id"); } return config->flow_mreg_c[id + start_reg - REG_C_0]; } - assert(false); + MLX5_ASSERT(false); return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "invalid feature name"); @@ -453,107 +787,7 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) * - reg_c's are preserved across different domain (FDB and NIC) on * packet loopback by flow lookup miss. */ - return config->flow_mreg_c[2] != REG_NONE; -} - -/** - * Discover the maximum number of priority available. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * - * @return - * number of supported flow priority on success, a negative errno - * value otherwise and rte_errno is set. - */ -int -mlx5_flow_discover_priorities(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct { - struct ibv_flow_attr attr; - struct ibv_flow_spec_eth eth; - struct ibv_flow_spec_action_drop drop; - } flow_attr = { - .attr = { - .num_of_specs = 2, - .port = (uint8_t)priv->ibv_port, - }, - .eth = { - .type = IBV_FLOW_SPEC_ETH, - .size = sizeof(struct ibv_flow_spec_eth), - }, - .drop = { - .size = sizeof(struct ibv_flow_spec_action_drop), - .type = IBV_FLOW_SPEC_ACTION_DROP, - }, - }; - struct ibv_flow *flow; - struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); - uint16_t vprio[] = { 8, 16 }; - int i; - int priority = 0; - - if (!drop) { - rte_errno = ENOTSUP; - return -rte_errno; - } - for (i = 0; i != RTE_DIM(vprio); i++) { - flow_attr.attr.priority = vprio[i] - 1; - flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); - if (!flow) - break; - claim_zero(mlx5_glue->destroy_flow(flow)); - priority = vprio[i]; - } - mlx5_hrxq_drop_release(dev); - switch (priority) { - case 8: - priority = RTE_DIM(priority_map_3); - break; - case 16: - priority = RTE_DIM(priority_map_5); - break; - default: - rte_errno = ENOTSUP; - DRV_LOG(ERR, - "port %u verbs maximum priority: %d expected 8/16", - dev->data->port_id, priority); - return -rte_errno; - } - DRV_LOG(INFO, "port %u flow maximum priority: %d", - dev->data->port_id, priority); - return priority; -} - -/** - * Adjust flow priority based on the highest layer and the request priority. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] priority - * The rule base priority. - * @param[in] subpriority - * The priority based on the items. - * - * @return - * The new priority. 
- */ -uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, - uint32_t subpriority) -{ - uint32_t res = 0; - struct mlx5_priv *priv = dev->data->dev_private; - - switch (priv->config.flow_prio) { - case RTE_DIM(priority_map_3): - res = priority_map_3[priority][subpriority]; - break; - case RTE_DIM(priority_map_5): - res = priority_map_5[priority][subpriority]; - break; - } - return res; + return config->flow_mreg_c[2] != REG_NON; } /** @@ -568,6 +802,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * Bit-masks covering supported fields by the NIC to compare with user mask. * @param[in] size * Bit-masks size in bytes. + * @param[in] range_accepted + * True if range of values is accepted for specific fields, false otherwise. * @param[out] error * Pointer to error structure. * @@ -579,11 +815,12 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, const uint8_t *mask, const uint8_t *nic_mask, unsigned int size, + bool range_accepted, struct rte_flow_error *error) { unsigned int i; - assert(nic_mask); + MLX5_ASSERT(nic_mask); for (i = 0; i < size; ++i) if ((nic_mask[i] | mask[i]) != nic_mask[i]) return rte_flow_error_set(error, ENOTSUP, @@ -596,7 +833,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, RTE_FLOW_ERROR_TYPE_ITEM, item, "mask/last without a spec is not" " supported"); - if (item->spec && item->last) { + if (item->spec && item->last && !range_accepted) { uint8_t spec[size]; uint8_t last[size]; unsigned int i; @@ -632,13 +869,12 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * The hash fields that should be used. */ uint64_t -mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, +mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, int tunnel __rte_unused, uint64_t layer_types, uint64_t hash_fields) { - struct rte_flow *flow = dev_flow->flow; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - int rss_request_inner = flow->rss.level >= 2; + int rss_request_inner = rss_desc->level >= 2; /* Check RSS hash level for tunnel. */ if (tunnel && rss_request_inner) @@ -647,7 +883,7 @@ mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, return 0; #endif /* Check if requested layer matches RSS hash fields. */ - if (!(flow->rss.types & layer_types)) + if (!(rss_desc->types & layer_types)) return 0; return hash_fields; } @@ -686,21 +922,27 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] dev_flow - * Pointer to device flow structure. + * @param[in] dev_handle + * Pointer to device flow handle structure. 
*/ static void -flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, + struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); + struct mlx5_hrxq *hrxq; unsigned int i; - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->rss.queue)[i]; + if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) + return; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); + if (!hrxq) + return; + for (i = 0; i != hrxq->ind_table->queues_n; ++i) { + int idx = hrxq->ind_table->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -725,7 +967,7 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) /* Increase the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { if ((tunnels_info[j].tunnel & - dev_flow->layers) == + dev_handle->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]++; break; @@ -747,10 +989,13 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) static void flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_set(dev, dev_flow); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + flow_drv_rxq_flags_set(dev, dev_handle); } /** @@ -759,22 +1004,28 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) * * @param dev * Pointer to Ethernet device. - * @param[in] dev_flow - * Pointer to the device flow. + * @param[in] dev_handle + * Pointer to the device flow handle structure. */ static void -flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, + struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); + struct mlx5_hrxq *hrxq; unsigned int i; - assert(dev->data->dev_started); - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->rss.queue)[i]; + if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) + return; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); + if (!hrxq) + return; + MLX5_ASSERT(dev->data->dev_started); + for (i = 0; i != hrxq->ind_table->queues_n; ++i) { + int idx = hrxq->ind_table->queues[i]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -794,7 +1045,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) /* Decrease the counter matching the flow. 
*/ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { if ((tunnels_info[j].tunnel & - dev_flow->layers) == + dev_handle->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; @@ -817,10 +1068,13 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) static void flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_trim(dev, dev_flow); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + flow_drv_rxq_flags_trim(dev, dev_handle); } /** @@ -851,6 +1105,35 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) } } +/** + * Set the Rx queue dynamic metadata (mask and offset) for a flow + * + * @param[in] dev + * Pointer to the Ethernet device structure. + */ +void +mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *data; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + data = (*priv->rxqs)[i]; + if (!rte_flow_dynf_metadata_avail()) { + data->dynf_meta = 0; + data->flow_meta_mask = 0; + data->flow_meta_offset = -1; + } else { + data->dynf_meta = 1; + data->flow_meta_mask = rte_flow_dynf_metadata_mask; + data->flow_meta_offset = rte_flow_dynf_metadata_offs; + } + } +} + /* * return a pointer to the desired action in the list of actions. * @@ -892,11 +1175,6 @@ mlx5_flow_validate_action_flag(uint64_t action_flags, const struct rte_flow_attr *attr, struct rte_flow_error *error) { - - if (action_flags & MLX5_FLOW_ACTION_DROP) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "can't drop and flag in same flow"); if (action_flags & MLX5_FLOW_ACTION_MARK) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -948,10 +1226,6 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, &mark->id, "mark id must in 0 <= id < " RTE_STR(MLX5_FLOW_MARK_MAX)); - if (action_flags & MLX5_FLOW_ACTION_DROP) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "can't drop and mark in same flow"); if (action_flags & MLX5_FLOW_ACTION_FLAG) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -983,24 +1257,10 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int -mlx5_flow_validate_action_drop(uint64_t action_flags, +mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, const struct rte_flow_attr *attr, struct rte_flow_error *error) { - if (action_flags & MLX5_FLOW_ACTION_FLAG) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "can't drop and flag in same flow"); - if (action_flags & MLX5_FLOW_ACTION_MARK) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "can't drop and mark in same flow"); - if (action_flags & (MLX5_FLOW_FATE_ACTIONS | - MLX5_FLOW_FATE_ESWITCH_ACTIONS)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "can't have 2 fate actions in" - " same flow"); if (attr->egress) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, @@ -1142,6 +1402,18 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, &rss->types, "some RSS protocols are not" " supported"); + if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && + !(rss->types & ETH_RSS_IP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "L3 partial RSS requested but L3 RSS" + " type not specified"); + if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && + !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "L4 partial RSS requested but L4 RSS" + " type not specified"); if (!priv->rxqs_n) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, @@ -1166,11 +1438,54 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "rss action not supported for " "egress"); - if (rss->level > 1 && !tunnel) + if (rss->level > 1 && !tunnel) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "inner RSS is not supported for " "non-tunnel flows"); + if ((item_flags & MLX5_FLOW_LAYER_ECPRI) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "RSS on eCPRI is not supported now"); + } + return 0; +} + +/* + * Validate the default miss action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_flow_validate_action_default_miss(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions in" + " same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "default miss action not supported " + "for egress"); + if (attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, + "only group 0 is supported"); + if (attr->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); return 0; } @@ -1293,7 +1608,8 @@ mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_icmp6_mask, - sizeof(struct rte_flow_item_icmp6), error); + sizeof(struct rte_flow_item_icmp6), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; @@ -1319,6 +1635,12 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, struct rte_flow_error *error) { const struct rte_flow_item_icmp *mask = item->mask; + const struct rte_flow_item_icmp nic_mask = { + .hdr.icmp_type = 0xff, + .hdr.icmp_code = 0xff, + .hdr.icmp_ident = RTE_BE16(0xffff), + .hdr.icmp_seq_nb = RTE_BE16(0xffff), + }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : MLX5_FLOW_LAYER_OUTER_L3_IPV4; @@ -1341,11 +1663,12 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L4 layers not supported"); if (!mask) - mask = &rte_flow_item_icmp_mask; + mask = &nic_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_icmp_mask, - sizeof(struct rte_flow_item_icmp), error); + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_icmp), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; @@ -1400,7 +1723,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_eth), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); return ret; } @@ -1454,7 +1777,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_vlan), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { @@ -1499,9 +1822,15 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] last_item + * Previous validated item in the pattern items. + * @param[in] ether_type + * Type in the ethernet layer header (including dot1q). * @param[in] acc_mask * Acceptable mask, if NULL default internal default mask * will be used to check whether item fields are supported. + * @param[in] range_accepted + * True if range of values is accepted for specific fields, false otherwise. * @param[out] error * Pointer to error structure. 
* @@ -1514,6 +1843,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, uint64_t last_item, uint16_t ether_type, const struct rte_flow_item_ipv4 *acc_mask, + bool range_accepted, struct rte_flow_error *error) { const struct rte_flow_item_ipv4 *mask = item->mask; @@ -1584,7 +1914,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, acc_mask ? (const uint8_t *)acc_mask : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv4), - error); + range_accepted, error); if (ret < 0) return ret; return 0; @@ -1597,6 +1927,10 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, * Item specification. * @param[in] item_flags * Bit-fields that holds the items detected until now. + * @param[in] last_item + * Previous validated item in the pattern items. + * @param[in] ether_type + * Type in the ethernet layer header (including dot1q). * @param[in] acc_mask * Acceptable mask, if NULL default internal default mask * will be used to check whether item fields are supported. @@ -1626,7 +1960,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, "\xff\xff\xff\xff\xff\xff\xff\xff", .vtc_flow = RTE_BE32(0xffffffff), .proto = 0xff, - .hop_limits = 0xff, }, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1646,9 +1979,9 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, RTE_FLOW_ERROR_TYPE_ITEM, item, "IPv6 cannot follow L2/VLAN layer " "which ether type is not IPv6"); + if (mask && mask->hdr.proto == UINT8_MAX && spec) + next_proto = spec->hdr.proto; if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { - if (mask && spec) - next_proto = mask->hdr.proto & spec->hdr.proto; if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1656,6 +1989,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, "multiple tunnel " "not supported"); } + if (next_proto == IPPROTO_HOPOPTS || + next_proto == IPPROTO_ROUTING || + next_proto == IPPROTO_FRAGMENT || + next_proto == IPPROTO_ESP || + next_proto == IPPROTO_AH || + next_proto == IPPROTO_DSTOPTS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 proto (next header) should " + "not be set as extension header"); if (item_flags & MLX5_FLOW_LAYER_IPIP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1680,7 +2023,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, acc_mask ? 
(const uint8_t *)acc_mask : (const uint8_t *)&nic_mask, sizeof(struct rte_flow_item_ipv6), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; @@ -1735,7 +2078,8 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_udp_mask, - sizeof(struct rte_flow_item_udp), error); + sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED, + error); if (ret < 0) return ret; return 0; @@ -1771,7 +2115,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, MLX5_FLOW_LAYER_OUTER_L4; int ret; - assert(flow_mask); + MLX5_ASSERT(flow_mask); if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1790,7 +2134,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)flow_mask, - sizeof(struct rte_flow_item_tcp), error); + sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED, + error); if (ret < 0) return ret; return 0; @@ -1823,7 +2168,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (item_flags & MLX5_FLOW_LAYER_TUNNEL) @@ -1845,28 +2189,13 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_vxlan_mask, sizeof(struct rte_flow_item_vxlan), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; if (spec) { memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if - * only this layer is defined in the Verbs specification it is - * interpreted as wildcard and all packets will match this - * rule, if it follows a full stack layer (ex: eth / ipv4 / - * udp), all packets matching the layers before will also - * match this rule. To avoid such situation, VNI 0 is - * currently refused. - */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1905,7 +2234,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (!priv->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, @@ -1932,7 +2260,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, sizeof(struct rte_flow_item_vxlan_gpe), - error); + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; if (spec) { @@ -1943,22 +2271,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, "VxLAN-GPE protocol" " not supported"); memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this - * layer is defined in the Verbs specification it is interpreted as - * wildcard and all packets will match this rule, if it follows a full - * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. 
To avoid such situation, VNI 0 - * is currently refused. - */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN-GPE vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1990,8 +2304,8 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, const rte_be32_t *mask = item->mask; int ret = 0; rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); - const struct rte_flow_item_gre *gre_spec = gre_item->spec; - const struct rte_flow_item_gre *gre_mask = gre_item->mask; + const struct rte_flow_item_gre *gre_spec; + const struct rte_flow_item_gre *gre_mask; if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) return rte_flow_error_set(error, ENOTSUP, @@ -2005,8 +2319,10 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE key following a wrong item"); + gre_mask = gre_item->mask; if (!gre_mask) gre_mask = &rte_flow_item_gre_mask; + gre_spec = gre_item->spec; if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) return rte_flow_error_set(error, EINVAL, @@ -2018,7 +2334,7 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&gre_key_default_mask, - sizeof(rte_be32_t), error); + sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error); return ret; } @@ -2070,7 +2386,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_gre), error); + sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED, + error); if (ret < 0) return ret; #ifndef HAVE_MLX5DV_DR @@ -2121,9 +2438,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; - if (!(priv->config.hca_attr.flex_parser_protocols & - MLX5_HCA_FLEX_GENEVE_ENABLED) || - !priv->config.hca_attr.tunnel_stateless_geneve_rx) + if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" @@ -2147,7 +2462,8 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_geneve), error); + sizeof(struct rte_flow_item_geneve), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret) return ret; if (spec) { @@ -2230,15 +2546,17 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_mpls_mask, - sizeof(struct rte_flow_item_mpls), error); + sizeof(struct rte_flow_item_mpls), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; -#endif +#else return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "MPLS is not supported by Verbs, please" " update."); +#endif } /** @@ -2284,15 +2602,107 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_nvgre_mask, - sizeof(struct rte_flow_item_nvgre), error); + sizeof(struct rte_flow_item_nvgre), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); if (ret < 0) return ret; return 0; } -/* Allocate unique ID for the 
split Q/RSS subflows. */ -static uint32_t -flow_qrss_get_id(struct rte_eth_dev *dev) +/** + * Validate eCPRI item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] last_item + * Previous validated item in the pattern items. + * @param[in] ether_type + * Type in the ethernet layer header (including dot1q). + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ecpri *acc_mask, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ecpri *mask = item->mask; + const struct rte_flow_item_ecpri nic_mask = { + .hdr = { + .common = { + .u32 = + RTE_BE32(((const struct rte_ecpri_common_hdr) { + .type = 0xFF, + }).u32), + }, + .dummy[0] = 0xFFFFFFFF, + }, + }; + const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + struct rte_flow_item_ecpri mask_lo; + + if ((last_item & outer_l2_vlan) && ether_type && + ether_type != RTE_ETHER_TYPE_ECPRI) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "eCPRI cannot follow L2/VLAN layer " + "which ether type is not 0xAEFE."); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "eCPRI with tunnel is not supported " + "right now."); + if (item_flags & MLX5_FLOW_LAYER_OUTER_L3) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L3 layers not supported"); + else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "eCPRI cannot follow a TCP layer."); + /* In specification, eCPRI could be over UDP layer. */ + else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "eCPRI over UDP layer is not yet " + "supported right now."); + /* Mask for type field in common header could be zero. */ + if (!mask) + mask = &rte_flow_item_ecpri_mask; + mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32); + /* Input mask is in big-endian format. */ + if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, + "partial mask is not supported " + "for protocol"); + else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, + "message header mask must be after " + "a type mask"); + return mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ecpri), + MLX5_ITEM_RANGE_NOT_ACCEPTED, error); +} + +/* Allocate unique ID for the split Q/RSS subflows. 
*/ +static uint32_t +flow_qrss_get_id(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; uint32_t qrss_id, ret; @@ -2300,7 +2710,7 @@ flow_qrss_get_id(struct rte_eth_dev *dev) ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); if (ret) return 0; - assert(qrss_id); + MLX5_ASSERT(qrss_id); return qrss_id; } @@ -2326,11 +2736,14 @@ static void flow_mreg_split_qrss_release(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - if (dev_flow->qrss_id) - flow_qrss_free_id(dev, dev_flow->qrss_id); + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + if (dev_handle->split_flow_id) + flow_qrss_free_id(dev, dev_handle->split_flow_id); } static int @@ -2339,6 +2752,7 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, bool external __rte_unused, + int hairpin __rte_unused, struct rte_flow_error *error) { return rte_flow_error_set(error, ENOTSUP, @@ -2346,7 +2760,8 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, } static struct mlx5_flow * -flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_null_prepare(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) @@ -2427,8 +2842,12 @@ static enum mlx5_flow_drv_type flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) { struct mlx5_priv *priv = dev->data->dev_private; - enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; + /* The OS can determine first a specific flow type (DV, VERBS) */ + enum mlx5_flow_drv_type type = mlx5_flow_os_get_type(); + if (type != MLX5_FLOW_TYPE_MAX) + return type; + /* If no OS specific type - continue with DV/VERBS selection */ if (attr->transfer && priv->config.dv_esw_en) type = MLX5_FLOW_TYPE_DV; if (!attr->transfer) @@ -2453,6 +2872,8 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) * Pointer to the list of actions. * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. * @param[out] error * Pointer to the error structure. * @@ -2464,13 +2885,14 @@ flow_drv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, int hairpin, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); fops = flow_get_drv_ops(type); - return fops->validate(dev, attr, items, actions, external, error); + return fops->validate(dev, attr, items, actions, external, + hairpin, error); } /** @@ -2486,12 +2908,16 @@ flow_drv_validate(struct rte_eth_dev *dev, * setting backward reference to the flow should be done out of this function. * layers field is not filled either. * + * @param[in] dev + * Pointer to the dev structure. * @param[in] attr * Pointer to the flow attributes. * @param[in] items * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. 
+ * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Pointer to the error structure. * @@ -2499,18 +2925,24 @@ flow_drv_validate(struct rte_eth_dev *dev, * Pointer to device flow on success, otherwise NULL and rte_errno is set. */ static inline struct mlx5_flow * -flow_drv_prepare(const struct rte_flow *flow, +flow_drv_prepare(struct rte_eth_dev *dev, + const struct rte_flow *flow, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], + uint32_t flow_idx, struct rte_flow_error *error) { const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; + struct mlx5_flow *mlx5_flow = NULL; - assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); - return fops->prepare(attr, items, actions, error); + mlx5_flow = fops->prepare(dev, attr, items, actions, error); + if (mlx5_flow) + mlx5_flow->flow_idx = flow_idx; + return mlx5_flow; } /** @@ -2552,7 +2984,7 @@ flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; - assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); return fops->translate(dev, dev_flow, attr, items, actions, error); } @@ -2579,7 +3011,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; - assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); return fops->apply(dev, flow, error); } @@ -2601,7 +3033,7 @@ flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type type = flow->drv_type; - assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); fops->remove(dev, flow); } @@ -2623,52 +3055,11 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) enum mlx5_flow_drv_type type = flow->drv_type; flow_mreg_split_qrss_release(dev, flow); - assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(type); fops->destroy(dev, flow); } -/** - * Validate a flow supported by the NIC. - * - * @see rte_flow_validate() - * @see rte_flow_ops - */ -int -mlx5_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - int ret; - - ret = flow_drv_validate(dev, attr, items, actions, true, error); - if (ret < 0) - return ret; - return 0; -} - -/** - * Get port id item from the item list. - * - * @param[in] item - * Pointer to the list of items. - * - * @return - * Pointer to the port id item if exist, else return NULL. - */ -static const struct rte_flow_item * -find_port_id_item(const struct rte_flow_item *item) -{ - assert(item); - for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) - return item; - } - return NULL; -} - /** * Get RSS action from the action list. 
* @@ -2713,7 +3104,48 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) } /** - * Get QUEUE/RSS action from the action list. + * Get layer flags from the prefix flow. + * + * Some flows may be split to several subflows, the prefix subflow gets the + * match items and the suffix sub flow gets the actions. + * Some actions need the user defined match item flags to get the detail for + * the action. + * This function helps the suffix flow to get the item layer flags from prefix + * subflow. + * + * @param[in] dev_flow + * Pointer the created preifx subflow. + * + * @return + * The layers get from prefix subflow. + */ +static inline uint64_t +flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) +{ + uint64_t layers = 0; + + /* + * Layers bits could be localization, but usually the compiler will + * help to do the optimization work for source code. + * If no decap actions, use the layers directly. + */ + if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) + return dev_flow->handle->layers; + /* Convert L3 layers with decap action. */ + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) + layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) + layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; + /* Convert L4 layers with decap action. */ + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) + layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) + layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; + return layers; +} + +/** + * Get metadata split action information. * * @param[in] actions * Pointer to the list of actions. @@ -2722,18 +3154,38 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) * @param[out] qrss_type * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned * if no QUEUE/RSS is found. + * @param[out] encap_idx + * Pointer to the index of the encap action if exists, otherwise the last + * action index. * * @return * Total number of actions. */ static int -flow_parse_qrss_action(const struct rte_flow_action actions[], - const struct rte_flow_action **qrss) +flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], + const struct rte_flow_action **qrss, + int *encap_idx) { + const struct rte_flow_action_raw_encap *raw_encap; int actions_n = 0; + int raw_decap_idx = -1; + *encap_idx = -1; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + *encap_idx = actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + raw_decap_idx = actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + *encap_idx = raw_decap_idx != -1 ? + raw_decap_idx : actions_n; + break; case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_RSS: *qrss = actions; @@ -2743,6 +3195,8 @@ flow_parse_qrss_action(const struct rte_flow_action actions[], } actions_n++; } + if (*encap_idx == -1) + *encap_idx = actions_n; /* Count RTE_FLOW_ACTION_TYPE_END. 
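The layer-folding helper above (flow_get_prefix_layer_flags) converts the prefix subflow's inner layer bits into outer bits whenever a decap action is present, so the suffix subflow matches the post-decapsulation headers. A small worked example under assumed bit values (the real ones live in mlx5_flow.h):

#include <stdint.h>

/* Hypothetical bit values, just for the example. */
#define LAYER_INNER_L3_IPV4 (1u << 0)
#define LAYER_INNER_L4_UDP  (1u << 1)
#define LAYER_OUTER_L3_IPV4 (1u << 2)
#define LAYER_OUTER_L4_UDP  (1u << 3)

static uint64_t
fold_after_decap(uint64_t layers)
{
	uint64_t out = 0;

	if (layers & LAYER_INNER_L3_IPV4)
		out |= LAYER_OUTER_L3_IPV4;
	if (layers & LAYER_INNER_L4_UDP)
		out |= LAYER_OUTER_L4_UDP;
	return out;
}

/*
 * fold_after_decap(LAYER_INNER_L3_IPV4 | LAYER_INNER_L4_UDP)
 *   == LAYER_OUTER_L3_IPV4 | LAYER_OUTER_L4_UDP
 * i.e. what was the inner IPv4/UDP header becomes the outer one once
 * the tunnel header has been stripped by the decap action.
 */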
*/ return actions_n + 1; } @@ -2763,7 +3217,7 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) { int actions_n = 0; - assert(mtr); + MLX5_ASSERT(mtr); *mtr = 0; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -2780,10 +3234,10 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) } /** - * Check if the flow should be splited due to hairpin. + * Check if the flow should be split due to hairpin. * The reason for the split is that in current HW we can't - * support encap on Rx, so if a flow have encap we move it - * to Tx. + * support encap and push-vlan on Rx, so if a flow contains + * these actions we move it to Tx. * * @param dev * Pointer to Ethernet device. @@ -2803,7 +3257,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, { int queue_action = 0; int action_n = 0; - int encap = 0; + int split = 0; const struct rte_flow_action_queue *queue; const struct rte_flow_action_rss *rss; const struct rte_flow_action_raw_encap *raw_encap; @@ -2814,6 +3268,8 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, switch (actions->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: queue = actions->conf; + if (queue == NULL) + return 0; if (mlx5_rxq_get_type(dev, queue->index) != MLX5_RXQ_TYPE_HAIRPIN) return 0; @@ -2822,6 +3278,8 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_RSS: rss = actions->conf; + if (rss == NULL || rss->queue_num == 0) + return 0; if (mlx5_rxq_get_type(dev, rss->queue[0]) != MLX5_RXQ_TYPE_HAIRPIN) return 0; @@ -2830,7 +3288,10 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: - encap = 1; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + split++; action_n++; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: @@ -2838,7 +3299,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, if (raw_encap->size > (sizeof(struct rte_flow_item_eth) + sizeof(struct rte_flow_item_ipv4))) - encap = 1; + split++; action_n++; break; default: @@ -2846,22 +3307,22 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, break; } } - if (encap == 1 && queue_action) + if (split && queue_action) return action_n; return 0; } /* Declare flow create/destroy prototype in advance. */ -static struct rte_flow * -flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], bool external, struct rte_flow_error *error); static void -flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow); +flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, + uint32_t flow_idx); /** * Add a flow of copying flow metadata registers in RX_CP_TBL. @@ -2908,7 +3369,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, }; struct mlx5_flow_action_copy_mreg cp_mreg = { .dst = REG_B, - .src = 0, + .src = REG_NON, }; struct rte_flow_action_jump jump = { .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, @@ -2917,6 +3378,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; struct mlx5_flow_mreg_copy_resource *mcp_res; + uint32_t idx = 0; int ret; /* Fill the register fileds in the flow. 
*/ @@ -2929,33 +3391,37 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, return NULL; cp_mreg.src = ret; /* Check if already registered. */ - assert(priv->mreg_cp_tbl); + MLX5_ASSERT(priv->mreg_cp_tbl); mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); if (mcp_res) { /* For non-default rule. */ - if (mark_id) + if (mark_id != MLX5_DEFAULT_COPY_ID) mcp_res->refcnt++; - assert(mark_id || mcp_res->refcnt == 1); + MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || + mcp_res->refcnt == 1); return mcp_res; } /* Provide the full width of FLAG specific value. */ if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) tag_spec.data = MLX5_FLOW_MARK_DEFAULT; /* Build a new flow. */ - if (mark_id) { + if (mark_id != MLX5_DEFAULT_COPY_ID) { items[0] = (struct rte_flow_item){ - .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, .spec = &tag_spec, }; items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END, }; actions[0] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_MARK, .conf = &ftag, }; actions[1] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = &cp_mreg, }; actions[2] = (struct rte_flow_action){ @@ -2972,7 +3438,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, .type = RTE_FLOW_ITEM_TYPE_END, }; actions[0] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = &cp_mreg, }; actions[1] = (struct rte_flow_action){ @@ -2984,33 +3451,34 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, }; } /* Build a new entry. */ - mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); + mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); if (!mcp_res) { rte_errno = ENOMEM; return NULL; } + mcp_res->idx = idx; /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not * be applied, removed, deleted in ardbitrary order * by list traversing. 
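The mark-copy resource handling above is a lookup-or-create with reference counting, keyed by mark id in a hash list; only non-default ids bump the count on reuse, while the default entry stays at a single reference. A simplified sketch of that policy (the hash list and allocator are stand-ins, not the mlx5 hlist/ipool API):

struct copy_res {
	unsigned int mark_id;
	unsigned int refcnt;
};

#define DEFAULT_COPY_ID 0xffffffffu /* stand-in for MLX5_DEFAULT_COPY_ID */

static struct copy_res *
copy_res_get(struct copy_res *(*lookup)(unsigned int mark_id),
	     struct copy_res *(*create)(unsigned int mark_id),
	     unsigned int mark_id)
{
	struct copy_res *res = lookup(mark_id);

	if (res != 0) {
		/* The default entry is shared but never counted above 1. */
		if (mark_id != DEFAULT_COPY_ID)
			res->refcnt++;
		return res;
	}
	res = create(mark_id);
	if (res != 0)
		res->refcnt = 1;
	return res;
}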
*/ - mcp_res->flow = flow_list_create(dev, NULL, &attr, items, + mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, actions, false, error); - if (!mcp_res->flow) + if (!mcp_res->rix_flow) goto error; mcp_res->refcnt++; mcp_res->hlist_ent.key = mark_id; ret = mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - assert(!ret); + MLX5_ASSERT(!ret); if (ret) goto error; return mcp_res; error: - if (mcp_res->flow) - flow_list_destroy(dev, NULL, mcp_res->flow); - rte_free(mcp_res); + if (mcp_res->rix_flow) + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); return NULL; } @@ -3026,29 +3494,39 @@ static void flow_mreg_del_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; struct mlx5_priv *priv = dev->data->dev_private; + if (!flow->rix_mreg_copy) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); if (!mcp_res || !priv->mreg_cp_tbl) return; if (flow->copy_applied) { - assert(mcp_res->appcnt); + MLX5_ASSERT(mcp_res->appcnt); flow->copy_applied = 0; --mcp_res->appcnt; - if (!mcp_res->appcnt) - flow_drv_remove(dev, mcp_res->flow); + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } } /* * We do not check availability of metadata registers here, - * because copy resources are allocated in this case. + * because copy resources are not allocated in this case. */ if (--mcp_res->refcnt) return; - assert(mcp_res->flow); - flow_list_destroy(dev, NULL, mcp_res->flow); + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - rte_free(mcp_res); - flow->mreg_copy = NULL; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + flow->rix_mreg_copy = 0; } /** @@ -3066,15 +3544,26 @@ static int flow_mreg_start_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (!mcp_res || flow->copy_applied) + if (!flow->rix_mreg_copy || flow->copy_applied) + return 0; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) return 0; if (!mcp_res->appcnt) { - ret = flow_drv_apply(dev, mcp_res->flow, NULL); - if (ret) - return ret; + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) { + ret = flow_drv_apply(dev, mcp_flow, NULL); + if (ret) + return ret; + } } ++mcp_res->appcnt; flow->copy_applied = 1; @@ -3093,15 +3582,26 @@ static void flow_mreg_stop_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; - if (!mcp_res || !flow->copy_applied) + if (!flow->rix_mreg_copy || !flow->copy_applied) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) return; - assert(mcp_res->appcnt); + MLX5_ASSERT(mcp_res->appcnt); --mcp_res->appcnt; flow->copy_applied = 0; - if (!mcp_res->appcnt) - flow_drv_remove(dev, mcp_res->flow); + if 
(!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } } /** @@ -3119,13 +3619,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) /* Check if default flow is registered. */ if (!priv->mreg_cp_tbl) return; - mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL); + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, + MLX5_DEFAULT_COPY_ID); if (!mcp_res) return; - assert(mcp_res->flow); - flow_list_destroy(dev, NULL, mcp_res->flow); + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - rte_free(mcp_res); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); } /** @@ -3152,7 +3653,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; - mcp_res = flow_mreg_add_copy_action(dev, 0, error); + mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); if (!mcp_res) return -rte_errno; return 0; @@ -3217,7 +3718,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, (dev, MLX5_FLOW_MARK_DEFAULT, error); if (!mcp_res) return -rte_errno; - flow->mreg_copy = mcp_res; + flow->rix_mreg_copy = mcp_res->idx; if (dev->data->dev_started) { mcp_res->appcnt++; flow->copy_applied = 1; @@ -3230,7 +3731,7 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, flow_mreg_add_copy_action(dev, mark->id, error); if (!mcp_res) return -rte_errno; - flow->mreg_copy = mcp_res; + flow->rix_mreg_copy = mcp_res->idx; if (dev->data->dev_started) { mcp_res->appcnt++; flow->copy_applied = 1; @@ -3248,7 +3749,8 @@ flow_mreg_update_copy_table(struct rte_eth_dev *dev, /** * Split the hairpin flow. - * Since HW can't support encap on Rx we move the encap to Tx. + * Since HW can't support encap and push-vlan on Rx, we move these + * actions to Tx. * If the count action is after the encap then we also * move the count action. in this case the count will also measure * the outer bytes. @@ -3292,6 +3794,9 @@ flow_hairpin_split(struct rte_eth_dev *dev, switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; @@ -3345,31 +3850,32 @@ flow_hairpin_split(struct rte_eth_dev *dev, } /* Add set meta action and end action for the Rx flow. */ tag_action = actions_rx; - tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; actions_rx++; rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); actions_rx++; set_tag = (void *)actions_rx; set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); - assert(set_tag->id > REG_NONE); + MLX5_ASSERT(set_tag->id > REG_NON); set_tag->data = *flow_id; tag_action->conf = set_tag; /* Create Tx item list. 
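flow_hairpin_split() above pairs the two halves of a hairpin flow through a register tag: the Rx subflow appends a "set tag = flow_id" action, and the Tx subflow starts its pattern with a match on that same id. A compact sketch of the pairing with placeholder structures (the real code uses the internal mlx5 tag action/item types):

#include <stdint.h>

struct set_tag_action { uint32_t id; uint32_t data; }; /* placeholder */
struct tag_match_item { uint32_t id; uint32_t data; }; /* placeholder */

/*
 * Rx side writes the unique flow id into a register; Tx side matches the
 * same register and value, so packets looped through the hairpin queue
 * land on the right egress rule.
 */
static void
hairpin_pair(struct set_tag_action *rx_tag, struct tag_match_item *tx_match,
	     uint32_t reg_id, uint32_t flow_id)
{
	rx_tag->id = reg_id;
	rx_tag->data = flow_id;
	tx_match->id = reg_id;
	tx_match->data = flow_id; /* mask in the real code is all-ones */
}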
*/ rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); addr = (void *)&pattern_tx[2]; item = pattern_tx; - item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; + item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; tag_item = (void *)addr; tag_item->data = *flow_id; tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); - assert(set_tag->id > REG_NONE); + MLX5_ASSERT(set_tag->id > REG_NON); item->spec = tag_item; addr += sizeof(struct mlx5_rte_flow_item_tag); tag_item = (void *)addr; tag_item->data = UINT32_MAX; tag_item->id = UINT16_MAX; item->mask = tag_item; - addr += sizeof(struct mlx5_rte_flow_item_tag); item->last = NULL; item++; item->type = RTE_FLOW_ITEM_TYPE_END; @@ -3380,12 +3886,16 @@ flow_hairpin_split(struct rte_eth_dev *dev, * The last stage of splitting chain, just creates the subflow * without any modification. * - * @param dev + * @param[in] dev * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. * @param[in, out] sub_flow * Pointer to return the created subflow, may be NULL. + * @param[in] prefix_layers + * Prefix subflow layers, may be 0. + * @param[in] prefix_mark + * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items @@ -3394,6 +3904,8 @@ flow_hairpin_split(struct rte_eth_dev *dev, * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -3403,20 +3915,34 @@ static int flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow *flow, struct mlx5_flow **sub_flow, + uint64_t prefix_layers, + uint32_t prefix_mark, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_flow *dev_flow; - dev_flow = flow_drv_prepare(flow, attr, items, actions, error); + dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, + flow_idx, error); if (!dev_flow) return -rte_errno; dev_flow->flow = flow; dev_flow->external = external; /* Subflow object was created, we must include one in the list. */ - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); + /* + * If dev_flow is as one of the suffix flow, some actions in suffix + * flow may need some user defined item layer flags, and pass the + * Metadate rxq mark flag to suffix flow as well. + */ + if (prefix_layers) + dev_flow->handle->layers = prefix_layers; + if (prefix_mark) + dev_flow->handle->mark = 1; if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); @@ -3436,6 +3962,10 @@ flow_create_split_inner(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[out] sfx_items + * Suffix flow match items (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). 
* @param[out] actions_sfx @@ -3452,66 +3982,61 @@ flow_create_split_inner(struct rte_eth_dev *dev, */ static int flow_meter_split_prep(struct rte_eth_dev *dev, + const struct rte_flow_item items[], + struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], struct rte_flow_action actions_pre[]) { - struct rte_flow_action *tag_action; + struct rte_flow_action *tag_action = NULL; + struct rte_flow_item *tag_item; struct mlx5_rte_flow_action_set_tag *set_tag; struct rte_flow_error error; const struct rte_flow_action_raw_encap *raw_encap; const struct rte_flow_action_raw_decap *raw_decap; + struct mlx5_rte_flow_item_tag *tag_spec; + struct mlx5_rte_flow_item_tag *tag_mask; uint32_t tag_id; + bool copy_vlan = false; - /* Add the extra tag action first. */ - tag_action = actions_pre; - tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; - actions_pre++; /* Prepare the actions for prefix and suffix flow. */ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action **action_cur = NULL; + switch (actions->type) { case RTE_FLOW_ACTION_TYPE_METER: + /* Add the extra tag action first. */ + tag_action = actions_pre; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + actions_pre++; + action_cur = &actions_pre; + break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; + action_cur = &actions_pre; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - rte_memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; - } + if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: raw_decap = actions->conf; - /* Size 0 decap means 50 bytes as vxlan decap. */ - if (raw_decap->size && (raw_decap->size < - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4)))) { - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - rte_memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; - } + if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + copy_vlan = true; break; default: - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; break; } + if (!action_cur) + action_cur = &actions_sfx; + memcpy(*action_cur, actions, sizeof(struct rte_flow_action)); + (*action_cur)++; } /* Add end action to the actions. */ actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; @@ -3524,8 +4049,47 @@ flow_meter_split_prep(struct rte_eth_dev *dev, * Get the id from the qrss_pool to make qrss share the id with meter. */ tag_id = flow_qrss_get_id(dev); - set_tag->data = rte_cpu_to_be_32(tag_id); + set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; + assert(tag_action); tag_action->conf = set_tag; + /* Prepare the suffix subflow items. 
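In the meter split above, the shared id is written into the upper bits of the tag register (shifted by the meter color bits), and the suffix tag match that follows uses a 0xffffff00 mask, so the meter hardware can use the low bits for packet color without breaking the suffix match. A tiny numeric illustration, assuming an 8-bit color field (the actual MLX5_MTR_COLOR_BITS value is defined in the PMD headers):

#include <assert.h>
#include <stdint.h>

#define MTR_COLOR_BITS 8u            /* assumed width of the color field */
#define SFX_TAG_MASK   0xffffff00u   /* matches the id, ignores the color */

int main(void)
{
	uint32_t tag_id = 0x1234;                     /* id from the shared pool */
	uint32_t tag_data = tag_id << MTR_COLOR_BITS; /* 0x00123400 */
	uint32_t colored = tag_data | 0x2;            /* meter writes a color */

	/* The suffix flow still matches: color bits are masked out. */
	assert((colored & SFX_TAG_MASK) == (tag_data & SFX_TAG_MASK));
	return 0;
}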
*/ + tag_item = sfx_items++; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int item_type = items->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_PORT_ID: + memcpy(sfx_items, items, sizeof(*sfx_items)); + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + if (copy_vlan) { + memcpy(sfx_items, items, sizeof(*sfx_items)); + /* + * Convert to internal match item, it is used + * for vlan push and set vid. + */ + sfx_items->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_VLAN; + sfx_items++; + } + break; + default: + break; + } + } + sfx_items->type = RTE_FLOW_ITEM_TYPE_END; + sfx_items++; + tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; + tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS; + tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); + tag_mask = tag_spec + 1; + tag_mask->data = 0xffffff00; + tag_item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->spec = tag_spec; + tag_item->last = NULL; + tag_item->mask = tag_mask; return tag_id; } @@ -3625,7 +4189,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, /* Construct new actions array. */ /* Replace QUEUE/RSS action. */ split_actions[qrss_idx] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = set_tag, }; } @@ -3658,6 +4223,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * Number of actions in the list. * @param[out] error * Perform verbose error reporting if not NULL. + * @param[in] encap_idx + * The encap action inndex. * * @return * 0 on success, negative value otherwise @@ -3666,7 +4233,8 @@ static int flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, struct rte_flow_action *ext_actions, const struct rte_flow_action *actions, - int actions_n, struct rte_flow_error *error) + int actions_n, struct rte_flow_error *error, + int encap_idx) { struct mlx5_flow_action_copy_mreg *cp_mreg = (struct mlx5_flow_action_copy_mreg *) @@ -3681,18 +4249,226 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, if (ret < 0) return ret; cp_mreg->src = ret; - memcpy(ext_actions, actions, - sizeof(*ext_actions) * actions_n); - ext_actions[actions_n - 1] = (struct rte_flow_action){ - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, - .conf = cp_mreg, - }; - ext_actions[actions_n] = (struct rte_flow_action){ - .type = RTE_FLOW_ACTION_TYPE_END, - }; + if (encap_idx != 0) + memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); + if (encap_idx == actions_n - 1) { + ext_actions[actions_n - 1] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = cp_mreg, + }; + ext_actions[actions_n] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } else { + ext_actions[encap_idx] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = cp_mreg, + }; + memcpy(ext_actions + encap_idx + 1, actions + encap_idx, + sizeof(*ext_actions) * (actions_n - encap_idx)); + } return 0; } +/** + * Check the match action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. + * @param[in] attr + * Flow rule attributes. + * @param[in] action + * The action to be check if exist. + * @param[out] match_action_pos + * Pointer to the position of the matched action if exists, otherwise is -1. + * @param[out] qrss_action_pos + * Pointer to the position of the Queue/RSS action if exists, otherwise is -1. 
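flow_mreg_tx_copy_prep() above has to place the register-copy action before any encapsulation, which boils down to a plain array splice: copy the actions up to encap_idx, drop the new action in, then copy the remainder shifted by one slot. A generic sketch of that splice, using int as a stand-in element type:

#include <string.h>

/* Insert new_action at position idx of src (n trailing elements after
 * idx included); dst must have room for n + 1 elements in total. */
static void
splice_action(int *dst, const int *src, int n, int idx, int new_action)
{
	memcpy(dst, src, sizeof(*src) * idx);
	dst[idx] = new_action;
	memcpy(dst + idx + 1, src + idx, sizeof(*src) * (n - idx));
}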
+ * + * @return + * > 0 the total number of actions. + * 0 if not found match action in action list. + */ +static int +flow_check_match_action(const struct rte_flow_action actions[], + const struct rte_flow_attr *attr, + enum rte_flow_action_type action, + int *match_action_pos, int *qrss_action_pos) +{ + const struct rte_flow_action_sample *sample; + int actions_n = 0; + int jump_flag = 0; + uint32_t ratio = 0; + int sub_type = 0; + int flag = 0; + + *match_action_pos = -1; + *qrss_action_pos = -1; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == action) { + flag = 1; + *match_action_pos = actions_n; + } + if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE || + actions->type == RTE_FLOW_ACTION_TYPE_RSS) + *qrss_action_pos = actions_n; + if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP) + jump_flag = 1; + if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) { + sample = actions->conf; + ratio = sample->ratio; + sub_type = ((const struct rte_flow_action *) + (sample->actions))->type; + } + actions_n++; + } + if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) { + if (ratio == 1) { + /* JUMP Action not support for Mirroring; + * Mirroring support multi-destination; + */ + if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END) + flag = 0; + } + } + /* Count RTE_FLOW_ACTION_TYPE_END. */ + return flag ? actions_n + 1 : 0; +} + +#define SAMPLE_SUFFIX_ITEM 2 + +/** + * Split the sample flow. + * + * As sample flow will split to two sub flow, sample flow with + * sample action, the other actions will move to new suffix flow. + * + * Also add unique tag id with tag action in the sample flow, + * the same tag id will be as match in the suffix flow. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] fdb_tx + * FDB egress flow flag. + * @param[out] sfx_items + * Suffix flow match items (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] actions_sfx + * Suffix flow actions. + * @param[out] actions_pre + * Prefix flow actions. + * @param[in] actions_n + * The total number of actions. + * @param[in] sample_action_pos + * The sample action position. + * @param[in] qrss_action_pos + * The Queue/RSS action position. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, or unique flow_id, a negative errno value + * otherwise and rte_errno is set. + */ +static int +flow_sample_split_prep(struct rte_eth_dev *dev, + uint32_t fdb_tx, + struct rte_flow_item sfx_items[], + const struct rte_flow_action actions[], + struct rte_flow_action actions_sfx[], + struct rte_flow_action actions_pre[], + int actions_n, + int sample_action_pos, + int qrss_action_pos, + struct rte_flow_error *error) +{ + struct mlx5_rte_flow_action_set_tag *set_tag; + struct mlx5_rte_flow_item_tag *tag_spec; + struct mlx5_rte_flow_item_tag *tag_mask; + uint32_t tag_id = 0; + int index; + int ret; + + if (sample_action_pos < 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "invalid position of sample " + "action in list"); + if (!fdb_tx) { + /* Prepare the prefix tag action. */ + set_tag = (void *)(actions_pre + actions_n + 1); + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); + if (ret < 0) + return ret; + set_tag->id = ret; + tag_id = flow_qrss_get_id(dev); + set_tag->data = tag_id; + /* Prepare the suffix subflow items. 
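flow_check_match_action() above is a single pass over the action list that records where the searched action and any QUEUE/RSS action sit; the sample split then partitions the list around those indexes. A minimal scan sketch with integer stand-ins for the action types:

/* Returns the number of actions including the terminator, or 0 if the
 * searched action is absent; 'end' marks the list terminator. */
static int
scan_actions(const int *actions, int end, int wanted,
	     int *wanted_pos, int *qrss_pos, int queue, int rss)
{
	int n = 0;
	int found = 0;

	*wanted_pos = -1;
	*qrss_pos = -1;
	for (; actions[n] != end; n++) {
		if (actions[n] == wanted) {
			found = 1;
			*wanted_pos = n;
		}
		if (actions[n] == queue || actions[n] == rss)
			*qrss_pos = n;
	}
	return found ? n + 1 : 0;
}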
*/ + tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); + tag_spec->data = tag_id; + tag_spec->id = set_tag->id; + tag_mask = tag_spec + 1; + tag_mask->data = UINT32_MAX; + sfx_items[0] = (struct rte_flow_item){ + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .spec = tag_spec, + .last = NULL, + .mask = tag_mask, + }; + sfx_items[1] = (struct rte_flow_item){ + .type = (enum rte_flow_item_type) + RTE_FLOW_ITEM_TYPE_END, + }; + } + /* Prepare the actions for prefix and suffix flow. */ + if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { + index = qrss_action_pos; + /* Put the preceding the Queue/RSS action into prefix flow. */ + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + /* Put others preceding the sample action into prefix flow. */ + if (sample_action_pos > index + 1) + memcpy(actions_pre + index, actions + index + 1, + sizeof(struct rte_flow_action) * + (sample_action_pos - index - 1)); + index = sample_action_pos - 1; + /* Put Queue/RSS action into Suffix flow. */ + memcpy(actions_sfx, actions + qrss_action_pos, + sizeof(struct rte_flow_action)); + actions_sfx++; + } else { + index = sample_action_pos; + if (index != 0) + memcpy(actions_pre, actions, + sizeof(struct rte_flow_action) * index); + } + /* Add the extra tag action for NIC-RX and E-Switch ingress. */ + if (!fdb_tx) { + actions_pre[index++] = + (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG, + .conf = set_tag, + }; + } + memcpy(actions_pre + index, actions + sample_action_pos, + sizeof(struct rte_flow_action)); + index += 1; + actions_pre[index] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + RTE_FLOW_ACTION_TYPE_END, + }; + /* Put the actions after sample into Suffix flow. */ + memcpy(actions_sfx, actions + sample_action_pos + 1, + sizeof(struct rte_flow_action) * + (actions_n - sample_action_pos - 1)); + return tag_id; +} + /** * The splitting for metadata feature. * @@ -3707,6 +4483,10 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. + * @param[in] prefix_layers + * Prefix flow layer flags. + * @param[in] prefix_mark + * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items @@ -3715,6 +4495,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -3723,10 +4505,13 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, static int flow_create_split_metadata(struct rte_eth_dev *dev, struct rte_flow *flow, + uint64_t prefix_layers, + uint32_t prefix_mark, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; @@ -3737,15 +4522,19 @@ flow_create_split_metadata(struct rte_eth_dev *dev, int mtr_sfx = 0; size_t act_size; int actions_n; + int encap_idx; int ret; /* Check whether extensive metadata feature is engaged. 
*/ if (!config->dv_flow_en || config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || !mlx5_flow_ext_mreg_supported(dev)) - return flow_create_split_inner(dev, flow, NULL, attr, items, - actions, external, error); - actions_n = flow_parse_qrss_action(actions, &qrss); + return flow_create_split_inner(dev, flow, NULL, prefix_layers, + prefix_mark, attr, items, + actions, external, flow_idx, + error); + actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, + &encap_idx); if (qrss) { /* Exclude hairpin flows from splitting. */ if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { @@ -3777,7 +4566,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + sizeof(struct rte_flow_action_set_tag) + sizeof(struct rte_flow_action_jump); - ext_actions = rte_zmalloc(__func__, act_size, 0); + ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, + SOCKET_ID_ANY); if (!ext_actions) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, @@ -3792,6 +4582,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, RTE_FLOW_ACTION_TYPE_VOID; else ext_actions[qrss - actions].type = + (enum rte_flow_action_type) MLX5_RTE_FLOW_ACTION_TYPE_TAG; /* * Create the new actions list with removed Q/RSS action @@ -3812,7 +4603,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev, */ act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + sizeof(struct mlx5_flow_action_copy_mreg); - ext_actions = rte_zmalloc(__func__, act_size, 0); + ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0, + SOCKET_ID_ANY); if (!ext_actions) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, @@ -3820,17 +4612,18 @@ flow_create_split_metadata(struct rte_eth_dev *dev, "metadata flow"); /* Create the action list appended with copy register. */ ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, - actions_n, error); + actions_n, error, encap_idx); if (ret < 0) goto exit; } /* Add the unmodified original or prefix subflow. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, - ext_actions ? ext_actions : actions, - external, error); + ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, + prefix_mark, attr, + items, ext_actions ? ext_actions : + actions, external, flow_idx, error); if (ret < 0) goto exit; - assert(dev_flow); + MLX5_ASSERT(dev_flow); if (qrss) { const struct rte_flow_attr q_attr = { .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, @@ -3839,11 +4632,12 @@ flow_create_split_metadata(struct rte_eth_dev *dev, /* Internal PMD action to set register. */ struct mlx5_rte_flow_item_tag q_tag_spec = { .data = qrss_id, - .id = 0, + .id = REG_NON, }; struct rte_flow_item q_items[] = { { - .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, .spec = &q_tag_spec, .last = NULL, .mask = NULL, @@ -3861,7 +4655,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, .type = RTE_FLOW_ACTION_TYPE_END, }, }; - uint64_t hash_fields = dev_flow->hash_fields; + uint64_t layers = flow_get_prefix_layer_flags(dev_flow); /* * Configure the tag item only if there is no meter subflow. @@ -3870,7 +4664,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, */ if (qrss_id) { /* Not meter subflow. 
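The metadata split above follows the same id hand-off as the hairpin and meter splits: the prefix subflow replaces the user's QUEUE/RSS action with an internal tag write carrying a freshly allocated qrss_id, and the suffix subflow in the copy-table group matches that tag value and carries the original fate action. A deliberately compact sketch of the hand-off, with integers standing in for actions:

struct split_out {
	int prefix_tag;    /* SET_TAG(qrss_id) in the real code */
	int suffix_match;  /* TAG item spec on the same qrss_id */
	int suffix_fate;   /* the user's original QUEUE/RSS action */
};

static struct split_out
split_qrss(int user_fate, int qrss_id)
{
	struct split_out out;

	out.prefix_tag = qrss_id;
	out.suffix_match = qrss_id;
	out.suffix_fate = user_fate; /* Q/RSS moves to the suffix flow */
	return out;
}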
*/ - assert(!mtr_sfx); + MLX5_ASSERT(!mtr_sfx); /* * Put unique id in prefix flow due to it is destroyed * after suffix flow and id will be freed after there @@ -3878,8 +4672,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev, * reallocation becomes possible (for example, for * other flows in other threads). */ - dev_flow->qrss_id = qrss_id; - qrss_id = 0; + dev_flow->handle->split_flow_id = qrss_id; ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); if (ret < 0) @@ -3888,14 +4681,15 @@ flow_create_split_metadata(struct rte_eth_dev *dev, } dev_flow = NULL; /* Add suffix subflow to execute Q/RSS. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, + ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0, &q_attr, mtr_sfx ? items : q_items, q_actions, - external, error); + external, flow_idx, error); if (ret < 0) goto exit; - assert(dev_flow); - dev_flow->hash_fields = hash_fields; + /* qrss ID should be freed if failed. */ + qrss_id = 0; + MLX5_ASSERT(dev_flow); } exit: @@ -3905,7 +4699,7 @@ exit: * by flow_drv_destroy. */ flow_qrss_free_id(dev, qrss_id); - rte_free(ext_actions); + mlx5_free(ext_actions); return ret; } @@ -3923,6 +4717,10 @@ exit: * Pointer to Ethernet device. * @param[in] flow * Parent flow structure pointer. + * @param[in] prefix_layers + * Prefix subflow layers, may be 0. + * @param[in] prefix_mark + * Prefix subflow mark flag, may be 0. * @param[in] attr * Flow rule attributes. * @param[in] items @@ -3931,6 +4729,8 @@ exit: * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -3938,17 +4738,19 @@ exit: */ static int flow_create_split_meter(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + struct rte_flow *flow, + uint64_t prefix_layers, + uint32_t prefix_mark, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_action *sfx_actions = NULL; struct rte_flow_action *pre_actions = NULL; struct rte_flow_item *sfx_items = NULL; - const struct rte_flow_item *sfx_port_id_item; struct mlx5_flow *dev_flow = NULL; struct rte_flow_attr sfx_attr = *attr; uint32_t mtr = 0; @@ -3961,69 +4763,190 @@ flow_create_split_meter(struct rte_eth_dev *dev, if (priv->mtr_en) actions_n = flow_check_meter_action(actions, &mtr); if (mtr) { - struct mlx5_rte_flow_item_tag *tag_spec; /* The five prefix actions: meter, decap, encap, tag, end. */ act_size = sizeof(struct rte_flow_action) * (actions_n + 5) + - sizeof(struct rte_flow_action_set_tag); - /* tag, end. */ -#define METER_SUFFIX_ITEM 3 + sizeof(struct mlx5_rte_flow_action_set_tag); + /* tag, vlan, port id, end. 
*/ +#define METER_SUFFIX_ITEM 4 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + - sizeof(struct mlx5_rte_flow_item_tag); - sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0); + sizeof(struct mlx5_rte_flow_item_tag) * 2; + sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), + 0, SOCKET_ID_ANY); if (!sfx_actions) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "no memory to split " "meter flow"); + sfx_items = (struct rte_flow_item *)((char *)sfx_actions + + act_size); pre_actions = sfx_actions + actions_n; - mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions, - pre_actions); + mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items, + actions, sfx_actions, + pre_actions); if (!mtr_tag_id) { ret = -rte_errno; goto exit; } /* Add the prefix subflow. */ - ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items, - pre_actions, external, error); + ret = flow_create_split_inner(dev, flow, &dev_flow, + prefix_layers, 0, + attr, items, + pre_actions, external, + flow_idx, error); if (ret) { ret = -rte_errno; goto exit; } - dev_flow->mtr_flow_id = mtr_tag_id; - /* Prepare the suffix flow match pattern. */ - sfx_items = (struct rte_flow_item *)((char *)sfx_actions + - act_size); - tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items + - METER_SUFFIX_ITEM); - tag_spec->data = rte_cpu_to_be_32(dev_flow->mtr_flow_id); - tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, - error); - sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG; - sfx_items->spec = tag_spec; - sfx_items->last = NULL; - sfx_items->mask = NULL; - sfx_items++; - sfx_port_id_item = find_port_id_item(items); - if (sfx_port_id_item) { - memcpy(sfx_items, sfx_port_id_item, - sizeof(*sfx_items)); - sfx_items++; - } - sfx_items->type = RTE_FLOW_ITEM_TYPE_END; - sfx_items -= METER_SUFFIX_ITEM; + dev_flow->handle->split_flow_id = mtr_tag_id; /* Setting the sfx group atrr. */ sfx_attr.group = sfx_attr.transfer ? (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : MLX5_FLOW_TABLE_LEVEL_SUFFIX; } /* Add the prefix subflow. */ - ret = flow_create_split_metadata(dev, flow, &sfx_attr, - sfx_items ? sfx_items : items, + ret = flow_create_split_metadata(dev, flow, dev_flow ? + flow_get_prefix_layer_flags(dev_flow) : + prefix_layers, dev_flow ? + dev_flow->handle->mark : prefix_mark, + &sfx_attr, sfx_items ? + sfx_items : items, sfx_actions ? sfx_actions : actions, - external, error); + external, flow_idx, error); exit: if (sfx_actions) - rte_free(sfx_actions); + mlx5_free(sfx_actions); + return ret; +} + +/** + * The splitting for sample feature. + * + * Once Sample action is detected in the action list, the flow actions should + * be split into prefix sub flow and suffix sub flow. + * + * The original items remain in the prefix sub flow, all actions preceding the + * sample action and the sample action itself will be copied to the prefix + * sub flow, the actions following the sample action will be copied to the + * suffix sub flow, Queue action always be located in the suffix sub flow. + * + * In order to make the packet from prefix sub flow matches with suffix sub + * flow, an extra tag action be added into prefix sub flow, and the suffix sub + * flow uses tag item with the unique flow id. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). 
+ * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @return + * 0 on success, negative value otherwise + */ +static int +flow_create_split_sample(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, uint32_t flow_idx, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_action *sfx_actions = NULL; + struct rte_flow_action *pre_actions = NULL; + struct rte_flow_item *sfx_items = NULL; + struct mlx5_flow *dev_flow = NULL; + struct rte_flow_attr sfx_attr = *attr; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_dv_sample_resource *sample_res; + struct mlx5_flow_tbl_data_entry *sfx_tbl_data; + struct mlx5_flow_tbl_resource *sfx_tbl; + union mlx5_flow_tbl_key sfx_table_key; +#endif + size_t act_size; + size_t item_size; + uint32_t fdb_tx = 0; + int32_t tag_id = 0; + int actions_n = 0; + int sample_action_pos; + int qrss_action_pos; + int ret = 0; + + if (priv->sampler_en) + actions_n = flow_check_match_action(actions, attr, + RTE_FLOW_ACTION_TYPE_SAMPLE, + &sample_action_pos, &qrss_action_pos); + if (actions_n) { + /* The prefix actions must includes sample, tag, end. */ + act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1) + + sizeof(struct mlx5_rte_flow_action_set_tag); + item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM + + sizeof(struct mlx5_rte_flow_item_tag) * 2; + sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + + item_size), 0, SOCKET_ID_ANY); + if (!sfx_actions) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no memory to split " + "sample flow"); + /* The representor_id is -1 for uplink. */ + fdb_tx = (attr->transfer && priv->representor_id != -1); + if (!fdb_tx) + sfx_items = (struct rte_flow_item *)((char *)sfx_actions + + act_size); + pre_actions = sfx_actions + actions_n; + tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items, + actions, sfx_actions, + pre_actions, actions_n, + sample_action_pos, + qrss_action_pos, error); + if (tag_id < 0 || (!fdb_tx && !tag_id)) { + ret = -rte_errno; + goto exit; + } + /* Add the prefix subflow. */ + ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr, + items, pre_actions, external, + flow_idx, error); + if (ret) { + ret = -rte_errno; + goto exit; + } + dev_flow->handle->split_flow_id = tag_id; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + /* Set the sfx group attr. */ + sample_res = (struct mlx5_flow_dv_sample_resource *) + dev_flow->dv.sample_res; + sfx_tbl = (struct mlx5_flow_tbl_resource *) + sample_res->normal_path_tbl; + sfx_tbl_data = container_of(sfx_tbl, + struct mlx5_flow_tbl_data_entry, tbl); + sfx_table_key.v64 = sfx_tbl_data->entry.key; + sfx_attr.group = sfx_attr.transfer ? + (sfx_table_key.table_id - 1) : + sfx_table_key.table_id; +#endif + } + /* Add the suffix subflow. */ + ret = flow_create_split_meter(dev, flow, dev_flow ? + flow_get_prefix_layer_flags(dev_flow) : 0, + dev_flow ? dev_flow->handle->mark : 0, + &sfx_attr, sfx_items ? sfx_items : items, + sfx_actions ? 
sfx_actions : actions, + external, flow_idx, error); +exit: + if (sfx_actions) + mlx5_free(sfx_actions); return ret; } @@ -4057,6 +4980,8 @@ exit: * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. * @param[out] error * Perform verbose error reporting if not NULL. * @return @@ -4068,13 +4993,14 @@ flow_create_split_outer(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + bool external, uint32_t flow_idx, + struct rte_flow_error *error) { int ret; - ret = flow_create_split_meter(dev, flow, attr, items, - actions, external, error); - assert(ret <= 0); + ret = flow_create_split_sample(dev, flow, attr, items, + actions, external, flow_idx, error); + MLX5_ASSERT(ret <= 0); return ret; } @@ -4100,10 +5026,10 @@ flow_create_split_outer(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise and rte_errno is set. + * A flow index on success, 0 otherwise and rte_errno is set. */ -static struct rte_flow * -flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -4114,7 +5040,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, struct mlx5_flow *dev_flow; const struct rte_flow_action_rss *rss; union { - struct rte_flow_expand_rss buf; + struct mlx5_flow_expand_rss buf; uint8_t buffer[2048]; } expand_buffer; union { @@ -4129,81 +5055,89 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; uint8_t buffer[2048]; } items_tx; - struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc)[!!priv->flow_idx]; const struct rte_flow_action *p_actions_rx = actions; - int ret; uint32_t i; - uint32_t flow_size; - int hairpin_flow = 0; + uint32_t idx = 0; + int hairpin_flow; uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; + struct rte_flow_attr attr_factor = {0}; + int ret; - hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr)); + if (external) + attr_factor.group *= MLX5_FLOW_TABLE_FACTOR; + hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions); + ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx, + external, hairpin_flow, error); + if (ret < 0) + return 0; if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { rte_errno = EINVAL; - return NULL; + return 0; } flow_hairpin_split(dev, actions, actions_rx.actions, actions_hairpin_tx.actions, items_tx.items, &hairpin_id); p_actions_rx = actions_rx.actions; } - ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, - error); - if (ret < 0) - goto error_before_flow; - flow_size = sizeof(struct rte_flow); - rss = flow_get_rss_action(p_actions_rx); - if (rss) - flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), - sizeof(void *)); - else - flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); - flow = rte_calloc(__func__, 1, flow_size, 0); + 
flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); if (!flow) { rte_errno = ENOMEM; goto error_before_flow; } - flow->drv_type = flow_get_drv_type(dev, attr); + flow->drv_type = flow_get_drv_type(dev, &attr_factor); if (hairpin_id != 0) flow->hairpin_flow_id = hairpin_id; - assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && - flow->drv_type < MLX5_FLOW_TYPE_MAX); - flow->rss.queue = (void *)(flow + 1); + MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && + flow->drv_type < MLX5_FLOW_TYPE_MAX); + memset(rss_desc, 0, sizeof(*rss_desc)); + rss = flow_get_rss_action(p_actions_rx); if (rss) { /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. */ - flow->rss.level = rss->level; + rss_desc->level = rss->level; /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ - flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; + rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; } - LIST_INIT(&flow->dev_flows); + flow->dev_handles = 0; if (rss && rss->types) { unsigned int graph_root; graph_root = find_graph_root(items, rss->level); - ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), - items, rss->types, - mlx5_support_expansion, - graph_root); - assert(ret > 0 && + ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + items, rss->types, + mlx5_support_expansion, graph_root); + MLX5_ASSERT(ret > 0 && (unsigned int)ret < sizeof(expand_buffer.buffer)); } else { buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; } + /* + * Record the start index when there is a nested call. All sub-flows + * need to be translated before another calling. + * No need to use ping-pong buffer to save memory here. + */ + if (priv->flow_idx) { + MLX5_ASSERT(!priv->flow_nested_idx); + priv->flow_nested_idx = priv->flow_idx; + } for (i = 0; i < buf->entries; ++i) { /* * The splitter may create multiple dev_flows, * depending on configuration. In the simplest * case it just creates unmodified original flow. */ - ret = flow_create_split_outer(dev, flow, attr, + ret = flow_create_split_outer(dev, flow, &attr_factor, buf->entry[i].pattern, - p_actions_rx, external, + p_actions_rx, external, idx, error); if (ret < 0) goto error; @@ -4213,13 +5147,15 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, attr_tx.group = MLX5_HAIRPIN_TX_TABLE; attr_tx.ingress = 0; attr_tx.egress = 1; - dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, - actions_hairpin_tx.actions, error); + dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, + actions_hairpin_tx.actions, + idx, error); if (!dev_flow) goto error; dev_flow->flow = flow; dev_flow->external = 0; - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); ret = flow_drv_translate(dev, dev_flow, &attr_tx, items_tx.items, actions_hairpin_tx.actions, error); @@ -4236,38 +5172,47 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, * the egress Flows belong to the different device and * copy table should be updated in peer NIC Rx domain. */ - if (attr->ingress && - (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { + if (attr_factor.ingress && + (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { ret = flow_mreg_update_copy_table(dev, flow, actions, error); if (ret) goto error; } - if (dev->data->dev_started) { + /* + * If the flow is external (from application) OR device is started, then + * the flow will be applied immediately. 
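Note the rss_desc selection above: the per-port intermediate descriptor is a two-entry array indexed by !!priv->flow_idx, so a nested flow creation (one triggered while another flow is being built) uses the second slot and does not clobber the outer flow's descriptor; the nested start index is recorded and restored on exit. A simplified illustration of the slot selection:

struct rss_desc { int placeholder; };

static struct rss_desc *
pick_rss_desc(struct rss_desc desc[2], unsigned int outer_flow_idx)
{
	/* !!outer_flow_idx is 0 for a top-level create, 1 for a nested one. */
	return &desc[!!outer_flow_idx];
}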
+ */ + if (external || dev->data->dev_started) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) goto error; } if (list) - TAILQ_INSERT_TAIL(list, flow, next); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, + flow, next); flow_rxq_flags_set(dev, flow); - return flow; -error_before_flow: - if (hairpin_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - hairpin_id); - return NULL; + /* Nested flow creation index recovery. */ + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return idx; error: - assert(flow); - flow_mreg_del_copy_action(dev, flow); + MLX5_ASSERT(flow); ret = rte_errno; /* Save rte_errno before cleanup. */ - if (flow->hairpin_flow_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - flow->hairpin_flow_id); - assert(flow); + flow_mreg_del_copy_action(dev, flow); flow_drv_destroy(dev, flow); - rte_free(flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); rte_errno = ret; /* Restore rte_errno. */ - return NULL; +error_before_flow: + ret = rte_errno; + if (hairpin_id) + mlx5_flow_id_release(priv->sh->flow_id_pool, + hairpin_id); + rte_errno = ret; + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return 0; } /** @@ -4315,8 +5260,29 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_error error; - return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, - actions, false, &error); + return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows, + &attr, &pattern, + actions, false, &error); +} + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +int +mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int hairpin_flow; + + hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + return flow_drv_validate(dev, attr, items, actions, + true, hairpin_flow, error); } /** @@ -4334,8 +5300,22 @@ mlx5_flow_create(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - return flow_list_create(dev, &priv->flows, - attr, items, actions, true, error); + /* + * If the device is not started yet, it is not allowed to created a + * flow from application. PMD default flows and traffic control flows + * are not affected. + */ + if (unlikely(!dev->data->dev_started)) { + DRV_LOG(DEBUG, "port %u is not started when " + "inserting a flow", dev->data->port_id); + rte_flow_error_set(error, ENODEV, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port not started"); + return NULL; + } + return (void *)(uintptr_t)flow_list_create(dev, &priv->flows, + attr, items, actions, true, error); } /** @@ -4344,17 +5324,24 @@ mlx5_flow_create(struct rte_eth_dev *dev, * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. If this parameter NULL, - * there is no flow removal from the list. - * @param[in] flow - * Flow to destroy. + * Pointer to the Indexed flow list. If this parameter NULL, + * there is no flow removal from the list. Be noted that as + * flow is add to the indexed list, memory of the indexed + * list points to maybe changed as flow destroyed. + * @param[in] flow_idx + * Index of flow to destroy. 
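Note how the rte_flow handle returned to the application is now just the pool index hidden in a pointer: flow_list_create() returns a uint32_t, and the rte_flow entry points cast it through uintptr_t in both directions. A tiny sketch of that round trip (a convention of this patch, not a general rte_flow rule):

#include <stdint.h>

struct rte_flow; /* opaque to the application */

static struct rte_flow *
index_to_handle(uint32_t idx)
{
	return (struct rte_flow *)(uintptr_t)idx;
}

static uint32_t
handle_to_index(struct rte_flow *handle)
{
	return (uint32_t)(uintptr_t)handle;
}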
*/ static void -flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow) +flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, + uint32_t flow_idx) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_RTE_FLOW], flow_idx); + if (!flow) + return; /* * Update RX queue flags only if port is started, otherwise it is * already clean. @@ -4366,10 +5353,21 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, flow->hairpin_flow_id); flow_drv_destroy(dev, flow); if (list) - TAILQ_REMOVE(list, flow, next); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, + flow_idx, flow, next); flow_mreg_del_copy_action(dev, flow); - rte_free(flow->fdir); - rte_free(flow); + if (flow->fdir) { + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (priv_fdir_flow->rix_flow == flow_idx) + break; + } + if (priv_fdir_flow) { + LIST_REMOVE(priv_fdir_flow, next); + mlx5_free(priv_fdir_flow->fdir); + mlx5_free(priv_fdir_flow); + } + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); } /** @@ -4378,16 +5376,22 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. + * @param active + * If flushing is called avtively. */ void -mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active) { - while (!TAILQ_EMPTY(list)) { - struct rte_flow *flow; + uint32_t num_flushed = 0; - flow = TAILQ_FIRST(list); - flow_list_destroy(dev, list, flow); + while (*list) { + flow_list_destroy(dev, list, *list); + num_flushed++; + } + if (active) { + DRV_LOG(INFO, "port %u: %u flows flushed before stopping", + dev->data->port_id, num_flushed); } } @@ -4397,14 +5401,17 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. */ void -mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list) { - struct rte_flow *flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + uint32_t idx; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { flow_drv_remove(dev, flow); flow_mreg_stop_copy_action(dev, flow); } @@ -4418,16 +5425,18 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) * @param dev * Pointer to Ethernet device. * @param list - * Pointer to a TAILQ flow list. + * Pointer to the Indexed flow list. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list) { - struct rte_flow *flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; struct rte_flow_error error; + uint32_t idx; int ret = 0; /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ @@ -4435,7 +5444,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) if (ret < 0) return -rte_errno; /* Apply Flows created by application. 
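mlx5_flow_list_flush() above keeps destroying the head of the indexed list until the head reads back as 0; each destroy relinks the list, so no separate iterator is needed and the number of flushed flows can be reported. A sketch of that loop, with the destroy callback standing in for flow_list_destroy():

#include <stdint.h>

/* destroy() must unlink the given entry so that *list advances to the
 * next index; 0 terminates the list. */
static unsigned int
flush_list(uint32_t *list, void (*destroy)(uint32_t *list, uint32_t idx))
{
	unsigned int flushed = 0;

	while (*list != 0) {
		destroy(list, *list);
		flushed++;
	}
	return flushed;
}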
*/ - TAILQ_FOREACH(flow, list, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { ret = flow_mreg_start_copy_action(dev, flow); if (ret < 0) goto error; @@ -4452,6 +5462,81 @@ error: return -rte_errno; } +/** + * Stop all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_stop_default(struct rte_eth_dev *dev) +{ + flow_mreg_del_default_copy_action(dev); + flow_rxq_flags_clear(dev); +} + +/** + * Start all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_start_default(struct rte_eth_dev *dev) +{ + struct rte_flow_error error; + + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + return flow_mreg_add_default_copy_action(dev, &error); +} + +/** + * Allocate intermediate resources for flow creation. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->inter_flows) { + priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO, + MLX5_NUM_MAX_DEV_FLOWS * + sizeof(struct mlx5_flow) + + (sizeof(struct mlx5_flow_rss_desc) + + sizeof(uint16_t) * UINT16_MAX) * 2, 0, + SOCKET_ID_ANY); + if (!priv->inter_flows) { + DRV_LOG(ERR, "can't allocate intermediate memory."); + return; + } + } + priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) + [MLX5_NUM_MAX_DEV_FLOWS]; + /* Reset the index. */ + priv->flow_idx = 0; + priv->flow_nested_idx = 0; +} + +/** + * Free intermediate resources for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + mlx5_free(priv->inter_flows); + priv->inter_flows = NULL; +} + /** * Verify the flow list is empty * @@ -4465,9 +5550,11 @@ mlx5_flow_verify(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; + uint32_t idx; int ret = 0; - TAILQ_FOREACH(flow, &priv->flows, next) { + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, + flow, next) { DRV_LOG(DEBUG, "port %u flow %p still referenced", dev->data->port_id, (void *)flow); ++ret; @@ -4503,7 +5590,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, }; struct rte_flow_item items[] = { { - .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, .spec = &queue_spec, .last = NULL, .mask = &queue_mask, @@ -4516,15 +5604,15 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, .group = MLX5_HAIRPIN_TX_TABLE, }; struct rte_flow_action actions[2]; - struct rte_flow *flow; + uint32_t flow_idx; struct rte_flow_error error; actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; actions[0].conf = &jump; actions[1].type = RTE_FLOW_ACTION_TYPE_END; - flow = flow_list_create(dev, &priv->ctrl_flows, + flow_idx = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, false, &error); - if (!flow) { + if (!flow_idx) { DRV_LOG(DEBUG, "Failed to create ctrl flow: rte_errno(%d)," " type(%d), message(%s)", @@ -4601,18 +5689,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct rte_flow *flow; + uint32_t flow_idx; struct rte_flow_error error; unsigned int i; if (!priv->reta_idx_n || !priv->rxqs_n) { return 0; } + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + 
action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = flow_list_create(dev, &priv->ctrl_flows, + flow_idx = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, false, &error); - if (!flow) + if (!flow_idx) return -rte_errno; return 0; } @@ -4638,6 +5728,62 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); } +/** + * Create default miss flow rule matching lacp traffic + * + * @param dev + * Pointer to Ethernet device. + * @param eth_spec + * An Ethernet flow spec to apply. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_lacp_miss(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + /* + * The LACP matching is done by only using ether type since using + * a multicast dst mac causes kernel to give low priority to this flow. + */ + static const struct rte_flow_item_eth lacp_spec = { + .type = RTE_BE16(0x8809), + }; + static const struct rte_flow_item_eth lacp_mask = { + .type = 0xffff, + }; + const struct rte_flow_attr attr = { + .ingress = 1, + }; + struct rte_flow_item items[] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &lacp_spec, + .mask = &lacp_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action actions[] = { + { + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_flow_error error; + uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, false, &error); + + if (!flow_idx) + return -rte_errno; + return 0; +} + /** * Destroy a flow. * @@ -4651,7 +5797,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); return 0; } @@ -4667,7 +5813,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows, false); return 0; } @@ -4693,9 +5839,13 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, } priv->isolated = !!enable; if (enable) - dev->dev_ops = &mlx5_dev_ops_isolate; + dev->dev_ops = &mlx5_os_dev_ops_isolate; else - dev->dev_ops = &mlx5_dev_ops; + dev->dev_ops = &mlx5_os_dev_ops; + + dev->rx_descriptor_status = mlx5_rx_descriptor_status; + dev->tx_descriptor_status = mlx5_tx_descriptor_status; + return 0; } @@ -4707,15 +5857,26 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, */ static int flow_drv_query(struct rte_eth_dev *dev, - struct rte_flow *flow, + uint32_t flow_idx, const struct rte_flow_action *actions, void *data, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; const struct mlx5_flow_driver_ops *fops; - enum mlx5_flow_drv_type ftype = flow->drv_type; + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_RTE_FLOW], + flow_idx); + enum mlx5_flow_drv_type ftype; - assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); + if (!flow) { + return rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "invalid flow handle"); + } + ftype = flow->drv_type; + MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(ftype); return fops->query(dev, flow, actions, data, error); @@ -4736,7 +5897,8 @@ mlx5_flow_query(struct rte_eth_dev *dev, { int 
ret; - ret = flow_drv_query(dev, flow, actions, data, error); + ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, + error); if (ret < 0) return ret; return 0; @@ -4972,23 +6134,25 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) * FDIR flow to lookup. * * @return - * Pointer of flow if found, NULL otherwise. + * Index of flow if found, 0 otherwise. */ -static struct rte_flow * +static uint32_t flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = NULL; - - assert(fdir_flow); - TAILQ_FOREACH(flow, &priv->flows, next) { - if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { - DRV_LOG(DEBUG, "port %u found FDIR flow %p", - dev->data->port_id, (void *)flow); + uint32_t flow_idx = 0; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + + MLX5_ASSERT(fdir_flow); + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) { + DRV_LOG(DEBUG, "port %u found FDIR flow %u", + dev->data->port_id, flow_idx); + flow_idx = priv_fdir_flow->rix_flow; break; } } - return flow; + return flow_idx; } /** @@ -5009,9 +6173,12 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_fdir *fdir_flow; struct rte_flow *flow; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + uint32_t flow_idx; int ret; - fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); + fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0, + SOCKET_ID_ANY); if (!fdir_flow) { rte_errno = ENOMEM; return -rte_errno; @@ -5019,23 +6186,34 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); if (ret) goto error; - flow = flow_fdir_filter_lookup(dev, fdir_flow); - if (flow) { + flow_idx = flow_fdir_filter_lookup(dev, fdir_flow); + if (flow_idx) { rte_errno = EEXIST; goto error; } - flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, - fdir_flow->items, fdir_flow->actions, true, - NULL); + priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(struct mlx5_fdir_flow), + 0, SOCKET_ID_ANY); + if (!priv_fdir_flow) { + rte_errno = ENOMEM; + goto error; + } + flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + fdir_flow->items, fdir_flow->actions, true, + NULL); + flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); if (!flow) goto error; - assert(!flow->fdir); - flow->fdir = fdir_flow; + flow->fdir = 1; + priv_fdir_flow->fdir = fdir_flow; + priv_fdir_flow->rix_flow = flow_idx; + LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next); DRV_LOG(DEBUG, "port %u created FDIR flow %p", dev->data->port_id, (void *)flow); return 0; error: - rte_free(fdir_flow); + mlx5_free(priv_fdir_flow); + mlx5_free(fdir_flow); return -rte_errno; } @@ -5055,23 +6233,30 @@ flow_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow; + uint32_t flow_idx; struct mlx5_fdir fdir_flow = { .attr.group = 0, }; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; int ret; ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); if (ret) return -rte_errno; - flow = flow_fdir_filter_lookup(dev, &fdir_flow); - if (!flow) { - rte_errno = ENOENT; - return -rte_errno; + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + /* Find the fdir in priv list */ + if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow)) + break; } - 
flow_list_destroy(dev, &priv->flows, flow); - DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", - dev->data->port_id, (void *)flow); + if (!priv_fdir_flow) + return 0; + LIST_REMOVE(priv_fdir_flow, next); + flow_idx = priv_fdir_flow->rix_flow; + flow_list_destroy(dev, &priv->flows, flow_idx); + mlx5_free(priv_fdir_flow->fdir); + mlx5_free(priv_fdir_flow); + DRV_LOG(DEBUG, "port %u deleted FDIR flow %u", + dev->data->port_id, flow_idx); return 0; } @@ -5108,8 +6293,15 @@ static void flow_fdir_filter_flush(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - - mlx5_flow_list_flush(dev, &priv->flows); + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + + while (!LIST_EMPTY(&priv->fdir_flows)) { + priv_fdir_flow = LIST_FIRST(&priv->fdir_flows); + LIST_REMOVE(priv_fdir_flow, next); + flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow); + mlx5_free(priv_fdir_flow->fdir); + mlx5_free(priv_fdir_flow); + } } /** @@ -5323,9 +6515,9 @@ mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, * Pointer to Ethernet device structure. * * @return - * Pointer to allocated counter on success, NULL otherwise. + * Index to allocated counter on success, 0 otherwise. */ -struct mlx5_flow_counter * +uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev) { const struct mlx5_flow_driver_ops *fops; @@ -5338,7 +6530,7 @@ mlx5_counter_alloc(struct rte_eth_dev *dev) DRV_LOG(ERR, "port %u counter allocate is not supported.", dev->data->port_id); - return NULL; + return 0; } /** @@ -5347,10 +6539,10 @@ mlx5_counter_alloc(struct rte_eth_dev *dev) * @param[in] dev * Pointer to Ethernet device structure. * @param[in] cnt - * Pointer to counter to be free. + * Index to counter to be free. */ void -mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) +mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) { const struct mlx5_flow_driver_ops *fops; struct rte_flow_attr attr = { .transfer = 0 }; @@ -5371,7 +6563,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) * @param[in] dev * Pointer to Ethernet device structure. * @param[in] cnt - * Pointer to counter to query. + * Index to counter to query. * @param[in] clear * Set to clear counter statistics. * @param[out] pkts @@ -5383,7 +6575,7 @@ mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt) * 0 on success, a negative errno value otherwise. */ int -mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, +mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, bool clear, uint64_t *pkts, uint64_t *bytes) { const struct mlx5_flow_driver_ops *fops; @@ -5399,6 +6591,111 @@ mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, return -ENOTSUP; } +/** + * Allocate a new memory for the counter values wrapped by all the needed + * management. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object. + * + * @return + * 0 on success, a negative errno value otherwise. 
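The flow-director paths above no longer hang the mlx5_fdir spec off the rte_flow itself: the flow only carries an fdir flag, while a separate LIST maps the flow index (rix_flow) back to the spec so that delete and flush can release both. A toy version of that bookkeeping with stand-in types (struct fdir_spec and the helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Illustrative stand-in; replaces struct mlx5_fdir. */
struct fdir_spec { int match; };

struct fdir_flow {
	LIST_ENTRY(fdir_flow) next;
	uint32_t rix_flow;      /* index of the rte_flow in the ipool */
	struct fdir_spec *fdir; /* spec owned by this bookkeeping node */
};

LIST_HEAD(fdir_flow_list, fdir_flow);

int
main(void)
{
	struct fdir_flow_list fdir_flows = LIST_HEAD_INITIALIZER(fdir_flows);
	struct fdir_flow *node = calloc(1, sizeof(*node));
	struct fdir_flow *it;

	if (node == NULL)
		return 1;
	node->rix_flow = 7;
	node->fdir = calloc(1, sizeof(*node->fdir));
	LIST_INSERT_HEAD(&fdir_flows, node, next);
	/* Delete path: look the node up by flow index, then free both. */
	LIST_FOREACH(it, &fdir_flows, next)
		if (it->rix_flow == 7)
			break;
	if (it != NULL) {
		LIST_REMOVE(it, next);
		free(it->fdir);
		free(it);
		printf("fdir bookkeeping for flow 7 released\n");
	}
	return 0;
}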
+ */ +static int +mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) +{ + struct mlx5_devx_mkey_attr mkey_attr; + struct mlx5_counter_stats_mem_mng *mem_mng; + volatile struct flow_counter_stats *raw_data; + int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; + int size = (sizeof(struct flow_counter_stats) * + MLX5_COUNTERS_PER_POOL + + sizeof(struct mlx5_counter_stats_raw)) * raws_n + + sizeof(struct mlx5_counter_stats_mem_mng); + size_t pgsize = rte_mem_page_size(); + uint8_t *mem; + int i; + + if (pgsize == (size_t)-1) { + DRV_LOG(ERR, "Failed to get mem page size"); + rte_errno = ENOMEM; + return -ENOMEM; + } + mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY); + if (!mem) { + rte_errno = ENOMEM; + return -ENOMEM; + } + mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; + size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; + mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, + IBV_ACCESS_LOCAL_WRITE); + if (!mem_mng->umem) { + rte_errno = errno; + mlx5_free(mem); + return -rte_errno; + } + mkey_attr.addr = (uintptr_t)mem; + mkey_attr.size = size; + mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); + mkey_attr.pd = sh->pdn; + mkey_attr.log_entity_size = 0; + mkey_attr.pg_access = 0; + mkey_attr.klm_array = NULL; + mkey_attr.klm_num = 0; + mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering; + mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); + if (!mem_mng->dm) { + mlx5_glue->devx_umem_dereg(mem_mng->umem); + rte_errno = errno; + mlx5_free(mem); + return -rte_errno; + } + mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); + raw_data = (volatile struct flow_counter_stats *)mem; + for (i = 0; i < raws_n; ++i) { + mem_mng->raws[i].mem_mng = mem_mng; + mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; + } + for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) + LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, + mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i, + next); + LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); + sh->cmng.mem_mng = mem_mng; + return 0; +} + +/** + * Set the statistic memory to the new counter pool. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object. + * @param[in] pool + * Pointer to the pool to set the statistic memory. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, + struct mlx5_flow_counter_pool *pool) +{ + struct mlx5_flow_counter_mng *cmng = &sh->cmng; + /* Resize statistic memory once used out. */ + if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) && + mlx5_flow_create_counter_stat_mem_mng(sh)) { + DRV_LOG(ERR, "Cannot resize counter stat mem."); + return -1; + } + rte_spinlock_lock(&pool->sl); + pool->raw = cmng->mem_mng->raws + pool->index % + MLX5_CNT_CONTAINER_RESIZE; + rte_spinlock_unlock(&pool->sl); + pool->raw_hw = NULL; + return 0; +} + #define MLX5_POOL_QUERY_FREQ_US 1000000 /** @@ -5406,17 +6703,14 @@ mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt, * the counter pools. * * @param[in] sh - * Pointer to mlx5_ibv_shared object. + * Pointer to mlx5_dev_ctx_shared object. 
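mlx5_flow_create_counter_stat_mem_mng() above carves three regions out of a single allocation: the per-counter statistics for raws_n raw blocks, then the raw descriptors, with the management header placed at the very tail. A stand-alone sketch of that layout arithmetic, using simplified stand-in structures and an illustrative pool size instead of the real MLX5_* constants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the mlx5 counter structures and constants. */
#define COUNTERS_PER_POOL 512

struct mng;
struct stats { uint64_t hits; uint64_t bytes; };
struct raw { struct mng *mem_mng; volatile struct stats *data; };
struct mng { struct raw *raws; };

int
main(void)
{
	int raws_n = 4; /* e.g. resize step plus pending queries */
	size_t data_sz = sizeof(struct stats) * COUNTERS_PER_POOL * raws_n;
	size_t size = (sizeof(struct stats) * COUNTERS_PER_POOL +
		       sizeof(struct raw)) * raws_n + sizeof(struct mng);
	uint8_t *mem = calloc(1, size);
	struct mng *mng;
	int i;

	if (mem == NULL)
		return 1;
	/* Management header is carved from the tail of the allocation. */
	mng = (struct mng *)(void *)(mem + size) - 1;
	/* Raw descriptors start right after the statistics area. */
	mng->raws = (struct raw *)(void *)(mem + data_sz);
	for (i = 0; i < raws_n; ++i) {
		mng->raws[i].mem_mng = mng;
		mng->raws[i].data =
			(volatile struct stats *)(void *)mem +
			(size_t)i * COUNTERS_PER_POOL;
	}
	printf("stats [0, %zu), raws [%zu, %zu), mng [%zu, %zu)\n",
	       data_sz, data_sz, size - sizeof(struct mng),
	       size - sizeof(struct mng), size);
	free(mem);
	return 0;
}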
*/ void -mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) +mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh) { - struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); - uint32_t pools_n = rte_atomic16_read(&cont->n_valid); - uint32_t us; + uint32_t pools_n, us; - cont = MLX5_CNT_CONTAINER(sh, 1, 0); - pools_n += rte_atomic16_read(&cont->n_valid); + pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED); us = MLX5_POOL_QUERY_FREQ_US / pools_n; DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { @@ -5437,42 +6731,22 @@ mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) void mlx5_flow_query_alarm(void *arg) { - struct mlx5_ibv_shared *sh = arg; - struct mlx5_devx_obj *dcs; - uint16_t offset; + struct mlx5_dev_ctx_shared *sh = arg; int ret; - uint8_t batch = sh->cmng.batch; uint16_t pool_index = sh->cmng.pool_index; - struct mlx5_pools_container *cont; - struct mlx5_pools_container *mcont; + struct mlx5_flow_counter_mng *cmng = &sh->cmng; struct mlx5_flow_counter_pool *pool; + uint16_t n_valid; if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) goto set_alarm; -next_container: - cont = MLX5_CNT_CONTAINER(sh, batch, 1); - mcont = MLX5_CNT_CONTAINER(sh, batch, 0); - /* Check if resize was done and need to flip a container. */ - if (cont != mcont) { - if (cont->pools) { - /* Clean the old container. */ - rte_free(cont->pools); - memset(cont, 0, sizeof(*cont)); - } - rte_cio_wmb(); - /* Flip the host container. */ - sh->cmng.mhi[batch] ^= (uint8_t)2; - cont = mcont; - } - if (!cont->pools) { - /* 2 empty containers case is unexpected. */ - if (unlikely(batch != sh->cmng.batch)) - goto set_alarm; - batch ^= 0x1; - pool_index = 0; - goto next_container; - } - pool = cont->pools[pool_index]; + rte_spinlock_lock(&cmng->pool_update_sl); + pool = cmng->pools[pool_index]; + n_valid = cmng->n_valid; + rte_spinlock_unlock(&cmng->pool_update_sl); + /* Set the statistic memory to the new created pool. */ + if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))) + goto set_alarm; if (pool->raw_hw) /* There is a pool query in progress. */ goto set_alarm; @@ -5481,14 +6755,19 @@ next_container: if (!pool->raw_hw) /* No free counter statistics raw memory. */ goto set_alarm; - dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read - (&pool->a64_dcs); - offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; - ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - - offset, NULL, NULL, + /* + * Identify the counters released between query trigger and query + * handle more efficiently. The counter released in this gap period + * should wait for a new round of query as the new arrived packets + * will not be taken into account. 
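mlx5_set_query_alarm() above spreads a fixed query budget evenly over the valid pools, so the alarm period shrinks as pools are added. A tiny illustration of that pacing (the constant mirrors MLX5_POOL_QUERY_FREQ_US; the pool counts are illustrative):

#include <stdint.h>
#include <stdio.h>

#define POOL_QUERY_FREQ_US 1000000

int
main(void)
{
	uint32_t pools_n;

	for (pools_n = 1; pools_n <= 4; pools_n++)
		printf("%u pool(s) -> re-arm the query alarm every %u us\n",
		       pools_n, POOL_QUERY_FREQ_US / pools_n);
	return 0;
}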
+ */ + pool->query_gen++; + ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0, + MLX5_COUNTERS_PER_POOL, + NULL, NULL, pool->raw_hw->mem_mng->dm->id, (void *)(uintptr_t) - (pool->raw_hw->data + offset), + pool->raw_hw->data, sh->devx_comp, (uint64_t)(uintptr_t)pool); if (ret) { @@ -5497,49 +6776,126 @@ next_container: pool->raw_hw = NULL; goto set_alarm; } - pool->raw_hw->min_dcs_id = dcs->id; LIST_REMOVE(pool->raw_hw, next); sh->cmng.pending_queries++; pool_index++; - if (pool_index >= rte_atomic16_read(&cont->n_valid)) { - batch ^= 0x1; + if (pool_index >= n_valid) pool_index = 0; - } set_alarm: - sh->cmng.batch = batch; sh->cmng.pool_index = pool_index; mlx5_set_query_alarm(sh); } +/** + * Check and callback event for new aged flow in the counter pool + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object. + * @param[in] pool + * Pointer to Current counter pool. + */ +static void +mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh, + struct mlx5_flow_counter_pool *pool) +{ + struct mlx5_priv *priv; + struct mlx5_flow_counter *cnt; + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param; + struct mlx5_counter_stats_raw *cur = pool->raw_hw; + struct mlx5_counter_stats_raw *prev = pool->raw; + const uint64_t curr_time = MLX5_CURR_TIME_SEC; + const uint32_t time_delta = curr_time - pool->time_of_last_age_check; + uint16_t expected = AGE_CANDIDATE; + uint32_t i; + + pool->time_of_last_age_check = curr_time; + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt = MLX5_POOL_GET_CNT(pool, i); + age_param = MLX5_CNT_TO_AGE(cnt); + if (__atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) != AGE_CANDIDATE) + continue; + if (cur->data[i].hits != prev->data[i].hits) { + __atomic_store_n(&age_param->sec_since_last_hit, 0, + __ATOMIC_RELAXED); + continue; + } + if (__atomic_add_fetch(&age_param->sec_since_last_hit, + time_delta, + __ATOMIC_RELAXED) <= age_param->timeout) + continue; + /** + * Hold the lock first, or if between the + * state AGE_TMOUT and tailq operation the + * release happened, the release procedure + * may delete a non-existent tailq node. + */ + priv = rte_eth_devices[age_param->port_id].data->dev_private; + age_info = GET_PORT_AGE_INFO(priv); + rte_spinlock_lock(&age_info->aged_sl); + if (__atomic_compare_exchange_n(&age_param->state, &expected, + AGE_TMOUT, false, + __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next); + MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW); + } + rte_spinlock_unlock(&age_info->aged_sl); + } + for (i = 0; i < sh->max_port; i++) { + age_info = &sh->port[i].age_info; + if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW)) + continue; + if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) + rte_eth_dev_callback_process + (&rte_eth_devices[sh->port[i].devx_ih_port_id], + RTE_ETH_EVENT_FLOW_AGED, NULL); + age_info->flags = 0; + } +} + /** * Handler for the HW respond about ready values from an asynchronous batch * query. This function is probably called by the host thread. * * @param[in] sh - * The pointer to the shared IB device context. + * The pointer to the shared device context. * @param[in] async_id * The Devx async ID. * @param[in] status * The status of the completion. 
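mlx5_flow_aging_check() above ages a counter only when its hit count has not moved: the elapsed seconds are accumulated and the state flips from AGE_CANDIDATE to AGE_TMOUT exactly once via compare-and-exchange. A simplified, single-counter sketch of that decision, using stand-in types (the real code also links the aged counter into the per-port list under a spinlock):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum age_state { AGE_FREE, AGE_CANDIDATE, AGE_TMOUT };

struct age_param {
	uint16_t state;
	uint32_t timeout;            /* allowed idle time, in seconds */
	uint32_t sec_since_last_hit;
};

/* Return 1 when the counter just aged out, 0 otherwise. */
static int
age_one(struct age_param *ap, uint64_t prev_hits, uint64_t cur_hits,
	uint32_t time_delta)
{
	uint16_t expected = AGE_CANDIDATE;

	if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) != AGE_CANDIDATE)
		return 0;
	if (cur_hits != prev_hits) {
		/* Traffic was seen: restart the idle clock. */
		__atomic_store_n(&ap->sec_since_last_hit, 0, __ATOMIC_RELAXED);
		return 0;
	}
	if (__atomic_add_fetch(&ap->sec_since_last_hit, time_delta,
			       __ATOMIC_RELAXED) <= ap->timeout)
		return 0;
	/* Flip CANDIDATE -> TMOUT exactly once, even if raced. */
	return __atomic_compare_exchange_n(&ap->state, &expected, AGE_TMOUT,
					   false, __ATOMIC_RELAXED,
					   __ATOMIC_RELAXED);
}

int
main(void)
{
	struct age_param ap = { AGE_CANDIDATE, 10, 0 };

	printf("aged after 6 idle seconds: %d\n", age_one(&ap, 5, 5, 6));
	printf("aged after 12 idle seconds: %d\n", age_one(&ap, 5, 5, 6));
	return 0;
}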
*/ void -mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, +mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh, uint64_t async_id, int status) { struct mlx5_flow_counter_pool *pool = (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; struct mlx5_counter_stats_raw *raw_to_free; + uint8_t query_gen = pool->query_gen ^ 1; + struct mlx5_flow_counter_mng *cmng = &sh->cmng; + enum mlx5_counter_type cnt_type = + pool->is_aged ? MLX5_COUNTER_TYPE_AGE : + MLX5_COUNTER_TYPE_ORIGIN; if (unlikely(status)) { raw_to_free = pool->raw_hw; } else { raw_to_free = pool->raw; + if (pool->is_aged) + mlx5_flow_aging_check(sh, pool); rte_spinlock_lock(&pool->sl); pool->raw = pool->raw_hw; rte_spinlock_unlock(&pool->sl); - rte_atomic64_add(&pool->query_gen, 1); /* Be sure the new raw counters data is updated in memory. */ - rte_cio_wmb(); + rte_io_wmb(); + if (!TAILQ_EMPTY(&pool->counters[query_gen])) { + rte_spinlock_lock(&cmng->csl[cnt_type]); + TAILQ_CONCAT(&cmng->counters[cnt_type], + &pool->counters[query_gen], next); + rte_spinlock_unlock(&cmng->csl[cnt_type]); + } } LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); pool->raw_hw = NULL; @@ -5555,6 +6911,8 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, * Value is part of flow rule created by request external to PMD. * @param[in] group * rte_flow group index value. + * @param[out] fdb_def_rule + * Whether fdb jump to table 1 is configured. * @param[out] table * HW table value. * @param[out] error @@ -5565,10 +6923,10 @@ mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, */ int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, - uint32_t group, uint32_t *table, + uint32_t group, bool fdb_def_rule, uint32_t *table, struct rte_flow_error *error) { - if (attributes->transfer && external) { + if (attributes->transfer && external && fdb_def_rule) { if (group == UINT32_MAX) return rte_flow_error_set (error, EINVAL, @@ -5618,7 +6976,8 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) }; struct rte_flow_action actions[] = { [0] = { - .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, .conf = &(struct mlx5_flow_action_copy_mreg){ .src = REG_C_1, .dst = idx, @@ -5634,21 +6993,91 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; + uint32_t flow_idx; struct rte_flow *flow; struct rte_flow_error error; if (!config->dv_flow_en) break; /* Create internal flow, validation skips copy action. */ - flow = flow_list_create(dev, NULL, &attr, items, - actions, false, &error); + flow_idx = flow_list_create(dev, NULL, &attr, items, + actions, false, &error); + flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + flow_idx); if (!flow) continue; if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) config->flow_mreg_c[n++] = idx; - flow_list_destroy(dev, NULL, flow); + flow_list_destroy(dev, NULL, flow_idx); } for (; n < MLX5_MREG_C_NUM; ++n) - config->flow_mreg_c[n] = REG_NONE; + config->flow_mreg_c[n] = REG_NON; return 0; } + +/** + * Dump flow raw hw data to file + * + * @param[in] dev + * The pointer to Ethernet device. + * @param[in] file + * A pointer to a file for output. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * @return + * 0 on success, a nagative value otherwise. 
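The completion handler above recycles only the counters parked in the previous generation (query_gen ^ 1), so a counter released while a batch query is in flight waits one extra round before it can be reallocated, exactly the "gap period" described in the trigger path. A toy model of that double buffering, with plain integers standing in for the per-generation TAILQs:

#include <stdint.h>
#include <stdio.h>

struct pool {
	uint64_t query_gen;
	int pending[2]; /* counters parked per generation */
	int free_cnt;   /* counters available for reallocation */
};

static void
query_trigger(struct pool *p)
{
	p->query_gen++; /* releases from now on belong to the new generation */
}

static void
counter_release(struct pool *p)
{
	p->pending[p->query_gen & 1]++;
}

static void
query_complete(struct pool *p)
{
	int gen = (p->query_gen ^ 1) & 1; /* generation closed by this query */

	p->free_cnt += p->pending[gen];
	p->pending[gen] = 0;
}

int
main(void)
{
	struct pool p = { 0, {0, 0}, 0 };

	query_trigger(&p);   /* query #1 in flight */
	counter_release(&p); /* released during query #1: must wait a round */
	query_complete(&p);  /* handler for query #1: nothing recycled yet */
	printf("free after first completion: %d\n", p.free_cnt);
	query_trigger(&p);   /* query #2 */
	query_complete(&p);  /* now the parked counter can be recycled */
	printf("free after second completion: %d\n", p.free_cnt);
	return 0;
}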
+ */
+int
+mlx5_flow_dev_dump(struct rte_eth_dev *dev,
+		   FILE *file,
+		   struct rte_flow_error *error __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+	if (!priv->config.dv_flow_en) {
+		if (fputs("device dv flow disabled\n", file) <= 0)
+			return -errno;
+		return -ENOTSUP;
+	}
+	return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
+				       sh->tx_domain, file);
+}
+
+/**
+ * Get aged-out flows.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] contexts
+ *   The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ *   The length of the context array.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   The number of aged-out contexts reported on success, a negative errno
+ *   value otherwise. If @p nb_contexts is 0, the total number of aged-out
+ *   contexts is returned; otherwise, the number of aged-out flows reported
+ *   in the context array is returned.
+ */
+int
+mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
+			uint32_t nb_contexts, struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+		return fops->get_aged_flows(dev, contexts, nb_contexts,
+					    error);
+	}
+	DRV_LOG(ERR,
+		"port %u get aged flows is not supported.",
+		dev->data->port_id);
+	return -ENOTSUP;
+}
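At the application level, the aged flows exposed by mlx5_flow_get_aged_flows() are consumed through the generic rte_flow API: register for RTE_ETH_EVENT_FLOW_AGED and drain the contexts with rte_flow_get_aged_flows(). A sketch of that usage, assuming an already configured and started port inside a DPDK application; AGED_BATCH and register_flow_aging() are illustrative names:

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative batch size for draining aged-out contexts. */
#define AGED_BATCH 64

static int
flow_aged_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		   void *cb_arg, void *ret_param)
{
	void *contexts[AGED_BATCH];
	struct rte_flow_error error;
	int n;

	(void)event;
	(void)cb_arg;
	(void)ret_param;
	/* The contexts are the rte_flow_action_age.context values set at
	 * flow creation time; the application decides what to tear down. */
	n = rte_flow_get_aged_flows(port_id, contexts, AGED_BATCH, &error);
	if (n > 0)
		printf("port %u: %d flow(s) aged out\n", port_id, n);
	return 0;
}

/* Call once the port is configured and started. */
void
register_flow_aging(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
				      flow_aged_event_cb, NULL);
}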