X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=c5c767aaee1a40e0890882db72849a68061109d7;hb=3186a3a49c3a33502ba6189a80b8317c0a064830;hp=dbeca571b64b224fdb5797466bb86a8f52515a32;hpb=ec962bad14e143ac6853a0acbae13755f450f646;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index dbeca571b6..c5c767aaee 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -114,6 +114,30 @@ struct mlx5_flow_expand_rss {
 	} entry[];
 };
 
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item);
+
+static bool
+mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
+{
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+	case RTE_FLOW_ITEM_TYPE_UDP:
+	case RTE_FLOW_ITEM_TYPE_TCP:
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+	case RTE_FLOW_ITEM_TYPE_GRE:
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
 static enum rte_flow_item_type
 mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
 {
@@ -273,8 +297,11 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 		addr = buf->entry[0].pattern;
 	}
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
-			last_item = item;
+		if (!mlx5_flow_is_rss_expandable_item(item)) {
+			user_pattern_size += sizeof(*item);
+			continue;
+		}
+		last_item = item;
 		for (i = 0; node->next && node->next[i]; ++i) {
 			next = &graph[node->next[i]];
 			if (next->type == item->type)
@@ -2097,7 +2124,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "IPv4 cannot follow L2/VLAN layer "
 					  "which ether type is not IPv4");
-	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
 		if (mask && spec)
 			next_proto = mask->hdr.next_proto_id &
 				     spec->hdr.next_proto_id;
@@ -2205,7 +2232,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 					  "which ether type is not IPv6");
 	if (mask && mask->hdr.proto == UINT8_MAX && spec)
 		next_proto = spec->hdr.proto;
-	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
 		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -6174,6 +6201,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 					   mlx5_support_expansion, graph_root);
 		MLX5_ASSERT(ret > 0 &&
 			    (unsigned int)ret < sizeof(expand_buffer.buffer));
+		if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) {
+			for (i = 0; i < buf->entries; ++i)
+				mlx5_dbg__print_pattern(buf->entry[i].pattern);
+		}
 	} else {
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
@@ -8990,3 +9021,22 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 {
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
+static void
+mlx5_dbg__print_pattern(const struct rte_flow_item *item)
+{
+	int ret;
+	struct rte_flow_error error;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		char *item_name;
+		ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &item_name,
+				    sizeof(item_name),
+				    (void *)(uintptr_t)item->type, &error);
+		if (ret > 0)
+			printf("%s ", item_name);
+		else
+			printf("%d\n", (int)item->type);
+	}
+	printf("END\n");
+}
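
For reference, a minimal standalone sketch of the rte_flow_conv() name lookup that the new mlx5_dbg__print_pattern() helper relies on. The print_pattern() wrapper and the eth/ipv4/udp sample pattern are hypothetical stand-ins for illustration, not code from this patch; only rte_flow_conv(), RTE_FLOW_CONV_OP_ITEM_NAME_PTR, and the item type enum come from the rte_flow API.

#include <stdio.h>
#include <stdint.h>

#include <rte_flow.h>

/* Resolve and print the name of each item in an END-terminated pattern,
 * mirroring the logic of mlx5_dbg__print_pattern() above. */
static void
print_pattern(const struct rte_flow_item *item)
{
	struct rte_flow_error error;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		char *item_name;
		/* ITEM_NAME_PTR stores a pointer to a statically allocated
		 * name string into item_name (no buffer copy); on success
		 * rte_flow_conv() returns the name length plus one, and a
		 * negative errno-style value for unknown item types. */
		int ret = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
					&item_name, sizeof(item_name),
					(void *)(uintptr_t)item->type, &error);
		if (ret > 0)
			printf("%s ", item_name);
		else
			printf("%d ", (int)item->type);
	}
	printf("END\n");
}

int
main(void)
{
	/* Hypothetical expansion result: one of the patterns RSS expansion
	 * could emit for an eth flow with UDP RSS types requested. */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	print_pattern(pattern); /* prints: eth ipv4 udp END */
	return 0;
}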