diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 305b2ec019..83155c7a07 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1,37 +1,11 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2016 6WIND S.A.
- *   Copyright 2016 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
  */
 
 #include
+#include
+#include
 #include
 
 /* Verbs header. */
@@ -44,411 +18,323 @@
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
+#include
 
 #include "mlx5.h"
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
-
-/* Define minimal priority for control plane flows. */
-#define MLX5_CTRL_FLOW_PRIORITY 4
-
-/* Internet Protocol versions. 
*/ -#define MLX5_IPV4 4 -#define MLX5_IPV6 6 - -#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT -struct ibv_counter_set_init_attr { - int dummy; -}; -struct ibv_flow_spec_counter_action { - int dummy; -}; -struct ibv_counter_set { - int dummy; -}; - -static inline int -ibv_destroy_counter_set(struct ibv_counter_set *cs) -{ - (void)cs; - return -ENOTSUP; -} -#endif +#include "mlx5_glue.h" /* Dev ops structure defined in mlx5.c */ extern const struct eth_dev_ops mlx5_dev_ops; extern const struct eth_dev_ops mlx5_dev_ops_isolate; -static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - void *data); - -struct mlx5_flow_parse; - -static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size); - -static int -mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); - -static int -mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); - -/* Hash RX queue types. */ -enum hash_rxq_type { - HASH_RXQ_TCPV4, - HASH_RXQ_UDPV4, - HASH_RXQ_IPV4, - HASH_RXQ_TCPV6, - HASH_RXQ_UDPV6, - HASH_RXQ_IPV6, - HASH_RXQ_ETH, -}; - -/* Initialization data for hash RX queue. */ -struct hash_rxq_init { - uint64_t hash_fields; /* Fields that participate in the hash. */ - uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */ - unsigned int flow_priority; /* Flow priority to use. */ - unsigned int ip_version; /* Internet protocol. */ +/* Pattern outer Layer bits. */ +#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) +#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) +#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) +#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) + +/* Pattern inner Layer bits. */ +#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) +#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) +#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) +#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) +#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) +#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN (1u << 12) +#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) +#define MLX5_FLOW_LAYER_GRE (1u << 14) +#define MLX5_FLOW_LAYER_MPLS (1u << 15) + +/* Outer Masks. */ +#define MLX5_FLOW_LAYER_OUTER_L3 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) +#define MLX5_FLOW_LAYER_OUTER_L4 \ + (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) +#define MLX5_FLOW_LAYER_OUTER \ + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ + MLX5_FLOW_LAYER_OUTER_L4) + +/* Tunnel Masks. */ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS) + +/* Inner Masks. 
*/ +#define MLX5_FLOW_LAYER_INNER_L3 \ + (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) +#define MLX5_FLOW_LAYER_INNER_L4 \ + (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) +#define MLX5_FLOW_LAYER_INNER \ + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ + MLX5_FLOW_LAYER_INNER_L4) + +/* Actions that modify the fate of matching traffic. */ +#define MLX5_FLOW_FATE_DROP (1u << 0) +#define MLX5_FLOW_FATE_QUEUE (1u << 1) +#define MLX5_FLOW_FATE_RSS (1u << 2) + +/* Modify a packet. */ +#define MLX5_FLOW_MOD_FLAG (1u << 0) +#define MLX5_FLOW_MOD_MARK (1u << 1) +#define MLX5_FLOW_MOD_COUNT (1u << 2) + +/* Actions */ +#define MLX5_FLOW_ACTION_DROP (1u << 0) +#define MLX5_FLOW_ACTION_QUEUE (1u << 1) +#define MLX5_FLOW_ACTION_RSS (1u << 2) +#define MLX5_FLOW_ACTION_FLAG (1u << 3) +#define MLX5_FLOW_ACTION_MARK (1u << 4) +#define MLX5_FLOW_ACTION_COUNT (1u << 5) + +#define MLX5_FLOW_FATE_ACTIONS \ + (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS) + +/* possible L3 layers protocols filtering. */ +#define MLX5_IP_PROTOCOL_TCP 6 +#define MLX5_IP_PROTOCOL_UDP 17 +#define MLX5_IP_PROTOCOL_GRE 47 +#define MLX5_IP_PROTOCOL_MPLS 147 + +/* Priority reserved for default flows. */ +#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1) + +enum mlx5_expansion { + MLX5_EXPANSION_ROOT, + MLX5_EXPANSION_ROOT_OUTER, + MLX5_EXPANSION_ROOT_ETH_VLAN, + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_VLAN, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_ETH, + MLX5_EXPANSION_ETH_VLAN, + MLX5_EXPANSION_VLAN, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP, + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP, }; -/* Initialization data for hash RX queues. */ -const struct hash_rxq_init hash_rxq_init[] = { - [HASH_RXQ_TCPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, +/** Supported expansion of items. 
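/*
 * Editor's note: a minimal sketch of how the bit-fields above are meant to
 * be used; only the MLX5_FLOW_* macros come from this patch, the helper
 * name is hypothetical. An item translator ORs one layer bit per pattern
 * item into the flow, switching to the INNER variants once a tunnel bit is
 * present, so validation reduces to simple mask tests.
 */
static int
example_add_udp_layer(uint32_t *layers)
{
	/* Any tunnel bit set means the next items describe inner headers. */
	const int tunnel = !!(*layers & MLX5_FLOW_LAYER_TUNNEL);

	/* UDP cannot follow another L4 header on the same side. */
	if (*layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
		       MLX5_FLOW_LAYER_OUTER_L4))
		return -EINVAL; /* needs <errno.h> */
	*layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
		   MLX5_FLOW_LAYER_OUTER_L4_UDP;
	return 0;
}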
*/ +static const struct rte_flow_expand_node mlx5_support_expansion[] = { + [MLX5_EXPANSION_ROOT] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_OUTER] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_IPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4), - .dpdk_rss_hf = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4), - .flow_priority = 1, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_TCPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_IPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6), - .dpdk_rss_hf = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6), - .flow_priority = 1, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_ETH] = { - .hash_fields = 0, - .dpdk_rss_hf = 0, - .flow_priority = 2, + [MLX5_EXPANSION_OUTER_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, }, -}; - -/* Number of entries in hash_rxq_init[]. */ -const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); - -/** Structure for holding counter stats. */ -struct mlx5_flow_counter_stats { - uint64_t hits; /**< Number of packets matched by the rule. */ - uint64_t bytes; /**< Number of bytes matched by the rule. */ -}; - -/** Structure for Drop queue. */ -struct mlx5_hrxq_drop { - struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */ - struct ibv_qp *qp; /**< Verbs queue pair. */ - struct ibv_wq *wq; /**< Verbs work queue. */ - struct ibv_cq *cq; /**< Verbs completion queue. */ -}; - -/* Flows structures. */ -struct mlx5_flow { - uint64_t hash_fields; /**< Fields that participate in the hash. */ - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. */ - struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */ -}; - -/* Drop flows structures. */ -struct mlx5_flow_drop { - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. 
*/ -}; - -struct rte_flow { - TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ - uint32_t mark:1; /**< Set if the flow is marked. */ - uint32_t drop:1; /**< Drop queue. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ - uint16_t (*queues)[]; /**< Queues indexes to use. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ - struct mlx5_flow_counter_stats counter_stats;/**mask is not provided. When - * \default_mask is also NULL, the full supported bit-mask (\mask) is - * used instead. - */ - const void *default_mask; - /** Bit-masks size in bytes. */ - const unsigned int mask_sz; - /** - * Conversion function from rte_flow to NIC specific flow. - * - * @param item - * rte_flow item to convert. - * @param default_mask - * Default bit-masks to use when item->mask is not provided. - * @param data - * Internal structure to store the conversion. - * - * @return - * 0 on success, negative value otherwise. - */ - int (*convert)(const struct rte_flow_item *item, - const void *default_mask, - void *data); - /** Size in bytes of the destination structure. */ - const unsigned int dst_sz; - /** List of possible following items. */ - const enum rte_flow_item_type *const items; -}; - -/** Valid action for this PMD. */ -static const enum rte_flow_action_type valid_actions[] = { - RTE_FLOW_ACTION_TYPE_DROP, - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_MARK, - RTE_FLOW_ACTION_TYPE_FLAG, -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - RTE_FLOW_ACTION_TYPE_COUNT, -#endif - RTE_FLOW_ACTION_TYPE_END, -}; - -/** Graph of supported items and associated actions. */ -static const struct mlx5_flow_items mlx5_flow_items[] = { - [RTE_FLOW_ITEM_TYPE_END] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_VXLAN), + [MLX5_EXPANSION_OUTER_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_GRE), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, }, - [RTE_FLOW_ITEM_TYPE_ETH] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_eth){ - .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .type = -1, - }, - .default_mask = &rte_flow_item_eth_mask, - .mask_sz = sizeof(struct rte_flow_item_eth), - .convert = mlx5_flow_create_eth, - .dst_sz = sizeof(struct ibv_flow_spec_eth), + [MLX5_EXPANSION_OUTER_IPV4_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, }, - [RTE_FLOW_ITEM_TYPE_VLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vlan){ - .tci = -1, - }, - .default_mask = &rte_flow_item_vlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vlan), - .convert = mlx5_flow_create_vlan, - .dst_sz = 0, + [MLX5_EXPANSION_OUTER_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, }, - [RTE_FLOW_ITEM_TYPE_IPV4] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv4){ - .hdr = { - .src_addr = -1, - .dst_addr = -1, - 
.type_of_service = -1, - .next_proto_id = -1, - }, - }, - .default_mask = &rte_flow_item_ipv4_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv4), - .convert = mlx5_flow_create_ipv4, - .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext), + [MLX5_EXPANSION_OUTER_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, }, - [RTE_FLOW_ITEM_TYPE_IPV6] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv6){ - .hdr = { - .src_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .dst_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .vtc_flow = -1, - .proto = -1, - .hop_limits = -1, - }, - }, - .default_mask = &rte_flow_item_ipv6_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv6), - .convert = mlx5_flow_create_ipv6, - .dst_sz = sizeof(struct ibv_flow_spec_ipv6), + [MLX5_EXPANSION_OUTER_IPV6_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, }, - [RTE_FLOW_ITEM_TYPE_UDP] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_udp){ - .hdr = { - .src_port = -1, - .dst_port = -1, - }, - }, - .default_mask = &rte_flow_item_udp_mask, - .mask_sz = sizeof(struct rte_flow_item_udp), - .convert = mlx5_flow_create_udp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_OUTER_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, - [RTE_FLOW_ITEM_TYPE_TCP] = { - .actions = valid_actions, - .mask = &(const struct rte_flow_item_tcp){ - .hdr = { - .src_port = -1, - .dst_port = -1, - }, - }, - .default_mask = &rte_flow_item_tcp_mask, - .mask_sz = sizeof(struct rte_flow_item_tcp), - .convert = mlx5_flow_create_tcp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_VXLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, }, - [RTE_FLOW_ITEM_TYPE_VXLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vxlan){ - .vni = "\xff\xff\xff", - }, - .default_mask = &rte_flow_item_vxlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vxlan), - .convert = mlx5_flow_create_vxlan, - .dst_sz = sizeof(struct ibv_flow_spec_tunnel), + [MLX5_EXPANSION_VXLAN_GPE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + }, + [MLX5_EXPANSION_GRE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .type = RTE_FLOW_ITEM_TYPE_GRE, + }, + [MLX5_EXPANSION_MPLS] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_MPLS, + }, + [MLX5_EXPANSION_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, 
+ [MLX5_EXPANSION_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, + }, + [MLX5_EXPANSION_IPV4_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + }, + [MLX5_EXPANSION_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + }, + [MLX5_EXPANSION_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, + }, + [MLX5_EXPANSION_IPV6_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + }, + [MLX5_EXPANSION_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, }; -/** Structure to pass to the conversion function. */ -struct mlx5_flow_parse { - uint32_t inner; /**< Set once VXLAN is encountered. */ - uint32_t allmulti:1; /**< Set once allmulti dst MAC is encountered. */ - uint32_t create:1; - /**< Whether resources should remain after a validate. */ - uint32_t drop:1; /**< Target is a drop queue. */ - uint32_t mark:1; /**< Mark is present in the flow. */ - uint32_t count:1; /**< Count is present in the flow. */ - uint32_t mark_id; /**< Mark identifier. */ - uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - enum hash_rxq_type layer; /**< Last pattern layer detected. */ - struct ibv_counter_set *cs; /**< Holds the counter set for the rule */ +/** Handles information leading to a drop fate. */ +struct mlx5_flow_verbs { + LIST_ENTRY(mlx5_flow_verbs) next; + unsigned int size; /**< Size of the attribute. */ struct { - struct ibv_flow_attr *ibv_attr; - /**< Pointer to Verbs attributes. */ - unsigned int offset; - /**< Current position or total size of the attribute. */ - } queue[RTE_DIM(hash_rxq_init)]; + struct ibv_flow_attr *attr; + /**< Pointer to the Specification buffer. */ + uint8_t *specs; /**< Pointer to the specifications. */ + }; + struct ibv_flow *flow; /**< Verbs flow pointer. */ + struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */ + uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */ +}; + +/** Device flow structure. */ +struct mlx5_flow { + LIST_ENTRY(mlx5_flow) next; + struct rte_flow *flow; /**< Pointer to the main flow. */ + union { + struct mlx5_flow_verbs verbs; /**< Holds the verbs dev-flow. */ + }; +}; + +/* Counters information. */ +struct mlx5_flow_counter { + LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */ + uint32_t shared:1; /**< Share counter ID with other flow rules. */ + uint32_t ref_cnt:31; /**< Reference counter. */ + uint32_t id; /**< Counter ID. */ + struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ + uint64_t hits; /**< Number of packets matched by the rule. */ + uint64_t bytes; /**< Number of bytes matched by the rule. */ +}; + +/* Flow structure. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct rte_flow_attr attributes; /**< User flow attribute. */ + uint32_t layers; + /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ + uint32_t modifier; + /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. 
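/*
 * Editor's note on the expansion table above: it drives the RSS expansion
 * helper, which turns a single flow with an RSS action into the set of
 * more specific patterns the device can hash on; an expanded pattern is
 * kept only when the node's .rss_types intersects the requested ETH_RSS_*
 * bits. Below is a rough sketch of walking the graph, assuming the .next
 * lists built by RTE_FLOW_EXPAND_RSS_NEXT() are zero-terminated
 * (MLX5_EXPANSION_ROOT is never a successor); needs <stdio.h>.
 */
static void
example_walk(const struct rte_flow_expand_node graph[], int node, int depth)
{
	const int *next = graph[node].next;

	printf("%*sitem type %d, rss types %#llx\n", depth, "",
	       (int)graph[node].type,
	       (unsigned long long)graph[node].rss_types);
	for (; next && *next; ++next)
		example_walk(graph, *next, depth + 2);
}
/* e.g. example_walk(mlx5_support_expansion, MLX5_EXPANSION_ROOT, 0); */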
*/
+	uint32_t fate;
+	/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
+	LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
+	struct mlx5_flow_verbs *cur_verbs;
+	/**< Current Verbs flow structure being filled. */
+	struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
+	struct rte_flow_action_rss rss; /**< RSS context. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	void *nl_flow; /**< Netlink flow buffer if relevant. */
+	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
+	/**< Device flows that are part of the flow. */
 };
 
 static const struct rte_flow_ops mlx5_flow_ops = {
@@ -456,12 +342,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.create = mlx5_flow_create,
 	.destroy = mlx5_flow_destroy,
 	.flush = mlx5_flow_flush,
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-	.query = mlx5_flow_query,
-#else
-	.query = NULL,
-#endif
 	.isolate = mlx5_flow_isolate,
+	.query = mlx5_flow_query,
 };
 
 /* Convert FDIR request to Generic flow. */
@@ -475,10 +357,18 @@ struct mlx5_fdir {
 		struct rte_flow_item_ipv4 ipv4;
 		struct rte_flow_item_ipv6 ipv6;
 	} l3;
+	union {
+		struct rte_flow_item_ipv4 ipv4;
+		struct rte_flow_item_ipv6 ipv6;
+	} l3_mask;
 	union {
 		struct rte_flow_item_udp udp;
 		struct rte_flow_item_tcp tcp;
 	} l4;
+	union {
+		struct rte_flow_item_udp udp;
+		struct rte_flow_item_tcp tcp;
+	} l4_mask;
 	struct rte_flow_action_queue queue;
 };
 
@@ -488,785 +378,409 @@ struct ibv_spec_header {
 	uint16_t size;
 };
 
+/*
+ * Number of sub-priorities.
+ * For each kind of pattern matching, i.e. L2, L3 and L4, to match
+ * correctly on the NIC (firmware dependent), L4 must have the highest
+ * priority, followed by L3 and finally L2.
+ */
+#define MLX5_PRIORITY_MAP_L2 2
+#define MLX5_PRIORITY_MAP_L3 1
+#define MLX5_PRIORITY_MAP_L4 0
+#define MLX5_PRIORITY_MAP_MAX 3
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
+
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+	{ 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+	{
+		.tunnel = MLX5_FLOW_LAYER_VXLAN,
+		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_GRE,
+		.ptype = RTE_PTYPE_TUNNEL_GRE,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_MPLS,
+		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+	},
+};
+
 /**
- * Check support for a given item.
+ * Discover the maximum number of flow priorities available.
  *
- * @param item[in]
- *   Item specification.
- * @param mask[in]
- *   Bit-masks covering supported fields to compare with spec, last and mask in
- *   \item.
- * @param size
- *   Bit-Mask size in bytes.
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  *
  * @return
- *   0 on success.
+ *   number of supported flow priorities on success, a negative errno
+ *   value otherwise and rte_errno is set. 
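/*
 * Editor's note: a worked example of the mapping above, mirroring
 * mlx5_flow_adjust_priority() further down. On a device exposing 16 Verbs
 * priorities, a flow created at rte_flow priority 1 whose most specific
 * layer is L3 lands at priority_map_5[1][MLX5_PRIORITY_MAP_L3] == 4,
 * while an L4 match at the same flow priority lands at 3 and therefore
 * takes precedence (a lower Verbs priority value wins).
 */
static uint32_t
example_verbs_priority(uint32_t flow_prio, uint32_t subpriority)
{
	/* 16 Verbs priorities -> use the 5-entry map. */
	return priority_map_5[flow_prio][subpriority];
}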
*/ -static int -mlx5_flow_item_validate(const struct rte_flow_item *item, - const uint8_t *mask, unsigned int size) +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) { - int ret = 0; - - if (!item->spec && (item->mask || item->last)) - return -1; - if (item->spec && !item->mask) { - unsigned int i; - const uint8_t *spec = item->spec; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; - } - if (item->last && !item->mask) { - unsigned int i; - const uint8_t *spec = item->last; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); + uint16_t vprio[] = { 8, 16 }; + int i; + int priority = 0; + + if (!drop) { + rte_errno = ENOTSUP; + return -rte_errno; } - if (item->mask) { - unsigned int i; - const uint8_t *spec = item->mask; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; + for (i = 0; i != RTE_DIM(vprio); i++) { + flow_attr.attr.priority = vprio[i] - 1; + flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); + if (!flow) + break; + claim_zero(mlx5_glue->destroy_flow(flow)); + priority = vprio[i]; } - if (item->spec && item->last) { - uint8_t spec[size]; - uint8_t last[size]; - const uint8_t *apply = mask; - unsigned int i; - - if (item->mask) - apply = item->mask; - for (i = 0; i < size; ++i) { - spec[i] = ((const uint8_t *)item->spec)[i] & apply[i]; - last[i] = ((const uint8_t *)item->last)[i] & apply[i]; - } - ret = memcmp(spec, last, size); + switch (priority) { + case 8: + priority = RTE_DIM(priority_map_3); + break; + case 16: + priority = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u verbs maximum priority: %d expected 8/16", + dev->data->port_id, vprio[i]); + return -rte_errno; } - return ret; + mlx5_hrxq_drop_release(dev); + DRV_LOG(INFO, "port %u flow maximum priority: %d", + dev->data->port_id, priority); + return priority; } /** - * Copy the RSS configuration from the user ones. - * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param rss_conf - * User RSS configuration to save. + * Adjust flow priority. * - * @return - * 0 on success, errno value on failure. + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to an rte flow. 
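/*
 * Editor's note: the function above probes the device with a throw-away
 * drop rule at Verbs priority 7, then 15; the highest successful probe
 * tells how many priorities the firmware exposes. A sketch of the expected
 * call site at device probe time (the surrounding code is assumed):
 */
struct priv *priv = dev->data->dev_private;
int prio = mlx5_flow_discover_priorities(dev);

if (prio < 0)
	return prio;		/* rte_errno is already set. */
priv->config.flow_prio = prio;	/* RTE_DIM(priority_map_3) or _5. */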
*/ -static int -priv_flow_convert_rss_conf(struct priv *priv, - struct mlx5_flow_parse *parser, - const struct rte_eth_rss_conf *rss_conf) +static void +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow) { - const struct rte_eth_rss_conf *rss; + struct priv *priv = dev->data->dev_private; + uint32_t priority = flow->attributes.priority; + uint32_t subpriority = flow->cur_verbs->attr->priority; - if (rss_conf) { - if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; - rss = rss_conf; - } else { - rss = &priv->rss_conf; - } - if (rss->rss_key_len > 40) - return EINVAL; - parser->rss_conf.rss_key_len = rss->rss_key_len; - parser->rss_conf.rss_hf = rss->rss_hf; - memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len); - parser->rss_conf.rss_key = parser->rss_key; - return 0; + switch (priv->config.flow_prio) { + case RTE_DIM(priority_map_3): + priority = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + priority = priority_map_5[priority][subpriority]; + break; + } + flow->cur_verbs->attr->priority = priority; } /** - * Extract attribute to the parser. + * Get a flow counter. * - * @param priv - * Pointer to private structure. - * @param[in] attr - * Flow rule attributes. - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] shared + * Indicate if this counter is shared with other flows. + * @param[in] id + * Counter identifier. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * A pointer to the counter, NULL otherwise and rte_errno is set. */ -static int -priv_flow_convert_attributes(struct priv *priv, - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) -{ - (void)priv; - (void)parser; - if (attr->group) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "groups are not supported"); - return -rte_errno; +static struct mlx5_flow_counter * +mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_counter *cnt; + + LIST_FOREACH(cnt, &priv->flow_counters, next) { + if (!cnt->shared || cnt->shared != shared) + continue; + if (cnt->id != id) + continue; + cnt->ref_cnt++; + return cnt; } - if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - NULL, - "priorities are not supported"); - return -rte_errno; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + + struct mlx5_flow_counter tmpl = { + .shared = shared, + .id = id, + .cs = mlx5_glue->create_counter_set + (priv->ctx, + &(struct ibv_counter_set_init_attr){ + .counter_set_id = id, + }), + .hits = 0, + .bytes = 0, + }; + + if (!tmpl.cs) { + rte_errno = errno; + return NULL; } - if (attr->egress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - NULL, - "egress is not supported"); - return -rte_errno; + cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); + if (!cnt) { + rte_errno = ENOMEM; + return NULL; } - if (!attr->ingress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "only ingress is supported"); - return -rte_errno; + *cnt = tmpl; + LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); + return cnt; +#endif + rte_errno = ENOTSUP; + return NULL; +} + +/** + * Release a flow counter. 
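/*
 * Editor's note: mlx5_flow_counter_new() is a get-or-create helper: a
 * shared counter with a matching id is reused with its ref_cnt bumped,
 * and since the lookup skips entries with cnt->shared == 0, every
 * non-shared request allocates its own ibv_counter_set (ENOTSUP without
 * HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT). A sketch of how a COUNT action
 * translation would use it; the call site is assumed, the
 * rte_flow_action_count fields follow the rte_flow API of this era.
 */
const struct rte_flow_action_count *count = actions->conf;

flow->counter = mlx5_flow_counter_new(dev, count->shared, count->id);
if (!flow->counter)
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_ACTION,
				  actions, "cannot get flow counter");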
+ * + * @param[in] counter + * Pointer to the counter handler. + */ +static void +mlx5_flow_counter_release(struct mlx5_flow_counter *counter) +{ + if (--counter->ref_cnt == 0) { + claim_zero(mlx5_glue->destroy_counter_set(counter->cs)); + LIST_REMOVE(counter, next); + rte_free(counter); } - return 0; } /** - * Extract actions request to the parser. + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. * - * @param priv - * Pointer to private structure. - * @param[in] actions - * Associated actions (list terminated by the END action). - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] attributes + * Pointer to flow attributes + * @param[in, out] flow + * Pointer to the rte_flow structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * 0 on success. */ static int -priv_flow_convert_actions(struct priv *priv, - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow *flow) { - /* - * Add default RSS configuration necessary for Verbs to create QP even - * if no RSS is necessary. - */ - priv_flow_convert_rss_conf(priv, parser, - (const struct rte_eth_rss_conf *) - &priv->rss_conf); - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { - if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { - continue; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { - parser->drop = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - const struct rte_flow_action_queue *queue = - (const struct rte_flow_action_queue *) - actions->conf; - uint16_t n; - uint16_t found = 0; - - if (!queue || (queue->index > (priv->rxqs_n - 1))) - goto exit_action_not_supported; - for (n = 0; n < parser->queues_n; ++n) { - if (parser->queues[n] == queue->index) { - found = 1; - break; - } - } - if (parser->queues_n > 1 && !found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS queues"); - return -rte_errno; - } - if (!found) { - parser->queues_n = 1; - parser->queues[0] = queue->index; - } - } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { - const struct rte_flow_action_rss *rss = - (const struct rte_flow_action_rss *) - actions->conf; - uint16_t n; - - if (!rss || !rss->num) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "no valid queues"); - return -rte_errno; - } - if (parser->queues_n == 1) { - uint16_t found = 0; - - assert(parser->queues_n); - for (n = 0; n < rss->num; ++n) { - if (parser->queues[0] == - rss->queue[n]) { - found = 1; - break; - } - } - if (!found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS" - " queues"); - return -rte_errno; - } - } - for (n = 0; n < rss->num; ++n) { - if (rss->queue[n] >= priv->rxqs_n) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue id > number of" - " queues"); - return -rte_errno; - } - } - for (n = 0; n < rss->num; ++n) - parser->queues[n] = rss->queue[n]; - parser->queues_n = rss->num; - if (priv_flow_convert_rss_conf(priv, parser, - rss->rss_conf)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "wrong RSS 
configuration"); - return -rte_errno; - } - } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) { - const struct rte_flow_action_mark *mark = - (const struct rte_flow_action_mark *) - actions->conf; - - if (!mark) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be defined"); - return -rte_errno; - } else if (mark->id >= MLX5_FLOW_MARK_MAX) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be between 0" - " and 16777199"); - return -rte_errno; - } - parser->mark = 1; - parser->mark_id = mark->id; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) { - parser->mark = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT && - priv->config.flow_counter_en) { - parser->count = 1; - } else { - goto exit_action_not_supported; - } - } - if (parser->drop && parser->mark) - parser->mark = 0; - if (!parser->queues_n && !parser->drop) { - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "no valid action"); - return -rte_errno; - } + struct priv *priv = dev->data->dev_private; + uint32_t priority_max = priv->config.flow_prio - 1; + + flow->attributes = *attributes; + if (attributes->priority == MLX5_FLOW_PRIO_RSVD) + flow->attributes.priority = priority_max; return 0; -exit_action_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "action not supported"); - return -rte_errno; } /** - * Validate items. + * Verify the @p item specifications (spec, last, mask) are compatible with the + * NIC capabilities. * - * @param priv - * Pointer to private structure. - * @param[in] items - * Pattern specification (list terminated by the END pattern item). + * @param[in] item + * Item specification. + * @param[in] mask + * @p item->mask or flow default bit-masks. + * @param[in] nic_mask + * Bit-masks covering supported fields by the NIC to compare with user mask. + * @param[in] size + * Bit-masks size in bytes. * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_items_validate(struct priv *priv, - const struct rte_flow_item items[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_item_acceptable(const struct rte_flow_item *item, + const uint8_t *mask, + const uint8_t *nic_mask, + unsigned int size, + struct rte_flow_error *error) { - const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; - (void)priv; - /* Initialise the offsets to start after verbs attribute. 
*/ - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - const struct mlx5_flow_items *token = NULL; - unsigned int n; - int err; + assert(nic_mask); + for (i = 0; i < size; ++i) + if ((nic_mask[i] | mask[i]) != nic_mask[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask enables non supported" + " bits"); + if (!item->spec && (item->mask || item->last)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "mask/last without a spec is not" + " supported"); + if (item->spec && item->last) { + uint8_t spec[size]; + uint8_t last[size]; + unsigned int i; + int ret; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) - continue; - for (i = 0; - cur_item->items && - cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; - ++i) { - if (cur_item->items[i] == items->type) { - token = &mlx5_flow_items[items->type]; - break; - } - } - if (!token) - goto exit_item_not_supported; - cur_item = token; - err = mlx5_flow_item_validate(items, - (const uint8_t *)cur_item->mask, - cur_item->mask_sz); - if (err) - goto exit_item_not_supported; - if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { - if (parser->inner) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "cannot recognize multiple" - " VXLAN encapsulations"); - return -rte_errno; - } - parser->inner = IBV_FLOW_SPEC_INNER; + for (i = 0; i < size; ++i) { + spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; + last[i] = ((const uint8_t *)item->last)[i] & mask[i]; } - if (parser->drop || parser->queues_n == 1) { - parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; - } else { - for (n = 0; n != hash_rxq_init_n; ++n) - parser->queue[n].offset += cur_item->dst_sz; - } - } - if (parser->mark) { - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += - sizeof(struct ibv_flow_spec_action_tag); - } - if (parser->count) { - unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += size; + ret = memcmp(spec, last, size); + if (ret != 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "range is not supported"); } return 0; -exit_item_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - return -rte_errno; } /** - * Allocate memory space to store verbs flow attributes. + * Add a verbs item specification into @p flow. * - * @param priv - * Pointer to private structure. - * @param[in] priority - * Flow priority. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] src + * Create specification. * @param[in] size - * Amount of byte to allocate. - * @param[out] error - * Perform verbose error reporting if not NULL. - * - * @return - * A verbs flow attribute on success, NULL otherwise. - */ -static struct ibv_flow_attr* -priv_flow_convert_allocate(struct priv *priv, - unsigned int priority, - unsigned int size, - struct rte_flow_error *error) -{ - struct ibv_flow_attr *ibv_attr; - - (void)priv; - ibv_attr = rte_calloc(__func__, 1, size, 0); - if (!ibv_attr) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot allocate verbs spec attributes."); - return NULL; - } - ibv_attr->priority = priority; - return ibv_attr; -} - -/** - * Finalise verbs flow attributes. - * - * @param priv - * Pointer to private structure. 
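/*
 * Editor's note: a sketch of a typical mlx5_flow_item_acceptable() caller,
 * pairing the user mask with a NIC capability mask the way the item
 * translators of this series do; the ETH capability mask literal below is
 * illustrative, not taken from this hunk.
 */
static const struct rte_flow_item_eth example_nic_mask = {
	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.type = RTE_BE16(0xffff),
};
int ret;

ret = mlx5_flow_item_acceptable(item,
				(const uint8_t *)(item->mask ?
						  item->mask :
						  &rte_flow_item_eth_mask),
				(const uint8_t *)&example_nic_mask,
				sizeof(example_nic_mask), error);
if (ret < 0)
	return ret;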
- * @param[in, out] parser - * Internal parser structure. + * Size in bytes of the specification to copy. */ static void -priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser) -{ - const unsigned int ipv4 = - hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; - const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6; - const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; - const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4; - const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4; - const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; - unsigned int i; +mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size) +{ + struct mlx5_flow_verbs *verbs = flow->cur_verbs; - (void)priv; - if (parser->layer == HASH_RXQ_ETH) { - goto fill; - } else { - /* - * This layer becomes useless as the pattern define under - * layers. - */ - rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr); - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - } - /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */ - for (i = ohmin; i != (ohmax + 1); ++i) { - if (!parser->queue[i].ibv_attr) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - /* Remove impossible flow according to the RSS configuration. */ - if (hash_rxq_init[parser->layer].dpdk_rss_hf & - parser->rss_conf.rss_hf) { - /* Remove any other flow. */ - for (i = hmin; i != (hmax + 1); ++i) { - if ((i == parser->layer) || - (!parser->queue[i].ibv_attr)) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } else if (!parser->queue[ip].ibv_attr) { - /* no RSS possible with the current configuration. */ - parser->queues_n = 1; - return; - } -fill: - /* - * Fill missing layers in verbs specifications, or compute the correct - * offset to allocate the memory space for the attributes and - * specifications. - */ - for (i = 0; i != hash_rxq_init_n - 1; ++i) { - union { - struct ibv_flow_spec_ipv4_ext ipv4; - struct ibv_flow_spec_ipv6 ipv6; - struct ibv_flow_spec_tcp_udp udp_tcp; - } specs; + if (verbs->specs) { void *dst; - uint16_t size; - - if (i == parser->layer) - continue; - if (parser->layer == HASH_RXQ_ETH) { - if (hash_rxq_init[i].ip_version == MLX5_IPV4) { - size = sizeof(struct ibv_flow_spec_ipv4_ext); - specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){ - .type = IBV_FLOW_SPEC_IPV4_EXT, - .size = size, - }; - } else { - size = sizeof(struct ibv_flow_spec_ipv6); - specs.ipv6 = (struct ibv_flow_spec_ipv6){ - .type = IBV_FLOW_SPEC_IPV6, - .size = size, - }; - } - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } - if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) || - (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) { - size = sizeof(struct ibv_flow_spec_tcp_udp); - specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) { - .type = ((i == HASH_RXQ_UDPV4 || - i == HASH_RXQ_UDPV6) ? - IBV_FLOW_SPEC_UDP : - IBV_FLOW_SPEC_TCP), - .size = size, - }; - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } - } -} - -/** - * Validate and convert a flow supported by the NIC. 
- * - * @param priv - * Pointer to private structure. - * @param[in] attr - * Flow rule attributes. - * @param[in] pattern - * Pattern specification (list terminated by the END pattern item). - * @param[in] actions - * Associated actions (list terminated by the END action). - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -static int -priv_flow_convert(struct priv *priv, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) -{ - const struct mlx5_flow_items *cur_item = mlx5_flow_items; - unsigned int i; - int ret; - /* First step. Validate the attributes, items and actions. */ - *parser = (struct mlx5_flow_parse){ - .create = parser->create, - .layer = HASH_RXQ_ETH, - .mark_id = MLX5_FLOW_MARK_DEFAULT, - }; - ret = priv_flow_convert_attributes(priv, attr, error, parser); - if (ret) - return ret; - ret = priv_flow_convert_actions(priv, actions, error, parser); - if (ret) - return ret; - ret = priv_flow_convert_items_validate(priv, items, error, parser); - if (ret) - return ret; - priv_flow_convert_finalise(priv, parser); - /* - * Second step. - * Allocate the memory space to store verbs specifications. - */ - if (parser->drop || parser->queues_n == 1) { - unsigned int priority = - attr->priority + - hash_rxq_init[HASH_RXQ_ETH].flow_priority; - unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; - - parser->queue[HASH_RXQ_ETH].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); - if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; - parser->queue[HASH_RXQ_ETH].offset = - sizeof(struct ibv_flow_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - unsigned int priority = - attr->priority + - hash_rxq_init[i].flow_priority; - unsigned int offset; - - if (!(parser->rss_conf.rss_hf & - hash_rxq_init[i].dpdk_rss_hf) && - (i != HASH_RXQ_ETH)) - continue; - offset = parser->queue[i].offset; - parser->queue[i].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); - if (!parser->queue[i].ibv_attr) - goto exit_enomem; - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - } - } - /* Third step. Conversion parse, fill the specifications. */ - parser->inner = 0; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) - continue; - cur_item = &mlx5_flow_items[items->type]; - ret = cur_item->convert(items, - (cur_item->default_mask ? - cur_item->default_mask : - cur_item->mask), - parser); - if (ret) { - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - goto exit_free; - } - } - if (parser->mark) - mlx5_flow_create_flag_mark(parser, parser->mark_id); - if (parser->count && parser->create) { - mlx5_flow_create_count(priv, parser); - if (!parser->cs) - goto exit_count_error; - } - /* - * Last step. Complete missing specification to reach the RSS - * configuration. - */ - if (parser->queues_n > 1) { - priv_flow_convert_finalise(priv, parser); - } else { - /* - * Action queue have their priority overridden with - * Ethernet priority, this priority needs to be adjusted to - * their most specific layer priority. 
- */ - parser->queue[HASH_RXQ_ETH].ibv_attr->priority = - attr->priority + - hash_rxq_init[parser->layer].flow_priority; - } -exit_free: - /* Only verification is expected, all resources should be released. */ - if (!parser->create) { - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } - } - if (parser->allmulti && - parser->layer == HASH_RXQ_ETH) { - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - if (parser->queue[i].ibv_attr->num_of_specs != 1) - break; - parser->queue[i].ibv_attr->type = - IBV_FLOW_ATTR_MC_DEFAULT; - } - } - return ret; -exit_enomem: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } + dst = (void *)(verbs->specs + verbs->size); + memcpy(dst, src, size); + ++verbs->attr->num_of_specs; } - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot allocate verbs spec attributes."); - return ret; -exit_count_error: - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create counter."); - return rte_errno; + verbs->size += size; } /** - * Copy the specification created into the flow. + * Adjust verbs hash fields according to the @p flow information. * - * @param parser - * Internal parser structure. - * @param src - * Create specification. - * @param size - * Size in bytes of the specification to copy. + * @param[in, out] flow. + * Pointer to flow structure. + * @param[in] tunnel + * 1 when the hash field is for a tunnel item. + * @param[in] layer_types + * ETH_RSS_* types. + * @param[in] hash_fields + * Item hash fields. */ static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size) +mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, + int tunnel __rte_unused, + uint32_t layer_types, uint64_t hash_fields) { - unsigned int i; - void *dst; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - /* Specification must be the same l3 type or none. */ - if (parser->layer == HASH_RXQ_ETH || - (hash_rxq_init[parser->layer].ip_version == - hash_rxq_init[i].ip_version) || - (hash_rxq_init[i].ip_version == 0)) { - dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, src, size); - ++parser->queue[i].ibv_attr->num_of_specs; - parser->queue[i].offset += size; - } - } +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0); + if (flow->rss.level == 2 && !tunnel) + hash_fields = 0; + else if (flow->rss.level < 2 && tunnel) + hash_fields = 0; +#endif + if (!(flow->rss.types & layer_types)) + hash_fields = 0; + flow->cur_verbs->hash_fields |= hash_fields; } /** - * Convert Ethernet item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. 
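/*
 * Editor's note: two details of the helpers above are easy to miss.
 * mlx5_flow_spec_verbs_add() also works as a pure size accumulator:
 * during validation cur_verbs->specs is NULL, so only verbs->size grows,
 * while in the fill pass the bytes are appended and num_of_specs bumped.
 * mlx5_flow_verbs_hashfields_adjust() zeroes the contribution whenever the
 * requested RSS types do not cover the layer, so only relevant headers
 * feed the Rx hash. A minimal usage sketch:
 */
struct ibv_flow_spec_eth eth = {
	.type = IBV_FLOW_SPEC_ETH,
	.size = sizeof(struct ibv_flow_spec_eth),
};

/* Sizing pass: only accounts the bytes. Fill pass: appends the spec. */
mlx5_flow_spec_verbs_add(flow, &eth, sizeof(eth));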
+ * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const unsigned int size = sizeof(struct ibv_flow_spec_eth); struct ibv_flow_spec_eth eth = { - .type = parser->inner | IBV_FLOW_SPEC_ETH, - .size = eth_size, + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_ETH; + if (!mask) + mask = &rte_flow_item_eth_mask; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (size > flow_size) + return size; if (spec) { unsigned int i; - if (!mask) - mask = default_mask; memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); eth.val.ether_type = spec->type; @@ -1280,81 +794,144 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, } eth.val.ether_type &= eth.mask.ether_type; } - mlx5_flow_create_copy(parser, ð, eth_size); - parser->allmulti = eth.val.dst_mac[0] & 1; - return 0; + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, ð, size); + return size; +} + +/** + * Update the VLAN tag in the Verbs Ethernet specification. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] eth + * Verbs structure containing the VLAN information to copy. + */ +static void +mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr, + struct ibv_flow_spec_eth *eth) +{ + unsigned int i; + const enum ibv_flow_spec_type search = eth->type; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + struct ibv_flow_spec_eth *e = + (struct ibv_flow_spec_eth *)hdr; + + e->val.vlan_tag = eth->val.vlan_tag; + e->mask.vlan_tag = eth->mask.vlan_tag; + e->val.ether_type = eth->val.ether_type; + e->mask.ether_type = eth->mask.ether_type; + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } } /** - * Convert VLAN item to Verbs specification. + * Convert the @p item into @p flow (or by updating the already present + * Ethernet Verbs) specification after ensuring the NIC will understand and + * process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. 
+ * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - struct ibv_flow_spec_eth *eth; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); + unsigned int size = sizeof(struct ibv_flow_spec_eth); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + struct ibv_flow_spec_eth eth = { + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, + }; + const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (!mask) + mask = &rte_flow_item_vlan_mask; if (spec) { - unsigned int i; - if (!mask) - mask = default_mask; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - - eth = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset - eth_size); - eth->val.vlan_tag = spec->tci; - eth->mask.vlan_tag = mask->tci; - eth->val.vlan_tag &= eth->mask.vlan_tag; + eth.val.vlan_tag = spec->tci; + eth.mask.vlan_tag = mask->tci; + eth.val.vlan_tag &= eth.mask.vlan_tag; + eth.val.ether_type = spec->inner_type; + eth.mask.ether_type = mask->inner_type; + eth.val.ether_type &= eth.mask.ether_type; + } + if (!(flow->layers & l2m)) { + if (size <= flow_size) { + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, ð, size); } + } else { + if (flow->cur_verbs) + mlx5_flow_item_vlan_update(flow->cur_verbs->attr, + ð); + size = 0; /* Only an update is done in eth specification. */ } - return 0; + flow->layers |= tunnel ? + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN); + return size; } /** - * Convert IPv4 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. 
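/*
 * Editor's note: all mlx5_flow_item_*() translators share this calling
 * convention, so one code path serves both validation and creation. A
 * sketch of a caller (the "remaining" bookkeeping is assumed):
 */
int ret = mlx5_flow_item_ipv4(item, flow, remaining);

if (ret < 0)
	return ret;		/* rte_errno is set. */
if ((size_t)ret > remaining)
	remaining = 0;		/* validated only, nothing was written;
				 * redo the call with >= ret bytes free. */
else
	remaining -= ret;	/* the spec landed in the flow buffer. */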
*/ static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext); struct ibv_flow_spec_ipv4_ext ipv4 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT, - .size = ipv4_size, + .type = IBV_FLOW_SPEC_IPV4_EXT | + (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV4; + if (!mask) + mask = &rte_flow_item_ipv4_mask; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; if (spec) { - if (!mask) - mask = default_mask; ipv4.val = (struct ibv_flow_ipv4_ext_filter){ .src_ip = spec->hdr.src_addr, .dst_ip = spec->hdr.dst_addr, @@ -1373,42 +950,61 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, ipv4.val.proto &= ipv4.mask.proto; ipv4.val.tos &= ipv4.mask.tos; } - mlx5_flow_create_copy(parser, &ipv4, ipv4_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_NONFRAG_IPV4_OTHER), + (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv4, size); + } + return size; } /** - * Convert IPv6 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. */ static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv6); struct ibv_flow_spec_ipv6 ipv6 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV6, - .size = ipv6_size, + .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; - /* Don't update layer for the inner pattern. 
*/ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV6; + if (!mask) + mask = &rte_flow_item_ipv6_mask; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; if (spec) { unsigned int i; + uint32_t vtc_flow_val; + uint32_t vtc_flow_mask; - if (!mask) - mask = default_mask; memcpy(&ipv6.val.src_ip, spec->hdr.src_addr, RTE_DIM(ipv6.val.src_ip)); memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr, @@ -1417,7 +1013,20 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, RTE_DIM(ipv6.mask.src_ip)); memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr, RTE_DIM(ipv6.mask.dst_ip)); - ipv6.mask.flow_label = mask->hdr.vtc_flow; + vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow); + vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow); + ipv6.val.flow_label = + rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >> + IPV6_HDR_FL_SHIFT); + ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >> + IPV6_HDR_TC_SHIFT; + ipv6.val.next_hdr = spec->hdr.proto; + ipv6.val.hop_limit = spec->hdr.hop_limits; + ipv6.mask.flow_label = + rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >> + IPV6_HDR_FL_SHIFT); + ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >> + IPV6_HDR_TC_SHIFT; ipv6.mask.next_hdr = mask->hdr.proto; ipv6.mask.hop_limit = mask->hdr.hop_limits; /* Remove unwanted bits from values. */ @@ -1426,47 +1035,61 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i]; } ipv6.val.flow_label &= ipv6.mask.flow_label; + ipv6.val.traffic_class &= ipv6.mask.traffic_class; ipv6.val.next_hdr &= ipv6.mask.next_hdr; ipv6.val.hop_limit &= ipv6.mask.hop_limit; } - mlx5_flow_create_copy(parser, &ipv6, ipv6_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX | + ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX), + (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv6, size); + } + return size; } /** - * Convert UDP item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. 
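+ *
+ *   For instance (illustrative only, the exact outcome depends on the RSS
+ *   types requested by the action), an eth / ipv4 / udp pattern combined
+ *   with ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP is expected to select
+ *   Verbs hash fields along the lines of:
+ *
+ *     IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ *     IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP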
*/ static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp udp = { - .type = parser->inner | IBV_FLOW_SPEC_UDP, - .size = udp_size, + .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_UDPV4; - else - parser->layer = HASH_RXQ_UDPV6; - } + if (!mask) + mask = &rte_flow_item_udp_mask; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; if (spec) { - if (!mask) - mask = default_mask; udp.val.dst_port = spec->hdr.dst_port; udp.val.src_port = spec->hdr.src_port; udp.mask.dst_port = mask->hdr.dst_port; @@ -1475,44 +1098,55 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, udp.val.src_port &= udp.mask.src_port; udp.val.dst_port &= udp.mask.dst_port; } - mlx5_flow_create_copy(parser, &udp, udp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP, + (IBV_RX_HASH_SRC_PORT_UDP | + IBV_RX_HASH_DST_PORT_UDP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &udp, size); + } + return size; } /** - * Convert TCP item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. */ static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; - unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp tcp = { - .type = parser->inner | IBV_FLOW_SPEC_TCP, - .size = tcp_size, + .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; - /* Don't update layer for the inner pattern. 
*/ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_TCPV4; - else - parser->layer = HASH_RXQ_TCPV6; - } + if (!mask) + mask = &rte_flow_item_tcp_mask; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; if (spec) { - if (!mask) - mask = default_mask; tcp.val.dst_port = spec->hdr.dst_port; tcp.val.src_port = spec->hdr.src_port; tcp.mask.dst_port = mask->hdr.dst_port; @@ -1521,341 +1155,2656 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, tcp.val.src_port &= tcp.mask.src_port; tcp.val.dst_port &= tcp.mask.dst_port; } - mlx5_flow_create_copy(parser, &tcp, tcp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP, + (IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &tcp, size); + } + return size; } /** - * Convert VXLAN item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. */ static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - void *data) +mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel vxlan = { - .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + if (!mask) + mask = &rte_flow_item_vxlan_mask; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vxlan.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan.mask.tunnel_id = id.vlan_id; + /* Remove unwanted bits from values. */ + vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + } + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN; + return size; +} + +/** + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] item + * Item specification. + * @param[in, out] flow + * Pointer to flow structure. 
+ * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + */ +static int +mlx5_flow_item_vxlan_gpe(const struct rte_flow_item *item, + struct rte_flow *flow, const size_t flow_size) +{ + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel vxlan_gpe = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; union vni { uint32_t vlan_id; uint8_t vni[4]; - } id; + } id = { .vlan_id = 0, }; + + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vxlan_gpe.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan_gpe.mask.tunnel_id = id.vlan_id; + /* Remove unwanted bits from values. */ + vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id; + } + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE; + return size; +} + +/** + * Update the protocol in Verbs IPv4/IPv6 spec. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] search + * Specification type to search in order to update the IP protocol. + * @param[in] protocol + * Protocol value to set if none is present in the specification. + */ +static void +mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, + enum ibv_flow_spec_type search, + uint8_t protocol) +{ + unsigned int i; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + if (!attr) + return; + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + union { + struct ibv_flow_spec_ipv4_ext *ipv4; + struct ibv_flow_spec_ipv6 *ipv6; + } ip; + + switch (search) { + case IBV_FLOW_SPEC_IPV4_EXT: + ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr; + if (!ip.ipv4->val.proto) { + ip.ipv4->val.proto = protocol; + ip.ipv4->mask.proto = 0xff; + } + break; + case IBV_FLOW_SPEC_IPV6: + ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr; + if (!ip.ipv6->val.next_hdr) { + ip.ipv6->val.next_hdr = protocol; + ip.ipv6->mask.next_hdr = 0xff; + } + break; + default: + break; + } + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } +} + +/** + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * It will also update the previous L3 layer with the protocol value matching + * the GRE. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] item + * Item specification. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. 
+ * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + */ +static int +mlx5_flow_item_gre(const struct rte_flow_item *item __rte_unused, + struct rte_flow *flow, const size_t flow_size) +{ + struct mlx5_flow_verbs *verbs = flow->cur_verbs; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + const struct rte_flow_item_gre *spec = item->spec; + const struct rte_flow_item_gre *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_gre); + struct ibv_flow_spec_gre tunnel = { + .type = IBV_FLOW_SPEC_GRE, + .size = size, + }; +#else + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel tunnel = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; +#endif + +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + if (!mask) + mask = &rte_flow_item_gre_mask; + if (spec) { + tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver; + tunnel.val.protocol = spec->protocol; + tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver; + tunnel.mask.protocol = mask->protocol; + /* Remove unwanted bits from values. */ + tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver; + tunnel.val.protocol &= tunnel.mask.protocol; + tunnel.val.key &= tunnel.mask.key; + } +#else +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */ + if (size <= flow_size) { + if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT, + MLX5_IP_PROTOCOL_GRE); + else + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV6, + MLX5_IP_PROTOCOL_GRE); + mlx5_flow_spec_verbs_add(flow, &tunnel, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_GRE; + return size; +} + +/** + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] item + * Item specification. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused, + struct rte_flow *flow __rte_unused, + const size_t flow_size __rte_unused, + struct rte_flow_error *error) +{ +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + const struct rte_flow_item_mpls *spec = item->spec; + const struct rte_flow_item_mpls *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_mpls); + struct ibv_flow_spec_mpls mpls = { + .type = IBV_FLOW_SPEC_MPLS, + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_mpls_mask; + if (spec) { + memcpy(&mpls.val.label, spec, sizeof(mpls.val.label)); + memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label)); + /* Remove unwanted bits from values. 
*/ + mpls.val.label &= mpls.mask.label; + } + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &mpls, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_MPLS; + return size; +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MPLS is not supported by Verbs, please" + " update."); +} + +/** + * Convert the @p pattern into a Verbs specifications after ensuring the NIC + * will understand and process it correctly. + * The conversion is performed item per item, each of them is written into + * the @p flow if its size is lesser or equal to @p flow_size. + * Validation and memory consumption computation are still performed until the + * end of @p pattern, unless an error is encountered. + * + * @param[in] pattern + * Flow pattern. + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @pattern has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_items(const struct rte_flow_item pattern[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + int remain = flow_size; + size_t size = 0; + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + int ret = 0; + + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_item_eth(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = mlx5_flow_item_vlan(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_item_ipv4(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = mlx5_flow_item_ipv6(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_item_udp(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_item_tcp(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_item_vxlan(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_item_vxlan_gpe(pattern, flow, + remain); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_item_gre(pattern, flow, remain); + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_item_mpls(pattern, flow, remain, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "item not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->layers) { + const struct rte_flow_item item = { + .type = RTE_FLOW_ITEM_TYPE_ETH, + }; + + return mlx5_flow_item_eth(&item, flow, flow_size); + } + return size; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in, out] flow + * Pointer to flow structure. 
+ * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_action_drop(struct rte_flow *flow, const size_t flow_size) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_drop); + struct ibv_flow_spec_action_drop drop = { + .type = IBV_FLOW_SPEC_ACTION_DROP, + .size = size, + }; + + if (size < flow_size) + mlx5_flow_spec_verbs_add(flow, &drop, size); + flow->fate |= MLX5_FLOW_FATE_DROP; + return size; +} + +/** + * Convert the @p action into @p flow after ensuring the NIC will understand + * and process it correctly. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_action_queue(const struct rte_flow_action *action, + struct rte_flow *flow) +{ + const struct rte_flow_action_queue *queue = action->conf; + + if (flow->queue) + (*flow->queue)[0] = queue->index; + flow->rss.queue_num = 1; + flow->fate |= MLX5_FLOW_FATE_QUEUE; + return 0; +} + +/** + * Ensure the @p action will be understood and used correctly by the NIC. + * + * @param[in] action + * Action configuration. + * @param flow[in, out] + * Pointer to the rte_flow structure. + * + * @return + * 0 On success. + */ +static int +mlx5_flow_action_rss(const struct rte_flow_action *action, + struct rte_flow *flow) +{ + const struct rte_flow_action_rss *rss = action->conf; + + if (flow->queue) + memcpy((*flow->queue), rss->queue, + rss->queue_num * sizeof(uint16_t)); + flow->rss.queue_num = rss->queue_num; + memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN); + flow->rss.types = rss->types; + flow->rss.level = rss->level; + flow->fate |= MLX5_FLOW_FATE_RSS; + return 0; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + */ +static int +mlx5_flow_action_flag(struct rte_flow *flow, const size_t flow_size) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT), + }; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + + if (flow->modifier & MLX5_FLOW_MOD_MARK) + size = 0; + else if (size <= flow_size && verbs) + mlx5_flow_spec_verbs_add(flow, &tag, size); + flow->modifier |= MLX5_FLOW_MOD_FLAG; + return size; +} + +/** + * Update verbs specification to modify the flag to mark. + * + * @param[in, out] verbs + * Pointer to the mlx5_flow_verbs structure. 
+ * @param[in] mark_id
+ *   Mark identifier to replace the flag.
+ */
+static void
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+	struct ibv_spec_header *hdr;
+	int i;
+
+	if (!verbs)
+		return;
+	/* Update Verbs specification. */
+	hdr = (struct ibv_spec_header *)verbs->specs;
+	if (!hdr)
+		return;
+	for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+		if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+			struct ibv_flow_spec_action_tag *t =
+				(struct ibv_flow_spec_action_tag *)hdr;
+
+			t->tag_id = mlx5_flow_mark_set(mark_id);
+		}
+		hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+	}
+}
+
+/**
+ * Convert the @p action into @p flow (or by updating the already present
+ * Flag Verbs specification) after ensuring the NIC will understand and
+ * process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] action
+ *   Action configuration.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] flow_size
+ *   Size in bytes of the available space in @p flow, if too small, nothing is
+ *   written.
+ *
+ * @return
+ *   On success the number of bytes consumed/necessary, if the returned value
+ *   is lesser or equal to @p flow_size, the @p action has fully been
+ *   converted, otherwise another call with this returned memory size should
+ *   be done.
+ */
+static int
+mlx5_flow_action_mark(const struct rte_flow_action *action,
+		      struct rte_flow *flow, const size_t flow_size)
+{
+	const struct rte_flow_action_mark *mark = action->conf;
+	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+	struct ibv_flow_spec_action_tag tag = {
+		.type = IBV_FLOW_SPEC_ACTION_TAG,
+		.size = size,
+	};
+	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+	if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
+		mlx5_flow_verbs_mark_update(verbs, mark->id);
+		size = 0;
+	} else if (size <= flow_size) {
+		tag.tag_id = mlx5_flow_mark_set(mark->id);
+		mlx5_flow_spec_verbs_add(flow, &tag, size);
+	}
+	flow->modifier |= MLX5_FLOW_MOD_MARK;
+	return size;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] action
+ *   Action configuration.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] flow_size
+ *   Size in bytes of the available space in @p flow, if too small, nothing is
+ *   written.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   On success the number of bytes consumed/necessary, if the returned value
+ *   is lesser or equal to @p flow_size, the @p action has fully been
+ *   converted, otherwise another call with this returned memory size should
+ *   be done.
+ *   On error, a negative errno value is returned and rte_errno is set.
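+ *
+ *   Application-side sketch (illustrative only; port_id, flow and error are
+ *   assumed to be in scope, error handling elided) showing how such a
+ *   counter is read back through the generic rte_flow API:
+ *
+ *     struct rte_flow_query_count query = { .reset = 1, };
+ *     const struct rte_flow_action count = {
+ *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
+ *     };
+ *
+ *     if (!rte_flow_query(port_id, flow, &count, &query, &error))
+ *             printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
+ *                    query.hits, query.bytes);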
+ */ +static int +mlx5_flow_action_count(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + const size_t flow_size __rte_unused, + struct rte_flow_error *error) +{ + const struct rte_flow_action_count *count = action->conf; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + unsigned int size = sizeof(struct ibv_flow_spec_counter_action); + struct ibv_flow_spec_counter_action counter = { + .type = IBV_FLOW_SPEC_ACTION_COUNT, + .size = size, + }; +#endif + + if (!flow->counter) { + flow->counter = mlx5_flow_counter_new(dev, count->shared, + count->id); + if (!flow->counter) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "cannot get counter" + " context."); + } + flow->modifier |= MLX5_FLOW_MOD_COUNT; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + counter.counter_set_handle = flow->counter->cs->handle; + if (size <= flow_size) + mlx5_flow_spec_verbs_add(flow, &counter, size); + return size; +#endif + return 0; +} + +/** + * Convert the @p action into @p flow after ensuring the NIC will understand + * and process it correctly. + * The conversion is performed action per action, each of them is written into + * the @p flow if its size is lesser or equal to @p flow_size. + * Validation and memory consumption computation are still performed until the + * end of @p action, unless an error is encountered. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] actions + * Pointer to flow actions array. + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p actions has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + size_t size = 0; + int remain = flow_size; + int ret = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = mlx5_flow_action_flag(flow, remain); + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = mlx5_flow_action_mark(actions, flow, remain); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_action_drop(flow, remain); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_action_queue(actions, flow); + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = mlx5_flow_action_rss(actions, flow); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_action_count(dev, actions, flow, remain, + error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no fate action found"); + return size; +} + +/** + * Validate flow rule and fill flow structure accordingly. + * + * @param dev + * Pointer to Ethernet device. 
+ * @param[out] flow + * Pointer to flow structure. + * @param flow_size + * Size of allocated space for @p flow. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A positive value representing the size of the flow object in bytes + * regardless of @p flow_size on success, a negative errno value otherwise + * and rte_errno is set. + */ +static int +mlx5_flow_merge_switch(struct rte_eth_dev *dev, + struct rte_flow *flow, + size_t flow_size, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[!n + n]; + struct mlx5_nl_flow_ptoi ptoi[!n + n + 1]; + size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t)); + unsigned int i; + unsigned int own = 0; + int ret; + + /* At least one port is needed when no switch domain is present. */ + if (!n) { + n = 1; + port_id[0] = dev->data->port_id; + } else { + n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n); + } + for (i = 0; i != n; ++i) { + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(port_id[i], &dev_info); + if (port_id[i] == dev->data->port_id) + own = i; + ptoi[i].port_id = port_id[i]; + ptoi[i].ifindex = dev_info.if_index; + } + /* Ensure first entry of ptoi[] is the current device. */ + if (own) { + ptoi[n] = ptoi[0]; + ptoi[0] = ptoi[own]; + ptoi[own] = ptoi[n]; + } + /* An entry with zero ifindex terminates ptoi[]. */ + ptoi[n].port_id = 0; + ptoi[n].ifindex = 0; + if (flow_size < off) + flow_size = 0; + ret = mlx5_nl_flow_transpose((uint8_t *)flow + off, + flow_size ? flow_size - off : 0, + ptoi, attr, pattern, actions, error); + if (ret < 0) + return ret; + if (flow_size) { + *flow = (struct rte_flow){ + .attributes = *attr, + .nl_flow = (uint8_t *)flow + off, + }; + /* + * Generate a reasonably unique handle based on the address + * of the target buffer. + * + * This is straightforward on 32-bit systems where the flow + * pointer can be used directly. Otherwise, its least + * significant part is taken after shifting it by the + * previous power of two of the pointed buffer size. + */ + if (sizeof(flow) <= 4) + mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow); + else + mlx5_nl_flow_brand + (flow->nl_flow, + (uintptr_t)flow >> + rte_log2_u32(rte_align32prevpow2(flow_size))); + } + return off + ret; +} + +static unsigned int +mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +{ + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; + } + } + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; +} + +/** + * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC + * after ensuring the NIC will understand and process it correctly. + * The conversion is only performed item/action per item/action, each of + * them is written into the @p flow if its size is lesser or equal to @p + * flow_size. 
+ * Validation and memory consumption computation are still performed until the + * end, unless an error is encountered. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[in] attributes + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the flow has fully been converted and + * can be applied, otherwise another call with this returned memory size + * should be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow, + const size_t flow_size, + const struct rte_flow_attr *attributes, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow local_flow = { .layers = 0, }; + size_t size = sizeof(*flow); + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_verbs *original_verbs = NULL; + size_t original_verbs_size = 0; + uint32_t original_layers = 0; + int expanded_pattern_idx = 0; + int ret = 0; + uint32_t i; + + if (attributes->transfer) + return mlx5_flow_merge_switch(dev, flow, flow_size, + attributes, pattern, + actions, error); + if (size > flow_size) + flow = &local_flow; + ret = mlx5_flow_attributes(dev->data->dev_private, attributes, flow); + if (ret < 0) + return ret; + ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error); + if (ret < 0) + return ret; + if (local_flow.rss.types) { + unsigned int graph_root; + + graph_root = mlx5_find_graph_root(pattern, + local_flow.rss.level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + pattern, local_flow.rss.types, + mlx5_support_expansion, + graph_root); + assert(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)pattern; + } + size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t), + sizeof(void *)); + if (size <= flow_size) + flow->queue = (void *)(flow + 1); + LIST_INIT(&flow->verbs); + flow->layers = 0; + flow->modifier = 0; + flow->fate = 0; + for (i = 0; i != buf->entries; ++i) { + size_t off = size; + size_t off2; + + flow->layers = original_layers; + size += sizeof(struct ibv_flow_attr) + + sizeof(struct mlx5_flow_verbs); + off2 = size; + if (size < flow_size) { + flow->cur_verbs = (void *)((uintptr_t)flow + off); + flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1); + flow->cur_verbs->specs = + (void *)(flow->cur_verbs->attr + 1); + } + /* First iteration convert the pattern into Verbs. */ + if (i == 0) { + /* Actions don't need to be converted several time. */ + ret = mlx5_flow_actions(dev, actions, flow, + (size < flow_size) ? + flow_size - size : 0, + error); + if (ret < 0) + return ret; + size += ret; + } else { + /* + * Next iteration means the pattern has already been + * converted and an expansion is necessary to match + * the user RSS request. 
For that only the expanded + * items will be converted, the common part with the + * user pattern are just copied into the next buffer + * zone. + */ + size += original_verbs_size; + if (size < flow_size) { + rte_memcpy(flow->cur_verbs->attr, + original_verbs->attr, + original_verbs_size + + sizeof(struct ibv_flow_attr)); + flow->cur_verbs->size = original_verbs_size; + } + } + ret = mlx5_flow_items + ((const struct rte_flow_item *) + &buf->entry[i].pattern[expanded_pattern_idx], + flow, + (size < flow_size) ? flow_size - size : 0, error); + if (ret < 0) + return ret; + size += ret; + if (size <= flow_size) { + mlx5_flow_adjust_priority(dev, flow); + LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next); + } + /* + * Keep a pointer of the first verbs conversion and the layers + * it has encountered. + */ + if (i == 0) { + original_verbs = flow->cur_verbs; + original_verbs_size = size - off2; + original_layers = flow->layers; + /* + * move the index of the expanded pattern to the + * first item not addressed yet. + */ + if (pattern->type == RTE_FLOW_ITEM_TYPE_END) { + expanded_pattern_idx++; + } else { + const struct rte_flow_item *item = pattern; + + for (item = pattern; + item->type != RTE_FLOW_ITEM_TYPE_END; + ++item) + expanded_pattern_idx++; + } + } + } + /* Restore the origin layers in the flow. */ + flow->layers = original_layers; + return size; +} + +/** + * Lookup and set the ptype in the data Rx part. A single Ptype can be used, + * if several tunnel rules are used on this queue, the tunnel ptype will be + * cleared. + * + * @param rxq_ctrl + * Rx queue to update. + */ +static void +mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + unsigned int i; + uint32_t tunnel_ptype = 0; + + /* Look up for the ptype to use. */ + for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { + if (!rxq_ctrl->flow_tunnels_n[i]) + continue; + if (!tunnel_ptype) { + tunnel_ptype = tunnels_info[i].ptype; + } else { + tunnel_ptype = 0; + break; + } + } + rxq_ctrl->rxq.tunnel = tunnel_ptype; +} + +/** + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to flow structure. + */ +static void +mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + const int mark = !!(flow->modifier & + (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; + + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n++; + } + if (tunnel) { + unsigned int j; + + /* Increase the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]++; + break; + } + } + mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + } + } +} + +/** + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. 
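+ *
+ *   Illustrative trace (hypothetical counter values): with one VXLAN and
+ *   one GRE flow attached to a queue, flow_tunnels_n holds two non-zero
+ *   entries, the ptype is ambiguous and rxq.tunnel stays 0. Once the GRE
+ *   flow is destroyed and trimmed here, only the VXLAN counter remains and
+ *   mlx5_flow_rxq_tunnel_ptype_update() re-installs the VXLAN ptype.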
+ */
+static void
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	struct priv *priv = dev->data->dev_private;
+	const int mark = !!(flow->modifier &
+			    (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	unsigned int i;
+
+	assert(dev->data->dev_started);
+	for (i = 0; i != flow->rss.queue_num; ++i) {
+		int idx = (*flow->queue)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of((*priv->rxqs)[idx],
+				     struct mlx5_rxq_ctrl, rxq);
+
+		if (mark) {
+			rxq_ctrl->flow_mark_n--;
+			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+		}
+		if (tunnel) {
+			unsigned int j;
+
+			/* Decrease the counter matching the flow. */
+			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+				if ((tunnels_info[j].tunnel & flow->layers) ==
+				    tunnels_info[j].tunnel) {
+					rxq_ctrl->flow_tunnels_n[j]--;
+					break;
+				}
+			}
+			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+		}
+	}
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i != priv->rxqs_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq_ctrl;
+		unsigned int j;
+
+		if (!(*priv->rxqs)[i])
+			continue;
+		rxq_ctrl = container_of((*priv->rxqs)[i],
+					struct mlx5_rxq_ctrl, rxq);
+		rxq_ctrl->flow_mark_n = 0;
+		rxq_ctrl->rxq.mark = 0;
+		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+			rxq_ctrl->flow_tunnels_n[j] = 0;
+		rxq_ctrl->rxq.tunnel = 0;
+	}
+}
+
+/**
+ * Validate the flag action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_flag(uint64_t action_flags,
+			       struct rte_flow_error *error)
+{
+	if (action_flags & MLX5_FLOW_ACTION_DROP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't drop and flag in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_MARK)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't mark and flag in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_FLAG)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 flag"
+					  " actions in same flow");
+	return 0;
+}
+
+/**
+ * Validate the mark action.
+ *
+ * @param[in] action
+ *   Pointer to the mark action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
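+ *
+ *   For example (illustrative only, mark_conf being a filled
+ *   rte_flow_action_mark), an action list combining MARK with DROP:
+ *
+ *     { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
+ *     { .type = RTE_FLOW_ACTION_TYPE_DROP },
+ *     { .type = RTE_FLOW_ACTION_TYPE_END },
+ *
+ *   is rejected here with EINVAL since a dropped packet cannot deliver its
+ *   mark to the application.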
+ */
+static int
+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+			       uint64_t action_flags,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_action_mark *mark = action->conf;
+
+	if (!mark)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  action,
+					  "configuration cannot be null");
+	if (mark->id >= MLX5_FLOW_MARK_MAX)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &mark->id,
+					  "mark id must be in 0 <= id < "
+					  RTE_STR(MLX5_FLOW_MARK_MAX));
+	if (action_flags & MLX5_FLOW_ACTION_DROP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't drop and mark in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_FLAG)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't flag and mark in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_MARK)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 mark actions in same"
+					  " flow");
+	return 0;
+}
+
+/**
+ * Validate the drop action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_drop(uint64_t action_flags,
+			       struct rte_flow_error *error)
+{
+	if (action_flags & MLX5_FLOW_ACTION_FLAG)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't drop and flag in same flow");
+	if (action_flags & MLX5_FLOW_ACTION_MARK)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't drop and mark in same flow");
+	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 fate actions in"
+					  " same flow");
+	return 0;
+}
+
+/**
+ * Validate the queue action.
+ *
+ * @param[in] action
+ *   Pointer to the queue action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+				uint64_t action_flags,
+				struct rte_eth_dev *dev,
+				struct rte_flow_error *error)
+{
+	struct priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_queue *queue = action->conf;
+
+	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 fate actions in"
+					  " same flow");
+	if (queue->index >= priv->rxqs_n)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &queue->index,
+					  "queue index out of range");
+	if (!(*priv->rxqs)[queue->index])
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &queue->index,
+					  "queue is not configured");
+	return 0;
+}
+
+/**
+ * Validate the RSS action.
+ *
+ * @param[in] action
+ *   Pointer to the RSS action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
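+ *
+ *   A configuration sketch expected to pass these checks (queue indexes are
+ *   placeholders and the key, exactly MLX5_RSS_HASH_KEY_LEN bytes long, is
+ *   assumed to be filled by the application):
+ *
+ *     uint8_t key[MLX5_RSS_HASH_KEY_LEN];
+ *     uint16_t queues[] = { 0, 1, 2, 3 };
+ *     struct rte_flow_action_rss rss = {
+ *             .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
+ *             .level = 1,
+ *             .types = ETH_RSS_IP,
+ *             .key_len = sizeof(key),
+ *             .key = key,
+ *             .queue_num = RTE_DIM(queues),
+ *             .queue = queues,
+ *     };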
+ */
+static int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+			      uint64_t action_flags,
+			      struct rte_eth_dev *dev,
+			      struct rte_flow_error *error)
+{
+	struct priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_rss *rss = action->conf;
+	unsigned int i;
+
+	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "can't have 2 fate actions"
+					  " in same flow");
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->func,
+					  "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (rss->level > 2)
+#else
+	if (rss->level > 1)
+#endif
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->level,
+					  "tunnel RSS is not supported");
+	if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->key_len,
+					  "RSS hash key too small");
+	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->key_len,
+					  "RSS hash key too large");
+	if (rss->queue_num > priv->config.ind_table_max_size)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->queue_num,
+					  "number of queues too large");
+	if (rss->types & MLX5_RSS_HF_MASK)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->types,
+					  "some RSS protocols are not"
+					  " supported");
+	for (i = 0; i != rss->queue_num; ++i) {
+		if (!(*priv->rxqs)[rss->queue[i]])
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				 &rss->queue[i], "queue is not configured");
+	}
+	return 0;
+}
+
+/**
+ * Validate the count action.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
+				struct rte_flow_error *error)
+{
+	struct priv *priv = dev->data->dev_private;
+
+	if (!priv->config.flow_counter_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "flow counters are not supported.");
+	return 0;
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ *   Pointer to flow attributes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
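+ *
+ *   Only ingress rules in the default group are accepted at this point,
+ *   e.g. (illustrative):
+ *
+ *     struct rte_flow_attr attr = {
+ *             .group = 0,
+ *             .priority = 0,
+ *             .ingress = 1,
+ *     };
+ *
+ *   where .priority must stay below priv->config.flow_prio unless the
+ *   reserved MLX5_FLOW_PRIO_RSVD value is used.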
+ */ +static int +mlx5_flow_validate_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + uint32_t priority_max = priv->config.flow_prio - 1; + + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, "groups is not supported"); + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, "priority out of range"); + if (attributes->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "egress is not supported"); + if (attributes->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + if (!attributes->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "ingress attribute is mandatory"); + return 0; +} + +/** + * Validate Ethernet item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_eth(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *mask = item->mask; + const struct rte_flow_item_eth nic_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = RTE_BE16(0xffff), + }; + int ret; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + + if (item_flags & MLX5_FLOW_LAYER_OUTER_L2) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "3 levels of l2 are not supported"); + if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "2 L2 without tunnel are not supported"); + if (!mask) + mask = &rte_flow_item_eth_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_eth), + error); + return ret; +} + +/** + * Validate VLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, + int64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(0x0fff), + .inner_type = RTE_BE16(0xffff), + }; + uint16_t vlan_tag = 0; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4); + const uint32_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + + if (item_flags & vlanm) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN layer already configured"); + else if ((item_flags & l34m) != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer cannot follow L3/L4 layer"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), + error); + if (ret) + return ret; + if (spec) { + vlan_tag = spec->tci; + vlan_tag &= mask->tci; + } + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!vlan_tag) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "VLAN cannot be empty"); + return 0; +} + +/** + * Validate IPV4 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, + int64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *mask = item->mask; + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + }, + }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + + if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L3 layers not supported"); + else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an L4 layer."); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv4), + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate IPV6 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *mask = item->mask; + const struct rte_flow_item_ipv6 nic_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + + if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L3 layers not supported"); + else if (item_flags & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an L4 layer."); + /* + * IPv6 is not recognised by the NIC inside a GRE tunnel. + * Such support has to be disabled as the rule will be + * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and + * Mellanox OFED 4.4-1.0.0.0. + */ + if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 inside a GRE tunnel is" + " not recognised."); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6), + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate UDP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_udp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + + if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with UDP layer"); + if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 is mandatory to filter on L4"); + if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L4 layer is already present"); + if (!mask) + mask = &rte_flow_item_udp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate TCP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + + if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_TCP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with TCP layer"); + if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 is mandatory to filter on L4"); + if (item_flags & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L4 layer is already present"); + if (!mask) + mask = &rte_flow_item_tcp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate VXLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + int ret; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + uint32_t vlan_id = 0; + + + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), + error); + if (ret < 0) + return ret; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vlan_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vlan_id &= id.vlan_id; + } + /* + * Tunnel id 0 is equivalent as not adding a VXLAN layer, if + * only this layer is defined in the Verbs specification it is + * interpreted as wildcard and all packets will match this + * rule, if it follows a full stack layer (ex: eth / ipv4 / + * udp), all packets matching the layers before will also + * match this rule. To avoid such situation, VNI 0 is + * currently refused. + */ + if (!vlan_id) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN vni cannot be 0"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN tunnel must be fully defined"); + return 0; +} + +/** + * Validate VXLAN_GPE item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] priv + * Pointer to the private data structure. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + int ret; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + uint32_t vlan_id = 0; + + if (!priv->config.l3_vxlan_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 VXLAN is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, + sizeof(struct rte_flow_item_vxlan_gpe), + error); + if (ret < 0) + return ret; + if (spec) { + if (spec->protocol) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol" + " not supported"); + memcpy(&id.vni[1], spec->vni, 3); + vlan_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vlan_id &= id.vlan_id; + } + /* + * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this + * layer is defined in the Verbs specification it is interpreted as + * wildcard and all packets will match this rule, if it follows a full + * stack layer (ex: eth / ipv4 / udp), all packets matching the layers + * before will also match this rule. To avoid such situation, VNI 0 + * is currently refused. + */ + if (!vlan_id) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN-GPE vni cannot be 0"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN-GPE tunnel must be fully" + " defined"); + return 0; +} + +/** + * Validate GRE item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
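+ *
+ * A minimal sketch of an accepted pattern (hypothetical application
+ * code, shown only to illustrate the checks below):
+ *
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_GRE },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * The outer L3 item is mandatory, and when it constrains the IP
+ * protocol it must select GRE (IP protocol 47), otherwise the
+ * target_protocol check below fails.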
+ */
+static int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_gre *spec __rte_unused = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+ int ret;
+
+ if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "a tunnel is already present");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif
+ return 0;
+}
+
+/**
+ * Validate MPLS item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+ uint64_t item_flags __rte_unused,
+ uint8_t target_protocol __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *mask = item->mask;
+ int ret;
+
+ if (target_protocol != 0xff &&
+     target_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "a tunnel is already present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
+/**
+ * Internal validation function.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
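+ *
+ * A sketch of the expected call, as issued by mlx5_flow_validate()
+ * below:
+ *
+ *   struct rte_flow_error error;
+ *   int ret = mlx5_flow_verbs_validate(dev, attr, items, actions,
+ *                                      &error);
+ *   if (ret < 0)
+ *       DRV_LOG(DEBUG, "port %u invalid flow: %s",
+ *               dev->data->port_id, error.message);
+ *
+ * Items are walked first to accumulate item_flags and the L3 next
+ * protocol, then the actions are checked against the detected
+ * layers.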
+ */ +static int mlx5_flow_verbs_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + uint32_t action_flags = 0; + uint32_t item_flags = 0; + int tunnel = 0; + uint8_t next_protocol = 0xff; - id.vni[0] = 0; - parser->inner = IBV_FLOW_SPEC_INNER; - if (spec) { - if (!mask) - mask = default_mask; - memcpy(&id.vni[1], spec->vni, 3); - vxlan.val.tunnel_id = id.vlan_id; - memcpy(&id.vni[1], mask->vni, 3); - vxlan.mask.tunnel_id = id.vlan_id; - /* Remove unwanted bits from values. */ - vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + if (items == NULL) + return -1; + ret = mlx5_flow_validate_attributes(dev, attr, error); + if (ret < 0) + return ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int ret = 0; + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_validate_item_eth(items, item_flags, + error); + if (ret < 0) + return ret; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = mlx5_flow_validate_item_vlan(items, item_flags, + error); + if (ret < 0) + return ret; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_validate_item_ipv4(items, item_flags, + error); + if (ret < 0) + return ret; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + items->mask)->hdr.next_proto_id) + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (items->spec))->hdr.next_proto_id; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = mlx5_flow_validate_item_ipv6(items, item_flags, + error); + if (ret < 0) + return ret; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto) + next_protocol = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_validate_item_udp(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_validate_item_tcp(items, item_flags, + next_protocol, error); + if (ret < 0) + return ret; + item_flags |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_validate_item_vxlan(items, item_flags, + error); + if (ret < 0) + return ret; + item_flags |= MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_validate_item_vxlan_gpe(items, + item_flags, + dev, error); + if (ret < 0) + return ret; + item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_validate_item_gre(items, item_flags, + next_protocol, error); + if (ret < 0) + return ret; + item_flags |= MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_validate_item_mpls(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + if (next_protocol != 0xff && + next_protocol != MLX5_IP_PROTOCOL_MPLS) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, items, + "protocol filtering not compatible" + " with MPLS layer"); + item_flags |= MLX5_FLOW_LAYER_MPLS; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "item not supported"); + } + } + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = mlx5_flow_validate_action_flag(action_flags, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_FLAG; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = mlx5_flow_validate_action_mark(actions, + action_flags, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_validate_action_drop(action_flags, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(actions, + action_flags, dev, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = mlx5_flow_validate_action_rss(actions, + action_flags, dev, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_RSS; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_validate_action_count(dev, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_COUNT; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this - * layer is defined in the Verbs specification it is interpreted as - * wildcard and all packets will match this rule, if it follows a full - * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. - * To avoid such situation, VNI 0 is currently refused. - */ - if (!vxlan.val.tunnel_id) - return EINVAL; - mlx5_flow_create_copy(parser, &vxlan, size); return 0; } /** - * Convert mark/flag action to Verbs specification. + * Validate a flow supported by the NIC. * - * @param parser - * Internal parser structure. - * @param mark_id - * Mark identifier. 
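+ *
+ * Hypothetical application-level usage of the rte_flow entry point
+ * this function implements (not part of this driver):
+ *
+ *   struct rte_flow_error err;
+ *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
+ *       rte_exit(EXIT_FAILURE, "flow not supported: %s\n",
+ *                err.message ? err.message : "unspecified");
+ *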
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
  */
-static int
-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
 {
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- .tag_id = mlx5_flow_mark_set(mark_id),
- };
+ int ret;
 
- assert(parser->mark);
- mlx5_flow_create_copy(parser, &tag, size);
+ ret = mlx5_flow_verbs_validate(dev, attr, items, actions, error);
+ if (ret < 0)
+ return ret;
 	return 0;
 }
 
 /**
- * Convert count action to Verbs specification.
+ * Calculate the required bytes for the action part of the Verbs
+ * flow. In addition, return bit-fields with all the detected
+ * actions, to avoid another iteration over the actions.
  *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Pointer to MLX5 flow parser structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
 *
 * @return
- * 0 on success, errno value on failure.
+ * The size of the memory needed for all actions.
 */
static int
-mlx5_flow_create_count(struct priv *priv __rte_unused,
- struct mlx5_flow_parse *parser __rte_unused)
+mlx5_flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
+ uint64_t *action_flags)
{
+ int size = 0;
+ uint64_t detected_actions = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ size += sizeof(struct ibv_flow_spec_action_drop);
+ detected_actions |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ detected_actions |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ detected_actions |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_counter_set_init_attr init_attr = {0};
- struct ibv_flow_spec_counter_action counter = {
- .type = IBV_FLOW_SPEC_ACTION_COUNT,
- .size = size,
- .counter_set_handle = 0,
- };
-
- init_attr.counter_set_id = 0;
- parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
- if (!parser->cs)
- return EINVAL;
- counter.counter_set_handle = parser->cs->handle;
- mlx5_flow_create_copy(parser, &counter, size);
+ size += sizeof(struct ibv_flow_spec_counter_action);
 #endif
- return 0;
+ detected_actions |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ default:
+ break;
+ }
+ }
+ *action_flags = detected_actions;
+ return size;
 }
 
 /**
- * Complete flow rule creation with a drop queue.
+ * Calculate the required bytes for the item part of the Verbs flow.
+ * In addition, return bit-fields with all the detected items, to
+ * avoid another iteration over the items.
  *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
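+ * For example (a worked case of the switch below), an
+ * eth / ipv4 / udp pattern reserves
+ * sizeof(struct ibv_flow_spec_eth) +
+ * sizeof(struct ibv_flow_spec_ipv4_ext) +
+ * sizeof(struct ibv_flow_spec_tcp_udp), on top of the leading
+ * struct ibv_flow_attr accounted for in mlx5_flow_verbs_prepare().
+ *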
- * @param[out] error - * Perform verbose error reporting if not NULL. + * @param[in] actions + * Pointer to the list of items. + * @param[in, out] item_flags + * Pointer to the detected items. * * @return - * 0 on success, errno value on failure. + * The size of the memory needed for all items. */ static int -priv_flow_create_action_queue_drop(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_verbs_get_items_and_size(const struct rte_flow_item items[], + uint64_t *item_flags) { - struct ibv_flow_spec_action_drop *drop; - unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - int err = 0; - - assert(priv->pd); - assert(priv->ctx); - flow->drop = 1; - drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr + - parser->queue[HASH_RXQ_ETH].offset); - *drop = (struct ibv_flow_spec_action_drop){ - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = size, - }; - ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs; - parser->queue[HASH_RXQ_ETH].offset += size; - flow->frxq[HASH_RXQ_ETH].ibv_attr = - parser->queue[HASH_RXQ_ETH].ibv_attr; - if (parser->count) - flow->cs = parser->cs; - if (!priv->dev->data->dev_started) - return 0; - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - flow->frxq[HASH_RXQ_ETH].ibv_flow = - ibv_create_flow(priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - err = ENOMEM; - goto error; - } - return 0; -error: - assert(flow); - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) { - claim_zero(ibv_destroy_flow(flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - } - if (flow->frxq[HASH_RXQ_ETH].ibv_attr) { - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL; + int size = 0; + uint64_t detected_items = 0; + const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL); + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + size += sizeof(struct ibv_flow_spec_eth); + detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + size += sizeof(struct ibv_flow_spec_eth); + detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + size += sizeof(struct ibv_flow_spec_ipv4_ext); + detected_items |= tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + size += sizeof(struct ibv_flow_spec_ipv6); + detected_items |= tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + size += sizeof(struct ibv_flow_spec_tcp_udp); + detected_items |= tunnel ? + MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + size += sizeof(struct ibv_flow_spec_tcp_udp); + detected_items |= tunnel ? 
+ MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + size += sizeof(struct ibv_flow_spec_tunnel); + detected_items |= MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + size += sizeof(struct ibv_flow_spec_tunnel); + detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GRE: +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + size += sizeof(struct ibv_flow_spec_gre); + detected_items |= MLX5_FLOW_LAYER_GRE; +#else + size += sizeof(struct ibv_flow_spec_tunnel); + detected_items |= MLX5_FLOW_LAYER_TUNNEL; +#endif + break; + case RTE_FLOW_ITEM_TYPE_MPLS: +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + size += sizeof(struct ibv_flow_spec_mpls); + detected_items |= MLX5_FLOW_LAYER_MPLS; +#endif + break; + default: + break; + } } - if (flow->cs) { - claim_zero(ibv_destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; + *item_flags = detected_items; + return size; +} + +/** + * Get RSS action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. + * + * @return + * Pointer to the RSS action if exist, else return NULL. + */ +static const struct rte_flow_action_rss* +mlx5_flow_get_rss_action(const struct rte_flow_action actions[]) +{ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_RSS: + return (const struct rte_flow_action_rss *) + actions->conf; + default: + break; + } } - return err; + return NULL; } /** - * Create hash Rx queues when RSS is enabled. + * Internal preparation function. Allocate mlx5_flow with the required size. + * The required size is calculate based on the actions and items. This function + * also returns the detected actions and items for later use. * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] item_flags + * Pointer to bit mask of all items detected. + * @param[out] action_flags + * Pointer to bit mask of all actions detected. * @param[out] error - * Perform verbose error reporting if not NULL. + * Pointer to the error structure. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno + * is set. 
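+ *
+ * A sketch of the expected usage (error handling added here for
+ * clarity):
+ *
+ *   uint64_t item_flags = 0;
+ *   uint64_t action_flags = 0;
+ *   struct mlx5_flow *dev_flow =
+ *       mlx5_flow_verbs_prepare(attr, items, actions,
+ *                               &item_flags, &action_flags, error);
+ *   if (!dev_flow)
+ *       return NULL;
+ *
+ * The result is a single allocation large enough for the leading
+ * struct ibv_flow_attr plus every Verbs spec the pattern and the
+ * actions will generate.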
*/ -static int -priv_flow_create_action_queue_rss(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +static struct mlx5_flow * +mlx5_flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + uint64_t *item_flags, + uint64_t *action_flags, + struct rte_flow_error *error) { - unsigned int i; + uint32_t size = sizeof(struct ibv_flow_attr); + struct mlx5_flow *flow; - for (i = 0; i != hash_rxq_init_n; ++i) { - uint64_t hash_fields; + size += mlx5_flow_verbs_get_actions_and_size(actions, action_flags); + size += mlx5_flow_verbs_get_items_and_size(items, item_flags); + flow = rte_calloc(__func__, 1, size, 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "not enough memory to create flow"); + return NULL; + } + return flow; +} - if (!parser->queue[i].ibv_attr) - continue; - flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr; - parser->queue[i].ibv_attr = NULL; - hash_fields = hash_rxq_init[i].hash_fields; - if (!priv->dev->data->dev_started) - continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); - if (flow->frxq[i].hrxq) - continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); - if (!flow->frxq[i].hrxq) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot create hash rxq"); - return ENOMEM; +/** + * Remove the flow. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + + if (flow->nl_flow && priv->mnl_socket) + mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL); + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->flow) { + claim_zero(mlx5_glue->destroy_flow(verbs->flow)); + verbs->flow = NULL; + } + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; } } - return 0; + if (flow->counter) { + mlx5_flow_counter_release(flow->counter); + flow->counter = NULL; + } } /** - * Complete flow rule creation. + * Apply the flow. * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. * @param[out] error - * Perform verbose error reporting if not NULL. + * Pointer to error structure. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
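+ *
+ * mlx5_flow_apply() and mlx5_flow_remove() are paired; a sketch of
+ * the sequence used by mlx5_flow_list_create() below:
+ *
+ *   if (dev->data->dev_started) {
+ *       if (mlx5_flow_apply(dev, flow, error) < 0) {
+ *           mlx5_flow_remove(dev, flow);
+ *           rte_free(flow);
+ *           return NULL;
+ *       }
+ *   }
+ *
+ * Each Verbs sub-flow first acquires a hash RX queue (a drop queue
+ * for the DROP fate, a regular hrxq otherwise), then creates the
+ * Verbs flow rule on that queue's QP.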
*/ static int -priv_flow_create_action_queue(struct priv *priv, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) { - int err = 0; - unsigned int i; + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + int err; - assert(priv->pd); - assert(priv->ctx); - assert(!parser->drop); - err = priv_flow_create_action_queue_rss(priv, parser, flow, error); - if (err) - goto error; - if (parser->count) - flow->cs = parser->cs; - if (!priv->dev->data->dev_started) - return 0; - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].hrxq) - continue; - flow->frxq[i].ibv_flow = - ibv_create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - if (!flow->frxq[i].ibv_flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - err = ENOMEM; + LIST_FOREACH(verbs, &flow->verbs, next) { + if (flow->fate & MLX5_FLOW_FATE_DROP) { + verbs->hrxq = mlx5_hrxq_drop_new(dev); + if (!verbs->hrxq) { + rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get drop hash queue"); + goto error; + } + } else { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_hrxq_get(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num); + if (!hrxq) + hrxq = mlx5_hrxq_new(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num, + !!(flow->layers & + MLX5_FLOW_LAYER_TUNNEL)); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get hash queue"); + goto error; + } + verbs->hrxq = hrxq; + } + verbs->flow = + mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr); + if (!verbs->flow) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); goto error; } - DEBUG("%p type %d QP %p ibv_flow %p", - (void *)flow, i, - (void *)flow->frxq[i].hrxq, - (void *)flow->frxq[i].ibv_flow); - } - for (i = 0; i != parser->queues_n; ++i) { - struct mlx5_rxq_data *q = - (*priv->rxqs)[parser->queues[i]]; - - q->mark |= parser->mark; } + if (flow->nl_flow && + priv->mnl_socket && + mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error)) + goto error; return 0; error: - assert(flow); - for (i = 0; i != hash_rxq_init_n; ++i) { - if (flow->frxq[i].ibv_flow) { - struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow; - - claim_zero(ibv_destroy_flow(ibv_flow)); + err = rte_errno; /* Save rte_errno before cleanup. */ + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; } - if (flow->frxq[i].hrxq) - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); - if (flow->frxq[i].ibv_attr) - rte_free(flow->frxq[i].ibv_attr); } - if (flow->cs) { - claim_zero(ibv_destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; - } - return err; + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** - * Convert a flow. + * Create a flow and add it to @p list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] attr * Flow rule attributes. - * @param[in] pattern + * @param[in] items * Pattern specification (list terminated by the END pattern item). 
* @param[in] actions * Associated actions (list terminated by the END action). @@ -1863,84 +3812,90 @@ error: * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise. + * A flow on success, NULL otherwise and rte_errno is set. */ static struct rte_flow * -priv_flow_create(struct priv *priv, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +mlx5_flow_list_create(struct rte_eth_dev *dev, + struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; - unsigned int i; - int err; + struct mlx5_flow *dev_flow; + size_t size = 0; + uint64_t action_flags = 0; + uint64_t item_flags = 0; + const struct rte_flow_action_rss *rss; + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; + int ret; + uint32_t i; - err = priv_flow_convert(priv, attr, items, actions, error, &parser); - if (err) - goto exit; - flow = rte_calloc(__func__, 1, - sizeof(*flow) + parser.queues_n * sizeof(uint16_t), - 0); + ret = mlx5_flow_validate(dev, attr, items, actions, error); + if (ret < 0) + return NULL; + flow = rte_calloc(__func__, 1, sizeof(*flow), 0); + LIST_INIT(&flow->dev_flows); + rss = mlx5_flow_get_rss_action(actions); + if (rss && rss->types) { + unsigned int graph_root; + + graph_root = mlx5_find_graph_root(items, rss->level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + items, rss->types, + mlx5_support_expansion, + graph_root); + assert(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)items; + } + for (i = 0; i < buf->entries; ++i) { + dev_flow = mlx5_flow_verbs_prepare(attr, buf->entry[i].pattern, + actions, &item_flags, + &action_flags, error); + dev_flow->flow = flow; + LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + } + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) + return NULL; + size = ret; + flow = rte_calloc(__func__, 1, size, 0); if (!flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate flow memory"); + "not enough memory to create flow"); return NULL; } - /* Copy queues configuration. */ - flow->queues = (uint16_t (*)[])(flow + 1); - memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t)); - flow->queues_n = parser.queues_n; - flow->mark = parser.mark; - /* Copy RSS configuration. */ - flow->rss_conf = parser.rss_conf; - flow->rss_conf.rss_key = flow->rss_key; - memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); - /* finalise the flow. */ - if (parser.drop) - err = priv_flow_create_action_queue_drop(priv, &parser, flow, - error); - else - err = priv_flow_create_action_queue(priv, &parser, flow, error); - if (err) - goto exit; + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) { + rte_free(flow); + return NULL; + } + assert((size_t)ret == size); + if (dev->data->dev_started) { + ret = mlx5_flow_apply(dev, flow, error); + if (ret < 0) { + ret = rte_errno; /* Save rte_errno before cleanup. 
*/ + if (flow) { + mlx5_flow_remove(dev, flow); + rte_free(flow); + } + rte_errno = ret; /* Restore rte_errno. */ + return NULL; + } + } TAILQ_INSERT_TAIL(list, flow, next); - DEBUG("Flow created %p", (void *)flow); + mlx5_flow_rxq_flags_set(dev, flow); return flow; -exit: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser.queue[i].ibv_attr) - rte_free(parser.queue[i].ibv_attr); - } - rte_free(flow); - return NULL; -} - -/** - * Validate a flow supported by the NIC. - * - * @see rte_flow_validate() - * @see rte_flow_ops - */ -int -mlx5_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct priv *priv = dev->data->dev_private; - int ret; - struct mlx5_flow_parse parser = { .create = 0, }; - - priv_lock(priv); - ret = priv_flow_convert(priv, attr, items, actions, error, &parser); - priv_unlock(priv); - return ret; } /** @@ -1956,364 +3911,123 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - struct rte_flow *flow; - - priv_lock(priv); - flow = priv_flow_create(priv, &priv->flows, attr, items, actions, - error); - priv_unlock(priv); - return flow; + return mlx5_flow_list_create + (dev, &((struct priv *)dev->data->dev_private)->flows, + attr, items, actions, error); } /** - * Destroy a flow. + * Destroy a flow in a list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] flow * Flow to destroy. */ static void -priv_flow_destroy(struct priv *priv, - struct mlx5_flows *list, - struct rte_flow *flow) +mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { - unsigned int i; - - if (flow->drop || !flow->mark) - goto free; - for (i = 0; i != flow->queues_n; ++i) { - struct rte_flow *tmp; - int mark = 0; - - /* - * To remove the mark from the queue, the queue must not be - * present in any other marked flow (RSS or not). - */ - TAILQ_FOREACH(tmp, list, next) { - unsigned int j; - uint16_t *tqs = NULL; - uint16_t tq_n = 0; - - if (!tmp->mark) - continue; - for (j = 0; j != hash_rxq_init_n; ++j) { - if (!tmp->frxq[j].hrxq) - continue; - tqs = tmp->frxq[j].hrxq->ind_table->queues; - tq_n = tmp->frxq[j].hrxq->ind_table->queues_n; - } - if (!tq_n) - continue; - for (j = 0; (j != tq_n) && !mark; j++) - if (tqs[j] == (*flow->queues)[i]) - mark = 1; - } - (*priv->rxqs)[(*flow->queues)[i]]->mark = mark; - } -free: - if (flow->drop) { - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) - claim_zero(ibv_destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - struct mlx5_flow *frxq = &flow->frxq[i]; - - if (frxq->ibv_flow) - claim_zero(ibv_destroy_flow(frxq->ibv_flow)); - if (frxq->hrxq) - mlx5_priv_hrxq_release(priv, frxq->hrxq); - if (frxq->ibv_attr) - rte_free(frxq->ibv_attr); - } - } - if (flow->cs) { - claim_zero(ibv_destroy_counter_set(flow->cs)); - flow->cs = NULL; - } + mlx5_flow_remove(dev, flow); TAILQ_REMOVE(list, flow, next); - DEBUG("Flow destroyed %p", (void *)flow); + /* + * Update RX queue flags only if port is started, otherwise it is + * already clean. + */ + if (dev->data->dev_started) + mlx5_flow_rxq_flags_trim(dev, flow); rte_free(flow); } /** * Destroy all flows. 
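+ *
+ * A sketch of the expected use at device close time (an assumption
+ * based on the flow lists kept in struct priv, not a requirement):
+ *
+ *   mlx5_flow_list_flush(dev, &priv->flows);
+ *   mlx5_flow_list_flush(dev, &priv->ctrl_flows);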
* - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_flush(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) { while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); - priv_flow_destroy(priv, list, flow); - } -} - -/** - * Create drop queue. - * - * @param priv - * Pointer to private structure. - * - * @return - * 0 on success. - */ -int -priv_flow_create_drop_queue(struct priv *priv) -{ - struct mlx5_hrxq_drop *fdq = NULL; - - assert(priv->pd); - assert(priv->ctx); - fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); - if (!fdq) { - WARN("cannot allocate memory for drop queue"); - goto error; - } - fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0); - if (!fdq->cq) { - WARN("cannot allocate CQ for drop queue"); - goto error; - } - fdq->wq = ibv_create_wq(priv->ctx, - &(struct ibv_wq_init_attr){ - .wq_type = IBV_WQT_RQ, - .max_wr = 1, - .max_sge = 1, - .pd = priv->pd, - .cq = fdq->cq, - }); - if (!fdq->wq) { - WARN("cannot allocate WQ for drop queue"); - goto error; - } - fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx, - &(struct ibv_rwq_ind_table_init_attr){ - .log_ind_tbl_size = 0, - .ind_tbl = &fdq->wq, - .comp_mask = 0, - }); - if (!fdq->ind_table) { - WARN("cannot allocate indirection table for drop queue"); - goto error; - } - fdq->qp = ibv_create_qp_ex(priv->ctx, - &(struct ibv_qp_init_attr_ex){ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = - IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH, - .rx_hash_conf = (struct ibv_rx_hash_conf){ - .rx_hash_function = - IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_hash_default_key_len, - .rx_hash_key = rss_hash_default_key, - .rx_hash_fields_mask = 0, - }, - .rwq_ind_tbl = fdq->ind_table, - .pd = priv->pd - }); - if (!fdq->qp) { - WARN("cannot allocate QP for drop queue"); - goto error; + mlx5_flow_list_destroy(dev, list, flow); } - priv->flow_drop_queue = fdq; - return 0; -error: - if (fdq->qp) - claim_zero(ibv_destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(ibv_destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(ibv_destroy_cq(fdq->cq)); - if (fdq) - rte_free(fdq); - priv->flow_drop_queue = NULL; - return -1; -} - -/** - * Delete drop queue. - * - * @param priv - * Pointer to private structure. - */ -void -priv_flow_delete_drop_queue(struct priv *priv) -{ - struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; - - if (!fdq) - return; - if (fdq->qp) - claim_zero(ibv_destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(ibv_destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(ibv_destroy_cq(fdq->cq)); - rte_free(fdq); - priv->flow_drop_queue = NULL; } /** * Remove all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_stop(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { struct rte_flow *flow; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { - unsigned int i; - - if (flow->drop) { - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) - continue; - claim_zero(ibv_destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - /* Next flow. 
*/ - continue; - } - if (flow->mark) { - struct mlx5_ind_table_ibv *ind_tbl = NULL; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].hrxq) - continue; - ind_tbl = flow->frxq[i].hrxq->ind_table; - } - assert(ind_tbl); - for (i = 0; i != ind_tbl->queues_n; ++i) - (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_flow) - continue; - claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow)); - flow->frxq[i].ibv_flow = NULL; - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); - flow->frxq[i].hrxq = NULL; - } - DEBUG("Flow %p removed", (void *)flow); - } + TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) + mlx5_flow_remove(dev, flow); + mlx5_flow_rxq_flags_clear(dev); } /** * Add all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_flow_start(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { struct rte_flow *flow; + struct rte_flow_error error; + int ret = 0; - TAILQ_FOREACH(flow, list, next) { - unsigned int i; - - if (flow->drop) { - flow->frxq[HASH_RXQ_ETH].ibv_flow = - ibv_create_flow - (priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } - DEBUG("Flow %p applied", (void *)flow); - /* Next flow. */ - continue; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_attr) - continue; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); - if (flow->frxq[i].hrxq) - goto flow_create; - flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); - if (!flow->frxq[i].hrxq) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } -flow_create: - flow->frxq[i].ibv_flow = - ibv_create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - if (!flow->frxq[i].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); - rte_errno = EINVAL; - return rte_errno; - } - DEBUG("Flow %p applied", (void *)flow); - } - if (!flow->mark) - continue; - for (i = 0; i != flow->queues_n; ++i) - (*priv->rxqs)[(*flow->queues)[i]]->mark = 1; + TAILQ_FOREACH(flow, list, next) { + ret = mlx5_flow_apply(dev, flow, &error); + if (ret < 0) + goto error; + mlx5_flow_rxq_flags_set(dev, flow); } return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_stop(dev, list); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return the number of flows not released. 
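+ *
+ * Intended as a leak check at close time, e.g. (sketch):
+ *
+ *   if (mlx5_flow_verify(dev))
+ *       DRV_LOG(WARNING, "port %u some flows still remain",
+ *               dev->data->port_id);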
*/ int -priv_flow_verify(struct priv *priv) +mlx5_flow_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; TAILQ_FOREACH(flow, &priv->flows, next) { - DEBUG("%p: flow %p still referenced", (void *)priv, - (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); ++ret; } return ret; @@ -2334,7 +4048,7 @@ priv_flow_verify(struct priv *priv) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2346,7 +4060,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_CTRL_FLOW_PRIORITY, + .priority = MLX5_FLOW_PRIO_RSVD, }; struct rte_flow_item items[] = { { @@ -2357,7 +4071,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, }, { .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : - RTE_FLOW_ITEM_TYPE_END, + RTE_FLOW_ITEM_TYPE_END, .spec = vlan_spec, .last = NULL, .mask = vlan_mask, @@ -2366,9 +4080,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ITEM_TYPE_END, }, }; + uint16_t queue[priv->reta_idx_n]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = priv->rss_conf.rss_hf, + .key_len = priv->rss_conf.rss_key_len, + .queue_num = priv->reta_idx_n, + .key = priv->rss_conf.rss_key, + .queue = queue, + }; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = &action_rss, }, { .type = RTE_FLOW_ACTION_TYPE_END, @@ -2377,26 +4102,17 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow *flow; struct rte_flow_error error; unsigned int i; - union { - struct rte_flow_action_rss rss; - struct { - const struct rte_eth_rss_conf *rss_conf; - uint16_t num; - uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; - } local; - } action_rss; - - if (!priv->reta_idx_n) - return EINVAL; + + if (!priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } for (i = 0; i != priv->reta_idx_n; ++i) - action_rss.local.queue[i] = (*priv->reta_idx)[i]; - action_rss.local.rss_conf = &priv->rss_conf; - action_rss.local.num = priv->reta_idx_n; - actions[0].conf = (const void *)&action_rss.rss; - flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, - &error); + queue[i] = (*priv->reta_idx)[i]; + flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, + actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2411,7 +4127,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
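+ *
+ * A sketch of a typical call, e.g. for broadcast traffic as
+ * mlx5_traffic_enable() is assumed to do in mlx5_trigger.c:
+ *
+ *   struct rte_flow_item_eth bcast = {
+ *       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ *   };
+ *   ret = mlx5_ctrl_flow(dev, &bcast, &bcast);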
*/ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2430,14 +4146,11 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_destroy(priv, &priv->flows, flow); - priv_unlock(priv); + mlx5_flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2449,152 +4162,161 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, */ int mlx5_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_flush(priv, &priv->flows); - priv_unlock(priv); + mlx5_flow_list_flush(dev, &priv->flows); return 0; } -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT /** - * Query flow counter. - * - * @param cs - * the counter set. - * @param counter_value - * returned data from the counter. + * Isolated mode. * - * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * @see rte_flow_isolate() + * @see rte_flow_ops */ -static int -priv_flow_query_count(struct ibv_counter_set *cs, - struct mlx5_flow_counter_stats *counter_stats, - struct rte_flow_query_count *query_count, - struct rte_flow_error *error) +int +mlx5_flow_isolate(struct rte_eth_dev *dev, + int enable, + struct rte_flow_error *error) { - uint64_t counters[2]; - struct ibv_query_counter_set_attr query_cs_attr = { - .cs = cs, - .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, - }; - struct ibv_counter_set_data query_out = { - .out = counters, - .outlen = 2 * sizeof(uint64_t), - }; - int res = ibv_query_counter_set(&query_cs_attr, &query_out); + struct priv *priv = dev->data->dev_private; - if (res) { - rte_flow_error_set(error, -res, + if (dev->data->dev_started) { + rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot read counter"); - return -res; - } - query_count->hits_set = 1; - query_count->bytes_set = 1; - query_count->hits = counters[0] - counter_stats->hits; - query_count->bytes = counters[1] - counter_stats->bytes; - if (query_count->reset) { - counter_stats->hits = counters[0]; - counter_stats->bytes = counters[1]; + "port must be stopped first"); + return -rte_errno; } + priv->isolated = !!enable; + if (enable) + dev->dev_ops = &mlx5_dev_ops_isolate; + else + dev->dev_ops = &mlx5_dev_ops; return 0; } /** - * Query a flows. + * Query flow counter. * - * @see rte_flow_query() - * @see rte_flow_ops + * @param flow + * Pointer to the flow. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
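+ *
+ * The data argument must point to a struct rte_flow_query_count;
+ * hypothetical application usage through the generic API:
+ *
+ *   struct rte_flow_action query_actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_COUNT },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *   struct rte_flow_query_count qc = { .reset = 1 };
+ *   ret = rte_flow_query(port_id, flow, query_actions, &qc, &err);
+ *
+ * On success qc.hits and qc.bytes hold the packet and byte counts
+ * accumulated since the last reset.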
 */
-int
-mlx5_flow_query(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- enum rte_flow_action_type action __rte_unused,
- void *data,
- struct rte_flow_error *error)
+static int
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
 {
- struct priv *priv = dev->data->dev_private;
- int res = EINVAL;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
 
- priv_lock(priv);
- if (flow->cs) {
- res = priv_flow_query_count(flow->cs,
- &flow->counter_stats,
- (struct rte_flow_query_count *)data,
- error);
- } else {
- rte_flow_error_set(error, res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no counter found for flow");
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
 	}
- priv_unlock(priv);
- return -res;
-}
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
 #endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+}
 
 /**
- * Isolated mode.
+ * Query a flow.
  *
- * @see rte_flow_isolate()
+ * @see rte_flow_query()
 * @see rte_flow_ops
  */
 int
-mlx5_flow_isolate(struct rte_eth_dev *dev,
- int enable,
- struct rte_flow_error *error)
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
 {
- struct priv *priv = dev->data->dev_private;
+ int ret = 0;
 
- priv_lock(priv);
- if (dev->data->dev_started) {
- rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "port must be stopped first");
- priv_unlock(priv);
- return -rte_errno;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_query_count(flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
 	}
- priv->isolated = !!enable;
- if (enable)
- priv->dev->dev_ops = &mlx5_dev_ops_isolate;
- else
- priv->dev->dev_ops = &mlx5_dev_ops;
- priv_unlock(priv);
 	return 0;
 }
 
 /**
  * Convert a flow director filter to a generic flow.
  *
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
  *
  * @return
- * 0 on success, errno value on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
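+ *
+ * For instance, a hypothetical IPv4/UDP flow director filter such
+ * as:
+ *
+ *   struct rte_eth_fdir_filter f = {
+ *       .input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ *       .input.flow.udp4_flow.ip.dst_ip = RTE_BE32(0x0a000001),
+ *       .input.flow.udp4_flow.dst_port = RTE_BE16(4789),
+ *       .action.rx_queue = 3,
+ *   };
+ *
+ * becomes an eth / ipv4 / udp pattern with a QUEUE action, the masks
+ * being taken from dev->data->dev_conf.fdir_conf.mask.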
*/ static int -priv_fdir_filter_convert(struct priv *priv, +mlx5_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { + struct priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; + const struct rte_eth_fdir_masks *mask = + &dev->data->dev_conf.fdir_conf.mask; /* Validate queue number. */ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { - ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + DRV_LOG(ERR, "port %u invalid queue number %d", + dev->data->port_id, fdir_filter->action.rx_queue); + rte_errno = EINVAL; + return -rte_errno; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2615,134 +4337,139 @@ priv_fdir_filter_convert(struct priv *priv, }; break; default: - ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + DRV_LOG(ERR, "port %u invalid behavior %d", + dev->data->port_id, + fdir_filter->action.behavior); + rte_errno = ENOTSUP; + return -rte_errno; } attributes->queue.index = fdir_filter->action.rx_queue; + /* Handle L3. */ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.udp4_flow.ip.src_ip, - .dst_addr = input->flow.udp4_flow.ip.dst_ip, - .time_to_live = input->flow.udp4_flow.ip.ttl, - .type_of_service = input->flow.udp4_flow.ip.tos, - .next_proto_id = input->flow.udp4_flow.ip.proto, + .src_addr = input->flow.ip4_flow.src_ip, + .dst_addr = input->flow.ip4_flow.dst_ip, + .time_to_live = input->flow.ip4_flow.ttl, + .type_of_service = input->flow.ip4_flow.tos, }; - attributes->l4.udp.hdr = (struct udp_hdr){ - .src_port = input->flow.udp4_flow.src_port, - .dst_port = input->flow.udp4_flow.dst_port, + attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + .src_addr = mask->ipv4_mask.src_ip, + .dst_addr = mask->ipv4_mask.dst_ip, + .time_to_live = mask->ipv4_mask.ttl, + .type_of_service = mask->ipv4_mask.tos, + .next_proto_id = mask->ipv4_mask.proto, }; attributes->items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + .hop_limits = input->flow.ipv6_flow.hop_limits, + .proto = input->flow.ipv6_flow.proto, + }; + + memcpy(attributes->l3.ipv6.hdr.src_addr, + input->flow.ipv6_flow.src_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3.ipv6.hdr.dst_addr, + input->flow.ipv6_flow.dst_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.src_addr, + mask->ipv6_mask.src_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, + mask->ipv6_mask.dst_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + attributes->items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + default: + DRV_LOG(ERR, "port %u invalid flow type%d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + /* Handle L4. 
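Only the source and destination L4 ports are matched below; the port masks come from the global fdir configuration rather than from the filter itself.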
+	switch (fdir_filter->input.flow_type) {
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+		attributes->l4.udp.hdr = (struct udp_hdr){
+			.src_port = input->flow.udp4_flow.src_port,
+			.dst_port = input->flow.udp4_flow.dst_port,
+		};
+		attributes->l4_mask.udp.hdr = (struct udp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
 		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
-			.src_addr = input->flow.tcp4_flow.ip.src_ip,
-			.dst_addr = input->flow.tcp4_flow.ip.dst_ip,
-			.time_to_live = input->flow.tcp4_flow.ip.ttl,
-			.type_of_service = input->flow.tcp4_flow.ip.tos,
-			.next_proto_id = input->flow.tcp4_flow.ip.proto,
-		};
 		attributes->l4.tcp.hdr = (struct tcp_hdr){
 			.src_port = input->flow.tcp4_flow.src_port,
 			.dst_port = input->flow.tcp4_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV4,
-			.spec = &attributes->l3,
+		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
 		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
-		};
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
-			.src_addr = input->flow.ip4_flow.src_ip,
-			.dst_addr = input->flow.ip4_flow.dst_ip,
-			.time_to_live = input->flow.ip4_flow.ttl,
-			.type_of_service = input->flow.ip4_flow.tos,
-			.next_proto_id = input->flow.ip4_flow.proto,
-		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV4,
-			.spec = &attributes->l3,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.udp6_flow.ip.hop_limits,
-			.proto = input->flow.udp6_flow.ip.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.udp6_flow.ip.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.udp6_flow.ip.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
 		attributes->l4.udp.hdr = (struct udp_hdr){
 			.src_port = input->flow.udp6_flow.src_port,
 			.dst_port = input->flow.udp6_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
+		attributes->l4_mask.udp.hdr = (struct udp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
 		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_UDP,
 			.spec = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.tcp6_flow.ip.hop_limits,
-			.proto = input->flow.tcp6_flow.ip.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.tcp6_flow.ip.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.tcp6_flow.ip.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
 		attributes->l4.tcp.hdr = (struct tcp_hdr){
 			.src_port = input->flow.tcp6_flow.src_port,
 			.dst_port = input->flow.tcp6_flow.dst_port,
 		};
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
+		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+			.src_port = mask->src_port_mask,
+			.dst_port = mask->dst_port_mask,
 		};
 		attributes->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_TCP,
 			.spec = &attributes->l4,
+			.mask = &attributes->l4_mask,
 		};
 		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
-			.hop_limits = input->flow.ipv6_flow.hop_limits,
-			.proto = input->flow.ipv6_flow.proto,
-		};
-		memcpy(attributes->l3.ipv6.hdr.src_addr,
-		       input->flow.ipv6_flow.src_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		memcpy(attributes->l3.ipv6.hdr.dst_addr,
-		       input->flow.ipv6_flow.dst_ip,
-		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
-		attributes->items[1] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &attributes->l3,
-		};
 		break;
 	default:
-		ERROR("invalid flow type%d",
-		      fdir_filter->input.flow_type);
-		return ENOTSUP;
+		DRV_LOG(ERR, "port %u invalid flow type %d",
+			dev->data->port_id, fdir_filter->input.flow_type);
+		rte_errno = ENOTSUP;
+		return -rte_errno;
 	}
 	return 0;
 }
@@ -2750,18 +4477,19 @@ priv_fdir_filter_convert(struct priv *priv,
 /**
  * Add new flow director filter and store it in list.
  *
- * @param priv
- *   Private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param fdir_filter
  *   Flow director filter to add.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_fdir_filter_add(struct priv *priv,
+mlx5_fdir_filter_add(struct rte_eth_dev *dev,
 		     const struct rte_eth_fdir_filter *fdir_filter)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_fdir attributes = {
 		.attr.group = 0,
 		.l2_mask = {
@@ -2770,181 +4498,96 @@ priv_fdir_filter_add(struct priv *priv,
 			.type = 0,
 		},
 	};
-	struct mlx5_flow_parse parser = {
-		.layer = HASH_RXQ_ETH,
-	};
 	struct rte_flow_error error;
 	struct rte_flow *flow;
 	int ret;
 
-	ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
 	if (ret)
-		return -ret;
-	ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
-				attributes.actions, &error, &parser);
-	if (ret)
-		return -ret;
-	flow = priv_flow_create(priv,
-				&priv->flows,
-				&attributes.attr,
-				attributes.items,
-				attributes.actions,
-				&error);
+		return ret;
+	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
+				     attributes.items, attributes.actions,
+				     &error);
 	if (flow) {
-		DEBUG("FDIR created %p", (void *)flow);
+		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+			(void *)flow);
 		return 0;
 	}
-	return ENOTSUP;
+	return -rte_errno;
 }
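
/*
 * Illustrative caller-side sketch (not part of this patch): a perfect-mode
 * IPv4/UDP flow director filter as mlx5_fdir_filter_convert() consumes it.
 * Addresses, ports and the queue index are made up; per-field masks are
 * taken from dev_conf.fdir_conf.mask at conversion time, not from the
 * filter itself.
 */
#include <rte_byteorder.h>
#include <rte_ethdev.h>

static int
example_fdir_add(uint16_t port_id)
{
	struct rte_eth_fdir_filter filter = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.ip = {
					.src_ip = rte_cpu_to_be_32(0x0a000001),
					.dst_ip = rte_cpu_to_be_32(0x0a000002),
				},
				.src_port = rte_cpu_to_be_16(1024),
				.dst_port = rte_cpu_to_be_16(4789),
			},
		},
		.action = {
			.rx_queue = 3,
			.behavior = RTE_ETH_FDIR_ACCEPT,
		},
	};

	/* Dispatches to mlx5_fdir_filter_add() via mlx5_fdir_ctrl_func(). */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &filter);
}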
 
 /**
  * Delete specific filter.
  *
- * @param priv
- *   Private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param fdir_filter
  *   Filter to be deleted.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_fdir_filter_delete(struct priv *priv,
-			const struct rte_eth_fdir_filter *fdir_filter)
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
+			const struct rte_eth_fdir_filter *fdir_filter
+			__rte_unused)
 {
-	struct mlx5_fdir attributes = {
-		.attr.group = 0,
-	};
-	struct mlx5_flow_parse parser = {
-		.create = 1,
-		.layer = HASH_RXQ_ETH,
-	};
-	struct rte_flow_error error;
-	struct rte_flow *flow;
-	unsigned int i;
-	int ret;
-
-	ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
-	if (ret)
-		return -ret;
-	ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
-				attributes.actions, &error, &parser);
-	if (ret)
-		goto exit;
-	/*
-	 * Special case for drop action which is only set in the
-	 * specifications when the flow is created. In this situation the
-	 * drop specification is missing.
-	 */
-	if (parser.drop) {
-		struct ibv_flow_spec_action_drop *drop;
-
-		drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
-				parser.queue[HASH_RXQ_ETH].offset);
-		*drop = (struct ibv_flow_spec_action_drop){
-			.type = IBV_FLOW_SPEC_ACTION_DROP,
-			.size = sizeof(struct ibv_flow_spec_action_drop),
-		};
-		parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
-	}
-	TAILQ_FOREACH(flow, &priv->flows, next) {
-		struct ibv_flow_attr *attr;
-		struct ibv_spec_header *attr_h;
-		void *spec;
-		struct ibv_flow_attr *flow_attr;
-		struct ibv_spec_header *flow_h;
-		void *flow_spec;
-		unsigned int specs_n;
-
-		attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
-		flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
-		/* Compare first the attributes. */
-		if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
-			continue;
-		if (attr->num_of_specs == 0)
-			continue;
-		spec = (void *)((uintptr_t)attr +
-				sizeof(struct ibv_flow_attr));
-		flow_spec = (void *)((uintptr_t)flow_attr +
-				     sizeof(struct ibv_flow_attr));
-		specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
-		for (i = 0; i != specs_n; ++i) {
-			attr_h = spec;
-			flow_h = flow_spec;
-			if (memcmp(spec, flow_spec,
-				   RTE_MIN(attr_h->size, flow_h->size)))
-				goto wrong_flow;
-			spec = (void *)((uintptr_t)spec + attr_h->size);
-			flow_spec = (void *)((uintptr_t)flow_spec +
-					     flow_h->size);
-		}
-		/* At this point, the flow match. */
-		break;
-wrong_flow:
-		/* The flow does not match. */
-		continue;
-	}
-	if (flow)
-		priv_flow_destroy(priv, &priv->flows, flow);
-exit:
-	for (i = 0; i != hash_rxq_init_n; ++i) {
-		if (parser.queue[i].ibv_attr)
-			rte_free(parser.queue[i].ibv_attr);
-	}
-	return -ret;
+	rte_errno = ENOTSUP;
+	return -rte_errno;
 }
 
 /**
  * Update queue for specific filter.
  *
- * @param priv
- *   Private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param fdir_filter
  *   Filter to be updated.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_fdir_filter_update(struct priv *priv,
+mlx5_fdir_filter_update(struct rte_eth_dev *dev,
 			const struct rte_eth_fdir_filter *fdir_filter)
 {
 	int ret;
 
-	ret = priv_fdir_filter_delete(priv, fdir_filter);
+	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
 	if (ret)
 		return ret;
-	ret = priv_fdir_filter_add(priv, fdir_filter);
-	return ret;
+	return mlx5_fdir_filter_add(dev, fdir_filter);
 }
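
/*
 * Editorial note, shown as a caller-side sketch (not part of this patch):
 * mlx5_fdir_filter_update() is delete-then-add, and the delete above is a
 * stub, so RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_UPDATE now appear to
 * propagate -ENOTSUP. A caller that must replace a filter can fall back
 * to flush-and-re-add:
 */
#include <errno.h>
#include <rte_ethdev.h>

static int
example_fdir_replace(uint16_t port_id, struct rte_eth_fdir_filter *filter)
{
	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
					  RTE_ETH_FILTER_UPDATE, filter);

	if (ret != -ENOTSUP)
		return ret;
	/* Drop all filters, then re-install the one being updated. */
	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				      RTE_ETH_FILTER_FLUSH, NULL);
	if (ret)
		return ret;
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, filter);
}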
 
 /**
  * Flush all filters.
  *
- * @param priv
- *   Private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  */
 static void
-priv_fdir_filter_flush(struct priv *priv)
+mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
 {
-	priv_flow_flush(priv, &priv->flows);
+	struct priv *priv = dev->data->dev_private;
+
+	mlx5_flow_list_flush(dev, &priv->flows);
 }
 
 /**
  * Get flow director information.
  *
- * @param priv
- *   Private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param[out] fdir_info
  *   Resulting flow director information.
  */
 static void
-priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
 {
 	struct rte_eth_fdir_masks *mask =
-		&priv->dev->data->dev_conf.fdir_conf.mask;
+		&dev->data->dev_conf.fdir_conf.mask;
 
-	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
 	fdir_info->guarant_spc = 0;
 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
 	fdir_info->max_flexpayload = 0;
@@ -2958,54 +4601,52 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
 /**
  * Deal with flow director operations.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param filter_op
  *   Operation to perform.
  * @param arg
  *   Pointer to operation-specific structure.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+		    void *arg)
 {
 	enum rte_fdir_mode fdir_mode =
-		priv->dev->data->dev_conf.fdir_conf.mode;
-	int ret = 0;
+		dev->data->dev_conf.fdir_conf.mode;
 
 	if (filter_op == RTE_ETH_FILTER_NOP)
 		return 0;
 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
-		ERROR("%p: flow director mode %d not supported",
-		      (void *)priv, fdir_mode);
-		return EINVAL;
+		DRV_LOG(ERR, "port %u flow director mode %d not supported",
+			dev->data->port_id, fdir_mode);
+		rte_errno = EINVAL;
+		return -rte_errno;
 	}
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
-		ret = priv_fdir_filter_add(priv, arg);
-		break;
+		return mlx5_fdir_filter_add(dev, arg);
 	case RTE_ETH_FILTER_UPDATE:
-		ret = priv_fdir_filter_update(priv, arg);
-		break;
+		return mlx5_fdir_filter_update(dev, arg);
 	case RTE_ETH_FILTER_DELETE:
-		ret = priv_fdir_filter_delete(priv, arg);
-		break;
+		return mlx5_fdir_filter_delete(dev, arg);
 	case RTE_ETH_FILTER_FLUSH:
-		priv_fdir_filter_flush(priv);
+		mlx5_fdir_filter_flush(dev);
 		break;
 	case RTE_ETH_FILTER_INFO:
-		priv_fdir_info_get(priv, arg);
+		mlx5_fdir_info_get(dev, arg);
 		break;
 	default:
-		DEBUG("%p: unknown operation %u", (void *)priv,
-		      filter_op);
-		ret = EINVAL;
-		break;
+		DRV_LOG(DEBUG, "port %u unknown operation %u",
+			dev->data->port_id, filter_op);
+		rte_errno = EINVAL;
+		return -rte_errno;
 	}
-	return ret;
+	return 0;
 }
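
/*
 * Caller-side sketch (not part of this patch) of driving
 * mlx5_fdir_ctrl_func() through the ethdev filter API:
 * RTE_ETH_FILTER_INFO fills an rte_eth_fdir_info with the configured mode
 * and masks; anything but NOP requires the port to be configured in one
 * of the perfect modes.
 */
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
example_fdir_info(uint16_t port_id)
{
	struct rte_eth_fdir_info info;

	memset(&info, 0, sizeof(info));
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				    RTE_ETH_FILTER_INFO, &info) == 0)
		printf("FDIR mode: %d\n", (int)info.mode);
}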
 
 /**
@@ -3021,7 +4662,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
  *   Pointer to operation-specific structure.
  *
  * @return
- *   0 on success, negative errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
@@ -3029,24 +4670,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 		     enum rte_filter_op filter_op,
 		     void *arg)
 {
-	int ret = EINVAL;
-	struct priv *priv = dev->data->dev_private;
-
 	switch (filter_type) {
 	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
+		if (filter_op != RTE_ETH_FILTER_GET) {
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
 		*(const void **)arg = &mlx5_flow_ops;
 		return 0;
 	case RTE_ETH_FILTER_FDIR:
-		priv_lock(priv);
-		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
-		priv_unlock(priv);
-		break;
+		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
 	default:
-		ERROR("%p: filter type (%d) not supported",
-		      (void *)dev, filter_type);
-		break;
+		DRV_LOG(ERR, "port %u filter type (%d) not supported",
+			dev->data->port_id, filter_type);
+		rte_errno = ENOTSUP;
+		return -rte_errno;
 	}
-	return -ret;
+	return 0;
 }
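
/*
 * Sketch (not part of this patch) of how the RTE_ETH_FILTER_GENERIC /
 * RTE_ETH_FILTER_GET branch above is exercised: the rte_flow layer
 * discovers the PMD callbacks by asking the driver for its rte_flow_ops,
 * roughly as follows.
 */
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				 RTE_ETH_FILTER_GET, &ops))
		return NULL; /* rte_errno holds the failure cause. */
	return ops;
}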