diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2581853472..d4d956f2ec 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3,7 +3,9 @@
  * Copyright 2016 Mellanox Technologies, Ltd
  */
+#include <netinet/in.h>
 #include <sys/queue.h>
+#include <stdalign.h>
 #include <stdint.h>
 #include <string.h>
@@ -19,7 +21,6 @@
 #include <rte_common.h>
 #include <rte_ether.h>
-#include <rte_eth_ctrl.h>
 #include <rte_ethdev_driver.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
@@ -28,75 +29,40 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
+#include "mlx5_flow.h"
 #include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/* Pattern outer Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-
-/* Pattern inner Layer bits. */
-#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
-#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
-#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
-#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
-
-/* Outer Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
- (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
- (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-#define MLX5_FLOW_LAYER_OUTER \
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
- MLX5_FLOW_LAYER_OUTER_L4)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
- (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
-
-/* Inner Masks. */
-#define MLX5_FLOW_LAYER_INNER_L3 \
- (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
-#define MLX5_FLOW_LAYER_INNER_L4 \
- (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
-#define MLX5_FLOW_LAYER_INNER \
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
- MLX5_FLOW_LAYER_INNER_L4)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-#define MLX5_FLOW_FATE_RSS (1u << 2)
-
-/* Modify a packet. */
-#define MLX5_FLOW_MOD_FLAG (1u << 0)
-#define MLX5_FLOW_MOD_MARK (1u << 1)
-
-/* possible L3 layers protocols filtering. */
-#define MLX5_IP_PROTOCOL_TCP 6
-#define MLX5_IP_PROTOCOL_UDP 17
-
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+/** Device flow drivers. 
*/ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT +extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; +#endif +extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; + +const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; + +const struct mlx5_flow_driver_ops *flow_drv_ops[] = { + [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, +#endif + [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, + [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops +}; enum mlx5_expansion { MLX5_EXPANSION_ROOT, MLX5_EXPANSION_ROOT_OUTER, + MLX5_EXPANSION_ROOT_ETH_VLAN, + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_VLAN, MLX5_EXPANSION_OUTER_IPV4, MLX5_EXPANSION_OUTER_IPV4_UDP, MLX5_EXPANSION_OUTER_IPV4_TCP, @@ -105,7 +71,11 @@ enum mlx5_expansion { MLX5_EXPANSION_OUTER_IPV6_TCP, MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_MPLS, MLX5_EXPANSION_ETH, + MLX5_EXPANSION_ETH_VLAN, + MLX5_EXPANSION_VLAN, MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV4_UDP, MLX5_EXPANSION_IPV4_TCP, @@ -128,16 +98,38 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_OUTER_IPV6), .type = RTE_FLOW_ITEM_TYPE_END, }, + [MLX5_EXPANSION_ROOT_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, + }, + [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, + }, [MLX5_EXPANSION_OUTER_ETH] = { .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, - MLX5_EXPANSION_OUTER_IPV6), + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), .type = RTE_FLOW_ITEM_TYPE_ETH, .rss_types = 0, }, + [MLX5_EXPANSION_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, + }, + [MLX5_EXPANSION_OUTER_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, [MLX5_EXPANSION_OUTER_IPV4] = { .next = RTE_FLOW_EXPAND_RSS_NEXT (MLX5_EXPANSION_OUTER_IPV4_UDP, - MLX5_EXPANSION_OUTER_IPV4_TCP), + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_IPV4, .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER, @@ -155,7 +147,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { [MLX5_EXPANSION_OUTER_IPV6] = { .next = RTE_FLOW_EXPAND_RSS_NEXT (MLX5_EXPANSION_OUTER_IPV6_UDP, - MLX5_EXPANSION_OUTER_IPV6_TCP), + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_IPV6, .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER, @@ -180,11 +174,29 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, + [MLX5_EXPANSION_GRE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .type = RTE_FLOW_ITEM_TYPE_GRE, + }, + [MLX5_EXPANSION_MPLS] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_MPLS, + }, [MLX5_EXPANSION_ETH] = { .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), .type = RTE_FLOW_ITEM_TYPE_ETH, }, + [MLX5_EXPANSION_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + 
.type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, [MLX5_EXPANSION_IPV4] = { .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, MLX5_EXPANSION_IPV4_TCP), @@ -217,52 +229,18 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = { }, }; -/** Handles information leading to a drop fate. */ -struct mlx5_flow_verbs { - LIST_ENTRY(mlx5_flow_verbs) next; - unsigned int size; /**< Size of the attribute. */ - struct { - struct ibv_flow_attr *attr; - /**< Pointer to the Specification buffer. */ - uint8_t *specs; /**< Pointer to the specifications. */ - }; - struct ibv_flow *flow; /**< Verbs flow pointer. */ - struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */ - uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */ -}; - -/* Flow structure. */ -struct rte_flow { - TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ - struct rte_flow_attr attributes; /**< User flow attribute. */ - uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */ - uint32_t layers; - /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ - uint32_t modifier; - /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */ - uint32_t fate; - /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */ - uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */ - LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */ - struct mlx5_flow_verbs *cur_verbs; - /**< Current Verbs flow structure being filled. */ - struct rte_flow_action_rss rss;/**< RSS context. */ - uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ - uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ -}; - static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, .create = mlx5_flow_create, .destroy = mlx5_flow_destroy, .flush = mlx5_flow_flush, .isolate = mlx5_flow_isolate, + .query = mlx5_flow_query, }; /* Convert FDIR request to Generic flow. */ struct mlx5_fdir { struct rte_flow_attr attr; - struct rte_flow_action actions[2]; struct rte_flow_item items[4]; struct rte_flow_item_eth l2; struct rte_flow_item_eth l2_mask; @@ -282,26 +260,10 @@ struct mlx5_fdir { struct rte_flow_item_udp udp; struct rte_flow_item_tcp tcp; } l4_mask; + struct rte_flow_action actions[2]; struct rte_flow_action_queue queue; }; -/* Verbs specification header. */ -struct ibv_spec_header { - enum ibv_flow_spec_type type; - uint16_t size; -}; - -/* - * Number of sub priorities. - * For each kind of pattern matching i.e. L2, L3, L4 to have a correct - * matching on the NIC (firmware dependent) L4 most have the higher priority - * followed by L3 and ending with L2. - */ -#define MLX5_PRIORITY_MAP_L2 2 -#define MLX5_PRIORITY_MAP_L3 1 -#define MLX5_PRIORITY_MAP_L4 0 -#define MLX5_PRIORITY_MAP_MAX 3 - /* Map of Verbs to Flow priority with 8 Verbs priorities. */ static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, @@ -315,7 +277,7 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { /* Tunnel information. */ struct mlx5_flow_tunnel_info { - uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ + uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
*/ }; @@ -328,13 +290,37 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, }, + { + .tunnel = MLX5_FLOW_LAYER_GRE, + .ptype = RTE_PTYPE_TUNNEL_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_NVGRE, + .ptype = RTE_PTYPE_TUNNEL_NVGRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_IPIP, + .ptype = RTE_PTYPE_TUNNEL_IP, + }, + { + .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP, + .ptype = RTE_PTYPE_TUNNEL_IP, + }, }; /** * Discover the maximum number of priority available. * * @param[in] dev - * Pointer to Ethernet device. + * Pointer to the Ethernet device structure. * * @return * number of supported flow priority on success, a negative errno @@ -343,6 +329,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = { int mlx5_flow_discover_priorities(struct rte_eth_dev *dev) { + struct mlx5_priv *priv = dev->data->dev_private; struct { struct ibv_flow_attr attr; struct ibv_flow_spec_eth eth; @@ -350,6 +337,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) } flow_attr = { .attr = { .num_of_specs = 2, + .port = (uint8_t)priv->ibv_port, }, .eth = { .type = IBV_FLOW_SPEC_ETH, @@ -378,6 +366,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) claim_zero(mlx5_glue->destroy_flow(flow)); priority = vprio[i]; } + mlx5_hrxq_drop_release(dev); switch (priority) { case 8: priority = RTE_DIM(priority_map_3); @@ -389,96 +378,42 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev) rte_errno = ENOTSUP; DRV_LOG(ERR, "port %u verbs maximum priority: %d expected 8/16", - dev->data->port_id, vprio[i]); + dev->data->port_id, priority); return -rte_errno; } - mlx5_hrxq_drop_release(dev); DRV_LOG(INFO, "port %u flow maximum priority: %d", dev->data->port_id, priority); return priority; } /** - * Adjust flow priority. + * Adjust flow priority based on the highest layer and the request priority. * - * @param dev - * Pointer to Ethernet device. - * @param flow - * Pointer to an rte flow. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] priority + * The rule base priority. + * @param[in] subpriority + * The priority based on the items. + * + * @return + * The new priority. */ -static void -mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow) +uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority) { - struct priv *priv = dev->data->dev_private; - uint32_t priority = flow->attributes.priority; - uint32_t subpriority = flow->cur_verbs->attr->priority; + uint32_t res = 0; + struct mlx5_priv *priv = dev->data->dev_private; switch (priv->config.flow_prio) { case RTE_DIM(priority_map_3): - priority = priority_map_3[priority][subpriority]; + res = priority_map_3[priority][subpriority]; break; case RTE_DIM(priority_map_5): - priority = priority_map_5[priority][subpriority]; + res = priority_map_5[priority][subpriority]; break; } - flow->cur_verbs->attr->priority = priority; -} - -/** - * Verify the @p attributes will be correctly understood by the NIC and store - * them in the @p flow if everything is correct. - * - * @param[in] dev - * Pointer to Ethernet device. - * @param[in] attributes - * Pointer to flow attributes - * @param[in, out] flow - * Pointer to the rte_flow structure. 
- * @param[out] error - * Pointer to error structure. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -static int -mlx5_flow_attributes(struct rte_eth_dev *dev, - const struct rte_flow_attr *attributes, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - uint32_t priority_max = - ((struct priv *)dev->data->dev_private)->config.flow_prio - 1; - - if (attributes->group) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "groups is not supported"); - if (attributes->priority != MLX5_FLOW_PRIO_RSVD && - attributes->priority >= priority_max) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - NULL, - "priority out of range"); - if (attributes->egress) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - NULL, - "egress is not supported"); - if (attributes->transfer) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, - NULL, - "transfer is not supported"); - if (!attributes->ingress) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "ingress attribute is mandatory"); - flow->attributes = *attributes; - if (attributes->priority == MLX5_FLOW_PRIO_RSVD) - flow->attributes.priority = priority_max; - return 0; + return res; } /** @@ -499,7 +434,7 @@ mlx5_flow_attributes(struct rte_eth_dev *dev, * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int +int mlx5_flow_item_acceptable(const struct rte_flow_item *item, const uint8_t *mask, const uint8_t *nic_mask, @@ -518,8 +453,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, " bits"); if (!item->spec && (item->mask || item->last)) return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + RTE_FLOW_ERROR_TYPE_ITEM, item, "mask/last without a spec is not" " supported"); if (item->spec && item->last) { @@ -534,408 +468,985 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, } ret = memcmp(spec, last, size); if (ret != 0) - return rte_flow_error_set(error, ENOTSUP, + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "range is not supported"); + "range is not valid"); } return 0; } /** - * Add a verbs item specification into @p flow. - * - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] src - * Create specification. - * @param[in] size - * Size in bytes of the specification to copy. - */ -static void -mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size) -{ - struct mlx5_flow_verbs *verbs = flow->cur_verbs; - - if (verbs->specs) { - void *dst; - - dst = (void *)(verbs->specs + verbs->size); - memcpy(dst, src, size); - ++verbs->attr->num_of_specs; - } - verbs->size += size; -} - -/** - * Adjust verbs hash fields according to the @p flow information. + * Adjust the hash fields according to the @p flow information. * - * @param[in, out] flow. - * Pointer to flow structure. + * @param[in] dev_flow. + * Pointer to the mlx5_flow. * @param[in] tunnel * 1 when the hash field is for a tunnel item. * @param[in] layer_types * ETH_RSS_* types. * @param[in] hash_fields * Item hash fields. + * + * @return + * The hash fields that should be used. 
*/ -static void -mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, - int tunnel __rte_unused, - uint32_t layer_types, uint64_t hash_fields) +uint64_t +mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, + int tunnel __rte_unused, uint64_t layer_types, + uint64_t hash_fields) { + struct rte_flow *flow = dev_flow->flow; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0); - if (flow->rss.level == 2 && !tunnel) - hash_fields = 0; - else if (flow->rss.level < 2 && tunnel) - hash_fields = 0; + int rss_request_inner = flow->rss.level >= 2; + + /* Check RSS hash level for tunnel. */ + if (tunnel && rss_request_inner) + hash_fields |= IBV_RX_HASH_INNER; + else if (tunnel || rss_request_inner) + return 0; #endif + /* Check if requested layer matches RSS hash fields. */ if (!(flow->rss.types & layer_types)) - hash_fields = 0; - flow->cur_verbs->hash_fields |= hash_fields; + return 0; + return hash_fields; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. - * - * @param[in] item - * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. - * @param[out] error - * Pointer to error structure. + * Lookup and set the ptype in the data Rx part. A single Ptype can be used, + * if several tunnel rules are used on this queue, the tunnel ptype will be + * cleared. * - * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * @param rxq_ctrl + * Rx queue to update. */ -static int -mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +static void +flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) { - const struct rte_flow_item_eth *spec = item->spec; - const struct rte_flow_item_eth *mask = item->mask; - const struct rte_flow_item_eth nic_mask = { - .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .type = RTE_BE16(0xffff), - }; - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - const unsigned int size = sizeof(struct ibv_flow_spec_eth); - struct ibv_flow_spec_eth eth = { - .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; - int ret; - - if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "L2 layers already configured"); - if (!mask) - mask = &rte_flow_item_eth_mask; - ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_eth), - error); - if (ret) - return ret; - flow->layers |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2; - if (size > flow_size) - return size; - if (spec) { - unsigned int i; + unsigned int i; + uint32_t tunnel_ptype = 0; - memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); - memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); - eth.val.ether_type = spec->type; - memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN); - memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN); - eth.mask.ether_type = mask->type; - /* Remove unwanted bits from values. */ - for (i = 0; i < ETHER_ADDR_LEN; ++i) { - eth.val.dst_mac[i] &= eth.mask.dst_mac[i]; - eth.val.src_mac[i] &= eth.mask.src_mac[i]; + /* Look up for the ptype to use. */ + for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { + if (!rxq_ctrl->flow_tunnels_n[i]) + continue; + if (!tunnel_ptype) { + tunnel_ptype = tunnels_info[i].ptype; + } else { + tunnel_ptype = 0; + break; } - eth.val.ether_type &= eth.mask.ether_type; } - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; - mlx5_flow_spec_verbs_add(flow, ð, size); - return size; + rxq_ctrl->rxq.tunnel = tunnel_ptype; } /** - * Update the VLAN tag in the Verbs Ethernet specification. + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive + * flow. * - * @param[in, out] attr - * Pointer to Verbs attributes structure. - * @param[in] eth - * Verbs structure containing the VLAN information to copy. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] dev_flow + * Pointer to device flow structure. */ static void -mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr, - struct ibv_flow_spec_eth *eth) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; + const int mark = !!(flow->actions & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; - const enum ibv_flow_spec_type search = eth->type; - struct ibv_spec_header *hdr = (struct ibv_spec_header *) - ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); - - for (i = 0; i != attr->num_of_specs; ++i) { - if (hdr->type == search) { - struct ibv_flow_spec_eth *e = - (struct ibv_flow_spec_eth *)hdr; - - e->val.vlan_tag = eth->val.vlan_tag; - e->mask.vlan_tag = eth->mask.vlan_tag; - e->val.ether_type = eth->val.ether_type; - e->mask.ether_type = eth->mask.ether_type; - break; + + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n++; + } + if (tunnel) { + unsigned int j; + + /* Increase the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & + dev_flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]++; + break; + } + } + flow_rxq_tunnel_ptype_update(rxq_ctrl); } - hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); } } /** - * Convert the @p item into @p flow (or by updating the already present - * Ethernet Verbs) specification after ensuring the NIC will understand and - * process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow * - * @param[in] item - * Item specification. 
- * @param[in, out] flow + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] flow * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. - * @param[out] error - * Pointer to error structure. - * - * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. */ -static int -mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +static void +flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { - const struct rte_flow_item_vlan *spec = item->spec; - const struct rte_flow_item_vlan *mask = item->mask; - const struct rte_flow_item_vlan nic_mask = { - .tci = RTE_BE16(0x0fff), - .inner_type = RTE_BE16(0xffff), - }; - unsigned int size = sizeof(struct ibv_flow_spec_eth); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - struct ibv_flow_spec_eth eth = { - .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; - int ret; - const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | - MLX5_FLOW_LAYER_INNER_L4) : - (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4); - const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : - MLX5_FLOW_LAYER_OUTER_VLAN; - const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2; + struct mlx5_flow *dev_flow; - if (flow->layers & vlanm) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VLAN layer already configured"); - else if ((flow->layers & l34m) != 0) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "L2 layer cannot follow L3/L4 layer"); - if (!mask) - mask = &rte_flow_item_vlan_mask; - ret = mlx5_flow_item_acceptable - (item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_vlan), error); - if (ret) - return ret; - if (spec) { - eth.val.vlan_tag = spec->tci; - eth.mask.vlan_tag = mask->tci; - eth.val.vlan_tag &= eth.mask.vlan_tag; - eth.val.ether_type = spec->inner_type; - eth.mask.ether_type = mask->inner_type; - eth.val.ether_type &= eth.mask.ether_type; - } - /* - * From verbs perspective an empty VLAN is equivalent - * to a packet without VLAN layer. - */ - if (!eth.mask.vlan_tag) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_SPEC, - item->spec, - "VLAN cannot be empty"); - if (!(flow->layers & l2m)) { - if (size <= flow_size) { - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; - mlx5_flow_spec_verbs_add(flow, ð, size); - } - } else { - if (flow->cur_verbs) - mlx5_flow_item_vlan_update(flow->cur_verbs->attr, - ð); - size = 0; /* Only an update is done in eth specification. */ - } - flow->layers |= tunnel ? - (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) : - (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN); - return size; + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_set(dev, dev_flow); } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. 
- * - * @param[in] item - * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. - * @param[out] error - * Pointer to error structure. + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * device flow if no other flow uses it with the same kind of request. * - * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * @param dev + * Pointer to Ethernet device. + * @param[in] dev_flow + * Pointer to the device flow. */ -static int -mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +static void +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { - const struct rte_flow_item_ipv4 *spec = item->spec; - const struct rte_flow_item_ipv4 *mask = item->mask; - const struct rte_flow_item_ipv4 nic_mask = { - .hdr = { - .src_addr = RTE_BE32(0xffffffff), - .dst_addr = RTE_BE32(0xffffffff), + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; + const int mark = !!(flow->actions & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; + + assert(dev->data->dev_started); + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->flow_mark_n--; + rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; + } + if (tunnel) { + unsigned int j; + + /* Decrease the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & + dev_flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]--; + break; + } + } + flow_rxq_tunnel_ptype_update(rxq_ctrl); + } + } +} + +/** + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. + */ +static void +flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_trim(dev, dev_flow); +} + +/** + * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_rxq_flags_clear(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + unsigned int j; + + if (!(*priv->rxqs)[i]) + continue; + rxq_ctrl = container_of((*priv->rxqs)[i], + struct mlx5_rxq_ctrl, rxq); + rxq_ctrl->flow_mark_n = 0; + rxq_ctrl->rxq.mark = 0; + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) + rxq_ctrl->flow_tunnels_n[j] = 0; + rxq_ctrl->rxq.tunnel = 0; + } +} + +/* + * return a pointer to the desired action in the list of actions. + * + * @param[in] actions + * The list of actions to search the action in. + * @param[in] action + * The action to find. 
+ * + * @return + * Pointer to the action in the list, if found. NULL otherwise. + */ +const struct rte_flow_action * +mlx5_flow_find_action(const struct rte_flow_action *actions, + enum rte_flow_action_type action) +{ + if (actions == NULL) + return NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) + if (actions->type == action) + return actions; + return NULL; +} + +/* + * Validate the flag action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_flag(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + + if (action_flags & MLX5_FLOW_ACTION_DROP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't drop and flag in same flow"); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't mark and flag in same flow"); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 flag" + " actions in same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "flag action not supported for " + "egress"); + return 0; +} + +/* + * Validate the mark action. + * + * @param[in] action + * Pointer to the queue action. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_mark *mark = action->conf; + + if (!mark) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "configuration cannot be null"); + if (mark->id >= MLX5_FLOW_MARK_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &mark->id, + "mark id must in 0 <= id < " + RTE_STR(MLX5_FLOW_MARK_MAX)); + if (action_flags & MLX5_FLOW_ACTION_DROP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't drop and mark in same flow"); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't flag and mark in same flow"); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 mark actions in same" + " flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "mark action not supported for " + "egress"); + return 0; +} + +/* + * Validate the drop action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_flow_validate_action_drop(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't drop and flag in same flow"); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't drop and mark in same flow"); + if (action_flags & (MLX5_FLOW_FATE_ACTIONS | + MLX5_FLOW_FATE_ESWITCH_ACTIONS)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions in" + " same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "drop action not supported for " + "egress"); + return 0; +} + +/* + * Validate the queue action. + * + * @param[in] action + * Pointer to the queue action. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_queue(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_queue *queue = action->conf; + + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions in" + " same flow"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (queue->index >= priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue index out of range"); + if (!(*priv->rxqs)[queue->index]) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue is not configured"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "queue action not supported for " + "egress"); + return 0; +} + +/* + * Validate the rss action. + * + * @param[in] action + * Pointer to the queue action. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[in] item_flags + * Items that were detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint64_t item_flags, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_rss *rss = action->conf; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; + + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions" + " in same flow"); + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->func, + "RSS hash function not supported"); +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (rss->level > 2) +#else + if (rss->level > 1) +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->level, + "tunnel RSS is not supported"); + /* allow RSS key_len 0 in case of NULL (default) RSS key. */ + if (rss->key_len == 0 && rss->key != NULL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key length 0"); + if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too small"); + if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too large"); + if (rss->queue_num > priv->config.ind_table_max_size) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue_num, + "number of queues too large"); + if (rss->types & MLX5_RSS_HF_MASK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->types, + "some RSS protocols are not" + " supported"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (!rss->queue_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No queues configured"); + for (i = 0; i != rss->queue_num; ++i) { + if (!(*priv->rxqs)[rss->queue[i]]) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], "queue is not configured"); + } + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "rss action not supported for " + "egress"); + if (rss->level > 1 && !tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "inner RSS is not supported for " + "non-tunnel flows"); + return 0; +} + +/* + * Validate the count action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "count action not supported for " + "egress"); + return 0; +} + +/** + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attributes + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t priority_max = priv->config.flow_prio - 1; + + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, "groups is not supported"); + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, "priority out of range"); + if (attributes->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "egress is not supported"); + if (attributes->transfer && !priv->config.dv_esw_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + if (!attributes->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "ingress attribute is mandatory"); + return 0; +} + +/** + * Validate ICMP6 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp6 *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP6 layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 is mandatory to filter on" + " ICMP6"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp6_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp6_mask, + sizeof(struct rte_flow_item_icmp6), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate ICMP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 is mandatory to filter" + " on ICMP"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp_mask, + sizeof(struct rte_flow_item_icmp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate Ethernet item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_eth(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *mask = item->mask; + const struct rte_flow_item_eth nic_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = RTE_BE16(0xffff), + }; + int ret; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + + if (item_flags & ethm) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L2 layers not supported"); + if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "inner L2 layer should not " + "follow inner L3 layers"); + if (!mask) + mask = &rte_flow_item_eth_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_eth), + error); + return ret; +} + +/** + * Validate VLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] dev + * Ethernet device flow is being created on. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(UINT16_MAX), + .inner_type = RTE_BE16(UINT16_MAX), + }; + uint16_t vlan_tag = 0; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + const uint64_t l34m = tunnel ? 
(MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4); + const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + + const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (item_flags & vlanm) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple VLAN layers not supported"); + else if ((item_flags & l34m) != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer cannot follow L3/L4 layer"); + else if ((item_flags & l2m) == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no L2 layer before VLAN"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), + error); + if (ret) + return ret; + if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->vmwa_context) { + /* + * Non-NULL context means we have a virtual machine + * and SR-IOV enabled, we have to create VLAN interface + * to make hypervisor to setup E-Switch vport + * context correctly. We avoid creating the multiple + * VLAN interfaces, so we cannot support VLAN tag mask. + */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN tag mask is not" + " supported in virtual" + " environment"); + } + } + if (spec) { + vlan_tag = spec->tci; + vlan_tag &= mask->tci; + } + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!vlan_tag) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "VLAN cannot be empty"); + return 0; +} + +/** + * Validate IPV4 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_item_ipv4 *acc_mask, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *mask = item->mask; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), .type_of_service = 0xff, .next_proto_id = 0xff, }, }; - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext); - struct ibv_flow_spec_ipv4_ext ipv4 = { - .type = IBV_FLOW_SPEC_IPV4_EXT | - (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; - - if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3)) + uint8_t next_proto = 0xFF; + + if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (mask && spec) + next_proto = mask->hdr.next_proto_id & + spec->hdr.next_proto_id; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv6 specified " + "but IPv4 item provided"); + if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L3 layers not supported"); - else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + else if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); + else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no L2 layer before IPV4"); if (!mask) mask = &rte_flow_item_ipv4_mask; - ret = mlx5_flow_item_acceptable - (item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_ipv4), error); + else if (mask->hdr.next_proto_id != 0 && + mask->hdr.next_proto_id != 0xff) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, + "partial mask is not supported" + " for protocol"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv4), + error); if (ret < 0) return ret; - flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; - if (spec) { - ipv4.val = (struct ibv_flow_ipv4_ext_filter){ - .src_ip = spec->hdr.src_addr, - .dst_ip = spec->hdr.dst_addr, - .proto = spec->hdr.next_proto_id, - .tos = spec->hdr.type_of_service, - }; - ipv4.mask = (struct ibv_flow_ipv4_ext_filter){ - .src_ip = mask->hdr.src_addr, - .dst_ip = mask->hdr.dst_addr, - .proto = mask->hdr.next_proto_id, - .tos = mask->hdr.type_of_service, - }; - /* Remove unwanted bits from values. */ - ipv4.val.src_ip &= ipv4.mask.src_ip; - ipv4.val.dst_ip &= ipv4.mask.dst_ip; - ipv4.val.proto &= ipv4.mask.proto; - ipv4.val.tos &= ipv4.mask.tos; - } - flow->l3_protocol_en = !!ipv4.mask.proto; - flow->l3_protocol = ipv4.val.proto; - if (size <= flow_size) { - mlx5_flow_verbs_hashfields_adjust - (flow, tunnel, - (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER), - (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; - mlx5_flow_spec_verbs_add(flow, &ipv4, size); - } - return size; + return 0; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate IPV6 item. * * @param[in] item * Item specification. 
- * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +int +mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_item_ipv6 *acc_mask, + struct rte_flow_error *error) { - const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; + const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 nic_mask = { .hdr = { .src_addr = @@ -949,144 +1460,102 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow, .hop_limits = 0xff, }, }; - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int size = sizeof(struct ibv_flow_spec_ipv6); - struct ibv_flow_spec_ipv6 ipv6 = { - .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; + uint8_t next_proto = 0xFF; - if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3)) + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { + if (mask && spec) + next_proto = mask->hdr.proto & spec->hdr.proto; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPIP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv4 specified " + "but IPv6 item provided"); + if (item_flags & l3m) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + RTE_FLOW_ERROR_TYPE_ITEM, item, "multiple L3 layers not supported"); - else if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + else if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); + else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no L2 layer before IPV6"); if (!mask) mask = &rte_flow_item_ipv6_mask; - ret = mlx5_flow_item_acceptable - (item, (const uint8_t *)mask, - (const uint8_t *)&nic_mask, - sizeof(struct rte_flow_item_ipv6), error); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6), + error); if (ret < 0) return ret; - flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; - if (spec) { - unsigned int i; - uint32_t vtc_flow_val; - uint32_t vtc_flow_mask; - - memcpy(&ipv6.val.src_ip, spec->hdr.src_addr, - RTE_DIM(ipv6.val.src_ip)); - memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr, - RTE_DIM(ipv6.val.dst_ip)); - memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr, - RTE_DIM(ipv6.mask.src_ip)); - memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr, - RTE_DIM(ipv6.mask.dst_ip)); - vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow); - vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow); - ipv6.val.flow_label = - rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >> - IPV6_HDR_FL_SHIFT); - ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >> - IPV6_HDR_TC_SHIFT; - ipv6.val.next_hdr = spec->hdr.proto; - ipv6.val.hop_limit = spec->hdr.hop_limits; - ipv6.mask.flow_label = - rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >> - IPV6_HDR_FL_SHIFT); - ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >> - IPV6_HDR_TC_SHIFT; - ipv6.mask.next_hdr = mask->hdr.proto; - ipv6.mask.hop_limit = mask->hdr.hop_limits; - /* Remove unwanted bits from values. */ - for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) { - ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i]; - ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i]; - } - ipv6.val.flow_label &= ipv6.mask.flow_label; - ipv6.val.traffic_class &= ipv6.mask.traffic_class; - ipv6.val.next_hdr &= ipv6.mask.next_hdr; - ipv6.val.hop_limit &= ipv6.mask.hop_limit; - } - flow->l3_protocol_en = !!ipv6.mask.next_hdr; - flow->l3_protocol = ipv6.val.next_hdr; - if (size <= flow_size) { - mlx5_flow_verbs_hashfields_adjust - (flow, tunnel, - (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER), - (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; - mlx5_flow_spec_verbs_add(flow, &ipv6, size); - } - return size; + return 0; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate UDP item. * * @param[in] item * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. 
+ * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[in] flow_mask + * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +int +mlx5_flow_validate_item_udp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) { - const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); - struct ibv_flow_spec_tcp_udp udp = { - .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; - if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" " with UDP layer"); - if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3))) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "L3 is mandatory to filter" - " on L4"); - if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "L4 layer is already" - " present"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 is mandatory to filter on L4"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); if (!mask) mask = &rte_flow_item_udp_mask; ret = mlx5_flow_item_acceptable @@ -1095,178 +1564,121 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow, sizeof(struct rte_flow_item_udp), error); if (ret < 0) return ret; - flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : - MLX5_FLOW_LAYER_OUTER_L4_UDP; - if (spec) { - udp.val.dst_port = spec->hdr.dst_port; - udp.val.src_port = spec->hdr.src_port; - udp.mask.dst_port = mask->hdr.dst_port; - udp.mask.src_port = mask->hdr.src_port; - /* Remove unwanted bits from values. 
*/ - udp.val.src_port &= udp.mask.src_port; - udp.val.dst_port &= udp.mask.dst_port; - } - if (size <= flow_size) { - mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP, - (IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP)); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; - mlx5_flow_spec_verbs_add(flow, &udp, size); - } - return size; + return 0; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate TCP item. * * @param[in] item * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +int +mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + const struct rte_flow_item_tcp *flow_mask, + struct rte_flow_error *error) { - const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); - struct ibv_flow_spec_tcp_udp tcp = { - .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), - .size = size, - }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; int ret; - if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + assert(flow_mask); + if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "protocol filtering not compatible" " with TCP layer"); - if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : - MLX5_FLOW_LAYER_OUTER_L3))) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 is mandatory to filter on L4"); - if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : - MLX5_FLOW_LAYER_OUTER_L4)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "L4 layer is already present"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); if (!mask) mask = &rte_flow_item_tcp_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, - (const uint8_t *)&rte_flow_item_tcp_mask, + (const uint8_t *)flow_mask, sizeof(struct rte_flow_item_tcp), error); if (ret < 0) return ret; - flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : - MLX5_FLOW_LAYER_OUTER_L4_TCP; - if (spec) { - tcp.val.dst_port = spec->hdr.dst_port; - tcp.val.src_port = spec->hdr.src_port; - tcp.mask.dst_port = mask->hdr.dst_port; - tcp.mask.src_port = mask->hdr.src_port; - /* Remove unwanted bits from values. */ - tcp.val.src_port &= tcp.mask.src_port; - tcp.val.dst_port &= tcp.mask.dst_port; - } - if (size <= flow_size) { - mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP, - (IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP)); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; - mlx5_flow_spec_verbs_add(flow, &tcp, size); - } - return size; + return 0; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate VXLAN item. * * @param[in] item * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
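+ *
+ * Minimal usage sketch (illustrative only; the item_flags value below is
+ * an assumption about what the caller has already parsed, not taken from
+ * this patch):
+ *
+ * @code
+ * uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L2 |
+ *                       MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
+ *                       MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ *
+ * ret = mlx5_flow_validate_item_vxlan(item, item_flags, error);
+ * if (ret < 0)
+ *         return ret;
+ * item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ * @endcode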
*/ -static int -mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow, - const size_t flow_size, struct rte_flow_error *error) +int +mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; - unsigned int size = sizeof(struct ibv_flow_spec_tunnel); - struct ibv_flow_spec_tunnel vxlan = { - .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, - .size = size, - }; int ret; union vni { uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; + uint32_t vlan_id = 0; + - if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "a tunnel is already present"); + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 */ - if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "no outer UDP layer found"); if (!mask) mask = &rte_flow_item_vxlan_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_vxlan_mask, - sizeof(struct rte_flow_item_vxlan), error); + sizeof(struct rte_flow_item_vxlan), + error); if (ret < 0) return ret; if (spec) { memcpy(&id.vni[1], spec->vni, 3); - vxlan.val.tunnel_id = id.vlan_id; + vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vxlan.mask.tunnel_id = id.vlan_id; - /* Remove unwanted bits from values. */ - vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + vlan_id &= id.vlan_id; } /* * Tunnel id 0 is equivalent as not adding a VXLAN layer, if @@ -1277,109 +1689,89 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow, * match this rule. To avoid such situation, VNI 0 is * currently refused. */ - if (!vxlan.val.tunnel_id) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!vlan_id) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, "VXLAN vni cannot be 0"); - if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, "VXLAN tunnel must be fully defined"); - if (size <= flow_size) { - mlx5_flow_spec_verbs_add(flow, &vxlan, size); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; - } - flow->layers |= MLX5_FLOW_LAYER_VXLAN; - return size; + return 0; } /** - * Convert the @p item into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate VXLAN_GPE item. * - * @param dev - * Pointer to Ethernet device. * @param[in] item * Item specification. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] priv + * Pointer to the private data structure. 
+ * @param[in] target_protocol + * The next protocol in the previous item. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p item has fully been converted, - * otherwise another call with this returned memory size should be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev, - const struct rte_flow_item *item, - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) +int +mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_item_vxlan_gpe *spec = item->spec; const struct rte_flow_item_vxlan_gpe *mask = item->mask; - unsigned int size = sizeof(struct ibv_flow_spec_tunnel); - struct ibv_flow_spec_tunnel vxlan_gpe = { - .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, - .size = size, - }; int ret; union vni { uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; + uint32_t vlan_id = 0; - if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en) + if (!priv->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN is not enabled by device" " parameter and/or not configured in" " firmware"); - if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "a tunnel is already present"); + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); /* * Verify only UDPv4 is present as defined in * https://tools.ietf.org/html/rfc7348 */ - if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, "no outer UDP layer found"); if (!mask) mask = &rte_flow_item_vxlan_gpe_mask; ret = mlx5_flow_item_acceptable (item, (const uint8_t *)mask, (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, - sizeof(struct rte_flow_item_vxlan_gpe), error); + sizeof(struct rte_flow_item_vxlan_gpe), + error); if (ret < 0) return ret; if (spec) { + if (spec->protocol) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol" + " not supported"); memcpy(&id.vni[1], spec->vni, 3); - vxlan_gpe.val.tunnel_id = id.vlan_id; + vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vxlan_gpe.mask.tunnel_id = id.vlan_id; - if (spec->protocol) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VxLAN-GPE protocol not supported"); - /* Remove unwanted bits from values. */ - vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id; + vlan_id &= id.vlan_id; } /* * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this @@ -1389,850 +1781,633 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev, * before will also match this rule. To avoid such situation, VNI 0 * is currently refused. 
*/ - if (!vxlan_gpe.val.tunnel_id) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!vlan_id) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, "VXLAN-GPE vni cannot be 0"); - if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, "VXLAN-GPE tunnel must be fully" " defined"); - if (size <= flow_size) { - mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size); - flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; - } - flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE; - return size; + return 0; } - /** - * Convert the @p pattern into a Verbs specifications after ensuring the NIC - * will understand and process it correctly. - * The conversion is performed item per item, each of them is written into - * the @p flow if its size is lesser or equal to @p flow_size. - * Validation and memory consumption computation are still performed until the - * end of @p pattern, unless an error is encountered. + * Validate GRE Key item. * - * @param[in] pattern - * Flow pattern. - * @param[in, out] flow - * Pointer to the rte_flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small some - * garbage may be present. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] gre_item + * Pointer to gre_item * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @pattern has fully been - * converted, otherwise another call with this returned memory size should - * be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
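+ *
+ * Illustrative call sequence (next_protocol is an assumption standing for
+ * the protocol value extracted from the preceding IP item): the GRE item
+ * has to be validated first and passed back in as @p gre_item so the
+ * key-present bit can be cross-checked.
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_gre(gre_item, item_flags,
+ *                                   next_protocol, error);
+ * if (ret < 0)
+ *         return ret;
+ * item_flags |= MLX5_FLOW_LAYER_GRE;
+ *
+ * ret = mlx5_flow_validate_item_gre_key(item, item_flags,
+ *                                       gre_item, error);
+ * if (ret < 0)
+ *         return ret;
+ * item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
+ * @endcode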
*/ -static int -mlx5_flow_items(struct rte_eth_dev *dev, - const struct rte_flow_item pattern[], - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) +int +mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_item *gre_item, + struct rte_flow_error *error) { - int remain = flow_size; - size_t size = 0; - - for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { - int ret = 0; + const rte_be32_t *mask = item->mask; + int ret = 0; + rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); + const struct rte_flow_item_gre *gre_spec = gre_item->spec; + const struct rte_flow_item_gre *gre_mask = gre_item->mask; - switch (pattern->type) { - case RTE_FLOW_ITEM_TYPE_VOID: - break; - case RTE_FLOW_ITEM_TYPE_ETH: - ret = mlx5_flow_item_eth(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - ret = mlx5_flow_item_vlan(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - ret = mlx5_flow_item_ipv4(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - ret = mlx5_flow_item_ipv6(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_UDP: - ret = mlx5_flow_item_udp(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_TCP: - ret = mlx5_flow_item_tcp(pattern, flow, remain, error); - break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - ret = mlx5_flow_item_vxlan(pattern, flow, remain, - error); - break; - case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow, - remain, error); - break; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - pattern, - "item not supported"); - } - if (ret < 0) - return ret; - if (remain > ret) - remain -= ret; - else - remain = 0; - size += ret; - } - if (!flow->layers) { - const struct rte_flow_item item = { - .type = RTE_FLOW_ITEM_TYPE_ETH, - }; + if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Multiple GRE key not support"); + if (!(item_flags & MLX5_FLOW_LAYER_GRE)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "No preceding GRE header"); + if (item_flags & MLX5_FLOW_LAYER_INNER) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GRE key following a wrong item"); + if (!gre_mask) + gre_mask = &rte_flow_item_gre_mask; + if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && + !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Key bit must be on"); - return mlx5_flow_item_eth(&item, flow, flow_size, error); - } - return size; + if (!mask) + mask = &gre_key_default_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&gre_key_default_mask, + sizeof(rte_be32_t), error); + return ret; } /** - * Convert the @p action into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate GRE item. * - * @param[in] action - * Action configuration. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item + * Item specification. 
+ * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] target_protocol + * The next protocol in the previous item. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p action has fully been - * converted, otherwise another call with this returned memory size should - * be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_action_drop(const struct rte_flow_action *action, - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) +int +mlx5_flow_validate_item_gre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) { - unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - struct ibv_flow_spec_action_drop drop = { - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = size, + const struct rte_flow_item_gre *spec __rte_unused = item->spec; + const struct rte_flow_item_gre *mask = item->mask; + int ret; + const struct rte_flow_item_gre nic_mask = { + .c_rsvd0_ver = RTE_BE16(0xB000), + .protocol = RTE_BE16(UINT16_MAX), }; - if (flow->fate) + if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with this GRE layer"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "multiple fate actions are not" + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" " supported"); - if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "drop is not compatible with" - " flag/mark action"); - if (size < flow_size) - mlx5_flow_spec_verbs_add(flow, &drop, size); - flow->fate |= MLX5_FLOW_FATE_DROP; - return size; + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_gre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_gre), error); + if (ret < 0) + return ret; +#ifndef HAVE_MLX5DV_DR +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT + if (spec && (spec->protocol & mask->protocol)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "without MPLS support the" + " specification cannot be used for" + " filtering"); +#endif +#endif + return 0; } /** - * Convert the @p action into @p flow after ensuring the NIC will understand - * and process it correctly. + * Validate Geneve item. * - * @param[in] dev - * Pointer to Ethernet device structure. - * @param[in] action - * Action configuration. - * @param[in, out] flow - * Pointer to flow structure. + * @param[in] item + * Item specification. + * @param[in] itemFlags + * Bit-fields that holds the items detected until now. + * @param[in] enPriv + * Pointer to the private data structure. * @param[out] error * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
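+ *
+ * Minimal usage sketch (illustrative; the surrounding validate loop and
+ * its item_flags bookkeeping are assumptions, not part of this patch):
+ *
+ * @code
+ * // the outer UDP layer must already be recorded in item_flags
+ * ret = mlx5_flow_validate_item_geneve(item, item_flags, dev, error);
+ * if (ret < 0)
+ *         return ret;
+ * @endcode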
*/ -static int -mlx5_flow_action_queue(struct rte_eth_dev *dev, - const struct rte_flow_action *action, - struct rte_flow *flow, - struct rte_flow_error *error) + +int +mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - const struct rte_flow_action_queue *queue = action->conf; + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_item_geneve *spec = item->spec; + const struct rte_flow_item_geneve *mask = item->mask; + int ret; + uint16_t gbhdr; + uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ? + MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; + const struct rte_flow_item_geneve nic_mask = { + .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), + .vni = "\xff\xff\xff", + .protocol = RTE_BE16(UINT16_MAX), + }; - if (flow->fate) + if (!(priv->config.hca_attr.flex_parser_protocols & + MLX5_HCA_FLEX_GENEVE_ENABLED) || + !priv->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "multiple fate actions are not" + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Geneve is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" " supported"); - if (queue->index >= priv->rxqs_n) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &queue->index, - "queue index out of range"); - if (!(*priv->rxqs)[queue->index]) + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &queue->index, - "queue is not configured"); - if (flow->queue) - (*flow->queue)[0] = queue->index; - flow->rss.queue_num = 1; - flow->fate |= MLX5_FLOW_FATE_QUEUE; + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_geneve_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_geneve), error); + if (ret) + return ret; + if (spec) { + gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); + if (MLX5_GENEVE_VER_VAL(gbhdr) || + MLX5_GENEVE_CRITO_VAL(gbhdr) || + MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Geneve protocol unsupported" + " fields are being used"); + if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported Geneve options length"); + } + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve tunnel must be fully defined"); return 0; } /** - * Ensure the @p action will be understood and used correctly by the NIC. + * Validate MPLS item. * - * @param dev - * Pointer to Ethernet device structure. - * @param action[in] - * Pointer to flow actions array. - * @param flow[in, out] - * Pointer to the rte_flow structure. - * @param error[in, out] + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. 
+ * @param[in] prev_layer + * The protocol layer indicated in previous item. + * @param[out] error * Pointer to error structure. * * @return - * On success @p flow->queue array and @p flow->rss are filled and valid. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -mlx5_flow_action_rss(struct rte_eth_dev *dev, - const struct rte_flow_action *action, - struct rte_flow *flow, - struct rte_flow_error *error) +int +mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_item *item __rte_unused, + uint64_t item_flags __rte_unused, + uint64_t prev_layer __rte_unused, + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - const struct rte_flow_action_rss *rss = action->conf; - unsigned int i; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_priv *priv = dev->data->dev_private; + int ret; - if (flow->fate) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "multiple fate actions are not" - " supported"); - if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && - rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->func, - "RSS hash function not supported"); -#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - if (rss->level > 2) -#else - if (rss->level > 1) -#endif - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->level, - "tunnel RSS is not supported"); - if (rss->key_len < MLX5_RSS_HASH_KEY_LEN) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->key_len, - "RSS hash key too small"); - if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->key_len, - "RSS hash key too large"); - if (rss->queue_num > priv->config.ind_table_max_size) + if (!priv->config.mpls_en) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue_num, - "number of queues too large"); - if (rss->types & MLX5_RSS_HF_MASK) + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS not supported or" + " disabled in firmware" + " configuration."); + /* MPLS over IP, UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4_UDP | + MLX5_FLOW_LAYER_GRE))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with MPLS layer"); + /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
*/ + if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && + !(item_flags & MLX5_FLOW_LAYER_GRE)) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->types, - "some RSS protocols are not" + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" " supported"); - for (i = 0; i != rss->queue_num; ++i) { - if (!(*priv->rxqs)[rss->queue[i]]) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &rss->queue[i], - "queue is not configured"); - } - if (flow->queue) - memcpy((*flow->queue), rss->queue, - rss->queue_num * sizeof(uint16_t)); - flow->rss.queue_num = rss->queue_num; - memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN); - flow->rss.types = rss->types; - flow->rss.level = rss->level; - flow->fate |= MLX5_FLOW_FATE_RSS; + if (!mask) + mask = &rte_flow_item_mpls_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_mpls_mask, + sizeof(struct rte_flow_item_mpls), error); + if (ret < 0) + return ret; return 0; +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS is not supported by Verbs, please" + " update."); } /** - * Convert the @p action into a Verbs specification after ensuring the NIC - * will understand and process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Validate NVGRE item. * - * @param[in] action - * Action configuration. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] target_protocol + * The next protocol in the previous item. * @param[out] error * Pointer to error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p action has fully been - * converted, otherwise another call with this returned memory size should - * be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
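+ *
+ * Illustrative use (next_protocol is an assumption standing for the
+ * protocol value extracted from the preceding IPv4/IPv6 item):
+ *
+ * @code
+ * ret = mlx5_flow_validate_item_nvgre(item, item_flags,
+ *                                     next_protocol, error);
+ * if (ret < 0)
+ *         return ret;
+ * item_flags |= MLX5_FLOW_LAYER_NVGRE;
+ * @endcode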
*/ -static int -mlx5_flow_action_flag(const struct rte_flow_action *action, - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) -{ - unsigned int size = sizeof(struct ibv_flow_spec_action_tag); - struct ibv_flow_spec_action_tag tag = { - .type = IBV_FLOW_SPEC_ACTION_TAG, - .size = size, - .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT), - }; - struct mlx5_flow_verbs *verbs = flow->cur_verbs; +int +mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_nvgre *mask = item->mask; + int ret; - if (flow->modifier & MLX5_FLOW_MOD_FLAG) + if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with this GRE layer"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "flag action already present"); - if (flow->fate & MLX5_FLOW_FATE_DROP) + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "flag is not compatible with drop" - " action"); - if (flow->modifier & MLX5_FLOW_MOD_MARK) - size = 0; - else if (size <= flow_size && verbs) - mlx5_flow_spec_verbs_add(flow, &tag, size); - flow->modifier |= MLX5_FLOW_MOD_FLAG; - return size; + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_nvgre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), error); + if (ret < 0) + return ret; + return 0; +} + +static int +flow_null_validate(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + bool external __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +static struct mlx5_flow * +flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); + return NULL; +} + +static int +flow_null_translate(struct rte_eth_dev *dev __rte_unused, + struct mlx5_flow *dev_flow __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +static int +flow_null_apply(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } -/** - * Update verbs specification to modify the flag to mark. - * - * @param[in, out] verbs - * Pointer to the mlx5_flow_verbs structure. - * @param[in] mark_id - * Mark identifier to replace the flag. 
- */ static void -mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id) +flow_null_remove(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused) { - struct ibv_spec_header *hdr; - int i; +} - if (!verbs) - return; - /* Update Verbs specification. */ - hdr = (struct ibv_spec_header *)verbs->specs; - if (!hdr) - return; - for (i = 0; i != verbs->attr->num_of_specs; ++i) { - if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) { - struct ibv_flow_spec_action_tag *t = - (struct ibv_flow_spec_action_tag *)hdr; - - t->tag_id = mlx5_flow_mark_set(mark_id); - } - hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size); - } +static void +flow_null_destroy(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused) +{ +} + +static int +flow_null_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } +/* Void driver to protect from null pointer reference. */ +const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { + .validate = flow_null_validate, + .prepare = flow_null_prepare, + .translate = flow_null_translate, + .apply = flow_null_apply, + .remove = flow_null_remove, + .destroy = flow_null_destroy, + .query = flow_null_query, +}; + /** - * Convert the @p action into @p flow (or by updating the already present - * Flag Verbs specification) after ensuring the NIC will understand and - * process it correctly. - * If the necessary size for the conversion is greater than the @p flow_size, - * nothing is written in @p flow, the validation is still performed. + * Select flow driver type according to flow attributes and device + * configuration. * - * @param[in] action - * Action configuration. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small, nothing is - * written. - * @param[out] error - * Pointer to error structure. + * @param[in] dev + * Pointer to the dev structure. + * @param[in] attr + * Pointer to the flow attributes. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p action has fully been - * converted, otherwise another call with this returned memory size should - * be done. - * On error, a negative errno value is returned and rte_errno is set. + * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. 
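+ *
+ * For instance (illustrative), a caller that only needs the ops table can
+ * combine this with flow_get_drv_ops(); the external flag is assumed to
+ * come from the caller's context:
+ *
+ * @code
+ * enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
+ * const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(type);
+ *
+ * ret = fops->validate(dev, attr, items, actions, external, error);
+ * @endcode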
*/ -static int -mlx5_flow_action_mark(const struct rte_flow_action *action, - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) +static enum mlx5_flow_drv_type +flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) { - const struct rte_flow_action_mark *mark = action->conf; - unsigned int size = sizeof(struct ibv_flow_spec_action_tag); - struct ibv_flow_spec_action_tag tag = { - .type = IBV_FLOW_SPEC_ACTION_TAG, - .size = size, - }; - struct mlx5_flow_verbs *verbs = flow->cur_verbs; - - if (!mark) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "configuration cannot be null"); - if (mark->id >= MLX5_FLOW_MARK_MAX) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - &mark->id, - "mark id must in 0 <= id < " - RTE_STR(MLX5_FLOW_MARK_MAX)); - if (flow->modifier & MLX5_FLOW_MOD_MARK) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "mark action already present"); - if (flow->fate & MLX5_FLOW_FATE_DROP) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - action, - "mark is not compatible with drop" - " action"); - if (flow->modifier & MLX5_FLOW_MOD_FLAG) { - mlx5_flow_verbs_mark_update(verbs, mark->id); - size = 0; - } else if (size <= flow_size) { - tag.tag_id = mlx5_flow_mark_set(mark->id); - mlx5_flow_spec_verbs_add(flow, &tag, size); - } - flow->modifier |= MLX5_FLOW_MOD_MARK; - return size; + struct mlx5_priv *priv = dev->data->dev_private; + enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; + + if (attr->transfer && priv->config.dv_esw_en) + type = MLX5_FLOW_TYPE_DV; + if (!attr->transfer) + type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; + return type; } +#define flow_get_drv_ops(type) flow_drv_ops[type] + /** - * Convert the @p action into @p flow after ensuring the NIC will understand - * and process it correctly. - * The conversion is performed action per action, each of them is written into - * the @p flow if its size is lesser or equal to @p flow_size. - * Validation and memory consumption computation are still performed until the - * end of @p action, unless an error is encountered. + * Flow driver validation API. This abstracts calling driver specific functions. + * The type of flow driver is determined according to flow attributes. * * @param[in] dev - * Pointer to Ethernet device structure. + * Pointer to the dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. * @param[in] actions - * Pointer to flow actions array. - * @param[in, out] flow - * Pointer to the rte_flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small some - * garbage may be present. + * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. * @param[out] error - * Pointer to error structure. + * Pointer to the error structure. * * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the @p actions has fully been - * converted, otherwise another call with this returned memory size should - * be done. - * On error, a negative errno value is returned and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
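+ *
+ * Usage sketch (illustrative): the external flag distinguishes rules
+ * requested through the rte_flow API from PMD-internal control rules.
+ *
+ * @code
+ * // rule coming from the rte_flow API
+ * ret = flow_drv_validate(dev, attr, items, actions, true, error);
+ *
+ * // PMD-internal (control) rule
+ * ret = flow_drv_validate(dev, attr, items, actions, false, error);
+ * @endcode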
*/ -static int -mlx5_flow_actions(struct rte_eth_dev *dev, +static inline int +flow_drv_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], const struct rte_flow_action actions[], - struct rte_flow *flow, const size_t flow_size, - struct rte_flow_error *error) + bool external, struct rte_flow_error *error) { - size_t size = 0; - int remain = flow_size; - int ret = 0; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - switch (actions->type) { - case RTE_FLOW_ACTION_TYPE_VOID: - break; - case RTE_FLOW_ACTION_TYPE_FLAG: - ret = mlx5_flow_action_flag(actions, flow, remain, - error); - break; - case RTE_FLOW_ACTION_TYPE_MARK: - ret = mlx5_flow_action_mark(actions, flow, remain, - error); - break; - case RTE_FLOW_ACTION_TYPE_DROP: - ret = mlx5_flow_action_drop(actions, flow, remain, - error); - break; - case RTE_FLOW_ACTION_TYPE_QUEUE: - ret = mlx5_flow_action_queue(dev, actions, flow, error); - break; - case RTE_FLOW_ACTION_TYPE_RSS: - ret = mlx5_flow_action_rss(dev, actions, flow, error); - break; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "action not supported"); - } - if (ret < 0) - return ret; - if (remain > ret) - remain -= ret; - else - remain = 0; - size += ret; - } - if (!flow->fate) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no fate action found"); - return size; + fops = flow_get_drv_ops(type); + return fops->validate(dev, attr, items, actions, external, error); } /** - * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC - * after ensuring the NIC will understand and process it correctly. - * The conversion is only performed item/action per item/action, each of - * them is written into the @p flow if its size is lesser or equal to @p - * flow_size. - * Validation and memory consumption computation are still performed until the - * end, unless an error is encountered. + * Flow driver preparation API. This abstracts calling driver specific + * functions. Parent flow (rte_flow) should have driver type (drv_type). It + * calculates the size of memory required for device flow, allocates the memory, + * initializes the device flow and returns the pointer. + * + * @note + * This function initializes device flow structure such as dv or verbs in + * struct mlx5_flow. However, it is caller's responsibility to initialize the + * rest. For example, adding returning device flow to flow->dev_flow list and + * setting backward reference to the flow should be done out of this function. + * layers field is not filled either. * - * @param[in] dev - * Pointer to Ethernet device. - * @param[in, out] flow - * Pointer to flow structure. - * @param[in] flow_size - * Size in bytes of the available space in @p flow, if too small some - * garbage may be present. - * @param[in] attributes - * Flow rule attributes. - * @param[in] pattern - * Pattern specification (list terminated by the END pattern item). + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. * @param[in] actions - * Associated actions (list terminated by the END action). + * Pointer to the list of actions. * @param[out] error - * Perform verbose error reporting if not NULL. + * Pointer to the error structure. 
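+ *
+ * A minimal calling sketch (illustrative; the dev_flows list field name
+ * is an assumption based on the note above, which leaves list insertion
+ * and the backward reference to the caller):
+ *
+ * @code
+ * dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ * if (!dev_flow)
+ *         return -rte_errno;
+ * dev_flow->flow = flow;
+ * LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ * ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
+ * @endcode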
* * @return - * On success the number of bytes consumed/necessary, if the returned value - * is lesser or equal to @p flow_size, the flow has fully been converted and - * can be applied, otherwise another call with this returned memory size - * should be done. - * On error, a negative errno value is returned and rte_errno is set. + * Pointer to device flow on success, otherwise NULL and rte_errno is set. */ -static int -mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow, - const size_t flow_size, - const struct rte_flow_attr *attributes, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +static inline struct mlx5_flow * +flow_drv_prepare(const struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - struct rte_flow local_flow = { .layers = 0, }; - size_t size = sizeof(*flow); - union { - struct rte_flow_expand_rss buf; - uint8_t buffer[2048]; - } expand_buffer; - struct rte_flow_expand_rss *buf = &expand_buffer.buf; - struct mlx5_flow_verbs *original_verbs = NULL; - size_t original_verbs_size = 0; - uint32_t original_layers = 0; - int expanded_pattern_idx = 0; - int ret; - uint32_t i; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; - if (size > flow_size) - flow = &local_flow; - ret = mlx5_flow_attributes(dev, attributes, flow, error); - if (ret < 0) - return ret; - ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error); - if (ret < 0) - return ret; - if (local_flow.rss.types) { - ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), - pattern, local_flow.rss.types, - mlx5_support_expansion, - local_flow.rss.level < 2 ? - MLX5_EXPANSION_ROOT : - MLX5_EXPANSION_ROOT_OUTER); - assert(ret > 0 && - (unsigned int)ret < sizeof(expand_buffer.buffer)); - } else { - buf->entries = 1; - buf->entry[0].pattern = (void *)(uintptr_t)pattern; - } - size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t), - sizeof(void *)); - if (size <= flow_size) - flow->queue = (void *)(flow + 1); - LIST_INIT(&flow->verbs); - flow->layers = 0; - flow->modifier = 0; - flow->fate = 0; - for (i = 0; i != buf->entries; ++i) { - size_t off = size; - size_t off2; - - flow->layers = original_layers; - size += sizeof(struct ibv_flow_attr) + - sizeof(struct mlx5_flow_verbs); - off2 = size; - if (size < flow_size) { - flow->cur_verbs = (void *)((uintptr_t)flow + off); - flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1); - flow->cur_verbs->specs = - (void *)(flow->cur_verbs->attr + 1); - } - /* First iteration convert the pattern into Verbs. */ - if (i == 0) { - /* Actions don't need to be converted several time. */ - ret = mlx5_flow_actions(dev, actions, flow, - (size < flow_size) ? - flow_size - size : 0, - error); - if (ret < 0) - return ret; - size += ret; - } else { - /* - * Next iteration means the pattern has already been - * converted and an expansion is necessary to match - * the user RSS request. For that only the expanded - * items will be converted, the common part with the - * user pattern are just copied into the next buffer - * zone. 
- */ - size += original_verbs_size; - if (size < flow_size) { - rte_memcpy(flow->cur_verbs->attr, - original_verbs->attr, - original_verbs_size + - sizeof(struct ibv_flow_attr)); - flow->cur_verbs->size = original_verbs_size; - } - } - ret = mlx5_flow_items - (dev, - (const struct rte_flow_item *) - &buf->entry[i].pattern[expanded_pattern_idx], - flow, - (size < flow_size) ? flow_size - size : 0, error); - if (ret < 0) - return ret; - size += ret; - if (size <= flow_size) { - mlx5_flow_adjust_priority(dev, flow); - LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next); - } - /* - * Keep a pointer of the first verbs conversion and the layers - * it has encountered. - */ - if (i == 0) { - original_verbs = flow->cur_verbs; - original_verbs_size = size - off2; - original_layers = flow->layers; - /* - * move the index of the expanded pattern to the - * first item not addressed yet. - */ - if (pattern->type == RTE_FLOW_ITEM_TYPE_END) { - expanded_pattern_idx++; - } else { - const struct rte_flow_item *item = pattern; - - for (item = pattern; - item->type != RTE_FLOW_ITEM_TYPE_END; - ++item) - expanded_pattern_idx++; - } - } - } - /* Restore the origin layers in the flow. */ - flow->layers = original_layers; - return size; + assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->prepare(attr, items, actions, error); } /** - * Lookup and set the ptype in the data Rx part. A single Ptype can be used, - * if several tunnel rules are used on this queue, the tunnel ptype will be - * cleared. + * Flow driver translation API. This abstracts calling driver specific + * functions. Parent flow (rte_flow) should have driver type (drv_type). It + * translates a generic flow into a driver flow. flow_drv_prepare() must + * precede. + * + * @note + * dev_flow->layers could be filled as a result of parsing during translation + * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled + * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, + * flow->actions could be overwritten even though all the expanded dev_flows + * have the same actions. * - * @param rxq_ctrl - * Rx queue to update. + * @param[in] dev + * Pointer to the rte dev structure. + * @param[in, out] dev_flow + * Pointer to the mlx5 flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static void -mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) +static inline int +flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - unsigned int i; - uint32_t tunnel_ptype = 0; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; - /* Look up for the ptype to use. 
*/ - for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { - if (!rxq_ctrl->flow_tunnels_n[i]) - continue; - if (!tunnel_ptype) { - tunnel_ptype = tunnels_info[i].ptype; - } else { - tunnel_ptype = 0; - break; - } - } - rxq_ctrl->rxq.tunnel = tunnel_ptype; + assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->translate(dev, dev_flow, attr, items, actions, error); } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow. + * Flow driver apply API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It applies + * translated driver flows on to device. flow_drv_translate() must precede. * * @param[in] dev - * Pointer to Ethernet device. - * @param[in] flow + * Pointer to Ethernet device structure. + * @param[in, out] flow * Pointer to flow structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static void -mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +static inline int +flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - const int mark = !!(flow->modifier & - (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int i; - - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->queue)[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); - - if (mark) { - rxq_ctrl->rxq.mark = 1; - rxq_ctrl->flow_mark_n++; - } - if (tunnel) { - unsigned int j; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; - /* Increase the counter matching the flow. */ - for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == - tunnels_info[j].tunnel) { - rxq_ctrl->flow_tunnels_n[j]++; - break; - } - } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); - } - } + assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->apply(dev, flow, error); } /** - * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the - * @p flow if no other flow uses it with the same kind of request. + * Flow driver remove API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow + * on device. All the resources of the flow should be freed by calling + * flow_drv_destroy(). * - * @param dev + * @param[in] dev * Pointer to Ethernet device. - * @param[in] flow - * Pointer to the flow. + * @param[in, out] flow + * Pointer to flow structure. */ -static void -mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) -{ - struct priv *priv = dev->data->dev_private; - const int mark = !!(flow->modifier & - (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); - unsigned int i; - - assert(dev->data->dev_started); - for (i = 0; i != flow->rss.queue_num; ++i) { - int idx = (*flow->queue)[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); - - if (mark) { - rxq_ctrl->flow_mark_n--; - rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; - } - if (tunnel) { - unsigned int j; - - /* Decrease the counter matching the flow. 
*/ - for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == - tunnels_info[j].tunnel) { - rxq_ctrl->flow_tunnels_n[j]--; - break; - } - } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); - } - } +static inline void +flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + + assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + fops->remove(dev, flow); } /** - * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. + * Flow driver destroy API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow + * on device and releases resources of the flow. * - * @param dev + * @param[in] dev * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. */ -static void -mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) +static inline void +flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct priv *priv = dev->data->dev_private; - unsigned int i; - unsigned int idx; - - for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; - unsigned int j; + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; - if (!(*priv->rxqs)[idx]) - continue; - rxq_ctrl = container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, rxq); - rxq_ctrl->flow_mark_n = 0; - rxq_ctrl->rxq.mark = 0; - for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) - rxq_ctrl->flow_tunnels_n[j] = 0; - rxq_ctrl->rxq.tunnel = 0; - ++idx; - } + assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + fops->destroy(dev, flow); } /** @@ -2248,120 +2423,55 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error); + int ret; + ret = flow_drv_validate(dev, attr, items, actions, true, error); if (ret < 0) return ret; return 0; } /** - * Remove the flow. + * Get RSS action from the action list. * - * @param[in] dev - * Pointer to Ethernet device. - * @param[in, out] flow - * Pointer to flow structure. + * @param[in] actions + * Pointer to the list of actions. + * + * @return + * Pointer to the RSS action if exist, else return NULL. */ -static void -mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +static const struct rte_flow_action_rss* +flow_get_rss_action(const struct rte_flow_action actions[]) { - struct mlx5_flow_verbs *verbs; - - LIST_FOREACH(verbs, &flow->verbs, next) { - if (verbs->flow) { - claim_zero(mlx5_glue->destroy_flow(verbs->flow)); - verbs->flow = NULL; - } - if (verbs->hrxq) { - if (flow->fate & MLX5_FLOW_FATE_DROP) - mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, verbs->hrxq); - verbs->hrxq = NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_RSS: + return (const struct rte_flow_action_rss *) + actions->conf; + default: + break; } } + return NULL; } -/** - * Apply the flow. - * - * @param[in] dev - * Pointer to Ethernet device structure. - * @param[in, out] flow - * Pointer to flow structure. - * @param[out] error - * Pointer to error structure. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. 
- */ -static int -mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) +static unsigned int +find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) { - struct mlx5_flow_verbs *verbs; - int err; - - LIST_FOREACH(verbs, &flow->verbs, next) { - if (flow->fate & MLX5_FLOW_FATE_DROP) { - verbs->hrxq = mlx5_hrxq_drop_new(dev); - if (!verbs->hrxq) { - rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot get drop hash queue"); - goto error; - } - } else { - struct mlx5_hrxq *hrxq; - - hrxq = mlx5_hrxq_get(dev, flow->key, - MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), - flow->rss.queue_num); - if (!hrxq) - hrxq = mlx5_hrxq_new(dev, flow->key, - MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), - flow->rss.queue_num); - if (!hrxq) { - rte_flow_error_set - (error, rte_errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot get hash queue"); - goto error; - } - verbs->hrxq = hrxq; - } - verbs->flow = - mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr); - if (!verbs->flow) { - rte_flow_error_set(error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "hardware refuses to create flow"); - goto error; - } - } - return 0; -error: - err = rte_errno; /* Save rte_errno before cleanup. */ - LIST_FOREACH(verbs, &flow->verbs, next) { - if (verbs->hrxq) { - if (flow->fate & MLX5_FLOW_FATE_DROP) - mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, verbs->hrxq); - verbs->hrxq = NULL; + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; } } - rte_errno = err; /* Restore rte_errno. */ - return -rte_errno; + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; } /** @@ -2377,6 +2487,8 @@ error: * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -2384,50 +2496,138 @@ error: * A flow on success, NULL otherwise and rte_errno is set. 
*/ static struct rte_flow * -mlx5_flow_list_create(struct rte_eth_dev *dev, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, struct rte_flow_error *error) { struct rte_flow *flow = NULL; - size_t size = 0; + struct mlx5_flow *dev_flow; + const struct rte_flow_action_rss *rss; + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; int ret; + uint32_t i; + uint32_t flow_size; - ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + ret = flow_drv_validate(dev, attr, items, actions, external, error); if (ret < 0) return NULL; - size = ret; - flow = rte_calloc(__func__, 1, size, 0); + flow_size = sizeof(struct rte_flow); + rss = flow_get_rss_action(actions); + if (rss) + flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), + sizeof(void *)); + else + flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); + flow = rte_calloc(__func__, 1, flow_size, 0); if (!flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "not enough memory to create flow"); + rte_errno = ENOMEM; return NULL; } - ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); - if (ret < 0) { - rte_free(flow); - return NULL; + flow->drv_type = flow_get_drv_type(dev, attr); + flow->ingress = attr->ingress; + flow->transfer = attr->transfer; + assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && + flow->drv_type < MLX5_FLOW_TYPE_MAX); + flow->queue = (void *)(flow + 1); + LIST_INIT(&flow->dev_flows); + if (rss && rss->types) { + unsigned int graph_root; + + graph_root = find_graph_root(items, rss->level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + items, rss->types, + mlx5_support_expansion, + graph_root); + assert(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)items; + } + for (i = 0; i < buf->entries; ++i) { + dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern, + actions, error); + if (!dev_flow) + goto error; + dev_flow->flow = flow; + dev_flow->external = external; + LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + ret = flow_drv_translate(dev, dev_flow, attr, + buf->entry[i].pattern, + actions, error); + if (ret < 0) + goto error; } - assert((size_t)ret == size); if (dev->data->dev_started) { - ret = mlx5_flow_apply(dev, flow, error); - if (ret < 0) { - ret = rte_errno; /* Save rte_errno before cleanup. */ - if (flow) { - mlx5_flow_remove(dev, flow); - rte_free(flow); - } - rte_errno = ret; /* Restore rte_errno. */ - return NULL; - } + ret = flow_drv_apply(dev, flow, error); + if (ret < 0) + goto error; } TAILQ_INSERT_TAIL(list, flow, next); - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); return flow; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + assert(flow); + flow_drv_destroy(dev, flow); + rte_free(flow); + rte_errno = ret; /* Restore rte_errno. */ + return NULL; +} + +/** + * Create a dedicated flow rule on e-switch table 0 (root table), to direct all + * incoming packets to table 1. 
+ * + * Other flow rules, requested for group n, will be created in + * e-switch table n+1. + * Jump action to e-switch group n will be created to group n+1. + * + * Used when working in switchdev mode, to utilise advantages of table 1 + * and above. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Pointer to flow on success, NULL otherwise and rte_errno is set. + */ +struct rte_flow * +mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev) +{ + const struct rte_flow_attr attr = { + .group = 0, + .priority = 0, + .ingress = 1, + .egress = 0, + .transfer = 1, + }; + const struct rte_flow_item pattern = { + .type = RTE_FLOW_ITEM_TYPE_END, + }; + struct rte_flow_action_jump jump = { + .group = 1, + }; + const struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + + return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, + actions, false, &error); } /** @@ -2443,9 +2643,10 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - return mlx5_flow_list_create - (dev, &((struct priv *)dev->data->dev_private)->flows, - attr, items, actions, error); + struct mlx5_priv *priv = dev->data->dev_private; + + return flow_list_create(dev, &priv->flows, + attr, items, actions, true, error); } /** @@ -2459,17 +2660,18 @@ mlx5_flow_create(struct rte_eth_dev *dev, * Flow to destroy. */ static void -mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow) +flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { - mlx5_flow_remove(dev, flow); - TAILQ_REMOVE(list, flow, next); /* * Update RX queue flags only if port is started, otherwise it is * already clean. 
*/ if (dev->data->dev_started) - mlx5_flow_rxq_flags_trim(dev, flow); + flow_rxq_flags_trim(dev, flow); + flow_drv_destroy(dev, flow); + TAILQ_REMOVE(list, flow, next); + rte_free(flow->fdir); rte_free(flow); } @@ -2488,7 +2690,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) struct rte_flow *flow; flow = TAILQ_FIRST(list); - mlx5_flow_list_destroy(dev, list, flow); + flow_list_destroy(dev, list, flow); } } @@ -2506,8 +2708,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) struct rte_flow *flow; TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) - mlx5_flow_remove(dev, flow); - mlx5_flow_rxq_flags_clear(dev); + flow_drv_remove(dev, flow); + flow_rxq_flags_clear(dev); } /** @@ -2529,10 +2731,10 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) int ret = 0; TAILQ_FOREACH(flow, list, next) { - ret = mlx5_flow_apply(dev, flow, &error); + ret = flow_drv_apply(dev, flow, &error); if (ret < 0) goto error; - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); } return 0; error: @@ -2553,7 +2755,7 @@ error: int mlx5_flow_verify(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; @@ -2589,7 +2791,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow_item_vlan *vlan_spec, struct rte_flow_item_vlan *vlan_mask) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, .priority = MLX5_FLOW_PRIO_RSVD, @@ -2603,7 +2805,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, }, { .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : - RTE_FLOW_ITEM_TYPE_END, + RTE_FLOW_ITEM_TYPE_END, .spec = vlan_spec, .last = NULL, .mask = vlan_mask, @@ -2635,14 +2837,13 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow_error error; unsigned int i; - if (!priv->reta_idx_n) { - rte_errno = EINVAL; - return -rte_errno; + if (!priv->reta_idx_n || !priv->rxqs_n) { + return 0; } for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, - actions, &error); + flow = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, false, &error); if (!flow) return -rte_errno; return 0; @@ -2680,9 +2881,9 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2696,7 +2897,7 @@ int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); return 0; @@ -2713,7 +2914,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, @@ -2730,6 +2931,49 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, return 0; } +/** + * Query a flow. 
+ * + * @see rte_flow_query() + * @see rte_flow_ops + */ +static int +flow_drv_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type ftype = flow->drv_type; + + assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(ftype); + + return fops->query(dev, flow, actions, data, error); +} + +/** + * Query a flow. + * + * @see rte_flow_query() + * @see rte_flow_ops + */ +int +mlx5_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret; + + ret = flow_drv_query(dev, flow, actions, data, error); + if (ret < 0) + return ret; + return 0; +} + /** * Convert a flow director filter to a generic flow. * @@ -2744,11 +2988,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_convert(struct rte_eth_dev *dev, +flow_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; const struct rte_eth_fdir_masks *mask = &dev->data->dev_conf.fdir_conf.mask; @@ -2791,14 +3035,13 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = input->flow.ip4_flow.src_ip, .dst_addr = input->flow.ip4_flow.dst_ip, .time_to_live = input->flow.ip4_flow.ttl, .type_of_service = input->flow.ip4_flow.tos, - .next_proto_id = input->flow.ip4_flow.proto, }; - attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ .src_addr = mask->ipv4_mask.src_ip, .dst_addr = mask->ipv4_mask.dst_ip, .time_to_live = mask->ipv4_mask.ttl, @@ -2814,7 +3057,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ .hop_limits = input->flow.ipv6_flow.hop_limits, .proto = input->flow.ipv6_flow.proto, }; @@ -2846,11 +3089,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, /* Handle L4. 
*/ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp4_flow.src_port, .dst_port = input->flow.udp4_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2861,11 +3104,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2876,11 +3119,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - attributes->l4.udp.hdr = (struct udp_hdr){ + attributes->l4.udp.hdr = (struct rte_udp_hdr){ .src_port = input->flow.udp6_flow.src_port, .dst_port = input->flow.udp6_flow.dst_port, }; - attributes->l4_mask.udp.hdr = (struct udp_hdr){ + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2891,11 +3134,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2917,6 +3160,69 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, return 0; } +#define FLOW_FDIR_CMP(f1, f2, fld) \ + memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) + +/** + * Compare two FDIR flows. If items and actions are identical, the two flows are + * regarded as same. + * + * @param dev + * Pointer to Ethernet device. + * @param f1 + * FDIR flow to compare. + * @param f2 + * FDIR flow to compare. + * + * @return + * Zero on match, 1 otherwise. + */ +static int +flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) +{ + if (FLOW_FDIR_CMP(f1, f2, attr) || + FLOW_FDIR_CMP(f1, f2, l2) || + FLOW_FDIR_CMP(f1, f2, l2_mask) || + FLOW_FDIR_CMP(f1, f2, l3) || + FLOW_FDIR_CMP(f1, f2, l3_mask) || + FLOW_FDIR_CMP(f1, f2, l4) || + FLOW_FDIR_CMP(f1, f2, l4_mask) || + FLOW_FDIR_CMP(f1, f2, actions[0].type)) + return 1; + if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && + FLOW_FDIR_CMP(f1, f2, queue)) + return 1; + return 0; +} + +/** + * Search device flow list to find out a matched FDIR flow. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_flow + * FDIR flow to lookup. + * + * @return + * Pointer of flow if found, NULL otherwise. 
+ */ +static struct rte_flow * +flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + + assert(fdir_flow); + TAILQ_FOREACH(flow, &priv->flows, next) { + if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { + DRV_LOG(DEBUG, "port %u found FDIR flow %p", + dev->data->port_id, (void *)flow); + break; + } + } + return flow; +} + /** * Add new flow director filter and store it in list. * @@ -2929,33 +3235,39 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_add(struct rte_eth_dev *dev, +flow_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { - struct priv *priv = dev->data->dev_private; - struct mlx5_fdir attributes = { - .attr.group = 0, - .l2_mask = { - .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00", - .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", - .type = 0, - }, - }; - struct rte_flow_error error; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir *fdir_flow; struct rte_flow *flow; int ret; - ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); + fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); + if (!fdir_flow) { + rte_errno = ENOMEM; + return -rte_errno; + } + ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); if (ret) - return ret; - flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, - attributes.items, attributes.actions, - &error); + goto error; + flow = flow_fdir_filter_lookup(dev, fdir_flow); if (flow) { - DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id, - (void *)flow); - return 0; + rte_errno = EEXIST; + goto error; } + flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + fdir_flow->items, fdir_flow->actions, true, + NULL); + if (!flow) + goto error; + assert(!flow->fdir); + flow->fdir = fdir_flow; + DRV_LOG(DEBUG, "port %u created FDIR flow %p", + dev->data->port_id, (void *)flow); + return 0; +error: + rte_free(fdir_flow); return -rte_errno; } @@ -2971,12 +3283,28 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, - const struct rte_eth_fdir_filter *fdir_filter - __rte_unused) +flow_fdir_filter_delete(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter) { - rte_errno = ENOTSUP; - return -rte_errno; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow; + struct mlx5_fdir fdir_flow = { + .attr.group = 0, + }; + int ret; + + ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); + if (ret) + return -rte_errno; + flow = flow_fdir_filter_lookup(dev, &fdir_flow); + if (!flow) { + rte_errno = ENOENT; + return -rte_errno; + } + flow_list_destroy(dev, &priv->flows, flow); + DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", + dev->data->port_id, (void *)flow); + return 0; } /** @@ -2991,15 +3319,15 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_fdir_filter_update(struct rte_eth_dev *dev, +flow_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = mlx5_fdir_filter_delete(dev, fdir_filter); + ret = flow_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - return mlx5_fdir_filter_add(dev, fdir_filter); + return flow_fdir_filter_add(dev, fdir_filter); } /** @@ -3009,9 +3337,9 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev, * Pointer to Ethernet device. */ static void -mlx5_fdir_filter_flush(struct rte_eth_dev *dev) +flow_fdir_filter_flush(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->flows); } @@ -3025,7 +3353,7 @@ mlx5_fdir_filter_flush(struct rte_eth_dev *dev) * Resulting flow director information. */ static void -mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) +flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { struct rte_eth_fdir_masks *mask = &dev->data->dev_conf.fdir_conf.mask; @@ -3055,7 +3383,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, +flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { enum rte_fdir_mode fdir_mode = @@ -3072,16 +3400,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, } switch (filter_op) { case RTE_ETH_FILTER_ADD: - return mlx5_fdir_filter_add(dev, arg); + return flow_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - return mlx5_fdir_filter_update(dev, arg); + return flow_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - return mlx5_fdir_filter_delete(dev, arg); + return flow_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: - mlx5_fdir_filter_flush(dev); + flow_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - mlx5_fdir_info_get(dev, arg); + flow_fdir_info_get(dev, arg); break; default: DRV_LOG(DEBUG, "port %u unknown operation %u", @@ -3122,7 +3450,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - return mlx5_fdir_ctrl_func(dev, filter_op, arg); + return flow_fdir_ctrl_func(dev, filter_op, arg); default: DRV_LOG(ERR, "port %u filter type (%d) not supported", dev->data->port_id, filter_type); @@ -3131,3 +3459,186 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, } return 0; } + +#define MLX5_POOL_QUERY_FREQ_US 1000000 + +/** + * Set the periodic procedure for triggering asynchronous batch queries for all + * the counter pools. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + */ +void +mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) +{ + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0); + uint32_t pools_n = rte_atomic16_read(&cont->n_valid); + uint32_t us; + + cont = MLX5_CNT_CONTAINER(sh, 1, 0); + pools_n += rte_atomic16_read(&cont->n_valid); + us = MLX5_POOL_QUERY_FREQ_US / pools_n; + DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us); + if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { + sh->cmng.query_thread_on = 0; + DRV_LOG(ERR, "Cannot reinitialize query alarm\n"); + } else { + sh->cmng.query_thread_on = 1; + } +} + +/** + * The periodic procedure for triggering asynchronous batch queries for all the + * counter pools. 
This function is probably called by the host thread. + * + * @param[in] arg + * The parameter for the alarm process. + */ +void +mlx5_flow_query_alarm(void *arg) +{ + struct mlx5_ibv_shared *sh = arg; + struct mlx5_devx_obj *dcs; + uint16_t offset; + int ret; + uint8_t batch = sh->cmng.batch; + uint16_t pool_index = sh->cmng.pool_index; + struct mlx5_pools_container *cont; + struct mlx5_pools_container *mcont; + struct mlx5_flow_counter_pool *pool; + + if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) + goto set_alarm; +next_container: + cont = MLX5_CNT_CONTAINER(sh, batch, 1); + mcont = MLX5_CNT_CONTAINER(sh, batch, 0); + /* Check if resize was done and need to flip a container. */ + if (cont != mcont) { + if (cont->pools) { + /* Clean the old container. */ + rte_free(cont->pools); + memset(cont, 0, sizeof(*cont)); + } + rte_cio_wmb(); + /* Flip the host container. */ + sh->cmng.mhi[batch] ^= (uint8_t)2; + cont = mcont; + } + if (!cont->pools) { + /* 2 empty containers case is unexpected. */ + if (unlikely(batch != sh->cmng.batch)) + goto set_alarm; + batch ^= 0x1; + pool_index = 0; + goto next_container; + } + pool = cont->pools[pool_index]; + if (pool->raw_hw) + /* There is a pool query in progress. */ + goto set_alarm; + pool->raw_hw = + LIST_FIRST(&sh->cmng.free_stat_raws); + if (!pool->raw_hw) + /* No free counter statistics raw memory. */ + goto set_alarm; + dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read + (&pool->a64_dcs); + offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; + ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - + offset, NULL, NULL, + pool->raw_hw->mem_mng->dm->id, + (void *)(uintptr_t) + (pool->raw_hw->data + offset), + sh->devx_comp, + (uint64_t)(uintptr_t)pool); + if (ret) { + DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" + " %d\n", pool->min_dcs->id); + pool->raw_hw = NULL; + goto set_alarm; + } + pool->raw_hw->min_dcs_id = dcs->id; + LIST_REMOVE(pool->raw_hw, next); + sh->cmng.pending_queries++; + pool_index++; + if (pool_index >= rte_atomic16_read(&cont->n_valid)) { + batch ^= 0x1; + pool_index = 0; + } +set_alarm: + sh->cmng.batch = batch; + sh->cmng.pool_index = pool_index; + mlx5_set_query_alarm(sh); +} + +/** + * Handler for the HW respond about ready values from an asynchronous batch + * query. This function is probably called by the host thread. + * + * @param[in] sh + * The pointer to the shared IB device context. + * @param[in] async_id + * The Devx async ID. + * @param[in] status + * The status of the completion. + */ +void +mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, + uint64_t async_id, int status) +{ + struct mlx5_flow_counter_pool *pool = + (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; + struct mlx5_counter_stats_raw *raw_to_free; + + if (unlikely(status)) { + raw_to_free = pool->raw_hw; + } else { + raw_to_free = pool->raw; + rte_spinlock_lock(&pool->sl); + pool->raw = pool->raw_hw; + rte_spinlock_unlock(&pool->sl); + rte_atomic64_add(&pool->query_gen, 1); + /* Be sure the new raw counters data is updated in memory. */ + rte_cio_wmb(); + } + LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); + pool->raw_hw = NULL; + sh->cmng.pending_queries--; +} + +/** + * Translate the rte_flow group index to HW table value. + * + * @param[in] attributes + * Pointer to flow attributes + * @param[in] external + * Value is part of flow rule created by request external to PMD. + * @param[in] group + * rte_flow group index value. 
+ * @param[out] table + * HW table value. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, + uint32_t group, uint32_t *table, + struct rte_flow_error *error) +{ + if (attributes->transfer && external) { + if (group == UINT32_MAX) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "group index not supported"); + *table = group + 1; + } else { + *table = group; + } + return 0; +}