* Copyright 2016 Mellanox Technologies, Ltd
*/
+#include <netinet/in.h>
#include <sys/queue.h>
+#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
+#include "mlx5_flow.h"
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/* Pattern outer Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-
-/* Pattern inner Layer bits. */
-#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
-#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
-#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
-#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
-#define MLX5_FLOW_LAYER_GRE (1u << 14)
-#define MLX5_FLOW_LAYER_MPLS (1u << 15)
-
-/* Outer Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
- (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
- (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-#define MLX5_FLOW_LAYER_OUTER \
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
- MLX5_FLOW_LAYER_OUTER_L4)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
- (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
-
-/* Inner Masks. */
-#define MLX5_FLOW_LAYER_INNER_L3 \
- (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
-#define MLX5_FLOW_LAYER_INNER_L4 \
- (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
-#define MLX5_FLOW_LAYER_INNER \
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
- MLX5_FLOW_LAYER_INNER_L4)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-#define MLX5_FLOW_FATE_RSS (1u << 2)
-
-/* Modify a packet. */
-#define MLX5_FLOW_MOD_FLAG (1u << 0)
-#define MLX5_FLOW_MOD_MARK (1u << 1)
-#define MLX5_FLOW_MOD_COUNT (1u << 2)
-
-/* possible L3 layers protocols filtering. */
-#define MLX5_IP_PROTOCOL_TCP 6
-#define MLX5_IP_PROTOCOL_UDP 17
-#define MLX5_IP_PROTOCOL_GRE 47
-#define MLX5_IP_PROTOCOL_MPLS 147
-
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+/** Device flow drivers. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
+#endif
+extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
+extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
+
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
+
+const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
+ [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+#endif
+ [MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
+ [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
+ [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
+};
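+
+/*
+ * Usage sketch (illustrative, not part of this patch): callers are
+ * expected to dispatch through this table by flow type, e.g.
+ *
+ *	const struct mlx5_flow_driver_ops *fops;
+ *
+ *	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ *	fops = flow_drv_ops[type];
+ *	ret = fops->validate(dev, attr, items, actions, error);
+ *
+ * where "type" and "ret" are hypothetical locals and the validate
+ * callback is assumed from struct mlx5_flow_driver_ops.
+ */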
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
+ MLX5_EXPANSION_ROOT_ETH_VLAN,
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_VLAN,
MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_ETH_VLAN,
+ MLX5_EXPANSION_VLAN,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP,
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_END,
},
+ [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
[MLX5_EXPANSION_OUTER_ETH] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV6,
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
},
+ [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
+ },
+ [MLX5_EXPANSION_OUTER_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
[MLX5_EXPANSION_OUTER_IPV4] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_ETH,
},
+ [MLX5_EXPANSION_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
[MLX5_EXPANSION_IPV4] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP),
},
};
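+
+/*
+ * Illustrative expansion example (an assumption about typical use, not
+ * added by this patch): an RSS flow with pattern "eth / ipv4 / end" whose
+ * rss types cover UDP and TCP is expanded along MLX5_EXPANSION_ETH ->
+ * MLX5_EXPANSION_IPV4 -> MLX5_EXPANSION_IPV4_{UDP,TCP} into
+ *
+ *	eth / ipv4
+ *	eth / ipv4 / udp
+ *	eth / ipv4 / tcp
+ *
+ * so each expanded pattern gets its own device flow and hash fields.
+ */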
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
- LIST_ENTRY(mlx5_flow_verbs) next;
- unsigned int size; /**< Size of the attribute. */
- struct {
- struct ibv_flow_attr *attr;
- /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
- };
- struct ibv_flow *flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
-};
-
-/* Counters information. */
-struct mlx5_flow_counter {
- LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
- uint32_t shared:1; /**< Share counter ID with other flow rules. */
- uint32_t ref_cnt:31; /**< Reference counter. */
- uint32_t id; /**< Counter ID. */
- struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
- uint64_t hits; /**< Number of packets matched by the rule. */
- uint64_t bytes; /**< Number of bytes matched by the rule. */
-};
-
-/* Flow structure. */
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct rte_flow_attr attributes; /**< User flow attribute. */
- uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
- uint32_t layers;
- /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
- uint32_t modifier;
- /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
- uint32_t fate;
- /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
- uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
- LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
- struct mlx5_flow_verbs *cur_verbs;
- /**< Current Verbs flow structure being filled. */
- struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
- struct rte_flow_action_rss rss;/**< RSS context. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-};
-
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
struct rte_flow_attr attr;
- struct rte_flow_action actions[2];
struct rte_flow_item items[4];
struct rte_flow_item_eth l2;
struct rte_flow_item_eth l2_mask;
struct rte_flow_item_udp udp;
struct rte_flow_item_tcp tcp;
} l4_mask;
+ struct rte_flow_action actions[2];
struct rte_flow_action_queue queue;
};
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
-
-/*
- * Number of sub priorities.
- * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
- * matching on the NIC (firmware dependent) L4 most have the higher priority
- * followed by L3 and ending with L2.
- */
-#define MLX5_PRIORITY_MAP_L2 2
-#define MLX5_PRIORITY_MAP_L3 1
-#define MLX5_PRIORITY_MAP_L4 0
-#define MLX5_PRIORITY_MAP_MAX 3
-
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
- uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
- .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS,
* Discover the maximum number of priority available.
*
* @param[in] dev
- * Pointer to Ethernet device.
+ * Pointer to the Ethernet device structure.
*
* @return
* number of supported flow priority on success, a negative errno
}
/**
- * Adjust flow priority.
+ * Adjust flow priority based on the highest layer and the request priority.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Pointer to an rte flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
*/
-static void
-mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+			  uint32_t subpriority)
{
- struct priv *priv = dev->data->dev_private;
- uint32_t priority = flow->attributes.priority;
- uint32_t subpriority = flow->cur_verbs->attr->priority;
+ uint32_t res = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
- priority = priority_map_3[priority][subpriority];
+ res = priority_map_3[priority][subpriority];
break;
case RTE_DIM(priority_map_5):
- priority = priority_map_5[priority][subpriority];
+ res = priority_map_5[priority][subpriority];
break;
}
- flow->cur_verbs->attr->priority = priority;
-}
-
-/**
- * Get a flow counter.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
- *
- * @return
- * A pointer to the counter, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_counter *
-mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter *cnt;
-
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
- if (cnt->shared != shared)
- continue;
- if (cnt->id != id)
- continue;
- cnt->ref_cnt++;
- return cnt;
- }
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .id = id,
- .cs = mlx5_glue->create_counter_set
- (priv->ctx,
- &(struct ibv_counter_set_init_attr){
- .counter_set_id = id,
- }),
- .hits = 0,
- .bytes = 0,
- };
-
- if (!tmpl.cs) {
- rte_errno = errno;
- return NULL;
- }
- cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
- if (!cnt) {
- rte_errno = ENOMEM;
- return NULL;
- }
- *cnt = tmpl;
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
- return cnt;
-#endif
- rte_errno = ENOTSUP;
- return NULL;
-}
-
-/**
- * Release a flow counter.
- *
- * @param[in] counter
- * Pointer to the counter handler.
- */
-static void
-mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
-{
- if (--counter->ref_cnt == 0) {
- claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
- LIST_REMOVE(counter, next);
- rte_free(counter);
- }
-}
-
-/**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] attributes
- * Pointer to flow attributes
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_attributes(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attributes,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- uint32_t priority_max =
- ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
-
- if (attributes->group)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priority out of range");
- if (attributes->egress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- if (attributes->transfer)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL,
- "transfer is not supported");
- if (!attributes->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "ingress attribute is mandatory");
- flow->attributes = *attributes;
- if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
- flow->attributes.priority = priority_max;
- return 0;
+ return res;
}
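+
+/*
+ * Worked example (assuming priv->config.flow_prio == 3, i.e. the
+ * priority_map_3 table is selected): a rule with base priority 1 whose
+ * lowest matched layer is L3 uses subpriority MLX5_PRIORITY_MAP_L3 (1):
+ *
+ *	mlx5_flow_adjust_priority(dev, 1, MLX5_PRIORITY_MAP_L3)
+ *		== priority_map_3[1][1] == 3
+ */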
/**
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
const uint8_t *mask,
const uint8_t *nic_mask,
" bits");
if (!item->spec && (item->mask || item->last))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"mask/last without a spec is not"
" supported");
if (item->spec && item->last) {
}
ret = memcmp(spec, last, size);
if (ret != 0)
- return rte_flow_error_set(error, ENOTSUP,
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "range is not supported");
+ "range is not valid");
}
return 0;
}
/**
- * Add a verbs item specification into @p flow.
- *
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] src
- * Create specification.
- * @param[in] size
- * Size in bytes of the specification to copy.
- */
-static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
-{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (verbs->specs) {
- void *dst;
-
- dst = (void *)(verbs->specs + verbs->size);
- memcpy(dst, src, size);
- ++verbs->attr->num_of_specs;
- }
- verbs->size += size;
-}
-
-/**
- * Adjust verbs hash fields according to the @p flow information.
+ * Adjust the hash fields according to the @p flow information.
*
- * @param[in, out] flow.
- * Pointer to flow structure.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
* @param[in] tunnel
* 1 when the hash field is for a tunnel item.
* @param[in] layer_types
* ETH_RSS_* types.
* @param[in] hash_fields
* Item hash fields.
+ *
+ * @return
+ *   The hash fields that should be used.
*/
-static void
-mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
- int tunnel __rte_unused,
- uint32_t layer_types, uint64_t hash_fields)
+uint64_t
+mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+ int tunnel __rte_unused, uint64_t layer_types,
+ uint64_t hash_fields)
{
+ struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
- if (flow->rss.level == 2 && !tunnel)
- hash_fields = 0;
- else if (flow->rss.level < 2 && tunnel)
- hash_fields = 0;
+ int rss_request_inner = flow->rss.level >= 2;
+
+ /* Check RSS hash level for tunnel. */
+ if (tunnel && rss_request_inner)
+ hash_fields |= IBV_RX_HASH_INNER;
+ else if (tunnel || rss_request_inner)
+ return 0;
#endif
+ /* Check if requested layer matches RSS hash fields. */
if (!(flow->rss.types & layer_types))
- hash_fields = 0;
- flow->cur_verbs->hash_fields |= hash_fields;
+ return 0;
+ return hash_fields;
}
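+
+/*
+ * Illustrative call (hypothetical caller, assuming tunnel offload support
+ * is compiled in): for an inner IPv4 item of a tunneled flow created with
+ * rss level 2,
+ *
+ *	mlx5_flow_hashfields_adjust(dev_flow, 1,
+ *				    ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ *				    ETH_RSS_NONFRAG_IPV4_OTHER,
+ *				    IBV_RX_HASH_SRC_IPV4 |
+ *				    IBV_RX_HASH_DST_IPV4)
+ *
+ * returns the IPv4 hash fields with IBV_RX_HASH_INNER set, or 0 when the
+ * requested rss types do not cover IPv4.
+ */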
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
+ * Look up and set the tunnel ptype in the Rx queue data. Only a single
+ * ptype can be set; if several kinds of tunnel rules are used on this
+ * queue, the tunnel ptype is cleared.
*
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * @param rxq_ctrl
+ * Rx queue to update.
*/
-static int
-mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+static void
+flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- const struct rte_flow_item_eth *spec = item->spec;
- const struct rte_flow_item_eth *mask = item->mask;
- const struct rte_flow_item_eth nic_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = RTE_BE16(0xffff),
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- const unsigned int size = sizeof(struct ibv_flow_spec_eth);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
-
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L2 layers already configured");
- if (!mask)
- mask = &rte_flow_item_eth_mask;
- ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_eth),
- error);
- if (ret)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- if (size > flow_size)
- return size;
- if (spec) {
- unsigned int i;
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
- memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth.val.ether_type = spec->type;
- memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth.mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
- eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ /* Look up for the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
}
- eth.val.ether_type &= eth.mask.ether_type;
}
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, ð, size);
- return size;
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
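+
+/*
+ * For instance (illustrative): a queue carrying only VXLAN rules ends up
+ * with rxq.tunnel == RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; once a
+ * rule of a second tunnel kind (e.g. GRE) lands on the same queue, two
+ * flow_tunnels_n[] counters are non-zero and rxq.tunnel is cleared to 0.
+ */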
/**
- * Update the VLAN tag in the Verbs Ethernet specification.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the
+ * device flow.
*
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] eth
- * Verbs structure containing the VLAN information to copy.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ * Pointer to device flow structure.
*/
static void
-mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
- struct ibv_flow_spec_eth *eth)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- const enum ibv_flow_spec_type search = eth->type;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- struct ibv_flow_spec_eth *e =
- (struct ibv_flow_spec_eth *)hdr;
-
- e->val.vlan_tag = eth->val.vlan_tag;
- e->mask.vlan_tag = eth->mask.vlan_tag;
- e->val.ether_type = eth->val.ether_type;
- e->mask.ether_type = eth->mask.ether_type;
- break;
+
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
}
}
/**
- * Convert the @p item into @p flow (or by updating the already present
- * Ethernet Verbs) specification after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
* Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
*/
-static int
-mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+static void
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- const struct rte_flow_item_vlan *spec = item->spec;
- const struct rte_flow_item_vlan *mask = item->mask;
- const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
- };
- unsigned int size = sizeof(struct ibv_flow_spec_eth);
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
- const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
- MLX5_FLOW_LAYER_INNER_L4) :
- (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
- const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
- const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ struct mlx5_flow *dev_flow;
- if (flow->layers & vlanm)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VLAN layer already configured");
- else if ((flow->layers & l34m) != 0)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L2 layer cannot follow L3/L4 layer");
- if (!mask)
- mask = &rte_flow_item_vlan_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_vlan), error);
- if (ret)
- return ret;
- if (spec) {
- eth.val.vlan_tag = spec->tci;
- eth.mask.vlan_tag = mask->tci;
- eth.val.vlan_tag &= eth.mask.vlan_tag;
- eth.val.ether_type = spec->inner_type;
- eth.mask.ether_type = mask->inner_type;
- eth.val.ether_type &= eth.mask.ether_type;
- }
- /*
- * From verbs perspective an empty VLAN is equivalent
- * to a packet without VLAN layer.
- */
- if (!eth.mask.vlan_tag)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- item->spec,
- "VLAN cannot be empty");
- if (!(flow->layers & l2m)) {
- if (size <= flow_size) {
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, ð, size);
- }
- } else {
- if (flow->cur_verbs)
- mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
- ð);
- size = 0; /* Only an update is done in eth specification. */
- }
- flow->layers |= tunnel ?
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
- return size;
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_set(dev, dev_flow);
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * device flow if no other flow uses it with the same kind of request.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the device flow.
+ */
+static void
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_trim(dev, dev_flow);
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
+ }
+}
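+
+/*
+ * Lifecycle sketch (an assumption about the expected call sites, for
+ * illustration): flags are set on flow creation, trimmed on individual
+ * flow destruction and cleared wholesale when the port stops, e.g.
+ *
+ *	flow_rxq_flags_set(dev, flow);	(create: mark_n++, tunnels_n[]++)
+ *	flow_rxq_flags_trim(dev, flow);	(destroy: mark_n--, tunnels_n[]--)
+ *	flow_rxq_flags_clear(dev);	(stop: reset all Rx queue flags)
+ */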
+
+/**
+ * Validate the flag action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_action_flag(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv4 *spec = item->spec;
- const struct rte_flow_item_ipv4 *mask = item->mask;
- const struct rte_flow_item_ipv4 nic_mask = {
- .hdr = {
- .src_addr = RTE_BE32(0xffffffff),
- .dst_addr = RTE_BE32(0xffffffff),
- .type_of_service = 0xff,
- .next_proto_id = 0xff,
- },
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
- struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = IBV_FLOW_SPEC_IPV4_EXT |
- (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't mark and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 flag"
+ " actions in same flow");
+ if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "flag action not supported for "
+ "egress");
+ return 0;
+}
+
+/**
+ * Validate the mark action.
+ *
+ * @param[in] action
+ *   Pointer to the mark action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+					  "mark id must be in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't flag and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 mark actions in same"
+ " flow");
+ if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 cannot follow an L4 layer.");
- if (!mask)
- mask = &rte_flow_item_ipv4_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv4), error);
- if (ret < 0)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (spec) {
- ipv4.val = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4.val.src_ip &= ipv4.mask.src_ip;
- ipv4.val.dst_ip &= ipv4.mask.dst_ip;
- ipv4.val.proto &= ipv4.mask.proto;
- ipv4.val.tos &= ipv4.mask.tos;
- }
- flow->l3_protocol_en = !!ipv4.mask.proto;
- flow->l3_protocol = ipv4.val.proto;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv4, size);
- }
- return size;
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "mark action not supported for "
+ "egress");
+ return 0;
}
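+
+/*
+ * Illustrative caller pattern (hypothetical): a driver validate() loop
+ * passes the action flags accumulated so far and records the action on
+ * success, mirroring the checks above:
+ *
+ *	ret = mlx5_flow_validate_action_mark(action, action_flags,
+ *					     attr, error);
+ *	if (ret < 0)
+ *		return ret;
+ *	action_flags |= MLX5_FLOW_ACTION_MARK;
+ */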
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+/**
+ * Validate the drop action.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_action_drop(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv6 *spec = item->spec;
- const struct rte_flow_item_ipv6 *mask = item->mask;
- const struct rte_flow_item_ipv6 nic_mask = {
- .hdr = {
- .src_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .vtc_flow = RTE_BE32(0xffffffff),
- .proto = 0xff,
- .hop_limits = 0xff,
- },
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
- struct ibv_flow_spec_ipv6 ipv6 = {
- .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "drop action not supported for "
+ "egress");
+ return 0;
+}
+
+/**
+ * Validate the queue action.
+ *
+ * @param[in] action
+ * Pointer to the queue action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (!priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No Rx queues configured");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "queue action not supported for "
+ "egress");
+ return 0;
+}
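+
+/*
+ * Example of a rejected configuration (illustrative): with 4 configured
+ * Rx queues, a queue action with index 4 fails with EINVAL ("queue index
+ * out of range"), as does any second fate action (drop/queue/rss) in the
+ * same flow.
+ */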
+
+/**
+ * Validate the rss action.
+ *
+ * @param[in] action
+ *   Pointer to the rss action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions"
+ " in same flow");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 cannot follow an L4 layer.");
- /*
- * IPv6 is not recognised by the NIC inside a GRE tunnel.
- * Such support has to be disabled as the rule will be
- * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
- * Mellanox OFED 4.4-1.0.0.0.
- */
- if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "IPv6 inside a GRE tunnel is"
- " not recognised.");
- if (!mask)
- mask = &rte_flow_item_ipv6_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv6), error);
- if (ret < 0)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (spec) {
- unsigned int i;
- uint32_t vtc_flow_val;
- uint32_t vtc_flow_mask;
-
- memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6.val.src_ip));
- memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6.val.dst_ip));
- memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6.mask.src_ip));
- memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6.mask.dst_ip));
- vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
- vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
- ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.val.next_hdr = spec->hdr.proto;
- ipv6.val.hop_limit = spec->hdr.hop_limits;
- ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.mask.next_hdr = mask->hdr.proto;
- ipv6.mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
- ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
- ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
- }
- ipv6.val.flow_label &= ipv6.mask.flow_label;
- ipv6.val.traffic_class &= ipv6.mask.traffic_class;
- ipv6.val.next_hdr &= ipv6.mask.next_hdr;
- ipv6.val.hop_limit &= ipv6.mask.hop_limit;
- }
- flow->l3_protocol_en = !!ipv6.mask.next_hdr;
- flow->l3_protocol = ipv6.val.next_hdr;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
- (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ /* allow RSS key_len 0 in case of NULL (default) RSS key. */
+ if (rss->key_len == 0 && rss->key != NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key length 0");
+ if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ if (!priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No Rx queues configured");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No queues configured");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i], "queue is not configured");
}
- return size;
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "rss action not supported for "
+ "egress");
+ return 0;
+}
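+
+/*
+ * A minimal configuration accepted by this check (illustrative; the queue
+ * indices are assumptions and must name configured Rx queues). A NULL key
+ * with key_len 0 selects the driver default key, per the check above:
+ *
+ *	uint16_t queues[] = { 0, 1 };
+ *	struct rte_flow_action_rss rss = {
+ *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ *		.level = 1,
+ *		.types = ETH_RSS_IP,
+ *		.key = NULL,
+ *		.key_len = 0,
+ *		.queue_num = 2,
+ *		.queue = queues,
+ *	};
+ */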
+
+/**
+ * Validate the count action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "count action not supported for "
+ "egress");
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ * Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_udp *spec = item->spec;
- const struct rte_flow_item_udp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp udp = {
- .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "protocol filtering not compatible"
- " with UDP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 is mandatory to filter"
- " on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority out of range");
+ if (attributes->egress)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L4 layer is already"
- " present");
- if (!mask)
- mask = &rte_flow_item_udp_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_udp_mask,
- sizeof(struct rte_flow_item_udp), error);
- if (ret < 0)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- if (spec) {
- udp.val.dst_port = spec->hdr.dst_port;
- udp.val.src_port = spec->hdr.src_port;
- udp.mask.dst_port = mask->hdr.dst_port;
- udp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp.val.src_port &= udp.mask.src_port;
- udp.val.dst_port &= udp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
- (IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &udp, size);
- }
- return size;
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ return 0;
}
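+
+/*
+ * Only plain ingress rules pass this check (illustrative): group 0, no
+ * egress/transfer, and a priority below priv->config.flow_prio (or the
+ * reserved MLX5_FLOW_PRIO_RSVD), e.g.
+ *
+ *	struct rte_flow_attr attr = {
+ *		.group = 0,
+ *		.priority = 0,
+ *		.ingress = 1,
+ *	};
+ */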
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate Ethernet item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_tcp *spec = item->spec;
- const struct rte_flow_item_tcp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp tcp = {
- .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
+ const struct rte_flow_item_eth *mask = item->mask;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
};
int ret;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "protocol filtering not compatible"
- " with TCP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 is mandatory to filter on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ if (item_flags & ethm)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L4 layer is already present");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L2 layers not supported");
if (!mask)
- mask = &rte_flow_item_tcp_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_tcp_mask,
- sizeof(struct rte_flow_item_tcp), error);
- if (ret < 0)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- if (spec) {
- tcp.val.dst_port = spec->hdr.dst_port;
- tcp.val.src_port = spec->hdr.src_port;
- tcp.mask.dst_port = mask->hdr.dst_port;
- tcp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp.val.src_port &= tcp.mask.src_port;
- tcp.val.dst_port &= tcp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
- (IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &tcp, size);
- }
- return size;
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ return ret;
}
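+
+/*
+ * Illustrative caller pattern (hypothetical): item validation walks the
+ * pattern while accumulating layer flags, matching the ethm check above:
+ *
+ *	ret = mlx5_flow_validate_item_eth(item, item_flags, error);
+ *	if (ret < 0)
+ *		return ret;
+ *	item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ *			       MLX5_FLOW_LAYER_OUTER_L2;
+ */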
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VLAN item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_vxlan *spec = item->spec;
- const struct rte_flow_item_vxlan *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
};
+ uint16_t vlan_tag = 0;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
+ const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
+ const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- /*
- * Verify only UDPv4 is present as defined in
- * https://tools.ietf.org/html/rfc7348
- */
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "no outer UDP layer found");
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple VLAN layers not supported");
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer cannot follow L3/L4 layer");
if (!mask)
- mask = &rte_flow_item_vxlan_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_vxlan_mask,
- sizeof(struct rte_flow_item_vxlan), error);
- if (ret < 0)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (ret)
return ret;
if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan.mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ vlan_tag = spec->tci;
+ vlan_tag &= mask->tci;
}
/*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
- * only this layer is defined in the Verbs specification it is
- * interpreted as wildcard and all packets will match this
- * rule, if it follows a full stack layer (ex: eth / ipv4 /
- * udp), all packets matching the layers before will also
- * match this rule. To avoid such situation, VNI 0 is
- * currently refused.
+ * From verbs perspective an empty VLAN is equivalent
+ * to a packet without VLAN layer.
*/
- if (!vxlan.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ if (!vlan_tag)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN tunnel must be fully defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN;
- return size;
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ return 0;
}
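/*
 * Illustrative sketch, not part of this patch: how a driver-side
 * validation loop is expected to accumulate item_flags and feed it to
 * the helpers above. The function name sample_validate_vlan_pattern is
 * hypothetical; the helper and the MLX5_FLOW_LAYER_* bits are the ones
 * declared in mlx5_flow.h.
 */
static int
sample_validate_vlan_pattern(const struct rte_flow_item items[],
			     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	int ret;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		if (items->type != RTE_FLOW_ITEM_TYPE_VLAN)
			continue;
		ret = mlx5_flow_validate_item_vlan(items, item_flags, error);
		if (ret < 0)
			return ret;
		/* Record the layer so that following items see it. */
		item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
				       MLX5_FLOW_LAYER_OUTER_VLAN;
	}
	return 0;
}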
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPv4 item.
*
- * @param dev
- * Pointer to Ethernet device.
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] acc_mask
+ * Acceptable mask, if NULL the default internal mask
+ * will be used to check whether item fields are supported.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
- const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_item_ipv4 *acc_mask,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_vxlan_gpe *spec = item->spec;
- const struct rte_flow_item_vxlan_gpe *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan_gpe = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
};
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
int ret;
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
- if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 VXLAN is not enabled by device"
- " parameter and/or not configured in"
- " firmware");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- /*
- * Verify only UDPv4 is present as defined in
- * https://tools.ietf.org/html/rfc7348
- */
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ if (item_flags & l3m)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "no outer UDP layer found");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L3 layers not supported");
+ else if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an L4 layer.");
if (!mask)
- mask = &rte_flow_item_vxlan_gpe_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
- sizeof(struct rte_flow_item_vxlan_gpe), error);
+ mask = &rte_flow_item_ipv4_mask;
+ else if (mask->hdr.next_proto_id != 0 &&
+ mask->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "partial mask is not supported"
+ " for protocol");
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
if (ret < 0)
return ret;
- if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan_gpe.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan_gpe.mask.tunnel_id = id.vlan_id;
- if (spec->protocol)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VxLAN-GPE protocol not supported");
- /* Remove unwanted bits from values. */
- vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
- }
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
- * layer is defined in the Verbs specification it is interpreted as
- * wildcard and all packets will match this rule, if it follows a full
- * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule. To avoid such situation, VNI 0
- * is currently refused.
- */
- if (!vxlan_gpe.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN-GPE vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN-GPE tunnel must be fully"
- " defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
- return size;
-}
-
-/**
- * Update the protocol in Verbs IPv4/IPv6 spec.
- *
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] search
- * Specification type to search in order to update the IP protocol.
- * @param[in] protocol
- * Protocol value to set if none is present in the specification.
- */
-static void
-mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
- enum ibv_flow_spec_type search,
- uint8_t protocol)
-{
- unsigned int i;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- if (!attr)
- return;
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- union {
- struct ibv_flow_spec_ipv4_ext *ipv4;
- struct ibv_flow_spec_ipv6 *ipv6;
- } ip;
-
- switch (search) {
- case IBV_FLOW_SPEC_IPV4_EXT:
- ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
- if (!ip.ipv4->val.proto) {
- ip.ipv4->val.proto = protocol;
- ip.ipv4->mask.proto = 0xff;
- }
- break;
- case IBV_FLOW_SPEC_IPV6:
- ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
- if (!ip.ipv6->val.next_hdr) {
- ip.ipv6->val.next_hdr = protocol;
- ip.ipv6->mask.next_hdr = 0xff;
- }
- break;
- default:
- break;
- }
- break;
- }
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
- }
+ return 0;
}
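/*
 * Illustrative sketch, not part of this patch: a driver that can only
 * match IPv4 addresses may narrow the acceptable mask through the
 * acc_mask argument instead of relying on the internal default above.
 * The names sample_ipv4_acc_mask and sample_validate_ipv4 are
 * hypothetical.
 */
static const struct rte_flow_item_ipv4 sample_ipv4_acc_mask = {
	.hdr = {
		.src_addr = RTE_BE32(0xffffffff),
		.dst_addr = RTE_BE32(0xffffffff),
	},
};

static int
sample_validate_ipv4(const struct rte_flow_item *item, uint64_t item_flags,
		     struct rte_flow_error *error)
{
	/* A mask touching e.g. type_of_service would now be rejected. */
	return mlx5_flow_validate_item_ipv4(item, item_flags,
					    &sample_ipv4_acc_mask, error);
}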
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * It will also update the previous L3 layer with the protocol value matching
- * the GRE.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPv6 item.
*
- * @param dev
- * Pointer to Ethernet device.
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] acc_mask
+ * Acceptable mask, if NULL the default internal mask
+ * will be used to check whether item fields are supported.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_gre(const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_item_ipv6 *acc_mask,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
- const struct rte_flow_item_gre *spec = item->spec;
- const struct rte_flow_item_gre *mask = item->mask;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_gre);
- struct ibv_flow_spec_gre tunnel = {
- .type = IBV_FLOW_SPEC_GRE,
- .size = size,
- };
-#else
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel tunnel = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
};
-#endif
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "protocol filtering not compatible"
- " with this GRE layer");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 Layer is missing");
- if (!mask)
- mask = &rte_flow_item_gre_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_gre_mask,
- sizeof(struct rte_flow_item_gre), error);
- if (ret < 0)
- return ret;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- if (spec) {
- tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
- tunnel.val.protocol = spec->protocol;
- tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
- tunnel.mask.protocol = mask->protocol;
- /* Remove unwanted bits from values. */
- tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
- tunnel.val.protocol &= tunnel.mask.protocol;
- tunnel.val.key &= tunnel.mask.key;
- }
-#else
- if (spec && (spec->protocol & mask->protocol))
+ if (item_flags & l3m)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "without MPLS support the"
- " specification cannot be used for"
- " filtering");
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- if (size <= flow_size) {
- if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
- MLX5_IP_PROTOCOL_GRE);
- else
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV6,
- MLX5_IP_PROTOCOL_GRE);
- mlx5_flow_spec_verbs_add(flow, &tunnel, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_GRE;
- return size;
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L3 layers not supported");
+ else if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an L4 layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate UDP item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
- struct rte_flow *flow __rte_unused,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
{
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- const struct rte_flow_item_mpls *spec = item->spec;
- const struct rte_flow_item_mpls *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_mpls);
- struct ibv_flow_spec_mpls mpls = {
- .type = IBV_FLOW_SPEC_MPLS,
- .size = size,
- };
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
- " with MPLS layer");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already"
- " present");
+ " with UDP layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
if (!mask)
- mask = &rte_flow_item_mpls_mask;
+ mask = &rte_flow_item_udp_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_mpls_mask,
- sizeof(struct rte_flow_item_mpls), error);
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
if (ret < 0)
return ret;
- if (spec) {
- memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
- memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
- /* Remove unwanted bits from values. */
- mpls.val.label &= mpls.mask.label;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &mpls, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_MPLS;
- return size;
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MPLS is not supported by Verbs, please"
- " update.");
-}
-
-/**
- * Convert the @p pattern into a Verbs specifications after ensuring the NIC
- * will understand and process it correctly.
- * The conversion is performed item per item, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p pattern, unless an error is encountered.
- *
- * @param[in] pattern
- * Flow pattern.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @pattern has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_items(struct rte_eth_dev *dev,
- const struct rte_flow_item pattern[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- int remain = flow_size;
- size_t size = 0;
-
- for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
- int ret = 0;
-
- switch (pattern->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- ret = mlx5_flow_item_eth(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- ret = mlx5_flow_item_udp(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_item_vxlan(pattern, flow, remain,
- error);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
- remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_item_gre(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern,
- "item not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->layers) {
- const struct rte_flow_item item = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- };
-
- return mlx5_flow_item_eth(&item, flow, flow_size, error);
- }
- return size;
+ return 0;
}
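/*
 * Illustrative sketch, not part of this patch: target_protocol is the
 * next-protocol value captured from the preceding L3 item (0xff when
 * left unspecified), so an IPv4 spec matching e.g. TCP makes a
 * following UDP item fail validation. sample_check_udp_chain is a
 * hypothetical helper.
 */
static int
sample_check_udp_chain(const struct rte_flow_item *ipv4_item,
		       const struct rte_flow_item *udp_item,
		       uint64_t item_flags, struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = ipv4_item->spec;
	const struct rte_flow_item_ipv4 *mask = ipv4_item->mask;
	uint8_t next_protocol = 0xff;

	if (spec && mask && mask->hdr.next_proto_id)
		next_protocol = spec->hdr.next_proto_id;
	return mlx5_flow_validate_item_udp(udp_item, item_flags,
					   next_protocol, error);
}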
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate TCP item.
*
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[in] flow_mask
+ * Driver-specific (TCF, DV, Verbs, etc.) mask of supported header fields.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_drop(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
+ struct rte_flow_error *error)
{
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- struct ibv_flow_spec_action_drop drop = {
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
- };
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "drop is not compatible with"
- " flag/mark action");
- if (size < flow_size)
- mlx5_flow_spec_verbs_add(flow, &drop, size);
- flow->fate |= MLX5_FLOW_FATE_DROP;
- return size;
+ assert(flow_mask);
+ if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)flow_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ return 0;
}
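/*
 * Illustrative sketch, not part of this patch: unlike the UDP helper,
 * the TCP one takes the driver-supported mask explicitly and asserts it
 * is non-NULL. A Verbs-like caller that accepts the generic mask could
 * simply pass the rte_flow default; sample_validate_tcp is hypothetical.
 */
static int
sample_validate_tcp(const struct rte_flow_item *item, uint64_t item_flags,
		    uint8_t next_protocol, struct rte_flow_error *error)
{
	return mlx5_flow_validate_item_tcp(item, item_flags, next_protocol,
					   &rte_flow_item_tcp_mask, error);
}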
/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
+ * Validate VXLAN item.
*
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_queue(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_queue *queue = action->conf;
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
+
- if (flow->fate)
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
" supported");
- if (queue->index >= priv->rxqs_n)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue index out of range");
- if (!(*priv->rxqs)[queue->index])
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue is not configured");
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
- flow->rss.queue_num = 1;
- flow->fate |= MLX5_FLOW_FATE_QUEUE;
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vlan_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vlan_id &= id.vlan_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+ * only this layer is defined in the Verbs specification, it is
+ * interpreted as a wildcard and all packets will match this
+ * rule; if it follows a full stack layer (e.g. eth / ipv4 /
+ * udp), all packets matching the preceding layers will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
+ */
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN vni cannot be 0");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN tunnel must be fully defined");
return 0;
}
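/*
 * Illustrative note, not part of this patch: the union above widens the
 * 24-bit VNI into a 32-bit value by copying the three VNI bytes into
 * bytes 1..3 of the integer, leaving byte 0 clear, e.g.:
 *
 *	vni[] = { 0x12, 0x34, 0x56 }  ->  id.vlan_id == 0x56341200
 *	(on a little-endian host)
 *
 * Only the zero/non-zero result is used here, so the exact byte order
 * does not matter for the check.
 */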
/**
- * Ensure the @p action will be understood and used correctly by the NIC.
+ * Validate VXLAN_GPE item.
*
- * @param dev
- * Pointer to Ethernet device structure.
- * @param action[in]
- * Pointer to flow actions array.
- * @param flow[in, out]
- * Pointer to the rte_flow structure.
- * @param error[in, out]
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
* Pointer to error structure.
*
* @return
- * On success @p flow->queue array and @p flow->rss are filled and valid.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_rss(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_rss *rss = action->conf;
- unsigned int i;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
- rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->func,
- "RSS hash function not supported");
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss->level > 2)
-#else
- if (rss->level > 1)
-#endif
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->level,
- "tunnel RSS is not supported");
- if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too small");
- if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too large");
- if (rss->queue_num > priv->config.ind_table_max_size)
+ if (!priv->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue_num,
- "number of queues too large");
- if (rss->types & MLX5_RSS_HF_MASK)
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->types,
- "some RSS protocols are not"
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
" supported");
- for (i = 0; i != rss->queue_num; ++i) {
- if (!(*priv->rxqs)[rss->queue[i]])
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i],
- "queue is not configured");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe),
+ error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ if (spec->protocol)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol"
+ " not supported");
+ memcpy(&id.vni[1], spec->vni, 3);
+ vlan_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vlan_id &= id.vlan_id;
}
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
- memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
- flow->rss.types = rss->types;
- flow->rss.level = rss->level;
- flow->fate |= MLX5_FLOW_FATE_RSS;
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
+ * layer is defined in the Verbs specification, it is interpreted as a
+ * wildcard and all packets will match this rule; if it follows a full
+ * stack layer (e.g. eth / ipv4 / udp), all packets matching the layers
+ * before will also match this rule. To avoid such a situation, VNI 0
+ * is currently refused.
+ */
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
return 0;
}
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate GRE item.
*
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_flag(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_gre *spec __rte_unused = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+ int ret;
- if (flow->modifier & MLX5_FLOW_MOD_FLAG)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- size = 0;
- else if (size <= flow_size && verbs)
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- flow->modifier |= MLX5_FLOW_MOD_FLAG;
- return size;
-}
-
-/**
- * Update verbs specification to modify the flag to mark.
- *
- * @param[in, out] verbs
- * Pointer to the mlx5_flow_verbs structure.
- * @param[in] mark_id
- * Mark identifier to replace the flag.
- */
-static void
-mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
-{
- struct ibv_spec_header *hdr;
- int i;
-
- if (!verbs)
- return;
- /* Update Verbs specification. */
- hdr = (struct ibv_spec_header *)verbs->specs;
- if (!hdr)
- return;
- for (i = 0; i != verbs->attr->num_of_specs; ++i) {
- if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
- struct ibv_flow_spec_action_tag *t =
- (struct ibv_flow_spec_action_tag *)hdr;
-
- t->tag_id = mlx5_flow_mark_set(mark_id);
- }
- hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
- }
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif
+ return 0;
}
/**
- * Convert the @p action into @p flow (or by updating the already present
- * Flag Verbs specification) after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate MPLS item.
*
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected so far.
+ * @param[in] prev_layer
+ * The protocol layer indicated by the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_mark(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
+ uint64_t item_flags __rte_unused,
+ uint64_t prev_layer __rte_unused,
+ struct rte_flow_error *error)
{
- const struct rte_flow_action_mark *mark = action->conf;
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *mask = item->mask;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int ret;
- if (!mark)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "configuration cannot be null");
- if (mark->id >= MLX5_FLOW_MARK_MAX)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &mark->id,
- "mark id must in 0 <= id < "
- RTE_STR(MLX5_FLOW_MARK_MAX));
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ if (!priv->config.mpls_en)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS not supported or"
+ " disabled in firmware"
+ " configuration.");
+ /* MPLS over IP, UDP or GRE is allowed. */
+ if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ MLX5_FLOW_LAYER_GRE)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+ if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+ !(item_flags & MLX5_FLOW_LAYER_GRE))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
- mlx5_flow_verbs_mark_update(verbs, mark->id);
- size = 0;
- } else if (size <= flow_size) {
- tag.tag_id = mlx5_flow_mark_set(mark->id);
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- }
- flow->modifier |= MLX5_FLOW_MOD_MARK;
- return size;
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
+static int
+flow_null_validate(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+static struct mlx5_flow *
+flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+static int
+flow_null_translate(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow *dev_flow __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+static int
+flow_null_apply(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+static void
+flow_null_remove(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
+{
+}
+
+static void
+flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
+{
+}
+
+static int
+flow_null_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
+/* Void driver to protect against null pointer dereference. */
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
+ .validate = flow_null_validate,
+ .prepare = flow_null_prepare,
+ .translate = flow_null_translate,
+ .apply = flow_null_apply,
+ .remove = flow_null_remove,
+ .destroy = flow_null_destroy,
+ .query = flow_null_query,
+};
+
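/*
 * Illustrative sketch, not part of this patch: thanks to the null
 * entries at MLX5_FLOW_TYPE_MIN/MAX in flow_drv_ops[], a lookup with an
 * unresolved driver type degrades to a clean -ENOTSUP instead of a null
 * pointer dereference. sample_validate_unknown_type is hypothetical.
 */
static int
sample_validate_unknown_type(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item items[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
		flow_drv_ops[MLX5_FLOW_TYPE_MAX];

	/* Fails gracefully: rte_errno is set to ENOTSUP. */
	return fops->validate(dev, attr, items, actions, error);
}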
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param action[in]
- * Action configuration.
- * @param flow[in, out]
- * Pointer to flow structure.
- * @param flow_size[in]
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param error[int, out]
- * Pointer to error structure.
+ * Select flow driver type according to flow attributes and device
+ * configuration.
+ *
+ * @param[in] dev
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * The selected flow driver type, MLX5_FLOW_TYPE_MAX if none matches.
*/
-static int
-mlx5_flow_action_count(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_count *count = action->conf;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_flow_spec_counter_action counter = {
- .type = IBV_FLOW_SPEC_ACTION_COUNT,
- .size = size,
- };
-#endif
+static enum mlx5_flow_drv_type
+flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
- if (!flow->counter) {
- flow->counter = mlx5_flow_counter_new(dev, count->shared,
- count->id);
- if (!flow->counter)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "cannot get counter"
- " context.");
- }
- if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flow counters are not supported.");
- flow->modifier |= MLX5_FLOW_MOD_COUNT;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- counter.counter_set_handle = flow->counter->cs->handle;
- if (size <= flow_size)
- mlx5_flow_spec_verbs_add(flow, &counter, size);
- return size;
-#endif
- return 0;
+ if (attr->transfer)
+ type = MLX5_FLOW_TYPE_TCF;
+ else
+ type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+ MLX5_FLOW_TYPE_VERBS;
+ return type;
}
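/*
 * Illustrative sketch, not part of this patch: "transfer" (E-Switch)
 * rules always select the TCF driver, everything else follows the
 * dv_flow_en devarg. sample_show_drv_selection is hypothetical.
 */
static void
sample_show_drv_selection(struct rte_eth_dev *dev)
{
	const struct rte_flow_attr transfer_attr = { .transfer = 1 };
	const struct rte_flow_attr ingress_attr = { .ingress = 1 };
	enum mlx5_flow_drv_type type;

	assert(flow_get_drv_type(dev, &transfer_attr) == MLX5_FLOW_TYPE_TCF);
	type = flow_get_drv_type(dev, &ingress_attr);
	assert(type == MLX5_FLOW_TYPE_DV || type == MLX5_FLOW_TYPE_VERBS);
}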
+#define flow_get_drv_ops(type) flow_drv_ops[type]
+
/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- * The conversion is performed action per action, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p action, unless an error is encountered.
+ * Flow driver validation API. This abstracts calling driver-specific
+ * functions. The type of flow driver is determined according to the flow
+ * attributes.
*
* @param[in] dev
- * Pointer to Ethernet device structure.
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
* @param[in] actions
- * Pointer to flow actions array.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
+ * Pointer to the list of actions.
* @param[out] error
- * Pointer to error structure.
+ * Pointer to the error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p actions has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_actions(struct rte_eth_dev *dev,
+static inline int
+flow_drv_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- struct rte_flow *flow, const size_t flow_size,
struct rte_flow_error *error)
{
- size_t size = 0;
- int remain = flow_size;
- int ret = 0;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_action_flag(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_action_mark(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- ret = mlx5_flow_action_drop(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- ret = mlx5_flow_action_queue(dev, actions, flow, error);
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- ret = mlx5_flow_action_rss(dev, actions, flow, error);
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_action_count(dev, actions, flow, remain,
- error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no fate action found");
- return size;
+ fops = flow_get_drv_ops(type);
+ return fops->validate(dev, attr, items, actions, error);
}
/**
- * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
- * after ensuring the NIC will understand and process it correctly.
- * The conversion is only performed item/action per item/action, each of
- * them is written into the @p flow if its size is lesser or equal to @p
- * flow_size.
- * Validation and memory consumption computation are still performed until the
- * end, unless an error is encountered.
+ * Flow driver preparation API. This abstracts calling driver-specific
+ * functions. The parent flow (rte_flow) should have a driver type
+ * (drv_type). It calculates the size of memory required for the device
+ * flow, allocates the memory, initializes the device flow and returns
+ * the pointer.
+ *
+ * @note
+ * This function initializes the device-flow part (dv, tcf or verbs) of
+ * struct mlx5_flow. However, it is the caller's responsibility to initialize
+ * the rest: adding the returned device flow to the flow->dev_flows list and
+ * setting the backward reference to the flow must be done outside this
+ * function (see the illustrative sketch after flow_drv_destroy() below).
+ * The layers field is not filled either.
*
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[in] attributes
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
* @param[in] actions
- * Associated actions (list terminated by the END action).
+ * Pointer to the list of actions.
* @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Pointer to the error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the flow has fully been converted and
- * can be applied, otherwise another call with this returned memory size
- * should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
-static int
-mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
- const size_t flow_size,
- const struct rte_flow_attr *attributes,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static inline struct mlx5_flow *
+flow_drv_prepare(const struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- struct rte_flow local_flow = { .layers = 0, };
- size_t size = sizeof(*flow);
- union {
- struct rte_flow_expand_rss buf;
- uint8_t buffer[2048];
- } expand_buffer;
- struct rte_flow_expand_rss *buf = &expand_buffer.buf;
- struct mlx5_flow_verbs *original_verbs = NULL;
- size_t original_verbs_size = 0;
- uint32_t original_layers = 0;
- int expanded_pattern_idx = 0;
- int ret;
- uint32_t i;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- if (size > flow_size)
- flow = &local_flow;
- ret = mlx5_flow_attributes(dev, attributes, flow, error);
- if (ret < 0)
- return ret;
- ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
- if (ret < 0)
- return ret;
- if (local_flow.rss.types) {
- ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
- pattern, local_flow.rss.types,
- mlx5_support_expansion,
- local_flow.rss.level < 2 ?
- MLX5_EXPANSION_ROOT :
- MLX5_EXPANSION_ROOT_OUTER);
- assert(ret > 0 &&
- (unsigned int)ret < sizeof(expand_buffer.buffer));
- } else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)pattern;
- }
- size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
- sizeof(void *));
- if (size <= flow_size)
- flow->queue = (void *)(flow + 1);
- LIST_INIT(&flow->verbs);
- flow->layers = 0;
- flow->modifier = 0;
- flow->fate = 0;
- for (i = 0; i != buf->entries; ++i) {
- size_t off = size;
- size_t off2;
-
- flow->layers = original_layers;
- size += sizeof(struct ibv_flow_attr) +
- sizeof(struct mlx5_flow_verbs);
- off2 = size;
- if (size < flow_size) {
- flow->cur_verbs = (void *)((uintptr_t)flow + off);
- flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
- flow->cur_verbs->specs =
- (void *)(flow->cur_verbs->attr + 1);
- }
- /* First iteration convert the pattern into Verbs. */
- if (i == 0) {
- /* Actions don't need to be converted several time. */
- ret = mlx5_flow_actions(dev, actions, flow,
- (size < flow_size) ?
- flow_size - size : 0,
- error);
- if (ret < 0)
- return ret;
- size += ret;
- } else {
- /*
- * Next iteration means the pattern has already been
- * converted and an expansion is necessary to match
- * the user RSS request. For that only the expanded
- * items will be converted, the common part with the
- * user pattern are just copied into the next buffer
- * zone.
- */
- size += original_verbs_size;
- if (size < flow_size) {
- rte_memcpy(flow->cur_verbs->attr,
- original_verbs->attr,
- original_verbs_size +
- sizeof(struct ibv_flow_attr));
- flow->cur_verbs->size = original_verbs_size;
- }
- }
- ret = mlx5_flow_items
- (dev,
- (const struct rte_flow_item *)
- &buf->entry[i].pattern[expanded_pattern_idx],
- flow,
- (size < flow_size) ? flow_size - size : 0, error);
- if (ret < 0)
- return ret;
- size += ret;
- if (size <= flow_size) {
- mlx5_flow_adjust_priority(dev, flow);
- LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
- }
- /*
- * Keep a pointer of the first verbs conversion and the layers
- * it has encountered.
- */
- if (i == 0) {
- original_verbs = flow->cur_verbs;
- original_verbs_size = size - off2;
- original_layers = flow->layers;
- /*
- * move the index of the expanded pattern to the
- * first item not addressed yet.
- */
- if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
- expanded_pattern_idx++;
- } else {
- const struct rte_flow_item *item = pattern;
-
- for (item = pattern;
- item->type != RTE_FLOW_ITEM_TYPE_END;
- ++item)
- expanded_pattern_idx++;
- }
- }
- }
- /* Restore the origin layers in the flow. */
- flow->layers = original_layers;
- return size;
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->prepare(attr, items, actions, error);
}
/**
- * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
- * if several tunnel rules are used on this queue, the tunnel ptype will be
- * cleared.
+ * Flow driver translation API. This abstracts calling driver-specific
+ * functions. The parent flow (rte_flow) should have a driver type
+ * (drv_type). It translates a generic flow into a driver flow.
+ * flow_drv_prepare() must be called first.
+ *
+ * @note
+ * dev_flow->layers could be filled as a result of parsing during translation
+ * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
+ * if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
+ * flow->actions could be overwritten even though all the expanded dev_flows
+ * have the same actions.
*
- * @param rxq_ctrl
- * Rx queue to update.
+ * @param[in] dev
+ * Pointer to the rte dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5 flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- unsigned int i;
- uint32_t tunnel_ptype = 0;
-
- /* Look up for the ptype to use. */
- for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
- if (!rxq_ctrl->flow_tunnels_n[i])
- continue;
- if (!tunnel_ptype) {
- tunnel_ptype = tunnels_info[i].ptype;
- } else {
- tunnel_ptype = 0;
- break;
- }
- }
- rxq_ctrl->rxq.tunnel = tunnel_ptype;
+static inline int
+flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
+
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->translate(dev, dev_flow, attr, items, actions, error);
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ * Flow driver apply API. This abstracts calling driver-specific functions.
+ * The parent flow (rte_flow) should have a driver type (drv_type). It applies
+ * the translated driver flows onto the device. flow_drv_translate() must be
+ * called first.
*
* @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] flow
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
* Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+static inline int
+flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
- if (tunnel) {
- unsigned int j;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- /* Increase the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]++;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->apply(dev, flow, error);
}
/**
- * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
+ * Flow driver remove API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes the
+ * flow from the device. All the remaining resources of the flow must be
+ * released by calling flow_drv_destroy().
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to the flow.
+ * @param[in, out] flow
+ * Pointer to flow structure.
*/
-static void
-mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+static inline void
+flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- assert(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
- if (tunnel) {
- unsigned int j;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- /* Decrease the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]--;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->remove(dev, flow);
}
/**
- * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ * Flow driver destroy API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes the
+ * flow from the device and releases all of its resources.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
*/
-static void
-mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+static inline void
+flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int idx;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
- unsigned int j;
-
- if (!(*priv->rxqs)[idx])
- continue;
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
- ++idx;
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->destroy(dev, flow);
}
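+/*
+ * Taken together, the wrappers above define the fixed call sequence the
+ * generic layer uses; a simplified sketch (error handling omitted):
+ *
+ *   flow_drv_validate(dev, attr, items, actions, error);
+ *   dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ *   flow_drv_translate(dev, dev_flow, attr, items, actions, error);
+ *   flow_drv_apply(dev, flow, error);     <- offload to the device
+ *   flow_drv_remove(dev, flow);           <- e.g. on port stop
+ *   flow_drv_apply(dev, flow, error);     <- re-apply on port start
+ *   flow_drv_destroy(dev, flow);          <- final resource release
+ */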
/**
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+ int ret;
+ ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0)
return ret;
return 0;
}
/**
- * Remove the flow.
+ * Get RSS action from the action list.
*
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ *
+ * @return
+ * Pointer to the RSS action if it exists, NULL otherwise.
*/
-static void
-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static const struct rte_flow_action_rss *
+flow_get_rss_action(const struct rte_flow_action actions[])
{
- struct mlx5_flow_verbs *verbs;
-
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->flow) {
- claim_zero(mlx5_glue->destroy_flow(verbs->flow));
- verbs->flow = NULL;
- }
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ return (const struct rte_flow_action_rss *)
+ actions->conf;
+ default:
+ break;
}
}
- if (flow->counter) {
- mlx5_flow_counter_release(flow->counter);
- flow->counter = NULL;
- }
+ return NULL;
}
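+/*
+ * A minimal sketch of an action list this helper matches (the values are
+ * hypothetical):
+ *
+ *   static const uint16_t queues[] = { 0, 1, 2, 3 };
+ *   const struct rte_flow_action_rss rss = {
+ *       .types = ETH_RSS_IP,
+ *       .queue_num = RTE_DIM(queues),
+ *       .queue = queues,
+ *   };
+ *   const struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ * flow_get_rss_action(actions) then returns &rss, while a list without an
+ * RSS action yields NULL.
+ */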
-/**
- * Apply the flow.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
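+/**
+ * Select the pattern expansion root for rte_flow_expand_rss(). A pattern
+ * carrying a VLAN item gets a VLAN-aware root; an RSS level of 2 or more
+ * (inner RSS) selects an outer root so that inner headers can be expanded.
+ *
+ * @param[in] pattern
+ *   Pointer to the list of items.
+ * @param[in] rss_level
+ *   RSS level from the RSS action configuration.
+ *
+ * @return
+ *   Node index in the mlx5_support_expansion graph to start expansion from.
+ */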
+static unsigned int
+find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
- struct mlx5_flow_verbs *verbs;
- int err;
-
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (flow->fate & MLX5_FLOW_FATE_DROP) {
- verbs->hrxq = mlx5_hrxq_drop_new(dev);
- if (!verbs->hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get drop hash queue");
- goto error;
- }
- } else {
- struct mlx5_hrxq *hrxq;
-
- hrxq = mlx5_hrxq_get(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get hash queue");
- goto error;
- }
- verbs->hrxq = hrxq;
- }
- verbs->flow =
- mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
- if (!verbs->flow) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "hardware refuses to create flow");
- goto error;
- }
- }
- return 0;
-error:
- err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
}
}
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
* A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
-mlx5_flow_list_create(struct rte_eth_dev *dev,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct rte_flow *flow = NULL;
- size_t size = 0;
+ struct mlx5_flow *dev_flow;
+ const struct rte_flow_action_rss *rss;
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
int ret;
+ uint32_t i;
+ uint32_t flow_size;
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0)
return NULL;
- size = ret;
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "not enough memory to create flow");
- return NULL;
+ flow_size = sizeof(struct rte_flow);
+ rss = flow_get_rss_action(actions);
+ if (rss)
+ flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ else
+ flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+ flow = rte_calloc(__func__, 1, flow_size, 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow");
+ return NULL;
+ }
+ flow->drv_type = flow_get_drv_type(dev, attr);
+ assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->dev_flows);
+ if (rss && rss->types) {
+ unsigned int graph_root;
+
+ graph_root = find_graph_root(items, rss->level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ items, rss->types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
}
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0) {
- rte_free(flow);
- return NULL;
+ for (i = 0; i < buf->entries; ++i) {
+ dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
+ actions, error);
+ if (!dev_flow)
+ goto error;
+ dev_flow->flow = flow;
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ ret = flow_drv_translate(dev, dev_flow, attr,
+ buf->entry[i].pattern,
+ actions, error);
+ if (ret < 0)
+ goto error;
}
- assert((size_t)ret == size);
if (dev->data->dev_started) {
- ret = mlx5_flow_apply(dev, flow, error);
- if (ret < 0) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow) {
- mlx5_flow_remove(dev, flow);
- rte_free(flow);
- }
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
- }
+ ret = flow_drv_apply(dev, flow, error);
+ if (ret < 0)
+ goto error;
}
TAILQ_INSERT_TAIL(list, flow, next);
- mlx5_flow_rxq_flags_set(dev, flow);
+ flow_rxq_flags_set(dev, flow);
return flow;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ assert(flow);
+ flow_drv_destroy(dev, flow);
+ rte_free(flow);
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
}
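+/*
+ * Layout of the single allocation made by flow_list_create() (a sketch):
+ *
+ *   +--------------------+ <- flow
+ *   | struct rte_flow    |
+ *   +--------------------+ <- flow->queue = (void *)(flow + 1)
+ *   | uint16_t queue[N]  |    N = rss->queue_num (one slot without RSS),
+ *   +--------------------+    rounded up to sizeof(void *) alignment
+ */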
/**
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- return mlx5_flow_list_create
- (dev, &((struct priv *)dev->data->dev_private)->flows,
- attr, items, actions, error);
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ return flow_list_create(dev, &priv->flows,
+ attr, items, actions, error);
}
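+/*
+ * Applications reach mlx5_flow_create() through the generic rte_flow API;
+ * a minimal, hypothetical caller steering IPv4 traffic to Rx queue 0
+ * (port_id is assumed valid and started):
+ *
+ *   struct rte_flow_error err;
+ *   const struct rte_flow_attr attr = { .ingress = 1 };
+ *   const struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   const struct rte_flow_action_queue queue = { .index = 0 };
+ *   const struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
+ *                                        actions, &err);
+ */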
/**
* Flow to destroy.
*/
static void
-mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
- struct rte_flow *flow)
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
- mlx5_flow_remove(dev, flow);
- TAILQ_REMOVE(list, flow, next);
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
*/
if (dev->data->dev_started)
- mlx5_flow_rxq_flags_trim(dev, flow);
+ flow_rxq_flags_trim(dev, flow);
+ flow_drv_destroy(dev, flow);
+ TAILQ_REMOVE(list, flow, next);
+ rte_free(flow->fdir);
rte_free(flow);
}
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- mlx5_flow_list_destroy(dev, list, flow);
+ flow_list_destroy(dev, list, flow);
}
}
struct rte_flow *flow;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
- mlx5_flow_remove(dev, flow);
- mlx5_flow_rxq_flags_clear(dev);
+ flow_drv_remove(dev, flow);
+ flow_rxq_flags_clear(dev);
}
/**
int ret = 0;
TAILQ_FOREACH(flow, list, next) {
- ret = mlx5_flow_apply(dev, flow, &error);
+ ret = flow_drv_apply(dev, flow, &error);
if (ret < 0)
goto error;
- mlx5_flow_rxq_flags_set(dev, flow);
+ flow_rxq_flags_set(dev, flow);
}
return 0;
error:
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
struct rte_flow_item_vlan *vlan_spec,
struct rte_flow_item_vlan *vlan_mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
.priority = MLX5_FLOW_PRIO_RSVD,
},
{
.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
- RTE_FLOW_ITEM_TYPE_END,
+ RTE_FLOW_ITEM_TYPE_END,
.spec = vlan_spec,
.last = NULL,
.mask = vlan_mask,
struct rte_flow_error error;
unsigned int i;
- if (!priv->reta_idx_n) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
+ if (!priv->reta_idx_n || !priv->rxqs_n)
+ return 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
- actions, &error);
+ flow = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, &error);
if (!flow)
return -rte_errno;
return 0;
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_flow_list_destroy(dev, &priv->flows, flow);
+ flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
return 0;
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
}
/**
- * Query flow counter.
- *
- * @param flow
- * Pointer to the flow.
+ * Flow driver query API. This abstracts calling driver specific functions.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @see rte_flow_query()
+ * @see rte_flow_ops
*/
static int
-mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
- void *data __rte_unused,
- struct rte_flow_error *error)
-{
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct rte_flow_query_count *qc = data;
- uint64_t counters[2] = {0, 0};
- struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = flow->counter->cs,
- .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
- };
- struct ibv_counter_set_data query_out = {
- .out = counters,
- .outlen = 2 * sizeof(uint64_t),
- };
- int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
+flow_drv_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type ftype = flow->drv_type;
- if (err)
- return rte_flow_error_set(error, err,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counter");
- qc->hits_set = 1;
- qc->bytes_set = 1;
- qc->hits = counters[0] - flow->counter->hits;
- qc->bytes = counters[1] - flow->counter->bytes;
- if (qc->reset) {
- flow->counter->hits = counters[0];
- flow->counter->bytes = counters[1];
- }
- return 0;
-#endif
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "counters are not available");
+ assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(ftype);
+
+ return fops->query(dev, flow, actions, data, error);
}
/**
- * Query a flows.
+ * Query a flow.
*
* @see rte_flow_query()
* @see rte_flow_ops
*/
int
-mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+mlx5_flow_query(struct rte_eth_dev *dev,
struct rte_flow *flow,
const struct rte_flow_action *actions,
void *data,
struct rte_flow_error *error)
{
- int ret = 0;
+ int ret;
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_query_count(flow, data, error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- if (ret < 0)
- return ret;
- }
+ ret = flow_drv_query(dev, flow, actions, data, error);
+ if (ret < 0)
+ return ret;
return 0;
}
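+/*
+ * A hypothetical caller querying a COUNT action through the generic API;
+ * port_id, flow and err are assumed to exist, and the flow must have been
+ * created with a COUNT action (process() is a placeholder):
+ *
+ *   struct rte_flow_query_count qc = { .reset = 1 };
+ *   const struct rte_flow_action count = {
+ *       .type = RTE_FLOW_ACTION_TYPE_COUNT,
+ *   };
+ *
+ *   if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0 &&
+ *       qc.hits_set && qc.bytes_set)
+ *       process(qc.hits, qc.bytes);
+ */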
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
+flow_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
const struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
.dst_addr = input->flow.ip4_flow.dst_ip,
.time_to_live = input->flow.ip4_flow.ttl,
.type_of_service = input->flow.ip4_flow.tos,
- .next_proto_id = input->flow.ip4_flow.proto,
};
attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
.src_addr = mask->ipv4_mask.src_ip,
return 0;
}
+#define FLOW_FDIR_CMP(f1, f2, fld) \
+ memcmp(&(f1)->fld, &(f2)->fld, sizeof((f1)->fld))
+
+/**
+ * Compare two FDIR flows. If items and actions are identical, the two flows
+ * are regarded as the same.
+ *
+ * @param f1
+ * FDIR flow to compare.
+ * @param f2
+ * FDIR flow to compare.
+ *
+ * @return
+ * Zero on match, 1 otherwise.
+ */
+static int
+flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
+{
+ if (FLOW_FDIR_CMP(f1, f2, attr) ||
+ FLOW_FDIR_CMP(f1, f2, l2) ||
+ FLOW_FDIR_CMP(f1, f2, l2_mask) ||
+ FLOW_FDIR_CMP(f1, f2, l3) ||
+ FLOW_FDIR_CMP(f1, f2, l3_mask) ||
+ FLOW_FDIR_CMP(f1, f2, l4) ||
+ FLOW_FDIR_CMP(f1, f2, l4_mask) ||
+ FLOW_FDIR_CMP(f1, f2, actions[0].type))
+ return 1;
+ if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
+ FLOW_FDIR_CMP(f1, f2, queue))
+ return 1;
+ return 0;
+}
+
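+/*
+ * Note: FLOW_FDIR_CMP() compares whole sub-structures with memcmp(), so
+ * padding bytes take part in the comparison. Both operands are therefore
+ * built from fully zeroed memory (rte_zmalloc() in flow_fdir_filter_add(),
+ * a zeroing initializer in flow_fdir_filter_delete()) before
+ * flow_fdir_filter_convert() fills them in.
+ */
+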
+/**
+ * Search the flow list of the device for a matching FDIR flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_flow
+ * FDIR flow to lookup.
+ *
+ * @return
+ * Pointer to the flow if found, NULL otherwise.
+ */
+static struct rte_flow *
+flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+
+ assert(fdir_flow);
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
+ DRV_LOG(DEBUG, "port %u found FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ break;
+ }
+ }
+ return flow;
+}
+
/**
* Add new flow director filter and store it in list.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_add(struct rte_eth_dev *dev,
+flow_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_fdir attributes = {
- .attr.group = 0,
- .l2_mask = {
- .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .type = 0,
- },
- };
- struct rte_flow_error error;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
int ret;
- ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
+ fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
+ if (!fdir_flow) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
if (ret)
- return ret;
- flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
- attributes.items, attributes.actions,
- &error);
+ goto error;
+ flow = flow_fdir_filter_lookup(dev, fdir_flow);
if (flow) {
- DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
- (void *)flow);
- return 0;
+ rte_errno = EEXIST;
+ goto error;
}
+ flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+ fdir_flow->items, fdir_flow->actions, NULL);
+ if (!flow)
+ goto error;
+ assert(!flow->fdir);
+ flow->fdir = fdir_flow;
+ DRV_LOG(DEBUG, "port %u created FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ return 0;
+error:
+ rte_free(fdir_flow);
return -rte_errno;
}
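+/*
+ * A hypothetical application-side addition through the legacy filter API
+ * served by this code (the field values are illustrative):
+ *
+ *   struct rte_eth_fdir_filter f = {
+ *       .input = {
+ *           .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ *           .flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789),
+ *       },
+ *       .action = {
+ *           .rx_queue = 3,
+ *           .behavior = RTE_ETH_FDIR_ACCEPT,
+ *       },
+ *   };
+ *
+ *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                           RTE_ETH_FILTER_ADD, &f);
+ */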
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
- const struct rte_eth_fdir_filter *fdir_filter
- __rte_unused)
+flow_fdir_filter_delete(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter)
{
- rte_errno = ENOTSUP;
- return -rte_errno;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+ struct mlx5_fdir fdir_flow = {
+ .attr.group = 0,
+ };
+ int ret;
+
+ ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
+ if (ret)
+ return -rte_errno;
+ flow = flow_fdir_filter_lookup(dev, &fdir_flow);
+ if (!flow) {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
+ flow_list_destroy(dev, &priv->flows, flow);
+ DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ return 0;
}
/**
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_update(struct rte_eth_dev *dev,
+flow_fdir_filter_update(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
int ret;
- ret = mlx5_fdir_filter_delete(dev, fdir_filter);
+ ret = flow_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- return mlx5_fdir_filter_add(dev, fdir_filter);
+ return flow_fdir_filter_add(dev, fdir_filter);
}
/**
* Pointer to Ethernet device.
*/
static void
-mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
+flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
}
* Resulting flow director information.
*/
static void
-mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
void *arg)
{
enum rte_fdir_mode fdir_mode =
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- return mlx5_fdir_filter_add(dev, arg);
+ return flow_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
- return mlx5_fdir_filter_update(dev, arg);
+ return flow_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
- return mlx5_fdir_filter_delete(dev, arg);
+ return flow_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
- mlx5_fdir_filter_flush(dev);
+ flow_fdir_filter_flush(dev);
break;
case RTE_ETH_FILTER_INFO:
- mlx5_fdir_info_get(dev, arg);
+ flow_fdir_info_get(dev, arg);
break;
default:
DRV_LOG(DEBUG, "port %u unknown operation %u",
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- return mlx5_fdir_ctrl_func(dev, filter_op, arg);
+ return flow_fdir_ctrl_func(dev, filter_op, arg);
default:
DRV_LOG(ERR, "port %u filter type (%d) not supported",
dev->data->port_id, filter_type);