* Copyright 2016 Mellanox Technologies, Ltd
*/
+#include <netinet/in.h>
#include <sys/queue.h>
+#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "mlx5.h"
#include "mlx5_defs.h"
-#include "mlx5_prm.h"
+#include "mlx5_flow.h"
#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/* Pattern outer Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-
-/* Pattern inner Layer bits. */
-#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
-#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
-#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
-#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
-#define MLX5_FLOW_LAYER_GRE (1u << 14)
-#define MLX5_FLOW_LAYER_MPLS (1u << 15)
-
-/* Outer Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
- (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
- (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-#define MLX5_FLOW_LAYER_OUTER \
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
- MLX5_FLOW_LAYER_OUTER_L4)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
- (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
-
-/* Inner Masks. */
-#define MLX5_FLOW_LAYER_INNER_L3 \
- (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
-#define MLX5_FLOW_LAYER_INNER_L4 \
- (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
-#define MLX5_FLOW_LAYER_INNER \
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
- MLX5_FLOW_LAYER_INNER_L4)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-#define MLX5_FLOW_FATE_RSS (1u << 2)
-
-/* Modify a packet. */
-#define MLX5_FLOW_MOD_FLAG (1u << 0)
-#define MLX5_FLOW_MOD_MARK (1u << 1)
-
-/* possible L3 layers protocols filtering. */
-#define MLX5_IP_PROTOCOL_TCP 6
-#define MLX5_IP_PROTOCOL_UDP 17
-#define MLX5_IP_PROTOCOL_GRE 47
-#define MLX5_IP_PROTOCOL_MPLS 147
-
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+/** Device flow drivers. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
+#endif
+extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
+
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
+
+const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
+ [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+#endif
+ [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
+ [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
+};
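+
+/*
+ * Dispatch sketch (illustrative only, not part of the driver API): a
+ * driver is selected once by flow type and all further calls go through
+ * its ops, e.g.:
+ *
+ *   const struct mlx5_flow_driver_ops *fops =
+ *           flow_drv_ops[priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV
+ *                                                : MLX5_FLOW_TYPE_VERBS];
+ *   ret = fops->validate(...);
+ *
+ * The MIN/MAX slots map to the null ops so an out-of-range type fails
+ * predictably instead of dereferencing a NULL pointer.
+ */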
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
+ MLX5_EXPANSION_ROOT_ETH_VLAN,
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_VLAN,
MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_ETH_VLAN,
+ MLX5_EXPANSION_VLAN,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP,
MLX5_EXPANSION_OUTER_IPV6),
.type = RTE_FLOW_ITEM_TYPE_END,
},
+ [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
[MLX5_EXPANSION_OUTER_ETH] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV6,
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
},
+ [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
+ },
+ [MLX5_EXPANSION_OUTER_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
[MLX5_EXPANSION_OUTER_IPV4] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
- MLX5_EXPANSION_GRE),
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
ETH_RSS_NONFRAG_IPV4_OTHER,
[MLX5_EXPANSION_OUTER_IPV6] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV6_UDP,
- MLX5_EXPANSION_OUTER_IPV6_TCP),
+ MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
ETH_RSS_NONFRAG_IPV6_OTHER,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_ETH,
},
+ [MLX5_EXPANSION_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
[MLX5_EXPANSION_IPV4] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP),
},
};
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
- LIST_ENTRY(mlx5_flow_verbs) next;
- unsigned int size; /**< Size of the attribute. */
- struct {
- struct ibv_flow_attr *attr;
- /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
- };
- struct ibv_flow *flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
-};
-
-/* Flow structure. */
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct rte_flow_attr attributes; /**< User flow attribute. */
- uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
- uint32_t layers;
- /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
- uint32_t modifier;
- /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
- uint32_t fate;
- /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
- uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
- LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
- struct mlx5_flow_verbs *cur_verbs;
- /**< Current Verbs flow structure being filled. */
- struct rte_flow_action_rss rss;/**< RSS context. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-};
-
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
.isolate = mlx5_flow_isolate,
+ .query = mlx5_flow_query,
};
/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
struct rte_flow_attr attr;
- struct rte_flow_action actions[2];
struct rte_flow_item items[4];
struct rte_flow_item_eth l2;
struct rte_flow_item_eth l2_mask;
struct rte_flow_item_udp udp;
struct rte_flow_item_tcp tcp;
} l4_mask;
+ struct rte_flow_action actions[2];
struct rte_flow_action_queue queue;
};
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
-
-/*
- * Number of sub priorities.
- * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
- * matching on the NIC (firmware dependent) L4 most have the higher priority
- * followed by L3 and ending with L2.
- */
-#define MLX5_PRIORITY_MAP_L2 2
-#define MLX5_PRIORITY_MAP_L3 1
-#define MLX5_PRIORITY_MAP_L4 0
-#define MLX5_PRIORITY_MAP_MAX 3
-
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
- uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};
.tunnel = MLX5_FLOW_LAYER_VXLAN,
.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_GENEVE,
+ .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
+ },
{
.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
- .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS,
.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_NVGRE,
+ .ptype = RTE_PTYPE_TUNNEL_NVGRE,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_IPIP,
+ .ptype = RTE_PTYPE_TUNNEL_IP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
+ .ptype = RTE_PTYPE_TUNNEL_IP,
+ },
};
+/**
+ * Translate tag ID to register.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] feature
+ *   The feature that requests the register.
+ * @param[in] id
+ *   The requested register ID.
+ * @param[out] error
+ *   Error description in case of failure.
+ *
+ * @return
+ *   The requested register on success, a negative errno
+ * value otherwise and rte_errno is set.
+ */
+enum modify_reg
+mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
+ enum mlx5_feature_name feature,
+ uint32_t id,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ enum modify_reg start_reg;
+
+ switch (feature) {
+ case MLX5_HAIRPIN_RX:
+ return REG_B;
+ case MLX5_HAIRPIN_TX:
+ return REG_A;
+ case MLX5_METADATA_RX:
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_B;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_0;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_1;
+ }
+ break;
+ case MLX5_METADATA_TX:
+ return REG_A;
+ case MLX5_METADATA_FDB:
+ return REG_C_0;
+ case MLX5_FLOW_MARK:
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_NONE;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_1;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_0;
+ }
+ break;
+ case MLX5_COPY_MARK:
+ case MLX5_MTR_SFX:
+ /*
+		 * The metadata COPY_MARK register is used in the meter suffix
+		 * sub-flow when metering is enabled. It is safe to share the
+		 * same register.
+ */
+ return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
+ case MLX5_MTR_COLOR:
+ RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+ return priv->mtr_color_reg;
+ case MLX5_APP_TAG:
+ /*
+		 * If metering is enabled, it engages two registers: one for
+		 * the color match and one for the flow match. If the meter
+		 * color match is not using REG_C_2, the REG_C_x used by the
+		 * meter color match must be skipped.
+		 * If metering is disabled, all available registers can be used.
+ */
+ if (priv->mtr_color_reg != REG_NONE)
+ start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_3 :
+ REG_C_4;
+ else
+ start_reg = REG_C_2;
+ if (id > (REG_C_7 - start_reg))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "invalid tag id");
+ if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unsupported tag id");
+ /*
+		 * This case means the meter is using a REG_C_x greater than
+		 * REG_C_2. Take care not to conflict with the meter color
+		 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
+		 * the color register.
+ */
+ if (start_reg == REG_C_3 && config->flow_mreg_c
+ [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
+ if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
+ REG_NONE)
+ return config->flow_mreg_c
+ [id + 1 + REG_C_3 - REG_C_0];
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unsupported tag id");
+ }
+ return config->flow_mreg_c[id + start_reg - REG_C_0];
+ }
+ assert(false);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid feature name");
+}
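+
+/*
+ * Usage sketch (illustrative): callers resolve the register once and
+ * propagate the negative errno encoded in the return value, e.g.:
+ *
+ *   enum modify_reg reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0,
+ *                                              error);
+ *   if (reg < 0)
+ *           return reg;
+ *
+ * rte_errno is already set on failure, so the error can be propagated
+ * as-is.
+ */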
+
+/**
+ * Check extensive flow metadata register support.
+ *
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * True if device supports extensive flow metadata register, otherwise false.
+ */
+bool
+mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+
+ /*
+	 * Having available reg_c can be regarded inclusively as supporting
+	 * extensive flow metadata registers, which could mean:
+	 * - the metadata register copy action by modify header is available.
+	 * - 16 modify header actions are supported.
+	 * - reg_c's are preserved across different domains (FDB and NIC) on
+	 *   packet loopback by flow lookup miss.
+ */
+ return config->flow_mreg_c[2] != REG_NONE;
+}
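+
+/*
+ * Callers typically combine this predicate with the configured xmeta
+ * mode, as flow_drv_rxq_flags_set() does below (sketch):
+ *
+ *   if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ *       mlx5_flow_ext_mreg_supported(dev))
+ *           enable metadata register copy handling;
+ */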
+
/**
* Discover the maximum number of priority available.
*
* @param[in] dev
- * Pointer to Ethernet device.
+ * Pointer to the Ethernet device structure.
*
* @return
* number of supported flow priority on success, a negative errno
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct {
struct ibv_flow_attr attr;
struct ibv_flow_spec_eth eth;
} flow_attr = {
.attr = {
.num_of_specs = 2,
+ .port = (uint8_t)priv->ibv_port,
},
.eth = {
.type = IBV_FLOW_SPEC_ETH,
claim_zero(mlx5_glue->destroy_flow(flow));
priority = vprio[i];
}
+ mlx5_hrxq_drop_release(dev);
switch (priority) {
case 8:
priority = RTE_DIM(priority_map_3);
rte_errno = ENOTSUP;
DRV_LOG(ERR,
"port %u verbs maximum priority: %d expected 8/16",
- dev->data->port_id, vprio[i]);
+ dev->data->port_id, priority);
return -rte_errno;
}
- mlx5_hrxq_drop_release(dev);
DRV_LOG(INFO, "port %u flow maximum priority: %d",
dev->data->port_id, priority);
return priority;
}
/**
- * Adjust flow priority.
+ * Adjust flow priority based on the highest layer and the requested priority.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Pointer to an rte flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
*/
-static void
-mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+			  uint32_t subpriority)
{
- struct priv *priv = dev->data->dev_private;
- uint32_t priority = flow->attributes.priority;
- uint32_t subpriority = flow->cur_verbs->attr->priority;
+ uint32_t res = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
- priority = priority_map_3[priority][subpriority];
+ res = priority_map_3[priority][subpriority];
break;
case RTE_DIM(priority_map_5):
- priority = priority_map_5[priority][subpriority];
+ res = priority_map_5[priority][subpriority];
break;
}
- flow->cur_verbs->attr->priority = priority;
-}
-
-/**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] attributes
- * Pointer to flow attributes
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_attributes(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attributes,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- uint32_t priority_max =
- ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
-
- if (attributes->group)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priority out of range");
- if (attributes->egress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- if (attributes->transfer)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL,
- "transfer is not supported");
- if (!attributes->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "ingress attribute is mandatory");
- flow->attributes = *attributes;
- if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
- flow->attributes.priority = priority_max;
- return 0;
+ return res;
}
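+
+/*
+ * Worked example: with 8 Verbs priorities (priority_map_3), a rule with
+ * base priority 1 whose highest matched layer is L3 (subpriority
+ * MLX5_PRIORITY_MAP_L3 == 1) is mapped to priority_map_3[1][1] == 3.
+ */
+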
/**
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
const uint8_t *mask,
const uint8_t *nic_mask,
" bits");
if (!item->spec && (item->mask || item->last))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"mask/last without a spec is not"
" supported");
if (item->spec && item->last) {
}
ret = memcmp(spec, last, size);
if (ret != 0)
- return rte_flow_error_set(error, ENOTSUP,
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "range is not supported");
+ "range is not valid");
}
return 0;
}
/**
- * Add a verbs item specification into @p flow.
- *
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] src
- * Create specification.
- * @param[in] size
- * Size in bytes of the specification to copy.
- */
-static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
-{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (verbs->specs) {
- void *dst;
-
- dst = (void *)(verbs->specs + verbs->size);
- memcpy(dst, src, size);
- ++verbs->attr->num_of_specs;
- }
- verbs->size += size;
-}
-
-/**
- * Adjust verbs hash fields according to the @p flow information.
+ * Adjust the hash fields according to the @p flow information.
*
- * @param[in, out] flow.
- * Pointer to flow structure.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
* @param[in] tunnel
* 1 when the hash field is for a tunnel item.
* @param[in] layer_types
* ETH_RSS_* types.
* @param[in] hash_fields
* Item hash fields.
+ *
+ * @return
+ * The hash fields that should be used.
*/
-static void
-mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
- int tunnel __rte_unused,
- uint32_t layer_types, uint64_t hash_fields)
+uint64_t
+mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+ int tunnel __rte_unused, uint64_t layer_types,
+ uint64_t hash_fields)
{
+ struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
- if (flow->rss.level == 2 && !tunnel)
- hash_fields = 0;
- else if (flow->rss.level < 2 && tunnel)
- hash_fields = 0;
+ int rss_request_inner = flow->rss.level >= 2;
+
+ /* Check RSS hash level for tunnel. */
+ if (tunnel && rss_request_inner)
+ hash_fields |= IBV_RX_HASH_INNER;
+ else if (tunnel || rss_request_inner)
+ return 0;
#endif
+ /* Check if requested layer matches RSS hash fields. */
if (!(flow->rss.types & layer_types))
- hash_fields = 0;
- flow->cur_verbs->hash_fields |= hash_fields;
+ return 0;
+ return hash_fields;
}
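+
+/*
+ * Typical call from a flow driver (sketch, assuming outer IPv4/TCP hash
+ * fields are being requested):
+ *
+ *   dev_flow->hash_fields |=
+ *           mlx5_flow_hashfields_adjust(dev_flow, tunnel,
+ *                                       ETH_RSS_NONFRAG_IPV4_TCP,
+ *                                       (IBV_RX_HASH_SRC_PORT_TCP |
+ *                                        IBV_RX_HASH_DST_PORT_TCP));
+ */
+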
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
+ * Look up and set the ptype in the Rx data path. Only a single ptype can be
+ * used; if several tunnel rules are used on this queue, the tunnel ptype is
+ * cleared.
*
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * @param rxq_ctrl
+ * Rx queue to update.
*/
-static int
-mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+static void
+flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- const struct rte_flow_item_eth *spec = item->spec;
- const struct rte_flow_item_eth *mask = item->mask;
- const struct rte_flow_item_eth nic_mask = {
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = RTE_BE16(0xffff),
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- const unsigned int size = sizeof(struct ibv_flow_spec_eth);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
-
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L2 layers already configured");
- if (!mask)
- mask = &rte_flow_item_eth_mask;
- ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_eth),
- error);
- if (ret)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- if (size > flow_size)
- return size;
- if (spec) {
- unsigned int i;
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
- memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth.val.ether_type = spec->type;
- memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth.mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
- eth.val.src_mac[i] &= eth.mask.src_mac[i];
+	/* Look up the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
}
- eth.val.ether_type &= eth.mask.ether_type;
}
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, ð, size);
- return size;
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
/**
- * Update the VLAN tag in the Verbs Ethernet specification.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the
+ * device flow.
*
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] eth
- * Verbs structure containing the VLAN information to copy.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ * Pointer to device flow structure.
*/
static void
-mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
- struct ibv_flow_spec_eth *eth)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(dev_flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- const enum ibv_flow_spec_type search = eth->type;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- struct ibv_flow_spec_eth *e =
- (struct ibv_flow_spec_eth *)hdr;
-
- e->val.vlan_tag = eth->val.vlan_tag;
- e->mask.vlan_tag = eth->mask.vlan_tag;
- e->val.ether_type = eth->val.ether_type;
- e->mask.ether_type = eth->mask.ether_type;
- break;
+
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->rss.queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ /*
+		 * To support metadata register copy on Tx loopback,
+		 * this must always be enabled (metadata may arrive
+		 * from another port, not from local flows only).
+ */
+ if (priv->config.dv_flow_en &&
+ priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev)) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n = 1;
+ } else if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
}
}
/**
- * Convert the @p item into @p flow (or by updating the already present
- * Ethernet Verbs) specification after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
* Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
*/
-static int
-mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+static void
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- const struct rte_flow_item_vlan *spec = item->spec;
- const struct rte_flow_item_vlan *mask = item->mask;
- const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
- };
- unsigned int size = sizeof(struct ibv_flow_spec_eth);
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- int ret;
- const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
- MLX5_FLOW_LAYER_INNER_L4) :
- (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
- const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
- const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ struct mlx5_flow *dev_flow;
- if (flow->layers & vlanm)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VLAN layer already configured");
- else if ((flow->layers & l34m) != 0)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L2 layer cannot follow L3/L4 layer");
- if (!mask)
- mask = &rte_flow_item_vlan_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_vlan), error);
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_set(dev, dev_flow);
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * device flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the device flow.
+ */
+static void
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(dev_flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->rss.queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (priv->config.dv_flow_en &&
+ priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev)) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n = 1;
+ } else if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_trim(dev, dev_flow);
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
+ }
+}
+
+/*
+ * Return a pointer to the desired action in the list of actions.
+ *
+ * @param[in] actions
+ * The list of actions to search the action in.
+ * @param[in] action
+ * The action to find.
+ *
+ * @return
+ * Pointer to the action in the list, if found. NULL otherwise.
+ */
+const struct rte_flow_action *
+mlx5_flow_find_action(const struct rte_flow_action *actions,
+ enum rte_flow_action_type action)
+{
+ if (actions == NULL)
+ return NULL;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+ if (actions->type == action)
+ return actions;
+ return NULL;
+}
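+
+/*
+ * Usage sketch: locate a QUEUE action in an action list and read its
+ * configuration (names are illustrative):
+ *
+ *   const struct rte_flow_action *qact =
+ *           mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_QUEUE);
+ *   if (qact)
+ *           queue = ((const struct rte_flow_action_queue *)
+ *                    qact->conf)->index;
+ */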
+
+/*
+ * Validate the flag action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_flag(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't mark and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 flag"
+ " actions in same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "flag action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the mark action.
+ *
+ * @param[in] action
+ *   Pointer to the mark action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+ "mark id must in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't flag and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 mark actions in same"
+ " flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "mark action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the drop action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_drop(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "drop action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the queue action.
+ *
+ * @param[in] action
+ * Pointer to the queue action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (!priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No Rx queues configured");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "queue action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the rss action.
+ *
+ * @param[in] action
+ *   Pointer to the rss action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Items that were detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions"
+ " in same flow");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ /* allow RSS key_len 0 in case of NULL (default) RSS key. */
+ if (rss->key_len == 0 && rss->key != NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key length 0");
+ if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ if (!priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No Rx queues configured");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No queues configured");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i], "queue is not configured");
+ }
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "rss action not supported for "
+ "egress");
+ if (rss->level > 1 && !tunnel)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "inner RSS is not supported for "
+ "non-tunnel flows");
+ return 0;
+}
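+
+/*
+ * Example of an RSS action that passes this validation (sketch, assuming
+ * at least two configured Rx queues and the default RSS key):
+ *
+ *   uint16_t queues[] = { 0, 1 };
+ *   struct rte_flow_action_rss rss = {
+ *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
+ *           .level = 1,
+ *           .types = ETH_RSS_IP,
+ *           .key_len = 0,
+ *           .key = NULL,
+ *           .queue_num = 2,
+ *           .queue = queues,
+ *   };
+ *
+ * level 1 requests hashing on the outermost encapsulation; key_len 0
+ * with a NULL key selects the default RSS key.
+ */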
+
+/*
+ * Validate the count action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "count action not supported for "
+ "egress");
+ return 0;
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "egress is not supported");
+ if (attributes->transfer && !priv->config.dv_esw_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ return 0;
+}
+
+/**
+ * Validate ICMP6 item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_icmp6 *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with ICMP6 layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv6 is mandatory to filter on"
+ " ICMP6");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (!mask)
+ mask = &rte_flow_item_icmp6_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_icmp6_mask,
+ sizeof(struct rte_flow_item_icmp6), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Validate ICMP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_icmp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with ICMP layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv4 is mandatory to filter"
+ " on ICMP");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (!mask)
+ mask = &rte_flow_item_icmp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_icmp_mask,
+ sizeof(struct rte_flow_item_icmp), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Validate Ethernet item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *mask = item->mask;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ int ret;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (item_flags & ethm)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L2 layers not supported");
+ if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
+ (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow "
+ "L3 layers");
+ if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
+ (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow VLAN");
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ return ret;
+}
+
+/**
+ * Validate VLAN item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Ethernet device flow is being created on.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(UINT16_MAX),
+ .inner_type = RTE_BE16(UINT16_MAX),
+ };
+ uint16_t vlan_tag = 0;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+ const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
+ const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple VLAN layers not supported");
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ error);
if (ret)
return ret;
- if (spec) {
- eth.val.vlan_tag = spec->tci;
- eth.mask.vlan_tag = mask->tci;
- eth.val.vlan_tag &= eth.mask.vlan_tag;
- eth.val.ether_type = spec->inner_type;
- eth.mask.ether_type = mask->inner_type;
- eth.val.ether_type &= eth.mask.ether_type;
- }
- /*
- * From verbs perspective an empty VLAN is equivalent
- * to a packet without VLAN layer.
- */
- if (!eth.mask.vlan_tag)
+ if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->vmwa_context) {
+ /*
+			 * Non-NULL context means we have a virtual machine
+			 * and SR-IOV enabled, so we have to create a VLAN
+			 * interface to make the hypervisor set up the
+			 * E-Switch vport context correctly. We avoid creating
+			 * multiple VLAN interfaces, so we cannot support a
+			 * VLAN tag mask.
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN tag mask is not"
+ " supported in virtual"
+ " environment");
+ }
+ }
+ if (spec) {
+ vlan_tag = spec->tci;
+ vlan_tag &= mask->tci;
+ }
+ /*
+	 * From the Verbs perspective an empty VLAN is equivalent
+	 * to a packet without a VLAN layer.
+ */
+ if (!vlan_tag)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ return 0;
+}
+
+/**
+ * Validate IPV4 item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the ethernet layer header (including dot1q).
+ * @param[in] acc_mask
+ *   Acceptable mask, if NULL the default internal mask
+ *   will be used to check whether item fields are supported.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ const struct rte_flow_item_ipv4 *acc_mask,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+ uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV4)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv4 cannot follow L2/VLAN layer "
+ "which ether type is not IPv4");
+ if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+ if (mask && spec)
+ next_proto = mask->hdr.next_proto_id &
+ spec->hdr.next_proto_id;
+ if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple tunnel "
+ "not supported");
+ }
+ if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "wrong tunnel type - IPv6 specified "
+ "but IPv4 item provided");
+ if (item_flags & l3m)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L3 layers not supported");
+ else if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an L4 layer.");
+ else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an NVGRE layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ else if (mask->hdr.next_proto_id != 0 &&
+ mask->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "partial mask is not supported"
+ " for protocol");
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Validate IPV6 item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the ethernet layer header (including dot1q).
+ * @param[in] acc_mask
+ *   Acceptable mask, if NULL the default internal mask
+ *   will be used to check whether item fields are supported.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ const struct rte_flow_item_ipv6 *acc_mask,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+ uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv6 cannot follow L2/VLAN layer "
+ "which ether type is not IPv6");
+ if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+ if (mask && spec)
+ next_proto = mask->hdr.proto & spec->hdr.proto;
+ if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple tunnel "
+ "not supported");
+ }
+ if (item_flags & MLX5_FLOW_LAYER_IPIP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "wrong tunnel type - IPv4 specified "
+ "but IPv6 item provided");
+ if (item_flags & l3m)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L3 layers not supported");
+ else if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an L4 layer.");
+ else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 cannot follow an NVGRE layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Validate UDP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Validate TCP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[in] flow_mask
+ * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ assert(flow_mask);
+ if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- item->spec,
- "VLAN cannot be empty");
- if (!(flow->layers & l2m)) {
- if (size <= flow_size) {
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, ð, size);
- }
- } else {
- if (flow->cur_verbs)
- mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
- ð);
- size = 0; /* Only an update is done in eth specification. */
- }
- flow->layers |= tunnel ?
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
- return size;
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)flow_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ return 0;
}
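
/*
 * Illustrative sketch (hypothetical mask, not defined by this patch):
 * the flow_mask argument lets each backend declare which TCP header
 * fields it can match; a backend supporting port matching only could
 * pass the following to the validator above.
 */
static const struct rte_flow_item_tcp example_tcp_support_mask = {
	.hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	},
};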
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VXLAN item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv4 *spec = item->spec;
- const struct rte_flow_item_ipv4 *mask = item->mask;
- const struct rte_flow_item_ipv4 nic_mask = {
- .hdr = {
- .src_addr = RTE_BE32(0xffffffff),
- .dst_addr = RTE_BE32(0xffffffff),
- .type_of_service = 0xff,
- .next_proto_id = 0xff,
- },
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
- struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = IBV_FLOW_SPEC_IPV4_EXT |
- (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 cannot follow an L4 layer.");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
if (!mask)
- mask = &rte_flow_item_ipv4_mask;
+ mask = &rte_flow_item_vxlan_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv4), error);
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (spec) {
- ipv4.val = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4.val.src_ip &= ipv4.mask.src_ip;
- ipv4.val.dst_ip &= ipv4.mask.dst_ip;
- ipv4.val.proto &= ipv4.mask.proto;
- ipv4.val.tos &= ipv4.mask.tos;
- }
- flow->l3_protocol_en = !!ipv4.mask.proto;
- flow->l3_protocol = ipv4.val.proto;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ memcpy(&id.vni[1], spec->vni, 3);
+ vlan_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vlan_id &= id.vlan_id;
}
- return size;
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+ * only this layer is defined in the Verbs specification it is
+ * interpreted as a wildcard and all packets will match this
+ * rule, and if it follows a full stack layer (ex: eth / ipv4 /
+ * udp), all packets matching the layers before will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
+ */
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN vni cannot be 0");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN tunnel must be fully defined");
+ return 0;
}
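
/*
 * Illustrative sketch (standalone program, not part of the driver):
 * the 24-bit VNI is copied into bytes 1..3 of a 32-bit scratch word as
 * done above; byte order is irrelevant because the validator only tests
 * the masked result against zero.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const uint8_t spec_vni[3] = { 0x00, 0x00, 0x2a }; /* VNI 42 */
	const uint8_t mask_vni[3] = { 0xff, 0xff, 0xff };
	union {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };
	uint32_t vlan_id;

	memcpy(&id.vni[1], spec_vni, 3);
	vlan_id = id.vlan_id;
	memcpy(&id.vni[1], mask_vni, 3);
	vlan_id &= id.vlan_id;
	printf("VNI wildcard? %s\n", vlan_id ? "no" : "yes");
	return 0;
}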
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VXLAN_GPE item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv6 *spec = item->spec;
- const struct rte_flow_item_ipv6 *mask = item->mask;
- const struct rte_flow_item_ipv6 nic_mask = {
- .hdr = {
- .src_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .vtc_flow = RTE_BE32(0xffffffff),
- .proto = 0xff,
- .hop_limits = 0xff,
- },
- };
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
- struct ibv_flow_spec_ipv6 ipv6 = {
- .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
+ if (!priv->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 cannot follow an L4 layer.");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
/*
- * IPv6 is not recognised by the NIC inside a GRE tunnel.
- * Such support has to be disabled as the rule will be
- * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
- * Mellanox OFED 4.4-1.0.0.0.
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
*/
- if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "IPv6 inside a GRE tunnel is"
- " not recognised.");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
if (!mask)
- mask = &rte_flow_item_ipv6_mask;
+ mask = &rte_flow_item_vxlan_gpe_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv6), error);
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe),
+ error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (spec) {
- unsigned int i;
- uint32_t vtc_flow_val;
- uint32_t vtc_flow_mask;
-
- memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6.val.src_ip));
- memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6.val.dst_ip));
- memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6.mask.src_ip));
- memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6.mask.dst_ip));
- vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
- vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
- ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.val.next_hdr = spec->hdr.proto;
- ipv6.val.hop_limit = spec->hdr.hop_limits;
- ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.mask.next_hdr = mask->hdr.proto;
- ipv6.mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
- ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
- ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
- }
- ipv6.val.flow_label &= ipv6.mask.flow_label;
- ipv6.val.traffic_class &= ipv6.mask.traffic_class;
- ipv6.val.next_hdr &= ipv6.mask.next_hdr;
- ipv6.val.hop_limit &= ipv6.mask.hop_limit;
- }
- flow->l3_protocol_en = !!ipv6.mask.next_hdr;
- flow->l3_protocol = ipv6.val.next_hdr;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
- (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ if (spec->protocol)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol"
+ " not supported");
+ memcpy(&id.vni[1], spec->vni, 3);
+ vlan_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vlan_id &= id.vlan_id;
}
- return size;
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
+ * layer is defined in the Verbs specification it is interpreted as a
+ * wildcard and all packets will match this rule, and if it follows a
+ * full stack layer (ex: eth / ipv4 / udp), all packets matching the
+ * layers before will also match this rule. To avoid such a situation,
+ * VNI 0 is currently refused.
+ */
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ return 0;
+}
+/**
+ * Validate GRE Key item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit flags to mark detected items.
+ * @param[in] gre_item
+ * Pointer to the GRE item that precedes the key.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_item *gre_item,
+ struct rte_flow_error *error)
+{
+ const rte_be32_t *mask = item->mask;
+ int ret = 0;
+ rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+ const struct rte_flow_item_gre *gre_spec = gre_item->spec;
+ const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+
+ if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Multiple GRE key not support");
+ if (!(item_flags & MLX5_FLOW_LAYER_GRE))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "No preceding GRE header");
+ if (item_flags & MLX5_FLOW_LAYER_INNER)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GRE key following a wrong item");
+ if (!gre_mask)
+ gre_mask = &rte_flow_item_gre_mask;
+ if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
+ !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Key bit must be on");
+
+ if (!mask)
+ mask = &gre_key_default_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&gre_key_default_mask,
+ sizeof(rte_be32_t), error);
+ return ret;
}
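
/*
 * Illustrative sketch (hypothetical helper, not defined by this patch):
 * c_rsvd0_ver carries the first 16 bits of the GRE header in network
 * byte order: C (0x8000), K (0x2000) and S (0x1000) flag bits plus the
 * version field; the GRE nic_mask below (0xB000) is exactly C | K | S.
 * The key-bit test used above, in standalone form:
 */
#include <rte_byteorder.h>

static inline int
example_gre_key_present(rte_be16_t c_rsvd0_ver)
{
	return (c_rsvd0_ver & RTE_BE16(0x2000)) != 0;
}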
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate GRE item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit flags to mark detected items.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_udp *spec = item->spec;
- const struct rte_flow_item_udp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp udp = {
- .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const struct rte_flow_item_gre *spec __rte_unused = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
int ret;
+ const struct rte_flow_item_gre nic_mask = {
+ .c_rsvd0_ver = RTE_BE16(0xB000),
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
- " with UDP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
+ " with this GRE layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 is mandatory to filter"
- " on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L4 layer is already"
- " present");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Layer is missing");
if (!mask)
- mask = &rte_flow_item_udp_mask;
+ mask = &rte_flow_item_gre_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_udp_mask,
- sizeof(struct rte_flow_item_udp), error);
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gre), error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
+#ifndef HAVE_MLX5DV_DR
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif
+#endif
+ return 0;
+}
+
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_geneve *spec = item->spec;
+ const struct rte_flow_item_geneve *mask = item->mask;
+ int ret;
+ uint16_t gbhdr;
+ uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+ const struct rte_flow_item_geneve nic_mask = {
+ .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+ .vni = "\xff\xff\xff",
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+
+ if (!(priv->config.hca_attr.flex_parser_protocols &
+ MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+ !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Geneve is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ /*
+ * Verify an outer UDP layer is present, as required for
+ * Geneve encapsulation.
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_geneve_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_geneve), error);
+ if (ret)
+ return ret;
if (spec) {
- udp.val.dst_port = spec->hdr.dst_port;
- udp.val.src_port = spec->hdr.src_port;
- udp.mask.dst_port = mask->hdr.dst_port;
- udp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp.val.src_port &= udp.mask.src_port;
- udp.val.dst_port &= udp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
- (IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &udp, size);
+ gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+ if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+ MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+ MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Geneve protocol unsupported"
+ " fields are being used");
+ if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported Geneve options length");
}
- return size;
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve tunnel must be fully defined");
+ return 0;
}
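
/*
 * Illustrative sketch (assumed layout; the MLX5_GENEVE_*_VAL() macro
 * definitions are not part of this patch): the Geneve base header word
 * is presumed to be
 *   Ver (2 bits) | Opt Len (6) | O (1) | C (1) | Rsvd (6)
 * so nic_mask.ver_opt_len_o_c_rsvd0 = 0x3f80 permits matching on the
 * option length and OAM bit only. Hypothetical accessors:
 */
#include <stdint.h>

static inline uint8_t example_geneve_ver(uint16_t w)     { return w >> 14; }
static inline uint8_t example_geneve_opt_len(uint16_t w) { return (w >> 8) & 0x3f; }
static inline uint8_t example_geneve_oam(uint16_t w)     { return (w >> 7) & 0x1; }
static inline uint8_t example_geneve_crit(uint16_t w)    { return (w >> 6) & 0x1; }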
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate MPLS item.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] prev_layer
+ * The protocol layer indicated in previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
+ uint64_t item_flags __rte_unused,
+ uint64_t prev_layer __rte_unused,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_tcp *spec = item->spec;
- const struct rte_flow_item_tcp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp tcp = {
- .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *mask = item->mask;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
+ if (!priv->config.mpls_en)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS not supported or"
+ " disabled in firmware"
+ " configuration.");
+ /* MPLS over IP, UDP, GRE is allowed */
+ if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ MLX5_FLOW_LAYER_GRE)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
- " with TCP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 is mandatory to filter on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
+ " with MPLS layer");
+ /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+ if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+ !(item_flags & MLX5_FLOW_LAYER_GRE))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L4 layer is already present");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
if (!mask)
- mask = &rte_flow_item_tcp_mask;
+ mask = &rte_flow_item_mpls_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_tcp_mask,
- sizeof(struct rte_flow_item_tcp), error);
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- if (spec) {
- tcp.val.dst_port = spec->hdr.dst_port;
- tcp.val.src_port = spec->hdr.src_port;
- tcp.mask.dst_port = mask->hdr.dst_port;
- tcp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp.val.src_port &= tcp.mask.src_port;
- tcp.val.dst_port &= tcp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
- (IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &tcp, size);
- }
- return size;
+ return 0;
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
}
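
/*
 * Illustrative sketch (example pattern, not defined by this patch): an
 * item sequence the checks above accept, MPLS over GRE:
 */
static const struct rte_flow_item example_mpls_over_gre[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
	{ .type = RTE_FLOW_ITEM_TYPE_MPLS },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};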
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate NVGRE item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit flags to mark detected items.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_vxlan *spec = item->spec;
- const struct rte_flow_item_vxlan *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
+ const struct rte_flow_item_nvgre *mask = item->mask;
int ret;
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- /*
- * Verify only UDPv4 is present as defined in
- * https://tools.ietf.org/html/rfc7348
- */
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "no outer UDP layer found");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Layer is missing");
if (!mask)
- mask = &rte_flow_item_vxlan_mask;
+ mask = &rte_flow_item_nvgre_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_vxlan_mask,
- sizeof(struct rte_flow_item_vxlan), error);
+ (const uint8_t *)&rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre), error);
if (ret < 0)
return ret;
- if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan.mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
- }
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
- * only this layer is defined in the Verbs specification it is
- * interpreted as wildcard and all packets will match this
- * rule, if it follows a full stack layer (ex: eth / ipv4 /
- * udp), all packets matching the layers before will also
- * match this rule. To avoid such situation, VNI 0 is
- * currently refused.
- */
- if (!vxlan.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN tunnel must be fully defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN;
- return size;
+ return 0;
+}
+
+/* Allocate unique ID for the split Q/RSS subflows. */
+static uint32_t
+flow_qrss_get_id(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t qrss_id, ret;
+
+ ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
+ if (ret)
+ return 0;
+ assert(qrss_id);
+ return qrss_id;
+}
+
+/* Free unique ID for the split Q/RSS subflows. */
+static void
+flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (qrss_id)
+ mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Release resource related QUEUE/RSS action split.
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param flow
+ * Flow to release id's from.
+ */
+static void
+flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ if (dev_flow->qrss_id)
+ flow_qrss_free_id(dev, dev_flow->qrss_id);
+}
+
+static int
+flow_null_validate(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ bool external __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
+}
+
+static struct mlx5_flow *
+flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
+ return NULL;
+}
+
+static int
+flow_null_translate(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow *dev_flow __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
+}
+
+static int
+flow_null_apply(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
+}
+
+static void
+flow_null_remove(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
+{
+}
+
+static void
+flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
+{
+}
+
+static int
+flow_null_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
+}
+
+/* Void driver to protect against a NULL pointer dereference. */
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
+ .validate = flow_null_validate,
+ .prepare = flow_null_prepare,
+ .translate = flow_null_translate,
+ .apply = flow_null_apply,
+ .remove = flow_null_remove,
+ .destroy = flow_null_destroy,
+ .query = flow_null_query,
+};
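
/*
 * Illustrative sketch (hypothetical helper, not defined by this patch):
 * the null ops implement a null-object pattern. Code that resolves an
 * uninitialized or out-of-range driver type gets a functional ops table
 * that fails with ENOTSUP instead of dereferencing a NULL pointer.
 */
static int
example_apply_min_type(struct rte_eth_dev *dev, struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops =
		flow_drv_ops[MLX5_FLOW_TYPE_MIN]; /* &mlx5_flow_null_drv_ops */

	return fops->apply(dev, flow, error); /* always fails, -ENOTSUP */
}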
+
+/**
+ * Select flow driver type according to flow attributes and device
+ * configuration.
+ *
+ * @param[in] dev
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ *
+ * @return
+ * flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
+ */
+static enum mlx5_flow_drv_type
+flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
+
+ if (attr->transfer && priv->config.dv_esw_en)
+ type = MLX5_FLOW_TYPE_DV;
+ if (!attr->transfer)
+ type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+ MLX5_FLOW_TYPE_VERBS;
+ return type;
+}
+
+#define flow_get_drv_ops(type) flow_drv_ops[type]
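
/*
 * Illustrative sketch (hypothetical helper, not defined by this patch):
 * typical resolution of the ops table from flow attributes. A transfer
 * rule with dv_esw_en picks DV; a non-transfer rule picks DV when
 * dv_flow_en is set and Verbs otherwise.
 */
static inline const struct mlx5_flow_driver_ops *
example_resolve_ops(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
{
	return flow_get_drv_ops(flow_get_drv_type(dev, attr));
}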
+
+/**
+ * Flow driver validation API. This abstracts calling driver specific functions.
+ * The type of flow driver is determined according to flow attributes.
+ *
+ * @param[in] dev
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[in] external
+ * Whether this flow rule is created by a request external to the PMD.
* @param[out] error
- * Pointer to error structure.
+ * Pointer to the error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
- const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+static inline int
+flow_drv_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
{
- const struct rte_flow_item_vxlan_gpe *spec = item->spec;
- const struct rte_flow_item_vxlan_gpe *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan_gpe = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
- int ret;
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
- if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 VXLAN is not enabled by device"
- " parameter and/or not configured in"
- " firmware");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- /*
- * Verify only UDPv4 is present as defined in
- * https://tools.ietf.org/html/rfc7348
- */
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "no outer UDP layer found");
- if (!mask)
- mask = &rte_flow_item_vxlan_gpe_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
- sizeof(struct rte_flow_item_vxlan_gpe), error);
- if (ret < 0)
- return ret;
- if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan_gpe.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan_gpe.mask.tunnel_id = id.vlan_id;
- if (spec->protocol)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VxLAN-GPE protocol not supported");
- /* Remove unwanted bits from values. */
- vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
- }
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
- * layer is defined in the Verbs specification it is interpreted as
- * wildcard and all packets will match this rule, if it follows a full
- * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule. To avoid such situation, VNI 0
- * is currently refused.
- */
- if (!vxlan_gpe.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN-GPE vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VXLAN-GPE tunnel must be fully"
- " defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
- return size;
+ fops = flow_get_drv_ops(type);
+ return fops->validate(dev, attr, items, actions, external, error);
+}
+
+/**
+ * Flow driver preparation API. This abstracts calling driver specific
+ * functions. Parent flow (rte_flow) should have driver type (drv_type). It
+ * calculates the size of memory required for the device flow, allocates it,
+ * initializes the device flow and returns the pointer.
+ *
+ * @note
+ * This function initializes the device flow structure, such as dv or verbs in
+ * struct mlx5_flow. However, it is the caller's responsibility to initialize
+ * the rest. For example, adding the returned device flow to the
+ * flow->dev_flows list and setting the backward reference to the flow should
+ * be done outside of this function. The layers field is not filled either.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to device flow on success, otherwise NULL and rte_errno is set.
+ */
+static inline struct mlx5_flow *
+flow_drv_prepare(const struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
+
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->prepare(attr, items, actions, error);
+}
+
+/**
+ * Flow driver translation API. This abstracts calling driver specific
+ * functions. Parent flow (rte_flow) should have driver type (drv_type). It
+ * translates a generic flow into a driver flow. flow_drv_prepare() must
+ * precede.
+ *
+ * @note
+ * dev_flow->layers could be filled as a result of parsing during translation
+ * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
+ * if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
+ * flow->actions could be overwritten even though all the expanded dev_flows
+ * have the same actions.
+ *
+ * @param[in] dev
+ * Pointer to the rte dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5 flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static inline int
+flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
+
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->translate(dev, dev_flow, attr, items, actions, error);
+}
+
+/**
+ * Flow driver apply API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It applies
+ * translated driver flows on to device. flow_drv_translate() must precede.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static inline int
+flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
+
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->apply(dev, flow, error);
}
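
/*
 * Illustrative sketch (hypothetical caller, not defined by this patch):
 * the canonical pipeline the wrappers above enforce, including the list
 * and back-reference bookkeeping the prepare note requires of the caller.
 */
static int
example_create_pipeline(struct rte_eth_dev *dev, struct rte_flow *flow,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item items[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;
	int ret;

	ret = flow_drv_validate(dev, attr, items, actions, true, error);
	if (ret < 0)
		return ret;
	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
	if (!dev_flow)
		return -rte_errno;
	dev_flow->flow = flow;
	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
	if (ret < 0)
		return ret;
	return flow_drv_apply(dev, flow, error);
}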
/**
- * Update the protocol in Verbs IPv4/IPv6 spec.
+ * Flow driver remove API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
+ * on device. All the resources of the flow should be freed by calling
+ * flow_drv_destroy().
*
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] search
- * Specification type to search in order to update the IP protocol.
- * @param[in] protocol
- * Protocol value to set if none is present in the specification.
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
*/
-static void
-mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
- enum ibv_flow_spec_type search,
- uint8_t protocol)
+static inline void
+flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- unsigned int i;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- if (!attr)
- return;
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- union {
- struct ibv_flow_spec_ipv4_ext *ipv4;
- struct ibv_flow_spec_ipv6 *ipv6;
- } ip;
-
- switch (search) {
- case IBV_FLOW_SPEC_IPV4_EXT:
- ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
- if (!ip.ipv4->val.proto) {
- ip.ipv4->val.proto = protocol;
- ip.ipv4->mask.proto = 0xff;
- }
- break;
- case IBV_FLOW_SPEC_IPV6:
- ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
- if (!ip.ipv6->val.next_hdr) {
- ip.ipv6->val.next_hdr = protocol;
- ip.ipv6->mask.next_hdr = 0xff;
- }
- break;
- default:
- break;
- }
- break;
- }
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->remove(dev, flow);
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * It will also update the previous L3 layer with the protocol value matching
- * the GRE.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Flow driver destroy API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
+ * on device and releases resources of the flow.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
- * @param[in] item
- * Item specification.
* @param[in, out] flow
* Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
+ */
+static inline void
+flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
+
+ flow_mreg_split_qrss_release(dev, flow);
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->destroy(dev, flow);
+}
+
+/**
+ * Validate a flow supported by the NIC.
*
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
*/
-static int
-mlx5_flow_item_gre(const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
- const struct rte_flow_item_gre *spec = item->spec;
- const struct rte_flow_item_gre *mask = item->mask;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_gre);
- struct ibv_flow_spec_gre tunnel = {
- .type = IBV_FLOW_SPEC_GRE,
- .size = size,
- };
-#else
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel tunnel = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
-#endif
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "protocol filtering not compatible"
- " with this GRE layer");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already present");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 Layer is missing");
- if (!mask)
- mask = &rte_flow_item_gre_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_gre_mask,
- sizeof(struct rte_flow_item_gre), error);
+ ret = flow_drv_validate(dev, attr, items, actions, true, error);
if (ret < 0)
return ret;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- if (spec) {
- tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
- tunnel.val.protocol = spec->protocol;
- tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
- tunnel.mask.protocol = mask->protocol;
- /* Remove unwanted bits from values. */
- tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
- tunnel.val.protocol &= tunnel.mask.protocol;
- tunnel.val.key &= tunnel.mask.key;
- }
-#else
- if (spec && (spec->protocol & mask->protocol))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "without MPLS support the"
- " specification cannot be used for"
- " filtering");
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- if (size <= flow_size) {
- if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
- MLX5_IP_PROTOCOL_GRE);
- else
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV6,
- MLX5_IP_PROTOCOL_GRE);
- mlx5_flow_spec_verbs_add(flow, &tunnel, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_GRE;
- return size;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Get RSS action from the action list.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * Pointer to the RSS action if it exists, NULL otherwise.
*/
-static int
-mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
- struct rte_flow *flow __rte_unused,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
+static const struct rte_flow_action_rss *
+flow_get_rss_action(const struct rte_flow_action actions[])
{
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- const struct rte_flow_item_mpls *spec = item->spec;
- const struct rte_flow_item_mpls *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_mpls);
- struct ibv_flow_spec_mpls mpls = {
- .type = IBV_FLOW_SPEC_MPLS,
- .size = size,
- };
- int ret;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ return (const struct rte_flow_action_rss *)
+ actions->conf;
+ default:
+ break;
+ }
+ }
+ return NULL;
+}
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "protocol filtering not compatible"
- " with MPLS layer");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "a tunnel is already"
- " present");
- if (!mask)
- mask = &rte_flow_item_mpls_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_mpls_mask,
- sizeof(struct rte_flow_item_mpls), error);
- if (ret < 0)
- return ret;
- if (spec) {
- memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
- memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
- /* Remove unwanted bits from values. */
- mpls.val.label &= mpls.mask.label;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &mpls, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_MPLS;
- return size;
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MPLS is not supported by Verbs, please"
- " update.");
+static unsigned int
+find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+{
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
+ }
+ }
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
- * Convert the @p pattern into a Verbs specifications after ensuring the NIC
- * will understand and process it correctly.
- * The conversion is performed item per item, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p pattern, unless an error is encountered.
+ * Get QUEUE/RSS action from the action list.
*
- * @param[in] pattern
- * Flow pattern.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[out] error
- * Pointer to error structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] qrss
+ * Pointer to the return pointer; set to the first QUEUE/RSS action found,
+ * if any.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @pattern has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * Total number of actions, including the terminating END action.
*/
static int
-mlx5_flow_items(struct rte_eth_dev *dev,
- const struct rte_flow_item pattern[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+flow_parse_qrss_action(const struct rte_flow_action actions[],
+ const struct rte_flow_action **qrss)
{
- int remain = flow_size;
- size_t size = 0;
+ int actions_n = 0;
- for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
- int ret = 0;
-
- switch (pattern->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- ret = mlx5_flow_item_eth(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ *qrss = actions;
break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
+ default:
break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_item_vxlan(pattern, flow, remain,
- error);
+ }
+ actions_n++;
+ }
+ /* Count RTE_FLOW_ACTION_TYPE_END. */
+ return actions_n + 1;
+}
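/*
 * Editorial sketch, not part of the patch: expected behavior of
 * flow_parse_qrss_action() on a hypothetical action list. The return
 * value counts the END terminator as well, and *qrss is pointed at the
 * QUEUE/RSS action when one is present.
 */
static const struct rte_flow_action_mark ex_mark = { .id = 42 };
static const struct rte_flow_action_queue ex_queue = { .index = 0 };
static const struct rte_flow_action ex_acts[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &ex_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &ex_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/*
 * const struct rte_flow_action *qrss = NULL;
 * int n = flow_parse_qrss_action(ex_acts, &qrss);
 * Here n == 3 and qrss == &ex_acts[1].
 */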
+
+/**
+ * Check if the flow should be split due to hairpin.
+ * The reason for the split is that in current HW we can't
+ * support encap on Rx, so if a flow has encap we move it
+ * to Tx.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ *
+ * @return
+ *   > 0 the number of actions if the flow should be split,
+ *   0 when no split is required.
+ */
+static int
+flow_check_hairpin_split(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[])
+{
+ int queue_action = 0;
+ int action_n = 0;
+ int encap = 0;
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action_raw_encap *raw_encap;
+
+ if (!attr->ingress)
+ return 0;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+ if (mlx5_rxq_get_type(dev, queue->index) !=
+ MLX5_RXQ_TYPE_HAIRPIN)
+ return 0;
+ queue_action = 1;
+ action_n++;
break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
- remain, error);
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
+ MLX5_RXQ_TYPE_HAIRPIN)
+ return 0;
+ queue_action = 1;
+ action_n++;
break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap = 1;
+ action_n++;
break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size >
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4)))
+ encap = 1;
+ action_n++;
break;
default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern,
- "item not supported");
+ action_n++;
+ break;
}
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->layers) {
- const struct rte_flow_item item = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- };
-
- return mlx5_flow_item_eth(&item, flow, flow_size, error);
}
- return size;
+ if (encap == 1 && queue_action)
+ return action_n;
+ return 0;
}
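/*
 * Editorial sketch, not part of the patch: an ingress action list that
 * flow_check_hairpin_split() would report for splitting, assuming queue
 * 0 is configured as a hairpin queue. With a hairpin QUEUE fate and a
 * VXLAN encap both present, the call returns the action count (2 here).
 */
static const struct rte_flow_action_queue ex_hp_queue = { .index = 0 };
static const struct rte_flow_action ex_hp_acts[] = {
	/* conf elided for brevity; validation is not exercised here. */
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &ex_hp_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};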
+/* Declare flow create/destroy prototype in advance. */
+static struct rte_flow *
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error);
+
+static void
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow);
+
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
*
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise, create
+ * the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ * - If ingress port is ANY and reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For the default flow (zero mark_id), the flow looks like,
+ * - If ingress port is ANY,
+ * reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mark_id
+ * ID of MARK action, zero means default flow for META.
* @param[out] error
- * Pointer to error structure.
+ * Perform verbose error reporting if not NULL.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * Associated resource on success, NULL otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_action_drop(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+ struct rte_flow_error *error)
{
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- struct ibv_flow_spec_action_drop drop = {
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_attr attr = {
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ .ingress = 1,
+ };
+ struct mlx5_rte_flow_item_tag tag_spec = {
+ .data = mark_id,
};
+ struct rte_flow_item items[] = {
+ [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
+ };
+ struct rte_flow_action_mark ftag = {
+ .id = mark_id,
+ };
+ struct mlx5_flow_action_copy_mreg cp_mreg = {
+ .dst = REG_B,
+ .src = 0,
+ };
+ struct rte_flow_action_jump jump = {
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ };
+ struct rte_flow_action actions[] = {
+ [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
+ };
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ int ret;
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "drop is not compatible with"
- " flag/mark action");
- if (size < flow_size)
- mlx5_flow_spec_verbs_add(flow, &drop, size);
- flow->fate |= MLX5_FLOW_FATE_DROP;
- return size;
+	/* Fill the register fields in the flow. */
+ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ if (ret < 0)
+ return NULL;
+ tag_spec.id = ret;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
+ if (ret < 0)
+ return NULL;
+ cp_mreg.src = ret;
+ /* Check if already registered. */
+ assert(priv->mreg_cp_tbl);
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
+ if (mcp_res) {
+ /* For non-default rule. */
+ if (mark_id)
+ mcp_res->refcnt++;
+ assert(mark_id || mcp_res->refcnt == 1);
+ return mcp_res;
+ }
+ /* Provide the full width of FLAG specific value. */
+ if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
+ tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
+ /* Build a new flow. */
+ if (mark_id) {
+ items[0] = (struct rte_flow_item){
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_spec,
+ };
+ items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ actions[0] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
+ .conf = &ftag,
+ };
+ actions[1] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &cp_mreg,
+ };
+ actions[2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ };
+ actions[3] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ } else {
+ /* Default rule, wildcard match. */
+ attr.priority = MLX5_FLOW_PRIO_RSVD;
+ items[0] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ actions[0] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &cp_mreg,
+ };
+ actions[1] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ };
+ actions[2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ }
+ /* Build a new entry. */
+ mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
+ if (!mcp_res) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+	/*
+	 * The copy Flows are not included in any list. These
+	 * ones are referenced from other Flows and cannot
+	 * be applied, removed or deleted in arbitrary order
+	 * by list traversing.
+	 */
+ mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
+ actions, false, error);
+ if (!mcp_res->flow)
+ goto error;
+ mcp_res->refcnt++;
+ mcp_res->hlist_ent.key = mark_id;
+ ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
+ &mcp_res->hlist_ent);
+ assert(!ret);
+ if (ret)
+ goto error;
+ return mcp_res;
+error:
+ if (mcp_res->flow)
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ rte_free(mcp_res);
+ return NULL;
}
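/*
 * Editorial sketch, not part of the patch: typical use of
 * flow_mreg_add_copy_action(). The resource is reference-counted, so a
 * second call with the same non-zero mark_id only bumps the refcount
 * instead of building another copy flow.
 *
 * struct rte_flow_error err;
 * struct mlx5_flow_mreg_copy_resource *res;
 *
 * res = flow_mreg_add_copy_action(dev, 42, &err);
 * if (!res)
 *         return -rte_errno;
 */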
/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
+ * Release flow in RX_CP_TBL.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ *   Parent flow for which copying is provided.
*/
-static int
-mlx5_flow_action_queue(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+static void
+flow_mreg_del_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_queue *queue = action->conf;
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_priv *priv = dev->data->dev_private;
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (queue->index >= priv->rxqs_n)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue index out of range");
- if (!(*priv->rxqs)[queue->index])
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue is not configured");
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
- flow->rss.queue_num = 1;
- flow->fate |= MLX5_FLOW_FATE_QUEUE;
- return 0;
+ if (!mcp_res || !priv->mreg_cp_tbl)
+ return;
+ if (flow->copy_applied) {
+ assert(mcp_res->appcnt);
+ flow->copy_applied = 0;
+ --mcp_res->appcnt;
+ if (!mcp_res->appcnt)
+ flow_drv_remove(dev, mcp_res->flow);
+ }
+ /*
+ * We do not check availability of metadata registers here,
+ * because copy resources are allocated in this case.
+ */
+ if (--mcp_res->refcnt)
+ return;
+ assert(mcp_res->flow);
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+ rte_free(mcp_res);
+ flow->mreg_copy = NULL;
}
/**
- * Ensure the @p action will be understood and used correctly by the NIC.
+ * Start flow in RX_CP_TBL.
*
* @param dev
- * Pointer to Ethernet device structure.
- * @param action[in]
- * Pointer to flow actions array.
- * @param flow[in, out]
- * Pointer to the rte_flow structure.
- * @param error[in, out]
- * Pointer to error structure.
+ * Pointer to Ethernet device.
+ * @param flow
+ *   Parent flow for which copying is provided.
*
* @return
- * On success @p flow->queue array and @p flow->rss are filled and valid.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_action_rss(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_mreg_start_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_rss *rss = action->conf;
- unsigned int i;
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ int ret;
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
- rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->func,
- "RSS hash function not supported");
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss->level > 2)
-#else
- if (rss->level > 1)
-#endif
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->level,
- "tunnel RSS is not supported");
- if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too small");
- if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too large");
- if (rss->queue_num > priv->config.ind_table_max_size)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue_num,
- "number of queues too large");
- if (rss->types & MLX5_RSS_HF_MASK)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->types,
- "some RSS protocols are not"
- " supported");
- for (i = 0; i != rss->queue_num; ++i) {
- if (!(*priv->rxqs)[rss->queue[i]])
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i],
- "queue is not configured");
- }
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
- memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
- flow->rss.types = rss->types;
- flow->rss.level = rss->level;
- flow->fate |= MLX5_FLOW_FATE_RSS;
+ if (!mcp_res || flow->copy_applied)
+ return 0;
+ if (!mcp_res->appcnt) {
+ ret = flow_drv_apply(dev, mcp_res->flow, NULL);
+ if (ret)
+ return ret;
+ }
+ ++mcp_res->appcnt;
+ flow->copy_applied = 1;
return 0;
}
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
+ * Stop flow in RX_CP_TBL.
*
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ *   Parent flow for which copying is provided.
*/
-static int
-mlx5_flow_action_flag(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+static void
+flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
- if (flow->modifier & MLX5_FLOW_MOD_FLAG)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- size = 0;
- else if (size <= flow_size && verbs)
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- flow->modifier |= MLX5_FLOW_MOD_FLAG;
- return size;
+ if (!mcp_res || !flow->copy_applied)
+ return;
+ assert(mcp_res->appcnt);
+ --mcp_res->appcnt;
+ flow->copy_applied = 0;
+ if (!mcp_res->appcnt)
+ flow_drv_remove(dev, mcp_res->flow);
}
/**
- * Update verbs specification to modify the flag to mark.
+ * Remove the default copy action from RX_CP_TBL.
*
- * @param[in, out] verbs
- * Pointer to the mlx5_flow_verbs structure.
- * @param[in] mark_id
- * Mark identifier to replace the flag.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct ibv_spec_header *hdr;
- int i;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
- if (!verbs)
+ /* Check if default flow is registered. */
+ if (!priv->mreg_cp_tbl)
return;
- /* Update Verbs specification. */
- hdr = (struct ibv_spec_header *)verbs->specs;
- if (!hdr)
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL);
+ if (!mcp_res)
return;
- for (i = 0; i != verbs->attr->num_of_specs; ++i) {
- if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
- struct ibv_flow_spec_action_tag *t =
- (struct ibv_flow_spec_action_tag *)hdr;
-
- t->tag_id = mlx5_flow_mark_set(mark_id);
- }
- hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
- }
+ assert(mcp_res->flow);
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+ rte_free(mcp_res);
}
/**
- * Convert the @p action into @p flow (or by updating the already present
- * Flag Verbs specification) after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Add the default copy action in RX_CP_TBL.
*
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] error
- * Pointer to error structure.
+ * Perform verbose error reporting if not NULL.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 for success, negative value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_action_mark(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
{
- const struct rte_flow_action_mark *mark = action->conf;
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (!mark)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "configuration cannot be null");
- if (mark->id >= MLX5_FLOW_MARK_MAX)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &mark->id,
- "mark id must in 0 <= id < "
- RTE_STR(MLX5_FLOW_MARK_MAX));
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
- mlx5_flow_verbs_mark_update(verbs, mark->id);
- size = 0;
- } else if (size <= flow_size) {
- tag.tag_id = mlx5_flow_mark_set(mark->id);
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- }
- flow->modifier |= MLX5_FLOW_MOD_MARK;
- return size;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+
+ /* Check whether extensive metadata feature is engaged. */
+ if (!priv->config.dv_flow_en ||
+ priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev) ||
+ !priv->sh->dv_regc0_mask)
+ return 0;
+ mcp_res = flow_mreg_add_copy_action(dev, 0, error);
+ if (!mcp_res)
+ return -rte_errno;
+ return 0;
}
/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- * The conversion is performed action per action, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p action, unless an error is encountered.
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
*
- * @param[in] dev
- * Pointer to Ethernet device structure.
+ * All the flows having a Q/RSS action should be split by
+ * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the
+ * RX_CP_TBL performs the following,
+ * - CQE->flow_tag := reg_c[1] (MARK)
+ * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
+ * but there should be a flow per each MARK ID set by MARK action.
+ *
+ * For the aforementioned reason, if there's a MARK action in flow's action
+ * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
+ * the MARK ID to CQE's flow_tag like,
+ * - If reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For SET_META action which stores value in reg_c[0], as the destination is
+ * also a flow metadata register (reg_b), adding a default flow is enough. Zero
+ * MARK ID means the default flow. The default flow looks like,
+ * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to flow structure.
* @param[in] actions
- * Pointer to flow actions array.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
+ * Pointer to the list of actions.
* @param[out] error
- * Pointer to error structure.
+ * Perform verbose error reporting if not NULL.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p actions has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, negative value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_actions(struct rte_eth_dev *dev,
- const struct rte_flow_action actions[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+flow_mreg_update_copy_table(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
{
- size_t size = 0;
- int remain = flow_size;
- int ret = 0;
-
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ const struct rte_flow_action_mark *mark;
+
+ /* Check whether extensive metadata feature is engaged. */
+ if (!config->dv_flow_en ||
+ config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev) ||
+ !priv->sh->dv_regc0_mask)
+ return 0;
+ /* Find MARK action. */
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_action_flag(actions, flow, remain,
- error);
- break;
+ mcp_res = flow_mreg_add_copy_action
+ (dev, MLX5_FLOW_MARK_DEFAULT, error);
+ if (!mcp_res)
+ return -rte_errno;
+ flow->mreg_copy = mcp_res;
+ if (dev->data->dev_started) {
+ mcp_res->appcnt++;
+ flow->copy_applied = 1;
+ }
+ return 0;
case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_action_mark(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- ret = mlx5_flow_action_drop(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- ret = mlx5_flow_action_queue(dev, actions, flow, error);
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- ret = mlx5_flow_action_rss(dev, actions, flow, error);
- break;
+ mark = (const struct rte_flow_action_mark *)
+ actions->conf;
+ mcp_res =
+ flow_mreg_add_copy_action(dev, mark->id, error);
+ if (!mcp_res)
+ return -rte_errno;
+ flow->mreg_copy = mcp_res;
+ if (dev->data->dev_started) {
+ mcp_res->appcnt++;
+ flow->copy_applied = 1;
+ }
+ return 0;
default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
+ break;
}
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
}
- if (!flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no fate action found");
- return size;
+ return 0;
}
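/*
 * Editorial sketch, not part of the patch: an action list that would
 * trigger a copy-table update. A FLAG action is registered under the
 * default mark value while MARK uses its own id; in both cases
 * flow->mreg_copy ends up pointing at the shared copy resource.
 */
static const struct rte_flow_action_mark ex_cp_mark = { .id = 7 };
static const struct rte_flow_action ex_cp_acts[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &ex_cp_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = NULL /* elided */ },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/* flow_mreg_update_copy_table(dev, flow, ex_cp_acts, &err); */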
+#define MLX5_MAX_SPLIT_ACTIONS 24
+#define MLX5_MAX_SPLIT_ITEMS 24
+
/**
- * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
- * after ensuring the NIC will understand and process it correctly.
- * The conversion is only performed item/action per item/action, each of
- * them is written into the @p flow if its size is lesser or equal to @p
- * flow_size.
- * Validation and memory consumption computation are still performed until the
- * end, unless an error is encountered.
+ * Split the hairpin flow.
+ * Since HW can't support encap on Rx we move the encap to Tx.
+ * If the count action comes after the encap then we also
+ * move the count action. In this case the count will also measure
+ * the outer bytes.
*
- * @param[in] dev
+ * @param dev
* Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[in] attributes
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
+ * @param[out] actions_rx
+ * Rx flow actions.
+ * @param[out] actions_tx
+ *   Tx flow actions.
+ * @param[out] pattern_tx
+ * The pattern items for the Tx flow.
+ * @param[out] flow_id
+ * The flow ID connected to this flow.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the flow has fully been converted and
- * can be applied, otherwise another call with this returned memory size
- * should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success.
*/
static int
-mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
- const size_t flow_size,
- const struct rte_flow_attr *attributes,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct rte_flow local_flow = { .layers = 0, };
- size_t size = sizeof(*flow);
- union {
- struct rte_flow_expand_rss buf;
- uint8_t buffer[2048];
- } expand_buffer;
- struct rte_flow_expand_rss *buf = &expand_buffer.buf;
- struct mlx5_flow_verbs *original_verbs = NULL;
- size_t original_verbs_size = 0;
- uint32_t original_layers = 0;
- int expanded_pattern_idx = 0;
- int ret;
- uint32_t i;
-
- if (size > flow_size)
- flow = &local_flow;
- ret = mlx5_flow_attributes(dev, attributes, flow, error);
- if (ret < 0)
- return ret;
- ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
- if (ret < 0)
- return ret;
- if (local_flow.rss.types) {
- ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
- pattern, local_flow.rss.types,
- mlx5_support_expansion,
- local_flow.rss.level < 2 ?
- MLX5_EXPANSION_ROOT :
- MLX5_EXPANSION_ROOT_OUTER);
- assert(ret > 0 &&
- (unsigned int)ret < sizeof(expand_buffer.buffer));
- } else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)pattern;
- }
- size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
- sizeof(void *));
- if (size <= flow_size)
- flow->queue = (void *)(flow + 1);
- LIST_INIT(&flow->verbs);
- flow->layers = 0;
- flow->modifier = 0;
- flow->fate = 0;
- for (i = 0; i != buf->entries; ++i) {
- size_t off = size;
- size_t off2;
-
- flow->layers = original_layers;
- size += sizeof(struct ibv_flow_attr) +
- sizeof(struct mlx5_flow_verbs);
- off2 = size;
- if (size < flow_size) {
- flow->cur_verbs = (void *)((uintptr_t)flow + off);
- flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
- flow->cur_verbs->specs =
- (void *)(flow->cur_verbs->attr + 1);
- }
- /* First iteration convert the pattern into Verbs. */
- if (i == 0) {
- /* Actions don't need to be converted several time. */
- ret = mlx5_flow_actions(dev, actions, flow,
- (size < flow_size) ?
- flow_size - size : 0,
- error);
- if (ret < 0)
- return ret;
- size += ret;
- } else {
- /*
- * Next iteration means the pattern has already been
- * converted and an expansion is necessary to match
- * the user RSS request. For that only the expanded
- * items will be converted, the common part with the
- * user pattern are just copied into the next buffer
- * zone.
- */
- size += original_verbs_size;
- if (size < flow_size) {
- rte_memcpy(flow->cur_verbs->attr,
- original_verbs->attr,
- original_verbs_size +
- sizeof(struct ibv_flow_attr));
- flow->cur_verbs->size = original_verbs_size;
- }
- }
- ret = mlx5_flow_items
- (dev,
- (const struct rte_flow_item *)
- &buf->entry[i].pattern[expanded_pattern_idx],
- flow,
- (size < flow_size) ? flow_size - size : 0, error);
- if (ret < 0)
- return ret;
- size += ret;
- if (size <= flow_size) {
- mlx5_flow_adjust_priority(dev, flow);
- LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
- }
- /*
- * Keep a pointer of the first verbs conversion and the layers
- * it has encountered.
- */
- if (i == 0) {
- original_verbs = flow->cur_verbs;
- original_verbs_size = size - off2;
- original_layers = flow->layers;
- /*
- * move the index of the expanded pattern to the
- * first item not addressed yet.
- */
- if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
- expanded_pattern_idx++;
- } else {
- const struct rte_flow_item *item = pattern;
-
- for (item = pattern;
- item->type != RTE_FLOW_ITEM_TYPE_END;
- ++item)
- expanded_pattern_idx++;
- }
- }
- }
- /* Restore the origin layers in the flow. */
- flow->layers = original_layers;
- return size;
-}
-
-/**
- * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
- * if several tunnel rules are used on this queue, the tunnel ptype will be
- * cleared.
- *
- * @param rxq_ctrl
- * Rx queue to update.
- */
-static void
-mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- unsigned int i;
- uint32_t tunnel_ptype = 0;
-
- /* Look up for the ptype to use. */
- for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
- if (!rxq_ctrl->flow_tunnels_n[i])
- continue;
- if (!tunnel_ptype) {
- tunnel_ptype = tunnels_info[i].ptype;
- } else {
- tunnel_ptype = 0;
- break;
- }
- }
- rxq_ctrl->rxq.tunnel = tunnel_ptype;
-}
-
-/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to flow structure.
- */
-static void
-mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_hairpin_split(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_action actions_rx[],
+ struct rte_flow_action actions_tx[],
+ struct rte_flow_item pattern_tx[],
+ uint32_t *flow_id)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
- if (tunnel) {
- unsigned int j;
-
- /* Increase the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]++;
- break;
- }
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_raw_encap *raw_encap;
+ const struct rte_flow_action_raw_decap *raw_decap;
+ struct mlx5_rte_flow_action_set_tag *set_tag;
+ struct rte_flow_action *tag_action;
+ struct mlx5_rte_flow_item_tag *tag_item;
+ struct rte_flow_item *item;
+ char *addr;
+ int encap = 0;
+
+ mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ rte_memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (encap) {
+ rte_memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size >
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4))) {
+ memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ encap = 1;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ raw_decap = actions->conf;
+ if (raw_decap->size <
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4))) {
+ memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
}
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ break;
+ default:
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ break;
}
}
+	/* Add set tag action and end action for the Rx flow. */
+ tag_action = actions_rx;
+ tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ actions_rx++;
+ rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
+ actions_rx++;
+ set_tag = (void *)actions_rx;
+ set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
+ assert(set_tag->id > REG_NONE);
+ set_tag->data = *flow_id;
+ tag_action->conf = set_tag;
+ /* Create Tx item list. */
+ rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
+ addr = (void *)&pattern_tx[2];
+ item = pattern_tx;
+ item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ tag_item = (void *)addr;
+ tag_item->data = *flow_id;
+ tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
+	assert(tag_item->id > REG_NONE);
+ item->spec = tag_item;
+ addr += sizeof(struct mlx5_rte_flow_item_tag);
+ tag_item = (void *)addr;
+ tag_item->data = UINT32_MAX;
+ tag_item->id = UINT16_MAX;
+ item->mask = tag_item;
+ addr += sizeof(struct mlx5_rte_flow_item_tag);
+ item->last = NULL;
+ item++;
+ item->type = RTE_FLOW_ITEM_TYPE_END;
+ return 0;
}
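/*
 * Editorial sketch, not part of the patch: the effect of
 * flow_hairpin_split() on a hypothetical ingress list
 *     { VXLAN_ENCAP, QUEUE(hairpin), END }.
 * The Rx side keeps the fate action and gains an internal TAG action
 * carrying the allocated flow_id:
 *     Rx actions: { QUEUE(hairpin), TAG(flow_id), END }
 * The Tx side receives the encap and matches on the same tag:
 *     Tx pattern: { TAG(flow_id), END }
 *     Tx actions: { VXLAN_ENCAP, END }
 */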
/**
- * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
+ * The last stage of the splitting chain; it just creates the subflow
+ * without any modification.
*
* @param dev
* Pointer to Ethernet device.
* @param[in] flow
- * Pointer to the flow.
+ * Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ * Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
*/
-static void
-mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+flow_create_split_inner(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct mlx5_flow **sub_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- assert(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
- if (tunnel) {
- unsigned int j;
+ struct mlx5_flow *dev_flow;
- /* Decrease the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]--;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
+ dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ if (!dev_flow)
+ return -rte_errno;
+ dev_flow->flow = flow;
+ dev_flow->external = external;
+	/* Subflow object was created, we must include it in the list. */
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ if (sub_flow)
+ *sub_flow = dev_flow;
+ return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}
/**
- * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ * Split action list having QUEUE/RSS for metadata register copy.
+ *
+ * Once a Q/RSS action is detected in the user's action list, the action
+ * list should be split in order to copy metadata registers, which will
+ * happen in
+ * RX_CP_TBL like,
+ * - CQE->flow_tag := reg_c[1] (MARK)
+ * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
+ * This is because the last action of each flow must be a terminal action
+ * (QUEUE, RSS or DROP).
+ *
+ * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
+ * stored and kept in the mlx5_flow structure per each sub_flow.
+ *
+ * The Q/RSS action is replaced with,
+ * - SET_TAG, setting the allocated flow ID to reg_c[2].
+ * And the following JUMP action is added at the end,
+ * - JUMP, to RX_CP_TBL.
+ *
+ * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
+ * flow_create_split_metadata() routine. The flow will look like,
+ * - If flow ID matches (reg_c[2]), perform Q/RSS.
*
* @param dev
* Pointer to Ethernet device.
+ * @param[out] split_actions
+ * Pointer to store split actions to jump to CP_TBL.
+ * @param[in] actions
+ * Pointer to the list of original flow actions.
+ * @param[in] qrss
+ * Pointer to the Q/RSS action.
+ * @param[in] actions_n
+ * Number of original actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   non-zero unique flow_id on success, otherwise 0 and
+ *   error/rte_errno are set.
*/
-static void
-mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+static uint32_t
+flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
+ struct rte_flow_action *split_actions,
+ const struct rte_flow_action *actions,
+ const struct rte_flow_action *qrss,
+ int actions_n, struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int idx;
-
- for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
- unsigned int j;
+ struct mlx5_rte_flow_action_set_tag *set_tag;
+ struct rte_flow_action_jump *jump;
+ const int qrss_idx = qrss - actions;
+ uint32_t flow_id;
+ int ret = 0;
- if (!(*priv->rxqs)[idx])
- continue;
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
- ++idx;
- }
+ /*
+ * Given actions will be split
+ * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
+ * - Add jump to mreg CP_TBL.
+ * As a result, there will be one more action.
+ */
+ ++actions_n;
+ /*
+ * Allocate the new subflow ID. This one is unique within
+ * device and not shared with representors. Otherwise,
+	 * we would have to resolve multi-thread access synchronization
+	 * issues. Each flow on the shared device is appended
+ * with source vport identifier, so the resulting
+ * flows will be unique in the shared (by master and
+ * representors) domain even if they have coinciding
+ * IDs.
+ */
+ flow_id = flow_qrss_get_id(dev);
+ if (!flow_id)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't allocate id "
+ "for split Q/RSS subflow");
+ /* Internal SET_TAG action to set flow ID. */
+ set_tag = (void *)(split_actions + actions_n);
+ *set_tag = (struct mlx5_rte_flow_action_set_tag){
+ .data = flow_id,
+ };
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+ if (ret < 0)
+ return ret;
+ set_tag->id = ret;
+ /* JUMP action to jump to mreg copy table (CP_TBL). */
+ jump = (void *)(set_tag + 1);
+ *jump = (struct rte_flow_action_jump){
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ };
+ /* Construct new actions array. */
+ memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+ /* Replace QUEUE/RSS action. */
+ split_actions[qrss_idx] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+ .conf = set_tag,
+ };
+ split_actions[actions_n - 2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = jump,
+ };
+ split_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ return flow_id;
}
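/*
 * Editorial sketch, not part of the patch: the rewrite performed by
 * flow_mreg_split_qrss_prep() on a hypothetical three-entry list
 * (actions_n == 3, END included):
 *     before: { MARK, QUEUE, END }
 *     after:  { MARK, SET_TAG(flow_id -> reg_c[2]), JUMP(CP_TBL), END }
 * QUEUE is replaced in place at qrss_idx, and the JUMP/END pair fills
 * the two trailing slots of the enlarged array.
 */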
/**
- * Validate a flow supported by the NIC.
+ * Extend the given action list for Tx metadata copy.
*
- * @see rte_flow_validate()
- * @see rte_flow_ops
+ * Copy the given action list to ext_actions and add a flow metadata
+ * register copy action in order to copy reg_a set by the WQE to reg_c[0].
+ *
+ * @param[out] ext_actions
+ * Pointer to the extended action list.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[in] actions_n
+ * Number of actions in the list.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, negative value otherwise
*/
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static int
+flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
+ struct rte_flow_action *ext_actions,
+ const struct rte_flow_action *actions,
+ int actions_n, struct rte_flow_error *error)
{
- int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+ struct mlx5_flow_action_copy_mreg *cp_mreg =
+ (struct mlx5_flow_action_copy_mreg *)
+ (ext_actions + actions_n + 1);
+ int ret;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
if (ret < 0)
return ret;
+ cp_mreg->dst = ret;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
+ if (ret < 0)
+ return ret;
+ cp_mreg->src = ret;
+ memcpy(ext_actions, actions,
+ sizeof(*ext_actions) * actions_n);
+ ext_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ ext_actions[actions_n] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
return 0;
}
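/*
 * Editorial sketch, not part of the patch: the extension performed by
 * flow_mreg_tx_copy_prep() for a hypothetical egress list
 * (actions_n == 2, END included):
 *     before: { SET_META, END }
 *     after:  { SET_META, COPY_MREG(reg_a -> reg_c[0]), END }
 * The copy action overwrites the original END slot and a fresh END is
 * appended right after it.
 */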
/**
- * Remove the flow.
+ * The splitting for metadata feature.
*
- * @param[in] dev
+ * - Q/RSS action on NIC Rx should be split in order to pass by
+ * the mreg copy table (RX_CP_TBL) and then it jumps to the
+ * action table (RX_ACT_TBL) which has the split Q/RSS action.
+ *
+ * - All the actions on NIC Tx should have a mreg copy action to
+ * copy reg_a from WQE to reg_c[0].
+ *
+ * @param dev
* Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
*/
-static void
-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+flow_create_split_metadata(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
{
- struct mlx5_flow_verbs *verbs;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ const struct rte_flow_action *qrss = NULL;
+ struct rte_flow_action *ext_actions = NULL;
+ struct mlx5_flow *dev_flow = NULL;
+ uint32_t qrss_id = 0;
+ size_t act_size;
+ int actions_n;
+ int ret;
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->flow) {
- claim_zero(mlx5_glue->destroy_flow(verbs->flow));
- verbs->flow = NULL;
+ /* Check whether extensive metadata feature is engaged. */
+ if (!config->dv_flow_en ||
+ config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev))
+ return flow_create_split_inner(dev, flow, NULL, attr, items,
+ actions, external, error);
+ actions_n = flow_parse_qrss_action(actions, &qrss);
+ if (qrss) {
+ /* Exclude hairpin flows from splitting. */
+ if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue;
+
+ queue = qrss->conf;
+ if (mlx5_rxq_get_type(dev, queue->index) ==
+ MLX5_RXQ_TYPE_HAIRPIN)
+ qrss = NULL;
+ } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss;
+
+ rss = qrss->conf;
+ if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
+ MLX5_RXQ_TYPE_HAIRPIN)
+ qrss = NULL;
}
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ }
+ if (qrss) {
+ /*
+ * Q/RSS action on NIC Rx should be split in order to pass by
+ * the mreg copy table (RX_CP_TBL) and then it jumps to the
+ * action table (RX_ACT_TBL) which has the split Q/RSS action.
+ */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+ sizeof(struct rte_flow_action_set_tag) +
+ sizeof(struct rte_flow_action_jump);
+ ext_actions = rte_zmalloc(__func__, act_size, 0);
+ if (!ext_actions)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no memory to split "
+ "metadata flow");
+ /*
+ * Create the new actions list with removed Q/RSS action
+ * and appended set tag and jump to register copy table
+ * (RX_CP_TBL). We should preallocate unique tag ID here
+ * in advance, because it is needed for set tag action.
+ */
+ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
+ qrss, actions_n, error);
+ if (!qrss_id) {
+ ret = -rte_errno;
+ goto exit;
}
+ } else if (attr->egress && !attr->transfer) {
+ /*
+ * All the actions on NIC Tx should have a metadata register
+		 * copy action to copy reg_a from WQE to reg_c[meta].
+ */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+ sizeof(struct mlx5_flow_action_copy_mreg);
+ ext_actions = rte_zmalloc(__func__, act_size, 0);
+ if (!ext_actions)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no memory to split "
+ "metadata flow");
+ /* Create the action list appended with copy register. */
+ ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
+ actions_n, error);
+ if (ret < 0)
+ goto exit;
+ }
+ /* Add the unmodified original or prefix subflow. */
+ ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
+ ext_actions ? ext_actions : actions,
+ external, error);
+ if (ret < 0)
+ goto exit;
+ assert(dev_flow);
+ if (qrss_id) {
+ const struct rte_flow_attr q_attr = {
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ .ingress = 1,
+ };
+ /* Internal PMD action to set register. */
+ struct mlx5_rte_flow_item_tag q_tag_spec = {
+ .data = qrss_id,
+ .id = 0,
+ };
+ struct rte_flow_item q_items[] = {
+ {
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &q_tag_spec,
+ .last = NULL,
+ .mask = NULL,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action q_actions[] = {
+ {
+ .type = qrss->type,
+ .conf = qrss->conf,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ uint64_t hash_fields = dev_flow->hash_fields;
+ /*
+		 * Put the unique id in the prefix flow because it is destroyed
+		 * after the prefix flow, and the id will be freed only when
+		 * there are no actual flows with this id, so identifier
+		 * reallocation becomes possible (for example, for other flows
+		 * in other threads).
+ */
+ dev_flow->qrss_id = qrss_id;
+ qrss_id = 0;
+ dev_flow = NULL;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+ if (ret < 0)
+ goto exit;
+ q_tag_spec.id = ret;
+ /* Add suffix subflow to execute Q/RSS. */
+ ret = flow_create_split_inner(dev, flow, &dev_flow,
+ &q_attr, q_items, q_actions,
+ external, error);
+ if (ret < 0)
+ goto exit;
+ assert(dev_flow);
+ dev_flow->hash_fields = hash_fields;
}
+
+exit:
+ /*
+ * We do not destroy the partially created sub_flows in case of error.
+	 * These ones are included in the parent flow list and will be destroyed
+ * by flow_drv_destroy.
+ */
+ flow_qrss_free_id(dev, qrss_id);
+ rte_free(ext_actions);
+ return ret;
}
/**
- * Apply the flow.
+ * Split the flow into a set of subflows. The splitters might be linked
+ * in a chain, like this:
+ * flow_create_split_outer() calls:
+ * flow_create_split_meter() calls:
+ * flow_create_split_metadata(meter_subflow_0) calls:
+ * flow_create_split_inner(metadata_subflow_0)
+ * flow_create_split_inner(metadata_subflow_1)
+ * flow_create_split_inner(metadata_subflow_2)
+ * flow_create_split_metadata(meter_subflow_1) calls:
+ * flow_create_split_inner(metadata_subflow_0)
+ * flow_create_split_inner(metadata_subflow_1)
+ * flow_create_split_inner(metadata_subflow_2)
*
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
+ * This provides a flexible way to add new levels of flow splitting.
+ * All successfully created subflows are included in the
+ * parent flow dev_flow list.
*
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * 0 on success, negative value otherwise
*/
static int
-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_create_split_outer(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
{
- struct mlx5_flow_verbs *verbs;
- int err;
-
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (flow->fate & MLX5_FLOW_FATE_DROP) {
- verbs->hrxq = mlx5_hrxq_drop_new(dev);
- if (!verbs->hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get drop hash queue");
- goto error;
- }
- } else {
- struct mlx5_hrxq *hrxq;
-
- hrxq = mlx5_hrxq_get(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get hash queue");
- goto error;
- }
- verbs->hrxq = hrxq;
- }
- verbs->flow =
- mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
- if (!verbs->flow) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "hardware refuses to create flow");
- goto error;
- }
- }
- return 0;
-error:
- err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
- }
- }
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
+ int ret;
+
+ ret = flow_create_split_metadata(dev, flow, attr, items,
+ actions, external, error);
+ assert(ret <= 0);
+ return ret;
}
/**
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ *   Pointer to a TAILQ flow list. If this parameter is NULL,
+ *   no list insertion occurs, the flow is just created, and
+ *   it is the caller's responsibility to track the
+ *   created flow.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise and rte_errno is set.
+ * A flow on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ struct mlx5_flow *dev_flow;
+ const struct rte_flow_action_rss *rss;
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ union {
+ struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+ uint8_t buffer[2048];
+ } actions_rx;
+ union {
+ struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+ uint8_t buffer[2048];
+ } actions_hairpin_tx;
+ union {
+ struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
+ uint8_t buffer[2048];
+ } items_tx;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ const struct rte_flow_action *p_actions_rx = actions;
+ int ret;
+ uint32_t i;
+ uint32_t flow_size;
+ int hairpin_flow = 0;
+ uint32_t hairpin_id = 0;
+ struct rte_flow_attr attr_tx = { .priority = 0 };
+
+ hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ if (hairpin_flow > 0) {
+ if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ flow_hairpin_split(dev, actions, actions_rx.actions,
+ actions_hairpin_tx.actions, items_tx.items,
+ &hairpin_id);
+ p_actions_rx = actions_rx.actions;
+ }
+ ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
+ error);
+ if (ret < 0)
+ goto error_before_flow;
+ flow_size = sizeof(struct rte_flow);
+ rss = flow_get_rss_action(p_actions_rx);
+ if (rss)
+ flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ else
+ flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+ flow = rte_calloc(__func__, 1, flow_size, 0);
+ if (!flow) {
+ rte_errno = ENOMEM;
+ goto error_before_flow;
+ }
+ flow->drv_type = flow_get_drv_type(dev, attr);
+ if (hairpin_id != 0)
+ flow->hairpin_flow_id = hairpin_id;
+ assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ flow->rss.queue = (void *)(flow + 1);
+ if (rss) {
+ /*
+ * The following information is required by
+ * mlx5_flow_hashfields_adjust() in advance.
+ */
+ flow->rss.level = rss->level;
+ /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+ flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+ }
+ LIST_INIT(&flow->dev_flows);
+ if (rss && rss->types) {
+ unsigned int graph_root;
+
+ graph_root = find_graph_root(items, rss->level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ items, rss->types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
+ }
+ for (i = 0; i < buf->entries; ++i) {
+ /*
+ * The splitter may create multiple dev_flows,
+ * depending on configuration. In the simplest
+ * case it just creates the unmodified original flow.
+ */
+ ret = flow_create_split_outer(dev, flow, attr,
+ buf->entry[i].pattern,
+ p_actions_rx, external,
+ error);
+ if (ret < 0)
+ goto error;
+ }
+ /* Create the tx flow. */
+ if (hairpin_flow) {
+ attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
+ attr_tx.ingress = 0;
+ attr_tx.egress = 1;
+ dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+ actions_hairpin_tx.actions, error);
+ if (!dev_flow)
+ goto error;
+ dev_flow->flow = flow;
+ dev_flow->external = 0;
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ ret = flow_drv_translate(dev, dev_flow, &attr_tx,
+ items_tx.items,
+ actions_hairpin_tx.actions, error);
+ if (ret < 0)
+ goto error;
+ }
+ /*
+ * Update the metadata register copy table. If the extensive
+ * metadata feature is enabled and the registers are supported,
+ * an extra rte_flow may be created for each unique
+ * MARK/FLAG action ID.
+ *
+ * The table is updated for ingress flows only, because
+ * egress flows belong to a different device and the
+ * copy table should be updated in the peer NIC Rx domain.
+ */
+ if (attr->ingress &&
+ (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+ ret = flow_mreg_update_copy_table(dev, flow, actions, error);
+ if (ret)
+ goto error;
+ }
+ if (dev->data->dev_started) {
+ ret = flow_drv_apply(dev, flow, error);
+ if (ret < 0)
+ goto error;
+ }
+ if (list)
+ TAILQ_INSERT_TAIL(list, flow, next);
+ flow_rxq_flags_set(dev, flow);
+ return flow;
+error_before_flow:
+ if (hairpin_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ hairpin_id);
+ return NULL;
+error:
+ assert(flow);
+ flow_mreg_del_copy_action(dev, flow);
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (flow->hairpin_flow_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ flow->hairpin_flow_id);
+ flow_drv_destroy(dev, flow);
+ rte_free(flow);
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
+
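+/*
+ * Usage sketch (caller-side): passing a NULL list creates an untracked
+ * flow that the caller must destroy itself, mirroring how the metadata
+ * register discovery below uses this helper:
+ *
+ *   struct rte_flow_error error;
+ *   struct rte_flow *flow;
+ *
+ *   flow = flow_list_create(dev, NULL, &attr, items, actions,
+ *                           false, &error);
+ *   if (!flow)
+ *       return -rte_errno;
+ *   ...
+ *   flow_list_destroy(dev, NULL, flow);
+ */
+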
+/**
+ * Create a dedicated flow rule on e-switch table 0 (root table) to direct
+ * all incoming packets to table 1.
+ *
+ * Other flow rules, requested for group n, are created in
+ * e-switch table n+1.
+ * A jump action to e-switch group n is translated to group n+1.
+ *
+ * Used when working in switchdev mode, to take advantage of table 1
+ * and above.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow on success, NULL otherwise and rte_errno is set.
*/
-static struct rte_flow *
-mlx5_flow_list_create(struct rte_eth_dev *dev,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
{
- struct rte_flow *flow = NULL;
- size_t size = 0;
- int ret;
+ const struct rte_flow_attr attr = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0,
+ .transfer = 1,
+ };
+ const struct rte_flow_item pattern = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ struct rte_flow_action_jump jump = {
+ .group = 1,
+ };
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0)
- return NULL;
- size = ret;
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "not enough memory to create flow");
- return NULL;
- }
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0) {
- rte_free(flow);
- return NULL;
- }
- assert((size_t)ret == size);
- if (dev->data->dev_started) {
- ret = mlx5_flow_apply(dev, flow, error);
- if (ret < 0) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow) {
- mlx5_flow_remove(dev, flow);
- rte_free(flow);
- }
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
- }
- }
- TAILQ_INSERT_TAIL(list, flow, next);
- mlx5_flow_rxq_flags_set(dev, flow);
- return flow;
+ return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
+ actions, false, &error);
}
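+/*
+ * Call-site sketch (condition name hypothetical): expected to run once at
+ * port start when E-Switch (transfer) mode is enabled, so every packet
+ * first hits table 0 and is immediately jumped to table 1:
+ *
+ *   if (esw_enabled && !mlx5_flow_create_esw_table_zero_flow(dev))
+ *       return -rte_errno;
+ */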
/**
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- return mlx5_flow_list_create
- (dev, &((struct priv *)dev->data->dev_private)->flows,
- attr, items, actions, error);
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ return flow_list_create(dev, &priv->flows,
+ attr, items, actions, true, error);
}
/**
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ * Pointer to a TAILQ flow list. If this parameter is NULL,
+ * the flow is not removed from any list.
* @param[in] flow
* Flow to destroy.
*/
static void
-mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
- struct rte_flow *flow)
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
- mlx5_flow_remove(dev, flow);
- TAILQ_REMOVE(list, flow, next);
+ struct mlx5_priv *priv = dev->data->dev_private;
+
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
*/
if (dev->data->dev_started)
- mlx5_flow_rxq_flags_trim(dev, flow);
+ flow_rxq_flags_trim(dev, flow);
+ if (flow->hairpin_flow_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ flow->hairpin_flow_id);
+ flow_drv_destroy(dev, flow);
+ if (list)
+ TAILQ_REMOVE(list, flow, next);
+ flow_mreg_del_copy_action(dev, flow);
+ rte_free(flow->fdir);
rte_free(flow);
}
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- mlx5_flow_list_destroy(dev, list, flow);
+ flow_list_destroy(dev, list, flow);
}
}
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
- mlx5_flow_remove(dev, flow);
- mlx5_flow_rxq_flags_clear(dev);
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ flow_drv_remove(dev, flow);
+ flow_mreg_stop_copy_action(dev, flow);
+ }
+ flow_mreg_del_default_copy_action(dev);
+ flow_rxq_flags_clear(dev);
}
/**
struct rte_flow_error error;
int ret = 0;
+ /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+ ret = flow_mreg_add_default_copy_action(dev, &error);
+ if (ret < 0)
+ return -rte_errno;
+ /* Apply Flows created by application. */
TAILQ_FOREACH(flow, list, next) {
- ret = mlx5_flow_apply(dev, flow, &error);
+ ret = flow_mreg_start_copy_action(dev, flow);
+ if (ret < 0)
+ goto error;
+ ret = flow_drv_apply(dev, flow, &error);
if (ret < 0)
goto error;
- mlx5_flow_rxq_flags_set(dev, flow);
+ flow_rxq_flags_set(dev, flow);
}
return 0;
error:
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
return ret;
}
+/**
+ * Enable default hairpin egress flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue
+ * The queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
+ uint32_t queue)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .egress = 1,
+ .priority = 0,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_spec = {
+ .queue = queue,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ .queue = UINT32_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .spec = &queue_spec,
+ .last = NULL,
+ .mask = &queue_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_jump jump = {
+ .group = MLX5_HAIRPIN_TX_TABLE,
+ };
+ struct rte_flow_action actions[2];
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
+ actions[0].conf = &jump;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+ flow = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
+ if (!flow) {
+ DRV_LOG(DEBUG,
+ "Failed to create ctrl flow: rte_errno(%d),"
+ " type(%d), message(%s)",
+ rte_errno, error.type,
+ error.message ? error.message : " (no stated reason)");
+ return -rte_errno;
+ }
+ return 0;
+}
+
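+/*
+ * Call-site sketch (helper name hypothetical): one such flow would be
+ * created per hairpin Tx queue at traffic-enable time, steering packets
+ * sent on that queue into the hairpin Tx table:
+ *
+ *   for (i = 0; i != priv->txqs_n; ++i)
+ *       if (txq_is_hairpin(dev, i) &&
+ *           mlx5_ctrl_flow_source_queue(dev, i))
+ *           goto error;
+ */
+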
/**
* Enable a control flow configured from the control plane.
*
struct rte_flow_item_vlan *vlan_spec,
struct rte_flow_item_vlan *vlan_mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
.priority = MLX5_FLOW_PRIO_RSVD,
},
{
.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
- RTE_FLOW_ITEM_TYPE_END,
+ RTE_FLOW_ITEM_TYPE_END,
.spec = vlan_spec,
.last = NULL,
.mask = vlan_mask,
struct rte_flow_error error;
unsigned int i;
- if (!priv->reta_idx_n) {
- rte_errno = EINVAL;
- return -rte_errno;
+ if (!priv->reta_idx_n || !priv->rxqs_n) {
+ return 0;
}
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
- actions, &error);
+ flow = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
if (!flow)
return -rte_errno;
return 0;
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_flow_list_destroy(dev, &priv->flows, flow);
+ flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
return 0;
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
return 0;
}
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_drv_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type ftype = flow->drv_type;
+
+ assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(ftype);
+
+ return fops->query(dev, flow, actions, data, error);
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = flow_drv_query(dev, flow, actions, data, error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
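+/*
+ * Application-side sketch: this entry point backs rte_flow_query(); for
+ * instance, reading a COUNT action (assuming the flow carries one):
+ *
+ *   struct rte_flow_query_count count = { .reset = 0 };
+ *   const struct rte_flow_action action[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_COUNT },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ *   if (!rte_flow_query(port_id, flow, action, &count, &error))
+ *       printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
+ *              count.hits, count.bytes);
+ */
+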
/**
* Convert a flow director filter to a generic flow.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
+flow_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
const struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = input->flow.ip4_flow.src_ip,
.dst_addr = input->flow.ip4_flow.dst_ip,
.time_to_live = input->flow.ip4_flow.ttl,
.type_of_service = input->flow.ip4_flow.tos,
- .next_proto_id = input->flow.ip4_flow.proto,
};
- attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+ attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = mask->ipv4_mask.src_ip,
.dst_addr = mask->ipv4_mask.dst_ip,
.time_to_live = mask->ipv4_mask.ttl,
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
.hop_limits = input->flow.ipv6_flow.hop_limits,
.proto = input->flow.ipv6_flow.proto,
};
/* Handle L4. */
switch (fdir_filter->input.flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- attributes->l4.udp.hdr = (struct udp_hdr){
+ attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp4_flow.src_port,
.dst_port = input->flow.udp4_flow.dst_port,
};
- attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- attributes->l4.tcp.hdr = (struct tcp_hdr){
+ attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
.src_port = input->flow.tcp4_flow.src_port,
.dst_port = input->flow.tcp4_flow.dst_port,
};
- attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- attributes->l4.udp.hdr = (struct udp_hdr){
+ attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp6_flow.src_port,
.dst_port = input->flow.udp6_flow.dst_port,
};
- attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- attributes->l4.tcp.hdr = (struct tcp_hdr){
+ attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
.src_port = input->flow.tcp6_flow.src_port,
.dst_port = input->flow.tcp6_flow.dst_port,
};
- attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
return 0;
}
+#define FLOW_FDIR_CMP(f1, f2, fld) \
+ memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
+
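+/*
+ * For example, FLOW_FDIR_CMP(f1, f2, l3) expands to
+ * memcmp(&(f1)->l3, &(f2)->l3, sizeof(f1->l3)): a raw byte-wise
+ * comparison of the embedded L3 specification.
+ */
+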
+/**
+ * Compare two FDIR flows. If items and actions are identical, the two
+ * flows are regarded as the same.
+ *
+ * @param f1
+ * FDIR flow to compare.
+ * @param f2
+ * FDIR flow to compare.
+ *
+ * @return
+ * Zero on match, 1 otherwise.
+ */
+static int
+flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
+{
+ if (FLOW_FDIR_CMP(f1, f2, attr) ||
+ FLOW_FDIR_CMP(f1, f2, l2) ||
+ FLOW_FDIR_CMP(f1, f2, l2_mask) ||
+ FLOW_FDIR_CMP(f1, f2, l3) ||
+ FLOW_FDIR_CMP(f1, f2, l3_mask) ||
+ FLOW_FDIR_CMP(f1, f2, l4) ||
+ FLOW_FDIR_CMP(f1, f2, l4_mask) ||
+ FLOW_FDIR_CMP(f1, f2, actions[0].type))
+ return 1;
+ if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
+ FLOW_FDIR_CMP(f1, f2, queue))
+ return 1;
+ return 0;
+}
+
+/**
+ * Search the device flow list for a matching FDIR flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param fdir_flow
+ * FDIR flow to lookup.
+ *
+ * @return
+ * Pointer to the flow if found, NULL otherwise.
+ */
+static struct rte_flow *
+flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+
+ assert(fdir_flow);
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
+ DRV_LOG(DEBUG, "port %u found FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ break;
+ }
+ }
+ return flow;
+}
+
/**
* Add new flow director filter and store it in list.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_add(struct rte_eth_dev *dev,
+flow_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_fdir attributes = {
- .attr.group = 0,
- .l2_mask = {
- .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
- .type = 0,
- },
- };
- struct rte_flow_error error;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
int ret;
- ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
+ fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
+ if (!fdir_flow) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
if (ret)
- return ret;
- flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
- attributes.items, attributes.actions,
- &error);
+ goto error;
+ flow = flow_fdir_filter_lookup(dev, fdir_flow);
if (flow) {
- DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
- (void *)flow);
- return 0;
+ rte_errno = EEXIST;
+ goto error;
}
+ flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+ fdir_flow->items, fdir_flow->actions, true,
+ NULL);
+ if (!flow)
+ goto error;
+ assert(!flow->fdir);
+ flow->fdir = fdir_flow;
+ DRV_LOG(DEBUG, "port %u created FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ return 0;
+error:
+ rte_free(fdir_flow);
return -rte_errno;
}
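+/*
+ * Reached through the legacy filter-control API (sketch):
+ *
+ *   struct rte_eth_fdir_filter f = { 0 };
+ *
+ *   f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ *   ...
+ *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                           RTE_ETH_FILTER_ADD, &f);
+ */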
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
- const struct rte_eth_fdir_filter *fdir_filter
- __rte_unused)
+flow_fdir_filter_delete(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter)
{
- rte_errno = ENOTSUP;
- return -rte_errno;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+ struct mlx5_fdir fdir_flow = {
+ .attr.group = 0,
+ };
+ int ret;
+
+ ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
+ if (ret)
+ return -rte_errno;
+ flow = flow_fdir_filter_lookup(dev, &fdir_flow);
+ if (!flow) {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
+ flow_list_destroy(dev, &priv->flows, flow);
+ DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
+ dev->data->port_id, (void *)flow);
+ return 0;
}
/**
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_update(struct rte_eth_dev *dev,
+flow_fdir_filter_update(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
int ret;
- ret = mlx5_fdir_filter_delete(dev, fdir_filter);
+ ret = flow_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- return mlx5_fdir_filter_add(dev, fdir_filter);
+ return flow_fdir_filter_add(dev, fdir_filter);
}
/**
* Pointer to Ethernet device.
*/
static void
-mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
+flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
}
* Resulting flow director information.
*/
static void
-mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
void *arg)
{
enum rte_fdir_mode fdir_mode =
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- return mlx5_fdir_filter_add(dev, arg);
+ return flow_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
- return mlx5_fdir_filter_update(dev, arg);
+ return flow_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
- return mlx5_fdir_filter_delete(dev, arg);
+ return flow_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
- mlx5_fdir_filter_flush(dev);
+ flow_fdir_filter_flush(dev);
break;
case RTE_ETH_FILTER_INFO:
- mlx5_fdir_info_get(dev, arg);
+ flow_fdir_info_get(dev, arg);
break;
default:
DRV_LOG(DEBUG, "port %u unknown operation %u",
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- return mlx5_fdir_ctrl_func(dev, filter_op, arg);
+ return flow_fdir_ctrl_func(dev, filter_op, arg);
default:
DRV_LOG(ERR, "port %u filter type (%d) not supported",
dev->data->port_id, filter_type);
}
return 0;
}
+
+/**
+ * Create the needed meter and suffix tables.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to table set on success, NULL otherwise.
+ */
+struct mlx5_meter_domains_infos *
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_mtr_tbls(dev);
+}
+
+/**
+ * Destroy the meter table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] tbl
+ * Pointer to the meter table set.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_meter_domains_infos *tbls)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->destroy_mtr_tbls(dev, tbls);
+}
+
+#define MLX5_POOL_QUERY_FREQ_US 1000000
+
+/**
+ * Set the periodic procedure for triggering asynchronous batch queries for all
+ * the counter pools.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_ibv_shared object.
+ */
+void
+mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
+{
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
+ uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
+ uint32_t us;
+
+ cont = MLX5_CNT_CONTAINER(sh, 1, 0);
+ pools_n += rte_atomic16_read(&cont->n_valid);
+ us = MLX5_POOL_QUERY_FREQ_US / pools_n;
+ DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
+ if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
+ sh->cmng.query_thread_on = 0;
+ DRV_LOG(ERR, "Cannot reinitialize query alarm");
+ } else {
+ sh->cmng.query_thread_on = 1;
+ }
+}
+
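+/*
+ * Interval arithmetic example: the query budget is spread evenly across
+ * pools, so with MLX5_POOL_QUERY_FREQ_US = 1000000 and four valid pools
+ * the alarm fires every 250000 us; since each alarm advances to the next
+ * pool, every pool is still queried about once per second overall.
+ */
+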
+/**
+ * The periodic procedure for triggering asynchronous batch queries for all
+ * the counter pools. This function runs in the host thread via the EAL
+ * alarm mechanism.
+ *
+ * @param[in] arg
+ * The parameter for the alarm process.
+ */
+void
+mlx5_flow_query_alarm(void *arg)
+{
+ struct mlx5_ibv_shared *sh = arg;
+ struct mlx5_devx_obj *dcs;
+ uint16_t offset;
+ int ret;
+ uint8_t batch = sh->cmng.batch;
+ uint16_t pool_index = sh->cmng.pool_index;
+ struct mlx5_pools_container *cont;
+ struct mlx5_pools_container *mcont;
+ struct mlx5_flow_counter_pool *pool;
+
+ if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
+ goto set_alarm;
+next_container:
+ cont = MLX5_CNT_CONTAINER(sh, batch, 1);
+ mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
+ /* Check if a resize was done and the container needs to be flipped. */
+ if (cont != mcont) {
+ if (cont->pools) {
+ /* Clean the old container. */
+ rte_free(cont->pools);
+ memset(cont, 0, sizeof(*cont));
+ }
+ rte_cio_wmb();
+ /* Flip the host container. */
+ sh->cmng.mhi[batch] ^= (uint8_t)2;
+ cont = mcont;
+ }
+ if (!cont->pools) {
+ /* 2 empty containers case is unexpected. */
+ if (unlikely(batch != sh->cmng.batch))
+ goto set_alarm;
+ batch ^= 0x1;
+ pool_index = 0;
+ goto next_container;
+ }
+ pool = cont->pools[pool_index];
+ if (pool->raw_hw)
+ /* There is a pool query in progress. */
+ goto set_alarm;
+ pool->raw_hw = LIST_FIRST(&sh->cmng.free_stat_raws);
+ if (!pool->raw_hw)
+ /* No free counter statistics raw memory. */
+ goto set_alarm;
+ dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
+ (&pool->a64_dcs);
+ offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+ ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
+ offset, NULL, NULL,
+ pool->raw_hw->mem_mng->dm->id,
+ (void *)(uintptr_t)
+ (pool->raw_hw->data + offset),
+ sh->devx_comp,
+ (uint64_t)(uintptr_t)pool);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
+ " %d", pool->min_dcs->id);
+ pool->raw_hw = NULL;
+ goto set_alarm;
+ }
+ pool->raw_hw->min_dcs_id = dcs->id;
+ LIST_REMOVE(pool->raw_hw, next);
+ sh->cmng.pending_queries++;
+ pool_index++;
+ if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
+ batch ^= 0x1;
+ pool_index = 0;
+ }
+set_alarm:
+ sh->cmng.batch = batch;
+ sh->cmng.pool_index = pool_index;
+ mlx5_set_query_alarm(sh);
+}
+
+/**
+ * Handler for the HW response with the ready values of an asynchronous
+ * batch query. This function runs in the host thread.
+ *
+ * @param[in] sh
+ * The pointer to the shared IB device context.
+ * @param[in] async_id
+ * The Devx async ID.
+ * @param[in] status
+ * The status of the completion.
+ */
+void
+mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+ uint64_t async_id, int status)
+{
+ struct mlx5_flow_counter_pool *pool =
+ (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
+ struct mlx5_counter_stats_raw *raw_to_free;
+
+ if (unlikely(status)) {
+ raw_to_free = pool->raw_hw;
+ } else {
+ raw_to_free = pool->raw;
+ rte_spinlock_lock(&pool->sl);
+ pool->raw = pool->raw_hw;
+ rte_spinlock_unlock(&pool->sl);
+ rte_atomic64_add(&pool->query_gen, 1);
+ /* Be sure the new raw counters data is updated in memory. */
+ rte_cio_wmb();
+ }
+ LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
+ pool->raw_hw = NULL;
+ sh->cmng.pending_queries--;
+}
+
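+/*
+ * Reader-side sketch (field names of the raw record assumed): consumers
+ * of the completed statistics should take pool->sl around their access
+ * to pool->raw, mirroring the swap above:
+ *
+ *   rte_spinlock_lock(&pool->sl);
+ *   hits = rte_be_to_cpu_64(pool->raw->data[offset].hits);
+ *   bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
+ *   rte_spinlock_unlock(&pool->sl);
+ */
+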
+/**
+ * Translate the rte_flow group index to HW table value.
+ *
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[in] external
+ * Set when the flow rule is created by a request external to the PMD.
+ * @param[in] group
+ * rte_flow group index value.
+ * @param[out] table
+ * HW table value.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
+ uint32_t group, uint32_t *table,
+ struct rte_flow_error *error)
+{
+ if (attributes->transfer && external) {
+ if (group == UINT32_MAX)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "group index not supported");
+ *table = group + 1;
+ } else {
+ *table = group;
+ }
+ return 0;
+}
+
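+/*
+ * Worked example: an external transfer rule requested for group 3 is
+ * programmed into e-switch table 4, while internal (PMD) rules and
+ * non-transfer rules map one-to-one:
+ *
+ *   uint32_t table;
+ *
+ *   mlx5_flow_group_to_table(&attr, true, 3, &table, &error);
+ *   (with attr.transfer set, table is now 4; otherwise it stays 3)
+ */
+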
+/**
+ * Discover availability of metadata reg_c's.
+ *
+ * Iteratively use test flows to check availability.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ enum modify_reg idx;
+ int n = 0;
+
+ /* reg_c[0] and reg_c[1] are reserved. */
+ config->flow_mreg_c[n++] = REG_C_0;
+ config->flow_mreg_c[n++] = REG_C_1;
+ /* Discover availability of other reg_c's. */
+ for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
+ struct rte_flow_attr attr = {
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ .priority = MLX5_FLOW_PRIO_RSVD,
+ .ingress = 1,
+ };
+ struct rte_flow_item items[] = {
+ [0] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ [0] = {
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &(struct mlx5_flow_action_copy_mreg){
+ .src = REG_C_1,
+ .dst = idx,
+ },
+ },
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &(struct rte_flow_action_jump){
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ },
+ },
+ [2] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ if (!config->dv_flow_en)
+ break;
+ /* Create internal flow, validation skips copy action. */
+ flow = flow_list_create(dev, NULL, &attr, items,
+ actions, false, &error);
+ if (!flow)
+ continue;
+ if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
+ config->flow_mreg_c[n++] = idx;
+ flow_list_destroy(dev, NULL, flow);
+ }
+ for (; n < MLX5_MREG_C_NUM; ++n)
+ config->flow_mreg_c[n] = REG_NONE;
+ return 0;
+}