* Copyright 2016 Mellanox Technologies, Ltd
*/
+#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
+#include "mlx5_flow.h"
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/* Pattern outer Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-
-/* Pattern inner Layer bits. */
-#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
-#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
-#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
-#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
-#define MLX5_FLOW_LAYER_GRE (1u << 14)
-#define MLX5_FLOW_LAYER_MPLS (1u << 15)
-
-/* Outer Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
- (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
- (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-#define MLX5_FLOW_LAYER_OUTER \
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
- MLX5_FLOW_LAYER_OUTER_L4)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
- (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
-
-/* Inner Masks. */
-#define MLX5_FLOW_LAYER_INNER_L3 \
- (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
-#define MLX5_FLOW_LAYER_INNER_L4 \
- (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
-#define MLX5_FLOW_LAYER_INNER \
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
- MLX5_FLOW_LAYER_INNER_L4)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-#define MLX5_FLOW_FATE_RSS (1u << 2)
-
-/* Modify a packet. */
-#define MLX5_FLOW_MOD_FLAG (1u << 0)
-#define MLX5_FLOW_MOD_MARK (1u << 1)
-#define MLX5_FLOW_MOD_COUNT (1u << 2)
-
-/* Actions */
-#define MLX5_FLOW_ACTION_DROP (1u << 0)
-#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
-#define MLX5_FLOW_ACTION_RSS (1u << 2)
-#define MLX5_FLOW_ACTION_FLAG (1u << 3)
-#define MLX5_FLOW_ACTION_MARK (1u << 4)
-#define MLX5_FLOW_ACTION_COUNT (1u << 5)
-
-#define MLX5_FLOW_FATE_ACTIONS \
- (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)
-
-/* possible L3 layers protocols filtering. */
-#define MLX5_IP_PROTOCOL_TCP 6
-#define MLX5_IP_PROTOCOL_UDP 17
-#define MLX5_IP_PROTOCOL_GRE 47
-#define MLX5_IP_PROTOCOL_MPLS 147
-
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
-
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
},
};
-/** Verbs flow specification and related resources. */
-struct mlx5_flow_verbs {
- LIST_ENTRY(mlx5_flow_verbs) next;
- unsigned int size; /**< Size of the attribute. */
- struct {
- struct ibv_flow_attr *attr;
- /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
- };
- struct ibv_flow *flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
-};
-
-/** Device flow structure. */
-struct mlx5_flow {
- LIST_ENTRY(mlx5_flow) next;
- struct rte_flow *flow; /**< Pointer to the main flow. */
- union {
- struct mlx5_flow_verbs verbs; /**< Holds the verbs dev-flow. */
- };
-};
-
-/* Counters information. */
-struct mlx5_flow_counter {
- LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
- uint32_t shared:1; /**< Share counter ID with other flow rules. */
- uint32_t ref_cnt:31; /**< Reference counter. */
- uint32_t id; /**< Counter ID. */
- struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
- uint64_t hits; /**< Number of packets matched by the rule. */
- uint64_t bytes; /**< Number of bytes matched by the rule. */
-};
-
-/* Flow structure. */
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct rte_flow_attr attributes; /**< User flow attribute. */
- uint32_t layers;
- /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
- uint32_t modifier;
- /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
- uint32_t fate;
- /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
- LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
- struct mlx5_flow_verbs *cur_verbs;
- /**< Current Verbs flow structure being filled. */
- struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
- struct rte_flow_action_rss rss;/**< RSS context. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
- void *nl_flow; /**< Netlink flow buffer if relevant. */
- LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
- /**< Device flows that are part of the flow. */
-};
-
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
struct rte_flow_action_queue queue;
};
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
-
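/*
 * Layout illustration (sketch, not taken from this patch): Verbs
 * specifications are packed back to back right after the
 * struct ibv_flow_attr, each one starting with the header above, so the
 * buffer can be walked by hopping hdr->size bytes at a time:
 *
 *	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
 *		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 *	unsigned int i;
 *
 *	for (i = 0; i != attr->num_of_specs; ++i) {
 *		... inspect hdr->type / hdr->size here ...
 *		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
 *	}
 *
 * This is the walk used below by mlx5_flow_item_vlan_update(),
 * mlx5_flow_item_gre_ip_protocol_update() and mlx5_flow_verbs_mark_update().
 */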
-/*
- * Number of sub priorities.
- * For each kind of pattern matching, i.e. L2, L3, L4, to get a correct
- * match on the NIC (firmware dependent), L4 must have the highest priority,
- * followed by L3 and ending with L2.
- */
-#define MLX5_PRIORITY_MAP_L2 2
-#define MLX5_PRIORITY_MAP_L3 1
-#define MLX5_PRIORITY_MAP_L4 0
-#define MLX5_PRIORITY_MAP_MAX 3
-
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
},
};
+/* Holds the NIC operations that should be used. */
+struct mlx5_flow_driver_ops nic_ops;
+
/**
 * Discover the maximum number of priorities available.
*
* @param[in] dev
- * Pointer to Ethernet device.
+ * Pointer to the Ethernet device structure.
*
* @return
 * number of supported flow priorities on success, a negative errno
}
/**
- * Adjust flow priority.
+ * Adjust flow priority based on the highest layer and the requested priority.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Pointer to an rte flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
*/
-static void
-mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+			  uint32_t subpriority)
{
+ uint32_t res = 0;
struct priv *priv = dev->data->dev_private;
- uint32_t priority = flow->attributes.priority;
- uint32_t subpriority = flow->cur_verbs->attr->priority;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
- priority = priority_map_3[priority][subpriority];
+ res = priority_map_3[priority][subpriority];
break;
case RTE_DIM(priority_map_5):
- priority = priority_map_5[priority][subpriority];
+ res = priority_map_5[priority][subpriority];
break;
}
- flow->cur_verbs->attr->priority = priority;
-}
-
-/**
- * Get a flow counter.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
- *
- * @return
- * A pointer to the counter, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_counter *
-mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter *cnt;
-
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
- if (!cnt->shared || cnt->shared != shared)
- continue;
- if (cnt->id != id)
- continue;
- cnt->ref_cnt++;
- return cnt;
- }
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .ref_cnt = 1,
- .id = id,
- .cs = mlx5_glue->create_counter_set
- (priv->ctx,
- &(struct ibv_counter_set_init_attr){
- .counter_set_id = id,
- }),
- .hits = 0,
- .bytes = 0,
- };
-
- if (!tmpl.cs) {
- rte_errno = errno;
- return NULL;
- }
- cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
- if (!cnt) {
- rte_errno = ENOMEM;
- return NULL;
- }
- *cnt = tmpl;
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
- return cnt;
-#endif
- rte_errno = ENOTSUP;
- return NULL;
-}
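/*
 * Sharing illustration (usage sketch; call sites are outside this hunk):
 * two COUNT actions with shared set and the same id resolve to a single
 * counter set, the first call allocating it and the second only bumping
 * ref_cnt, so each mlx5_flow_counter_new() must be balanced by one
 * mlx5_flow_counter_release():
 *
 *	struct mlx5_flow_counter *a = mlx5_flow_counter_new(dev, 1, 42);
 *	struct mlx5_flow_counter *b = mlx5_flow_counter_new(dev, 1, 42);
 *	... a == b ...
 *	mlx5_flow_counter_release(b);
 *	mlx5_flow_counter_release(a);	... counter set destroyed here ...
 */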
-
-/**
- * Release a flow counter.
- *
- * @param[in] counter
- * Pointer to the counter handler.
- */
-static void
-mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
-{
- if (--counter->ref_cnt == 0) {
- claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
- LIST_REMOVE(counter, next);
- rte_free(counter);
- }
-}
-
-/**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in] attributes
- * Pointer to flow attributes
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- *
- * @return
- * 0 on success.
- */
-static int
-mlx5_flow_attributes(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attributes,
- struct rte_flow *flow)
-{
- struct priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
-
- flow->attributes = *attributes;
- if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
- flow->attributes.priority = priority_max;
- return 0;
+ return res;
}
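/*
 * Illustration (assumes a device exposing 8 Verbs priorities, i.e. the
 * priority_map_3 case above): an L4 match in a rule created with
 * attr->priority == 1 resolves to priority_map_3[1][MLX5_PRIORITY_MAP_L4],
 * i.e. 2, while its L2 spec would get priority_map_3[1][MLX5_PRIORITY_MAP_L2],
 * i.e. 4, keeping L4 matching ahead of L3 and L2 as the firmware expects:
 *
 *	uint32_t prio = mlx5_flow_adjust_priority(dev, 1,
 *						  MLX5_PRIORITY_MAP_L4);
 *	assert(prio == 2);
 */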
/**
}
/**
- * Add a verbs item specification into @p flow.
+ * Look up and set the tunnel ptype in the Rx queue data. Only a single
+ * ptype can be used; if several tunnel rules match on this queue, the
+ * tunnel ptype is cleared.
*
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] src
- * Create specification.
- * @param[in] size
- * Size in bytes of the specification to copy.
+ * @param rxq_ctrl
+ * Rx queue to update.
*/
static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (verbs->specs) {
- void *dst;
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
- dst = (void *)(verbs->specs + verbs->size);
- memcpy(dst, src, size);
- ++verbs->attr->num_of_specs;
+ /* Look up the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
+ }
}
- verbs->size += size;
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
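/*
 * The tunnels_info[] table walked above is outside this hunk; a minimal
 * sketch of the shape implied by its use here (field names assumed):
 *
 *	struct mlx5_flow_tunnel_info {
 *		uint32_t tunnel;	... MLX5_FLOW_LAYER_* tunnel bit ...
 *		uint32_t ptype;	... RTE_PTYPE_* to expose on the queue ...
 *	};
 *	static struct mlx5_flow_tunnel_info tunnels_info[MLX5_FLOW_TUNNEL];
 */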
/**
- * Adjust verbs hash fields according to the @p flow information.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
*
- * @param[in, out] flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
* Pointer to flow structure.
- * @param[in] tunnel
- * 1 when the hash field is for a tunnel item.
- * @param[in] layer_types
- * ETH_RSS_* types.
- * @param[in] hash_fields
- * Item hash fields.
*/
static void
-mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
- int tunnel __rte_unused,
- uint32_t layer_types, uint64_t hash_fields)
-{
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
- if (flow->rss.level == 2 && !tunnel)
- hash_fields = 0;
- else if (flow->rss.level < 2 && tunnel)
- hash_fields = 0;
-#endif
- if (!(flow->rss.types & layer_types))
- hash_fields = 0;
- flow->cur_verbs->hash_fields |= hash_fields;
-}
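/*
 * Level illustration (assuming HAVE_IBV_DEVICE_TUNNEL_SUPPORT): with
 * flow->rss.level == 2 (inner RSS) an outer IPv4 item contributes no hash
 * fields, while the inner IPv4 item ORs in
 * IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 plus IBV_RX_HASH_INNER,
 * provided ETH_RSS_IPV4 and friends are present in flow->rss.types; with
 * level 0 or 1 the roles are reversed and tunneled items are ignored.
 */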
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- const struct rte_flow_item_eth *spec = item->spec;
- const struct rte_flow_item_eth *mask = item->mask;
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- const unsigned int size = sizeof(struct ibv_flow_spec_eth);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ unsigned int i;
- if (!mask)
- mask = &rte_flow_item_eth_mask;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- if (size > flow_size)
- return size;
- if (spec) {
- unsigned int i;
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
- memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth.val.ether_type = spec->type;
- memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth.mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
- eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- eth.val.ether_type &= eth.mask.ether_type;
}
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, &eth, size);
- return size;
}
/**
- * Update the VLAN tag in the Verbs Ethernet specification.
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with
+ * @p flow if no other flow uses them with the same kind of request.
*
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] eth
- * Verbs structure containing the VLAN information to copy.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
*/
static void
-mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
- struct ibv_flow_spec_eth *eth)
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- const enum ibv_flow_spec_type search = eth->type;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- struct ibv_flow_spec_eth *e =
- (struct ibv_flow_spec_eth *)hdr;
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
- e->val.vlan_tag = eth->val.vlan_tag;
- e->mask.vlan_tag = eth->mask.vlan_tag;
- e->val.ether_type = eth->val.ether_type;
- e->mask.ether_type = eth->mask.ether_type;
- break;
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
}
}
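/*
 * Pairing sketch (creation/removal paths are outside this hunk): every
 * mlx5_flow_rxq_flags_set(dev, flow) done when a flow is applied must be
 * balanced by one mlx5_flow_rxq_flags_trim(dev, flow) when it is
 * destroyed, so that flow_mark_n and flow_tunnels_n[] drop back to zero
 * and rxq.mark/rxq.tunnel are cleared only with the last interested flow.
 */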
/**
- * Convert the @p item into @p flow (or by updating the already present
- * Ethernet Verbs) specification after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
*
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * @param dev
+ * Pointer to Ethernet device.
*/
-static int
-mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
- const struct rte_flow_item_vlan *spec = item->spec;
- const struct rte_flow_item_vlan *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_eth);
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
- const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
- if (!mask)
- mask = &rte_flow_item_vlan_mask;
- if (spec) {
- eth.val.vlan_tag = spec->tci;
- eth.mask.vlan_tag = mask->tci;
- eth.val.vlan_tag &= eth.mask.vlan_tag;
- eth.val.ether_type = spec->inner_type;
- eth.mask.ether_type = mask->inner_type;
- eth.val.ether_type &= eth.mask.ether_type;
- }
- if (!(flow->layers & l2m)) {
- if (size <= flow_size) {
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, &eth, size);
- }
- } else {
- if (flow->cur_verbs)
- mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
- &eth);
- size = 0; /* Only an update is done in eth specification. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
}
- flow->layers |= tunnel ?
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
- return size;
}
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+/*
+ * Validate the flag action.
*
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] action_flags
+ * Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
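/*
 * The validator body is outside this hunk; a minimal sketch consistent
 * with the comment above (function name and error wording assumed):
 *
 *	static int
 *	mlx5_flow_validate_action_flag(uint64_t action_flags,
 *				       struct rte_flow_error *error)
 *	{
 *		if (action_flags & MLX5_FLOW_ACTION_MARK)
 *			return rte_flow_error_set(error, EINVAL,
 *					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 *					"can't mark and flag in same flow");
 *		if (action_flags & MLX5_FLOW_ACTION_FLAG)
 *			return rte_flow_error_set(error, EINVAL,
 *					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 *					"can't have 2 flag actions in same"
 *					" flow");
 *		return 0;
 *	}
 */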
-static int
-mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
-{
- const struct rte_flow_item_ipv4 *spec = item->spec;
- const struct rte_flow_item_ipv4 *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
- struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = IBV_FLOW_SPEC_IPV4_EXT |
- (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
-
- if (!mask)
- mask = &rte_flow_item_ipv4_mask;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (spec) {
- ipv4.val = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4.val.src_ip &= ipv4.mask.src_ip;
- ipv4.val.dst_ip &= ipv4.mask.dst_ip;
- ipv4.val.proto &= ipv4.mask.proto;
- ipv4.val.tos &= ipv4.mask.tos;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv4, size);
- }
- return size;
-}
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
-{
- const struct rte_flow_item_ipv6 *spec = item->spec;
- const struct rte_flow_item_ipv6 *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
- struct ibv_flow_spec_ipv6 ipv6 = {
- .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
-
- if (!mask)
- mask = &rte_flow_item_ipv6_mask;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (spec) {
- unsigned int i;
- uint32_t vtc_flow_val;
- uint32_t vtc_flow_mask;
-
- memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6.val.src_ip));
- memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6.val.dst_ip));
- memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6.mask.src_ip));
- memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6.mask.dst_ip));
- vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
- vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
- ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.val.next_hdr = spec->hdr.proto;
- ipv6.val.hop_limit = spec->hdr.hop_limits;
- ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.mask.next_hdr = mask->hdr.proto;
- ipv6.mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
- ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
- ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
- }
- ipv6.val.flow_label &= ipv6.mask.flow_label;
- ipv6.val.traffic_class &= ipv6.mask.traffic_class;
- ipv6.val.next_hdr &= ipv6.mask.next_hdr;
- ipv6.val.hop_limit &= ipv6.mask.hop_limit;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX |
- ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX),
- (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv6, size);
- }
- return size;
-}
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
-{
- const struct rte_flow_item_udp *spec = item->spec;
- const struct rte_flow_item_udp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp udp = {
- .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
-
- if (!mask)
- mask = &rte_flow_item_udp_mask;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- if (spec) {
- udp.val.dst_port = spec->hdr.dst_port;
- udp.val.src_port = spec->hdr.src_port;
- udp.mask.dst_port = mask->hdr.dst_port;
- udp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp.val.src_port &= udp.mask.src_port;
- udp.val.dst_port &= udp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
- (IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &udp, size);
- }
- return size;
-}
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
-{
- const struct rte_flow_item_tcp *spec = item->spec;
- const struct rte_flow_item_tcp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp tcp = {
- .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
-
- if (!mask)
- mask = &rte_flow_item_tcp_mask;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- if (spec) {
- tcp.val.dst_port = spec->hdr.dst_port;
- tcp.val.src_port = spec->hdr.src_port;
- tcp.mask.dst_port = mask->hdr.dst_port;
- tcp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp.val.src_port &= tcp.mask.src_port;
- tcp.val.dst_port &= tcp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
- (IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &tcp, size);
- }
- return size;
-}
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size)
-{
- const struct rte_flow_item_vxlan *spec = item->spec;
- const struct rte_flow_item_vxlan *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
-
- if (!mask)
- mask = &rte_flow_item_vxlan_mask;
- if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan.mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN;
- return size;
-}
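/*
 * VNI packing illustration: the 24-bit VNI occupies the three low-order
 * bytes of the 32-bit big-endian tunnel_id, hence the copy into
 * &id.vni[1] while byte 0 stays zero:
 *
 *	union vni id = { .vlan_id = 0, };
 *	uint8_t vni[3] = { 0x12, 0x34, 0x56 };	... wire-order VNI 0x123456 ...
 *	memcpy(&id.vni[1], vni, 3);
 *	... id.vlan_id now holds the big-endian 32-bit value 0x00123456 ...
 */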
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_vxlan_gpe(const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size)
-{
- const struct rte_flow_item_vxlan_gpe *spec = item->spec;
- const struct rte_flow_item_vxlan_gpe *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan_gpe = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
- union vni {
- uint32_t vlan_id;
- uint8_t vni[4];
- } id = { .vlan_id = 0, };
-
- if (!mask)
- mask = &rte_flow_item_vxlan_gpe_mask;
- if (spec) {
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan_gpe.val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan_gpe.mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
- return size;
-}
-
-/**
- * Update the protocol in Verbs IPv4/IPv6 spec.
- *
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] search
- * Specification type to search in order to update the IP protocol.
- * @param[in] protocol
- * Protocol value to set if none is present in the specification.
- */
-static void
-mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
- enum ibv_flow_spec_type search,
- uint8_t protocol)
-{
- unsigned int i;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- if (!attr)
- return;
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- union {
- struct ibv_flow_spec_ipv4_ext *ipv4;
- struct ibv_flow_spec_ipv6 *ipv6;
- } ip;
-
- switch (search) {
- case IBV_FLOW_SPEC_IPV4_EXT:
- ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
- if (!ip.ipv4->val.proto) {
- ip.ipv4->val.proto = protocol;
- ip.ipv4->mask.proto = 0xff;
- }
- break;
- case IBV_FLOW_SPEC_IPV6:
- ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
- if (!ip.ipv6->val.next_hdr) {
- ip.ipv6->val.next_hdr = protocol;
- ip.ipv6->mask.next_hdr = 0xff;
- }
- break;
- default:
- break;
- }
- break;
- }
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
- }
-}
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * It will also update the previous L3 layer with the protocol value matching
- * the GRE.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- */
-static int
-mlx5_flow_item_gre(const struct rte_flow_item *item __rte_unused,
- struct rte_flow *flow, const size_t flow_size)
-{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- const struct rte_flow_item_gre *spec = item->spec;
- const struct rte_flow_item_gre *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_gre);
- struct ibv_flow_spec_gre tunnel = {
- .type = IBV_FLOW_SPEC_GRE,
- .size = size,
- };
-#else
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel tunnel = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
-#endif
-
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- if (!mask)
- mask = &rte_flow_item_gre_mask;
- if (spec) {
- tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
- tunnel.val.protocol = spec->protocol;
- tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
- tunnel.mask.protocol = mask->protocol;
- /* Remove unwanted bits from values. */
- tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
- tunnel.val.protocol &= tunnel.mask.protocol;
- tunnel.val.key &= tunnel.mask.key;
- }
-#else
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- if (size <= flow_size) {
- if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
- MLX5_IP_PROTOCOL_GRE);
- else
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV6,
- MLX5_IP_PROTOCOL_GRE);
- mlx5_flow_spec_verbs_add(flow, &tunnel, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_GRE;
- return size;
-}
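/*
 * Backfill illustration: for a pattern such as "eth / ipv4 / gre" where
 * the user left hdr.next_proto_id unspecified, the update above writes
 * proto = MLX5_IP_PROTOCOL_GRE (47) with mask 0xff into the IPv4 spec
 * already present in the Verbs buffer; a non-zero user-supplied protocol
 * is left untouched.
 */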
-
-/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] item
- * Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
- struct rte_flow *flow __rte_unused,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
-{
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- const struct rte_flow_item_mpls *spec = item->spec;
- const struct rte_flow_item_mpls *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_mpls);
- struct ibv_flow_spec_mpls mpls = {
- .type = IBV_FLOW_SPEC_MPLS,
- .size = size,
- };
-
- if (!mask)
- mask = &rte_flow_item_mpls_mask;
- if (spec) {
- memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
- memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
- /* Remove unwanted bits from values. */
- mpls.val.label &= mpls.mask.label;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &mpls, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_MPLS;
- return size;
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MPLS is not supported by Verbs, please"
- " update.");
-}
-
-/**
- * Convert the @p pattern into a Verbs specifications after ensuring the NIC
- * will understand and process it correctly.
- * The conversion is performed item per item, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p pattern, unless an error is encountered.
- *
- * @param[in] pattern
- * Flow pattern.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p pattern has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_items(const struct rte_flow_item pattern[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- int remain = flow_size;
- size_t size = 0;
-
- for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
- int ret = 0;
-
- switch (pattern->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- ret = mlx5_flow_item_eth(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_item_vlan(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_item_ipv4(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_item_ipv6(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- ret = mlx5_flow_item_udp(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_item_tcp(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_item_vxlan(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- ret = mlx5_flow_item_vxlan_gpe(pattern, flow,
- remain);
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_item_gre(pattern, flow, remain);
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern,
- "item not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->layers) {
- const struct rte_flow_item item = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- };
-
- return mlx5_flow_item_eth(&item, flow, flow_size);
- }
- return size;
-}
-
-/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_action_drop(struct rte_flow *flow, const size_t flow_size)
-{
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- struct ibv_flow_spec_action_drop drop = {
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
- };
-
- if (size <= flow_size)
- mlx5_flow_spec_verbs_add(flow, &drop, size);
- flow->fate |= MLX5_FLOW_FATE_DROP;
- return size;
-}
-
-/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_action_queue(const struct rte_flow_action *action,
- struct rte_flow *flow)
-{
- const struct rte_flow_action_queue *queue = action->conf;
-
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
- flow->rss.queue_num = 1;
- flow->fate |= MLX5_FLOW_FATE_QUEUE;
- return 0;
-}
-
-/**
- * Ensure the @p action will be understood and used correctly by the NIC.
- *
- * @param[in] action
- * Action configuration.
- * @param flow[in, out]
- * Pointer to the rte_flow structure.
- *
- * @return
- * 0 On success.
- */
-static int
-mlx5_flow_action_rss(const struct rte_flow_action *action,
- struct rte_flow *flow)
-{
- const struct rte_flow_action_rss *rss = action->conf;
-
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
- memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
- flow->rss.types = rss->types;
- flow->rss.level = rss->level;
- flow->fate |= MLX5_FLOW_FATE_RSS;
- return 0;
-}
-
-/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- */
-static int
-mlx5_flow_action_flag(struct rte_flow *flow, const size_t flow_size)
-{
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- size = 0;
- else if (size <= flow_size && verbs)
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- flow->modifier |= MLX5_FLOW_MOD_FLAG;
- return size;
-}
-
-/**
- * Update verbs specification to modify the flag to mark.
- *
- * @param[in, out] verbs
- * Pointer to the mlx5_flow_verbs structure.
- * @param[in] mark_id
- * Mark identifier to replace the flag.
- */
-static void
-mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
-{
- struct ibv_spec_header *hdr;
- int i;
-
- if (!verbs)
- return;
- /* Update Verbs specification. */
- hdr = (struct ibv_spec_header *)verbs->specs;
- if (!hdr)
- return;
- for (i = 0; i != verbs->attr->num_of_specs; ++i) {
- if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
- struct ibv_flow_spec_action_tag *t =
- (struct ibv_flow_spec_action_tag *)hdr;
-
- t->tag_id = mlx5_flow_mark_set(mark_id);
- }
- hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
- }
-}
-
-/**
- * Convert the @p action into @p flow (or by updating the already present
- * Flag Verbs specification) after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- */
-static int
-mlx5_flow_action_mark(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size)
-{
- const struct rte_flow_action_mark *mark = action->conf;
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
- mlx5_flow_verbs_mark_update(verbs, mark->id);
- size = 0;
- } else if (size <= flow_size) {
- tag.tag_id = mlx5_flow_mark_set(mark->id);
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- }
- flow->modifier |= MLX5_FLOW_MOD_MARK;
- return size;
-}
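/*
 * FLAG/MARK interplay illustration: both actions materialize as a single
 * IBV_FLOW_SPEC_ACTION_TAG. A FLAG converted after a MARK is a no-op
 * (size 0) since the mark already tags matching packets, while a MARK
 * converted after a FLAG rewrites the existing tag in place through
 * mlx5_flow_verbs_mark_update() and likewise consumes no extra space.
 */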
-
-/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param action[in]
- * Action configuration.
- * @param flow[in, out]
- * Pointer to flow structure.
- * @param flow_size[in]
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param error[in, out]
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_action_count(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_count *count = action->conf;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_flow_spec_counter_action counter = {
- .type = IBV_FLOW_SPEC_ACTION_COUNT,
- .size = size,
- };
-#endif
-
- if (!flow->counter) {
- flow->counter = mlx5_flow_counter_new(dev, count->shared,
- count->id);
- if (!flow->counter)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "cannot get counter"
- " context.");
- }
- flow->modifier |= MLX5_FLOW_MOD_COUNT;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- counter.counter_set_handle = flow->counter->cs->handle;
- if (size <= flow_size)
- mlx5_flow_spec_verbs_add(flow, &counter, size);
- return size;
-#endif
- return 0;
-}
-
-/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- * The conversion is performed action per action, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p action, unless an error is encountered.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in] actions
- * Pointer to flow actions array.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p actions has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_actions(struct rte_eth_dev *dev,
- const struct rte_flow_action actions[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- size_t size = 0;
- int remain = flow_size;
- int ret = 0;
-
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_action_flag(flow, remain);
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_action_mark(actions, flow, remain);
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- ret = mlx5_flow_action_drop(flow, remain);
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- ret = mlx5_flow_action_queue(actions, flow);
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- ret = mlx5_flow_action_rss(actions, flow);
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_action_count(dev, actions, flow, remain,
- error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no fate action found");
- return size;
-}
-
-/**
- * Validate flow rule and fill flow structure accordingly.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param[out] flow
- * Pointer to flow structure.
- * @param flow_size
- * Size of allocated space for @p flow.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * A positive value representing the size of the flow object in bytes
- * regardless of @p flow_size on success, a negative errno value otherwise
- * and rte_errno is set.
- */
-static int
-mlx5_flow_merge_switch(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- size_t flow_size,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
- uint16_t port_id[!n + n];
- struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
- size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
- unsigned int i;
- unsigned int own = 0;
- int ret;
-
- /* At least one port is needed when no switch domain is present. */
- if (!n) {
- n = 1;
- port_id[0] = dev->data->port_id;
- } else {
- n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
- }
- for (i = 0; i != n; ++i) {
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id[i], &dev_info);
- if (port_id[i] == dev->data->port_id)
- own = i;
- ptoi[i].port_id = port_id[i];
- ptoi[i].ifindex = dev_info.if_index;
- }
- /* Ensure first entry of ptoi[] is the current device. */
- if (own) {
- ptoi[n] = ptoi[0];
- ptoi[0] = ptoi[own];
- ptoi[own] = ptoi[n];
- }
- /* An entry with zero ifindex terminates ptoi[]. */
- ptoi[n].port_id = 0;
- ptoi[n].ifindex = 0;
- if (flow_size < off)
- flow_size = 0;
- ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
- flow_size ? flow_size - off : 0,
- ptoi, attr, pattern, actions, error);
- if (ret < 0)
- return ret;
- if (flow_size) {
- *flow = (struct rte_flow){
- .attributes = *attr,
- .nl_flow = (uint8_t *)flow + off,
- };
- /*
- * Generate a reasonably unique handle based on the address
- * of the target buffer.
- *
- * This is straightforward on 32-bit systems where the flow
- * pointer can be used directly. Otherwise, its least
- * significant part is taken after shifting it by the
- * previous power of two of the pointed buffer size.
- */
- if (sizeof(flow) <= 4)
- mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
- else
- mlx5_nl_flow_brand
- (flow->nl_flow,
- (uintptr_t)flow >>
- rte_log2_u32(rte_align32prevpow2(flow_size)));
- }
- return off + ret;
-}
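/*
 * Branding illustration (64-bit case): with flow_size == 1000,
 * rte_align32prevpow2(1000) == 512 and rte_log2_u32(512) == 9, so the
 * handle written into the Netlink flow is (uintptr_t)flow >> 9; distinct
 * buffers of that size are more than 512 bytes apart, which keeps the
 * truncated pointers reasonably unique.
 */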
-
-static unsigned int
-mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
-{
- const struct rte_flow_item *item;
- unsigned int has_vlan = 0;
-
- for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- has_vlan = 1;
- break;
- }
- }
- if (has_vlan)
- return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
- MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
- return rss_level < 2 ? MLX5_EXPANSION_ROOT :
- MLX5_EXPANSION_ROOT_OUTER;
-}
-
-/**
- * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
- * after ensuring the NIC will understand and process it correctly.
- * The conversion is only performed item/action per item/action, each of
- * them is written into the @p flow if its size is lesser or equal to @p
- * flow_size.
- * Validation and memory consumption computation are still performed until the
- * end, unless an error is encountered.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[in] attributes
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the flow has fully been converted and
- * can be applied, otherwise another call with this returned memory size
- * should be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
- const size_t flow_size,
- const struct rte_flow_attr *attributes,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct rte_flow local_flow = { .layers = 0, };
- size_t size = sizeof(*flow);
- union {
- struct rte_flow_expand_rss buf;
- uint8_t buffer[2048];
- } expand_buffer;
- struct rte_flow_expand_rss *buf = &expand_buffer.buf;
- struct mlx5_flow_verbs *original_verbs = NULL;
- size_t original_verbs_size = 0;
- uint32_t original_layers = 0;
- int expanded_pattern_idx = 0;
- int ret = 0;
- uint32_t i;
-
- if (attributes->transfer)
- return mlx5_flow_merge_switch(dev, flow, flow_size,
- attributes, pattern,
- actions, error);
- if (size > flow_size)
- flow = &local_flow;
- ret = mlx5_flow_attributes(dev->data->dev_private, attributes, flow);
- if (ret < 0)
- return ret;
- ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
- if (ret < 0)
- return ret;
- if (local_flow.rss.types) {
- unsigned int graph_root;
-
- graph_root = mlx5_find_graph_root(pattern,
- local_flow.rss.level);
- ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
- pattern, local_flow.rss.types,
- mlx5_support_expansion,
- graph_root);
- assert(ret > 0 &&
- (unsigned int)ret < sizeof(expand_buffer.buffer));
- } else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)pattern;
- }
- size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
- sizeof(void *));
- if (size <= flow_size)
- flow->queue = (void *)(flow + 1);
- LIST_INIT(&flow->verbs);
- flow->layers = 0;
- flow->modifier = 0;
- flow->fate = 0;
- for (i = 0; i != buf->entries; ++i) {
- size_t off = size;
- size_t off2;
-
- flow->layers = original_layers;
- size += sizeof(struct ibv_flow_attr) +
- sizeof(struct mlx5_flow_verbs);
- off2 = size;
- if (size < flow_size) {
- flow->cur_verbs = (void *)((uintptr_t)flow + off);
- flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
- flow->cur_verbs->specs =
- (void *)(flow->cur_verbs->attr + 1);
- }
- /* First iteration convert the pattern into Verbs. */
- if (i == 0) {
- /* Actions don't need to be converted several time. */
- ret = mlx5_flow_actions(dev, actions, flow,
- (size < flow_size) ?
- flow_size - size : 0,
- error);
- if (ret < 0)
- return ret;
- size += ret;
- } else {
- /*
- * Next iteration means the pattern has already been
- * converted and an expansion is necessary to match
- * the user RSS request. For that only the expanded
- * items will be converted, the common part with the
- * user pattern are just copied into the next buffer
- * zone.
- */
- size += original_verbs_size;
- if (size < flow_size) {
- rte_memcpy(flow->cur_verbs->attr,
- original_verbs->attr,
- original_verbs_size +
- sizeof(struct ibv_flow_attr));
- flow->cur_verbs->size = original_verbs_size;
- }
- }
- ret = mlx5_flow_items
- ((const struct rte_flow_item *)
- &buf->entry[i].pattern[expanded_pattern_idx],
- flow,
- (size < flow_size) ? flow_size - size : 0, error);
- if (ret < 0)
- return ret;
- size += ret;
- if (size <= flow_size) {
- mlx5_flow_adjust_priority(dev, flow);
- LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
- }
- /*
- * Keep a pointer of the first verbs conversion and the layers
- * it has encountered.
- */
- if (i == 0) {
- original_verbs = flow->cur_verbs;
- original_verbs_size = size - off2;
- original_layers = flow->layers;
- /*
- * move the index of the expanded pattern to the
- * first item not addressed yet.
- */
- if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
- expanded_pattern_idx++;
- } else {
- const struct rte_flow_item *item = pattern;
-
- for (item = pattern;
- item->type != RTE_FLOW_ITEM_TYPE_END;
- ++item)
- expanded_pattern_idx++;
- }
- }
- }
- /* Restore the origin layers in the flow. */
- flow->layers = original_layers;
- return size;
-}
-
-/**
- * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
- * if several tunnel rules are used on this queue, the tunnel ptype will be
- * cleared.
- *
- * @param rxq_ctrl
- * Rx queue to update.
- */
-static void
-mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- unsigned int i;
- uint32_t tunnel_ptype = 0;
-
- /* Look up for the ptype to use. */
- for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
- if (!rxq_ctrl->flow_tunnels_n[i])
- continue;
- if (!tunnel_ptype) {
- tunnel_ptype = tunnels_info[i].ptype;
- } else {
- tunnel_ptype = 0;
- break;
- }
- }
- rxq_ctrl->rxq.tunnel = tunnel_ptype;
-}
-
-/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to flow structure.
- */
-static void
-mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
- if (tunnel) {
- unsigned int j;
-
- /* Increase the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]++;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
-}
-
-/**
- * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to the flow.
- */
-static void
-mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- assert(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
- if (tunnel) {
- unsigned int j;
-
- /* Decrease the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]--;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
-}
-
-/**
- * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
- *
- * @param dev
- * Pointer to Ethernet device.
- */
-static void
-mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
-
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
- unsigned int j;
-
- if (!(*priv->rxqs)[i])
- continue;
- rxq_ctrl = container_of((*priv->rxqs)[i],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
- }
-}
-
-/*
- * Validate the flag action.
- *
- * @param[in] action_flags
- * Bit-fields that holds the actions detected until now.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
+int
mlx5_flow_validate_action_flag(uint64_t action_flags,
struct rte_flow_error *error)
{
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_flow_error *error)
* @return
 * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_action_drop(uint64_t action_flags,
struct rte_flow_error *error)
{
* @return
 * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
* @return
 * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
* @return
 * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
int64_t item_flags,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
int64_t item_flags,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_UDP)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_TCP)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_flow_error *error)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_eth_dev *dev,
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
const struct rte_flow_item_gre *mask = item->mask;
int ret;
- if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_GRE)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
uint64_t item_flags __rte_unused,
uint8_t target_protocol __rte_unused,
const struct rte_flow_item_mpls *mask = item->mask;
int ret;
- if (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_MPLS)
+ if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
" update.");
}
-/**
- * Internal validation function.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] attr
- * Pointer to the flow attributes.
- * @param[in] items
- * Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
- * @param[out] error
- * Pointer to the error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
- */
-static int mlx5_flow_verbs_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- int ret;
- uint32_t action_flags = 0;
- uint32_t item_flags = 0;
- int tunnel = 0;
- uint8_t next_protocol = 0xff;
-
- if (items == NULL)
- return -1;
- ret = mlx5_flow_validate_attributes(dev, attr, error);
- if (ret < 0)
- return ret;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int ret = 0;
- switch (items->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- ret = mlx5_flow_validate_item_eth(items, item_flags,
- error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_validate_item_vlan(items, item_flags,
- error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- ret = mlx5_flow_validate_item_udp(items, item_flags,
- next_protocol,
- error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_validate_item_tcp(items, item_flags,
- next_protocol, error);
- if (ret < 0)
- return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_validate_item_vxlan(items, item_flags,
- error);
- if (ret < 0)
- return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- ret = mlx5_flow_validate_item_vxlan_gpe(items,
- item_flags,
- dev, error);
- if (ret < 0)
- return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_validate_item_gre(items, item_flags,
- next_protocol, error);
- if (ret < 0)
- return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_validate_item_mpls(items, item_flags,
- next_protocol,
- error);
- if (ret < 0)
- return ret;
- if (next_protocol != 0xff &&
- next_protocol != MLX5_IP_PROTOCOL_MPLS)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, items,
- "protocol filtering not compatible"
- " with MPLS layer");
- item_flags |= MLX5_FLOW_LAYER_MPLS;
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "item not supported");
- }
- }
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_validate_action_flag(action_flags,
- error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_FLAG;
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_validate_action_mark(actions,
- action_flags,
- error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_MARK;
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- ret = mlx5_flow_validate_action_drop(action_flags,
- error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_DROP;
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- ret = mlx5_flow_validate_action_queue(actions,
- action_flags, dev,
- error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_QUEUE;
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- ret = mlx5_flow_validate_action_rss(actions,
- action_flags, dev,
- error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_RSS;
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_validate_action_count(dev, error);
- if (ret < 0)
- return ret;
- action_flags |= MLX5_FLOW_ACTION_COUNT;
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- }
- return 0;
-}
-
/**
* Validate a flow supported by the NIC.
*
{
int ret;
- ret = mlx5_flow_verbs_validate(dev, attr, items, actions, error);
+ ret = nic_ops.validate(dev, attr, items, actions, error);
if (ret < 0)
return ret;
return 0;
}
-/**
- * Calculate the required bytes that are needed for the action part of the verbs
- * flow, in addtion returns bit-fields with all the detected action, in order to
- * avoid another interation over the actions.
- *
- * @param[in] actions
- * Pointer to the list of actions.
- * @param[out] action_flags
- * Pointer to the detected actions.
- *
- * @return
- * The size of the memory needed for all actions.
- */
-static int
-mlx5_flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
- uint64_t *action_flags)
-{
- int size = 0;
- uint64_t detected_actions = 0;
-
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- size += sizeof(struct ibv_flow_spec_action_tag);
- detected_actions |= MLX5_FLOW_ACTION_FLAG;
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- size += sizeof(struct ibv_flow_spec_action_tag);
- detected_actions |= MLX5_FLOW_ACTION_MARK;
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- size += sizeof(struct ibv_flow_spec_action_drop);
- detected_actions |= MLX5_FLOW_ACTION_DROP;
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- detected_actions |= MLX5_FLOW_ACTION_QUEUE;
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- detected_actions |= MLX5_FLOW_ACTION_RSS;
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- size += sizeof(struct ibv_flow_spec_counter_action);
-#endif
- detected_actions |= MLX5_FLOW_ACTION_COUNT;
- break;
- default:
- break;
- }
- }
- *action_flags = detected_actions;
- return size;
-}
-
-/**
- * Calculate the required bytes that are needed for the item part of the verbs
- * flow, in addtion returns bit-fields with all the detected action, in order to
- * avoid another interation over the actions.
- *
- * @param[in] actions
- * Pointer to the list of items.
- * @param[in, out] item_flags
- * Pointer to the detected items.
- *
- * @return
- * The size of the memory needed for all items.
- */
-static int
-mlx5_flow_verbs_get_items_and_size(const struct rte_flow_item items[],
- uint64_t *item_flags)
-{
- int size = 0;
- uint64_t detected_items = 0;
- const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
-
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- switch (items->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- size += sizeof(struct ibv_flow_spec_eth);
- detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- size += sizeof(struct ibv_flow_spec_eth);
- detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- size += sizeof(struct ibv_flow_spec_ipv4_ext);
- detected_items |= tunnel ?
- MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- size += sizeof(struct ibv_flow_spec_ipv6);
- detected_items |= tunnel ?
- MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- size += sizeof(struct ibv_flow_spec_tcp_udp);
- detected_items |= tunnel ?
- MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- size += sizeof(struct ibv_flow_spec_tcp_udp);
- detected_items |= tunnel ?
- MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- size += sizeof(struct ibv_flow_spec_tunnel);
- detected_items |= MLX5_FLOW_LAYER_VXLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- size += sizeof(struct ibv_flow_spec_tunnel);
- detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- size += sizeof(struct ibv_flow_spec_gre);
- detected_items |= MLX5_FLOW_LAYER_GRE;
-#else
- size += sizeof(struct ibv_flow_spec_tunnel);
- detected_items |= MLX5_FLOW_LAYER_TUNNEL;
-#endif
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- size += sizeof(struct ibv_flow_spec_mpls);
- detected_items |= MLX5_FLOW_LAYER_MPLS;
-#endif
- break;
- default:
- break;
- }
- }
- *item_flags = detected_items;
- return size;
-}
-
/**
* Get RSS action from the action list.
*
return NULL;
}
-/**
- * Internal preparation function. Allocate mlx5_flow with the required size.
- * The required size is calculate based on the actions and items. This function
- * also returns the detected actions and items for later use.
- *
- * @param[in] attr
- * Pointer to the flow attributes.
- * @param[in] items
- * Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
- * @param[out] item_flags
- * Pointer to bit mask of all items detected.
- * @param[out] action_flags
- * Pointer to bit mask of all actions detected.
- * @param[out] error
- * Pointer to the error structure.
- *
- * @return
- * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
- * is set.
- */
-static struct mlx5_flow *
-mlx5_flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- uint64_t *item_flags,
- uint64_t *action_flags,
- struct rte_flow_error *error)
-{
- uint32_t size = sizeof(struct ibv_flow_attr);
- struct mlx5_flow *flow;
-
- size += mlx5_flow_verbs_get_actions_and_size(actions, action_flags);
- size += mlx5_flow_verbs_get_items_and_size(items, item_flags);
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "not enough memory to create flow");
- return NULL;
- }
- return flow;
-}
-
-/**
- * Remove the flow.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- */
-static void
-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_verbs *verbs;
-
- if (flow->nl_flow && priv->mnl_socket)
- mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->flow) {
- claim_zero(mlx5_glue->destroy_flow(verbs->flow));
- verbs->flow = NULL;
- }
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
- }
- }
- if (flow->counter) {
- mlx5_flow_counter_release(flow->counter);
- flow->counter = NULL;
- }
-}
-
-/**
- * Apply the flow.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+static unsigned int
+mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_verbs *verbs;
- int err;
-
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (flow->fate & MLX5_FLOW_FATE_DROP) {
- verbs->hrxq = mlx5_hrxq_drop_new(dev);
- if (!verbs->hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get drop hash queue");
- goto error;
- }
- } else {
- struct mlx5_hrxq *hrxq;
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
- hrxq = mlx5_hrxq_get(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num,
- !!(flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
- if (!hrxq) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get hash queue");
- goto error;
- }
- verbs->hrxq = hrxq;
- }
- verbs->flow =
- mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
- if (!verbs->flow) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "hardware refuses to create flow");
- goto error;
- }
- }
- if (flow->nl_flow &&
- priv->mnl_socket &&
- mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
- goto error;
- return 0;
-error:
- err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
}
}
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
{
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
- size_t size = 0;
uint64_t action_flags = 0;
uint64_t item_flags = 0;
const struct rte_flow_action_rss *rss;
struct rte_flow_expand_rss *buf = &expand_buffer.buf;
int ret;
uint32_t i;
+ uint32_t flow_size;
ret = mlx5_flow_validate(dev, attr, items, actions, error);
if (ret < 0)
return NULL;
- flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
- LIST_INIT(&flow->dev_flows);
+ flow_size = sizeof(struct rte_flow);
rss = mlx5_flow_get_rss_action(actions);
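+	/* Reserve room after the flow structure for the queue list; at least
+	 * one entry is needed so a queue action can reuse the same storage.
+	 */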
+ if (rss)
+ flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ else
+ flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+	flow = rte_calloc(__func__, 1, flow_size, 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not enough memory to create flow");
+		return NULL;
+	}
+	flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->dev_flows);
if (rss && rss->types) {
unsigned int graph_root;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
for (i = 0; i < buf->entries; ++i) {
- dev_flow = mlx5_flow_verbs_prepare(attr, buf->entry[i].pattern,
- actions, &item_flags,
- &action_flags, error);
+ dev_flow = nic_ops.prepare(attr, buf->entry[i].pattern,
+ actions, &item_flags,
+ &action_flags, error);
+ if (!dev_flow)
+ goto error;
dev_flow->flow = flow;
LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ ret = nic_ops.translate(dev, dev_flow, attr,
+ buf->entry[i].pattern,
+ actions, error);
+ if (ret < 0)
+ goto error;
}
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0)
- return NULL;
- size = ret;
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "not enough memory to create flow");
- return NULL;
- }
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0) {
- rte_free(flow);
- return NULL;
- }
- assert((size_t)ret == size);
if (dev->data->dev_started) {
- ret = mlx5_flow_apply(dev, flow, error);
- if (ret < 0) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow) {
- mlx5_flow_remove(dev, flow);
- rte_free(flow);
- }
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
- }
+ ret = nic_ops.apply(dev, flow, error);
+ if (ret < 0)
+ goto error;
}
TAILQ_INSERT_TAIL(list, flow, next);
mlx5_flow_rxq_flags_set(dev, flow);
return flow;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ assert(flow);
+ nic_ops.destroy(dev, flow);
+ rte_free(flow);
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
}
/**
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct rte_flow *flow)
{
- mlx5_flow_remove(dev, flow);
+ nic_ops.destroy(dev, flow);
TAILQ_REMOVE(list, flow, next);
/*
* Update RX queue flags only if port is started, otherwise it is
struct rte_flow *flow;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
- mlx5_flow_remove(dev, flow);
+ nic_ops.remove(dev, flow);
mlx5_flow_rxq_flags_clear(dev);
}
int ret = 0;
TAILQ_FOREACH(flow, list, next) {
- ret = mlx5_flow_apply(dev, flow, &error);
+ ret = nic_ops.apply(dev, flow, &error);
if (ret < 0)
goto error;
mlx5_flow_rxq_flags_set(dev, flow);
struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
struct rte_flow_query_count *qc = data;
uint64_t counters[2] = {0, 0};
struct ibv_query_counter_set_attr query_cs_attr = {
}
return 0;
}
+
+/**
+ * Init the driver ops structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_flow_init_driver_ops(struct rte_eth_dev *dev __rte_unused)
+{
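+	/* Only the Verbs driver is hooked up at this point. */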
+ mlx5_flow_verbs_get_driver_ops(&nic_ops);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <netinet/in.h>
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+#include "mlx5_glue.h"
+#include "mlx5_flow.h"
+
+/**
+ * Get a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
+
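+	/* A shared counter with a matching ID is reused across flows. */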
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
+ continue;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
+ }
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
+
+ if (!tmpl.cs) {
+ rte_errno = errno;
+ return NULL;
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
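+	/* Reached only when counter set support is compiled out. */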
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+flow_verbs_counter_release(struct mlx5_flow_counter *counter)
+{
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
+ }
+}
+
+/**
+ * Add a verbs item specification into @p flow.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
+ *   Pointer to the specification to copy.
+ * @param[in] size
+ * Size in bytes of the specification to copy.
+ */
+static void
+flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
+{
+ struct mlx5_flow_verbs *verbs = &flow->verbs;
+
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
+ memcpy(dst, src, size);
+ ++verbs->attr->num_of_specs;
+ }
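+	/*
+	 * The size is accumulated even when no buffer is attached so the
+	 * routine can also be used to measure the space specifications need.
+	 */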
+ verbs->size += size;
+}
+
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ */
+static void
+flow_verbs_hashfields_adjust(struct mlx5_flow *dev_flow,
+ int tunnel __rte_unused,
+ uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ int rss_request_inner = dev_flow->flow->rss.level >= 2;
+
+ hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
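+	/* Keep the hash fields only when the layer matches the RSS level. */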
+ if (rss_request_inner && !tunnel)
+ hash_fields = 0;
+	else if (!rss_request_inner && tunnel)
+ hash_fields = 0;
+#endif
+ if (!(dev_flow->flow->rss.types & layer_types))
+ hash_fields = 0;
+ dev_flow->verbs.hash_fields |= hash_fields;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit field with all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_eth(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ if (spec) {
+ unsigned int i;
+
+		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+		eth.val.ether_type = spec->type;
+		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+	flow_verbs_spec_add(dev_flow, &eth, size);
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+}
+
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ * This function assumes that the input is valid and there is space to add
+ * the requested item.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
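+	/* Walk the spec list to find the Ethernet spec to update in place. */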
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that holds all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ if (spec) {
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ if (!(*item_flags & l2m)) {
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+		flow_verbs_spec_add(dev_flow, &eth, size);
+	} else {
+		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+ size = 0; /* Only an update is done in eth specification. */
+ }
+ *item_flags |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (spec) {
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ flow_verbs_hashfields_adjust(dev_flow, tunnel,
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
+ flow_verbs_spec_add(dev_flow, &ipv4, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (spec) {
+ unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
+
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ flow_verbs_hashfields_adjust(dev_flow, tunnel,
+ (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_IPV6_TCP_EX |
+ ETH_RSS_IPV6_UDP_EX |
+ ETH_RSS_NONFRAG_IPV6_OTHER),
+ (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
+ flow_verbs_spec_add(dev_flow, &ipv6, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_udp(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ if (spec) {
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ flow_verbs_hashfields_adjust(dev_flow,
+ tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
+ flow_verbs_spec_add(dev_flow, &udp, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (spec) {
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ flow_verbs_hashfields_adjust(dev_flow,
+ tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
+ flow_verbs_spec_add(dev_flow, &tcp, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ if (spec) {
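+		/* The 24-bit VNI fills the three trailing bytes of the ID. */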
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ flow_verbs_spec_add(dev_flow, &vxlan, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_VXLAN;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
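+	/* The plain VXLAN tunnel spec type is reused for VXLAN-GPE matching. */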
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
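+	/* The IP protocol is filled only when the pattern left it unset. */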
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
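+	/* Without MPLS support, GRE is matched as a generic tunnel only. */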
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#else
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#endif
+ if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ IBV_FLOW_SPEC_IPV4_EXT,
+ IPPROTO_GRE);
+ else
+ flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ IBV_FLOW_SPEC_IPV6,
+ IPPROTO_GRE);
+ flow_verbs_spec_add(dev_flow, &tunnel, size);
+ verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_GRE;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] action_flags
+ *   Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
+ uint64_t *action_flags __rte_unused,
+ struct mlx5_flow *dev_flow __rte_unused)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ flow_verbs_spec_add(dev_flow, &mpls, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *action_flags |= MLX5_FLOW_LAYER_MPLS;
+#endif
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in @p action_flags.
+ *
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_drop(uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ flow_verbs_spec_add(dev_flow, &drop, size);
+ *action_flags |= MLX5_FLOW_ACTION_DROP;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in @p action_flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_queue(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_queue *queue = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+
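+	/* A queue action amounts to an RSS context with a single queue. */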
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ *action_flags |= MLX5_FLOW_ACTION_QUEUE;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in @p action_flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_rss(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ *action_flags |= MLX5_FLOW_ACTION_RSS;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in @p action_flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_flag
+ (const struct rte_flow_action *action __rte_unused,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
+ };
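+
+	/* The flag action is a mark action carrying the default mark ID. */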
+ *action_flags |= MLX5_FLOW_ACTION_MARK;
+ flow_verbs_spec_add(dev_flow, &tag, size);
+}
+
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
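+ /* Walk the spec list; each ibv_spec_header carries its own size. */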
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The action that was added is also reported through
+ * @p action_flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_mark(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ };
+ struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+
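+ /* A preceding FLAG action already added a tag spec: rewrite its ID
+ * in place instead of inserting a second tag.
+ */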
+ if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
+ flow_verbs_mark_update(verbs, mark->id);
+ size = 0;
+ } else {
+ tag.tag_id = mlx5_flow_mark_set(mark->id);
+ flow_verbs_spec_add(dev_flow, &tag, size);
+ }
+ *action_flags |= MLX5_FLOW_ACTION_MARK;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The action that was added is also reported through
+ * @p action_flags.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_translate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_count *count = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ };
+#endif
+
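+ /* Get a counter only once per flow; shared counters are reused by ID. */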
+ if (!flow->counter) {
+ flow->counter = flow_verbs_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ *action_flags |= MLX5_FLOW_ACTION_COUNT;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ counter.counter_set_handle = flow->counter->cs->handle;
+ flow_verbs_spec_add(dev_flow, &counter, size);
+#endif
+ return 0;
+}
+
+/**
+ * Internal validation function, validating both actions and items.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ int tunnel = 0;
+ uint8_t next_protocol = 0xff;
+
+ if (items == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "empty pattern");
+ ret = mlx5_flow_validate_attributes(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int ret = 0;
+
+ /* Items that follow a tunnel item describe the inner layers. */
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
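+ /* Remember the next protocol to validate the items that follow. */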
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags,
+ dev, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ if (next_protocol != 0xff &&
+ next_protocol != IPPROTO_MPLS)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ item_flags |= MLX5_FLOW_LAYER_MPLS;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_validate_action_flag(action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_validate_action_mark(actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
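+ /* Every flow needs a fate action: drop, queue or RSS. */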
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ return 0;
+}
+
+/**
+ * Calculate the required bytes that are needed for the action part of the
+ * Verbs flow. It also returns a bit-field of the detected actions, to avoid
+ * another iteration over the actions.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
+ *
+ * @return
+ * The size of the memory needed for all actions.
+ */
+static int
+flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
+ uint64_t *action_flags)
+{
+ int size = 0;
+ uint64_t detected_actions = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ size += sizeof(struct ibv_flow_spec_action_drop);
+ detected_actions |= MLX5_FLOW_ACTION_DROP;
+ break;
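+ /* QUEUE and RSS need no spec space; they are realized
+ * through the hash Rx queue at apply time.
+ */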
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ detected_actions |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ detected_actions |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ size += sizeof(struct ibv_flow_spec_counter_action);
+#endif
+ detected_actions |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ default:
+ break;
+ }
+ }
+ *action_flags = detected_actions;
+ return size;
+}
+
+/**
+ * Calculate the required bytes that are needed for the item part of the
+ * Verbs flow. It also returns a bit-field of the detected items, to avoid
+ * another iteration over the items.
+ *
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[out] item_flags
+ * Pointer to the detected items.
+ *
+ * @return
+ * The size of the memory needed for all items.
+ */
+static int
+flow_verbs_get_items_and_size(const struct rte_flow_item items[],
+ uint64_t *item_flags)
+{
+ int size = 0;
+ uint64_t detected_items = 0;
+
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ /* Items that follow a tunnel item describe the inner layers. */
+ int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL);
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ size += sizeof(struct ibv_flow_spec_eth);
+ detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ size += sizeof(struct ibv_flow_spec_eth);
+ detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ size += sizeof(struct ibv_flow_spec_ipv4_ext);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ size += sizeof(struct ibv_flow_spec_ipv6);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ size += sizeof(struct ibv_flow_spec_tcp_udp);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ size += sizeof(struct ibv_flow_spec_tcp_udp);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ size += sizeof(struct ibv_flow_spec_gre);
+ detected_items |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ size += sizeof(struct ibv_flow_spec_mpls);
+ detected_items |= MLX5_FLOW_LAYER_MPLS;
+ break;
+#else
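+ /* Without MPLS support, GRE can only match as a generic tunnel. */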
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_TUNNEL;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ *item_flags = detected_items;
+ return size;
+}
+
+/**
+ * Internal preparation function. Allocate mlx5_flow with the required size.
+ * The required size is calculated based on the actions and items. This
+ * function also returns the detected actions and items for later use.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] item_flags
+ * Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ * Pointer to bit mask of all actions detected.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
+ * is set.
+ */
+static struct mlx5_flow *
+flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ uint64_t *item_flags,
+ uint64_t *action_flags,
+ struct rte_flow_error *error)
+{
+ uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ struct mlx5_flow *flow;
+
+ size += flow_verbs_get_actions_and_size(actions, action_flags);
+ size += flow_verbs_get_items_and_size(items, item_flags);
+ flow = rte_calloc(__func__, 1, size, 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow");
+ return NULL;
+ }
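+ /* The Verbs attribute and its specifications follow the mlx5_flow
+ * structure inside the same allocation.
+ */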
+ flow->verbs.attr = (void *)(flow + 1);
+ flow->verbs.specs =
+ (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
+ return flow;
+}
+
+/**
+ * Fill the flow with Verbs specifications.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5 flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ uint64_t priority = attr->priority;
+ struct priv *priv = dev->data->dev_private;
+
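+ /* MLX5_FLOW_PRIO_RSVD maps to the last device flow priority. */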
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = priv->config.flow_prio - 1;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ int ret;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ flow_verbs_translate_action_flag(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ flow_verbs_translate_action_mark(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ flow_verbs_translate_action_drop(&action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ flow_verbs_translate_action_queue(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ flow_verbs_translate_action_rss(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_verbs_translate_action_count(dev,
+ actions,
+ &action_flags,
+ dev_flow,
+ error);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ dev_flow->flow->actions |= action_flags;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_verbs_translate_item_eth(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_verbs_translate_item_vlan(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_verbs_translate_item_ipv4(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_verbs_translate_item_ipv6(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_verbs_translate_item_udp(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_verbs_translate_item_tcp(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_verbs_translate_item_vxlan(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_verbs_translate_item_gre(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ flow_verbs_translate_item_mpls(items, &item_flags,
+ dev_flow);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "item not supported");
+ }
+ }
+ dev_flow->verbs.attr->priority =
+ mlx5_flow_adjust_priority(dev, priority,
+ dev_flow->verbs.attr->priority);
+ return 0;
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
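+ /* Release the Verbs flow and the hash Rx queue of each device flow. */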
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (verbs->flow) {
+ claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+ verbs->flow = NULL;
+ }
+ if (verbs->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ if (flow->counter) {
+ flow_verbs_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
+}
+
+/**
+ * Remove the flow from the NIC and the memory.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ flow_verbs_remove(dev, flow);
+ while (!LIST_EMPTY(&flow->dev_flows)) {
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ LIST_REMOVE(dev_flow, next);
+ rte_free(dev_flow);
+ }
+}
+
+/**
+ * Apply the flow to the NIC.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow *dev_flow;
+ int err;
+
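+ /* Attach a hash Rx queue and create a Verbs flow per device flow. */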
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ verbs->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!verbs->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ } else {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num,
+ !!(flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ verbs->hrxq = hrxq;
+ }
+ verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
+ verbs->attr);
+ if (!verbs->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
+ }
+ }
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (verbs->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
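+/**
+ * Fill the flow driver ops structure with the Verbs engine callbacks.
+ *
+ * @param[out] flow_ops
+ * Pointer to driver ops structure to fill.
+ */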
+void
+mlx5_flow_verbs_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
+{
+ *flow_ops = (struct mlx5_flow_driver_ops) {
+ .validate = flow_verbs_validate,
+ .prepare = flow_verbs_prepare,
+ .translate = flow_verbs_translate,
+ .apply = flow_verbs_apply,
+ .remove = flow_verbs_remove,
+ .destroy = flow_verbs_destroy,
+ };
+}