*/
#include <sys/queue.h>
+#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
MLX5_FLOW_LAYER_OUTER_L4)
/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL 0
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
/* Modify a packet. */
#define MLX5_FLOW_MOD_FLAG (1u << 0)
#define MLX5_FLOW_MOD_MARK (1u << 1)
+#define MLX5_FLOW_MOD_COUNT (1u << 2)
/* possible L3 layers protocols filtering. */
#define MLX5_IP_PROTOCOL_TCP 6
#define MLX5_IP_PROTOCOL_UDP 17
+#define MLX5_IP_PROTOCOL_GRE 47
+#define MLX5_IP_PROTOCOL_MPLS 147
/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
MLX5_EXPANSION_OUTER_IPV6,
MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_ETH,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
},
[MLX5_EXPANSION_OUTER_ETH] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
- MLX5_EXPANSION_OUTER_IPV6),
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_MPLS),
.type = RTE_FLOW_ITEM_TYPE_ETH,
.rss_types = 0,
},
[MLX5_EXPANSION_OUTER_IPV4] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV4_UDP,
- MLX5_EXPANSION_OUTER_IPV4_TCP),
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_GRE),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
ETH_RSS_NONFRAG_IPV4_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
.type = RTE_FLOW_ITEM_TYPE_UDP,
.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
+ [MLX5_EXPANSION_VXLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ },
+ [MLX5_EXPANSION_VXLAN_GPE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ },
+ [MLX5_EXPANSION_GRE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ .type = RTE_FLOW_ITEM_TYPE_GRE,
+ },
+ [MLX5_EXPANSION_MPLS] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_MPLS,
+ },
[MLX5_EXPANSION_ETH] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
};
+/* Counters information. */
+struct mlx5_flow_counter {
+ LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t ref_cnt:31; /**< Reference counter. */
+ uint32_t id; /**< Counter ID. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
/* Flow structure. */
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
struct mlx5_flow_verbs *cur_verbs;
/**< Current Verbs flow structure being filled. */
+ struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
struct rte_flow_action_rss rss;/**< RSS context. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ void *nl_flow; /**< Netlink flow buffer if relevant. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
.isolate = mlx5_flow_isolate,
+ .query = mlx5_flow_query,
};
/* Convert FDIR request to Generic flow. */
{ 9, 10, 11 }, { 12, 13, 14 },
};
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+ uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
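+/*
+ * Entries below are matched in order against flow->layers with an
+ * "all bits present" test (see mlx5_flow_rxq_flags_set()), so the
+ * MPLS-over-UDP entry must stay before the plain MPLS one.
+ */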
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_GRE,
+ .ptype = RTE_PTYPE_TUNNEL_GRE,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+ },
+};
+
/**
* Discover the maximum number of priority available.
*
flow->cur_verbs->attr->priority = priority;
}
+/**
+ * Get a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
+
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
+ continue;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
+ }
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
+
+ if (!tmpl.cs) {
+ rte_errno = errno;
+ return NULL;
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
+{
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
+ }
+}
+
/**
* Verify the @p attributes will be correctly understood by the NIC and store
* them in the @p flow if everything is correct.
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"L3 cannot follow an L4 layer.");
+ /*
+ * IPv6 is not recognised by the NIC inside a GRE tunnel.
+ * Such support has to be disabled as the rule would otherwise be
+ * accepted without being enforced. Issue reproduced with
+ * Mellanox OFED 4.3-3.0.2.1 and Mellanox OFED 4.4-1.0.0.0.
+ */
+ if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPv6 inside a GRE tunnel is"
+ " not recognised.");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable
return size;
}
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only
+ * this layer is defined in the Verbs specification, it is
+ * interpreted as a wildcard and all packets will match this
+ * rule; if it follows a fully specified stack (ex: eth / ipv4 /
+ * udp), all packets matching the preceding layers will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN tunnel must be fully defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN;
+ return size;
+}
+
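As a minimal application-side sketch (not part of this patch), the following
shows how a rule satisfying the checks above could be created through the
generic rte_flow API: a fully specified outer eth / ipv4 / udp stack and a
non-zero VNI. The helper name, port, queue and VNI values are placeholders.

#include <rte_flow.h>

static struct rte_flow *
example_vxlan_flow(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	/* VNI 42; VNI 0 is refused by the conversion above. */
	static const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x00, 0x2a },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}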
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ if (spec->protocol)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not supported");
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN-GPE layer: if only
+ * this layer is defined in the Verbs specification, it is interpreted
+ * as a wildcard and all packets will match this rule; if it follows a
+ * fully specified stack (ex: eth / ipv4 / udp), all packets matching
+ * the preceding layers will also match this rule. To avoid such a
+ * situation, VNI 0 is currently refused.
+ */
+ if (!vxlan_gpe.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ return size;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It will also update the previous L3 layer with the protocol value matching
+ * the GRE.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#else
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#endif
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#else
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ if (size <= flow_size) {
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
+ MLX5_IP_PROTOCOL_GRE);
+ else
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV6,
+ MLX5_IP_PROTOCOL_GRE);
+ mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_GRE;
+ return size;
+}
+
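A minimal sketch (not part of this patch) of a pattern satisfying the GRE
checks above: the GRE item follows an outer L3 item and only inner IPv4 is
used, since IPv6 inside GRE is rejected. It assumes <rte_flow.h> is included;
with the outer IPv4 protocol left unspecified, the conversion fills it with
MLX5_IP_PROTOCOL_GRE through mlx5_flow_item_gre_ip_protocol_update().

static const struct rte_flow_item gre_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	/* Outer L3, protocol intentionally left unspecified. */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
	/* Inner IPv4; inner IPv6 is refused with this NIC/OFED combination. */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};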
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p item has fully been converted,
+ * otherwise another call with this returned memory size should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_MPLS;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
/**
* Convert the @p pattern into a Verbs specifications after ensuring the NIC
* will understand and process it correctly.
* On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_items(const struct rte_flow_item pattern[],
+mlx5_flow_items(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
struct rte_flow *flow, const size_t flow_size,
struct rte_flow_error *error)
{
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_item_vxlan(pattern, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+ remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
return size;
}
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success the number of bytes consumed/necessary, if the returned value
+ * is lesser or equal to @p flow_size, the @p action has fully been
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_count *count = action->conf;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ };
+#endif
+
+ if (!flow->counter) {
+ flow->counter = mlx5_flow_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flow counters are not supported.");
+ flow->modifier |= MLX5_FLOW_MOD_COUNT;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ counter.counter_set_handle = flow->counter->cs->handle;
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &counter, size);
+ return size;
+#endif
+ return 0;
+}
+
/**
* Convert the @p action into @p flow after ensuring the NIC will understand
* and process it correctly.
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_action_rss(dev, actions, flow, error);
break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_action_count(dev, actions, flow, remain,
+ error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
return size;
}
+/**
+ * Validate flow rule and fill flow structure accordingly.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] flow
+ * Pointer to flow structure.
+ * @param flow_size
+ * Size of allocated space for @p flow.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A positive value representing the size of the flow object in bytes
+ * regardless of @p flow_size on success, a negative errno value otherwise
+ * and rte_errno is set.
+ */
+static int
+mlx5_flow_merge_switch(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ size_t flow_size,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[!n + n];
+ struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
+ size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
+ unsigned int i;
+ unsigned int own = 0;
+ int ret;
+
+ /* At least one port is needed when no switch domain is present. */
+ if (!n) {
+ n = 1;
+ port_id[0] = dev->data->port_id;
+ } else {
+ n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
+ }
+ for (i = 0; i != n; ++i) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id[i], &dev_info);
+ if (port_id[i] == dev->data->port_id)
+ own = i;
+ ptoi[i].port_id = port_id[i];
+ ptoi[i].ifindex = dev_info.if_index;
+ }
+ /* Ensure first entry of ptoi[] is the current device. */
+ if (own) {
+ ptoi[n] = ptoi[0];
+ ptoi[0] = ptoi[own];
+ ptoi[own] = ptoi[n];
+ }
+ /* An entry with zero ifindex terminates ptoi[]. */
+ ptoi[n].port_id = 0;
+ ptoi[n].ifindex = 0;
+ if (flow_size < off)
+ flow_size = 0;
+ ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
+ flow_size ? flow_size - off : 0,
+ ptoi, attr, pattern, actions, error);
+ if (ret < 0)
+ return ret;
+ if (flow_size) {
+ *flow = (struct rte_flow){
+ .attributes = *attr,
+ .nl_flow = (uint8_t *)flow + off,
+ };
+ /*
+ * Generate a reasonably unique handle based on the address
+ * of the target buffer.
+ *
+ * This is straightforward on 32-bit systems where the flow
+ * pointer can be used directly. Otherwise, its least
+ * significant part is taken after shifting the pointer right
+ * by the base-2 logarithm of the previous power of two of the
+ * pointed buffer size.
+ */
+ if (sizeof(flow) <= 4)
+ mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
+ else
+ mlx5_nl_flow_brand
+ (flow->nl_flow,
+ (uintptr_t)flow >>
+ rte_log2_u32(rte_align32prevpow2(flow_size)));
+ }
+ return off + ret;
+}
+
/**
* Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
* after ensuring the NIC will understand and process it correctly.
int ret;
uint32_t i;
+ if (attributes->transfer)
+ return mlx5_flow_merge_switch(dev, flow, flow_size,
+ attributes, pattern,
+ actions, error);
if (size > flow_size)
flow = &local_flow;
ret = mlx5_flow_attributes(dev, attributes, flow, error);
}
}
ret = mlx5_flow_items
- ((const struct rte_flow_item *)
+ (dev,
+ (const struct rte_flow_item *)
&buf->entry[i].pattern[expanded_pattern_idx],
flow,
(size < flow_size) ? flow_size - size : 0, error);
}
/**
- * Mark the Rx queues mark flag if the flow has a mark or flag modifier.
+ * Look up and set the tunnel ptype in the Rx queue data. Only a single
+ * tunnel ptype can be set; if several different tunnel rules share this
+ * queue, the tunnel ptype is cleared.
+ *
+ * @param rxq_ctrl
+ * Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
+
+ /* Look up for the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
+ }
+ }
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
*
* @param[in] dev
* Pointer to Ethernet device.
* Pointer to flow structure.
*/
static void
-mlx5_flow_rxq_mark_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
- if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) {
- unsigned int i;
-
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+ if (mark) {
rxq_ctrl->rxq.mark = 1;
rxq_ctrl->flow_mark_n++;
}
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
}
}
/**
- * Clear the Rx queue mark associated with the @p flow if no other flow uses
- * it with a mark request.
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
*
* @param dev
* Pointer to Ethernet device.
* Pointer to the flow.
*/
static void
-mlx5_flow_rxq_mark_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
- if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) {
- unsigned int i;
-
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+ if (mark) {
rxq_ctrl->flow_mark_n--;
rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
}
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
}
}
/**
- * Clear the mark bit in all Rx queues.
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
*
* @param dev
* Pointer to Ethernet device.
*/
static void
-mlx5_flow_rxq_mark_clear(struct rte_eth_dev *dev)
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
- unsigned int idx;
- for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
+ for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
- if (!(*priv->rxqs)[idx])
+ if (!(*priv->rxqs)[i])
continue;
- rxq_ctrl = container_of((*priv->rxqs)[idx],
+ rxq_ctrl = container_of((*priv->rxqs)[i],
struct mlx5_rxq_ctrl, rxq);
rxq_ctrl->flow_mark_n = 0;
rxq_ctrl->rxq.mark = 0;
- ++idx;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
}
}
static void
mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_flow_verbs *verbs;
+ if (flow->nl_flow && priv->mnl_socket)
+ mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
LIST_FOREACH(verbs, &flow->verbs, next) {
if (verbs->flow) {
claim_zero(mlx5_glue->destroy_flow(verbs->flow));
verbs->hrxq = NULL;
}
}
+ if (flow->counter) {
+ mlx5_flow_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
}
/**
mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_flow_verbs *verbs;
int err;
MLX5_RSS_HASH_KEY_LEN,
verbs->hash_fields,
(*flow->queue),
- flow->rss.queue_num);
+ flow->rss.queue_num,
+ !!(flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
goto error;
}
}
+ if (flow->nl_flow &&
+ priv->mnl_socket &&
+ mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
+ goto error;
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
}
}
TAILQ_INSERT_TAIL(list, flow, next);
- mlx5_flow_rxq_mark_set(dev, flow);
+ mlx5_flow_rxq_flags_set(dev, flow);
return flow;
}
{
mlx5_flow_remove(dev, flow);
TAILQ_REMOVE(list, flow, next);
- mlx5_flow_rxq_mark_trim(dev, flow);
+ /*
+ * Update RX queue flags only if port is started, otherwise it is
+ * already clean.
+ */
+ if (dev->data->dev_started)
+ mlx5_flow_rxq_flags_trim(dev, flow);
rte_free(flow);
}
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
mlx5_flow_remove(dev, flow);
- mlx5_flow_rxq_mark_clear(dev);
+ mlx5_flow_rxq_flags_clear(dev);
}
/**
ret = mlx5_flow_apply(dev, flow, &error);
if (ret < 0)
goto error;
- mlx5_flow_rxq_mark_set(dev, flow);
+ mlx5_flow_rxq_flags_set(dev, flow);
}
return 0;
error:
return 0;
}
+/**
+ * Query flow counter.
+ *
+ * @param flow
+ * Pointer to the flow.
+ * @param[in, out] data
+ * Pointer to a struct rte_flow_query_count to fill.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
+
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_query_count(flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
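A minimal application-side sketch (not part of this patch) of querying such a
counter through the generic API. It assumes the flow was created with a COUNT
action and that port_id and flow are valid placeholders.

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

static void
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
	/* Read and reset the counter attached to the flow rule. */
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action count_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (!rte_flow_query(port_id, flow, count_actions, &qc, &error) &&
	    qc.hits_set && qc.bytes_set)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       qc.hits, qc.bytes);
}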
/**
* Convert a flow director filter to a generic flow.
*