1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
4 */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
28 #include <rte_ip.h>
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_prm.h"
33 #include "mlx5_glue.h"
35 /* Dev ops structure defined in mlx5.c */
36 extern const struct eth_dev_ops mlx5_dev_ops;
37 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
39 /* Pattern outer layer bits. */
40 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
42 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
43 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
44 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
45 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
47 /* Pattern inner layer bits. */
48 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
49 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
50 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
51 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
52 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
53 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
55 /* Pattern tunnel layer bits. */
56 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
57 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
58 #define MLX5_FLOW_LAYER_GRE (1u << 14)
59 #define MLX5_FLOW_LAYER_MPLS (1u << 15)
61 /* Outer Masks. */
62 #define MLX5_FLOW_LAYER_OUTER_L3 \
63 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
64 #define MLX5_FLOW_LAYER_OUTER_L4 \
65 (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
66 #define MLX5_FLOW_LAYER_OUTER \
67 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
68 MLX5_FLOW_LAYER_OUTER_L4)
70 /* Tunnel Masks. */
71 #define MLX5_FLOW_LAYER_TUNNEL \
72 (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
73 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
75 /* Inner Masks. */
76 #define MLX5_FLOW_LAYER_INNER_L3 \
77 (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
78 #define MLX5_FLOW_LAYER_INNER_L4 \
79 (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
80 #define MLX5_FLOW_LAYER_INNER \
81 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
82 MLX5_FLOW_LAYER_INNER_L4)
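/*
 * Illustrative use of the layer bits (mirroring the item converters
 * below): the tunnel flag selects which half of the bit-field an item
 * must be checked against, e.g.:
 *
 *   const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 *
 *   if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 *                                MLX5_FLOW_LAYER_OUTER_L2))
 *           ... reject: L2 already configured at this level ...
 */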
84 /* Actions that modify the fate of matching traffic. */
85 #define MLX5_FLOW_FATE_DROP (1u << 0)
86 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
87 #define MLX5_FLOW_FATE_RSS (1u << 2)
89 /* Modify a packet. */
90 #define MLX5_FLOW_MOD_FLAG (1u << 0)
91 #define MLX5_FLOW_MOD_MARK (1u << 1)
92 #define MLX5_FLOW_MOD_COUNT (1u << 2)
94 /* Possible L3 layer protocols to filter on. */
95 #define MLX5_IP_PROTOCOL_TCP 6
96 #define MLX5_IP_PROTOCOL_UDP 17
97 #define MLX5_IP_PROTOCOL_GRE 47
98 #define MLX5_IP_PROTOCOL_MPLS 147
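/*
 * Illustrative check (taken from the L4 converters below): once an L3
 * item has enabled protocol filtering, later items must agree with the
 * stored value, e.g.:
 *
 *   if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
 *           ... reject: protocol filtering not compatible with UDP ...
 */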
100 /* Priority reserved for default flows. */
101 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
103 enum mlx5_expansion {
104 MLX5_EXPANSION_ROOT,
105 MLX5_EXPANSION_ROOT_OUTER,
106 MLX5_EXPANSION_ROOT_ETH_VLAN,
107 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
108 MLX5_EXPANSION_OUTER_ETH,
109 MLX5_EXPANSION_OUTER_ETH_VLAN,
110 MLX5_EXPANSION_OUTER_VLAN,
111 MLX5_EXPANSION_OUTER_IPV4,
112 MLX5_EXPANSION_OUTER_IPV4_UDP,
113 MLX5_EXPANSION_OUTER_IPV4_TCP,
114 MLX5_EXPANSION_OUTER_IPV6,
115 MLX5_EXPANSION_OUTER_IPV6_UDP,
116 MLX5_EXPANSION_OUTER_IPV6_TCP,
117 MLX5_EXPANSION_VXLAN,
118 MLX5_EXPANSION_VXLAN_GPE,
119 MLX5_EXPANSION_GRE,
120 MLX5_EXPANSION_MPLS,
121 MLX5_EXPANSION_ETH,
122 MLX5_EXPANSION_ETH_VLAN,
123 MLX5_EXPANSION_VLAN,
124 MLX5_EXPANSION_IPV4,
125 MLX5_EXPANSION_IPV4_UDP,
126 MLX5_EXPANSION_IPV4_TCP,
127 MLX5_EXPANSION_IPV6,
128 MLX5_EXPANSION_IPV6_UDP,
129 MLX5_EXPANSION_IPV6_TCP,
130 };
132 /** Supported expansion of items. */
133 static const struct rte_flow_expand_node mlx5_support_expansion[] = {
134 [MLX5_EXPANSION_ROOT] = {
135 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
136 MLX5_EXPANSION_IPV4,
137 MLX5_EXPANSION_IPV6),
138 .type = RTE_FLOW_ITEM_TYPE_END,
139 },
140 [MLX5_EXPANSION_ROOT_OUTER] = {
141 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
142 MLX5_EXPANSION_OUTER_IPV4,
143 MLX5_EXPANSION_OUTER_IPV6),
144 .type = RTE_FLOW_ITEM_TYPE_END,
145 },
146 [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
147 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
148 .type = RTE_FLOW_ITEM_TYPE_END,
149 },
150 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
151 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
152 .type = RTE_FLOW_ITEM_TYPE_END,
153 },
154 [MLX5_EXPANSION_OUTER_ETH] = {
155 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
156 MLX5_EXPANSION_OUTER_IPV6,
157 MLX5_EXPANSION_MPLS),
158 .type = RTE_FLOW_ITEM_TYPE_ETH,
159 .rss_types = 0,
160 },
161 [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
162 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
163 .type = RTE_FLOW_ITEM_TYPE_ETH,
164 .rss_types = 0,
165 },
166 [MLX5_EXPANSION_OUTER_VLAN] = {
167 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
168 MLX5_EXPANSION_OUTER_IPV6),
169 .type = RTE_FLOW_ITEM_TYPE_VLAN,
170 },
171 [MLX5_EXPANSION_OUTER_IPV4] = {
172 .next = RTE_FLOW_EXPAND_RSS_NEXT
173 (MLX5_EXPANSION_OUTER_IPV4_UDP,
174 MLX5_EXPANSION_OUTER_IPV4_TCP,
175 MLX5_EXPANSION_GRE),
176 .type = RTE_FLOW_ITEM_TYPE_IPV4,
177 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
178 ETH_RSS_NONFRAG_IPV4_OTHER,
179 },
180 [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
181 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
182 MLX5_EXPANSION_VXLAN_GPE),
183 .type = RTE_FLOW_ITEM_TYPE_UDP,
184 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
185 },
186 [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
187 .type = RTE_FLOW_ITEM_TYPE_TCP,
188 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
189 },
190 [MLX5_EXPANSION_OUTER_IPV6] = {
191 .next = RTE_FLOW_EXPAND_RSS_NEXT
192 (MLX5_EXPANSION_OUTER_IPV6_UDP,
193 MLX5_EXPANSION_OUTER_IPV6_TCP),
194 .type = RTE_FLOW_ITEM_TYPE_IPV6,
195 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
196 ETH_RSS_NONFRAG_IPV6_OTHER,
197 },
198 [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
199 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
200 MLX5_EXPANSION_VXLAN_GPE),
201 .type = RTE_FLOW_ITEM_TYPE_UDP,
202 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
203 },
204 [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
205 .type = RTE_FLOW_ITEM_TYPE_TCP,
206 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
207 },
208 [MLX5_EXPANSION_VXLAN] = {
209 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
210 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
211 },
212 [MLX5_EXPANSION_VXLAN_GPE] = {
213 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
214 MLX5_EXPANSION_IPV4,
215 MLX5_EXPANSION_IPV6),
216 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
217 },
218 [MLX5_EXPANSION_GRE] = {
219 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
220 .type = RTE_FLOW_ITEM_TYPE_GRE,
221 },
222 [MLX5_EXPANSION_MPLS] = {
223 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
224 MLX5_EXPANSION_IPV6),
225 .type = RTE_FLOW_ITEM_TYPE_MPLS,
226 },
227 [MLX5_EXPANSION_ETH] = {
228 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
229 MLX5_EXPANSION_IPV6),
230 .type = RTE_FLOW_ITEM_TYPE_ETH,
231 },
232 [MLX5_EXPANSION_ETH_VLAN] = {
233 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
234 .type = RTE_FLOW_ITEM_TYPE_ETH,
235 },
236 [MLX5_EXPANSION_VLAN] = {
237 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
238 MLX5_EXPANSION_IPV6),
239 .type = RTE_FLOW_ITEM_TYPE_VLAN,
240 },
241 [MLX5_EXPANSION_IPV4] = {
242 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
243 MLX5_EXPANSION_IPV4_TCP),
244 .type = RTE_FLOW_ITEM_TYPE_IPV4,
245 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
246 ETH_RSS_NONFRAG_IPV4_OTHER,
247 },
248 [MLX5_EXPANSION_IPV4_UDP] = {
249 .type = RTE_FLOW_ITEM_TYPE_UDP,
250 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
251 },
252 [MLX5_EXPANSION_IPV4_TCP] = {
253 .type = RTE_FLOW_ITEM_TYPE_TCP,
254 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
255 },
256 [MLX5_EXPANSION_IPV6] = {
257 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
258 MLX5_EXPANSION_IPV6_TCP),
259 .type = RTE_FLOW_ITEM_TYPE_IPV6,
260 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
261 ETH_RSS_NONFRAG_IPV6_OTHER,
262 },
263 [MLX5_EXPANSION_IPV6_UDP] = {
264 .type = RTE_FLOW_ITEM_TYPE_UDP,
265 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
266 },
267 [MLX5_EXPANSION_IPV6_TCP] = {
268 .type = RTE_FLOW_ITEM_TYPE_TCP,
269 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
270 },
271 };
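/*
 * Illustrative expansion, assuming this table is handed to
 * rte_flow_expand_rss() at flow creation time: the pattern
 * "eth / ipv4 / end" combined with the RSS types
 * ETH_RSS_NONFRAG_IPV4_UDP and ETH_RSS_NONFRAG_IPV4_TCP walks
 * MLX5_EXPANSION_IPV4 and yields the additional patterns
 * "eth / ipv4 / udp / end" and "eth / ipv4 / tcp / end", one flow
 * rule being created per expanded pattern.
 */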
273 /** Verbs flow structure: attributes, specifications and Verbs handles. */
274 struct mlx5_flow_verbs {
275 LIST_ENTRY(mlx5_flow_verbs) next;
276 unsigned int size; /**< Size of the attribute. */
277 struct {
278 struct ibv_flow_attr *attr;
279 /**< Pointer to the attributes buffer. */
280 uint8_t *specs; /**< Pointer to the specifications. */
281 };
282 struct ibv_flow *flow; /**< Verbs flow pointer. */
283 struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
284 uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
285 };
287 /* Counters information. */
288 struct mlx5_flow_counter {
289 LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
290 uint32_t shared:1; /**< Share counter ID with other flow rules. */
291 uint32_t ref_cnt:31; /**< Reference counter. */
292 uint32_t id; /**< Counter ID. */
293 struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
294 uint64_t hits; /**< Number of packets matched by the rule. */
295 uint64_t bytes; /**< Number of bytes matched by the rule. */
296 };
298 /* Flow structure. */
299 struct rte_flow {
300 TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
301 struct rte_flow_attr attributes; /**< User flow attribute. */
302 uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
303 uint32_t layers;
304 /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
305 uint32_t modifier;
306 /**< Bit-fields of present modifiers, see MLX5_FLOW_MOD_*. */
307 uint32_t fate;
308 /**< Bit-fields of present fate actions, see MLX5_FLOW_FATE_*. */
309 uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
310 LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
311 struct mlx5_flow_verbs *cur_verbs;
312 /**< Current Verbs flow structure being filled. */
313 struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
314 struct rte_flow_action_rss rss; /**< RSS context. */
315 uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
316 uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
317 void *nl_flow; /**< Netlink flow buffer if relevant. */
318 };
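/*
 * Minimal lifecycle sketch (mirroring the converters below): while a
 * flow is being translated, each item accumulates bits in flow->layers
 * and each action in flow->modifier or flow->fate, e.g.:
 *
 *   flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
 *   flow->fate |= MLX5_FLOW_FATE_QUEUE;
 *
 * so that later items and actions can validate themselves against what
 * is already present.
 */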
320 static const struct rte_flow_ops mlx5_flow_ops = {
321 .validate = mlx5_flow_validate,
322 .create = mlx5_flow_create,
323 .destroy = mlx5_flow_destroy,
324 .flush = mlx5_flow_flush,
325 .isolate = mlx5_flow_isolate,
326 .query = mlx5_flow_query,
327 };
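/*
 * Minimal sketch (the hook itself sits outside this excerpt): this
 * table is what the PMD hands back to ethdev when an application
 * queries the generic flow API through the filter control callback,
 * e.g.:
 *
 *   *(const void **)arg = &mlx5_flow_ops;
 */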
329 /* Convert FDIR request to Generic flow. */
330 struct mlx5_fdir {
331 struct rte_flow_attr attr;
332 struct rte_flow_action actions[2];
333 struct rte_flow_item items[4];
334 struct rte_flow_item_eth l2;
335 struct rte_flow_item_eth l2_mask;
336 union {
337 struct rte_flow_item_ipv4 ipv4;
338 struct rte_flow_item_ipv6 ipv6;
339 } l3;
340 union {
341 struct rte_flow_item_ipv4 ipv4;
342 struct rte_flow_item_ipv6 ipv6;
343 } l3_mask;
344 union {
345 struct rte_flow_item_udp udp;
346 struct rte_flow_item_tcp tcp;
347 } l4;
348 union {
349 struct rte_flow_item_udp udp;
350 struct rte_flow_item_tcp tcp;
351 } l4_mask;
352 struct rte_flow_action_queue queue;
353 };
355 /* Verbs specification header. */
356 struct ibv_spec_header {
357 enum ibv_flow_spec_type type;
358 uint16_t size;
359 };
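/*
 * Illustrative walk over a specification buffer using this header (as
 * done by mlx5_flow_item_vlan_update() below): every specification
 * starts with its type and size, so the buffer can be scanned spec by
 * spec:
 *
 *   struct ibv_spec_header *hdr = (struct ibv_spec_header *)
 *           ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 *   for (i = 0; i != attr->num_of_specs; ++i) {
 *           ... inspect hdr->type ...
 *           hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
 *   }
 */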
361 /**
362 * Number of sub priorities.
363 * For each kind of pattern matching (i.e. L2, L3, L4), to get correct
364 * matching on the NIC (firmware dependent), L4 must have the highest
365 * priority, followed by L3 and finally L2.
366 */
367 #define MLX5_PRIORITY_MAP_L2 2
368 #define MLX5_PRIORITY_MAP_L3 1
369 #define MLX5_PRIORITY_MAP_L4 0
370 #define MLX5_PRIORITY_MAP_MAX 3
372 /* Map of Verbs to Flow priority with 8 Verbs priorities. */
373 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
374 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
375 };
377 /* Map of Verbs to Flow priority with 16 Verbs priorities. */
378 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
379 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
380 { 9, 10, 11 }, { 12, 13, 14 },
381 };
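/*
 * Worked example: with 8 Verbs priorities (priority_map_3), a flow at
 * user priority 1 whose deepest layer is L3 (MLX5_PRIORITY_MAP_L3 == 1)
 * gets Verbs priority priority_map_3[1][1] == 3, while the L4 and L2
 * sub priorities of the same user priority map to 2 and 4; lower Verbs
 * values match first, which keeps L4 ahead of L3 and L2.
 */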
383 /* Tunnel information. */
384 struct mlx5_flow_tunnel_info {
385 uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
386 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
387 };
389 static struct mlx5_flow_tunnel_info tunnels_info[] = {
390 {
391 .tunnel = MLX5_FLOW_LAYER_VXLAN,
392 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
393 },
394 {
395 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
396 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
397 },
398 {
399 .tunnel = MLX5_FLOW_LAYER_GRE,
400 .ptype = RTE_PTYPE_TUNNEL_GRE,
401 },
402 {
403 .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
404 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
405 },
406 {
407 .tunnel = MLX5_FLOW_LAYER_MPLS,
408 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
409 },
410 };
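/*
 * Illustrative lookup (minimal sketch; the consuming code sits outside
 * this excerpt): matching flow->layers against tunnels_info[].tunnel
 * gives the packet type a receive queue should report, e.g. a flow
 * with MLX5_FLOW_LAYER_GRE set maps to RTE_PTYPE_TUNNEL_GRE.
 */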
412 /**
413 * Discover the maximum number of flow priorities available.
414 *
415 * @param[in] dev
416 * Pointer to Ethernet device.
417 *
418 * @return
419 * Number of supported flow priorities on success, a negative errno
420 * value otherwise and rte_errno is set.
421 */
422 static int
423 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
424 {
425 struct {
426 struct ibv_flow_attr attr;
427 struct ibv_flow_spec_eth eth;
428 struct ibv_flow_spec_action_drop drop;
434 .type = IBV_FLOW_SPEC_ETH,
435 .size = sizeof(struct ibv_flow_spec_eth),
438 .size = sizeof(struct ibv_flow_spec_action_drop),
439 .type = IBV_FLOW_SPEC_ACTION_DROP,
442 struct ibv_flow *flow;
443 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
444 uint16_t vprio[] = { 8, 16 };
452 for (i = 0; i != RTE_DIM(vprio); i++) {
453 flow_attr.attr.priority = vprio[i] - 1;
454 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
457 claim_zero(mlx5_glue->destroy_flow(flow));
462 priority = RTE_DIM(priority_map_3);
465 priority = RTE_DIM(priority_map_5);
470 "port %u verbs maximum priority: %d expected 8/16",
471 dev->data->port_id, vprio[i]);
474 mlx5_hrxq_drop_release(dev);
475 DRV_LOG(INFO, "port %u flow maximum priority: %d",
476 dev->data->port_id, priority);
480 /**
481 * Adjust flow priority.
482 *
483 * @param[in] dev
484 * Pointer to Ethernet device.
485 * @param[in, out] flow
486 * Pointer to an rte flow.
487 */
488 static void
489 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
490 {
491 struct priv *priv = dev->data->dev_private;
492 uint32_t priority = flow->attributes.priority;
493 uint32_t subpriority = flow->cur_verbs->attr->priority;
495 switch (priv->config.flow_prio) {
496 case RTE_DIM(priority_map_3):
497 priority = priority_map_3[priority][subpriority];
498 break;
499 case RTE_DIM(priority_map_5):
500 priority = priority_map_5[priority][subpriority];
501 break;
502 }
503 flow->cur_verbs->attr->priority = priority;
504 }
506 /**
507 * Get a flow counter.
508 *
509 * @param[in] dev
510 * Pointer to Ethernet device.
511 * @param[in] shared
512 * Indicate if this counter is shared with other flows.
513 * @param[in] id
514 * Counter identifier.
515 *
516 * @return
517 * A pointer to the counter, NULL otherwise and rte_errno is set.
518 */
519 static struct mlx5_flow_counter *
520 mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
521 {
522 struct priv *priv = dev->data->dev_private;
523 struct mlx5_flow_counter *cnt;
525 LIST_FOREACH(cnt, &priv->flow_counters, next) {
526 if (!cnt->shared || cnt->shared != shared)
533 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
535 struct mlx5_flow_counter tmpl = {
538 .cs = mlx5_glue->create_counter_set
540 &(struct ibv_counter_set_init_attr){
541 .counter_set_id = id,
551 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
557 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
564 /**
565 * Release a flow counter.
566 *
567 * @param[in] counter
568 * Pointer to the counter handler.
569 */
570 static void
571 mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
572 {
573 if (--counter->ref_cnt == 0) {
574 claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
575 LIST_REMOVE(counter, next);
576 rte_free(counter);
577 }
578 }
580 /**
581 * Verify the @p attributes will be correctly understood by the NIC and store
582 * them in the @p flow if everything is correct.
583 *
584 * @param[in] dev
585 * Pointer to Ethernet device.
586 * @param[in] attributes
587 * Pointer to flow attributes.
588 * @param[in, out] flow
589 * Pointer to the rte_flow structure.
590 * @param[out] error
591 * Pointer to error structure.
592 *
593 * @return
594 * 0 on success, a negative errno value otherwise and rte_errno is set.
595 */
596 static int
597 mlx5_flow_attributes(struct rte_eth_dev *dev,
598 const struct rte_flow_attr *attributes,
599 struct rte_flow *flow,
600 struct rte_flow_error *error)
601 {
602 uint32_t priority_max =
603 ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
605 if (attributes->group)
606 return rte_flow_error_set(error, ENOTSUP,
607 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
609 "groups are not supported");
610 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
611 attributes->priority >= priority_max)
612 return rte_flow_error_set(error, ENOTSUP,
613 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
615 "priority out of range");
616 if (attributes->egress)
617 return rte_flow_error_set(error, ENOTSUP,
618 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
620 "egress is not supported");
621 if (attributes->transfer)
622 return rte_flow_error_set(error, ENOTSUP,
623 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
625 "transfer is not supported");
626 if (!attributes->ingress)
627 return rte_flow_error_set(error, ENOTSUP,
628 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
630 "ingress attribute is mandatory");
631 flow->attributes = *attributes;
632 if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
633 flow->attributes.priority = priority_max;
637 /**
638 * Verify the @p item specifications (spec, last, mask) are compatible with the
639 * NIC capabilities.
640 *
641 * @param[in] item
642 * Item specification.
643 * @param[in] mask
644 * @p item->mask or flow default bit-masks.
645 * @param[in] nic_mask
646 * Bit-masks covering supported fields by the NIC to compare with user mask.
647 * @param[in] size
648 * Bit-masks size in bytes.
649 * @param[out] error
650 * Pointer to error structure.
651 *
652 * @return
653 * 0 on success, a negative errno value otherwise and rte_errno is set.
654 */
655 static int
656 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
657 const uint8_t *mask,
658 const uint8_t *nic_mask,
659 unsigned int size,
660 struct rte_flow_error *error)
661 {
662 unsigned int i;
665 for (i = 0; i < size; ++i)
666 if ((nic_mask[i] | mask[i]) != nic_mask[i])
667 return rte_flow_error_set(error, ENOTSUP,
668 RTE_FLOW_ERROR_TYPE_ITEM,
670 "mask enables unsupported"
672 if (!item->spec && (item->mask || item->last))
673 return rte_flow_error_set(error, EINVAL,
674 RTE_FLOW_ERROR_TYPE_ITEM,
676 "mask/last without a spec is not"
678 if (item->spec && item->last) {
684 for (i = 0; i < size; ++i) {
685 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
686 last[i] = ((const uint8_t *)item->last)[i] & mask[i];
688 ret = memcmp(spec, last, size);
690 return rte_flow_error_set(error, ENOTSUP,
691 RTE_FLOW_ERROR_TYPE_ITEM,
693 "range is not supported");
698 /**
699 * Add a Verbs item specification into @p flow.
700 *
701 * @param[in, out] flow
702 * Pointer to flow structure.
703 * @param[in] src
704 * Specification to copy.
705 * @param[in] size
706 * Size in bytes of the specification to copy.
707 */
708 static void
709 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
710 {
711 struct mlx5_flow_verbs *verbs = flow->cur_verbs;
713 if (verbs->specs) {
714 void *dst;
716 dst = (void *)(verbs->specs + verbs->size);
717 memcpy(dst, src, size);
718 ++verbs->attr->num_of_specs;
719 }
720 verbs->size += size;
721 }
723 /**
724 * Adjust Verbs hash fields according to the @p flow information.
725 *
726 * @param[in, out] flow
727 * Pointer to flow structure.
728 * @param[in] tunnel
729 * 1 when the hash field is for a tunnel item.
730 * @param[in] layer_types
731 * ETH_RSS_* types.
732 * @param[in] hash_fields
733 * Item hash fields.
734 */
735 static void
736 mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
737 int tunnel __rte_unused,
738 uint32_t layer_types, uint64_t hash_fields)
739 {
740 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
741 hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
742 if (flow->rss.level == 2 && !tunnel)
743 hash_fields = 0;
744 else if (flow->rss.level < 2 && tunnel)
745 hash_fields = 0;
746 #endif
747 if (!(flow->rss.types & layer_types))
748 hash_fields = 0;
749 flow->cur_verbs->hash_fields |= hash_fields;
750 }
752 /**
753 * Convert the @p item into a Verbs specification after ensuring the NIC
754 * will understand and process it correctly.
755 * If the necessary size for the conversion is greater than the @p flow_size,
756 * nothing is written in @p flow; the validation is still performed.
757 *
758 * @param[in] item
759 * Item specification.
760 * @param[in, out] flow
761 * Pointer to flow structure.
762 * @param[in] flow_size
763 * Size in bytes of the available space in @p flow; if too small, nothing is
764 * written.
765 * @param[out] error
766 * Pointer to error structure.
767 *
768 * @return
769 * On success the number of bytes consumed/necessary; if the returned value
770 * is less than or equal to @p flow_size, the @p item has fully been
771 * converted, otherwise another call with this returned memory size should
772 * be done. On error, a negative errno value is returned and rte_errno is set.
773 */
774 static int
775 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
776 const size_t flow_size, struct rte_flow_error *error)
777 {
778 const struct rte_flow_item_eth *spec = item->spec;
779 const struct rte_flow_item_eth *mask = item->mask;
780 const struct rte_flow_item_eth nic_mask = {
781 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
782 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
783 .type = RTE_BE16(0xffff),
784 };
785 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
786 const unsigned int size = sizeof(struct ibv_flow_spec_eth);
787 struct ibv_flow_spec_eth eth = {
788 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
789 .size = size,
790 };
791 int ret;
793 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
794 MLX5_FLOW_LAYER_OUTER_L2))
795 return rte_flow_error_set(error, ENOTSUP,
796 RTE_FLOW_ERROR_TYPE_ITEM,
798 "L2 layers already configured");
800 mask = &rte_flow_item_eth_mask;
801 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
802 (const uint8_t *)&nic_mask,
803 sizeof(struct rte_flow_item_eth),
807 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
808 MLX5_FLOW_LAYER_OUTER_L2;
809 if (size > flow_size)
814 memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
815 memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
816 eth.val.ether_type = spec->type;
817 memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
818 memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
819 eth.mask.ether_type = mask->type;
820 /* Remove unwanted bits from values. */
821 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
822 eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
823 eth.val.src_mac[i] &= eth.mask.src_mac[i];
825 eth.val.ether_type &= eth.mask.ether_type;
827 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
828 mlx5_flow_spec_verbs_add(flow, ð, size);
832 /**
833 * Update the VLAN tag in the Verbs Ethernet specification.
834 *
835 * @param[in, out] attr
836 * Pointer to Verbs attributes structure.
837 * @param[in] eth
838 * Verbs structure containing the VLAN information to copy.
839 */
840 static void
841 mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
842 struct ibv_flow_spec_eth *eth)
843 {
844 unsigned int i;
845 const enum ibv_flow_spec_type search = eth->type;
846 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
847 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
849 for (i = 0; i != attr->num_of_specs; ++i) {
850 if (hdr->type == search) {
851 struct ibv_flow_spec_eth *e =
852 (struct ibv_flow_spec_eth *)hdr;
854 e->val.vlan_tag = eth->val.vlan_tag;
855 e->mask.vlan_tag = eth->mask.vlan_tag;
856 e->val.ether_type = eth->val.ether_type;
857 e->mask.ether_type = eth->mask.ether_type;
860 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
864 /**
865 * Convert the @p item into @p flow (or by updating the already present
866 * Ethernet Verbs specification) after ensuring the NIC will understand and
867 * process it correctly.
868 * If the necessary size for the conversion is greater than the @p flow_size,
869 * nothing is written in @p flow; the validation is still performed.
870 *
871 * @param[in] item
872 * Item specification.
873 * @param[in, out] flow
874 * Pointer to flow structure.
875 * @param[in] flow_size
876 * Size in bytes of the available space in @p flow; if too small, nothing is
877 * written.
878 * @param[out] error
879 * Pointer to error structure.
880 *
881 * @return
882 * On success the number of bytes consumed/necessary; if the returned value
883 * is less than or equal to @p flow_size, the @p item has fully been
884 * converted, otherwise another call with this returned memory size should
885 * be done. On error, a negative errno value is returned and rte_errno is set.
886 */
887 static int
888 mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
889 const size_t flow_size, struct rte_flow_error *error)
890 {
891 const struct rte_flow_item_vlan *spec = item->spec;
892 const struct rte_flow_item_vlan *mask = item->mask;
893 const struct rte_flow_item_vlan nic_mask = {
894 .tci = RTE_BE16(0x0fff),
895 .inner_type = RTE_BE16(0xffff),
896 };
897 unsigned int size = sizeof(struct ibv_flow_spec_eth);
898 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
899 struct ibv_flow_spec_eth eth = {
900 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
901 .size = size,
902 };
903 int ret;
904 const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
905 MLX5_FLOW_LAYER_INNER_L4) :
906 (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
907 const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
908 MLX5_FLOW_LAYER_OUTER_VLAN;
909 const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
910 MLX5_FLOW_LAYER_OUTER_L2;
912 if (flow->layers & vlanm)
913 return rte_flow_error_set(error, ENOTSUP,
914 RTE_FLOW_ERROR_TYPE_ITEM,
916 "VLAN layer already configured");
917 else if ((flow->layers & l34m) != 0)
918 return rte_flow_error_set(error, ENOTSUP,
919 RTE_FLOW_ERROR_TYPE_ITEM,
921 "L2 layer cannot follow L3/L4 layer");
923 mask = &rte_flow_item_vlan_mask;
924 ret = mlx5_flow_item_acceptable
925 (item, (const uint8_t *)mask,
926 (const uint8_t *)&nic_mask,
927 sizeof(struct rte_flow_item_vlan), error);
931 eth.val.vlan_tag = spec->tci;
932 eth.mask.vlan_tag = mask->tci;
933 eth.val.vlan_tag &= eth.mask.vlan_tag;
934 eth.val.ether_type = spec->inner_type;
935 eth.mask.ether_type = mask->inner_type;
936 eth.val.ether_type &= eth.mask.ether_type;
938 /*
939 * From the Verbs perspective an empty VLAN is equivalent
940 * to a packet without a VLAN layer.
941 */
942 if (!eth.mask.vlan_tag)
943 return rte_flow_error_set(error, EINVAL,
944 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
946 "VLAN cannot be empty");
947 if (!(flow->layers & l2m)) {
948 if (size <= flow_size) {
949 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
950 mlx5_flow_spec_verbs_add(flow, ð, size);
954 mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
956 size = 0; /* Only an update is done in eth specification. */
958 flow->layers |= tunnel ?
959 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
960 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
964 /**
965 * Convert the @p item into a Verbs specification after ensuring the NIC
966 * will understand and process it correctly.
967 * If the necessary size for the conversion is greater than the @p flow_size,
968 * nothing is written in @p flow; the validation is still performed.
969 *
970 * @param[in] item
971 * Item specification.
972 * @param[in, out] flow
973 * Pointer to flow structure.
974 * @param[in] flow_size
975 * Size in bytes of the available space in @p flow; if too small, nothing is
976 * written.
977 * @param[out] error
978 * Pointer to error structure.
979 *
980 * @return
981 * On success the number of bytes consumed/necessary; if the returned value
982 * is less than or equal to @p flow_size, the @p item has fully been
983 * converted, otherwise another call with this returned memory size should
984 * be done. On error, a negative errno value is returned and rte_errno is set.
985 */
986 static int
987 mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
988 const size_t flow_size, struct rte_flow_error *error)
989 {
990 const struct rte_flow_item_ipv4 *spec = item->spec;
991 const struct rte_flow_item_ipv4 *mask = item->mask;
992 const struct rte_flow_item_ipv4 nic_mask = {
993 .hdr = {
994 .src_addr = RTE_BE32(0xffffffff),
995 .dst_addr = RTE_BE32(0xffffffff),
996 .type_of_service = 0xff,
997 .next_proto_id = 0xff,
998 },
999 };
1000 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1001 unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
1002 struct ibv_flow_spec_ipv4_ext ipv4 = {
1003 .type = IBV_FLOW_SPEC_IPV4_EXT |
1004 (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1005 .size = size,
1006 };
1007 int ret;
1009 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1010 MLX5_FLOW_LAYER_OUTER_L3))
1011 return rte_flow_error_set(error, ENOTSUP,
1012 RTE_FLOW_ERROR_TYPE_ITEM,
1014 "multiple L3 layers not supported");
1015 else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1016 MLX5_FLOW_LAYER_OUTER_L4))
1017 return rte_flow_error_set(error, ENOTSUP,
1018 RTE_FLOW_ERROR_TYPE_ITEM,
1020 "L3 cannot follow an L4 layer.");
1022 mask = &rte_flow_item_ipv4_mask;
1023 ret = mlx5_flow_item_acceptable
1024 (item, (const uint8_t *)mask,
1025 (const uint8_t *)&nic_mask,
1026 sizeof(struct rte_flow_item_ipv4), error);
1029 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1030 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1032 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
1033 .src_ip = spec->hdr.src_addr,
1034 .dst_ip = spec->hdr.dst_addr,
1035 .proto = spec->hdr.next_proto_id,
1036 .tos = spec->hdr.type_of_service,
1038 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
1039 .src_ip = mask->hdr.src_addr,
1040 .dst_ip = mask->hdr.dst_addr,
1041 .proto = mask->hdr.next_proto_id,
1042 .tos = mask->hdr.type_of_service,
1044 /* Remove unwanted bits from values. */
1045 ipv4.val.src_ip &= ipv4.mask.src_ip;
1046 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
1047 ipv4.val.proto &= ipv4.mask.proto;
1048 ipv4.val.tos &= ipv4.mask.tos;
1050 flow->l3_protocol_en = !!ipv4.mask.proto;
1051 flow->l3_protocol = ipv4.val.proto;
1052 if (size <= flow_size) {
1053 mlx5_flow_verbs_hashfields_adjust
1055 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
1056 ETH_RSS_NONFRAG_IPV4_TCP |
1057 ETH_RSS_NONFRAG_IPV4_UDP |
1058 ETH_RSS_NONFRAG_IPV4_OTHER),
1059 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
1060 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
1061 mlx5_flow_spec_verbs_add(flow, &ipv4, size);
1066 /**
1067 * Convert the @p item into a Verbs specification after ensuring the NIC
1068 * will understand and process it correctly.
1069 * If the necessary size for the conversion is greater than the @p flow_size,
1070 * nothing is written in @p flow; the validation is still performed.
1071 *
1072 * @param[in] item
1073 * Item specification.
1074 * @param[in, out] flow
1075 * Pointer to flow structure.
1076 * @param[in] flow_size
1077 * Size in bytes of the available space in @p flow; if too small, nothing is
1078 * written.
1079 * @param[out] error
1080 * Pointer to error structure.
1081 *
1082 * @return
1083 * On success the number of bytes consumed/necessary; if the returned value
1084 * is less than or equal to @p flow_size, the @p item has fully been
1085 * converted, otherwise another call with this returned memory size should
1086 * be done. On error, a negative errno value is returned and rte_errno is set.
1087 */
1088 static int
1089 mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
1090 const size_t flow_size, struct rte_flow_error *error)
1091 {
1092 const struct rte_flow_item_ipv6 *spec = item->spec;
1093 const struct rte_flow_item_ipv6 *mask = item->mask;
1094 const struct rte_flow_item_ipv6 nic_mask = {
1095 .hdr = {
1096 .src_addr =
1097 "\xff\xff\xff\xff\xff\xff\xff\xff"
1098 "\xff\xff\xff\xff\xff\xff\xff\xff",
1099 .dst_addr =
1100 "\xff\xff\xff\xff\xff\xff\xff\xff"
1101 "\xff\xff\xff\xff\xff\xff\xff\xff",
1102 .vtc_flow = RTE_BE32(0xffffffff),
1103 .proto = 0xff,
1104 .hop_limits = 0xff,
1105 },
1106 };
1107 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1108 unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
1109 struct ibv_flow_spec_ipv6 ipv6 = {
1110 .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1111 .size = size,
1112 };
1113 int ret;
1115 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1116 MLX5_FLOW_LAYER_OUTER_L3))
1117 return rte_flow_error_set(error, ENOTSUP,
1118 RTE_FLOW_ERROR_TYPE_ITEM,
1120 "multiple L3 layers not supported");
1121 else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1122 MLX5_FLOW_LAYER_OUTER_L4))
1123 return rte_flow_error_set(error, ENOTSUP,
1124 RTE_FLOW_ERROR_TYPE_ITEM,
1126 "L3 cannot follow an L4 layer.");
1127 /*
1128 * IPv6 is not recognised by the NIC inside a GRE tunnel.
1129 * Such support has to be disabled as the rule would be
1130 * accepted without actually matching. Issue reproduced with
1131 * Mellanox OFED 4.3-3.0.2.1 and Mellanox OFED 4.4-1.0.0.0.
1132 */
1133 if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
1134 return rte_flow_error_set(error, ENOTSUP,
1135 RTE_FLOW_ERROR_TYPE_ITEM,
1137 "IPv6 inside a GRE tunnel is"
1138 " not recognised.");
1140 mask = &rte_flow_item_ipv6_mask;
1141 ret = mlx5_flow_item_acceptable
1142 (item, (const uint8_t *)mask,
1143 (const uint8_t *)&nic_mask,
1144 sizeof(struct rte_flow_item_ipv6), error);
1147 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1148 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1151 uint32_t vtc_flow_val;
1152 uint32_t vtc_flow_mask;
1154 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
1155 RTE_DIM(ipv6.val.src_ip));
1156 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
1157 RTE_DIM(ipv6.val.dst_ip));
1158 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
1159 RTE_DIM(ipv6.mask.src_ip));
1160 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
1161 RTE_DIM(ipv6.mask.dst_ip));
1162 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
1163 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
1164 ipv6.val.flow_label =
1165 rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
1167 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
1169 ipv6.val.next_hdr = spec->hdr.proto;
1170 ipv6.val.hop_limit = spec->hdr.hop_limits;
1171 ipv6.mask.flow_label =
1172 rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
1174 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
1176 ipv6.mask.next_hdr = mask->hdr.proto;
1177 ipv6.mask.hop_limit = mask->hdr.hop_limits;
1178 /* Remove unwanted bits from values. */
1179 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
1180 ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
1181 ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
1183 ipv6.val.flow_label &= ipv6.mask.flow_label;
1184 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
1185 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
1186 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
1188 flow->l3_protocol_en = !!ipv6.mask.next_hdr;
1189 flow->l3_protocol = ipv6.val.next_hdr;
1190 if (size <= flow_size) {
1191 mlx5_flow_verbs_hashfields_adjust
1193 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
1194 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP |
1195 ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX |
1196 ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX),
1197 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
1198 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
1199 mlx5_flow_spec_verbs_add(flow, &ipv6, size);
1204 /**
1205 * Convert the @p item into a Verbs specification after ensuring the NIC
1206 * will understand and process it correctly.
1207 * If the necessary size for the conversion is greater than the @p flow_size,
1208 * nothing is written in @p flow; the validation is still performed.
1209 *
1210 * @param[in] item
1211 * Item specification.
1212 * @param[in, out] flow
1213 * Pointer to flow structure.
1214 * @param[in] flow_size
1215 * Size in bytes of the available space in @p flow; if too small, nothing is
1216 * written.
1217 * @param[out] error
1218 * Pointer to error structure.
1219 *
1220 * @return
1221 * On success the number of bytes consumed/necessary; if the returned value
1222 * is less than or equal to @p flow_size, the @p item has fully been
1223 * converted, otherwise another call with this returned memory size should
1224 * be done. On error, a negative errno value is returned and rte_errno is set.
1225 */
1226 static int
1227 mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
1228 const size_t flow_size, struct rte_flow_error *error)
1229 {
1230 const struct rte_flow_item_udp *spec = item->spec;
1231 const struct rte_flow_item_udp *mask = item->mask;
1232 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1233 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
1234 struct ibv_flow_spec_tcp_udp udp = {
1235 .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1236 .size = size,
1237 };
1238 int ret;
1240 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
1241 return rte_flow_error_set(error, ENOTSUP,
1242 RTE_FLOW_ERROR_TYPE_ITEM,
1244 "protocol filtering not compatible"
1246 if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1247 MLX5_FLOW_LAYER_OUTER_L3)))
1248 return rte_flow_error_set(error, ENOTSUP,
1249 RTE_FLOW_ERROR_TYPE_ITEM,
1251 "L3 is mandatory to filter"
1253 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1254 MLX5_FLOW_LAYER_OUTER_L4))
1255 return rte_flow_error_set(error, ENOTSUP,
1256 RTE_FLOW_ERROR_TYPE_ITEM,
1258 "L4 layer is already"
1261 mask = &rte_flow_item_udp_mask;
1262 ret = mlx5_flow_item_acceptable
1263 (item, (const uint8_t *)mask,
1264 (const uint8_t *)&rte_flow_item_udp_mask,
1265 sizeof(struct rte_flow_item_udp), error);
1268 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1269 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1271 udp.val.dst_port = spec->hdr.dst_port;
1272 udp.val.src_port = spec->hdr.src_port;
1273 udp.mask.dst_port = mask->hdr.dst_port;
1274 udp.mask.src_port = mask->hdr.src_port;
1275 /* Remove unwanted bits from values. */
1276 udp.val.src_port &= udp.mask.src_port;
1277 udp.val.dst_port &= udp.mask.dst_port;
1279 if (size <= flow_size) {
1280 mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
1281 (IBV_RX_HASH_SRC_PORT_UDP |
1282 IBV_RX_HASH_DST_PORT_UDP));
1283 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
1284 mlx5_flow_spec_verbs_add(flow, &udp, size);
1289 /**
1290 * Convert the @p item into a Verbs specification after ensuring the NIC
1291 * will understand and process it correctly.
1292 * If the necessary size for the conversion is greater than the @p flow_size,
1293 * nothing is written in @p flow; the validation is still performed.
1294 *
1295 * @param[in] item
1296 * Item specification.
1297 * @param[in, out] flow
1298 * Pointer to flow structure.
1299 * @param[in] flow_size
1300 * Size in bytes of the available space in @p flow; if too small, nothing is
1301 * written.
1302 * @param[out] error
1303 * Pointer to error structure.
1304 *
1305 * @return
1306 * On success the number of bytes consumed/necessary; if the returned value
1307 * is less than or equal to @p flow_size, the @p item has fully been
1308 * converted, otherwise another call with this returned memory size should
1309 * be done. On error, a negative errno value is returned and rte_errno is set.
1310 */
1311 static int
1312 mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
1313 const size_t flow_size, struct rte_flow_error *error)
1314 {
1315 const struct rte_flow_item_tcp *spec = item->spec;
1316 const struct rte_flow_item_tcp *mask = item->mask;
1317 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1318 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
1319 struct ibv_flow_spec_tcp_udp tcp = {
1320 .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1321 .size = size,
1322 };
1323 int ret;
1325 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
1326 return rte_flow_error_set(error, ENOTSUP,
1327 RTE_FLOW_ERROR_TYPE_ITEM,
1329 "protocol filtering not compatible"
1331 if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1332 MLX5_FLOW_LAYER_OUTER_L3)))
1333 return rte_flow_error_set(error, ENOTSUP,
1334 RTE_FLOW_ERROR_TYPE_ITEM,
1336 "L3 is mandatory to filter on L4");
1337 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1338 MLX5_FLOW_LAYER_OUTER_L4))
1339 return rte_flow_error_set(error, ENOTSUP,
1340 RTE_FLOW_ERROR_TYPE_ITEM,
1342 "L4 layer is already present");
1344 mask = &rte_flow_item_tcp_mask;
1345 ret = mlx5_flow_item_acceptable
1346 (item, (const uint8_t *)mask,
1347 (const uint8_t *)&rte_flow_item_tcp_mask,
1348 sizeof(struct rte_flow_item_tcp), error);
1351 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1352 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1354 tcp.val.dst_port = spec->hdr.dst_port;
1355 tcp.val.src_port = spec->hdr.src_port;
1356 tcp.mask.dst_port = mask->hdr.dst_port;
1357 tcp.mask.src_port = mask->hdr.src_port;
1358 /* Remove unwanted bits from values. */
1359 tcp.val.src_port &= tcp.mask.src_port;
1360 tcp.val.dst_port &= tcp.mask.dst_port;
1362 if (size <= flow_size) {
1363 mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
1364 (IBV_RX_HASH_SRC_PORT_TCP |
1365 IBV_RX_HASH_DST_PORT_TCP));
1366 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
1367 mlx5_flow_spec_verbs_add(flow, &tcp, size);
1372 /**
1373 * Convert the @p item into a Verbs specification after ensuring the NIC
1374 * will understand and process it correctly.
1375 * If the necessary size for the conversion is greater than the @p flow_size,
1376 * nothing is written in @p flow; the validation is still performed.
1377 *
1378 * @param[in] item
1379 * Item specification.
1380 * @param[in, out] flow
1381 * Pointer to flow structure.
1382 * @param[in] flow_size
1383 * Size in bytes of the available space in @p flow; if too small, nothing is
1384 * written.
1385 * @param[out] error
1386 * Pointer to error structure.
1387 *
1388 * @return
1389 * On success the number of bytes consumed/necessary; if the returned value
1390 * is less than or equal to @p flow_size, the @p item has fully been
1391 * converted, otherwise another call with this returned memory size should
1392 * be done. On error, a negative errno value is returned and rte_errno is set.
1393 */
1394 static int
1395 mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
1396 const size_t flow_size, struct rte_flow_error *error)
1397 {
1398 const struct rte_flow_item_vxlan *spec = item->spec;
1399 const struct rte_flow_item_vxlan *mask = item->mask;
1400 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1401 struct ibv_flow_spec_tunnel vxlan = {
1402 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
1403 .size = size,
1404 };
1405 int ret;
1406 union vni {
1407 uint32_t vlan_id;
1408 uint8_t vni[4];
1409 } id = { .vlan_id = 0, };
1411 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
1412 return rte_flow_error_set(error, ENOTSUP,
1413 RTE_FLOW_ERROR_TYPE_ITEM,
1415 "a tunnel is already present");
1416 /*
1417 * Verify only UDPv4 is present as defined in
1418 * https://tools.ietf.org/html/rfc7348.
1419 */
1420 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1421 return rte_flow_error_set(error, ENOTSUP,
1422 RTE_FLOW_ERROR_TYPE_ITEM,
1424 "no outer UDP layer found");
1426 mask = &rte_flow_item_vxlan_mask;
1427 ret = mlx5_flow_item_acceptable
1428 (item, (const uint8_t *)mask,
1429 (const uint8_t *)&rte_flow_item_vxlan_mask,
1430 sizeof(struct rte_flow_item_vxlan), error);
1434 memcpy(&id.vni[1], spec->vni, 3);
1435 vxlan.val.tunnel_id = id.vlan_id;
1436 memcpy(&id.vni[1], mask->vni, 3);
1437 vxlan.mask.tunnel_id = id.vlan_id;
1438 /* Remove unwanted bits from values. */
1439 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
1441 /*
1442 * Tunnel id 0 is equivalent to not adding a VXLAN layer; if
1443 * only this layer is defined in the Verbs specification, it is
1444 * interpreted as a wildcard and all packets will match this
1445 * rule. If it follows a full stack layer (ex: eth / ipv4 /
1446 * udp), all packets matching the layers before will also
1447 * match this rule. To avoid such a situation, VNI 0 is
1448 * currently refused.
1449 */
1450 if (!vxlan.val.tunnel_id)
1451 return rte_flow_error_set(error, EINVAL,
1452 RTE_FLOW_ERROR_TYPE_ITEM,
1454 "VXLAN vni cannot be 0");
1455 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
1456 return rte_flow_error_set(error, EINVAL,
1457 RTE_FLOW_ERROR_TYPE_ITEM,
1459 "VXLAN tunnel must be fully defined");
1460 if (size <= flow_size) {
1461 mlx5_flow_spec_verbs_add(flow, &vxlan, size);
1462 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1464 flow->layers |= MLX5_FLOW_LAYER_VXLAN;
1468 /**
1469 * Convert the @p item into a Verbs specification after ensuring the NIC
1470 * will understand and process it correctly.
1471 * If the necessary size for the conversion is greater than the @p flow_size,
1472 * nothing is written in @p flow; the validation is still performed.
1473 *
1474 * @param[in] dev
1475 * Pointer to Ethernet device.
1476 * @param[in] item
1477 * Item specification.
1478 * @param[in, out] flow
1479 * Pointer to flow structure.
1480 * @param[in] flow_size
1481 * Size in bytes of the available space in @p flow; if too small, nothing is
1482 * written.
1483 * @param[out] error
1484 * Pointer to error structure.
1485 *
1486 * @return
1487 * On success the number of bytes consumed/necessary; if the returned value
1488 * is less than or equal to @p flow_size, the @p item has fully been
1489 * converted, otherwise another call with this returned memory size should
1490 * be done. On error, a negative errno value is returned and rte_errno is set.
1491 */
1492 static int
1493 mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
1494 const struct rte_flow_item *item,
1495 struct rte_flow *flow, const size_t flow_size,
1496 struct rte_flow_error *error)
1497 {
1498 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1499 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1500 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1501 struct ibv_flow_spec_tunnel vxlan_gpe = {
1502 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
1503 .size = size,
1504 };
1505 int ret;
1506 union vni {
1507 uint32_t vlan_id;
1508 uint8_t vni[4];
1509 } id = { .vlan_id = 0, };
1511 if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
1512 return rte_flow_error_set(error, ENOTSUP,
1513 RTE_FLOW_ERROR_TYPE_ITEM,
1515 "L3 VXLAN is not enabled by device"
1516 " parameter and/or not configured in"
1518 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
1519 return rte_flow_error_set(error, ENOTSUP,
1520 RTE_FLOW_ERROR_TYPE_ITEM,
1522 "a tunnel is already present");
1523 /*
1524 * Verify only UDPv4 is present, as required by the VXLAN-GPE
1525 * specification (draft-ietf-nvo3-vxlan-gpe).
1526 */
1527 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1528 return rte_flow_error_set(error, ENOTSUP,
1529 RTE_FLOW_ERROR_TYPE_ITEM,
1531 "no outer UDP layer found");
1533 mask = &rte_flow_item_vxlan_gpe_mask;
1534 ret = mlx5_flow_item_acceptable
1535 (item, (const uint8_t *)mask,
1536 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1537 sizeof(struct rte_flow_item_vxlan_gpe), error);
1541 memcpy(&id.vni[1], spec->vni, 3);
1542 vxlan_gpe.val.tunnel_id = id.vlan_id;
1543 memcpy(&id.vni[1], mask->vni, 3);
1544 vxlan_gpe.mask.tunnel_id = id.vlan_id;
1546 return rte_flow_error_set
1548 RTE_FLOW_ERROR_TYPE_ITEM,
1550 "VXLAN-GPE protocol not supported");
1551 /* Remove unwanted bits from values. */
1552 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
1554 /*
1555 * Tunnel id 0 is equivalent to not adding a VXLAN layer; if only this
1556 * layer is defined in the Verbs specification, it is interpreted as a
1557 * wildcard and all packets will match this rule. If it follows a full
1558 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
1559 * before will also match this rule. To avoid such a situation, VNI 0
1560 * is currently refused.
1561 */
1562 if (!vxlan_gpe.val.tunnel_id)
1563 return rte_flow_error_set(error, EINVAL,
1564 RTE_FLOW_ERROR_TYPE_ITEM,
1566 "VXLAN-GPE vni cannot be 0");
1567 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
1568 return rte_flow_error_set(error, EINVAL,
1569 RTE_FLOW_ERROR_TYPE_ITEM,
1571 "VXLAN-GPE tunnel must be fully"
1573 if (size <= flow_size) {
1574 mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
1575 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1577 flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
1581 /**
1582 * Update the protocol in the Verbs IPv4/IPv6 spec.
1583 *
1584 * @param[in, out] attr
1585 * Pointer to Verbs attributes structure.
1586 * @param[in] search
1587 * Specification type to search in order to update the IP protocol.
1588 * @param[in] protocol
1589 * Protocol value to set if none is present in the specification.
1590 */
1591 static void
1592 mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
1593 enum ibv_flow_spec_type search,
1594 uint8_t protocol)
1595 {
1596 unsigned int i;
1597 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
1598 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
1602 for (i = 0; i != attr->num_of_specs; ++i) {
1603 if (hdr->type == search) {
1605 struct ibv_flow_spec_ipv4_ext *ipv4;
1606 struct ibv_flow_spec_ipv6 *ipv6;
1610 case IBV_FLOW_SPEC_IPV4_EXT:
1611 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
1612 if (!ip.ipv4->val.proto) {
1613 ip.ipv4->val.proto = protocol;
1614 ip.ipv4->mask.proto = 0xff;
1617 case IBV_FLOW_SPEC_IPV6:
1618 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
1619 if (!ip.ipv6->val.next_hdr) {
1620 ip.ipv6->val.next_hdr = protocol;
1621 ip.ipv6->mask.next_hdr = 0xff;
1629 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
1633 /**
1634 * Convert the @p item into a Verbs specification after ensuring the NIC
1635 * will understand and process it correctly.
1636 * It will also update the previous L3 layer with the protocol value matching
1637 * the GRE item.
1638 * If the necessary size for the conversion is greater than the @p flow_size,
1639 * nothing is written in @p flow; the validation is still performed.
1640 *
1641 * @param[in] item
1642 * Item specification.
1643 * @param[in, out] flow
1644 * Pointer to flow structure.
1645 * @param[in] flow_size
1646 * Size in bytes of the available space in @p flow; if too small, nothing is
1647 * written.
1648 * @param[out] error
1649 * Pointer to error structure.
1650 *
1651 * @return
1652 * On success the number of bytes consumed/necessary; if the returned value
1653 * is less than or equal to @p flow_size, the @p item has fully been
1654 * converted, otherwise another call with this returned memory size should
1655 * be done. On error, a negative errno value is returned and rte_errno is set.
1656 */
1657 static int
1660 mlx5_flow_item_gre(const struct rte_flow_item *item,
1661 struct rte_flow *flow, const size_t flow_size,
1662 struct rte_flow_error *error)
1663 {
1664 struct mlx5_flow_verbs *verbs = flow->cur_verbs;
1665 const struct rte_flow_item_gre *spec = item->spec;
1666 const struct rte_flow_item_gre *mask = item->mask;
1667 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1668 unsigned int size = sizeof(struct ibv_flow_spec_gre);
1669 struct ibv_flow_spec_gre tunnel = {
1670 .type = IBV_FLOW_SPEC_GRE,
1671 .size = size,
1672 };
1673 #else
1674 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1675 struct ibv_flow_spec_tunnel tunnel = {
1676 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
1677 .size = size,
1678 };
1679 #endif
1680 int ret;
1682 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
1683 return rte_flow_error_set(error, ENOTSUP,
1684 RTE_FLOW_ERROR_TYPE_ITEM,
1686 "protocol filtering not compatible"
1687 " with this GRE layer");
1688 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
1689 return rte_flow_error_set(error, ENOTSUP,
1690 RTE_FLOW_ERROR_TYPE_ITEM,
1692 "a tunnel is already present");
1693 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
1694 return rte_flow_error_set(error, ENOTSUP,
1695 RTE_FLOW_ERROR_TYPE_ITEM,
1697 "L3 Layer is missing");
1699 mask = &rte_flow_item_gre_mask;
1700 ret = mlx5_flow_item_acceptable
1701 (item, (const uint8_t *)mask,
1702 (const uint8_t *)&rte_flow_item_gre_mask,
1703 sizeof(struct rte_flow_item_gre), error);
1706 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1707 if (spec) {
1708 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
1709 tunnel.val.protocol = spec->protocol;
1710 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
1711 tunnel.mask.protocol = mask->protocol;
1712 /* Remove unwanted bits from values. */
1713 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
1714 tunnel.val.protocol &= tunnel.mask.protocol;
1715 tunnel.val.key &= tunnel.mask.key;
1716 }
1717 #else
1718 if (spec && (spec->protocol & mask->protocol))
1719 return rte_flow_error_set(error, ENOTSUP,
1720 RTE_FLOW_ERROR_TYPE_ITEM,
1722 "without MPLS support the"
1723 " specification cannot be used for"
1725 #endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
1726 if (size <= flow_size) {
1727 if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
1728 mlx5_flow_item_gre_ip_protocol_update
1729 (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
1730 MLX5_IP_PROTOCOL_GRE);
1732 mlx5_flow_item_gre_ip_protocol_update
1733 (verbs->attr, IBV_FLOW_SPEC_IPV6,
1734 MLX5_IP_PROTOCOL_GRE);
1735 mlx5_flow_spec_verbs_add(flow, &tunnel, size);
1736 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1738 flow->layers |= MLX5_FLOW_LAYER_GRE;
1742 /**
1743 * Convert the @p item into a Verbs specification after ensuring the NIC
1744 * will understand and process it correctly.
1745 * If the necessary size for the conversion is greater than the @p flow_size,
1746 * nothing is written in @p flow; the validation is still performed.
1747 *
1748 * @param[in] item
1749 * Item specification.
1750 * @param[in, out] flow
1751 * Pointer to flow structure.
1752 * @param[in] flow_size
1753 * Size in bytes of the available space in @p flow; if too small, nothing is
1754 * written.
1755 * @param[out] error
1756 * Pointer to error structure.
1757 *
1758 * @return
1759 * On success the number of bytes consumed/necessary; if the returned value
1760 * is less than or equal to @p flow_size, the @p item has fully been
1761 * converted, otherwise another call with this returned memory size should
1762 * be done. On error, a negative errno value is returned and rte_errno is set.
1763 */
1764 static int
1765 mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
1766 struct rte_flow *flow __rte_unused,
1767 const size_t flow_size __rte_unused,
1768 struct rte_flow_error *error)
1769 {
1770 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1771 const struct rte_flow_item_mpls *spec = item->spec;
1772 const struct rte_flow_item_mpls *mask = item->mask;
1773 unsigned int size = sizeof(struct ibv_flow_spec_mpls);
1774 struct ibv_flow_spec_mpls mpls = {
1775 .type = IBV_FLOW_SPEC_MPLS,
1776 .size = size,
1777 };
1778 int ret;
1780 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
1781 return rte_flow_error_set(error, ENOTSUP,
1782 RTE_FLOW_ERROR_TYPE_ITEM,
1784 "protocol filtering not compatible"
1785 " with MPLS layer");
1786 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
1787 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
1788 (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
1789 return rte_flow_error_set(error, ENOTSUP,
1790 RTE_FLOW_ERROR_TYPE_ITEM,
1792 "a tunnel is already"
1795 mask = &rte_flow_item_mpls_mask;
1796 ret = mlx5_flow_item_acceptable
1797 (item, (const uint8_t *)mask,
1798 (const uint8_t *)&rte_flow_item_mpls_mask,
1799 sizeof(struct rte_flow_item_mpls), error);
1803 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
1804 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
1805 /* Remove unwanted bits from values. */
1806 mpls.val.label &= mpls.mask.label;
1808 if (size <= flow_size) {
1809 mlx5_flow_spec_verbs_add(flow, &mpls, size);
1810 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1812 flow->layers |= MLX5_FLOW_LAYER_MPLS;
1813 return size;
1814 #endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
1815 return rte_flow_error_set(error, ENOTSUP,
1816 RTE_FLOW_ERROR_TYPE_ITEM,
1818 "MPLS is not supported by Verbs, please"
1822 /**
1823 * Convert the @p pattern into Verbs specifications after ensuring the NIC
1824 * will understand and process it correctly.
1825 * The conversion is performed item per item, each of them is written into
1826 * the @p flow if its size is less than or equal to @p flow_size.
1827 * Validation and memory consumption computation are still performed until the
1828 * end of @p pattern, unless an error is encountered.
1829 *
1830 * @param[in] dev
1831 * Pointer to Ethernet device.
1832 * @param[in] pattern
1833 * Flow pattern.
1834 * @param[in, out] flow
1835 * Pointer to the rte_flow structure.
1836 * @param[in] flow_size
1837 * Size in bytes of the available space in @p flow; if too small some
1838 * garbage may be present.
1839 * @param[out] error
1840 * Pointer to error structure.
1841 * @return
1842 * On success the number of bytes consumed/necessary; if the returned value
1843 * is less than or equal to @p flow_size, the @p pattern has fully been
1844 * converted, otherwise another call with this returned memory size should be
1845 * done. On error, a negative errno value is returned and rte_errno is set.
1846 */
1847 static int
1848 mlx5_flow_items(struct rte_eth_dev *dev,
1849 const struct rte_flow_item pattern[],
1850 struct rte_flow *flow, const size_t flow_size,
1851 struct rte_flow_error *error)
1852 {
1853 int remain = flow_size;
1854 size_t size = 0;
1856 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1858 int ret;
1859 switch (pattern->type) {
1860 case RTE_FLOW_ITEM_TYPE_VOID:
1861 break;
1862 case RTE_FLOW_ITEM_TYPE_ETH:
1863 ret = mlx5_flow_item_eth(pattern, flow, remain, error);
1864 break;
1865 case RTE_FLOW_ITEM_TYPE_VLAN:
1866 ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
1867 break;
1868 case RTE_FLOW_ITEM_TYPE_IPV4:
1869 ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
1870 break;
1871 case RTE_FLOW_ITEM_TYPE_IPV6:
1872 ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
1873 break;
1874 case RTE_FLOW_ITEM_TYPE_UDP:
1875 ret = mlx5_flow_item_udp(pattern, flow, remain, error);
1876 break;
1877 case RTE_FLOW_ITEM_TYPE_TCP:
1878 ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
1879 break;
1880 case RTE_FLOW_ITEM_TYPE_VXLAN:
1881 ret = mlx5_flow_item_vxlan(pattern, flow, remain,
1882 error);
1883 break;
1884 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1885 ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
1886 remain, error);
1887 break;
1888 case RTE_FLOW_ITEM_TYPE_GRE:
1889 ret = mlx5_flow_item_gre(pattern, flow, remain, error);
1890 break;
1891 case RTE_FLOW_ITEM_TYPE_MPLS:
1892 ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
1893 break;
1894 default:
1895 return rte_flow_error_set(error, ENOTSUP,
1896 RTE_FLOW_ERROR_TYPE_ITEM,
1898 "item not supported");
1908 if (!flow->layers) {
1909 const struct rte_flow_item item = {
1910 .type = RTE_FLOW_ITEM_TYPE_ETH,
1913 return mlx5_flow_item_eth(&item, flow, flow_size, error);
1918 /**
1919 * Convert the @p action into a Verbs specification after ensuring the NIC
1920 * will understand and process it correctly.
1921 * If the necessary size for the conversion is greater than the @p flow_size,
1922 * nothing is written in @p flow; the validation is still performed.
1923 *
1924 * @param[in] action
1925 * Action configuration.
1926 * @param[in, out] flow
1927 * Pointer to flow structure.
1928 * @param[in] flow_size
1929 * Size in bytes of the available space in @p flow; if too small, nothing is
1930 * written.
1931 * @param[out] error
1932 * Pointer to error structure.
1933 *
1934 * @return
1935 * On success the number of bytes consumed/necessary; if the returned value
1936 * is less than or equal to @p flow_size, the @p action has fully been
1937 * converted, otherwise another call with this returned memory size should
1938 * be done.
1939 * On error, a negative errno value is returned and rte_errno is set.
1940 */
1941 static int
1942 mlx5_flow_action_drop(const struct rte_flow_action *action,
1943 struct rte_flow *flow, const size_t flow_size,
1944 struct rte_flow_error *error)
1945 {
1946 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1947 struct ibv_flow_spec_action_drop drop = {
1948 .type = IBV_FLOW_SPEC_ACTION_DROP,
1949 .size = size,
1950 };
1952 if (flow->fate)
1953 return rte_flow_error_set(error, ENOTSUP,
1954 RTE_FLOW_ERROR_TYPE_ACTION,
1956 "multiple fate actions are not"
1958 if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
1959 return rte_flow_error_set(error, ENOTSUP,
1960 RTE_FLOW_ERROR_TYPE_ACTION,
1962 "drop is not compatible with"
1963 " flag/mark action");
1964 if (size < flow_size)
1965 mlx5_flow_spec_verbs_add(flow, &drop, size);
1966 flow->fate |= MLX5_FLOW_FATE_DROP;
/**
 * Convert the @p action into @p flow after ensuring the NIC will understand
 * and process it correctly.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
1987 mlx5_flow_action_queue(struct rte_eth_dev *dev,
1988 const struct rte_flow_action *action,
1989 struct rte_flow *flow,
1990 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "multiple fate actions are not"
					  " supported");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (flow->queue)
		(*flow->queue)[0] = queue->index;
	flow->rss.queue_num = 1;
	flow->fate |= MLX5_FLOW_FATE_QUEUE;
	return 0;
}
/**
 * Ensure the @p action will be understood and used correctly by the NIC.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 *   On success @p flow->queue array and @p flow->rss are filled and valid.
 */
static int
2035 mlx5_flow_action_rss(struct rte_eth_dev *dev,
2036 const struct rte_flow_action *action,
2037 struct rte_flow *flow,
2038 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	unsigned int i;

	if (flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "multiple fate actions are not"
					  " supported");
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (!rss->queue_num)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "no queues were provided for RSS");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i],
				 "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i],
				 "queue is not configured");
	}
	if (flow->queue)
		memcpy((*flow->queue), rss->queue,
		       rss->queue_num * sizeof(uint16_t));
	flow->rss.queue_num = rss->queue_num;
	memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
	flow->rss.types = rss->types;
	flow->rss.level = rss->level;
	flow->fate |= MLX5_FLOW_FATE_RSS;
	return 0;
}
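
/*
 * For illustration only (hypothetical values): an RSS action configuration
 * that satisfies the checks above, assuming four configured Rx queues and a
 * 40-byte hash key filled in by the application:
 *
 *	static uint16_t queue[] = { 0, 1, 2, 3 };
 *	static uint8_t key[MLX5_RSS_HASH_KEY_LEN];
 *	const struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.level = 1,
 *		.types = ETH_RSS_IP | ETH_RSS_UDP,
 *		.key_len = MLX5_RSS_HASH_KEY_LEN,
 *		.queue_num = RTE_DIM(queue),
 *		.key = key,
 *		.queue = queue,
 *	};
 */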
/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, the @p action has been fully
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
2140 mlx5_flow_action_flag(const struct rte_flow_action *action,
2141 struct rte_flow *flow, const size_t flow_size,
2142 struct rte_flow_error *error)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;

	if (flow->modifier & MLX5_FLOW_MOD_FLAG)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "flag action already present");
	if (flow->fate & MLX5_FLOW_FATE_DROP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "flag is not compatible with drop"
					  " action");
	if (flow->modifier & MLX5_FLOW_MOD_MARK)
		size = 0;
	else if (size <= flow_size && verbs)
		mlx5_flow_spec_verbs_add(flow, &tag, size);
	flow->modifier |= MLX5_FLOW_MOD_FLAG;
	return size;
}
/**
 * Update the Verbs specification to turn a previously-added flag action
 * into a mark action.
 *
 * @param[in, out] verbs
 *   Pointer to the mlx5_flow_verbs structure.
 * @param[in] mark_id
 *   Mark identifier to replace the flag.
 */
static void
mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
{
	struct ibv_spec_header *hdr;
	int i;

	if (!verbs)
		return;
	/* Update Verbs specification. */
	hdr = (struct ibv_spec_header *)verbs->specs;
	if (!hdr)
		return;
	for (i = 0; i != verbs->attr->num_of_specs; ++i) {
		if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
			struct ibv_flow_spec_action_tag *t =
				(struct ibv_flow_spec_action_tag *)hdr;

			t->tag_id = mlx5_flow_mark_set(mark_id);
		}
		hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
	}
}
/**
 * Convert the @p action into @p flow (or by updating the already present
 * Flag Verbs specification) after ensuring the NIC will understand and
 * process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, the @p action has been fully
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
2227 mlx5_flow_action_mark(const struct rte_flow_action *action,
2228 struct rte_flow *flow, const size_t flow_size,
2229 struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
	};
	struct mlx5_flow_verbs *verbs = flow->cur_verbs;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in range 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (flow->modifier & MLX5_FLOW_MOD_MARK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "mark action already present");
	if (flow->fate & MLX5_FLOW_FATE_DROP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "mark is not compatible with drop"
					  " action");
	if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
		mlx5_flow_verbs_mark_update(verbs, mark->id);
		size = 0;
	} else if (size <= flow_size) {
		tag.tag_id = mlx5_flow_mark_set(mark->id);
		mlx5_flow_spec_verbs_add(flow, &tag, size);
	}
	flow->modifier |= MLX5_FLOW_MOD_MARK;
	return size;
}
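
/*
 * For illustration: packets matching a flow with a MARK action carry the
 * identifier back to the application through the generic mbuf fields, while
 * flag-only flows set PKT_RX_FDIR without an identifier:
 *
 *	if (mbuf->ol_flags & PKT_RX_FDIR_ID)
 *		mark_id = mbuf->hash.fdir.hi;
 */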
/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, the @p action has been fully
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
2296 mlx5_flow_action_count(struct rte_eth_dev *dev,
2297 const struct rte_flow_action *action,
2298 struct rte_flow *flow,
2299 const size_t flow_size __rte_unused,
2300 struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = mlx5_flow_counter_new(dev, count->shared,
						      count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context");
	}
	if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "flow counters are not supported");
	flow->modifier |= MLX5_FLOW_MOD_COUNT;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	counter.counter_set_handle = flow->counter->cs->handle;
	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &counter, size);
	return size;
#endif
	return 0;
}
/**
 * Convert the @p actions into @p flow after ensuring the NIC will understand
 * and process it correctly.
 * The conversion is performed action per action, each of them is written into
 * the @p flow if its size is less than or equal to @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end of @p actions, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] actions
 *   Pointer to flow actions array.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, @p actions has been fully
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
2364 mlx5_flow_actions(struct rte_eth_dev *dev,
2365 const struct rte_flow_action actions[],
2366 struct rte_flow *flow, const size_t flow_size,
2367 struct rte_flow_error *error)
{
	size_t size = 0;
	int remain = flow_size;
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_action_flag(actions, flow, remain,
						    error);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_action_mark(actions, flow, remain,
						    error);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_action_drop(actions, flow, remain,
						    error);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_action_queue(dev, actions, flow, error);
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_action_rss(dev, actions, flow, error);
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_action_count(dev, actions, flow,
						     remain, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
		if (remain > ret)
			remain -= ret;
		else
			remain = 0;
		size += ret;
	}
	if (!flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "no fate action found");
	return size;
}
/**
 * Validate a flow rule and fill the flow structure accordingly.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size of allocated space for @p flow.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the size of the flow object in bytes
 *   regardless of @p flow_size on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_flow_merge_switch(struct rte_eth_dev *dev,
		       struct rte_flow *flow,
		       size_t flow_size,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
	uint16_t port_id[!n + n];
	struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
	size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
	unsigned int i;
	unsigned int own = 0;
	int ret;

	/* At least one port is needed when no switch domain is present. */
	if (!n) {
		n = 1;
		port_id[0] = dev->data->port_id;
	} else {
		n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
	}
	for (i = 0; i != n; ++i) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(port_id[i], &dev_info);
		if (port_id[i] == dev->data->port_id)
			own = i;
		ptoi[i].port_id = port_id[i];
		ptoi[i].ifindex = dev_info.if_index;
	}
	/* Ensure first entry of ptoi[] is the current device. */
	if (own) {
		ptoi[n] = ptoi[0];
		ptoi[0] = ptoi[own];
		ptoi[own] = ptoi[n];
	}
	/* An entry with zero ifindex terminates ptoi[]. */
	ptoi[n].port_id = 0;
	ptoi[n].ifindex = 0;
	if (flow_size < off)
		flow_size = 0;
	ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
				     flow_size ? flow_size - off : 0,
				     ptoi, attr, pattern, actions, error);
	if (ret < 0)
		return ret;
	if (flow_size) {
		*flow = (struct rte_flow){
			.attributes = *attr,
			.nl_flow = (uint8_t *)flow + off,
		};
		/*
		 * Generate a reasonably unique handle based on the address
		 * of the target buffer.
		 *
		 * This is straightforward on 32-bit systems where the flow
		 * pointer can be used directly. Otherwise, its least
		 * significant part is taken after shifting it by the
		 * previous power of two of the pointed buffer size.
		 */
		if (sizeof(flow) <= 4)
			mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
		else
			mlx5_nl_flow_brand
				(flow->nl_flow,
				 (uintptr_t)flow >>
				 rte_log2_u32(rte_align32prevpow2(flow_size)));
	}
	return off + ret;
}
/**
 * Find a feasible RSS expansion graph root for @p pattern.
 *
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] rss_level
 *   RSS encapsulation level; 2 and above request inner (tunnel) RSS.
 *
 * @return
 *   An enum mlx5_expansion graph root.
 */
static uint32_t
mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
	const struct rte_flow_item *item;
	unsigned int has_vlan = 0;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			has_vlan = 1;
			break;
		}
	}
	if (has_vlan)
		return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
				       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
	return rss_level < 2 ? MLX5_EXPANSION_ROOT :
			       MLX5_EXPANSION_ROOT_OUTER;
}
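
/*
 * For illustration: with an RSS action covering TCP and UDP, the expansion
 * driven by the graph root selected above turns a pattern such as
 * "eth / ipv4 / end" into several entries, roughly:
 *
 *	eth / ipv4 / end
 *	eth / ipv4 / udp / end
 *	eth / ipv4 / tcp / end
 *
 * Each entry is then converted into its own Verbs flow sharing the same
 * fate and queues.
 */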
/**
 * Convert the @p attributes, @p pattern and @p actions into a flow for the
 * NIC after ensuring the NIC will understand and process it correctly.
 * The conversion is only performed item/action per item/action, each of
 * them is written into the @p flow if its size is less than or equal to
 * @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[in] attributes
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   On success the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, the flow has fully been converted
 *   and can be applied, otherwise another call with this returned memory
 *   size should be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
2570 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
2571 const size_t flow_size,
2572 const struct rte_flow_attr *attributes,
2573 const struct rte_flow_item pattern[],
2574 const struct rte_flow_action actions[],
2575 struct rte_flow_error *error)
{
	struct rte_flow local_flow = { .layers = 0, };
	size_t size = sizeof(*flow);
	union {
		struct rte_flow_expand_rss buf;
		uint8_t buffer[2048];
	} expand_buffer;
	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
	struct mlx5_flow_verbs *original_verbs = NULL;
	size_t original_verbs_size = 0;
	uint32_t original_layers = 0;
	int expanded_pattern_idx = 0;
	int ret;
	uint32_t i;

	if (attributes->transfer)
		return mlx5_flow_merge_switch(dev, flow, flow_size,
					      attributes, pattern,
					      actions, error);
	if (size > flow_size)
		flow = &local_flow;
	ret = mlx5_flow_attributes(dev, attributes, flow, error);
	if (ret < 0)
		return ret;
	ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
	if (ret < 0)
		return ret;
	if (local_flow.rss.types) {
		unsigned int graph_root;

		graph_root = mlx5_find_graph_root(pattern,
						  local_flow.rss.level);
		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
					  pattern, local_flow.rss.types,
					  mlx5_support_expansion,
					  graph_root);
		assert(ret > 0 &&
		       (unsigned int)ret < sizeof(expand_buffer.buffer));
	} else {
		buf->entries = 1;
		buf->entry[0].pattern = (void *)(uintptr_t)pattern;
	}
	size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
			       sizeof(void *));
	if (size <= flow_size)
		flow->queue = (void *)(flow + 1);
	LIST_INIT(&flow->verbs);
	for (i = 0; i != buf->entries; ++i) {
		size_t off = size;
		size_t off2;

		flow->layers = original_layers;
		size += sizeof(struct ibv_flow_attr) +
			sizeof(struct mlx5_flow_verbs);
		off2 = size;
		if (size < flow_size) {
			flow->cur_verbs = (void *)((uintptr_t)flow + off);
			flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
			flow->cur_verbs->specs =
				(void *)(flow->cur_verbs->attr + 1);
		}
		/* First iteration convert the pattern into Verbs. */
		if (i == 0) {
			/* Actions don't need to be converted several times. */
			ret = mlx5_flow_actions(dev, actions, flow,
						(size < flow_size) ?
						flow_size - size : 0,
						error);
			if (ret < 0)
				return ret;
			size += ret;
		} else {
			/*
			 * Next iteration means the pattern has already been
			 * converted and an expansion is necessary to match
			 * the user RSS request. For that only the expanded
			 * items will be converted; the common part with the
			 * user pattern is just copied into the next buffer
			 * zone.
			 */
			size += original_verbs_size;
			if (size < flow_size) {
				rte_memcpy(flow->cur_verbs->attr,
					   original_verbs->attr,
					   original_verbs_size +
					   sizeof(struct ibv_flow_attr));
				flow->cur_verbs->size = original_verbs_size;
			}
		}
		ret = mlx5_flow_items
			(dev,
			 (const struct rte_flow_item *)
			 &buf->entry[i].pattern[expanded_pattern_idx],
			 flow,
			 (size < flow_size) ? flow_size - size : 0, error);
		if (ret < 0)
			return ret;
		size += ret;
		if (size <= flow_size) {
			mlx5_flow_adjust_priority(dev, flow);
			LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
		}
		/*
		 * Keep a pointer to the first Verbs conversion and the layers
		 * it has encountered.
		 */
		if (i == 0) {
			original_verbs = flow->cur_verbs;
			original_verbs_size = size - off2;
			original_layers = flow->layers;
			/*
			 * Move the index of the expanded pattern to the
			 * first item not addressed yet.
			 */
			if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
				expanded_pattern_idx++;
			} else {
				const struct rte_flow_item *item;

				for (item = pattern;
				     item->type != RTE_FLOW_ITEM_TYPE_END;
				     ++item)
					expanded_pattern_idx++;
			}
		}
	}
	/* Restore the original layers in the flow. */
	flow->layers = original_layers;
	return size;
}
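
/*
 * For illustration: callers use the two-pass convention implemented above,
 * first probing for the required size with an empty buffer, then converting
 * for real; this is what mlx5_flow_list_create() does further below:
 *
 *	int size = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, &err);
 *	struct rte_flow *flow = rte_calloc(__func__, 1, size, 0);
 *	(void)mlx5_flow_merge(dev, flow, size, attr, items, actions, &err);
 *
 * Error handling is omitted here; the second call cannot run out of room.
 */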
/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single
 * tunnel ptype can be used; if several tunnel rules with different ptypes
 * share this queue, the tunnel ptype is reset to 0.
 *
 * @param[in, out] rxq_ctrl
 *   Rx queue to update.
 */
static void
mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	const int mark = !!(flow->modifier &
			    (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel & flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}
/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	const int mark = !!(flow->modifier &
			    (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
	unsigned int i;

	assert(dev->data->dev_started);
	for (i = 0; i != flow->rss.queue_num; ++i) {
		int idx = (*flow->queue)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel & flow->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}
/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);

	if (ret < 0)
		return ret;
	return 0;
}
/**
 * Remove the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_flow_verbs *verbs;

	if (flow->nl_flow && priv->mnl_socket)
		mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
	LIST_FOREACH(verbs, &flow->verbs, next) {
		if (verbs->flow) {
			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
			verbs->flow = NULL;
		}
		if (verbs->hrxq) {
			if (flow->fate & MLX5_FLOW_FATE_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, verbs->hrxq);
			verbs->hrxq = NULL;
		}
	}
	if (flow->counter) {
		mlx5_flow_counter_release(flow->counter);
		flow->counter = NULL;
	}
}
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_flow_verbs *verbs;
	int err;

	LIST_FOREACH(verbs, &flow->verbs, next) {
		if (flow->fate & MLX5_FLOW_FATE_DROP) {
			verbs->hrxq = mlx5_hrxq_drop_new(dev);
			if (!verbs->hrxq) {
				rte_flow_error_set
					(error, errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					 NULL,
					 "cannot get drop hash queue");
				goto error;
			}
		} else {
			struct mlx5_hrxq *hrxq;

			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     verbs->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new(dev, flow->key,
						     MLX5_RSS_HASH_KEY_LEN,
						     verbs->hash_fields,
						     (*flow->queue),
						     flow->rss.queue_num,
						     !!(flow->layers &
							MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					 NULL,
					 "cannot get hash queue");
				goto error;
			}
			verbs->hrxq = hrxq;
		}
		verbs->flow =
			mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
		if (!verbs->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	if (flow->nl_flow &&
	    priv->mnl_socket &&
	    mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
		goto error;
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(verbs, &flow->verbs, next) {
		if (verbs->hrxq) {
			if (flow->fate & MLX5_FLOW_FATE_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, verbs->hrxq);
			verbs->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
 */
3016 static struct rte_flow *
3017 mlx5_flow_list_create(struct rte_eth_dev *dev,
3018 struct mlx5_flows *list,
3019 const struct rte_flow_attr *attr,
3020 const struct rte_flow_item items[],
3021 const struct rte_flow_action actions[],
3022 struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	size_t size = 0;
	int ret;

	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
	if (ret < 0)
		return NULL;
	size = ret;
	flow = rte_calloc(__func__, 1, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
	if (ret < 0) {
		rte_free(flow);
		return NULL;
	}
	assert((size_t)ret == size);
	if (dev->data->dev_started) {
		ret = mlx5_flow_apply(dev, flow, error);
		if (ret < 0) {
			ret = rte_errno; /* Save rte_errno before cleanup. */
			if (flow) {
				mlx5_flow_remove(dev, flow);
				rte_free(flow);
			}
			rte_errno = ret; /* Restore rte_errno. */
			return NULL;
		}
	}
	TAILQ_INSERT_TAIL(list, flow, next);
	mlx5_flow_rxq_flags_set(dev, flow);
	return flow;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	return mlx5_flow_list_create
		(dev, &((struct priv *)dev->data->dev_private)->flows,
		 attr, items, actions, error);
}
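
/*
 * For illustration (application side, hypothetical values): creating a flow
 * through the public rte_flow API ends up in the function above:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (!flow)
 *		printf("flow creation failed: %s\n", err.message);
 */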
/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
		       struct rte_flow *flow)
{
	mlx5_flow_remove(dev, flow);
	TAILQ_REMOVE(list, flow, next);
	/*
	 * Update Rx queue flags only if port is started, otherwise it is
	 * already clean.
	 */
	if (dev->data->dev_started)
		mlx5_flow_rxq_flags_trim(dev, flow);
	rte_free(flow);
}
/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		mlx5_flow_list_destroy(dev, list, flow);
	}
}
/**
 * Remove all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
		mlx5_flow_remove(dev, flow);
	mlx5_flow_rxq_flags_clear(dev);
}
/**
 * Add all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;
	struct rte_flow_error error;
	int ret = 0;

	TAILQ_FOREACH(flow, list, next) {
		ret = mlx5_flow_apply(dev, flow, &error);
		if (ret < 0)
			goto error;
		mlx5_flow_rxq_flags_set(dev, flow);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_stop(dev, list);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DRV_LOG(DEBUG, "port %u flow %p still referenced",
			dev->data->port_id, (void *)flow);
		++ret;
	}
	return ret;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
3216 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
3217 struct rte_flow_item_eth *eth_spec,
3218 struct rte_flow_item_eth *eth_mask,
3219 struct rte_flow_item_vlan *vlan_spec,
3220 struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = MLX5_FLOW_PRIO_RSVD,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
					      RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
				     actions, &error);
	if (!flow)
		return -rte_errno;
	return 0;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
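
/*
 * For illustration: the port start code uses this helper to install its
 * default control flows, e.g. to let broadcast traffic through:
 *
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	claim_zero(mlx5_ctrl_flow(dev, &bcast, &bcast));
 */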
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_destroy(dev, &priv->flows, flow);
	return 0;
}
/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
	return 0;
}
/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;
	return 0;
}
/**
 * Query flow counter.
 *
 * @param[in] flow
 *   Pointer to the flow.
 * @param[out] data
 *   Pointer to the counter query result (struct rte_flow_query_count).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
		      void *data __rte_unused,
		      struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
		struct ibv_query_counter_set_attr query_cs_attr = {
			.cs = flow->counter->cs,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);

		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - flow->counter->hits;
		qc->bytes = counters[1] - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = counters[0];
			flow->counter->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#endif
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
int
mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_query_count(flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
/**
 * Convert a flow director filter to a generic flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
3467 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
3468 const struct rte_eth_fdir_filter *fdir_filter,
3469 struct mlx5_fdir *attributes)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_eth_fdir_input *input = &fdir_filter->input;
	const struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u invalid queue number %d",
			dev->data->port_id, fdir_filter->action.rx_queue);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	attributes->attr.ingress = 1;
	attributes->items[0] = (struct rte_flow_item) {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &attributes->l2,
		.mask = &attributes->l2_mask,
	};
	switch (fdir_filter->action.behavior) {
	case RTE_ETH_FDIR_ACCEPT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &attributes->queue,
		};
		break;
	case RTE_ETH_FDIR_REJECT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_DROP,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid behavior %d",
			dev->data->port_id,
			fdir_filter->action.behavior);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	attributes->queue.index = fdir_filter->action.rx_queue;
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = input->flow.ip4_flow.src_ip,
			.dst_addr = input->flow.ip4_flow.dst_ip,
			.time_to_live = input->flow.ip4_flow.ttl,
			.type_of_service = input->flow.ip4_flow.tos,
			.next_proto_id = input->flow.ip4_flow.proto,
		};
		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = mask->ipv4_mask.src_ip,
			.dst_addr = mask->ipv4_mask.dst_ip,
			.time_to_live = mask->ipv4_mask.ttl,
			.type_of_service = mask->ipv4_mask.tos,
			.next_proto_id = mask->ipv4_mask.proto,
		};
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
			.hop_limits = input->flow.ipv6_flow.hop_limits,
			.proto = input->flow.ipv6_flow.proto,
		};
		memcpy(attributes->l3.ipv6.hdr.src_addr,
		       input->flow.ipv6_flow.src_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3.ipv6.hdr.dst_addr,
		       input->flow.ipv6_flow.dst_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
		       mask->ipv6_mask.src_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
		       mask->ipv6_mask.dst_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV6,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp4_flow.src_port,
			.dst_port = input->flow.udp4_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp4_flow.src_port,
			.dst_port = input->flow.tcp4_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp6_flow.src_port,
			.dst_port = input->flow.udp6_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp6_flow.src_port,
			.dst_port = input->flow.tcp6_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
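
/*
 * For illustration only (hypothetical values): a flow director filter that
 * the conversion above turns into an ETH / IPV4 / UDP pattern with a QUEUE
 * action:
 *
 *	struct rte_eth_fdir_filter fdir = {
 *		.input = {
 *			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *			.flow.udp4_flow = {
 *				.src_port = rte_cpu_to_be_16(1234),
 *				.dst_port = rte_cpu_to_be_16(4321),
 *			},
 *		},
 *		.action = {
 *			.rx_queue = 3,
 *			.behavior = RTE_ETH_FDIR_ACCEPT,
 *		},
 *	};
 */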
/**
 * Add new flow director filter and store it in list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
3652 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
3653 const struct rte_eth_fdir_filter *fdir_filter)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_fdir attributes = {
		.attr.group = 0,
		.l2_mask = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		},
	};
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
	if (ret)
		return ret;
	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
				     attributes.items, attributes.actions,
				     &error);
	if (flow) {
		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
			(void *)flow);
		return 0;
	}
	return -rte_errno;
}
/**
 * Delete specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
			const struct rte_eth_fdir_filter *fdir_filter
			__rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}
/**
 * Update queue for specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret;

	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}
/**
 * Flush all filters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
}
/**
 * Get flow director information.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
3748 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
3751 &dev->data->dev_conf.fdir_conf.mask;
3753 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
3754 fdir_info->guarant_spc = 0;
3755 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
3756 fdir_info->max_flexpayload = 0;
3757 fdir_info->flow_types_mask[0] = 0;
3758 fdir_info->flex_payload_unit = 0;
3759 fdir_info->max_flex_payload_segment_num = 0;
3760 fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}
/**
 * Deal with flow director operations.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		    void *arg)
{
	enum rte_fdir_mode fdir_mode =
		dev->data->dev_conf.fdir_conf.mode;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		DRV_LOG(ERR, "port %u flow director mode %d not supported",
			dev->data->port_id, fdir_mode);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return mlx5_fdir_filter_add(dev, arg);
	case RTE_ETH_FILTER_UPDATE:
		return mlx5_fdir_filter_update(dev, arg);
	case RTE_ETH_FILTER_DELETE:
		return mlx5_fdir_filter_delete(dev, arg);
	case RTE_ETH_FILTER_FLUSH:
		mlx5_fdir_filter_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		mlx5_fdir_info_get(dev, arg);
		break;
	default:
		DRV_LOG(DEBUG, "port %u unknown operation %u",
			dev->data->port_id, filter_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		*(const void **)arg = &mlx5_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
	default:
		DRV_LOG(ERR, "port %u filter type (%d) not supported",
			dev->data->port_id, filter_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}