1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
34 /* Dev ops structures defined in mlx5.c. */
35 extern const struct eth_dev_ops mlx5_dev_ops;
36 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
38 /* Pattern outer layer bits. */
39 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
40 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
42 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
43 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
44 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
46 /* Pattern inner layer bits. */
47 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
48 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
49 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
50 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
51 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
52 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
54 /* Pattern tunnel layer bits. */
55 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
56 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
59 #define MLX5_FLOW_LAYER_OUTER_L3 \
60 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
61 #define MLX5_FLOW_LAYER_OUTER_L4 \
62 (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
63 #define MLX5_FLOW_LAYER_OUTER \
64 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
65 MLX5_FLOW_LAYER_OUTER_L4)
68 #define MLX5_FLOW_LAYER_TUNNEL \
69 (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
72 #define MLX5_FLOW_LAYER_INNER_L3 \
73 (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
74 #define MLX5_FLOW_LAYER_INNER_L4 \
75 (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
76 #define MLX5_FLOW_LAYER_INNER \
77 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
78 MLX5_FLOW_LAYER_INNER_L4)
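/*
 * Illustration (informative, not used by the code): a pattern such as
 * eth / ipv4 / udp / vxlan / eth / ipv4 / tcp leaves the flow layers
 * bit-field with MLX5_FLOW_LAYER_OUTER_L2, MLX5_FLOW_LAYER_OUTER_L3_IPV4,
 * MLX5_FLOW_LAYER_OUTER_L4_UDP, MLX5_FLOW_LAYER_VXLAN,
 * MLX5_FLOW_LAYER_INNER_L2, MLX5_FLOW_LAYER_INNER_L3_IPV4 and
 * MLX5_FLOW_LAYER_INNER_L4_TCP set, see the mlx5_flow_item_*() functions
 * below.
 */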
80 /* Actions that modify the fate of matching traffic. */
81 #define MLX5_FLOW_FATE_DROP (1u << 0)
82 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
83 #define MLX5_FLOW_FATE_RSS (1u << 2)
85 /* Actions that modify a matching packet. */
86 #define MLX5_FLOW_MOD_FLAG (1u << 0)
87 #define MLX5_FLOW_MOD_MARK (1u << 1)
89 /* Possible L3 layer protocols for filtering. */
90 #define MLX5_IP_PROTOCOL_TCP 6
91 #define MLX5_IP_PROTOCOL_UDP 17
93 /* Priority reserved for default flows. */
94 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
98 MLX5_EXPANSION_ROOT_OUTER,
99 MLX5_EXPANSION_OUTER_ETH,
100 MLX5_EXPANSION_OUTER_IPV4,
101 MLX5_EXPANSION_OUTER_IPV4_UDP,
102 MLX5_EXPANSION_OUTER_IPV4_TCP,
103 MLX5_EXPANSION_OUTER_IPV6,
104 MLX5_EXPANSION_OUTER_IPV6_UDP,
105 MLX5_EXPANSION_OUTER_IPV6_TCP,
106 MLX5_EXPANSION_VXLAN,
107 MLX5_EXPANSION_VXLAN_GPE,
110 MLX5_EXPANSION_IPV4_UDP,
111 MLX5_EXPANSION_IPV4_TCP,
113 MLX5_EXPANSION_IPV6_UDP,
114 MLX5_EXPANSION_IPV6_TCP,
117 /** Supported expansion of items. */
118 static const struct rte_flow_expand_node mlx5_support_expansion[] = {
119 [MLX5_EXPANSION_ROOT] = {
120 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
122 MLX5_EXPANSION_IPV6),
123 .type = RTE_FLOW_ITEM_TYPE_END,
125 [MLX5_EXPANSION_ROOT_OUTER] = {
126 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
127 MLX5_EXPANSION_OUTER_IPV4,
128 MLX5_EXPANSION_OUTER_IPV6),
129 .type = RTE_FLOW_ITEM_TYPE_END,
131 [MLX5_EXPANSION_OUTER_ETH] = {
132 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
133 MLX5_EXPANSION_OUTER_IPV6),
134 .type = RTE_FLOW_ITEM_TYPE_ETH,
137 [MLX5_EXPANSION_OUTER_IPV4] = {
138 .next = RTE_FLOW_EXPAND_RSS_NEXT
139 (MLX5_EXPANSION_OUTER_IPV4_UDP,
140 MLX5_EXPANSION_OUTER_IPV4_TCP),
141 .type = RTE_FLOW_ITEM_TYPE_IPV4,
142 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
143 ETH_RSS_NONFRAG_IPV4_OTHER,
145 [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
146 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
147 MLX5_EXPANSION_VXLAN_GPE),
148 .type = RTE_FLOW_ITEM_TYPE_UDP,
149 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
151 [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
152 .type = RTE_FLOW_ITEM_TYPE_TCP,
153 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
155 [MLX5_EXPANSION_OUTER_IPV6] = {
156 .next = RTE_FLOW_EXPAND_RSS_NEXT
157 (MLX5_EXPANSION_OUTER_IPV6_UDP,
158 MLX5_EXPANSION_OUTER_IPV6_TCP),
159 .type = RTE_FLOW_ITEM_TYPE_IPV6,
160 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
161 ETH_RSS_NONFRAG_IPV6_OTHER,
163 [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
164 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
165 MLX5_EXPANSION_VXLAN_GPE),
166 .type = RTE_FLOW_ITEM_TYPE_UDP,
167 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
169 [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
170 .type = RTE_FLOW_ITEM_TYPE_TCP,
171 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
173 [MLX5_EXPANSION_VXLAN] = {
174 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
175 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
177 [MLX5_EXPANSION_VXLAN_GPE] = {
178 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
180 MLX5_EXPANSION_IPV6),
181 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
183 [MLX5_EXPANSION_ETH] = {
184 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
185 MLX5_EXPANSION_IPV6),
186 .type = RTE_FLOW_ITEM_TYPE_ETH,
188 [MLX5_EXPANSION_IPV4] = {
189 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
190 MLX5_EXPANSION_IPV4_TCP),
191 .type = RTE_FLOW_ITEM_TYPE_IPV4,
192 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
193 ETH_RSS_NONFRAG_IPV4_OTHER,
195 [MLX5_EXPANSION_IPV4_UDP] = {
196 .type = RTE_FLOW_ITEM_TYPE_UDP,
197 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
199 [MLX5_EXPANSION_IPV4_TCP] = {
200 .type = RTE_FLOW_ITEM_TYPE_TCP,
201 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
203 [MLX5_EXPANSION_IPV6] = {
204 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
205 MLX5_EXPANSION_IPV6_TCP),
206 .type = RTE_FLOW_ITEM_TYPE_IPV6,
207 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
208 ETH_RSS_NONFRAG_IPV6_OTHER,
210 [MLX5_EXPANSION_IPV6_UDP] = {
211 .type = RTE_FLOW_ITEM_TYPE_UDP,
212 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
214 [MLX5_EXPANSION_IPV6_TCP] = {
215 .type = RTE_FLOW_ITEM_TYPE_TCP,
216 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
220 /** A Verbs flow specification and the resources attached to it. */
221 struct mlx5_flow_verbs {
222 LIST_ENTRY(mlx5_flow_verbs) next;
223 unsigned int size; /**< Size of the attribute. */
225 struct ibv_flow_attr *attr;
226 /**< Pointer to the Verbs flow attributes. */
227 uint8_t *specs; /**< Pointer to the specifications. */
229 struct ibv_flow *flow; /**< Verbs flow pointer. */
230 struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
231 uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
234 /* Flow structure. */
236 TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
237 struct rte_flow_attr attributes; /**< User flow attribute. */
238 uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
240 /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
242 /**< Bit-fields of present modifiers, see MLX5_FLOW_MOD_*. */
244 /**< Bit-fields of present fate, see MLX5_FLOW_FATE_*. */
245 uint8_t l3_protocol; /**< Valid when l3_protocol_en is set. */
246 LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
247 struct mlx5_flow_verbs *cur_verbs;
248 /**< Current Verbs flow structure being filled. */
249 struct rte_flow_action_rss rss;/**< RSS context. */
250 uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
251 uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
254 static const struct rte_flow_ops mlx5_flow_ops = {
255 .validate = mlx5_flow_validate,
256 .create = mlx5_flow_create,
257 .destroy = mlx5_flow_destroy,
258 .flush = mlx5_flow_flush,
259 .isolate = mlx5_flow_isolate,
262 /* Convert FDIR request to Generic flow. */
264 struct rte_flow_attr attr;
265 struct rte_flow_action actions[2];
266 struct rte_flow_item items[4];
267 struct rte_flow_item_eth l2;
268 struct rte_flow_item_eth l2_mask;
270 struct rte_flow_item_ipv4 ipv4;
271 struct rte_flow_item_ipv6 ipv6;
274 struct rte_flow_item_ipv4 ipv4;
275 struct rte_flow_item_ipv6 ipv6;
278 struct rte_flow_item_udp udp;
279 struct rte_flow_item_tcp tcp;
282 struct rte_flow_item_udp udp;
283 struct rte_flow_item_tcp tcp;
285 struct rte_flow_action_queue queue;
288 /* Verbs specification header. */
289 struct ibv_spec_header {
290 enum ibv_flow_spec_type type;
295 * Number of sub-priorities.
296 * For each kind of pattern matching, i.e. L2, L3, L4, to get correct
297 * matching on the NIC (firmware dependent), L4 must have the highest priority,
298 * followed by L3 and finally L2.
300 #define MLX5_PRIORITY_MAP_L2 2
301 #define MLX5_PRIORITY_MAP_L3 1
302 #define MLX5_PRIORITY_MAP_L4 0
303 #define MLX5_PRIORITY_MAP_MAX 3
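/*
 * Worked example (informative): with 8 Verbs priorities (priority_map_3
 * below), a flow created with attribute priority 1 whose deepest converted
 * layer is L3 ends up with Verbs priority
 * priority_map_3[1][MLX5_PRIORITY_MAP_L3] == 3, see
 * mlx5_flow_adjust_priority().
 */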
305 /* Map of Verbs to Flow priority with 8 Verbs priorities. */
306 static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
307 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
310 /* Map of Verbs to Flow priority with 16 Verbs priorities. */
311 static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
312 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
313 { 9, 10, 11 }, { 12, 13, 14 },
316 /* Tunnel information. */
317 struct mlx5_flow_tunnel_info {
318 uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
319 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
322 static struct mlx5_flow_tunnel_info tunnels_info[] = {
324 .tunnel = MLX5_FLOW_LAYER_VXLAN,
325 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
328 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
329 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
334 * Discover the maximum number of priorities available.
337 * Pointer to Ethernet device.
340 * number of supported flow priorities on success, a negative errno
341 * value otherwise and rte_errno is set.
344 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
347 struct ibv_flow_attr attr;
348 struct ibv_flow_spec_eth eth;
349 struct ibv_flow_spec_action_drop drop;
355 .type = IBV_FLOW_SPEC_ETH,
356 .size = sizeof(struct ibv_flow_spec_eth),
359 .size = sizeof(struct ibv_flow_spec_action_drop),
360 .type = IBV_FLOW_SPEC_ACTION_DROP,
363 struct ibv_flow *flow;
364 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
365 uint16_t vprio[] = { 8, 16 };
373 for (i = 0; i != RTE_DIM(vprio); i++) {
374 flow_attr.attr.priority = vprio[i] - 1;
375 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
378 claim_zero(mlx5_glue->destroy_flow(flow));
383 priority = RTE_DIM(priority_map_3);
386 priority = RTE_DIM(priority_map_5);
391 "port %u verbs maximum priority: %d expected 8/16",
392 dev->data->port_id, vprio[i]);
395 mlx5_hrxq_drop_release(dev);
396 DRV_LOG(INFO, "port %u flow maximum priority: %d",
397 dev->data->port_id, priority);
402 * Adjust flow priority.
405 * Pointer to Ethernet device.
407 * Pointer to an rte flow.
410 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
412 struct priv *priv = dev->data->dev_private;
413 uint32_t priority = flow->attributes.priority;
414 uint32_t subpriority = flow->cur_verbs->attr->priority;
416 switch (priv->config.flow_prio) {
417 case RTE_DIM(priority_map_3):
418 priority = priority_map_3[priority][subpriority];
420 case RTE_DIM(priority_map_5):
421 priority = priority_map_5[priority][subpriority];
424 flow->cur_verbs->attr->priority = priority;
428 * Verify the @p attributes will be correctly understood by the NIC and store
429 * them in the @p flow if everything is correct.
432 * Pointer to Ethernet device.
433 * @param[in] attributes
434 * Pointer to flow attributes.
435 * @param[in, out] flow
436 * Pointer to the rte_flow structure.
438 * Pointer to error structure.
441 * 0 on success, a negative errno value otherwise and rte_errno is set.
444 mlx5_flow_attributes(struct rte_eth_dev *dev,
445 const struct rte_flow_attr *attributes,
446 struct rte_flow *flow,
447 struct rte_flow_error *error)
449 uint32_t priority_max =
450 ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
452 if (attributes->group)
453 return rte_flow_error_set(error, ENOTSUP,
454 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
456 "groups are not supported");
457 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
458 attributes->priority >= priority_max)
459 return rte_flow_error_set(error, ENOTSUP,
460 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
462 "priority out of range");
463 if (attributes->egress)
464 return rte_flow_error_set(error, ENOTSUP,
465 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
467 "egress is not supported");
468 if (attributes->transfer)
469 return rte_flow_error_set(error, ENOTSUP,
470 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
472 "transfer is not supported");
473 if (!attributes->ingress)
474 return rte_flow_error_set(error, ENOTSUP,
475 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
477 "ingress attribute is mandatory");
478 flow->attributes = *attributes;
479 if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
480 flow->attributes.priority = priority_max;
485 * Verify the @p item specifications (spec, last, mask) are compatible with the
489 * Item specification.
491 * @p item->mask or flow default bit-masks.
492 * @param[in] nic_mask
493 * Bit-masks covering supported fields by the NIC to compare with user mask.
495 * Bit-masks size in bytes.
497 * Pointer to error structure.
500 * 0 on success, a negative errno value otherwise and rte_errno is set.
503 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
505 const uint8_t *nic_mask,
507 struct rte_flow_error *error)
512 for (i = 0; i < size; ++i)
513 if ((nic_mask[i] | mask[i]) != nic_mask[i])
514 return rte_flow_error_set(error, ENOTSUP,
515 RTE_FLOW_ERROR_TYPE_ITEM,
517 "mask enables unsupported"
519 if (!item->spec && (item->mask || item->last))
520 return rte_flow_error_set(error, EINVAL,
521 RTE_FLOW_ERROR_TYPE_ITEM,
523 "mask/last without a spec is not"
525 if (item->spec && item->last) {
531 for (i = 0; i < size; ++i) {
532 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
533 last[i] = ((const uint8_t *)item->last)[i] & mask[i];
535 ret = memcmp(spec, last, size);
537 return rte_flow_error_set(error, ENOTSUP,
538 RTE_FLOW_ERROR_TYPE_ITEM,
540 "range is not supported");
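/*
 * Informative example: an IPv4 item whose spec->hdr.dst_addr differs from
 * last->hdr.dst_addr once the mask is applied describes a range and is
 * therefore rejected by the memcmp() check above.
 */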
546 * Add a Verbs item specification into @p flow.
548 * @param[in, out] flow
549 * Pointer to flow structure.
551 * Pointer to the specification to copy.
553 * Size in bytes of the specification to copy.
556 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
558 struct mlx5_flow_verbs *verbs = flow->cur_verbs;
563 dst = (void *)(verbs->specs + verbs->size);
564 memcpy(dst, src, size);
565 ++verbs->attr->num_of_specs;
571 * Adjust Verbs hash fields according to the @p flow information.
573 * @param[in, out] flow
574 * Pointer to flow structure.
576 * 1 when the hash field is for a tunnel item.
577 * @param[in] layer_types
579 * @param[in] hash_fields
583 mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
584 int tunnel __rte_unused,
585 uint32_t layer_types, uint64_t hash_fields)
587 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
588 hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
589 if (flow->rss.level == 2 && !tunnel)
591 else if (flow->rss.level < 2 && tunnel)
594 if (!(flow->rss.types & layer_types))
596 flow->cur_verbs->hash_fields |= hash_fields;
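/*
 * Informative example (assuming tunnel offload support is compiled in): for
 * an inner IPv4 item under VXLAN with rss.level == 2 and ETH_RSS_IPV4
 * requested, IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_INNER
 * is added to the Verbs hash fields, while with rss.level < 2 the same item
 * contributes nothing.
 */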
600 * Convert the @p item into a Verbs specification after ensuring the NIC
601 * will understand and process it correctly.
602 * If the necessary size for the conversion is greater than the @p flow_size,
603 * nothing is written in @p flow, the validation is still performed.
606 * Item specification.
607 * @param[in, out] flow
608 * Pointer to flow structure.
609 * @param[in] flow_size
610 * Size in bytes of the available space in @p flow, if too small, nothing is
613 * Pointer to error structure.
616 * On success the number of bytes consumed/necessary, if the returned value
617 * is less than or equal to @p flow_size, the @p item has been fully converted,
618 * otherwise another call with this returned memory size should be done.
619 * On error, a negative errno value is returned and rte_errno is set.
622 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
623 const size_t flow_size, struct rte_flow_error *error)
625 const struct rte_flow_item_eth *spec = item->spec;
626 const struct rte_flow_item_eth *mask = item->mask;
627 const struct rte_flow_item_eth nic_mask = {
628 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
629 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
630 .type = RTE_BE16(0xffff),
632 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
633 const unsigned int size = sizeof(struct ibv_flow_spec_eth);
634 struct ibv_flow_spec_eth eth = {
635 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
640 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
641 MLX5_FLOW_LAYER_OUTER_L2))
642 return rte_flow_error_set(error, ENOTSUP,
643 RTE_FLOW_ERROR_TYPE_ITEM,
645 "L2 layers already configured");
647 mask = &rte_flow_item_eth_mask;
648 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
649 (const uint8_t *)&nic_mask,
650 sizeof(struct rte_flow_item_eth),
654 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
655 MLX5_FLOW_LAYER_OUTER_L2;
656 if (size > flow_size)
661 memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
662 memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
663 eth.val.ether_type = spec->type;
664 memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
665 memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
666 eth.mask.ether_type = mask->type;
667 /* Remove unwanted bits from values. */
668 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
669 eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
670 eth.val.src_mac[i] &= eth.mask.src_mac[i];
672 eth.val.ether_type &= eth.mask.ether_type;
674 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
675 mlx5_flow_spec_verbs_add(flow, ð, size);
680 * Update the VLAN tag in the Verbs Ethernet specification.
682 * @param[in, out] attr
683 * Pointer to Verbs attributes structure.
685 * Verbs structure containing the VLAN information to copy.
688 mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
689 struct ibv_flow_spec_eth *eth)
692 const enum ibv_flow_spec_type search = eth->type;
693 struct ibv_spec_header *hdr = (struct ibv_spec_header *)
694 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
696 for (i = 0; i != attr->num_of_specs; ++i) {
697 if (hdr->type == search) {
698 struct ibv_flow_spec_eth *e =
699 (struct ibv_flow_spec_eth *)hdr;
701 e->val.vlan_tag = eth->val.vlan_tag;
702 e->mask.vlan_tag = eth->mask.vlan_tag;
703 e->val.ether_type = eth->val.ether_type;
704 e->mask.ether_type = eth->mask.ether_type;
707 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
712 * Convert the @p item into a Verbs specification in @p flow (or update the
713 * Ethernet Verbs specification already present) after ensuring the NIC will
714 * understand and process it correctly.
715 * If the necessary size for the conversion is greater than the @p flow_size,
716 * nothing is written in @p flow, the validation is still performed.
719 * Item specification.
720 * @param[in, out] flow
721 * Pointer to flow structure.
722 * @param[in] flow_size
723 * Size in bytes of the available space in @p flow, if too small, nothing is
726 * Pointer to error structure.
729 * On success the number of bytes consumed/necessary, if the returned value
730 * is less than or equal to @p flow_size, the @p item has been fully converted,
731 * otherwise another call with this returned memory size should be done.
732 * On error, a negative errno value is returned and rte_errno is set.
735 mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
736 const size_t flow_size, struct rte_flow_error *error)
738 const struct rte_flow_item_vlan *spec = item->spec;
739 const struct rte_flow_item_vlan *mask = item->mask;
740 const struct rte_flow_item_vlan nic_mask = {
741 .tci = RTE_BE16(0x0fff),
742 .inner_type = RTE_BE16(0xffff),
744 unsigned int size = sizeof(struct ibv_flow_spec_eth);
745 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
746 struct ibv_flow_spec_eth eth = {
747 .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
751 const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
752 MLX5_FLOW_LAYER_INNER_L4) :
753 (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
754 const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
755 MLX5_FLOW_LAYER_OUTER_VLAN;
756 const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
757 MLX5_FLOW_LAYER_OUTER_L2;
759 if (flow->layers & vlanm)
760 return rte_flow_error_set(error, ENOTSUP,
761 RTE_FLOW_ERROR_TYPE_ITEM,
763 "VLAN layer already configured");
764 else if ((flow->layers & l34m) != 0)
765 return rte_flow_error_set(error, ENOTSUP,
766 RTE_FLOW_ERROR_TYPE_ITEM,
768 "L2 layer cannot follow L3/L4 layer");
770 mask = &rte_flow_item_vlan_mask;
771 ret = mlx5_flow_item_acceptable
772 (item, (const uint8_t *)mask,
773 (const uint8_t *)&nic_mask,
774 sizeof(struct rte_flow_item_vlan), error);
778 eth.val.vlan_tag = spec->tci;
779 eth.mask.vlan_tag = mask->tci;
780 eth.val.vlan_tag &= eth.mask.vlan_tag;
781 eth.val.ether_type = spec->inner_type;
782 eth.mask.ether_type = mask->inner_type;
783 eth.val.ether_type &= eth.mask.ether_type;
786 * From a Verbs perspective, an empty VLAN is equivalent
787 * to a packet without a VLAN layer.
789 if (!eth.mask.vlan_tag)
790 return rte_flow_error_set(error, EINVAL,
791 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
793 "VLAN cannot be empty");
794 if (!(flow->layers & l2m)) {
795 if (size <= flow_size) {
796 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
797 mlx5_flow_spec_verbs_add(flow, ð, size);
801 mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
803 size = 0; /* Only an update is done in eth specification. */
805 flow->layers |= tunnel ?
806 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
807 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
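/*
 * Informative example: in a pattern like eth / vlan / ipv4 the VLAN item
 * does not create a new Verbs specification, it only patches the Ethernet
 * specification added just before (hence size = 0 above), whereas a pattern
 * starting directly with vlan creates the Ethernet specification itself.
 */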
812 * Convert the @p item into a Verbs specification after ensuring the NIC
813 * will understand and process it correctly.
814 * If the necessary size for the conversion is greater than the @p flow_size,
815 * nothing is written in @p flow, the validation is still performed.
818 * Item specification.
819 * @param[in, out] flow
820 * Pointer to flow structure.
821 * @param[in] flow_size
822 * Size in bytes of the available space in @p flow, if too small, nothing is
825 * Pointer to error structure.
828 * On success the number of bytes consumed/necessary, if the returned value
829 * is less than or equal to @p flow_size, the @p item has been fully converted,
830 * otherwise another call with this returned memory size should be done.
831 * On error, a negative errno value is returned and rte_errno is set.
834 mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
835 const size_t flow_size, struct rte_flow_error *error)
837 const struct rte_flow_item_ipv4 *spec = item->spec;
838 const struct rte_flow_item_ipv4 *mask = item->mask;
839 const struct rte_flow_item_ipv4 nic_mask = {
841 .src_addr = RTE_BE32(0xffffffff),
842 .dst_addr = RTE_BE32(0xffffffff),
843 .type_of_service = 0xff,
844 .next_proto_id = 0xff,
847 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
848 unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
849 struct ibv_flow_spec_ipv4_ext ipv4 = {
850 .type = IBV_FLOW_SPEC_IPV4_EXT |
851 (tunnel ? IBV_FLOW_SPEC_INNER : 0),
856 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
857 MLX5_FLOW_LAYER_OUTER_L3))
858 return rte_flow_error_set(error, ENOTSUP,
859 RTE_FLOW_ERROR_TYPE_ITEM,
861 "multiple L3 layers not supported");
862 else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
863 MLX5_FLOW_LAYER_OUTER_L4))
864 return rte_flow_error_set(error, ENOTSUP,
865 RTE_FLOW_ERROR_TYPE_ITEM,
867 "L3 cannot follow an L4 layer.");
869 mask = &rte_flow_item_ipv4_mask;
870 ret = mlx5_flow_item_acceptable
871 (item, (const uint8_t *)mask,
872 (const uint8_t *)&nic_mask,
873 sizeof(struct rte_flow_item_ipv4), error);
876 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
877 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
879 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
880 .src_ip = spec->hdr.src_addr,
881 .dst_ip = spec->hdr.dst_addr,
882 .proto = spec->hdr.next_proto_id,
883 .tos = spec->hdr.type_of_service,
885 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
886 .src_ip = mask->hdr.src_addr,
887 .dst_ip = mask->hdr.dst_addr,
888 .proto = mask->hdr.next_proto_id,
889 .tos = mask->hdr.type_of_service,
891 /* Remove unwanted bits from values. */
892 ipv4.val.src_ip &= ipv4.mask.src_ip;
893 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
894 ipv4.val.proto &= ipv4.mask.proto;
895 ipv4.val.tos &= ipv4.mask.tos;
897 flow->l3_protocol_en = !!ipv4.mask.proto;
898 flow->l3_protocol = ipv4.val.proto;
899 if (size <= flow_size) {
900 mlx5_flow_verbs_hashfields_adjust
902 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
903 ETH_RSS_NONFRAG_IPV4_OTHER),
904 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
905 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
906 mlx5_flow_spec_verbs_add(flow, &ipv4, size);
912 * Convert the @p item into a Verbs specification after ensuring the NIC
913 * will understand and process it correctly.
914 * If the necessary size for the conversion is greater than the @p flow_size,
915 * nothing is written in @p flow, the validation is still performed.
918 * Item specification.
919 * @param[in, out] flow
920 * Pointer to flow structure.
921 * @param[in] flow_size
922 * Size in bytes of the available space in @p flow, if too small, nothing is
925 * Pointer to error structure.
928 * On success the number of bytes consumed/necessary, if the returned value
929 * is less than or equal to @p flow_size, the @p item has been fully converted,
930 * otherwise another call with this returned memory size should be done.
931 * On error, a negative errno value is returned and rte_errno is set.
934 mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
935 const size_t flow_size, struct rte_flow_error *error)
937 const struct rte_flow_item_ipv6 *spec = item->spec;
938 const struct rte_flow_item_ipv6 *mask = item->mask;
939 const struct rte_flow_item_ipv6 nic_mask = {
942 "\xff\xff\xff\xff\xff\xff\xff\xff"
943 "\xff\xff\xff\xff\xff\xff\xff\xff",
945 "\xff\xff\xff\xff\xff\xff\xff\xff"
946 "\xff\xff\xff\xff\xff\xff\xff\xff",
947 .vtc_flow = RTE_BE32(0xffffffff),
952 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
953 unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
954 struct ibv_flow_spec_ipv6 ipv6 = {
955 .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
960 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
961 MLX5_FLOW_LAYER_OUTER_L3))
962 return rte_flow_error_set(error, ENOTSUP,
963 RTE_FLOW_ERROR_TYPE_ITEM,
965 "multiple L3 layers not supported");
966 else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
967 MLX5_FLOW_LAYER_OUTER_L4))
968 return rte_flow_error_set(error, ENOTSUP,
969 RTE_FLOW_ERROR_TYPE_ITEM,
971 "L3 cannot follow an L4 layer.");
973 mask = &rte_flow_item_ipv6_mask;
974 ret = mlx5_flow_item_acceptable
975 (item, (const uint8_t *)mask,
976 (const uint8_t *)&nic_mask,
977 sizeof(struct rte_flow_item_ipv6), error);
980 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
981 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
984 uint32_t vtc_flow_val;
985 uint32_t vtc_flow_mask;
987 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
988 RTE_DIM(ipv6.val.src_ip));
989 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
990 RTE_DIM(ipv6.val.dst_ip));
991 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
992 RTE_DIM(ipv6.mask.src_ip));
993 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
994 RTE_DIM(ipv6.mask.dst_ip));
995 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
996 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
997 ipv6.val.flow_label =
998 rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
1000 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
1002 ipv6.val.next_hdr = spec->hdr.proto;
1003 ipv6.val.hop_limit = spec->hdr.hop_limits;
1004 ipv6.mask.flow_label =
1005 rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
1007 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
1009 ipv6.mask.next_hdr = mask->hdr.proto;
1010 ipv6.mask.hop_limit = mask->hdr.hop_limits;
1011 /* Remove unwanted bits from values. */
1012 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
1013 ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
1014 ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
1016 ipv6.val.flow_label &= ipv6.mask.flow_label;
1017 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
1018 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
1019 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
1021 flow->l3_protocol_en = !!ipv6.mask.next_hdr;
1022 flow->l3_protocol = ipv6.val.next_hdr;
1023 if (size <= flow_size) {
1024 mlx5_flow_verbs_hashfields_adjust
1026 (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
1027 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
1028 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
1029 mlx5_flow_spec_verbs_add(flow, &ipv6, size);
1035 * Convert the @p item into a Verbs specification after ensuring the NIC
1036 * will understand and process it correctly.
1037 * If the necessary size for the conversion is greater than the @p flow_size,
1038 * nothing is written in @p flow, the validation is still performed.
1041 * Item specification.
1042 * @param[in, out] flow
1043 * Pointer to flow structure.
1044 * @param[in] flow_size
1045 * Size in bytes of the available space in @p flow, if too small, nothing is
1048 * Pointer to error structure.
1051 * On success the number of bytes consumed/necessary, if the returned value
1052 * is less than or equal to @p flow_size, the @p item has been fully converted,
1053 * otherwise another call with this returned memory size should be done.
1054 * On error, a negative errno value is returned and rte_errno is set.
1057 mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
1058 const size_t flow_size, struct rte_flow_error *error)
1060 const struct rte_flow_item_udp *spec = item->spec;
1061 const struct rte_flow_item_udp *mask = item->mask;
1062 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1063 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
1064 struct ibv_flow_spec_tcp_udp udp = {
1065 .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1070 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
1071 return rte_flow_error_set(error, ENOTSUP,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1074 "protocol filtering not compatible"
1076 if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1077 MLX5_FLOW_LAYER_OUTER_L3)))
1078 return rte_flow_error_set(error, ENOTSUP,
1079 RTE_FLOW_ERROR_TYPE_ITEM,
1081 "L3 is mandatory to filter"
1083 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1084 MLX5_FLOW_LAYER_OUTER_L4))
1085 return rte_flow_error_set(error, ENOTSUP,
1086 RTE_FLOW_ERROR_TYPE_ITEM,
1088 "L4 layer is already"
1091 mask = &rte_flow_item_udp_mask;
1092 ret = mlx5_flow_item_acceptable
1093 (item, (const uint8_t *)mask,
1094 (const uint8_t *)&rte_flow_item_udp_mask,
1095 sizeof(struct rte_flow_item_udp), error);
1098 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1099 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1101 udp.val.dst_port = spec->hdr.dst_port;
1102 udp.val.src_port = spec->hdr.src_port;
1103 udp.mask.dst_port = mask->hdr.dst_port;
1104 udp.mask.src_port = mask->hdr.src_port;
1105 /* Remove unwanted bits from values. */
1106 udp.val.src_port &= udp.mask.src_port;
1107 udp.val.dst_port &= udp.mask.dst_port;
1109 if (size <= flow_size) {
1110 mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
1111 (IBV_RX_HASH_SRC_PORT_UDP |
1112 IBV_RX_HASH_DST_PORT_UDP));
1113 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
1114 mlx5_flow_spec_verbs_add(flow, &udp, size);
1120 * Convert the @p item into a Verbs specification after ensuring the NIC
1121 * will understand and process it correctly.
1122 * If the necessary size for the conversion is greater than the @p flow_size,
1123 * nothing is written in @p flow, the validation is still performed.
1126 * Item specification.
1127 * @param[in, out] flow
1128 * Pointer to flow structure.
1129 * @param[in] flow_size
1130 * Size in bytes of the available space in @p flow, if too small, nothing is
1133 * Pointer to error structure.
1136 * On success the number of bytes consumed/necessary, if the returned value
1137 * is less than or equal to @p flow_size, the @p item has been fully converted,
1138 * otherwise another call with this returned memory size should be done.
1139 * On error, a negative errno value is returned and rte_errno is set.
1142 mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
1143 const size_t flow_size, struct rte_flow_error *error)
1145 const struct rte_flow_item_tcp *spec = item->spec;
1146 const struct rte_flow_item_tcp *mask = item->mask;
1147 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1148 unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
1149 struct ibv_flow_spec_tcp_udp tcp = {
1150 .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
1155 if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
1156 return rte_flow_error_set(error, ENOTSUP,
1157 RTE_FLOW_ERROR_TYPE_ITEM,
1159 "protocol filtering not compatible"
1161 if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1162 MLX5_FLOW_LAYER_OUTER_L3)))
1163 return rte_flow_error_set(error, ENOTSUP,
1164 RTE_FLOW_ERROR_TYPE_ITEM,
1166 "L3 is mandatory to filter on L4");
1167 if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1168 MLX5_FLOW_LAYER_OUTER_L4))
1169 return rte_flow_error_set(error, ENOTSUP,
1170 RTE_FLOW_ERROR_TYPE_ITEM,
1172 "L4 layer is already present");
1174 mask = &rte_flow_item_tcp_mask;
1175 ret = mlx5_flow_item_acceptable
1176 (item, (const uint8_t *)mask,
1177 (const uint8_t *)&rte_flow_item_tcp_mask,
1178 sizeof(struct rte_flow_item_tcp), error);
1181 flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1182 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1184 tcp.val.dst_port = spec->hdr.dst_port;
1185 tcp.val.src_port = spec->hdr.src_port;
1186 tcp.mask.dst_port = mask->hdr.dst_port;
1187 tcp.mask.src_port = mask->hdr.src_port;
1188 /* Remove unwanted bits from values. */
1189 tcp.val.src_port &= tcp.mask.src_port;
1190 tcp.val.dst_port &= tcp.mask.dst_port;
1192 if (size <= flow_size) {
1193 mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
1194 (IBV_RX_HASH_SRC_PORT_TCP |
1195 IBV_RX_HASH_DST_PORT_TCP));
1196 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
1197 mlx5_flow_spec_verbs_add(flow, &tcp, size);
1203 * Convert the @p item into a Verbs specification after ensuring the NIC
1204 * will understand and process it correctly.
1205 * If the necessary size for the conversion is greater than the @p flow_size,
1206 * nothing is written in @p flow, the validation is still performed.
1209 * Item specification.
1210 * @param[in, out] flow
1211 * Pointer to flow structure.
1212 * @param[in] flow_size
1213 * Size in bytes of the available space in @p flow, if too small, nothing is
1216 * Pointer to error structure.
1219 * On success the number of bytes consumed/necessary, if the returned value
1220 * is less than or equal to @p flow_size, the @p item has been fully converted,
1221 * otherwise another call with this returned memory size should be done.
1222 * On error, a negative errno value is returned and rte_errno is set.
1225 mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
1226 const size_t flow_size, struct rte_flow_error *error)
1228 const struct rte_flow_item_vxlan *spec = item->spec;
1229 const struct rte_flow_item_vxlan *mask = item->mask;
1230 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1231 struct ibv_flow_spec_tunnel vxlan = {
1232 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
1239 } id = { .vlan_id = 0, };
1241 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
1242 return rte_flow_error_set(error, ENOTSUP,
1243 RTE_FLOW_ERROR_TYPE_ITEM,
1245 "a tunnel is already present");
1247 * Verify only UDPv4 is present as defined in
1248 * https://tools.ietf.org/html/rfc7348
1250 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1251 return rte_flow_error_set(error, ENOTSUP,
1252 RTE_FLOW_ERROR_TYPE_ITEM,
1254 "no outer UDP layer found");
1256 mask = &rte_flow_item_vxlan_mask;
1257 ret = mlx5_flow_item_acceptable
1258 (item, (const uint8_t *)mask,
1259 (const uint8_t *)&rte_flow_item_vxlan_mask,
1260 sizeof(struct rte_flow_item_vxlan), error);
1264 memcpy(&id.vni[1], spec->vni, 3);
1265 vxlan.val.tunnel_id = id.vlan_id;
1266 memcpy(&id.vni[1], mask->vni, 3);
1267 vxlan.mask.tunnel_id = id.vlan_id;
1268 /* Remove unwanted bits from values. */
1269 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
1272 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
1273 * only this layer is defined in the Verbs specification, it is
1274 * interpreted as a wildcard and all packets will match this
1275 * rule; if it follows a full stack layer (e.g. eth / ipv4 /
1276 * udp), all packets matching the preceding layers will also
1277 * match this rule. To avoid such a situation, VNI 0 is
1278 * currently refused.
1280 if (!vxlan.val.tunnel_id)
1281 return rte_flow_error_set(error, EINVAL,
1282 RTE_FLOW_ERROR_TYPE_ITEM,
1284 "VXLAN vni cannot be 0");
1285 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
1286 return rte_flow_error_set(error, EINVAL,
1287 RTE_FLOW_ERROR_TYPE_ITEM,
1289 "VXLAN tunnel must be fully defined");
1290 if (size <= flow_size) {
1291 mlx5_flow_spec_verbs_add(flow, &vxlan, size);
1292 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1294 flow->layers |= MLX5_FLOW_LAYER_VXLAN;
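/*
 * Informative example: given the checks above, a valid VXLAN pattern looks
 * like eth / ipv4 / udp / vxlan with a non-zero VNI, optionally followed by
 * inner items; a bare vxlan item or a zero VNI is rejected.
 */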
1299 * Convert the @p item into a Verbs specification after ensuring the NIC
1300 * will understand and process it correctly.
1301 * If the necessary size for the conversion is greater than the @p flow_size,
1302 * nothing is written in @p flow, the validation is still performed.
1305 * Pointer to Ethernet device.
1307 * Item specification.
1308 * @param[in, out] flow
1309 * Pointer to flow structure.
1310 * @param[in] flow_size
1311 * Size in bytes of the available space in @p flow, if too small, nothing is
1314 * Pointer to error structure.
1317 * On success the number of bytes consumed/necessary, if the returned value
1318 * is less than or equal to @p flow_size, the @p item has been fully converted,
1319 * otherwise another call with this returned memory size should be done.
1320 * On error, a negative errno value is returned and rte_errno is set.
1323 mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
1324 const struct rte_flow_item *item,
1325 struct rte_flow *flow, const size_t flow_size,
1326 struct rte_flow_error *error)
1328 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1329 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1330 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1331 struct ibv_flow_spec_tunnel vxlan_gpe = {
1332 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
1339 } id = { .vlan_id = 0, };
1341 if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
1342 return rte_flow_error_set(error, ENOTSUP,
1343 RTE_FLOW_ERROR_TYPE_ITEM,
1345 "L3 VXLAN is not enabled by device"
1346 " parameter and/or not configured in"
1348 if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
1349 return rte_flow_error_set(error, ENOTSUP,
1350 RTE_FLOW_ERROR_TYPE_ITEM,
1352 "a tunnel is already present");
1354 * Verify only UDPv4 is present as defined in
1355 * https://tools.ietf.org/html/rfc7348
1357 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1358 return rte_flow_error_set(error, ENOTSUP,
1359 RTE_FLOW_ERROR_TYPE_ITEM,
1361 "no outer UDP layer found");
1363 mask = &rte_flow_item_vxlan_gpe_mask;
1364 ret = mlx5_flow_item_acceptable
1365 (item, (const uint8_t *)mask,
1366 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1367 sizeof(struct rte_flow_item_vxlan_gpe), error);
1371 memcpy(&id.vni[1], spec->vni, 3);
1372 vxlan_gpe.val.tunnel_id = id.vlan_id;
1373 memcpy(&id.vni[1], mask->vni, 3);
1374 vxlan_gpe.mask.tunnel_id = id.vlan_id;
1376 return rte_flow_error_set
1378 RTE_FLOW_ERROR_TYPE_ITEM,
1380 "VxLAN-GPE protocol not supported");
1381 /* Remove unwanted bits from values. */
1382 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
1385 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
1386 * layer is defined in the Verbs specification, it is interpreted as a
1387 * wildcard and all packets will match this rule; if it follows a full
1388 * stack layer (e.g. eth / ipv4 / udp), all packets matching the layers
1389 * before will also match this rule. To avoid such a situation, VNI 0
1390 * is currently refused.
1392 if (!vxlan_gpe.val.tunnel_id)
1393 return rte_flow_error_set(error, EINVAL,
1394 RTE_FLOW_ERROR_TYPE_ITEM,
1396 "VXLAN-GPE vni cannot be 0");
1397 if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
1398 return rte_flow_error_set(error, EINVAL,
1399 RTE_FLOW_ERROR_TYPE_ITEM,
1401 "VXLAN-GPE tunnel must be fully"
1403 if (size <= flow_size) {
1404 mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
1405 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
1407 flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
1412 * Convert the @p pattern into Verbs specifications after ensuring the NIC
1413 * will understand and process it correctly.
1414 * The conversion is performed item by item, each of them is written into
1415 * the @p flow if its size is less than or equal to @p flow_size.
1416 * Validation and memory consumption computation are still performed until the
1417 * end of @p pattern, unless an error is encountered.
1419 * @param[in] pattern
1421 * @param[in, out] flow
1422 * Pointer to the rte_flow structure.
1423 * @param[in] flow_size
1424 * Size in bytes of the available space in @p flow, if too small, some
1425 * garbage may be present.
1427 * Pointer to error structure.
1430 * On success the number of bytes consumed/necessary, if the returned value
1431 * is less than or equal to @p flow_size, the @p pattern has been fully
1432 * converted, otherwise another call with this returned memory size should
1434 * On error, a negative errno value is returned and rte_errno is set.
1437 mlx5_flow_items(struct rte_eth_dev *dev,
1438 const struct rte_flow_item pattern[],
1439 struct rte_flow *flow, const size_t flow_size,
1440 struct rte_flow_error *error)
1442 int remain = flow_size;
1445 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1448 switch (pattern->type) {
1449 case RTE_FLOW_ITEM_TYPE_VOID:
1451 case RTE_FLOW_ITEM_TYPE_ETH:
1452 ret = mlx5_flow_item_eth(pattern, flow, remain, error);
1454 case RTE_FLOW_ITEM_TYPE_VLAN:
1455 ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
1457 case RTE_FLOW_ITEM_TYPE_IPV4:
1458 ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
1460 case RTE_FLOW_ITEM_TYPE_IPV6:
1461 ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
1463 case RTE_FLOW_ITEM_TYPE_UDP:
1464 ret = mlx5_flow_item_udp(pattern, flow, remain, error);
1466 case RTE_FLOW_ITEM_TYPE_TCP:
1467 ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
1469 case RTE_FLOW_ITEM_TYPE_VXLAN:
1470 ret = mlx5_flow_item_vxlan(pattern, flow, remain,
1473 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1474 ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
1478 return rte_flow_error_set(error, ENOTSUP,
1479 RTE_FLOW_ERROR_TYPE_ITEM,
1481 "item not supported");
1491 if (!flow->layers) {
1492 const struct rte_flow_item item = {
1493 .type = RTE_FLOW_ITEM_TYPE_ETH,
1496 return mlx5_flow_item_eth(&item, flow, flow_size, error);
1502 * Convert the @p action into a Verbs specification after ensuring the NIC
1503 * will understand and process it correctly.
1504 * If the necessary size for the conversion is greater than the @p flow_size,
1505 * nothing is written in @p flow, the validation is still performed.
1508 * Action configuration.
1509 * @param[in, out] flow
1510 * Pointer to flow structure.
1511 * @param[in] flow_size
1512 * Size in bytes of the available space in @p flow, if too small, nothing is
1515 * Pointer to error structure.
1518 * On success the number of bytes consumed/necessary, if the returned value
1519 * is less than or equal to @p flow_size, the @p action has been fully
1520 * converted, otherwise another call with this returned memory size should
1522 * On error, a negative errno value is returned and rte_errno is set.
1525 mlx5_flow_action_drop(const struct rte_flow_action *action,
1526 struct rte_flow *flow, const size_t flow_size,
1527 struct rte_flow_error *error)
1529 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1530 struct ibv_flow_spec_action_drop drop = {
1531 .type = IBV_FLOW_SPEC_ACTION_DROP,
1536 return rte_flow_error_set(error, ENOTSUP,
1537 RTE_FLOW_ERROR_TYPE_ACTION,
1539 "multiple fate actions are not"
1541 if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
1542 return rte_flow_error_set(error, ENOTSUP,
1543 RTE_FLOW_ERROR_TYPE_ACTION,
1545 "drop is not compatible with"
1546 " flag/mark action");
1547 if (size <= flow_size)
1548 mlx5_flow_spec_verbs_add(flow, &drop, size);
1549 flow->fate |= MLX5_FLOW_FATE_DROP;
1554 * Convert the @p action into @p flow after ensuring the NIC will understand
1555 * and process it correctly.
1558 * Pointer to Ethernet device structure.
1560 * Action configuration.
1561 * @param[in, out] flow
1562 * Pointer to flow structure.
1564 * Pointer to error structure.
1567 * 0 on success, a negative errno value otherwise and rte_errno is set.
1570 mlx5_flow_action_queue(struct rte_eth_dev *dev,
1571 const struct rte_flow_action *action,
1572 struct rte_flow *flow,
1573 struct rte_flow_error *error)
1575 struct priv *priv = dev->data->dev_private;
1576 const struct rte_flow_action_queue *queue = action->conf;
1579 return rte_flow_error_set(error, ENOTSUP,
1580 RTE_FLOW_ERROR_TYPE_ACTION,
1582 "multiple fate actions are not"
1584 if (queue->index >= priv->rxqs_n)
1585 return rte_flow_error_set(error, EINVAL,
1586 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1588 "queue index out of range");
1589 if (!(*priv->rxqs)[queue->index])
1590 return rte_flow_error_set(error, EINVAL,
1591 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1593 "queue is not configured");
1595 (*flow->queue)[0] = queue->index;
1596 flow->rss.queue_num = 1;
1597 flow->fate |= MLX5_FLOW_FATE_QUEUE;
1602 * Ensure the @p action will be understood and used correctly by the NIC.
1605 * Pointer to Ethernet device structure.
1607 * Pointer to flow actions array.
1608 * @param[in, out] flow
1609 * Pointer to the rte_flow structure.
1610 * @param[in, out] error
1611 * Pointer to error structure.
1614 * On success @p flow->queue array and @p flow->rss are filled and valid.
1615 * On error, a negative errno value is returned and rte_errno is set.
1618 mlx5_flow_action_rss(struct rte_eth_dev *dev,
1619 const struct rte_flow_action *action,
1620 struct rte_flow *flow,
1621 struct rte_flow_error *error)
1623 struct priv *priv = dev->data->dev_private;
1624 const struct rte_flow_action_rss *rss = action->conf;
1628 return rte_flow_error_set(error, ENOTSUP,
1629 RTE_FLOW_ERROR_TYPE_ACTION,
1631 "multiple fate actions are not"
1633 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1634 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1635 return rte_flow_error_set(error, ENOTSUP,
1636 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1638 "RSS hash function not supported");
1639 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1644 return rte_flow_error_set(error, ENOTSUP,
1645 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1647 "tunnel RSS is not supported");
1648 if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1649 return rte_flow_error_set(error, ENOTSUP,
1650 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1652 "RSS hash key too small");
1653 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1654 return rte_flow_error_set(error, ENOTSUP,
1655 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1657 "RSS hash key too large");
1658 if (rss->queue_num > priv->config.ind_table_max_size)
1659 return rte_flow_error_set(error, ENOTSUP,
1660 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1662 "number of queues too large");
1663 if (rss->types & MLX5_RSS_HF_MASK)
1664 return rte_flow_error_set(error, ENOTSUP,
1665 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1667 "some RSS protocols are not"
1669 for (i = 0; i != rss->queue_num; ++i) {
1670 if (!(*priv->rxqs)[rss->queue[i]])
1671 return rte_flow_error_set
1673 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1675 "queue is not configured");
1678 memcpy((*flow->queue), rss->queue,
1679 rss->queue_num * sizeof(uint16_t));
1680 flow->rss.queue_num = rss->queue_num;
1681 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
1682 flow->rss.types = rss->types;
1683 flow->rss.level = rss->level;
1684 flow->fate |= MLX5_FLOW_FATE_RSS;
1689 * Convert the @p action into a Verbs specification after ensuring the NIC
1690 * will understand and process it correctly.
1691 * If the necessary size for the conversion is greater than the @p flow_size,
1692 * nothing is written in @p flow, the validation is still performed.
1695 * Action configuration.
1696 * @param[in, out] flow
1697 * Pointer to flow structure.
1698 * @param[in] flow_size
1699 * Size in bytes of the available space in @p flow, if too small, nothing is
1702 * Pointer to error structure.
1705 * On success the number of bytes consumed/necessary, if the returned value
1706 * is less than or equal to @p flow_size, the @p action has been fully
1707 * converted, otherwise another call with this returned memory size should
1709 * On error, a negative errno value is returned and rte_errno is set.
1712 mlx5_flow_action_flag(const struct rte_flow_action *action,
1713 struct rte_flow *flow, const size_t flow_size,
1714 struct rte_flow_error *error)
1716 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1717 struct ibv_flow_spec_action_tag tag = {
1718 .type = IBV_FLOW_SPEC_ACTION_TAG,
1720 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
1722 struct mlx5_flow_verbs *verbs = flow->cur_verbs;
1724 if (flow->modifier & MLX5_FLOW_MOD_FLAG)
1725 return rte_flow_error_set(error, ENOTSUP,
1726 RTE_FLOW_ERROR_TYPE_ACTION,
1728 "flag action already present");
1729 if (flow->fate & MLX5_FLOW_FATE_DROP)
1730 return rte_flow_error_set(error, ENOTSUP,
1731 RTE_FLOW_ERROR_TYPE_ACTION,
1733 "flag is not compatible with drop"
1735 if (flow->modifier & MLX5_FLOW_MOD_MARK)
1737 else if (size <= flow_size && verbs)
1738 mlx5_flow_spec_verbs_add(flow, &tag, size);
1739 flow->modifier |= MLX5_FLOW_MOD_FLAG;
1744 * Update the Verbs specification to turn the flag action into a mark action.
1746 * @param[in, out] verbs
1747 * Pointer to the mlx5_flow_verbs structure.
1748 * @param[in] mark_id
1749 * Mark identifier to replace the flag.
1752 mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
1754 struct ibv_spec_header *hdr;
1759 /* Update Verbs specification. */
1760 hdr = (struct ibv_spec_header *)verbs->specs;
1763 for (i = 0; i != verbs->attr->num_of_specs; ++i) {
1764 if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
1765 struct ibv_flow_spec_action_tag *t =
1766 (struct ibv_flow_spec_action_tag *)hdr;
1768 t->tag_id = mlx5_flow_mark_set(mark_id);
1770 hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
1775 * Convert the @p action into a Verbs specification in @p flow (or update the
1776 * flag Verbs specification already present) after ensuring the NIC will
1777 * understand and process it correctly.
1778 * If the necessary size for the conversion is greater than the @p flow_size,
1779 * nothing is written in @p flow, the validation is still performed.
1782 * Action configuration.
1783 * @param[in, out] flow
1784 * Pointer to flow structure.
1785 * @param[in] flow_size
1786 * Size in bytes of the available space in @p flow, if too small, nothing is
1789 * Pointer to error structure.
1792 * On success the number of bytes consumed/necessary, if the returned value
1793 * is less than or equal to @p flow_size, the @p action has been fully
1794 * converted, otherwise another call with this returned memory size should
1796 * On error, a negative errno value is returned and rte_errno is set.
1799 mlx5_flow_action_mark(const struct rte_flow_action *action,
1800 struct rte_flow *flow, const size_t flow_size,
1801 struct rte_flow_error *error)
1803 const struct rte_flow_action_mark *mark = action->conf;
1804 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1805 struct ibv_flow_spec_action_tag tag = {
1806 .type = IBV_FLOW_SPEC_ACTION_TAG,
1809 struct mlx5_flow_verbs *verbs = flow->cur_verbs;
1812 return rte_flow_error_set(error, EINVAL,
1813 RTE_FLOW_ERROR_TYPE_ACTION,
1815 "configuration cannot be null");
1816 if (mark->id >= MLX5_FLOW_MARK_MAX)
1817 return rte_flow_error_set(error, EINVAL,
1818 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1820 "mark id must be in 0 <= id < "
1821 RTE_STR(MLX5_FLOW_MARK_MAX));
1822 if (flow->modifier & MLX5_FLOW_MOD_MARK)
1823 return rte_flow_error_set(error, ENOTSUP,
1824 RTE_FLOW_ERROR_TYPE_ACTION,
1826 "mark action already present");
1827 if (flow->fate & MLX5_FLOW_FATE_DROP)
1828 return rte_flow_error_set(error, ENOTSUP,
1829 RTE_FLOW_ERROR_TYPE_ACTION,
1831 "mark is not compatible with drop"
1833 if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
1834 mlx5_flow_verbs_mark_update(verbs, mark->id);
1836 } else if (size <= flow_size) {
1837 tag.tag_id = mlx5_flow_mark_set(mark->id);
1838 mlx5_flow_spec_verbs_add(flow, &tag, size);
1840 flow->modifier |= MLX5_FLOW_MOD_MARK;
1845 * Convert the @p actions into @p flow after ensuring the NIC will understand
1846 * and process them correctly.
1847 * The conversion is performed action by action, each of them is written into
1848 * the @p flow if its size is less than or equal to @p flow_size.
1849 * Validation and memory consumption computation are still performed until the
1850 * end of @p actions, unless an error is encountered.
1853 * Pointer to Ethernet device structure.
1854 * @param[in] actions
1855 * Pointer to flow actions array.
1856 * @param[in, out] flow
1857 * Pointer to the rte_flow structure.
1858 * @param[in] flow_size
1859 * Size in bytes of the available space in @p flow, if too small, some
1860 * garbage may be present.
1862 * Pointer to error structure.
1865 * On success the number of bytes consumed/necessary, if the returned value
1866 * is less than or equal to @p flow_size, the @p actions have been fully
1867 * converted, otherwise another call with this returned memory size should
1869 * On error, a negative errno value is returned and rte_errno is set.
1872 mlx5_flow_actions(struct rte_eth_dev *dev,
1873 const struct rte_flow_action actions[],
1874 struct rte_flow *flow, const size_t flow_size,
1875 struct rte_flow_error *error)
1878 int remain = flow_size;
1881 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1882 switch (actions->type) {
1883 case RTE_FLOW_ACTION_TYPE_VOID:
1885 case RTE_FLOW_ACTION_TYPE_FLAG:
1886 ret = mlx5_flow_action_flag(actions, flow, remain,
1889 case RTE_FLOW_ACTION_TYPE_MARK:
1890 ret = mlx5_flow_action_mark(actions, flow, remain,
1893 case RTE_FLOW_ACTION_TYPE_DROP:
1894 ret = mlx5_flow_action_drop(actions, flow, remain,
1897 case RTE_FLOW_ACTION_TYPE_QUEUE:
1898 ret = mlx5_flow_action_queue(dev, actions, flow, error);
1900 case RTE_FLOW_ACTION_TYPE_RSS:
1901 ret = mlx5_flow_action_rss(dev, actions, flow, error);
1904 return rte_flow_error_set(error, ENOTSUP,
1905 RTE_FLOW_ERROR_TYPE_ACTION,
1907 "action not supported");
1918 return rte_flow_error_set(error, ENOTSUP,
1919 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1921 "no fate action found");
1926 * Convert the @p attributes, @p pattern and @p actions into a flow for the
1927 * NIC after ensuring the NIC will understand and process it correctly.
1928 * The conversion is performed item by item and action by action, each of
1929 * them is written into the @p flow if its size is less than or equal to @p
1931 * Validation and memory consumption computation are still performed until the
1932 * end, unless an error is encountered.
1935 * Pointer to Ethernet device.
1936 * @param[in, out] flow
1937 * Pointer to flow structure.
1938 * @param[in] flow_size
1939 * Size in bytes of the available space in @p flow, if too small, some
1940 * garbage may be present.
1941 * @param[in] attributes
1942 * Flow rule attributes.
1943 * @param[in] pattern
1944 * Pattern specification (list terminated by the END pattern item).
1945 * @param[in] actions
1946 * Associated actions (list terminated by the END action).
1948 * Perform verbose error reporting if not NULL.
1951 * On success the number of bytes consumed/necessary, if the returned value
1952 * is less than or equal to @p flow_size, the flow has been fully converted and
1953 * can be applied, otherwise another call with this returned memory size
1955 * On error, a negative errno value is returned and rte_errno is set.
1958 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
1959 const size_t flow_size,
1960 const struct rte_flow_attr *attributes,
1961 const struct rte_flow_item pattern[],
1962 const struct rte_flow_action actions[],
1963 struct rte_flow_error *error)
1965 struct rte_flow local_flow = { .layers = 0, };
1966 size_t size = sizeof(*flow);
1967 union {
1968 struct rte_flow_expand_rss buf;
1969 uint8_t buffer[2048];
1970 } expand_buffer;
1971 struct rte_flow_expand_rss *buf = &expand_buffer.buf;
1972 struct mlx5_flow_verbs *original_verbs = NULL;
1973 size_t original_verbs_size = 0;
1974 uint32_t original_layers = 0;
1975 int expanded_pattern_idx = 0;
1979 if (size > flow_size)
1981 ret = mlx5_flow_attributes(dev, attributes, flow, error);
1984 ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
1987 if (local_flow.rss.types) {
1988 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
1989 pattern, local_flow.rss.types,
1990 mlx5_support_expansion,
1991 local_flow.rss.level < 2 ?
1992 MLX5_EXPANSION_ROOT :
1993 MLX5_EXPANSION_ROOT_OUTER);
1994 assert(ret > 0 &&
1995 (unsigned int)ret < sizeof(expand_buffer.buffer));
1996 } else {
1997 buf->entries = 1;
1998 buf->entry[0].pattern = (void *)(uintptr_t)pattern;
1999 }
2000 size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
2001 sizeof(void *));
2002 if (size <= flow_size)
2003 flow->queue = (void *)(flow + 1);
2004 LIST_INIT(&flow->verbs);
2008 for (i = 0; i != buf->entries; ++i) {
2012 flow->layers = original_layers;
2013 size += sizeof(struct ibv_flow_attr) +
2014 sizeof(struct mlx5_flow_verbs);
2016 if (size < flow_size) {
2017 flow->cur_verbs = (void *)((uintptr_t)flow + off);
2018 flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
2019 flow->cur_verbs->specs =
2020 (void *)(flow->cur_verbs->attr + 1);
2022 /* First iteration convert the pattern into Verbs. */
2024 /* Actions don't need to be converted several times. */
2025 ret = mlx5_flow_actions(dev, actions, flow,
2026 (size < flow_size) ?
2027 flow_size - size : 0,
2034 * Next iteration means the pattern has already been
2035 * converted and an expansion is necessary to match
2036 * the user RSS request. For that only the expanded
2037 * items will be converted, the common part with the
2038 * user pattern are just copied into the next buffer
2039 * zone.
2040 */
2041 size += original_verbs_size;
2042 if (size < flow_size) {
2043 rte_memcpy(flow->cur_verbs->attr,
2044 original_verbs->attr,
2045 original_verbs_size +
2046 sizeof(struct ibv_flow_attr));
2047 flow->cur_verbs->size = original_verbs_size;
2050 ret = mlx5_flow_items
2052 (const struct rte_flow_item *)
2053 &buf->entry[i].pattern[expanded_pattern_idx],
2055 (size < flow_size) ? flow_size - size : 0, error);
2059 if (size <= flow_size) {
2060 mlx5_flow_adjust_priority(dev, flow);
2061 LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
2064 * Keep a pointer of the first verbs conversion and the layers
2065 * it has encountered.
2068 original_verbs = flow->cur_verbs;
2069 original_verbs_size = size - off2;
2070 original_layers = flow->layers;
2072 * move the index of the expanded pattern to the
2073 * first item not addressed yet.
2075 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2076 expanded_pattern_idx++;
2077 } else {
2078 const struct rte_flow_item *item = pattern;
2080 for (item = pattern;
2081 item->type != RTE_FLOW_ITEM_TYPE_END;
2082 ++item)
2083 expanded_pattern_idx++;
2084 }
2085 }
2086 }
2087 /* Restore the origin layers in the flow. */
2088 flow->layers = original_layers;
2089 return size;
2090 }
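/*
 * Illustrative sketch (not part of the driver): an RSS action spanning both
 * UDP and TCP. With a pattern that stops at IPv4, mlx5_flow_merge() runs it
 * through rte_flow_expand_rss() and ends up creating several Verbs flows
 * (eth/ipv4, eth/ipv4/udp, eth/ipv4/tcp) so each L4 type can be hashed.
 * Queue indices and hash types are example values.
 */
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss __rte_unused = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 1, /* outer headers */
	.types = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};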
2093 * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
2094 * if several tunnel rules are used on this queue, the tunnel ptype will be
2095 * 0.
2096 *
2097 * @param rxq_ctrl
2098 * Rx queue to update.
2101 mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
2104 uint32_t tunnel_ptype = 0;
2106 /* Look up the ptype to use. */
2107 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
2108 if (!rxq_ctrl->flow_tunnels_n[i])
2109 continue;
2110 if (!tunnel_ptype) {
2111 tunnel_ptype = tunnels_info[i].ptype;
2112 } else {
2113 tunnel_ptype = 0;
2114 break;
2115 }
2116 }
2117 rxq_ctrl->rxq.tunnel = tunnel_ptype;
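/*
 * Behavioral sketch of the resolution above (not driver code): given
 * per-tunnel reference counts on one Rx queue, a single active tunnel type
 * yields its ptype, while a mix yields 0 because rxq.tunnel can only
 * advertise one tunnel type at a time. Names are illustrative only.
 */
static uint32_t __rte_unused
example_resolve_tunnel_ptype(const uint32_t refs[], const uint32_t ptypes[],
			     unsigned int n)
{
	uint32_t resolved = 0;
	unsigned int i;

	for (i = 0; i != n; ++i) {
		if (!refs[i])
			continue;
		if (resolved)
			return 0; /* more than one tunnel type in use */
		resolved = ptypes[i];
	}
	return resolved;
}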
2121 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
2124 * Pointer to Ethernet device.
2126 * Pointer to flow structure.
2129 mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
2131 struct priv *priv = dev->data->dev_private;
2132 const int mark = !!(flow->modifier &
2133 (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
2134 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
2137 for (i = 0; i != flow->rss.queue_num; ++i) {
2138 int idx = (*flow->queue)[i];
2139 struct mlx5_rxq_ctrl *rxq_ctrl =
2140 container_of((*priv->rxqs)[idx],
2141 struct mlx5_rxq_ctrl, rxq);
2143 if (mark) {
2144 rxq_ctrl->rxq.mark = 1;
2145 rxq_ctrl->flow_mark_n++;
2146 }
2147 if (tunnel) {
2148 unsigned int j;
2150 /* Increase the counter matching the flow. */
2151 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
2152 if ((tunnels_info[j].tunnel & flow->layers) ==
2153 tunnels_info[j].tunnel) {
2154 rxq_ctrl->flow_tunnels_n[j]++;
2158 mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
2164 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
2165 * @p flow if no other flow uses it with the same kind of request.
2168 * Pointer to Ethernet device.
2170 * Pointer to the flow.
2173 mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
2175 struct priv *priv = dev->data->dev_private;
2176 const int mark = !!(flow->modifier &
2177 (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
2178 const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
2181 assert(dev->data->dev_started);
2182 for (i = 0; i != flow->rss.queue_num; ++i) {
2183 int idx = (*flow->queue)[i];
2184 struct mlx5_rxq_ctrl *rxq_ctrl =
2185 container_of((*priv->rxqs)[idx],
2186 struct mlx5_rxq_ctrl, rxq);
2188 if (mark) {
2189 rxq_ctrl->flow_mark_n--;
2190 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
2191 }
2192 if (tunnel) {
2193 unsigned int j;
2195 /* Decrease the counter matching the flow. */
2196 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
2197 if ((tunnels_info[j].tunnel & flow->layers) ==
2198 tunnels_info[j].tunnel) {
2199 rxq_ctrl->flow_tunnels_n[j]--;
2203 mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
2209 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
2212 * Pointer to Ethernet device.
2215 mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
2217 struct priv *priv = dev->data->dev_private;
2221 for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
2222 struct mlx5_rxq_ctrl *rxq_ctrl;
2225 if (!(*priv->rxqs)[idx])
2227 rxq_ctrl = container_of((*priv->rxqs)[idx],
2228 struct mlx5_rxq_ctrl, rxq);
2229 rxq_ctrl->flow_mark_n = 0;
2230 rxq_ctrl->rxq.mark = 0;
2231 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
2232 rxq_ctrl->flow_tunnels_n[j] = 0;
2233 rxq_ctrl->rxq.tunnel = 0;
2239 * Validate a flow supported by the NIC.
2241 * @see rte_flow_validate()
2245 mlx5_flow_validate(struct rte_eth_dev *dev,
2246 const struct rte_flow_attr *attr,
2247 const struct rte_flow_item items[],
2248 const struct rte_flow_action actions[],
2249 struct rte_flow_error *error)
2251 int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
2253 if (ret < 0)
2254 return ret;
2255 return 0;
2256 }
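/*
 * Illustrative sketch (not part of the driver): the validate-then-create
 * sequence an application is expected to follow. Validation goes through
 * mlx5_flow_merge() with a NULL flow and a zero size, so the whole rule is
 * checked without allocating anything. Names here are illustrative only.
 */
static int __rte_unused
example_validate_then_create(uint16_t port_id,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, attr, pattern, actions, &err))
		return -rte_errno; /* the PMD cannot handle this rule */
	return rte_flow_create(port_id, attr, pattern, actions, &err) ?
	       0 : -rte_errno;
}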
2262 * Pointer to Ethernet device.
2263 * @param[in, out] flow
2264 * Pointer to flow structure.
2267 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2269 struct mlx5_flow_verbs *verbs;
2271 LIST_FOREACH(verbs, &flow->verbs, next) {
2273 claim_zero(mlx5_glue->destroy_flow(verbs->flow));
2277 if (flow->fate & MLX5_FLOW_FATE_DROP)
2278 mlx5_hrxq_drop_release(dev);
2280 mlx5_hrxq_release(dev, verbs->hrxq);
2290 * Pointer to Ethernet device structure.
2291 * @param[in, out] flow
2292 * Pointer to flow structure.
2294 * Pointer to error structure.
2297 * 0 on success, a negative errno value otherwise and rte_errno is set.
2300 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2301 struct rte_flow_error *error)
2303 struct mlx5_flow_verbs *verbs;
2306 LIST_FOREACH(verbs, &flow->verbs, next) {
2307 if (flow->fate & MLX5_FLOW_FATE_DROP) {
2308 verbs->hrxq = mlx5_hrxq_drop_new(dev);
2312 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2314 "cannot get drop hash queue");
2318 struct mlx5_hrxq *hrxq;
2320 hrxq = mlx5_hrxq_get(dev, flow->key,
2321 MLX5_RSS_HASH_KEY_LEN,
2324 flow->rss.queue_num);
2326 hrxq = mlx5_hrxq_new(dev, flow->key,
2327 MLX5_RSS_HASH_KEY_LEN,
2330 flow->rss.queue_num);
2334 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2336 "cannot get hash queue");
2342 mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
2344 rte_flow_error_set(error, errno,
2345 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2347 "hardware refuses to create flow");
2353 err = rte_errno; /* Save rte_errno before cleanup. */
2354 LIST_FOREACH(verbs, &flow->verbs, next) {
2356 if (flow->fate & MLX5_FLOW_FATE_DROP)
2357 mlx5_hrxq_drop_release(dev);
2359 mlx5_hrxq_release(dev, verbs->hrxq);
2363 rte_errno = err; /* Restore rte_errno. */
2368 * Create a flow and add it to @p list.
2371 * Pointer to Ethernet device.
2373 * Pointer to a TAILQ flow list.
2375 * Flow rule attributes.
2377 * Pattern specification (list terminated by the END pattern item).
2378 * @param[in] actions
2379 * Associated actions (list terminated by the END action).
2381 * Perform verbose error reporting if not NULL.
2384 * A flow on success, NULL otherwise and rte_errno is set.
2386 static struct rte_flow *
2387 mlx5_flow_list_create(struct rte_eth_dev *dev,
2388 struct mlx5_flows *list,
2389 const struct rte_flow_attr *attr,
2390 const struct rte_flow_item items[],
2391 const struct rte_flow_action actions[],
2392 struct rte_flow_error *error)
2394 struct rte_flow *flow = NULL;
2395 size_t size = 0;
2396 int ret;
2398 ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
2399 if (ret < 0)
2400 return NULL;
2401 size = ret;
2402 flow = rte_calloc(__func__, 1, size, 0);
2404 rte_flow_error_set(error, ENOMEM,
2405 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2407 "not enough memory to create flow");
2410 ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
2415 assert((size_t)ret == size);
2416 if (dev->data->dev_started) {
2417 ret = mlx5_flow_apply(dev, flow, error);
2419 ret = rte_errno; /* Save rte_errno before cleanup. */
2421 mlx5_flow_remove(dev, flow);
2424 rte_errno = ret; /* Restore rte_errno. */
2428 TAILQ_INSERT_TAIL(list, flow, next);
2429 mlx5_flow_rxq_flags_set(dev, flow);
2436 * @see rte_flow_create()
2440 mlx5_flow_create(struct rte_eth_dev *dev,
2441 const struct rte_flow_attr *attr,
2442 const struct rte_flow_item items[],
2443 const struct rte_flow_action actions[],
2444 struct rte_flow_error *error)
2446 return mlx5_flow_list_create
2447 (dev, &((struct priv *)dev->data->dev_private)->flows,
2448 attr, items, actions, error);
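/*
 * Illustrative sketch (not part of the driver): a complete rule as an
 * application would submit it through the entry point above, dropping
 * ingress IPv4 traffic from 192.0.2.0/24 (documentation address range).
 * RTE_BE32 comes from rte_byteorder.h; all names are illustrative only.
 */
static __rte_unused struct rte_flow *
example_create_drop_rule(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_ipv4 spec = {
		.hdr.src_addr = RTE_BE32(0xc0000200), /* 192.0.2.0 */
	};
	static const struct rte_flow_item_ipv4 mask = {
		.hdr.src_addr = RTE_BE32(0xffffff00), /* /24 */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}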
2452 * Destroy a flow in a list.
2455 * Pointer to Ethernet device.
2457 * Pointer to a TAILQ flow list.
2462 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2463 struct rte_flow *flow)
2465 mlx5_flow_remove(dev, flow);
2466 TAILQ_REMOVE(list, flow, next);
2468 * Update RX queue flags only if port is started, otherwise it is
2469 * already clean.
2470 */
2471 if (dev->data->dev_started)
2472 mlx5_flow_rxq_flags_trim(dev, flow);
2477 * Destroy all flows.
2480 * Pointer to Ethernet device.
2482 * Pointer to a TAILQ flow list.
2485 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
2487 while (!TAILQ_EMPTY(list)) {
2488 struct rte_flow *flow;
2490 flow = TAILQ_FIRST(list);
2491 mlx5_flow_list_destroy(dev, list, flow);
2499 * Pointer to Ethernet device.
2501 * Pointer to a TAILQ flow list.
2504 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
2506 struct rte_flow *flow;
2508 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
2509 mlx5_flow_remove(dev, flow);
2510 mlx5_flow_rxq_flags_clear(dev);
2517 * Pointer to Ethernet device.
2519 * Pointer to a TAILQ flow list.
2522 * 0 on success, a negative errno value otherwise and rte_errno is set.
2525 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
2527 struct rte_flow *flow;
2528 struct rte_flow_error error;
2531 TAILQ_FOREACH(flow, list, next) {
2532 ret = mlx5_flow_apply(dev, flow, &error);
2535 mlx5_flow_rxq_flags_set(dev, flow);
2539 ret = rte_errno; /* Save rte_errno before cleanup. */
2540 mlx5_flow_stop(dev, list);
2541 rte_errno = ret; /* Restore rte_errno. */
2542 return -rte_errno;
2543 }
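/*
 * Illustrative sketch (not part of the driver): the stop/start pair above
 * is meant to bracket a port restart, so hardware (Verbs) flows only exist
 * while the port runs and the rte_flow objects survive in the list. A
 * hypothetical caller in the device start/stop path could look like this.
 */
static int __rte_unused
example_restart_port_flows(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	/* Release hardware flows, keep the software list intact. */
	mlx5_flow_stop(dev, &priv->flows);
	/* ... queues may be reconfigured here ... */
	/* Re-apply every flow kept in the list. */
	return mlx5_flow_start(dev, &priv->flows);
}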
2546 * Verify the flow list is empty.
2549 * Pointer to Ethernet device.
2551 * @return the number of flows not released.
2554 mlx5_flow_verify(struct rte_eth_dev *dev)
2556 struct priv *priv = dev->data->dev_private;
2557 struct rte_flow *flow;
2560 TAILQ_FOREACH(flow, &priv->flows, next) {
2561 DRV_LOG(DEBUG, "port %u flow %p still referenced",
2562 dev->data->port_id, (void *)flow);
2569 * Enable a control flow configured from the control plane.
2572 * Pointer to Ethernet device.
2574 * An Ethernet flow spec to apply.
2576 * An Ethernet flow mask to apply.
2578 * A VLAN flow spec to apply.
2580 * A VLAN flow mask to apply.
2583 * 0 on success, a negative errno value otherwise and rte_errno is set.
2586 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2587 struct rte_flow_item_eth *eth_spec,
2588 struct rte_flow_item_eth *eth_mask,
2589 struct rte_flow_item_vlan *vlan_spec,
2590 struct rte_flow_item_vlan *vlan_mask)
2592 struct priv *priv = dev->data->dev_private;
2593 const struct rte_flow_attr attr = {
2595 .priority = MLX5_FLOW_PRIO_RSVD,
2597 struct rte_flow_item items[] = {
2599 .type = RTE_FLOW_ITEM_TYPE_ETH,
2605 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
2606 RTE_FLOW_ITEM_TYPE_END,
2612 .type = RTE_FLOW_ITEM_TYPE_END,
2615 uint16_t queue[priv->reta_idx_n];
2616 struct rte_flow_action_rss action_rss = {
2617 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2619 .types = priv->rss_conf.rss_hf,
2620 .key_len = priv->rss_conf.rss_key_len,
2621 .queue_num = priv->reta_idx_n,
2622 .key = priv->rss_conf.rss_key,
2625 struct rte_flow_action actions[] = {
2627 .type = RTE_FLOW_ACTION_TYPE_RSS,
2628 .conf = &action_rss,
2631 .type = RTE_FLOW_ACTION_TYPE_END,
2634 struct rte_flow *flow;
2635 struct rte_flow_error error;
2638 if (!priv->reta_idx_n) {
2642 for (i = 0; i != priv->reta_idx_n; ++i)
2643 queue[i] = (*priv->reta_idx)[i];
2644 flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
2652 * Enable a control flow configured from the control plane.
2655 * Pointer to Ethernet device.
2657 * An Ethernet flow spec to apply.
2659 * An Ethernet flow mask to apply.
2662 * 0 on success, a negative errno value otherwise and rte_errno is set.
2665 mlx5_ctrl_flow(struct rte_eth_dev *dev,
2666 struct rte_flow_item_eth *eth_spec,
2667 struct rte_flow_item_eth *eth_mask)
2669 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
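/*
 * Illustrative sketch (not part of the driver): a typical user of the
 * helper above is the traffic-enable path, which installs a control flow
 * matching broadcast destination MAC addresses; spec and mask carry the
 * same all-ones address so only that field is matched.
 */
static int __rte_unused
example_enable_broadcast(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	return mlx5_ctrl_flow(dev, &bcast, &bcast);
}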
2675 * @see rte_flow_destroy()
2679 mlx5_flow_destroy(struct rte_eth_dev *dev,
2680 struct rte_flow *flow,
2681 struct rte_flow_error *error __rte_unused)
2683 struct priv *priv = dev->data->dev_private;
2685 mlx5_flow_list_destroy(dev, &priv->flows, flow);
2690 * Destroy all flows.
2692 * @see rte_flow_flush()
2696 mlx5_flow_flush(struct rte_eth_dev *dev,
2697 struct rte_flow_error *error __rte_unused)
2699 struct priv *priv = dev->data->dev_private;
2701 mlx5_flow_list_flush(dev, &priv->flows);
2708 * @see rte_flow_isolate()
2712 mlx5_flow_isolate(struct rte_eth_dev *dev,
2714 struct rte_flow_error *error)
2716 struct priv *priv = dev->data->dev_private;
2718 if (dev->data->dev_started) {
2719 rte_flow_error_set(error, EBUSY,
2720 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2722 "port must be stopped first");
2725 priv->isolated = !!enable;
2727 dev->dev_ops = &mlx5_dev_ops_isolate;
2729 dev->dev_ops = &mlx5_dev_ops;
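/*
 * Illustrative sketch (not part of the driver): isolated mode has to be
 * requested while the port is stopped, typically right after
 * rte_eth_dev_configure() and before rte_eth_dev_start(), so that only
 * explicitly created flow rules receive traffic afterwards.
 */
static int __rte_unused
example_enter_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error err;

	return rte_flow_isolate(port_id, 1, &err);
}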
2734 * Convert a flow director filter to a generic flow.
2737 * Pointer to Ethernet device.
2738 * @param fdir_filter
2739 * Flow director filter to add.
2741 * Generic flow parameters structure.
2744 * 0 on success, a negative errno value otherwise and rte_errno is set.
2747 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
2748 const struct rte_eth_fdir_filter *fdir_filter,
2749 struct mlx5_fdir *attributes)
2751 struct priv *priv = dev->data->dev_private;
2752 const struct rte_eth_fdir_input *input = &fdir_filter->input;
2753 const struct rte_eth_fdir_masks *mask =
2754 &dev->data->dev_conf.fdir_conf.mask;
2756 /* Validate queue number. */
2757 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
2758 DRV_LOG(ERR, "port %u invalid queue number %d",
2759 dev->data->port_id, fdir_filter->action.rx_queue);
2763 attributes->attr.ingress = 1;
2764 attributes->items[0] = (struct rte_flow_item) {
2765 .type = RTE_FLOW_ITEM_TYPE_ETH,
2766 .spec = &attributes->l2,
2767 .mask = &attributes->l2_mask,
2769 switch (fdir_filter->action.behavior) {
2770 case RTE_ETH_FDIR_ACCEPT:
2771 attributes->actions[0] = (struct rte_flow_action){
2772 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
2773 .conf = &attributes->queue,
2776 case RTE_ETH_FDIR_REJECT:
2777 attributes->actions[0] = (struct rte_flow_action){
2778 .type = RTE_FLOW_ACTION_TYPE_DROP,
2782 DRV_LOG(ERR, "port %u invalid behavior %d",
2784 fdir_filter->action.behavior);
2785 rte_errno = ENOTSUP;
2788 attributes->queue.index = fdir_filter->action.rx_queue;
2790 switch (fdir_filter->input.flow_type) {
2791 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2792 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2793 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2794 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2795 .src_addr = input->flow.ip4_flow.src_ip,
2796 .dst_addr = input->flow.ip4_flow.dst_ip,
2797 .time_to_live = input->flow.ip4_flow.ttl,
2798 .type_of_service = input->flow.ip4_flow.tos,
2799 .next_proto_id = input->flow.ip4_flow.proto,
2801 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
2802 .src_addr = mask->ipv4_mask.src_ip,
2803 .dst_addr = mask->ipv4_mask.dst_ip,
2804 .time_to_live = mask->ipv4_mask.ttl,
2805 .type_of_service = mask->ipv4_mask.tos,
2806 .next_proto_id = mask->ipv4_mask.proto,
2808 attributes->items[1] = (struct rte_flow_item){
2809 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2810 .spec = &attributes->l3,
2811 .mask = &attributes->l3_mask,
2814 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2815 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2816 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2817 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2818 .hop_limits = input->flow.ipv6_flow.hop_limits,
2819 .proto = input->flow.ipv6_flow.proto,
2822 memcpy(attributes->l3.ipv6.hdr.src_addr,
2823 input->flow.ipv6_flow.src_ip,
2824 RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2825 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2826 input->flow.ipv6_flow.dst_ip,
2827 RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2828 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
2829 mask->ipv6_mask.src_ip,
2830 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
2831 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
2832 mask->ipv6_mask.dst_ip,
2833 RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
2834 attributes->items[1] = (struct rte_flow_item){
2835 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2836 .spec = &attributes->l3,
2837 .mask = &attributes->l3_mask,
2841 DRV_LOG(ERR, "port %u invalid flow type %d",
2842 dev->data->port_id, fdir_filter->input.flow_type);
2843 rte_errno = ENOTSUP;
2847 switch (fdir_filter->input.flow_type) {
2848 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2849 attributes->l4.udp.hdr = (struct udp_hdr){
2850 .src_port = input->flow.udp4_flow.src_port,
2851 .dst_port = input->flow.udp4_flow.dst_port,
2853 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2854 .src_port = mask->src_port_mask,
2855 .dst_port = mask->dst_port_mask,
2857 attributes->items[2] = (struct rte_flow_item){
2858 .type = RTE_FLOW_ITEM_TYPE_UDP,
2859 .spec = &attributes->l4,
2860 .mask = &attributes->l4_mask,
2863 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2864 attributes->l4.tcp.hdr = (struct tcp_hdr){
2865 .src_port = input->flow.tcp4_flow.src_port,
2866 .dst_port = input->flow.tcp4_flow.dst_port,
2868 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2869 .src_port = mask->src_port_mask,
2870 .dst_port = mask->dst_port_mask,
2872 attributes->items[2] = (struct rte_flow_item){
2873 .type = RTE_FLOW_ITEM_TYPE_TCP,
2874 .spec = &attributes->l4,
2875 .mask = &attributes->l4_mask,
2878 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2879 attributes->l4.udp.hdr = (struct udp_hdr){
2880 .src_port = input->flow.udp6_flow.src_port,
2881 .dst_port = input->flow.udp6_flow.dst_port,
2883 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2884 .src_port = mask->src_port_mask,
2885 .dst_port = mask->dst_port_mask,
2887 attributes->items[2] = (struct rte_flow_item){
2888 .type = RTE_FLOW_ITEM_TYPE_UDP,
2889 .spec = &attributes->l4,
2890 .mask = &attributes->l4_mask,
2893 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2894 attributes->l4.tcp.hdr = (struct tcp_hdr){
2895 .src_port = input->flow.tcp6_flow.src_port,
2896 .dst_port = input->flow.tcp6_flow.dst_port,
2898 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2899 .src_port = mask->src_port_mask,
2900 .dst_port = mask->dst_port_mask,
2902 attributes->items[2] = (struct rte_flow_item){
2903 .type = RTE_FLOW_ITEM_TYPE_TCP,
2904 .spec = &attributes->l4,
2905 .mask = &attributes->l4_mask,
2908 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2909 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2912 DRV_LOG(ERR, "port %u invalid flow type %d",
2913 dev->data->port_id, fdir_filter->input.flow_type);
2914 rte_errno = ENOTSUP;
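/*
 * Illustrative sketch (not part of the driver): a flow director request of
 * the kind the conversion above accepts; it becomes an ETH / IPV4 / UDP
 * pattern with a QUEUE action on queue 1. The address and port are example
 * values given in network byte order, since they are copied verbatim into
 * the header specs (RTE_BE32/RTE_BE16 come from rte_byteorder.h).
 */
static const struct rte_eth_fdir_filter example_fdir_filter __rte_unused = {
	.input = {
		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
		.flow.udp4_flow = {
			.ip.src_ip = RTE_BE32(0xc0000201), /* 192.0.2.1 */
			.dst_port = RTE_BE16(4789),
		},
	},
	.action = {
		.rx_queue = 1,
		.behavior = RTE_ETH_FDIR_ACCEPT,
	},
};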
2921 * Add a new flow director filter and store it in the list.
2924 * Pointer to Ethernet device.
2925 * @param fdir_filter
2926 * Flow director filter to add.
2929 * 0 on success, a negative errno value otherwise and rte_errno is set.
2932 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
2933 const struct rte_eth_fdir_filter *fdir_filter)
2935 struct priv *priv = dev->data->dev_private;
2936 struct mlx5_fdir attributes = {
2939 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
2940 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
2944 struct rte_flow_error error;
2945 struct rte_flow *flow;
2948 ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
2951 flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
2952 attributes.items, attributes.actions,
2955 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
2963 * Delete specific filter.
2966 * Pointer to Ethernet device.
2967 * @param fdir_filter
2968 * Filter to be deleted.
2971 * 0 on success, a negative errno value otherwise and rte_errno is set.
2974 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
2975 const struct rte_eth_fdir_filter *fdir_filter
2978 rte_errno = ENOTSUP;
2983 * Update queue for specific filter.
2986 * Pointer to Ethernet device.
2987 * @param fdir_filter
2988 * Filter to be updated.
2991 * 0 on success, a negative errno value otherwise and rte_errno is set.
2994 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
2995 const struct rte_eth_fdir_filter *fdir_filter)
2999 ret = mlx5_fdir_filter_delete(dev, fdir_filter);
3002 return mlx5_fdir_filter_add(dev, fdir_filter);
3006 * Flush all filters.
3009 * Pointer to Ethernet device.
3012 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
3014 struct priv *priv = dev->data->dev_private;
3016 mlx5_flow_list_flush(dev, &priv->flows);
3020 * Get flow director information.
3023 * Pointer to Ethernet device.
3024 * @param[out] fdir_info
3025 * Resulting flow director information.
3028 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
3030 struct rte_eth_fdir_masks *mask =
3031 &dev->data->dev_conf.fdir_conf.mask;
3033 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
3034 fdir_info->guarant_spc = 0;
3035 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
3036 fdir_info->max_flexpayload = 0;
3037 fdir_info->flow_types_mask[0] = 0;
3038 fdir_info->flex_payload_unit = 0;
3039 fdir_info->max_flex_payload_segment_num = 0;
3040 fdir_info->flex_payload_limit = 0;
3041 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
3045 * Deal with flow director operations.
3048 * Pointer to Ethernet device.
3050 * Operation to perform.
3052 * Pointer to operation-specific structure.
3055 * 0 on success, a negative errno value otherwise and rte_errno is set.
3058 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3061 enum rte_fdir_mode fdir_mode =
3062 dev->data->dev_conf.fdir_conf.mode;
3064 if (filter_op == RTE_ETH_FILTER_NOP)
3066 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
3067 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3068 DRV_LOG(ERR, "port %u flow director mode %d not supported",
3069 dev->data->port_id, fdir_mode);
3073 switch (filter_op) {
3074 case RTE_ETH_FILTER_ADD:
3075 return mlx5_fdir_filter_add(dev, arg);
3076 case RTE_ETH_FILTER_UPDATE:
3077 return mlx5_fdir_filter_update(dev, arg);
3078 case RTE_ETH_FILTER_DELETE:
3079 return mlx5_fdir_filter_delete(dev, arg);
3080 case RTE_ETH_FILTER_FLUSH:
3081 mlx5_fdir_filter_flush(dev);
3083 case RTE_ETH_FILTER_INFO:
3084 mlx5_fdir_info_get(dev, arg);
3087 DRV_LOG(DEBUG, "port %u unknown operation %u",
3088 dev->data->port_id, filter_op);
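/*
 * Illustrative sketch (not part of the driver): the legacy path through
 * which applications reach the dispatcher above, adding a flow director
 * filter via the generic filter-control API. Names are illustrative only.
 */
static int __rte_unused
example_fdir_add(uint16_t port_id, struct rte_eth_fdir_filter *filter)
{
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, filter);
}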
3096 * Manage filter operations.
3099 * Pointer to Ethernet device structure.
3100 * @param filter_type
3103 * Operation to perform.
3105 * Pointer to operation-specific structure.
3108 * 0 on success, a negative errno value otherwise and rte_errno is set.
3111 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
3112 enum rte_filter_type filter_type,
3113 enum rte_filter_op filter_op,
3116 switch (filter_type) {
3117 case RTE_ETH_FILTER_GENERIC:
3118 if (filter_op != RTE_ETH_FILTER_GET) {
3122 *(const void **)arg = &mlx5_flow_ops;
3124 case RTE_ETH_FILTER_FDIR:
3125 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
3127 DRV_LOG(ERR, "port %u filter type (%d) not supported",
3128 dev->data->port_id, filter_type);
3129 rte_errno = ENOTSUP;
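/*
 * Illustrative sketch (not part of the driver): how the generic flow ops
 * exported above are retrieved; rte_flow_* public calls perform this
 * RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET query before dispatching to
 * mlx5_flow_ops. The helper name is illustrative only.
 */
static __rte_unused const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				 RTE_ETH_FILTER_GET, &ops))
		return NULL;
	return ops;
}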