/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */
#include <sys/queue.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -Wpedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_ip.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"
/* Dev ops structures defined in mlx5.c. */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
/* Pattern layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
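
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * combined masks above let a whole layer class be tested with a single
 * bitwise AND.
 */
static inline int
mlx5_flow_layers_has_outer_l3(uint32_t layers)
{
	/* Non-zero when an IPv4 or IPv6 item has already been parsed. */
	return !!(layers & MLX5_FLOW_LAYER_OUTER_L3);
}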
/* Actions that modify the fate of matching traffic. */
#define MLX5_FLOW_FATE_DROP (1u << 0)
#define MLX5_FLOW_FATE_QUEUE (1u << 1)

/* Possible L3 protocols to filter on. */
#define MLX5_IP_PROTOCOL_UDP 17
/** Verbs flow representation: attribute, specifications and handles. */
struct mlx5_flow_verbs {
	unsigned int size; /**< Total size in bytes of the specifications. */
	struct ibv_flow_attr *attr; /**< Pointer to the Verbs flow attribute. */
	uint8_t *specs; /**< Pointer to the specifications. */
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
};
/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct rte_flow_attr attributes; /**< User flow attribute. */
	uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
	uint32_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	uint32_t fate;
	/**< Bit-fields of present fate actions, see MLX5_FLOW_FATE_*. */
	uint8_t l3_protocol; /**< Valid when l3_protocol_en is set. */
	struct mlx5_flow_verbs verbs; /**< Verbs flow. */
	uint16_t queue; /**< Destination queue to redirect traffic to. */
};
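
/*
 * Layout sketch (assuming the allocation scheme of mlx5_flow_list_create()
 * below): a flow is a single allocation holding the rte_flow structure,
 * the Verbs attribute and the specifications back to back:
 *
 *	+-----------------+----------------------+--------+--------+--
 *	| struct rte_flow | struct ibv_flow_attr | spec 0 | spec 1 | ...
 *	+-----------------+----------------------+--------+--------+--
 *
 * verbs.attr and verbs.specs point into that same buffer.
 */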
static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
};
/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue;
};
/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};
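
/*
 * Sketch (not part of the driver): every Verbs specification starts with
 * this header, so the specification buffer can be walked generically;
 * mlx5_flow_item_vlan_update() below relies on exactly this:
 *
 *	struct ibv_spec_header *hdr = first_spec;
 *	unsigned int i;
 *
 *	for (i = 0; i != attr->num_of_specs; ++i)
 *		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
 */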
/**
 * Discover the maximum number of priorities available.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno value
 *   otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
	uint16_t vprio[] = { 8, 16 };
	int i;

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
	}
	mlx5_hrxq_drop_release(dev);
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, vprio[i - 1]);
	return vprio[i - 1];
}
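
/*
 * Usage sketch (hypothetical wiring, the real caller lives outside this
 * file): the probed value is expected to be cached at configuration time,
 * e.g.
 *
 *	priv->config.flow_prio = mlx5_flow_discover_priorities(dev);
 *
 * mlx5_flow_attributes() below reads it back to bound rule priorities.
 */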
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_attributes(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attributes,
		     struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	uint32_t priority_max =
		((struct priv *)dev->data->dev_private)->config.flow_prio;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
	if (attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "egress is not supported");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	flow->attributes = *attributes;
	return 0;
}
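
/*
 * Illustrative sketch (not part of the driver): the only attribute
 * combination accepted above is a plain ingress rule whose priority is
 * below the discovered maximum, e.g.
 *
 *	const struct rte_flow_attr attr = {
 *		.ingress = 1,
 *		.priority = 0,
 *	};
 *
 * mlx5_ctrl_flow_vlan() at the end of this file builds such an attribute.
 */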
/**
 * Verify the @p item specifications (spec, last, mask) are compatible with
 * the NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  struct rte_flow_error *error)
{
	unsigned int i;

	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not supported");
	}
	return 0;
}
/**
 * Add a Verbs specification into @p flow.
 *
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] src
 *   Specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
{
	if (flow->verbs.specs) {
		void *dst;

		dst = (void *)(flow->verbs.specs + flow->verbs.size);
		memcpy(dst, src, size);
		++flow->verbs.attr->num_of_specs;
	}
	flow->verbs.size += size;
}
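
/*
 * Note on the calling convention: during a pure validation pass
 * flow->verbs.specs is NULL, so the copy above is skipped and only
 * flow->verbs.size grows; the caller thereby learns how many bytes a
 * second, "apply" pass will need.
 */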
/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = size,
	};
	int ret;

	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L2 layers already configured");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					error);
	if (ret)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
	if (size > flow_size)
		return size;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	mlx5_flow_spec_verbs_add(flow, &eth, size);
	return size;
}
/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
			   struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
/**
 * Convert the @p item into the @p flow specification (or update the
 * Ethernet Verbs specification already present) after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH,
		.size = size,
	};
	int ret;
	const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
			MLX5_FLOW_LAYER_OUTER_L4;
	const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
	const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;

	if (flow->layers & vlanm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "VLAN layer already configured");
	else if ((flow->layers & l34m) != 0)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L2 layer cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_vlan), error);
	if (ret)
		return ret;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	/*
	 * From Verbs perspective an empty VLAN is equivalent
	 * to a packet without VLAN layer.
	 */
	if (!eth.mask.vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	if (!(flow->layers & l2m)) {
		if (size <= flow_size)
			mlx5_flow_spec_verbs_add(flow, &eth, size);
	} else {
		if (flow->verbs.attr)
			mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
		size = 0; /* Only an update is done in eth specification. */
	}
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |
			MLX5_FLOW_LAYER_OUTER_VLAN;
	return size;
}
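
/*
 * Note: a pattern ETH / VLAN therefore produces a single
 * ibv_flow_spec_eth; the VLAN item folds its TCI and inner EtherType into
 * the specification already emitted for the ETH item, which is why it
 * reports a size of 0 in that case.
 */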
/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT,
		.size = size,
	};
	int ret;

	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "multiple L3 layers not supported");
	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 cannot follow an L4 layer");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_ipv4), error);
	if (ret < 0)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow->l3_protocol_en = !!ipv4.mask.proto;
	flow->l3_protocol = ipv4.val.proto;
	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &ipv4, size);
	return size;
}
/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6,
		.size = size,
	};
	int ret;

	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "multiple L3 layers not supported");
	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 cannot follow an L4 layer");
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_ipv6), error);
	if (ret < 0)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
					 IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
					 IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.val.hop_limit = spec->hdr.hop_limits;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
					 IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
					  IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		ipv6.mask.hop_limit = mask->hdr.hop_limits;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
	}
	flow->l3_protocol_en = !!ipv6.mask.next_hdr;
	flow->l3_protocol = ipv6.val.next_hdr;
	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &ipv6, size);
	return size;
}
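
/*
 * Sketch of the vtc_flow decomposition used above (RFC 2460 header
 * layout, host byte order after rte_be_to_cpu_32()):
 *
 *	 31     28 27           20 19                      0
 *	+---------+---------------+-------------------------+
 *	| version | traffic class |       flow label        |
 *	+---------+---------------+-------------------------+
 */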
/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p item has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
		   const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP,
		.size = size,
	};
	int ret;

	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 is mandatory to filter on L4");
	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L4 layer is already present");
	if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "protocol filtering not compatible"
					  " with UDP layer");
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_udp_mask,
		 sizeof(struct rte_flow_item_udp), error);
	if (ret < 0)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
	if (size > flow_size)
		return size;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	mlx5_flow_spec_verbs_add(flow, &udp, size);
	return size;
}
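
/*
 * Note: the l3_protocol check above ties the IPv4/IPv6 "next protocol"
 * field to the L4 item; e.g. a pattern IPV4 (proto = 6, i.e. TCP) / UDP
 * is rejected because the declared protocol contradicts the UDP item
 * (MLX5_IP_PROTOCOL_UDP is 17).
 */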
/**
 * Convert the @p pattern into Verbs specifications after ensuring the NIC
 * will understand and process it correctly.
 * The conversion is performed item per item, each of them is written into
 * the @p flow if its size is less than or equal to @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end of @p pattern, unless an error is encountered.
 *
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p pattern has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_items(const struct rte_flow_item pattern[],
		struct rte_flow *flow, const size_t flow_size,
		struct rte_flow_error *error)
{
	int remain = flow_size;
	size_t size = 0;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		int ret = 0;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_item_eth(pattern, flow, remain, error);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_item_udp(pattern, flow, remain, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  pattern,
						  "item not supported");
		}
		if (ret < 0)
			return ret;
		if (remain > ret)
			remain -= ret;
		else
			remain = 0;
		size += ret;
	}
	if (!flow->layers) {
		const struct rte_flow_item item = {
			.type = RTE_FLOW_ITEM_TYPE_ETH,
		};

		return mlx5_flow_item_eth(&item, flow, flow_size, error);
	}
	return size;
}
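
/*
 * Note: an empty (or all-VOID) pattern leaves flow->layers at 0, in which
 * case a catch-all Ethernet item is synthesized above, so every flow
 * carries at least one L2 specification.
 */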
/**
 * Convert the @p action into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow, the validation is still performed.
 *
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p action has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_action_drop(const struct rte_flow_action *action,
		      struct rte_flow *flow, const size_t flow_size,
		      struct rte_flow_error *error)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	if (flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "multiple fate actions are not"
					  " supported");
	if (size <= flow_size)
		mlx5_flow_spec_verbs_add(flow, &drop, size);
	flow->fate |= MLX5_FLOW_FATE_DROP;
	return size;
}
/**
 * Convert the @p action into @p flow after ensuring the NIC will understand
 * and process it correctly.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_action_queue(struct rte_eth_dev *dev,
		       const struct rte_flow_action *action,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "multiple fate actions are not"
					  " supported");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	flow->queue = queue->index;
	flow->fate |= MLX5_FLOW_FATE_QUEUE;
	return 0;
}
/**
 * Convert the @p actions into @p flow after ensuring the NIC will understand
 * and process it correctly.
 * The conversion is performed action per action, each of them is written into
 * the @p flow if its size is less than or equal to @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end of @p actions, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in] actions
 *   Pointer to flow actions array.
 * @param[in, out] flow
 *   Pointer to the rte_flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the @p actions has fully been
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_actions(struct rte_eth_dev *dev,
		  const struct rte_flow_action actions[],
		  struct rte_flow *flow, const size_t flow_size,
		  struct rte_flow_error *error)
{
	size_t size = 0;
	int remain = flow_size;
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_action_drop(actions, flow, remain,
						    error);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_action_queue(dev, actions, flow, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
		if (remain > ret)
			remain -= ret;
		else
			remain = 0;
		size += ret;
	}
	if (!flow->fate)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "no fate action found");
	return size;
}
/**
 * Convert the @p attributes, @p pattern and @p actions into a flow for the
 * NIC after ensuring the NIC will understand and process it correctly.
 * The conversion is only performed item/action per item/action, each of
 * them is written into the @p flow if its size is less than or equal to
 * @p flow_size.
 * Validation and memory consumption computation are still performed until the
 * end, unless an error is encountered.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow, if too small some
 *   garbage may be present.
 * @param[in] attributes
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   On success the number of bytes consumed/necessary, if the returned value
 *   is less than or equal to @p flow_size, the flow has fully been converted
 *   and can be applied, otherwise another call with this returned memory
 *   size should be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
		const size_t flow_size,
		const struct rte_flow_attr *attributes,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow local_flow = { .layers = 0, };
	size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
	int remain = (flow_size > size) ? flow_size - size : 0;
	int ret;

	if (!remain)
		flow = &local_flow;
	ret = mlx5_flow_attributes(dev, attributes, flow, error);
	if (ret < 0)
		return ret;
	ret = mlx5_flow_items(pattern, flow, remain, error);
	if (ret < 0)
		return ret;
	size += ret;
	remain = (flow_size > size) ? flow_size - size : 0;
	ret = mlx5_flow_actions(dev, actions, flow, remain, error);
	if (ret < 0)
		return ret;
	size += ret;
	if (size <= flow_size)
		flow->verbs.attr->priority = flow->attributes.priority;
	return size;
}
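
/*
 * Illustrative sketch (hypothetical caller): mlx5_flow_merge() is meant
 * to be used in two passes, a size query followed by a fill pass,
 * mirroring mlx5_flow_list_create() below:
 *
 *	int size = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, err);
 *
 *	if (size < 0)
 *		return size;
 *	flow = rte_zmalloc(__func__, size, 0);
 *	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
 *	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
 *	size = mlx5_flow_merge(dev, flow, size, attr, items, actions, err);
 */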
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);

	if (ret < 0)
		return ret;
	return 0;
}
/**
 * Remove the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	if (flow->verbs.flow) {
		claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
		flow->verbs.flow = NULL;
	}
	if (flow->verbs.hrxq) {
		if (flow->fate & MLX5_FLOW_FATE_DROP)
			mlx5_hrxq_drop_release(dev);
		else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
			mlx5_hrxq_release(dev, flow->verbs.hrxq);
		flow->verbs.hrxq = NULL;
	}
}
/**
 * Apply the flow.
 *
 * @param[in] dev
 *   Pointer to Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	if (flow->fate & MLX5_FLOW_FATE_DROP) {
		flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
		if (!flow->verbs.hrxq)
			return rte_flow_error_set
				(error, errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot allocate Drop queue");
	} else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
				     rss_hash_default_key_len, 0,
				     &flow->queue, 1, 0, 0);
		if (!hrxq)
			hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
					     rss_hash_default_key_len, 0,
					     &flow->queue, 1, 0, 0);
		if (!hrxq)
			return rte_flow_error_set(error, rte_errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"cannot create flow");
		flow->verbs.hrxq = hrxq;
	}
	flow->verbs.flow =
		mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
	if (!flow->verbs.flow) {
		if (flow->fate & MLX5_FLOW_FATE_DROP)
			mlx5_hrxq_drop_release(dev);
		else
			mlx5_hrxq_release(dev, flow->verbs.hrxq);
		flow->verbs.hrxq = NULL;
		return rte_flow_error_set(error, errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "kernel module refuses to create"
					  " flow");
	}
	return 0;
}
/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev,
		      struct mlx5_flows *list,
		      const struct rte_flow_attr *attr,
		      const struct rte_flow_item items[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow *flow;
	size_t size;
	int ret;

	ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
	if (ret < 0)
		return NULL;
	size = ret;
	flow = rte_zmalloc(__func__, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate memory");
		return NULL;
	}
	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
	if (ret < 0)
		goto error;
	assert((size_t)ret == size);
	if (dev->data->dev_started) {
		ret = mlx5_flow_apply(dev, flow, error);
		if (ret < 0)
			goto error;
	}
	TAILQ_INSERT_TAIL(list, flow, next);
	return flow;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_remove(dev, flow);
	rte_free(flow);
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	return mlx5_flow_list_create
		(dev, &((struct priv *)dev->data->dev_private)->flows,
		 attr, items, actions, error);
}
/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
		       struct rte_flow *flow)
{
	mlx5_flow_remove(dev, flow);
	TAILQ_REMOVE(list, flow, next);
	rte_free(flow);
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		mlx5_flow_list_destroy(dev, list, flow);
	}
}
/**
 * Remove all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
		mlx5_flow_remove(dev, flow);
}

/**
 * Add all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	struct rte_flow *flow;
	struct rte_flow_error error;
	int ret = 0;

	TAILQ_FOREACH(flow, list, next) {
		ret = mlx5_flow_apply(dev, flow, &error);
		if (ret < 0)
			goto error;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_stop(dev, list);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DRV_LOG(DEBUG, "port %u flow %p still referenced",
			dev->data->port_id, (void *)flow);
		++ret;
	}
	return ret;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = priv->config.flow_prio - 1,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
				     actions, &error);
	if (!flow)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
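
/*
 * Usage sketch (hypothetical caller, the real ones live in the traffic
 * control path): broadcast reception is typically enabled with the same
 * structure used as both spec and mask, e.g.
 *
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	mlx5_ctrl_flow(dev, &bcast, &bcast);
 */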
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_destroy(dev, &priv->flows, flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
	return 0;
}
/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;
	return 0;
}
/**
 * Convert a flow director filter to a generic flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *fdir_filter,
			 struct mlx5_fdir *attributes)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_eth_fdir_input *input = &fdir_filter->input;
	const struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u invalid queue number %d",
			dev->data->port_id, fdir_filter->action.rx_queue);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	attributes->attr.ingress = 1;
	attributes->items[0] = (struct rte_flow_item) {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &attributes->l2,
		.mask = &attributes->l2_mask,
	};
	switch (fdir_filter->action.behavior) {
	case RTE_ETH_FDIR_ACCEPT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &attributes->queue,
		};
		break;
	case RTE_ETH_FDIR_REJECT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_DROP,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid behavior %d",
			dev->data->port_id,
			fdir_filter->action.behavior);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	attributes->queue.index = fdir_filter->action.rx_queue;
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = input->flow.ip4_flow.src_ip,
			.dst_addr = input->flow.ip4_flow.dst_ip,
			.time_to_live = input->flow.ip4_flow.ttl,
			.type_of_service = input->flow.ip4_flow.tos,
			.next_proto_id = input->flow.ip4_flow.proto,
		};
		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = mask->ipv4_mask.src_ip,
			.dst_addr = mask->ipv4_mask.dst_ip,
			.time_to_live = mask->ipv4_mask.ttl,
			.type_of_service = mask->ipv4_mask.tos,
			.next_proto_id = mask->ipv4_mask.proto,
		};
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
			.hop_limits = input->flow.ipv6_flow.hop_limits,
			.proto = input->flow.ipv6_flow.proto,
		};
		memcpy(attributes->l3.ipv6.hdr.src_addr,
		       input->flow.ipv6_flow.src_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3.ipv6.hdr.dst_addr,
		       input->flow.ipv6_flow.dst_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
		       mask->ipv6_mask.src_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
		       mask->ipv6_mask.dst_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV6,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp4_flow.src_port,
			.dst_port = input->flow.udp4_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp4_flow.src_port,
			.dst_port = input->flow.tcp4_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp6_flow.src_port,
			.dst_port = input->flow.udp6_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp6_flow.src_port,
			.dst_port = input->flow.tcp6_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
/**
 * Add new flow director filter and store it in list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
		     const struct rte_eth_fdir_filter *fdir_filter)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_fdir attributes = {
		.attr.group = 0,
		.l2_mask = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		},
	};
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
	if (ret)
		return ret;
	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
				     attributes.items, attributes.actions,
				     &error);
	if (flow) {
		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
			(void *)flow);
		return 0;
	}
	return -rte_errno;
}
/**
 * Delete specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
			const struct rte_eth_fdir_filter *fdir_filter
			__rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Update queue for specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret;

	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}
/**
 * Flush all filters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
}

/**
 * Get flow director information.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	fdir_info->guarant_spc = 0;
	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
	fdir_info->max_flexpayload = 0;
	fdir_info->flow_types_mask[0] = 0;
	fdir_info->flex_payload_unit = 0;
	fdir_info->max_flex_payload_segment_num = 0;
	fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}
/**
 * Deal with flow director operations.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		    void *arg)
{
	enum rte_fdir_mode fdir_mode =
		dev->data->dev_conf.fdir_conf.mode;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		DRV_LOG(ERR, "port %u flow director mode %d not supported",
			dev->data->port_id, fdir_mode);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return mlx5_fdir_filter_add(dev, arg);
	case RTE_ETH_FILTER_UPDATE:
		return mlx5_fdir_filter_update(dev, arg);
	case RTE_ETH_FILTER_DELETE:
		return mlx5_fdir_filter_delete(dev, arg);
	case RTE_ETH_FILTER_FLUSH:
		mlx5_fdir_filter_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		mlx5_fdir_info_get(dev, arg);
		break;
	default:
		DRV_LOG(DEBUG, "port %u unknown operation %u",
			dev->data->port_id, filter_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		*(const void **)arg = &mlx5_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
	default:
		DRV_LOG(ERR, "port %u filter type (%d) not supported",
			dev->data->port_id, filter_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
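
/*
 * Usage sketch (hypothetical application code): rte_flow reaches this
 * driver through the GENERIC filter type, e.g.
 *
 *	const struct rte_flow_ops *ops;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 *
 * after which ops points to the mlx5_flow_ops table defined above.
 */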