/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

/* ISO C doesn't support unnamed structs/unions, disabling -Wpedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer is not supported");
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return 0;
}
/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint32_t action_flags = 0;
	uint32_t item_flags = 0;
	int tunnel = 0;
	uint8_t next_protocol = 0xff;
	int actions_n = 0;

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					       MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id)
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto)
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item items[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		uint64_t *item_flags __rte_unused,
		uint64_t *action_flags __rte_unused,
		struct rte_flow_error *error)
{
	uint32_t size = sizeof(struct mlx5_flow);
	struct mlx5_flow *flow;

	flow = rte_calloc(__func__, 1, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
	return flow;
}
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	char *l24_v;
	unsigned int i;

	if (!eth_v)
		return;
	if (!eth_m)
		eth_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->src); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
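
/*
 * Worked example of the mask/value trimming above: with spec dst
 * 01:02:03:04:05:06 and mask ff:ff:ff:ff:ff:00, the matcher receives the
 * mask bytes verbatim while the value is trimmed to 01:02:03:04:05:00,
 * so the key never carries bits outside the mask.
 */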
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* TCI layout: PCP (3 bits) | DEI/CFI (1 bit) | VID (12 bits). */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}
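
/*
 * Worked example of the TCI split above: tci == 0xb123 decomposes into
 * first_prio == 5 (0xb123 >> 13), first_cfi == 1 ((0xb123 >> 12) & 1) and
 * first_vid == 0x123 (the low 12 bits; the MLX5_SET() field widths drop
 * the higher bits).
 */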
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS layout: DSCP (6 high bits) | ECN (2 low bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int size;
	int i;

	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* VTC flow: version (4 bits) | traffic class (8) | flow label (20). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
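
/*
 * Worked example of the vtc_flow split above: for vtc_flow == 0x60123456
 * (version 6, traffic class 0x01, flow label 0x23456), ip_ecn receives
 * (vtc >> 20) truncated to its 2-bit field == 1, ip_dscp receives
 * (vtc >> 22) truncated to 6 bits == 0, and the flow label field keeps
 * the low 20 bits; the MLX5_SET() field widths do the truncation.
 */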
/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_tcp *tcp_m = item->mask;
	const struct rte_flow_item_tcp *tcp_v = item->spec;
	void *headers_m;
	void *headers_v;

	if (!tcp_v)
		return;
	if (!tcp_m)
		tcp_m = &rte_flow_item_tcp_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}
/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_udp *udp_m = item->mask;
	const struct rte_flow_item_udp *udp_v = item->spec;
	void *headers_m;
	void *headers_v;

	if (!udp_v)
		return;
	if (!udp_m)
		udp_m = &rte_flow_item_udp_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
		 rte_be_to_cpu_16(udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}
/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/* Read TNI only after the mask default has been applied. */
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
	flow_dv_translate_item_gre(matcher, key, item, inner);
}
/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;

	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Match the VXLAN UDP port if no explicit UDP match was given. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
		    const struct rte_flow_item *item,
		    struct mlx5_flow *dev_flow,
		    int inner)
{
	struct mlx5_flow_dv_matcher *tmatcher = matcher;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_VOID:
	case RTE_FLOW_ITEM_TYPE_END:
		break;
	case RTE_FLOW_ITEM_TYPE_ETH:
		flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
					   inner);
		tmatcher->priority = MLX5_PRIORITY_MAP_L2;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
					    inner);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
					    inner);
		tmatcher->priority = MLX5_PRIORITY_MAP_L3;
		dev_flow->dv.hash_fields |=
			mlx5_flow_hashfields_adjust(dev_flow, inner,
						    MLX5_IPV4_LAYER_TYPES,
						    MLX5_IPV4_IBV_RX_HASH);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
					    inner);
		tmatcher->priority = MLX5_PRIORITY_MAP_L3;
		dev_flow->dv.hash_fields |=
			mlx5_flow_hashfields_adjust(dev_flow, inner,
						    MLX5_IPV6_LAYER_TYPES,
						    MLX5_IPV6_IBV_RX_HASH);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
					   inner);
		tmatcher->priority = MLX5_PRIORITY_MAP_L4;
		dev_flow->dv.hash_fields |=
			mlx5_flow_hashfields_adjust(dev_flow, inner,
						    ETH_RSS_TCP,
						    (IBV_RX_HASH_SRC_PORT_TCP |
						     IBV_RX_HASH_DST_PORT_TCP));
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
					   inner);
		tmatcher->priority = MLX5_PRIORITY_MAP_L4;
		dev_flow->dv.hash_fields |=
			mlx5_flow_hashfields_adjust(dev_flow, inner,
						    ETH_RSS_UDP,
						    (IBV_RX_HASH_SRC_PORT_UDP |
						     IBV_RX_HASH_DST_PORT_UDP));
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
					     inner);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
					   inner);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
					     inner);
		break;
	default:
		break;
	}
}
/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
		      struct mlx5_flow *dev_flow)
{
	const struct rte_flow_action_queue *queue;
	const struct rte_flow_action_rss *rss;
	int actions_n = dev_flow->dv.actions_n;
	struct rte_flow *flow = dev_flow->flow;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_VOID:
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
		dev_flow->dv.actions[actions_n].tag_value =
			MLX5_FLOW_MARK_DEFAULT;
		actions_n++;
		flow->actions |= MLX5_FLOW_ACTION_FLAG;
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
		dev_flow->dv.actions[actions_n].tag_value =
			((const struct rte_flow_action_mark *)
			 (action->conf))->id;
		flow->actions |= MLX5_FLOW_ACTION_MARK;
		actions_n++;
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
		flow->actions |= MLX5_FLOW_ACTION_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		queue = action->conf;
		flow->rss.queue_num = 1;
		(*flow->queue)[0] = queue->index;
		flow->actions |= MLX5_FLOW_ACTION_QUEUE;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = action->conf;
		if (flow->queue)
			memcpy((*flow->queue), rss->queue,
			       rss->queue_num * sizeof(uint16_t));
		flow->rss.queue_num = rss->queue_num;
		memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
		flow->rss.types = rss->types;
		flow->rss.level = rss->level;
		/* Added to array only in apply since we need the QP. */
		flow->actions |= MLX5_FLOW_ACTION_RSS;
		break;
	default:
		break;
	}
	dev_flow->dv.actions_n = actions_n;
}
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
	uint8_t match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	return match_criteria_enable;
}
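
/*
 * Illustrative sketch, not part of the driver: a matcher whose mask only
 * sets outer-header fields, e.g. after flow_dv_translate_item_eth() on an
 * outer ETH item, yields
 *
 *	flow_dv_matcher_enable(matcher.mask.buf) ==
 *		1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT
 *
 * while matching a VXLAN VNI additionally raises the misc_parameters bit.
 */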
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
	if (!cache_matcher->matcher_object) {
		/* Do not leak the unregistered cache entry. */
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	return 0;
}
/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
		  struct mlx5_flow *dev_flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	uint64_t priority = attr->priority;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	void *match_value = dev_flow->dv.value.buf;
	int inner = 0;

	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
		flow_dv_create_item(&matcher, match_value, items, dev_flow,
				    inner);
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
						     matcher.priority);
	matcher.egress = attr->egress;
	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
		return -rte_errno;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		flow_dv_create_action(actions, dev_flow);
	return 0;
}
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	int n;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			dv->hrxq = mlx5_hrxq_drop_new(dev);
			if (!dv->hrxq) {
				rte_flow_error_set(error, errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"cannot get drop hash queue");
				goto error;
			}
			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
			dv->actions[n].qp = dv->hrxq->qp;
			n++;
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields, (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set(error, rte_errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
			dv->actions[n].qp = hrxq->qp;
			n++;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;

		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

	assert(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
		rte_atomic32_read(&matcher->refcnt));
	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
		DRV_LOG(DEBUG, "port %u matcher %p: removed",
			dev->data->port_id, (void *)matcher);
		rte_free(matcher);
		return 0;
	}
	return 1;
}
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		if (dv->flow) {
			claim_zero(mlx5_glue->destroy_flow(dv->flow));
			dv->flow = NULL;
		}
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	flow->counter = NULL;
}
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		rte_free(dev_flow);
	}
}
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
};
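
/*
 * Illustrative call order, shown only as a sketch of how the generic
 * mlx5_flow layer is expected to drive these ops: .validate checks
 * attr/items/actions, .prepare allocates the mlx5_flow, .translate fills
 * the matcher and action array, and .apply creates the verbs objects;
 * .remove and .destroy tear the flow down in the reverse order.
 */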

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */