1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTER_DEVX 0
55 * Initialize flow attributes structure according to flow items' types.
58 * Pointer to item specification.
60 * Pointer to flow attributes structure.
/*
 * Walks the pattern up to RTE_FLOW_ITEM_TYPE_END and records which
 * protocol layers (IPv4/IPv6/UDP/TCP) appear in it.
 * NOTE(review): the per-case attribute assignments are not visible in this
 * excerpt; presumably each case sets the matching bit in *attr — confirm.
 */
63 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
65 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
67 case RTE_FLOW_ITEM_TYPE_IPV4:
70 case RTE_FLOW_ITEM_TYPE_IPV6:
73 case RTE_FLOW_ITEM_TYPE_UDP:
76 case RTE_FLOW_ITEM_TYPE_TCP:
/*
 * Describes one header field that a modify-header command can rewrite:
 * its size and byte offset within the protocol header, plus the
 * device-specific field identifier.
 */
86 struct field_modify_info {
87 uint32_t size; /* Size of field in protocol header, in bytes. */
88 uint32_t offset; /* Offset of field in protocol header, in bytes. */
89 enum mlx5_modification_field id; /* Device modification-field id. */
/*
 * Ethernet header fields reachable by modify-header actions:
 * destination MAC (split 48 = 32 + 16 bits) then source MAC.
 */
92 struct field_modify_info modify_eth[] = {
93 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
94 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
95 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
96 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* IPv4 header fields reachable by modify-header actions (TTL, src, dst). */
100 struct field_modify_info modify_ipv4[] = {
101 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
102 {4, 12, MLX5_MODI_OUT_SIPV4},
103 {4, 16, MLX5_MODI_OUT_DIPV4},
/*
 * IPv6 header fields reachable by modify-header actions: hop limit, then
 * the 128-bit source and destination addresses split into 32-bit words.
 */
107 struct field_modify_info modify_ipv6[] = {
108 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
109 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
110 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
111 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
112 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
113 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
114 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
115 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
116 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP header fields reachable by modify-header actions (src/dst port). */
120 struct field_modify_info modify_udp[] = {
121 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
122 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* TCP header fields reachable by modify-header actions (src/dst port). */
126 struct field_modify_info modify_tcp[] = {
127 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
128 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
133 * Convert modify-header action to DV specification.
136 * Pointer to item specification.
138 * Pointer to field modification information.
139 * @param[in,out] resource
140 * Pointer to the modify-header resource.
142 * Type of modification.
144 * Pointer to the error structure.
147 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Appends one mlx5_modification_cmd to resource->actions per field whose
 * mask segment is set, copying the value bytes from item->spec.
 * Fails with EINVAL when the command array (MLX5_MODIFY_NUM) would
 * overflow or when no field at all was selected by the mask.
 */
150 flow_dv_convert_modify_action(struct rte_flow_item *item,
151 struct field_modify_info *field,
152 struct mlx5_flow_dv_modify_hdr_resource *resource,
154 struct rte_flow_error *error)
156 uint32_t i = resource->actions_num;
157 struct mlx5_modification_cmd *actions = resource->actions;
158 const uint8_t *spec = item->spec;
159 const uint8_t *mask = item->mask;
162 while (field->size) {
164 /* Generate modify command for each mask segment. */
165 memcpy(&set, &mask[field->offset], field->size);
167 if (i >= MLX5_MODIFY_NUM)
168 return rte_flow_error_set(error, EINVAL,
169 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
170 "too many items to modify");
171 actions[i].action_type = type;
172 actions[i].field = field->id;
/* NOTE(review): length 0 appears to encode a full 4-byte field in the
 * device command format; smaller fields carry their width in bits —
 * confirm against the PRM. */
173 actions[i].length = field->size ==
174 4 ? 0 : field->size * 8;
/* Right-align the value within the 4-byte data window. */
175 rte_memcpy(&actions[i].data[4 - field->size],
176 &spec[field->offset], field->size);
177 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
180 if (resource->actions_num != i)
181 resource->actions_num = i;
184 if (!resource->actions_num)
185 return rte_flow_error_set(error, EINVAL,
186 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
187 "invalid modification flow item");
192 * Convert modify-header set IPv4 address action to DV specification.
194 * @param[in,out] resource
195 * Pointer to the modify-header resource.
197 * Pointer to action specification.
199 * Pointer to the error structure.
202 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds a synthetic IPv4 flow item carrying the address from the
 * SET_IPV4_SRC/SET_IPV4_DST action and hands it to
 * flow_dv_convert_modify_action() with the modify_ipv4 field table.
 */
205 flow_dv_convert_action_modify_ipv4
206 (struct mlx5_flow_dv_modify_hdr_resource *resource,
207 const struct rte_flow_action *action,
208 struct rte_flow_error *error)
210 const struct rte_flow_action_set_ipv4 *conf =
211 (const struct rte_flow_action_set_ipv4 *)(action->conf);
212 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
213 struct rte_flow_item_ipv4 ipv4;
214 struct rte_flow_item_ipv4 ipv4_mask;
216 memset(&ipv4, 0, sizeof(ipv4));
217 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
/* Choose src vs. dst field from the action type; mask only that field. */
218 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
219 ipv4.hdr.src_addr = conf->ipv4_addr;
220 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
222 ipv4.hdr.dst_addr = conf->ipv4_addr;
223 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
226 item.mask = &ipv4_mask;
227 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
228 MLX5_MODIFICATION_TYPE_SET, error);
232 * Convert modify-header set IPv6 address action to DV specification.
234 * @param[in,out] resource
235 * Pointer to the modify-header resource.
237 * Pointer to action specification.
239 * Pointer to the error structure.
242 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds a synthetic IPv6 flow item from the SET_IPV6_SRC/SET_IPV6_DST
 * action and converts it via the modify_ipv6 field table.
 */
245 flow_dv_convert_action_modify_ipv6
246 (struct mlx5_flow_dv_modify_hdr_resource *resource,
247 const struct rte_flow_action *action,
248 struct rte_flow_error *error)
250 const struct rte_flow_action_set_ipv6 *conf =
251 (const struct rte_flow_action_set_ipv6 *)(action->conf);
252 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
253 struct rte_flow_item_ipv6 ipv6;
254 struct rte_flow_item_ipv6 ipv6_mask;
256 memset(&ipv6, 0, sizeof(ipv6));
257 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
/* Copy the 128-bit address into src or dst depending on action type. */
258 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
259 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
260 sizeof(ipv6.hdr.src_addr));
261 memcpy(&ipv6_mask.hdr.src_addr,
262 &rte_flow_item_ipv6_mask.hdr.src_addr,
263 sizeof(ipv6.hdr.src_addr));
265 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
266 sizeof(ipv6.hdr.dst_addr));
267 memcpy(&ipv6_mask.hdr.dst_addr,
268 &rte_flow_item_ipv6_mask.hdr.dst_addr,
269 sizeof(ipv6.hdr.dst_addr));
272 item.mask = &ipv6_mask;
273 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
274 MLX5_MODIFICATION_TYPE_SET, error);
278 * Convert modify-header set MAC address action to DV specification.
280 * @param[in,out] resource
281 * Pointer to the modify-header resource.
283 * Pointer to action specification.
285 * Pointer to the error structure.
288 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds a synthetic ETH flow item from the SET_MAC_SRC/SET_MAC_DST
 * action and converts it via the modify_eth field table.
 */
291 flow_dv_convert_action_modify_mac
292 (struct mlx5_flow_dv_modify_hdr_resource *resource,
293 const struct rte_flow_action *action,
294 struct rte_flow_error *error)
296 const struct rte_flow_action_set_mac *conf =
297 (const struct rte_flow_action_set_mac *)(action->conf);
298 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
299 struct rte_flow_item_eth eth;
300 struct rte_flow_item_eth eth_mask;
302 memset(&eth, 0, sizeof(eth));
303 memset(&eth_mask, 0, sizeof(eth_mask));
/* Copy the 6-byte MAC into src or dst depending on action type. */
304 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
305 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
306 sizeof(eth.src.addr_bytes));
307 memcpy(&eth_mask.src.addr_bytes,
308 &rte_flow_item_eth_mask.src.addr_bytes,
309 sizeof(eth_mask.src.addr_bytes));
311 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
312 sizeof(eth.dst.addr_bytes));
313 memcpy(&eth_mask.dst.addr_bytes,
314 &rte_flow_item_eth_mask.dst.addr_bytes,
315 sizeof(eth_mask.dst.addr_bytes));
318 item.mask = &eth_mask;
319 return flow_dv_convert_modify_action(&item, modify_eth, resource,
320 MLX5_MODIFICATION_TYPE_SET, error);
324 * Convert modify-header set TP action to DV specification.
326 * @param[in,out] resource
327 * Pointer to the modify-header resource.
329 * Pointer to action specification.
331 * Pointer to rte_flow_item objects list.
333 * Pointer to flow attributes structure.
335 * Pointer to the error structure.
338 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Set the transport-layer (TCP or UDP) source/destination port.
 * The L4 protocol is taken from the flow attributes derived from the
 * pattern (flow_dv_attr_init); the UDP vs. TCP branch selection lines
 * are not visible in this excerpt.
 */
341 flow_dv_convert_action_modify_tp
342 (struct mlx5_flow_dv_modify_hdr_resource *resource,
343 const struct rte_flow_action *action,
344 const struct rte_flow_item *items,
345 union flow_dv_attr *attr,
346 struct rte_flow_error *error)
348 const struct rte_flow_action_set_tp *conf =
349 (const struct rte_flow_action_set_tp *)(action->conf);
350 struct rte_flow_item item;
351 struct rte_flow_item_udp udp;
352 struct rte_flow_item_udp udp_mask;
353 struct rte_flow_item_tcp tcp;
354 struct rte_flow_item_tcp tcp_mask;
355 struct field_modify_info *field;
/* Populate *attr with the layers present in the pattern. */
358 flow_dv_attr_init(items, attr);
/* UDP variant: build a UDP item carrying the new port. */
360 memset(&udp, 0, sizeof(udp));
361 memset(&udp_mask, 0, sizeof(udp_mask));
362 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
363 udp.hdr.src_port = conf->port;
364 udp_mask.hdr.src_port =
365 rte_flow_item_udp_mask.hdr.src_port;
367 udp.hdr.dst_port = conf->port;
368 udp_mask.hdr.dst_port =
369 rte_flow_item_udp_mask.hdr.dst_port;
371 item.type = RTE_FLOW_ITEM_TYPE_UDP;
373 item.mask = &udp_mask;
/* TCP variant: build a TCP item carrying the new port. */
377 memset(&tcp, 0, sizeof(tcp));
378 memset(&tcp_mask, 0, sizeof(tcp_mask));
379 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
380 tcp.hdr.src_port = conf->port;
381 tcp_mask.hdr.src_port =
382 rte_flow_item_tcp_mask.hdr.src_port;
384 tcp.hdr.dst_port = conf->port;
385 tcp_mask.hdr.dst_port =
386 rte_flow_item_tcp_mask.hdr.dst_port;
388 item.type = RTE_FLOW_ITEM_TYPE_TCP;
390 item.mask = &tcp_mask;
393 return flow_dv_convert_modify_action(&item, field, resource,
394 MLX5_MODIFICATION_TYPE_SET, error);
398 * Convert modify-header set TTL action to DV specification.
400 * @param[in,out] resource
401 * Pointer to the modify-header resource.
403 * Pointer to action specification.
405 * Pointer to rte_flow_item objects list.
407 * Pointer to flow attributes structure.
409 * Pointer to the error structure.
412 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Set the IP TTL (IPv4) or hop limit (IPv6) to the given value.
 * The IPv4 vs. IPv6 branch selection lines are not visible in this
 * excerpt; the choice presumably follows the attributes derived from
 * the pattern — confirm.
 */
415 flow_dv_convert_action_modify_ttl
416 (struct mlx5_flow_dv_modify_hdr_resource *resource,
417 const struct rte_flow_action *action,
418 const struct rte_flow_item *items,
419 union flow_dv_attr *attr,
420 struct rte_flow_error *error)
422 const struct rte_flow_action_set_ttl *conf =
423 (const struct rte_flow_action_set_ttl *)(action->conf);
424 struct rte_flow_item item;
425 struct rte_flow_item_ipv4 ipv4;
426 struct rte_flow_item_ipv4 ipv4_mask;
427 struct rte_flow_item_ipv6 ipv6;
428 struct rte_flow_item_ipv6 ipv6_mask;
429 struct field_modify_info *field;
/* Populate *attr with the layers present in the pattern. */
432 flow_dv_attr_init(items, attr);
/* IPv4 variant: write the TTL byte. */
434 memset(&ipv4, 0, sizeof(ipv4));
435 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
436 ipv4.hdr.time_to_live = conf->ttl_value;
437 ipv4_mask.hdr.time_to_live = 0xFF;
438 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
440 item.mask = &ipv4_mask;
/* IPv6 variant: write the hop-limit byte. */
444 memset(&ipv6, 0, sizeof(ipv6));
445 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
446 ipv6.hdr.hop_limits = conf->ttl_value;
447 ipv6_mask.hdr.hop_limits = 0xFF;
448 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
450 item.mask = &ipv6_mask;
453 return flow_dv_convert_modify_action(&item, field, resource,
454 MLX5_MODIFICATION_TYPE_SET, error);
458 * Convert modify-header decrement TTL action to DV specification.
460 * @param[in,out] resource
461 * Pointer to the modify-header resource.
463 * Pointer to action specification.
465 * Pointer to rte_flow_item objects list.
467 * Pointer to flow attributes structure.
469 * Pointer to the error structure.
472 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Decrement IP TTL / hop limit by one. Uses MLX5_MODIFICATION_TYPE_ADD
 * with the value 0xFF (i.e. -1 in 8-bit two's complement) so the device
 * adds -1 to the field.
 */
475 flow_dv_convert_action_modify_dec_ttl
476 (struct mlx5_flow_dv_modify_hdr_resource *resource,
477 const struct rte_flow_item *items,
478 union flow_dv_attr *attr,
479 struct rte_flow_error *error)
481 struct rte_flow_item item;
482 struct rte_flow_item_ipv4 ipv4;
483 struct rte_flow_item_ipv4 ipv4_mask;
484 struct rte_flow_item_ipv6 ipv6;
485 struct rte_flow_item_ipv6 ipv6_mask;
486 struct field_modify_info *field;
/* Populate *attr with the layers present in the pattern. */
489 flow_dv_attr_init(items, attr);
/* IPv4 variant: ADD 0xFF to the TTL byte. */
491 memset(&ipv4, 0, sizeof(ipv4));
492 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
493 ipv4.hdr.time_to_live = 0xFF;
494 ipv4_mask.hdr.time_to_live = 0xFF;
495 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
497 item.mask = &ipv4_mask;
/* IPv6 variant: ADD 0xFF to the hop-limit byte. */
501 memset(&ipv6, 0, sizeof(ipv6));
502 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
503 ipv6.hdr.hop_limits = 0xFF;
504 ipv6_mask.hdr.hop_limits = 0xFF;
505 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
507 item.mask = &ipv6_mask;
510 return flow_dv_convert_modify_action(&item, field, resource,
511 MLX5_MODIFICATION_TYPE_ADD, error);
515 * Validate META item.
518 * Pointer to the rte_eth_dev structure.
520 * Item specification.
522 * Attributes of flow that includes this item.
524 * Pointer to error structure.
527 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Checks that matching on metadata is enabled on the port
 * (DEV_TX_OFFLOAD_MATCH_METADATA), that the item carries non-empty,
 * non-zero data, that the mask is acceptable against the 32-bit NIC
 * mask, and that the flow direction allows the item (the error text
 * below indicates ingress is rejected).
 */
530 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
531 const struct rte_flow_item *item,
532 const struct rte_flow_attr *attr,
533 struct rte_flow_error *error)
535 const struct rte_flow_item_meta *spec = item->spec;
536 const struct rte_flow_item_meta *mask = item->mask;
537 const struct rte_flow_item_meta nic_mask = {
538 .data = RTE_BE32(UINT32_MAX)
541 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
543 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
544 return rte_flow_error_set(error, EPERM,
545 RTE_FLOW_ERROR_TYPE_ITEM,
547 "match on metadata offload "
548 "configuration is off for this port");
550 return rte_flow_error_set(error, EINVAL,
551 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
553 "data cannot be empty");
555 return rte_flow_error_set(error, EINVAL,
556 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
558 "data cannot be zero");
/* Fall back to the generic META mask when the item supplies none. */
560 mask = &rte_flow_item_meta_mask;
561 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
562 (const uint8_t *)&nic_mask,
563 sizeof(struct rte_flow_item_meta),
568 return rte_flow_error_set(error, ENOTSUP,
569 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
571 "pattern not supported for ingress");
576 * Validate count action.
581 * Pointer to error structure.
584 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * The COUNT action requires DevX support (priv->config.devx) and the
 * HAVE_IBV_FLOW_DEVX_COUNTERS build-time capability; otherwise the
 * action is reported as unsupported.
 */
587 flow_dv_validate_action_count(struct rte_eth_dev *dev,
588 struct rte_flow_error *error)
590 struct mlx5_priv *priv = dev->data->dev_private;
592 if (!priv->config.devx)
594 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
598 return rte_flow_error_set
600 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
602 "count action not supported");
606 * Validate the L2 encap action.
608 * @param[in] action_flags
609 * Holds the actions detected until now.
611 * Pointer to the encap action.
613 * Pointer to flow attributes
615 * Pointer to error structure.
618 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: a NULL action configuration, combining encap with DROP, and
 * more than one encap/decap action per flow; the trailing check rejects
 * an unsupported flow direction (error type is ATTR_INGRESS).
 */
621 flow_dv_validate_action_l2_encap(uint64_t action_flags,
622 const struct rte_flow_action *action,
623 const struct rte_flow_attr *attr,
624 struct rte_flow_error *error)
627 return rte_flow_error_set(error, EINVAL,
628 RTE_FLOW_ERROR_TYPE_ACTION, action,
629 "configuration cannot be null");
630 if (action_flags & MLX5_FLOW_ACTION_DROP)
631 return rte_flow_error_set(error, EINVAL,
632 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
633 "can't drop and encap in same flow");
634 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
635 return rte_flow_error_set(error, EINVAL,
636 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
637 "can only have a single encap or"
638 " decap action in a flow");
640 return rte_flow_error_set(error, ENOTSUP,
641 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
643 "encap action not supported for "
649 * Validate the L2 decap action.
651 * @param[in] action_flags
652 * Holds the actions detected until now.
654 * Pointer to flow attributes
656 * Pointer to error structure.
659 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: combining decap with DROP, more than one encap/decap per
 * flow, decap after a modify-header action, and an unsupported flow
 * direction (error type is ATTR_EGRESS).
 */
662 flow_dv_validate_action_l2_decap(uint64_t action_flags,
663 const struct rte_flow_attr *attr,
664 struct rte_flow_error *error)
666 if (action_flags & MLX5_FLOW_ACTION_DROP)
667 return rte_flow_error_set(error, EINVAL,
668 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
669 "can't drop and decap in same flow");
670 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
671 return rte_flow_error_set(error, EINVAL,
672 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
673 "can only have a single encap or"
674 " decap action in a flow");
675 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
676 return rte_flow_error_set(error, EINVAL,
677 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
678 "can't have decap action after"
681 return rte_flow_error_set(error, ENOTSUP,
682 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
684 "decap action not supported for "
690 * Validate the raw encap action.
692 * @param[in] action_flags
693 * Holds the actions detected until now.
695 * Pointer to the encap action.
697 * Pointer to flow attributes
699 * Pointer to error structure.
702 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: a NULL action configuration, combining encap with DROP,
 * more than one encap per flow, and ingress encap without a preceding
 * raw decap.
 */
705 flow_dv_validate_action_raw_encap(uint64_t action_flags,
706 const struct rte_flow_action *action,
707 const struct rte_flow_attr *attr,
708 struct rte_flow_error *error)
711 return rte_flow_error_set(error, EINVAL,
712 RTE_FLOW_ERROR_TYPE_ACTION, action,
713 "configuration cannot be null");
714 if (action_flags & MLX5_FLOW_ACTION_DROP)
715 return rte_flow_error_set(error, EINVAL,
716 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
717 "can't drop and encap in same flow");
718 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
719 return rte_flow_error_set(error, EINVAL,
720 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
721 "can only have a single encap"
722 " action in a flow");
723 /* encap without preceding decap is not supported for ingress */
724 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
725 return rte_flow_error_set(error, ENOTSUP,
726 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
728 "encap action not supported for "
734 * Validate the raw decap action.
736 * @param[in] action_flags
737 * Holds the actions detected until now.
739 * Pointer to the encap action.
741 * Pointer to flow attributes
743 * Pointer to error structure.
746 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: combining decap with DROP, decap after an encap action,
 * more than one decap per flow, and decap after a modify-header
 * action. On egress, raw decap is only valid when a raw encap follows
 * later in the action list (scanned below).
 */
749 flow_dv_validate_action_raw_decap(uint64_t action_flags,
750 const struct rte_flow_action *action,
751 const struct rte_flow_attr *attr,
752 struct rte_flow_error *error)
754 if (action_flags & MLX5_FLOW_ACTION_DROP)
755 return rte_flow_error_set(error, EINVAL,
756 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
757 "can't drop and decap in same flow");
758 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
759 return rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
761 "can't have encap action before"
763 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
764 return rte_flow_error_set(error, EINVAL,
765 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
766 "can only have a single decap"
767 " action in a flow");
768 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
769 return rte_flow_error_set(error, EINVAL,
770 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
771 "can't have decap action after"
773 /* decap action is valid on egress only if it is followed by encap */
775 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
776 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
779 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
780 return rte_flow_error_set
782 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
783 NULL, "decap action not supported"
790 * Find existing encap/decap resource or create and register a new one.
792 * @param dev[in, out]
793 * Pointer to rte_eth_dev structure.
794 * @param[in, out] resource
795 * Pointer to encap/decap resource.
796 * @parm[in, out] dev_flow
797 * Pointer to the dev_flow.
799 * pointer to error structure.
802 * 0 on success otherwise -errno and errno is set.
/*
 * Cache lookup keyed on (reformat_type, ft_type, size, buf contents).
 * On a hit, the cached resource's refcount is bumped and attached to
 * dev_flow. On a miss, a new resource is allocated, a packet-reformat
 * verbs action is created through the glue layer, and the resource is
 * inserted at the head of priv->encaps_decaps with refcount 1.
 */
805 flow_dv_encap_decap_resource_register
806 (struct rte_eth_dev *dev,
807 struct mlx5_flow_dv_encap_decap_resource *resource,
808 struct mlx5_flow *dev_flow,
809 struct rte_flow_error *error)
811 struct mlx5_priv *priv = dev->data->dev_private;
812 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
814 /* Lookup a matching resource from cache. */
815 LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
816 if (resource->reformat_type == cache_resource->reformat_type &&
817 resource->ft_type == cache_resource->ft_type &&
818 resource->size == cache_resource->size &&
819 !memcmp((const void *)resource->buf,
820 (const void *)cache_resource->buf,
822 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
823 (void *)cache_resource,
824 rte_atomic32_read(&cache_resource->refcnt));
825 rte_atomic32_inc(&cache_resource->refcnt);
826 dev_flow->dv.encap_decap = cache_resource;
830 /* Register new encap/decap resource. */
831 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
833 return rte_flow_error_set(error, ENOMEM,
834 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
835 "cannot allocate resource memory");
836 *cache_resource = *resource;
837 cache_resource->verbs_action =
838 mlx5_glue->dv_create_flow_action_packet_reformat
839 (priv->sh->ctx, cache_resource->size,
840 (cache_resource->size ? cache_resource->buf : NULL),
841 cache_resource->reformat_type,
842 cache_resource->ft_type);
/* Creation failure: release the half-built cache entry. */
843 if (!cache_resource->verbs_action) {
844 rte_free(cache_resource);
845 return rte_flow_error_set(error, ENOMEM,
846 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
847 NULL, "cannot create action");
849 rte_atomic32_init(&cache_resource->refcnt);
850 rte_atomic32_inc(&cache_resource->refcnt);
851 LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
852 dev_flow->dv.encap_decap = cache_resource;
853 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
854 (void *)cache_resource,
855 rte_atomic32_read(&cache_resource->refcnt));
860 * Get the size of specific rte_flow_item_type
862 * @param[in] item_type
863 * Tested rte_flow_item_type.
866 * sizeof struct item_type, 0 if void or irrelevant.
/* Maps each supported item type to the size of its spec structure. */
869 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
874 case RTE_FLOW_ITEM_TYPE_ETH:
875 retval = sizeof(struct rte_flow_item_eth);
877 case RTE_FLOW_ITEM_TYPE_VLAN:
878 retval = sizeof(struct rte_flow_item_vlan);
880 case RTE_FLOW_ITEM_TYPE_IPV4:
881 retval = sizeof(struct rte_flow_item_ipv4);
883 case RTE_FLOW_ITEM_TYPE_IPV6:
884 retval = sizeof(struct rte_flow_item_ipv6);
886 case RTE_FLOW_ITEM_TYPE_UDP:
887 retval = sizeof(struct rte_flow_item_udp);
889 case RTE_FLOW_ITEM_TYPE_TCP:
890 retval = sizeof(struct rte_flow_item_tcp);
892 case RTE_FLOW_ITEM_TYPE_VXLAN:
893 retval = sizeof(struct rte_flow_item_vxlan);
895 case RTE_FLOW_ITEM_TYPE_GRE:
896 retval = sizeof(struct rte_flow_item_gre);
898 case RTE_FLOW_ITEM_TYPE_NVGRE:
899 retval = sizeof(struct rte_flow_item_nvgre);
901 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
902 retval = sizeof(struct rte_flow_item_vxlan_gpe);
904 case RTE_FLOW_ITEM_TYPE_MPLS:
905 retval = sizeof(struct rte_flow_item_mpls);
907 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/*
 * Default header field values filled in by flow_dv_convert_encap_data()
 * when the user-supplied encap items leave them zero.
 */
915 #define MLX5_ENCAP_IPV4_VERSION 0x40
916 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
917 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
918 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
919 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
920 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
921 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
924 * Convert the encap action data from list of rte_flow_item to raw buffer
927 * Pointer to rte_flow_item objects list.
929 * Pointer to the output buffer.
931 * Pointer to the output buffer size.
933 * Pointer to the error structure.
936 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Serializes an item list (ETH [VLAN] IPV4/IPV6 [UDP] tunnel) into a raw
 * packet header in buf, bounded by MLX5_ENCAP_MAX_LEN. As each header is
 * appended, zero-valued linking fields of the previously seen headers
 * (ether_type, IP next-proto, UDP dst port, tunnel flags) are filled in
 * with defaults so the resulting header chain is self-consistent.
 */
939 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
940 size_t *size, struct rte_flow_error *error)
942 struct ether_hdr *eth = NULL;
943 struct vlan_hdr *vlan = NULL;
944 struct ipv4_hdr *ipv4 = NULL;
945 struct ipv6_hdr *ipv6 = NULL;
946 struct udp_hdr *udp = NULL;
947 struct vxlan_hdr *vxlan = NULL;
948 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
949 struct gre_hdr *gre = NULL;
951 size_t temp_size = 0;
954 return rte_flow_error_set(error, EINVAL,
955 RTE_FLOW_ERROR_TYPE_ACTION,
956 NULL, "invalid empty data");
957 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
958 len = flow_dv_get_item_len(items->type);
959 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
960 return rte_flow_error_set(error, EINVAL,
961 RTE_FLOW_ERROR_TYPE_ACTION,
963 "items total size is too big"
964 " for encap action");
/* NOTE(review): items->spec is copied unchecked — presumably validated
 * as non-NULL earlier in the call chain; confirm. */
965 rte_memcpy((void *)&buf[temp_size], items->spec, len);
966 switch (items->type) {
967 case RTE_FLOW_ITEM_TYPE_ETH:
968 eth = (struct ether_hdr *)&buf[temp_size];
970 case RTE_FLOW_ITEM_TYPE_VLAN:
971 vlan = (struct vlan_hdr *)&buf[temp_size];
973 return rte_flow_error_set(error, EINVAL,
974 RTE_FLOW_ERROR_TYPE_ACTION,
976 "eth header not found");
977 if (!eth->ether_type)
978 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
980 case RTE_FLOW_ITEM_TYPE_IPV4:
981 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
983 return rte_flow_error_set(error, EINVAL,
984 RTE_FLOW_ERROR_TYPE_ACTION,
986 "neither eth nor vlan"
/* Link the L2 header to IPv4 and default version/IHL/TTL. */
988 if (vlan && !vlan->eth_proto)
989 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
990 else if (eth && !eth->ether_type)
991 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
992 if (!ipv4->version_ihl)
993 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
994 MLX5_ENCAP_IPV4_IHL_MIN;
995 if (!ipv4->time_to_live)
996 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
998 case RTE_FLOW_ITEM_TYPE_IPV6:
999 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1001 return rte_flow_error_set(error, EINVAL,
1002 RTE_FLOW_ERROR_TYPE_ACTION,
1003 (void *)items->type,
1004 "neither eth nor vlan"
/* Link the L2 header to IPv6 and default vtc_flow/hop limit. */
1006 if (vlan && !vlan->eth_proto)
1007 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1008 else if (eth && !eth->ether_type)
1009 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1010 if (!ipv6->vtc_flow)
1012 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1013 if (!ipv6->hop_limits)
1014 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1016 case RTE_FLOW_ITEM_TYPE_UDP:
1017 udp = (struct udp_hdr *)&buf[temp_size];
1019 return rte_flow_error_set(error, EINVAL,
1020 RTE_FLOW_ERROR_TYPE_ACTION,
1021 (void *)items->type,
1022 "ip header not found");
1023 if (ipv4 && !ipv4->next_proto_id)
1024 ipv4->next_proto_id = IPPROTO_UDP;
1025 else if (ipv6 && !ipv6->proto)
1026 ipv6->proto = IPPROTO_UDP;
1028 case RTE_FLOW_ITEM_TYPE_VXLAN:
1029 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1031 return rte_flow_error_set(error, EINVAL,
1032 RTE_FLOW_ERROR_TYPE_ACTION,
1033 (void *)items->type,
1034 "udp header not found");
1036 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1037 if (!vxlan->vx_flags)
1039 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1041 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1042 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1044 return rte_flow_error_set(error, EINVAL,
1045 RTE_FLOW_ERROR_TYPE_ACTION,
1046 (void *)items->type,
1047 "udp header not found");
/* GPE requires an explicit next-protocol; no sane default exists. */
1048 if (!vxlan_gpe->proto)
1049 return rte_flow_error_set(error, EINVAL,
1050 RTE_FLOW_ERROR_TYPE_ACTION,
1051 (void *)items->type,
1052 "next protocol not found");
1055 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1056 if (!vxlan_gpe->vx_flags)
1057 vxlan_gpe->vx_flags =
1058 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1060 case RTE_FLOW_ITEM_TYPE_GRE:
1061 case RTE_FLOW_ITEM_TYPE_NVGRE:
1062 gre = (struct gre_hdr *)&buf[temp_size];
1064 return rte_flow_error_set(error, EINVAL,
1065 RTE_FLOW_ERROR_TYPE_ACTION,
1066 (void *)items->type,
1067 "next protocol not found");
1069 return rte_flow_error_set(error, EINVAL,
1070 RTE_FLOW_ERROR_TYPE_ACTION,
1071 (void *)items->type,
1072 "ip header not found");
1073 if (ipv4 && !ipv4->next_proto_id)
1074 ipv4->next_proto_id = IPPROTO_GRE;
1075 else if (ipv6 && !ipv6->proto)
1076 ipv6->proto = IPPROTO_GRE;
1078 case RTE_FLOW_ITEM_TYPE_VOID:
1081 return rte_flow_error_set(error, EINVAL,
1082 RTE_FLOW_ERROR_TYPE_ACTION,
1083 (void *)items->type,
1084 "unsupported item type");
1094 * Convert L2 encap action to DV specification.
1097 * Pointer to rte_eth_dev structure.
1099 * Pointer to action structure.
1100 * @param[in, out] dev_flow
1101 * Pointer to the mlx5_flow.
1103 * Pointer to the error structure.
1106 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds the encap buffer either directly from a RAW_ENCAP payload or by
 * serializing the VXLAN/NVGRE encap item list, then registers (or reuses)
 * an L2-to-L2-tunnel packet-reformat resource on the NIC TX table.
 */
1109 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1110 const struct rte_flow_action *action,
1111 struct mlx5_flow *dev_flow,
1112 struct rte_flow_error *error)
1114 const struct rte_flow_item *encap_data;
1115 const struct rte_flow_action_raw_encap *raw_encap_data;
1116 struct mlx5_flow_dv_encap_decap_resource res = {
1118 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1119 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
/* RAW_ENCAP supplies a pre-built header blob; copy it verbatim. */
1122 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1124 (const struct rte_flow_action_raw_encap *)action->conf;
1125 res.size = raw_encap_data->size;
1126 memcpy(res.buf, raw_encap_data->data, res.size);
1128 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1130 ((const struct rte_flow_action_vxlan_encap *)
1131 action->conf)->definition;
1134 ((const struct rte_flow_action_nvgre_encap *)
1135 action->conf)->definition;
1136 if (flow_dv_convert_encap_data(encap_data, res.buf,
1140 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1141 return rte_flow_error_set(error, EINVAL,
1142 RTE_FLOW_ERROR_TYPE_ACTION,
1143 NULL, "can't create L2 encap action");
1148 * Convert L2 decap action to DV specification.
1151 * Pointer to rte_eth_dev structure.
1152 * @param[in, out] dev_flow
1153 * Pointer to the mlx5_flow.
1155 * Pointer to the error structure.
1158 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Registers (or reuses) an L2-tunnel-to-L2 packet-reformat resource on
 * the NIC RX table; no header buffer is needed for plain L2 decap.
 */
1161 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1162 struct mlx5_flow *dev_flow,
1163 struct rte_flow_error *error)
1165 struct mlx5_flow_dv_encap_decap_resource res = {
1168 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1169 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1172 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1173 return rte_flow_error_set(error, EINVAL,
1174 RTE_FLOW_ERROR_TYPE_ACTION,
1175 NULL, "can't create L2 decap action");
1180 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1183 * Pointer to rte_eth_dev structure.
1185 * Pointer to action structure.
1186 * @param[in, out] dev_flow
1187 * Pointer to the mlx5_flow.
1189 * Pointer to the flow attributes.
1191 * Pointer to the error structure.
1194 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Registers a raw L3-tunnel reformat: L2-to-L3 on egress (TX table) or
 * L3-to-L2 on ingress (RX table), using the caller-provided raw buffer.
 * NOTE(review): `res` is not zero-initialized and encap_data->size is
 * copied into res.buf without an explicit bound check here — presumably
 * validated earlier against the buffer capacity; confirm.
 */
1197 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1198 const struct rte_flow_action *action,
1199 struct mlx5_flow *dev_flow,
1200 const struct rte_flow_attr *attr,
1201 struct rte_flow_error *error)
1203 const struct rte_flow_action_raw_encap *encap_data;
1204 struct mlx5_flow_dv_encap_decap_resource res;
1206 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1207 res.size = encap_data->size;
1208 memcpy(res.buf, encap_data->data, res.size);
1209 res.reformat_type = attr->egress ?
1210 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1211 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1212 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1213 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1214 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1215 return rte_flow_error_set(error, EINVAL,
1216 RTE_FLOW_ERROR_TYPE_ACTION,
1217 NULL, "can't create encap action");
1222 * Validate the modify-header actions.
1224 * @param[in] action_flags
1225 * Holds the actions detected until now.
1227 * Pointer to the modify action.
1229 * Pointer to error structure.
1232 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Common checks for every modify-header action: a configuration must be
 * present (DEC_TTL is exempt — it carries no config), and modify-header
 * may not follow an encap action.
 */
1235 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1236 const struct rte_flow_action *action,
1237 struct rte_flow_error *error)
1239 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1240 return rte_flow_error_set(error, EINVAL,
1241 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1242 NULL, "action configuration not set");
1243 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1244 return rte_flow_error_set(error, EINVAL,
1245 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1246 "can't have encap action before"
/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Rewriting a MAC only makes sense if the pattern matches L2. */
	if (!(item_flags & MLX5_FLOW_LAYER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "no L2 item in pattern");
/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	/* Generic modify-header checks (conf present, no prior encap). */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* The pattern must actually match IPv4 for the rewrite to apply. */
	if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "no ipv4 item in pattern");
/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	/* Generic modify-header checks (conf present, no prior encap). */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* The pattern must actually match IPv6 for the rewrite to apply. */
	if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "no ipv6 item in pattern");
/**
 * Validate the modify-header transport-port (TP) actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
	/* Generic modify-header checks (conf present, no prior encap). */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Port rewrite requires a TCP/UDP item in the pattern. */
	if (!(item_flags & MLX5_FLOW_LAYER_L4))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "no transport layer "
/**
 * Validate the modify-header TTL actions (SET_TTL / DEC_TTL).
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	/* Generic modify-header checks (conf present, no prior encap). */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* TTL/hop-limit rewrite needs an L3 (IPv4 or IPv6) item. */
	if (!(item_flags & MLX5_FLOW_LAYER_L3))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "no IP protocol in pattern");
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Resources are reference-counted and cached per device so identical
 * modify-header command sequences share one verbs action.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource (template to look up or copy).
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; receives the (cached or new) resource.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
		/* Match on table type, command count and the raw command
		 * bytes; any difference means a distinct HW action. */
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Cache hit: bump refcount and reuse. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	/* Snapshot the caller's template into the cached entry. */
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(cache_resource->actions_num *
					sizeof(cache_resource->actions[0]),
					(uint64_t *)cache_resource->actions,
					cache_resource->ft_type);
	if (!cache_resource->verbs_action) {
		/* Creation failed: release the half-built entry. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	/* Publish with refcnt 1 and link into the per-device cache. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
/**
 * Get or create a flow counter.
 *
 * Shared counters with a matching id are reused from the per-device
 * list; otherwise a new DevX counter set is allocated.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	/* Counters need DevX support in the kernel/firmware. */
	if (!priv->config.devx) {
	/* Reuse an existing shared counter with the same id, if any. */
	LIST_FOREACH(cnt, &priv->flow_counters, next) {
		if (cnt->shared && cnt->id == id) {
	/* No match: allocate the counter wrapper and its DevX set. */
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	/* Initialize the new counter from a template. */
	struct mlx5_flow_counter tmpl = {
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
/**
 * Release a flow counter.
 *
 * Decrements the reference count; when it drops to zero the DevX
 * counter set is freed and the counter is unlinked from the device list.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
flow_dv_counter_release(struct mlx5_flow_counter *counter)
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
			/* Freeing can fail; log but continue teardown. */
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;
	/* The DV path only supports group 0. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  "groups is not supported");
	/* MLX5_FLOW_PRIO_RSVD means "let the PMD choose". */
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  "priority out of range");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  "transfer is not supported");
	/* Exactly one direction must be set, never both or neither. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
/**
 * Internal validation function. For validating both actions and items.
 *
 * Walks the pattern once, accumulating layer flags and the IP
 * next-protocol, then walks the action list, accumulating action flags
 * and enforcing per-action constraints.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_ernno is set.
 */
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	/* 0xff = "any protocol" until an L3 item narrows it down. */
	uint8_t next_protocol = 0xff;
	ret = flow_dv_validate_attributes(dev, attr, error);
	/* Pass 1: validate each pattern item and build item_flags. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		/* Items after a tunnel item describe the inner packet. */
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/* Track next_proto_id only when the mask covers it,
			 * for validating a following GRE/L4 item. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
				/* Reset for inner layer. */
				next_protocol = 0xff;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			/* Same next-protocol tracking as IPv4, via proto. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
				/* Reset for inner layer. */
				next_protocol = 0xff;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						 &rte_flow_item_tcp_mask,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			last_item = MLX5_FLOW_LAYER_GRE;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
			last_item = MLX5_FLOW_LAYER_VXLAN;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
			last_item = MLX5_FLOW_LAYER_MPLS;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
			last_item = MLX5_FLOW_ITEM_METADATA;
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		item_flags |= last_item;
	/* Pass 2: validate each action and build action_flags. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		/* HW has a hard cap on the action list length. */
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
			action_flags |= MLX5_FLOW_ACTION_FLAG;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
			action_flags |= MLX5_FLOW_ACTION_MARK;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
			action_flags |= MLX5_FLOW_ACTION_DROP;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
			action_flags |= MLX5_FLOW_ACTION_RSS;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			action_flags |= MLX5_FLOW_ACTION_COUNT;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
					MLX5_FLOW_ACTION_SET_MAC_SRC :
					MLX5_FLOW_ACTION_SET_MAC_DST;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
					MLX5_FLOW_ACTION_SET_IPV4_SRC :
					MLX5_FLOW_ACTION_SET_IPV4_DST;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
					MLX5_FLOW_ACTION_SET_IPV6_SRC :
					MLX5_FLOW_ACTION_SET_IPV6_DST;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
					MLX5_FLOW_ACTION_SET_TP_SRC :
					MLX5_FLOW_ACTION_SET_TP_DST;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
					MLX5_FLOW_ACTION_SET_TTL :
					MLX5_FLOW_ACTION_DEC_TTL;
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "action not supported");
	/* Ingress flows must terminate somewhere (queue/RSS/drop/...). */
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_ernno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item items[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		struct rte_flow_error *error)
	/* DV flows have a fixed size; attr/items/actions are unused. */
	uint32_t size = sizeof(struct mlx5_flow);
	struct mlx5_flow *flow;
	flow = rte_calloc(__func__, 1, size, 0);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
	/* Matcher value buffer spans the whole fte_match_param. */
	flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
flow_dv_check_valid_spec(void *match_mask, void *match_value)
	uint8_t *m = match_mask;
	uint8_t *v = match_value;
	/* Byte-wise scan: every value bit must be covered by the mask. */
	for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
			"match_value differs from match_criteria"
			" %p[%u] != %p[%u]",
			match_value, i, match_mask, i);
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	/* Default mask used when the item supplies none. */
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
	/* Ethertype stored big-endian directly into the value buffer. */
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_vlan(void *matcher, void *key,
			    const struct rte_flow_item *item,
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	/* Default mask: 12-bit VID, full inner ethertype. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	/* Split the TCI into its VID (0-11), CFI (12) and PCP (13-15). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask used when the item supplies none. */
	const struct rte_flow_item_ipv4 nic_mask = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Always pin the IP version field to 4. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	/* Value bytes are masked so they stay within the mask's range. */
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS splits into ECN (low 2 bits) and DSCP (high 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask used when the item supplies none. */
	const struct rte_flow_item_ipv6 nic_mask = {
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Always pin the IP version field to 6. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	/* Value bytes are masked so they stay within the mask's range. */
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* vtc_flow carries version/TC/flow-label; TC splits to ECN+DSCP. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Flow label lives in misc parameters, inner or outer variant. */
	MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
	MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
	MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
	MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_tcp(void *matcher, void *key,
			   const struct rte_flow_item *item,
	const struct rte_flow_item_tcp *tcp_m = item->mask;
	const struct rte_flow_item_tcp *tcp_v = item->spec;
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* A TCP item implies matching the IP protocol field exactly. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
		/* Fall back to the rte_flow default mask when none given. */
		tcp_m = &rte_flow_item_tcp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_udp(void *matcher, void *key,
			   const struct rte_flow_item *item,
	const struct rte_flow_item_udp *udp_m = item->mask;
	const struct rte_flow_item_udp *udp_v = item->spec;
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* A UDP item implies matching the IP protocol field exactly. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
		/* Fall back to the rte_flow default mask when none given. */
		udp_m = &rte_flow_item_udp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
		 rte_be_to_cpu_16(udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* A GRE item implies matching the IP protocol field exactly. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
		/* Fall back to the rte_flow default mask when none given. */
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* TNI + flow_id are contiguous and copied as one byte run. */
	const char *tni_flow_id_m = (const char *)nvgre_m->tni;
	const char *tni_flow_id_v = (const char *)nvgre_v->tni;
	/* NVGRE shares the base GRE fields; translate those first. */
	flow_dv_translate_item_gre(matcher, key, item, inner);
		/* Fall back to the rte_flow default mask when none given. */
		nvgre_m = &rte_flow_item_nvgre_mask;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	/* Value bytes are masked so they stay within the mask's range. */
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		/* Inner vs outer selects the header set in fte_match_param. */
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Same translator serves VXLAN and VXLAN-GPE; pick the port. */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Only pin the UDP dport if no earlier item constrained it. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
		/* Fall back to the rte_flow default mask when none given. */
		vxlan_m = &rte_flow_item_vxlan_mask;
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	/* Value bytes are masked so they stay within the mask's range. */
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2570 * Add MPLS item to matcher and to the value.
2572 * @param[in, out] matcher
2574 * @param[in, out] key
2575 * Flow matcher value.
2577 * Flow pattern to translate.
2578 * @param[in] prev_layer
2579 * The protocol layer indicated in previous item.
2581 * Item is inner pattern.
/*
 * Translate an MPLS item into the DV matcher mask/value.
 * The previous layer (UDP vs. GRE) decides both the carrier-protocol
 * match (udp_dport / gre_protocol / ip_protocol) and which misc2 MPLS
 * field (over UDP vs. over GRE) receives the label.
 */
2584 flow_dv_translate_item_mpls(void *matcher, void *key,
2585 const struct rte_flow_item *item,
2586 uint64_t prev_layer,
2589 const uint32_t *in_mpls_m = item->mask;
2590 const uint32_t *in_mpls_v = item->spec;
2591 uint32_t *out_mpls_m = 0;
2592 uint32_t *out_mpls_v = 0;
2593 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2594 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2595 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2597 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2598 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2599 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First pass: pin the carrier protocol according to the previous layer. */
2601 switch (prev_layer) {
2602 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2603 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2604 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2605 MLX5_UDP_PORT_MPLS);
2607 case MLX5_FLOW_LAYER_GRE:
2608 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2609 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2613 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2614 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* No user mask: fall back to the rte_flow default MPLS mask. */
2621 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second pass: pick the misc2 MPLS field matching the carrier. */
2622 switch (prev_layer) {
2623 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2625 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2626 outer_first_mpls_over_udp);
2628 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2629 outer_first_mpls_over_udp);
2631 case MLX5_FLOW_LAYER_GRE:
2633 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2634 outer_first_mpls_over_gre);
2636 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2637 outer_first_mpls_over_gre);
2640 /* Inner MPLS not over GRE is not supported. */
2643 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2647 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write mask and mask-ANDed value only when a target field was chosen. */
2653 if (out_mpls_m && out_mpls_v) {
2654 *out_mpls_m = *in_mpls_m;
2655 *out_mpls_v = *in_mpls_v & *in_mpls_m;
2660 * Add META item to matcher
2662 * @param[in, out] matcher
2664 * @param[in, out] key
2665 * Flow matcher value.
2667 * Flow pattern to translate.
2669 * Item is inner pattern.
/*
 * Translate a META item into the DV matcher mask/value.
 * Matches the application-provided metadata against the device
 * metadata register A (misc2.metadata_reg_a), converting the
 * big-endian item data to CPU order for MLX5_SET.
 */
2672 flow_dv_translate_item_meta(void *matcher, void *key,
2673 const struct rte_flow_item *item)
2675 const struct rte_flow_item_meta *meta_m;
2676 const struct rte_flow_item_meta *meta_v;
2678 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2680 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2682 meta_m = (const void *)item->mask;
/* No user mask: fall back to the rte_flow default META mask. */
2684 meta_m = &rte_flow_item_meta_mask;
2685 meta_v = (const void *)item->spec;
2687 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2688 rte_be_to_cpu_32(meta_m->data));
/* Value is always spec ANDed with the mask. */
2689 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2690 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/* All-zero reference buffer used to test whether a matcher header set
 * was left untouched (i.e. nothing in that criteria group is matched).
 */
2694 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* True when the given header group inside match_criteria is all zeros. */
2696 #define HEADER_IS_ZERO(match_criteria, headers) \
2697 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2698 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2701 * Calculate flow matcher enable bitmap.
2703 * @param match_criteria
2704 * Pointer to flow matcher criteria.
2707 * Bitmap of enabled fields.
/*
 * Build the match_criteria_enable bitmap for a matcher: one bit per
 * criteria group (outer headers, misc, inner headers, misc2) that is
 * non-zero in the supplied criteria buffer.
 */
2710 flow_dv_matcher_enable(uint32_t *match_criteria)
2712 uint8_t match_criteria_enable;
2714 match_criteria_enable =
2715 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2716 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2717 match_criteria_enable |=
2718 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2719 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2720 match_criteria_enable |=
2721 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2722 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2723 match_criteria_enable |=
2724 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2725 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2727 return match_criteria_enable;
2731 * Register the flow matcher.
2733 * @param dev[in, out]
2734 * Pointer to rte_eth_dev structure.
2735 * @param[in, out] matcher
2736 * Pointer to flow matcher.
2737 * @parm[in, out] dev_flow
2738 * Pointer to the dev_flow.
2740 * pointer to error structure.
2743 * 0 on success otherwise -errno and errno is set.
/*
 * Register (or reuse) a flow matcher object.
 * Looks the matcher up in the per-port cache by (crc, priority, egress,
 * mask); on a hit only the refcount is bumped. On a miss a new cached
 * matcher is allocated, created through mlx5 glue and inserted at the
 * head of the cache list. dev_flow->dv.matcher receives the result.
 */
2746 flow_dv_matcher_register(struct rte_eth_dev *dev,
2747 struct mlx5_flow_dv_matcher *matcher,
2748 struct mlx5_flow *dev_flow,
2749 struct rte_flow_error *error)
2751 struct mlx5_priv *priv = dev->data->dev_private;
2752 struct mlx5_flow_dv_matcher *cache_matcher;
2753 struct mlx5dv_flow_matcher_attr dv_attr = {
2754 .type = IBV_FLOW_ATTR_NORMAL,
2755 .match_mask = (void *)&matcher->mask,
2758 /* Lookup from cache. */
2759 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
2760 if (matcher->crc == cache_matcher->crc &&
2761 matcher->priority == cache_matcher->priority &&
2762 matcher->egress == cache_matcher->egress &&
2763 !memcmp((const void *)matcher->mask.buf,
2764 (const void *)cache_matcher->mask.buf,
2765 cache_matcher->mask.size)) {
2767 "priority %hd use %s matcher %p: refcnt %d++",
2768 cache_matcher->priority,
2769 cache_matcher->egress ? "tx" : "rx",
2770 (void *)cache_matcher,
2771 rte_atomic32_read(&cache_matcher->refcnt));
2772 rte_atomic32_inc(&cache_matcher->refcnt);
2773 dev_flow->dv.matcher = cache_matcher;
2777 /* Register new matcher. */
2778 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
2780 return rte_flow_error_set(error, ENOMEM,
2781 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2782 "cannot allocate matcher memory");
2783 *cache_matcher = *matcher;
2784 dv_attr.match_criteria_enable =
2785 flow_dv_matcher_enable(cache_matcher->mask.buf);
2786 dv_attr.priority = matcher->priority;
2787 if (matcher->egress)
2788 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
2789 cache_matcher->matcher_object =
2790 mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
/* On HW/verbs failure, free the cache entry before reporting. */
2791 if (!cache_matcher->matcher_object) {
2792 rte_free(cache_matcher);
2793 return rte_flow_error_set(error, ENOMEM,
2794 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2795 NULL, "cannot create matcher");
2797 rte_atomic32_inc(&cache_matcher->refcnt);
2798 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
2799 dev_flow->dv.matcher = cache_matcher;
2800 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
2801 cache_matcher->priority,
2802 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
2803 rte_atomic32_read(&cache_matcher->refcnt));
2808 * Add source vport match to the specified matcher.
2810 * @param[in, out] matcher
2812 * @param[in, out] key
2813 * Flow matcher value.
2815 * Source vport value to match
/*
 * Add a source-vport match (misc.source_port) to the matcher mask/value.
 * Used for E-Switch ingress rules to pin the originating vport.
 */
2820 flow_dv_translate_source_vport(void *matcher, void *key,
2821 int16_t port, uint16_t mask)
2823 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2824 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2826 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
2827 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
2831 * Fill the flow with DV spec.
2834 * Pointer to rte_eth_dev structure.
2835 * @param[in, out] dev_flow
2836 * Pointer to the sub flow.
2838 * Pointer to the flow attributes.
2840 * Pointer to the list of items.
2841 * @param[in] actions
2842 * Pointer to the list of actions.
2844 * Pointer to the error structure.
2847 * 0 on success, a negative errno value otherwise and rte_ernno is set.
/*
 * Translate an rte_flow (attributes, items, actions) into the DV
 * representation stored in dev_flow: the actions array, the matcher
 * mask/value buffers, the RSS hash fields and the layer flags.
 * Finishes by registering the matcher. Modify-header actions are
 * accumulated in 'res' and materialized once at ACTION_TYPE_END.
 */
2850 flow_dv_translate(struct rte_eth_dev *dev,
2851 struct mlx5_flow *dev_flow,
2852 const struct rte_flow_attr *attr,
2853 const struct rte_flow_item items[],
2854 const struct rte_flow_action actions[],
2855 struct rte_flow_error *error)
2857 struct mlx5_priv *priv = dev->data->dev_private;
2858 struct rte_flow *flow = dev_flow->flow;
2859 uint64_t item_flags = 0;
2860 uint64_t last_item = 0;
2861 uint64_t action_flags = 0;
2862 uint64_t priority = attr->priority;
2863 struct mlx5_flow_dv_matcher matcher = {
2865 .size = sizeof(matcher.mask.buf),
2869 bool actions_end = false;
2870 struct mlx5_flow_dv_modify_hdr_resource res = {
2871 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2872 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
2874 union flow_dv_attr flow_attr = { .attr = 0 };
/* Reserved priority means "lowest configured flow priority". */
2876 if (priority == MLX5_FLOW_PRIO_RSVD)
2877 priority = priv->config.flow_prio - 1;
/* First loop: translate actions until ACTION_TYPE_END is reached. */
2878 for (; !actions_end ; actions++) {
2879 const struct rte_flow_action_queue *queue;
2880 const struct rte_flow_action_rss *rss;
2881 const struct rte_flow_action *action = actions;
2882 const struct rte_flow_action_count *count = action->conf;
2883 const uint8_t *rss_key;
2885 switch (actions->type) {
2886 case RTE_FLOW_ACTION_TYPE_VOID:
2888 case RTE_FLOW_ACTION_TYPE_FLAG:
2889 dev_flow->dv.actions[actions_n].type =
2890 MLX5DV_FLOW_ACTION_TAG;
2891 dev_flow->dv.actions[actions_n].tag_value =
2892 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2894 action_flags |= MLX5_FLOW_ACTION_FLAG;
2896 case RTE_FLOW_ACTION_TYPE_MARK:
2897 dev_flow->dv.actions[actions_n].type =
2898 MLX5DV_FLOW_ACTION_TAG;
2899 dev_flow->dv.actions[actions_n].tag_value =
2901 (((const struct rte_flow_action_mark *)
2902 (actions->conf))->id);
2904 action_flags |= MLX5_FLOW_ACTION_MARK;
2906 case RTE_FLOW_ACTION_TYPE_DROP:
2907 dev_flow->dv.actions[actions_n].type =
2908 MLX5DV_FLOW_ACTION_DROP;
2909 action_flags |= MLX5_FLOW_ACTION_DROP;
2911 case RTE_FLOW_ACTION_TYPE_QUEUE:
2912 queue = actions->conf;
2913 flow->rss.queue_num = 1;
2914 (*flow->queue)[0] = queue->index;
2915 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2917 case RTE_FLOW_ACTION_TYPE_RSS:
2918 rss = actions->conf;
2920 memcpy((*flow->queue), rss->queue,
2921 rss->queue_num * sizeof(uint16_t));
2922 flow->rss.queue_num = rss->queue_num;
2923 /* NULL RSS key indicates default RSS key. */
2924 rss_key = !rss->key ? rss_hash_default_key : rss->key;
2925 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
2926 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
2927 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
2928 flow->rss.level = rss->level;
2929 action_flags |= MLX5_FLOW_ACTION_RSS;
2931 case RTE_FLOW_ACTION_TYPE_COUNT:
/* DevX is required for flow counters. */
2932 if (!priv->config.devx) {
2933 rte_errno = ENOTSUP;
2937 flow_dv_counter_new(dev,
2938 count->shared, count->id);
2939 if (flow->counter == NULL)
2941 dev_flow->dv.actions[actions_n].type =
2942 MLX5DV_FLOW_ACTION_COUNTER_DEVX;
2943 dev_flow->dv.actions[actions_n].obj =
2944 flow->counter->dcs->obj;
2945 action_flags |= MLX5_FLOW_ACTION_COUNT;
2949 if (rte_errno == ENOTSUP)
2950 return rte_flow_error_set
2952 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2954 "count action not supported");
2956 return rte_flow_error_set
2958 RTE_FLOW_ERROR_TYPE_ACTION,
2960 "cannot create counter"
2962 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2963 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2964 if (flow_dv_create_action_l2_encap(dev, actions,
2967 dev_flow->dv.actions[actions_n].type =
2968 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2969 dev_flow->dv.actions[actions_n].action =
2970 dev_flow->dv.encap_decap->verbs_action;
2972 action_flags |= actions->type ==
2973 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2974 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2975 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2977 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2978 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2979 if (flow_dv_create_action_l2_decap(dev, dev_flow,
2982 dev_flow->dv.actions[actions_n].type =
2983 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2984 dev_flow->dv.actions[actions_n].action =
2985 dev_flow->dv.encap_decap->verbs_action;
2987 action_flags |= actions->type ==
2988 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2989 MLX5_FLOW_ACTION_VXLAN_DECAP :
2990 MLX5_FLOW_ACTION_NVGRE_DECAP;
2992 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2993 /* Handle encap with preceding decap. */
2994 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
2995 if (flow_dv_create_action_raw_encap
2996 (dev, actions, dev_flow, attr, error))
2998 dev_flow->dv.actions[actions_n].type =
2999 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
3000 dev_flow->dv.actions[actions_n].action =
3001 dev_flow->dv.encap_decap->verbs_action;
3003 /* Handle encap without preceding decap. */
3004 if (flow_dv_create_action_l2_encap(dev, actions,
3008 dev_flow->dv.actions[actions_n].type =
3009 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
3010 dev_flow->dv.actions[actions_n].action =
3011 dev_flow->dv.encap_decap->verbs_action;
3014 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3016 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3017 /* Check if this decap is followed by encap. */
3018 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3019 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3022 /* Handle decap only if it isn't followed by encap. */
3023 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3024 if (flow_dv_create_action_l2_decap(dev,
3028 dev_flow->dv.actions[actions_n].type =
3029 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
3030 dev_flow->dv.actions[actions_n].action =
3031 dev_flow->dv.encap_decap->verbs_action;
3034 /* If decap is followed by encap, handle it at encap. */
3035 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3037 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3038 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3039 if (flow_dv_convert_action_modify_mac(&res, actions,
3042 action_flags |= actions->type ==
3043 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3044 MLX5_FLOW_ACTION_SET_MAC_SRC :
3045 MLX5_FLOW_ACTION_SET_MAC_DST;
3047 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3048 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3049 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3052 action_flags |= actions->type ==
3053 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3054 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3055 MLX5_FLOW_ACTION_SET_IPV4_DST;
3057 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3058 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3059 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3062 action_flags |= actions->type ==
3063 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3064 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3065 MLX5_FLOW_ACTION_SET_IPV6_DST;
3067 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3068 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3069 if (flow_dv_convert_action_modify_tp(&res, actions,
3073 action_flags |= actions->type ==
3074 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3075 MLX5_FLOW_ACTION_SET_TP_SRC :
3076 MLX5_FLOW_ACTION_SET_TP_DST;
3078 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3079 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3083 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3085 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3086 if (flow_dv_convert_action_modify_ttl(&res, actions,
3090 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3092 case RTE_FLOW_ACTION_TYPE_END:
3094 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3095 /* create modify action if needed. */
3096 if (flow_dv_modify_hdr_resource_register
3101 dev_flow->dv.actions[actions_n].type =
3102 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
3103 dev_flow->dv.actions[actions_n].action =
3104 dev_flow->dv.modify_hdr->verbs_action;
3112 dev_flow->dv.actions_n = actions_n;
3113 flow->actions = action_flags;
3114 if (attr->ingress && !attr->transfer &&
3115 (priv->representor || priv->master)) {
3116 /* It was validated - we support unidirection flows only. */
3117 assert(!attr->egress);
3119 * Add matching on source vport index only
3120 * for ingress rules in E-Switch configurations.
3122 flow_dv_translate_source_vport(matcher.mask.buf,
3123 dev_flow->dv.value.buf,
/* Second loop: translate pattern items into the matcher buffers. */
3127 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3128 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3129 void *match_mask = matcher.mask.buf;
3130 void *match_value = dev_flow->dv.value.buf;
3132 switch (items->type) {
3133 case RTE_FLOW_ITEM_TYPE_ETH:
3134 flow_dv_translate_item_eth(match_mask, match_value,
3136 matcher.priority = MLX5_PRIORITY_MAP_L2;
3137 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3138 MLX5_FLOW_LAYER_OUTER_L2;
3140 case RTE_FLOW_ITEM_TYPE_VLAN:
3141 flow_dv_translate_item_vlan(match_mask, match_value,
3143 matcher.priority = MLX5_PRIORITY_MAP_L2;
3144 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3145 MLX5_FLOW_LAYER_INNER_VLAN) :
3146 (MLX5_FLOW_LAYER_OUTER_L2 |
3147 MLX5_FLOW_LAYER_OUTER_VLAN);
3149 case RTE_FLOW_ITEM_TYPE_IPV4:
3150 flow_dv_translate_item_ipv4(match_mask, match_value,
3152 matcher.priority = MLX5_PRIORITY_MAP_L3;
3153 dev_flow->dv.hash_fields |=
3154 mlx5_flow_hashfields_adjust
3156 MLX5_IPV4_LAYER_TYPES,
3157 MLX5_IPV4_IBV_RX_HASH);
3158 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3159 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3161 case RTE_FLOW_ITEM_TYPE_IPV6:
3162 flow_dv_translate_item_ipv6(match_mask, match_value,
3164 matcher.priority = MLX5_PRIORITY_MAP_L3;
3165 dev_flow->dv.hash_fields |=
3166 mlx5_flow_hashfields_adjust
3168 MLX5_IPV6_LAYER_TYPES,
3169 MLX5_IPV6_IBV_RX_HASH);
3170 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3171 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3173 case RTE_FLOW_ITEM_TYPE_TCP:
3174 flow_dv_translate_item_tcp(match_mask, match_value,
3176 matcher.priority = MLX5_PRIORITY_MAP_L4;
3177 dev_flow->dv.hash_fields |=
3178 mlx5_flow_hashfields_adjust
3179 (dev_flow, tunnel, ETH_RSS_TCP,
3180 IBV_RX_HASH_SRC_PORT_TCP |
3181 IBV_RX_HASH_DST_PORT_TCP);
3182 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3183 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3185 case RTE_FLOW_ITEM_TYPE_UDP:
3186 flow_dv_translate_item_udp(match_mask, match_value,
3188 matcher.priority = MLX5_PRIORITY_MAP_L4;
3189 dev_flow->dv.hash_fields |=
3190 mlx5_flow_hashfields_adjust
3191 (dev_flow, tunnel, ETH_RSS_UDP,
3192 IBV_RX_HASH_SRC_PORT_UDP |
3193 IBV_RX_HASH_DST_PORT_UDP);
3194 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3195 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3197 case RTE_FLOW_ITEM_TYPE_GRE:
3198 flow_dv_translate_item_gre(match_mask, match_value,
3200 last_item = MLX5_FLOW_LAYER_GRE;
3202 case RTE_FLOW_ITEM_TYPE_NVGRE:
3203 flow_dv_translate_item_nvgre(match_mask, match_value,
3205 last_item = MLX5_FLOW_LAYER_GRE;
3207 case RTE_FLOW_ITEM_TYPE_VXLAN:
3208 flow_dv_translate_item_vxlan(match_mask, match_value,
3210 last_item = MLX5_FLOW_LAYER_VXLAN;
/* NOTE(review): VXLAN-GPE reuses the plain VXLAN translator;
 * GPE-specific fields (protocol) are not matched here.
 */
3212 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3213 flow_dv_translate_item_vxlan(match_mask, match_value,
3215 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3217 case RTE_FLOW_ITEM_TYPE_MPLS:
3218 flow_dv_translate_item_mpls(match_mask, match_value,
3219 items, last_item, tunnel);
3220 last_item = MLX5_FLOW_LAYER_MPLS;
3222 case RTE_FLOW_ITEM_TYPE_META:
3223 flow_dv_translate_item_meta(match_mask, match_value,
3225 last_item = MLX5_FLOW_ITEM_METADATA;
3230 item_flags |= last_item;
3232 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3233 dev_flow->dv.value.buf));
3234 dev_flow->layers = item_flags;
3235 /* Register matcher. */
3236 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3238 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3240 matcher.egress = attr->egress;
3241 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3247 * Apply the flow to the NIC.
3250 * Pointer to the Ethernet device structure.
3251 * @param[in, out] flow
3252 * Pointer to flow structure.
3254 * Pointer to error structure.
3257 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Apply the translated flow to the NIC: for every sub (dev) flow,
 * attach the destination action (drop hash queue, or a get-or-create
 * RSS/queue hash RX queue) and create the DV flow on the matcher.
 * On any failure, releases the hash queues acquired so far and
 * restores rte_errno.
 */
3260 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3261 struct rte_flow_error *error)
3263 struct mlx5_flow_dv *dv;
3264 struct mlx5_flow *dev_flow;
3268 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3271 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3272 dv->hrxq = mlx5_hrxq_drop_new(dev);
3276 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3277 "cannot get drop hash queue");
3280 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3281 dv->actions[n].qp = dv->hrxq->qp;
3283 } else if (flow->actions &
3284 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3285 struct mlx5_hrxq *hrxq;
/* Try to reuse an existing hash RX queue first. */
3287 hrxq = mlx5_hrxq_get(dev, flow->key,
3288 MLX5_RSS_HASH_KEY_LEN,
3291 flow->rss.queue_num);
3293 hrxq = mlx5_hrxq_new
3294 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3295 dv->hash_fields, (*flow->queue),
3296 flow->rss.queue_num,
3297 !!(dev_flow->layers &
3298 MLX5_FLOW_LAYER_TUNNEL));
3302 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3303 "cannot get hash queue");
3307 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3308 dv->actions[n].qp = hrxq->qp;
3312 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3313 (void *)&dv->value, n,
3316 rte_flow_error_set(error, errno,
3317 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3319 "hardware refuses to create flow");
/* Error path: undo hash-queue acquisition for all sub flows. */
3325 err = rte_errno; /* Save rte_errno before cleanup. */
3326 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3327 struct mlx5_flow_dv *dv = &dev_flow->dv;
3329 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3330 mlx5_hrxq_drop_release(dev);
3332 mlx5_hrxq_release(dev, dv->hrxq);
3336 rte_errno = err; /* Restore rte_errno. */
3341 * Release the flow matcher.
3344 * Pointer to Ethernet device.
3346 * Pointer to mlx5_flow.
3349 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached matcher; destroy the verbs
 * matcher object and unlink it from the cache list when the refcount
 * reaches zero.
 */
3352 flow_dv_matcher_release(struct rte_eth_dev *dev,
3353 struct mlx5_flow *flow)
3355 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3357 assert(matcher->matcher_object);
3358 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3359 dev->data->port_id, (void *)matcher,
3360 rte_atomic32_read(&matcher->refcnt));
3361 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3362 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3363 (matcher->matcher_object));
3364 LIST_REMOVE(matcher, next);
3366 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3367 dev->data->port_id, (void *)matcher);
3374 * Release an encap/decap resource.
3377 * Pointer to mlx5_flow.
3380 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached encap/decap resource;
 * destroy the verbs action, unlink and free the cache entry when the
 * refcount reaches zero.
 */
3383 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3385 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3386 flow->dv.encap_decap;
3388 assert(cache_resource->verbs_action);
3389 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3390 (void *)cache_resource,
3391 rte_atomic32_read(&cache_resource->refcnt));
3392 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3393 claim_zero(mlx5_glue->destroy_flow_action
3394 (cache_resource->verbs_action));
3395 LIST_REMOVE(cache_resource, next);
3396 rte_free(cache_resource);
3397 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3398 (void *)cache_resource);
3405 * Release a modify-header resource.
3408 * Pointer to mlx5_flow.
3411 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached modify-header resource;
 * destroy the verbs action, unlink and free the cache entry when the
 * refcount reaches zero. Mirrors the encap/decap release logic.
 */
3414 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3416 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3417 flow->dv.modify_hdr;
3419 assert(cache_resource->verbs_action);
3420 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3421 (void *)cache_resource,
3422 rte_atomic32_read(&cache_resource->refcnt));
3423 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3424 claim_zero(mlx5_glue->destroy_flow_action
3425 (cache_resource->verbs_action));
3426 LIST_REMOVE(cache_resource, next);
3427 rte_free(cache_resource);
3428 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3429 (void *)cache_resource);
3436 * Remove the flow from the NIC but keeps it in memory.
3439 * Pointer to Ethernet device.
3440 * @param[in, out] flow
3441 * Pointer to flow structure.
/*
 * Detach the flow from the NIC without freeing its memory: destroy the
 * DV flow of each sub flow and release its drop/hash RX queue, keeping
 * the translated structures so the flow can be re-applied.
 */
3444 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3446 struct mlx5_flow_dv *dv;
3447 struct mlx5_flow *dev_flow;
3451 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3454 claim_zero(mlx5_glue->destroy_flow(dv->flow));
3458 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3459 mlx5_hrxq_drop_release(dev);
3461 mlx5_hrxq_release(dev, dv->hrxq);
3468 * Remove the flow from the NIC and the memory.
3471 * Pointer to the Ethernet device structure.
3472 * @param[in, out] flow
3473 * Pointer to flow structure.
/*
 * Fully destroy a flow: remove it from the NIC, release its counter,
 * then free every sub flow after dropping the references it holds on
 * the cached matcher, encap/decap and modify-header resources.
 */
3476 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3478 struct mlx5_flow *dev_flow;
3482 flow_dv_remove(dev, flow);
3483 if (flow->counter) {
3484 flow_dv_counter_release(flow->counter);
3485 flow->counter = NULL;
3487 while (!LIST_EMPTY(&flow->dev_flows)) {
3488 dev_flow = LIST_FIRST(&flow->dev_flows);
3489 LIST_REMOVE(dev_flow, next);
3490 if (dev_flow->dv.matcher)
3491 flow_dv_matcher_release(dev, dev_flow);
3492 if (dev_flow->dv.encap_decap)
3493 flow_dv_encap_decap_resource_release(dev_flow);
3494 if (dev_flow->dv.modify_hdr)
3495 flow_dv_modify_hdr_resource_release(dev_flow);
3501 * Query a dv flow rule for its statistics via devx.
3504 * Pointer to Ethernet device.
3506 * Pointer to the sub flow.
3508 * data retrieved by the query.
3510 * Perform verbose error reporting if not NULL.
3513 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query a flow's DevX counter and fill a rte_flow_query_count reply.
 * Reports hits/bytes as deltas against the values cached at the last
 * reset; updates the cache when the query requested a reset.
 * Fails with ENOTSUP when DevX is absent and EINVAL when the flow has
 * no counter attached.
 */
3516 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3517 void *data, struct rte_flow_error *error)
3519 struct mlx5_priv *priv = dev->data->dev_private;
3520 struct rte_flow_query_count *qc = data;
3525 if (!priv->config.devx)
3526 return rte_flow_error_set(error, ENOTSUP,
3527 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3529 "counters are not supported");
3530 if (flow->counter) {
3531 err = mlx5_devx_cmd_flow_counter_query
3532 (flow->counter->dcs,
3533 qc->reset, &pkts, &bytes);
3535 return rte_flow_error_set
3537 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3539 "cannot read counters");
/* Report deltas relative to the last reset snapshot. */
3542 qc->hits = pkts - flow->counter->hits;
3543 qc->bytes = bytes - flow->counter->bytes;
3545 flow->counter->hits = pkts;
3546 flow->counter->bytes = bytes;
3550 return rte_flow_error_set(error, EINVAL,
3551 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3553 "counters are not available");
3559 * @see rte_flow_query(); 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * rte_flow query entry point for the DV driver: walks the action list
 * and dispatches COUNT queries to flow_dv_query_count; any other
 * non-void action is rejected with ENOTSUP.
 */
3563 flow_dv_query(struct rte_eth_dev *dev,
3564 struct rte_flow *flow __rte_unused,
3565 const struct rte_flow_action *actions __rte_unused,
3566 void *data __rte_unused,
3567 struct rte_flow_error *error __rte_unused)
3571 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3572 switch (actions->type) {
3573 case RTE_FLOW_ACTION_TYPE_VOID:
3575 case RTE_FLOW_ACTION_TYPE_COUNT:
3576 ret = flow_dv_query_count(dev, flow, data, error);
3579 return rte_flow_error_set(error, ENOTSUP,
3580 RTE_FLOW_ERROR_TYPE_ACTION,
3582 "action not supported");
/* Driver-ops vtable exposing the DV flow engine to the generic
 * mlx5 flow layer (validate/prepare/translate/apply/remove/destroy/query).
 */
3589 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
3590 .validate = flow_dv_validate,
3591 .prepare = flow_dv_prepare,
3592 .translate = flow_dv_translate,
3593 .apply = flow_dv_apply,
3594 .remove = flow_dv_remove,
3595 .destroy = flow_dv_destroy,
3596 .query = flow_dv_query,
3599 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */