/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Flow attributes deduced from the flow items, used to pick field tables. */
union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
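/*
 * Usage sketch (illustration only, not driver code): for a pattern of
 * ETH / IPV4 / UDP items the initializer above leaves the union as
 * { .valid = 1, .ipv4 = 1, .udp = 1 }, which the set-TP/set-TTL
 * converters below use to pick a field table without re-walking the
 * pattern:
 *
 *	union flow_dv_attr flow_attr = { .attr = 0 };
 *
 *	flow_dv_attr_init(items, &flow_attr);
 *	if (flow_attr.udp)
 *		field = modify_udp;
 */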
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0},
};
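/*
 * Reading the tables above (illustration only): each row names one
 * settable field as {size, offset, id}, size and offset in bytes within
 * the protocol header. The IPv4 source address, for instance, is 4 bytes
 * wide at offset 12 of the IPv4 header, hence {4, 12, MLX5_MODI_OUT_SIPV4}
 * in modify_ipv4[]. The all-zero row terminates each table for the
 * while (field->size) loop in flow_dv_convert_modify_action() below.
 */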
/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		++field;
	}
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in, out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in, out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in, out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr,
					  "pattern not supported for ingress");
	return 0;
}
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}
/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}
/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}
/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " decap action");
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	/* decap action is valid on egress only if it is followed by encap */
	if (attr->egress) {
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		       action++)
			;
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					 NULL, "decap action not supported"
					 " for egress");
	}
	return 0;
}
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(priv->sh->ctx, cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL),
			 cache_resource->reformat_type,
			 cache_resource->ft_type);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
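/*
 * Illustration only: the lookup above keys the cache on the tuple
 * (reformat_type, ft_type, size, buf contents), so two flows requesting
 * an identical encapsulation share one verbs action and only bump the
 * reference count instead of creating a duplicate.
 */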
/**
 * Get the size of specific rte_flow_item_type
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
	size_t retval;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
		break;
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
	default:
		retval = 0;
		break;
	}
	return retval;
}
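/*
 * Illustration only: for a VXLAN encapsulation list ETH / IPV4 / UDP /
 * VXLAN / END, summing flow_dv_get_item_len() over the items gives
 * 14 + 20 + 8 + 8 = 50 bytes, the exact size of the raw header that
 * flow_dv_convert_encap_data() below writes into the output buffer.
 */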
#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
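/*
 * Illustration only: these defaults are patched into user-supplied encap
 * headers wherever a field was left zero, e.g. an all-zero IPv4
 * version_ihl byte becomes 0x45 (MLX5_ENCAP_IPV4_VERSION |
 * MLX5_ENCAP_IPV4_IHL_MIN): version 4, five 32-bit header words.
 */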
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	};

	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		raw_encap_data =
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
	} else {
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
			encap_data =
				((const struct rte_flow_action_vxlan_encap *)
						action->conf)->definition;
		else
			encap_data =
				((const struct rte_flow_action_nvgre_encap *)
						action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
					       &res.size, error))
			return -rte_errno;
	}
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
	return 0;
}
/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	struct mlx5_flow_dv_encap_decap_resource res = {
		.size = 0,
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
	};

	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
	return 0;
}
/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;

	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	res.reformat_type = attr->egress ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
	res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
				     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
	return 0;
}
/**
 * Validate the modify-header actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
{
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " modify action");
	return 0;
}
/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no L2 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv4 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv6 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
						  "in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no IP protocol in pattern");
	}
	return ret;
}
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(priv->sh->ctx,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions,
					 cache_resource->ft_type);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
/**
 * Get or create a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	int ret;

	if (!priv->config.devx) {
		ret = -ENOTSUP;
		goto error_exit;
	}
	if (shared) {
		LIST_FOREACH(cnt, &priv->flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	if (!dcs || !cnt) {
		ret = -ENOMEM;
		goto error_exit;
	}
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	if (ret)
		goto error_exit;
	struct mlx5_flow_counter tmpl = {
		.shared = shared,
		.ref_cnt = 1,
		.id = id,
		.dcs = dcs,
	};
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	if (!tmpl.action) {
		ret = -errno;
		goto error_exit;
	}
	*cnt = tmpl;
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
	return cnt;
error_exit:
	rte_free(cnt);
	rte_free(dcs);
	rte_errno = -ret;
	return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_release(struct mlx5_flow_counter *counter)
{
	int ret;

	if (!counter)
		return;
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
		if (ret)
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
		rte_free(counter);
	}
}
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer is not supported");
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return 0;
}
/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	int tunnel = 0;
	uint8_t next_protocol = 0xff;
	int actions_n = 0;

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item items[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		struct rte_flow_error *error)
{
	uint32_t size = sizeof(struct mlx5_flow);
	struct mlx5_flow *flow;

	flow = rte_calloc(__func__, 1, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
	return flow;
}
#ifndef NDEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
	uint8_t *m = match_mask;
	uint8_t *v = match_value;
	unsigned int i;

	for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
		if (v[i] & ~m[i]) {
			DRV_LOG(ERR,
				"match_value differs from match_criteria"
				" %p[%u] != %p[%u]",
				match_value, i, match_mask, i);
			return -EINVAL;
		}
	}
	return 0;
}
#endif
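/*
 * Illustration only: a value byte of 0x0f under a mask byte of 0x07
 * fails the check above because bit 3 is set in the value but not
 * covered by the mask (0x0f & ~0x07 == 0x08).
 */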
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	char *l24_v;
	unsigned int i;

	if (!eth_v)
		return;
	if (!eth_m)
		eth_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->src); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
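/*
 * Illustration only: matcher bytes take the mask as-is while key bytes
 * take value & mask, so a spec MAC of 00:11:22:33:44:55 under the mask
 * ff:ff:ff:ff:ff:00 is stored as 00:11:22:33:44:00, keeping the key
 * inside the mask as flow_dv_check_valid_spec() expects.
 */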
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}
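/*
 * Illustration only: a TCI of 0x500a splits into the three matcher
 * fields as first_prio = 2 (tci >> 13), first_cfi = 1 (bit 12) and
 * first_vid = 0x00a (the low 12 bits kept by the 0x0fff nic mask).
 */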
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
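/*
 * Illustration only: the 8-bit TOS byte is split across two matcher
 * fields, e.g. type_of_service 0xb8 (DSCP 46, ECN 0) is written as
 * ip_dscp = 0x2e (tos >> 2) and ip_ecn = 0 (the low two bits).
 */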
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_tcp *tcp_m = item->mask;
	const struct rte_flow_item_tcp *tcp_v = item->spec;
	void *headers_m;
	void *headers_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
	if (!tcp_v)
		return;
	if (!tcp_m)
		tcp_m = &rte_flow_item_tcp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}
/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_udp *udp_m = item->mask;
	const struct rte_flow_item_udp *udp_v = item->spec;
	void *headers_m;
	void *headers_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
	if (!udp_v)
		return;
	if (!udp_m)
		udp_m = &rte_flow_item_udp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
		 rte_be_to_cpu_16(udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}
/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	/* NVGRE is identified by its GRE fields first. */
	flow_dv_translate_item_gre(matcher, key, item, inner);
	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/* Dereference TNI only after the NULL checks above. */
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_mask;
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
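/*
 * Illustrative sketch only (hypothetical helper): matching a VXLAN VNI.
 * When the pattern did not constrain the outer UDP destination port,
 * flow_dv_translate_item_vxlan() above fills in the default port for the
 * item type (MLX5_UDP_PORT_VXLAN or MLX5_UDP_PORT_VXLAN_GPE) on behalf
 * of the user.
 */
static void __rte_unused
example_translate_vxlan(void *matcher, void *key)
{
	static const struct rte_flow_item_vxlan spec = {
		.vni = "\x00\x00\x7b", /* VNI 123. */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = &spec,
		/* Default mask: match the full 24-bit VNI. */
		.mask = &rte_flow_item_vxlan_mask,
	};

	flow_dv_translate_item_vxlan(matcher, key, &item, 0);
}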
/**
 * Add MPLS item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* Match on the encapsulating protocol according to prev_layer. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 ETHER_TYPE_MPLS);
		break;
	default:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
/**
 * Add META item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(void *matcher, void *key,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
			 rte_be_to_cpu_32(meta_m->data));
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
			 rte_be_to_cpu_32(meta_v->data & meta_m->data));
	}
}
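/*
 * Illustrative sketch only (hypothetical helper): the META item matches
 * 32-bit metadata attached to the packet by the application; the
 * translation above lands it in metadata_reg_a of misc_parameters_2.
 */
static void __rte_unused
example_translate_meta(void *matcher, void *key)
{
	static const struct rte_flow_item_meta spec = {
		.data = RTE_BE32(0xcafe),
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_META,
		.spec = &spec,
		.mask = NULL, /* NULL mask: rte_flow_item_meta_mask is used. */
	};

	flow_dv_translate_item_meta(matcher, key, &item);
}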
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)				      \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
	uint8_t match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	return match_criteria_enable;
}
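/*
 * Illustrative sketch only (hypothetical helper): a mask that touches the
 * outer headers and the misc parameters yields a bitmap with the OUTER and
 * MISC bits raised; flow_dv_matcher_register() below hands this bitmap to
 * the kernel through dv_attr.match_criteria_enable.
 */
static uint8_t __rte_unused
example_matcher_enable(void)
{
	uint32_t mask[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *headers = MLX5_ADDR_OF(fte_match_param, mask, outer_headers);
	void *misc = MLX5_ADDR_OF(fte_match_param, mask, misc_parameters);

	MLX5_SET(fte_match_set_lyr_2_4, headers, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_misc, misc, gre_protocol, 0xffff);
	/* OUTER and MISC bits set; INNER and MISC2 remain clear. */
	return flow_dv_matcher_enable(mask);
}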
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	return 0;
}
/**
 * Add source vport match to the specified matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask.
 */
static void
flow_dv_translate_source_vport(void *matcher, void *key,
			       int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
/**
 * Find existing tag resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to tag resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_tag_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_tag_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->tags, next) {
		if (resource->tag == cache_resource->tag) {
			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->flow->tag_resource = cache_resource;
			return 0;
		}
	}
	/* Register new resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
		(resource->tag);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->tags, cache_resource, next);
	dev_flow->flow->tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
/**
 * Release the tag.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param tag
 *   Pointer to the tag resource.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_tag_release(struct rte_eth_dev *dev,
		    struct mlx5_flow_dv_tag_resource *tag)
{
	assert(tag);
	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
		dev->data->port_id, (void *)tag,
		rte_atomic32_read(&tag->refcnt));
	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
		LIST_REMOVE(tag, next);
		DRV_LOG(DEBUG, "port %u tag %p: removed",
			dev->data->port_id, (void *)tag);
		rte_free(tag);
		return 0;
	}
	return 1;
}
/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
		  struct mlx5_flow *dev_flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = dev_flow->flow;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	int actions_n = 0;
	bool actions_end = false;
	struct mlx5_flow_dv_modify_hdr_resource res = {
		.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					  MLX5DV_FLOW_TABLE_TYPE_NIC_RX
	};
	union flow_dv_attr flow_attr = { .attr = 0 };
	struct mlx5_flow_dv_tag_resource tag_resource;

	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; !actions_end ; actions++) {
		const struct rte_flow_action_queue *queue;
		const struct rte_flow_action_rss *rss;
		const struct rte_flow_action *action = actions;
		const struct rte_flow_action_count *count = action->conf;
		const uint8_t *rss_key;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			tag_resource.tag =
				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
			if (!flow->tag_resource)
				if (flow_dv_tag_resource_register
				    (dev, &tag_resource, dev_flow, error))
					return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
				flow->tag_resource->action;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			tag_resource.tag = mlx5_flow_mark_set
			      (((const struct rte_flow_action_mark *)
			       (actions->conf))->id);
			if (!flow->tag_resource)
				if (flow_dv_tag_resource_register
				    (dev, &tag_resource, dev_flow, error))
					return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
				flow->tag_resource->action;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			queue = actions->conf;
			flow->rss.queue_num = 1;
			(*flow->queue)[0] = queue->index;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss = actions->conf;
			if (flow->queue)
				memcpy((*flow->queue), rss->queue,
				       rss->queue_num * sizeof(uint16_t));
			flow->rss.queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/* RSS type 0 indicates default RSS type ETH_RSS_IP. */
			flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
			flow->rss.level = rss->level;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			if (!priv->config.devx) {
				rte_errno = ENOTSUP;
				goto cnt_err;
			}
			flow->counter = flow_dv_counter_new(dev, count->shared,
							    count->id);
			if (flow->counter == NULL)
				goto cnt_err;
			dev_flow->dv.actions[actions_n++] =
				flow->counter->action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
cnt_err:
			if (rte_errno == ENOTSUP)
				return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					 NULL,
					 "count action not supported");
			else
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 action,
					 "cannot create counter object.");
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			if (flow_dv_create_action_l2_encap(dev, actions,
							   dev_flow, error))
				return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
				dev_flow->dv.encap_decap->verbs_action;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			if (flow_dv_create_action_l2_decap(dev, dev_flow,
							   error))
				return -rte_errno;
			dev_flow->dv.actions[actions_n++] =
				dev_flow->dv.encap_decap->verbs_action;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Handle encap with preceding decap. */
			if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
				if (flow_dv_create_action_raw_encap
					(dev, actions, dev_flow, attr, error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->verbs_action;
			} else {
				/* Handle encap without preceding decap. */
				if (flow_dv_create_action_l2_encap(dev, actions,
								   dev_flow,
								   error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->verbs_action;
			}
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			/* Check if this decap is followed by encap. */
			for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
			       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
			       action++)
				;
			/* Handle decap only if it isn't followed by encap. */
			if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
				if (flow_dv_create_action_l2_decap(dev,
								   dev_flow,
								   error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.encap_decap->verbs_action;
			}
			/* If decap is followed by encap, handle it at encap. */
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			if (flow_dv_convert_action_modify_mac(&res, actions,
							      error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
					MLX5_FLOW_ACTION_SET_MAC_SRC :
					MLX5_FLOW_ACTION_SET_MAC_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			if (flow_dv_convert_action_modify_ipv4(&res, actions,
							       error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
					MLX5_FLOW_ACTION_SET_IPV4_SRC :
					MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			if (flow_dv_convert_action_modify_ipv6(&res, actions,
							       error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
					MLX5_FLOW_ACTION_SET_IPV6_SRC :
					MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			if (flow_dv_convert_action_modify_tp(&res, actions,
							     items, &flow_attr,
							     error))
				return -rte_errno;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
					MLX5_FLOW_ACTION_SET_TP_SRC :
					MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
			if (flow_dv_convert_action_modify_dec_ttl(&res, items,
								  &flow_attr,
								  error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			if (flow_dv_convert_action_modify_ttl(&res, actions,
							      items, &flow_attr,
							      error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_SET_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			actions_end = true;
			if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
				/* Create modify header action if needed. */
				if (flow_dv_modify_hdr_resource_register
							(dev, &res,
							 dev_flow,
							 error))
					return -rte_errno;
				dev_flow->dv.actions[actions_n++] =
					dev_flow->dv.modify_hdr->verbs_action;
			}
			break;
		default:
			break;
		}
	}
	dev_flow->dv.actions_n = actions_n;
	flow->actions = action_flags;
	if (attr->ingress && !attr->transfer &&
	    (priv->representor || priv->master)) {
		/* It was validated - we support unidirectional flows only. */
		assert(!attr->egress);
		/*
		 * Add matching on source vport index only
		 * for ingress rules in E-Switch configurations.
		 */
		flow_dv_translate_source_vport(matcher.mask.buf,
					       dev_flow->dv.value.buf,
					       priv->vport_id,
					       0xffff);
	}
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		void *match_mask = matcher.mask.buf;
		void *match_value = dev_flow->dv.value.buf;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_dv_translate_item_eth(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L2;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_dv_translate_item_vlan(match_mask, match_value,
						    items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L2;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_dv_translate_item_ipv4(match_mask, match_value,
						    items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L3;
			dev_flow->dv.hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_dv_translate_item_ipv6(match_mask, match_value,
						    items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L3;
			dev_flow->dv.hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_dv_translate_item_tcp(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L4;
			dev_flow->dv.hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel, ETH_RSS_TCP,
					 IBV_RX_HASH_SRC_PORT_TCP |
					 IBV_RX_HASH_DST_PORT_TCP);
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_dv_translate_item_udp(match_mask, match_value,
						   items, tunnel);
			matcher.priority = MLX5_PRIORITY_MAP_L4;
			dev_flow->dv.hash_fields |=
				mlx5_flow_hashfields_adjust
					(dev_flow, tunnel, ETH_RSS_UDP,
					 IBV_RX_HASH_SRC_PORT_UDP |
					 IBV_RX_HASH_DST_PORT_UDP);
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_dv_translate_item_gre(match_mask, match_value,
						   items, tunnel);
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			flow_dv_translate_item_nvgre(match_mask, match_value,
						     items, tunnel);
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_dv_translate_item_vxlan(match_mask, match_value,
						     items, tunnel);
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_dv_translate_item_vxlan(match_mask, match_value,
						     items, tunnel);
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_dv_translate_item_mpls(match_mask, match_value,
						    items, last_item, tunnel);
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			flow_dv_translate_item_meta(match_mask, match_value,
						    items);
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			break;
		}
		item_flags |= last_item;
	}
	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
					 dev_flow->dv.value.buf));
	dev_flow->layers = item_flags;
	/* Register matcher. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
						     matcher.priority);
	matcher.egress = attr->egress;
	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
		return -rte_errno;
	return 0;
}
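/*
 * Illustrative sketch only (hypothetical arrays): the kind of rte_flow
 * rule the translation above handles -- match outer IPv4/UDP and direct
 * matching packets to queue 0. flow_dv_translate() walks "actions" first
 * (building dv.actions[]) and "items" second (building the matcher mask
 * and value buffers).
 */
static const struct rte_flow_attr example_attr __rte_unused = {
	.ingress = 1,
};
static const struct rte_flow_item example_items[] __rte_unused = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue example_queue __rte_unused = {
	.index = 0,
};
static const struct rte_flow_action example_actions[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};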
/**
 * Apply the flow to the NIC.
 *
 * @param[in, out] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	int n;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			dv->hrxq = mlx5_hrxq_drop_new(dev);
			if (!dv->hrxq) {
				rte_flow_error_set
					(error, errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get drop hash queue");
				goto error;
			}
			dv->actions[n++] =
				mlx5_glue->dv_create_flow_action_dest_ibv_qp
				(dv->hrxq->qp);
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] =
				mlx5_glue->dv_create_flow_action_dest_ibv_qp
				(dv->hrxq->qp);
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;

		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

	assert(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
		rte_atomic32_read(&matcher->refcnt));
	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
		rte_free(matcher);
		DRV_LOG(DEBUG, "port %u matcher %p: removed",
			dev->data->port_id, (void *)matcher);
		return 0;
	}
	return 1;
}
/**
 * Release an encap/decap resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
						flow->dv.encap_decap;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
			   (cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Release a modify-header resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
						flow->dv.modify_hdr;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
			   (cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "modify-header resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		if (dv->flow) {
			claim_zero(mlx5_glue->destroy_flow(dv->flow));
			dv->flow = NULL;
		}
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
}
/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_release(flow->counter);
		flow->counter = NULL;
	}
	if (flow->tag_resource) {
		flow_dv_tag_release(dev, flow->tag_resource);
		flow->tag_resource = NULL;
	}
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		if (dev_flow->dv.encap_decap)
			flow_dv_encap_decap_resource_release(dev_flow);
		if (dev_flow->dv.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev_flow);
		rte_free(dev_flow);
	}
}
/**
 * Query a DV flow rule for its statistics via DevX.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;
	uint64_t pkts = 0;
	uint64_t bytes = 0;
	int err;

	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		err = mlx5_devx_cmd_flow_counter_query
						(flow->counter->dcs,
						 qc->reset, &pkts, &bytes);
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counters");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = pkts - flow->counter->hits;
		qc->bytes = bytes - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = pkts;
			flow->counter->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
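/*
 * Illustrative sketch only (hypothetical helper): how an application
 * reaches flow_dv_query_count() above through the public rte_flow API.
 * With .reset set, the driver rebases its software baseline so that the
 * next query reports only the delta.
 */
static int __rte_unused
example_read_counter(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action action[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* On success qc.hits/qc.bytes hold the counter values. */
	return rte_flow_query(port_id, flow, action, &qc, &error);
}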
/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *actions,
	      void *data,
	      struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */