/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Flags recording which protocol headers appear in the flow pattern. */
union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

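/*
 * Usage sketch (illustrative, not part of the driver): for a pattern such
 * as ETH / IPV4 / UDP / END the union records which headers are present,
 * so the modify-header converters below can pick the matching field table:
 *
 *	union flow_dv_attr attr = { .attr = 0 };
 *
 *	flow_dv_attr_init(pattern, &attr);
 *	assert(attr.ipv4 && attr.udp);
 *	assert(!attr.ipv6 && !attr.tcp);
 */
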
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0},
};

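/*
 * Illustrative note: MAC addresses are patched in two hardware segments,
 * so one "set destination MAC" request with address 00:11:22:33:44:55
 * expands to two commands driven by the modify_eth[] table above:
 *
 *	{4, 0, MLX5_MODI_OUT_DMAC_47_16}  writes bytes 00:11:22:33
 *	{2, 4, MLX5_MODI_OUT_DMAC_15_0}   writes bytes 44:55
 */
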
/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

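/*
 * Usage sketch (illustrative): for a flow with pattern ETH / IPV4 / UDP
 * and a SET_TP_DST action, flow_dv_attr_init() marks attr->udp, so the
 * code above builds a UDP item and converts it through the modify_udp
 * field table:
 *
 *	const struct rte_flow_action_set_tp conf = {
 *		.port = RTE_BE16(4789),
 *	};
 */
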
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

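/*
 * Note on the encoding above (illustrative): a TTL decrement is expressed
 * as MLX5_MODIFICATION_TYPE_ADD of the one-byte spec value 0xFF, i.e.
 * adding -1 modulo 256 to the TTL/hop-limit field, while the 0xFF in the
 * mask selects that single byte segment for the command generator.
 */
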
/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr,
					  "pattern not supported for ingress");
	return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " decap action");
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	/* decap action is valid on egress only if it is followed by encap */
	if (attr->egress) {
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		       action++)
			;
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					 NULL, "decap action not supported"
					 " for egress");
	}
	return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;

	resource->flags = flow->group ? 0 : 1;
	if (flow->ingress)
		ns = priv->rx_ns;
	else
		ns = priv->tx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(priv->sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
	size_t retval;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
		break;
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
	default:
		retval = 0;
		break;
	}
	return retval;
}

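/*
 * Design note (illustrative observation, not from the source): this helper
 * works because each supported rte_flow_item_* spec starts with the wire
 * header layout, so the encap converter below can byte-copy items->spec
 * straight into the raw reformat buffer, e.g.:
 *
 *	size_t len = flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_UDP);
 *	 len == sizeof(struct rte_flow_item_udp) == sizeof(struct udp_hdr)
 */
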
#define MLX5_ENCAP_IPV4_VERSION 0x40
#define MLX5_ENCAP_IPV4_IHL_MIN 0x05
#define MLX5_ENCAP_IPV4_TTL_DEF 0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04

/**
 * Convert the encap action data from a list of rte_flow_item to a raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}

/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	};

	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		raw_encap_data =
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
	} else {
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
			encap_data =
				((const struct rte_flow_action_vxlan_encap *)
						action->conf)->definition;
		else
			encap_data =
				((const struct rte_flow_action_nvgre_encap *)
						action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
					       &res.size, error))
			return -rte_errno;
	}
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
	return 0;
}

/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	struct mlx5_flow_dv_encap_decap_resource res = {
		.size = 0,
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
	};

	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
	return 0;
}

/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;

	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	res.reformat_type = attr->egress ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
	res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
				     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
	return 0;
}

/**
 * Validate the modify-header actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
{
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " modify action");
	return 0;
}

/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no L2 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv4 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv6 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
						  "in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no IP protocol in pattern");
	}
	return ret;
}

/**
 * Find existing modify-header resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_ns *ns =
		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
		priv->tx_ns : priv->rx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(priv->sh->ctx, cache_resource->ft_type,
					 ns, 0,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}

/**
 * Get or create a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	int ret;

	if (!priv->config.devx) {
		ret = -ENOTSUP;
		goto error_exit;
	}
	if (shared) {
		LIST_FOREACH(cnt, &priv->flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	if (!dcs || !cnt) {
		ret = -ENOMEM;
		goto error_exit;
	}
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	if (ret)
		goto error_exit;
	struct mlx5_flow_counter tmpl = {
		.shared = shared,
		.ref_cnt = 1,
		.id = id,
		.dcs = dcs,
	};
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	if (!tmpl.action) {
		ret = -errno;
		goto error_exit;
	}
	*cnt = tmpl;
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
	return cnt;
error_exit:
	rte_free(cnt);
	rte_free(dcs);
	rte_errno = -ret;
	return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_release(struct mlx5_flow_counter *counter)
{
	int ret;

	if (!counter)
		return;
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
		if (ret)
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
		rte_free(counter);
	}
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

#ifndef HAVE_MLX5DV_DR
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#endif
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer is not supported");
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	int tunnel = 0;
	uint8_t next_protocol = 0xff;
	int actions_n = 0;

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item items[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		struct rte_flow_error *error)
{
	uint32_t size = sizeof(struct mlx5_flow);
	struct mlx5_flow *flow;

	flow = rte_calloc(__func__, 1, size, 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
	return flow;
}

/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
	uint8_t *m = match_mask;
	uint8_t *v = match_value;
	unsigned int i;

	for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
		if (v[i] & ~m[i]) {
			DRV_LOG(ERR,
				"match_value differs from match_criteria"
				" %p[%u] != %p[%u]",
				match_value, i, match_mask, i);
			return -EINVAL;
		}
	}
	return 0;
}

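/*
 * Illustrative example: a value byte with bits outside its mask, e.g.
 * m[i] == 0x0f and v[i] == 0x1f, trips the check above, since hardware
 * only guarantees matching on the masked bits and the extra spec bits
 * would otherwise be silently ignored.
 */
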
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	char *l24_v;
	unsigned int i;

	if (!eth_v)
		return;
	if (!eth_m)
		eth_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(0x0fff),
		.inner_type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}

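/*
 * Worked example (illustrative): TCI 0xe123 decomposes into the three
 * fields programmed above:
 *
 *	first_prio = 0xe123 >> 13 = 7		(PCP)
 *	first_cfi  = (0xe123 >> 12) & 1 = 0	(DEI/CFI)
 *	first_vid  = 0xe123 & 0xfff = 0x123	(VLAN ID)
 *
 * Note the default nic_mask only covers the VID bits (0x0fff).
 */
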
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

2259 * Add IPV6 item to matcher and to the value.
2261 * @param[in, out] matcher
2263 * @param[in, out] key
2264 * Flow matcher value.
2266 * Flow pattern to translate.
2268 * Item is inner pattern.
2270 * The group to insert the rule.
2273 flow_dv_translate_item_ipv6(void *matcher, void *key,
2274 const struct rte_flow_item *item,
2275 int inner, uint32_t group)
2277 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2278 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2279 const struct rte_flow_item_ipv6 nic_mask = {
2280 .hdr = {
2281 .src_addr =
2282 "\xff\xff\xff\xff\xff\xff\xff\xff"
2283 "\xff\xff\xff\xff\xff\xff\xff\xff",
2284 .dst_addr =
2285 "\xff\xff\xff\xff\xff\xff\xff\xff"
2286 "\xff\xff\xff\xff\xff\xff\xff\xff",
2287 .vtc_flow = RTE_BE32(0xffffffff),
2294 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2295 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2304 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2306 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2308 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2310 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2313 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2315 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2321 size = sizeof(ipv6_m->hdr.dst_addr);
2322 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2323 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2324 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2325 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2326 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2327 for (i = 0; i < size; ++i)
2328 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2329 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2330 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2331 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2332 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2333 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2334 for (i = 0; i < size; ++i)
2335 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2337 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2338 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2339 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2340 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2341 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2342 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2345 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2347 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2350 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2352 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2356 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2358 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2359 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
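/*
 * Editor's note: illustrative decode of the IPv6 vtc_flow word used
 * above (host order after rte_be_to_cpu_32): version in bits 31:28,
 * traffic class in bits 27:20, flow label in bits 19:0. ECN is the low
 * 2 bits of the traffic class and DSCP the upper 6, hence the >> 20 and
 * >> 22 shifts, with MLX5_SET() truncating to each field's width.
 *
 *   uint32_t vtc = rte_be_to_cpu_32(vtc_flow);
 *   uint32_t ecn = (vtc >> 20) & 0x3;
 *   uint32_t dscp = (vtc >> 22) & 0x3f;
 *   uint32_t flow_label = vtc & 0xfffff;
 */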
2363 * Add TCP item to matcher and to the value.
2364 *
2365 * @param[in, out] matcher
2366 * Flow matcher.
2367 * @param[in, out] key
2368 * Flow matcher value.
2369 * @param[in] item
2370 * Flow pattern to translate.
2371 * @param[in] inner
2372 * Item is inner pattern.
2375 flow_dv_translate_item_tcp(void *matcher, void *key,
2376 const struct rte_flow_item *item,
2379 const struct rte_flow_item_tcp *tcp_m = item->mask;
2380 const struct rte_flow_item_tcp *tcp_v = item->spec;
2385 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2387 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2389 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2391 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2393 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2394 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2398 tcp_m = &rte_flow_item_tcp_mask;
2399 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2400 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2401 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2402 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2403 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2404 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2405 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2406 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
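/*
 * Editor's note: the elided "if (!tcp_v) return;" and "if (!tcp_m)"
 * guards around this point implement the rte_flow convention that a NULL
 * item->mask selects the item's default mask. A hedged standalone sketch:
 *
 *   const struct rte_flow_item_tcp *m = item->mask;
 *
 *   if (!item->spec)  // nothing to match on, leave matcher untouched
 *           return;
 *   if (!m)           // NULL mask: fall back to the default mask
 *           m = &rte_flow_item_tcp_mask;
 */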
2410 * Add UDP item to matcher and to the value.
2411 *
2412 * @param[in, out] matcher
2413 * Flow matcher.
2414 * @param[in, out] key
2415 * Flow matcher value.
2416 * @param[in] item
2417 * Flow pattern to translate.
2418 * @param[in] inner
2419 * Item is inner pattern.
2422 flow_dv_translate_item_udp(void *matcher, void *key,
2423 const struct rte_flow_item *item,
2426 const struct rte_flow_item_udp *udp_m = item->mask;
2427 const struct rte_flow_item_udp *udp_v = item->spec;
2432 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2434 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2436 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2438 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2440 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2441 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2445 udp_m = &rte_flow_item_udp_mask;
2446 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2447 rte_be_to_cpu_16(udp_m->hdr.src_port));
2448 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2449 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2450 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2451 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2452 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2453 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2457 * Add GRE item to matcher and to the value.
2458 *
2459 * @param[in, out] matcher
2460 * Flow matcher.
2461 * @param[in, out] key
2462 * Flow matcher value.
2463 * @param[in] item
2464 * Flow pattern to translate.
2465 * @param[in] inner
2466 * Item is inner pattern.
2469 flow_dv_translate_item_gre(void *matcher, void *key,
2470 const struct rte_flow_item *item,
2473 const struct rte_flow_item_gre *gre_m = item->mask;
2474 const struct rte_flow_item_gre *gre_v = item->spec;
2477 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2478 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2481 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2483 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2485 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2487 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2489 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2490 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2494 gre_m = &rte_flow_item_gre_mask;
2495 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2496 rte_be_to_cpu_16(gre_m->protocol));
2497 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2498 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2502 * Add NVGRE item to matcher and to the value.
2503 *
2504 * @param[in, out] matcher
2505 * Flow matcher.
2506 * @param[in, out] key
2507 * Flow matcher value.
2508 * @param[in] item
2509 * Flow pattern to translate.
2510 * @param[in] inner
2511 * Item is inner pattern.
2514 flow_dv_translate_item_nvgre(void *matcher, void *key,
2515 const struct rte_flow_item *item,
2518 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2519 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2520 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2521 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2522 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2523 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2529 flow_dv_translate_item_gre(matcher, key, item, inner);
2533 nvgre_m = &rte_flow_item_nvgre_mask;
2534 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2535 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2536 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2537 memcpy(gre_key_m, tni_flow_id_m, size);
2538 for (i = 0; i < size; ++i)
2539 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2543 * Add VXLAN item to matcher and to the value.
2544 *
2545 * @param[in, out] matcher
2546 * Flow matcher.
2547 * @param[in, out] key
2548 * Flow matcher value.
2549 * @param[in] item
2550 * Flow pattern to translate.
2551 * @param[in] inner
2552 * Item is inner pattern.
2555 flow_dv_translate_item_vxlan(void *matcher, void *key,
2556 const struct rte_flow_item *item,
2559 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2560 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2563 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2564 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2572 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2574 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2576 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2578 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2580 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2581 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2582 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2583 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2584 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2589 vxlan_m = &rte_flow_item_vxlan_mask;
2590 size = sizeof(vxlan_m->vni);
2591 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2592 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2593 memcpy(vni_m, vxlan_m->vni, size);
2594 for (i = 0; i < size; ++i)
2595 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
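/*
 * Editor's note: with the default-port logic above, a pattern such as
 * "eth / ipv4 / udp / vxlan vni is 42 / end" is pinned to UDP dport 4789
 * automatically, while an explicit "udp dst is N" survives untouched.
 * Sketch of the former as rte_flow items (illustrative only):
 *
 *   struct rte_flow_item_vxlan vni_spec = { .vni = { 0, 0, 42 } };
 *   struct rte_flow_item pat[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vni_spec,
 *             .mask = &rte_flow_item_vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */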
2599 * Add MPLS item to matcher and to the value.
2600 *
2601 * @param[in, out] matcher
2602 * Flow matcher.
2603 * @param[in, out] key
2604 * Flow matcher value.
2605 * @param[in] item
2606 * Flow pattern to translate.
2607 * @param[in] prev_layer
2608 * The protocol layer indicated in previous item.
2609 * @param[in] inner
2610 * Item is inner pattern.
2613 flow_dv_translate_item_mpls(void *matcher, void *key,
2614 const struct rte_flow_item *item,
2615 uint64_t prev_layer,
2618 const uint32_t *in_mpls_m = item->mask;
2619 const uint32_t *in_mpls_v = item->spec;
2620 uint32_t *out_mpls_m = 0;
2621 uint32_t *out_mpls_v = 0;
2622 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2623 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2624 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2626 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2627 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2628 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2630 switch (prev_layer) {
2631 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2632 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2633 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2634 MLX5_UDP_PORT_MPLS);
2636 case MLX5_FLOW_LAYER_GRE:
2637 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2638 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2642 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2643 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2650 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
2651 switch (prev_layer) {
2652 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2654 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2655 outer_first_mpls_over_udp);
2657 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2658 outer_first_mpls_over_udp);
2660 case MLX5_FLOW_LAYER_GRE:
2662 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2663 outer_first_mpls_over_gre);
2665 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2666 outer_first_mpls_over_gre);
2669 /* Inner MPLS not over GRE is not supported. */
2672 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2676 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2682 if (out_mpls_m && out_mpls_v) {
2683 *out_mpls_m = *in_mpls_m;
2684 *out_mpls_v = *in_mpls_v & *in_mpls_m;
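/*
 * Editor's note: prev_layer picks both the demultiplexing key written to
 * the headers (udp_dport 6635 for MPLS-over-UDP, GRE protocol 0x8847 for
 * MPLS-over-GRE) and the misc2 field receiving the label. E.g. this
 * illustrative pattern lands in outer_first_mpls_over_udp:
 *
 *   struct rte_flow_item pat[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },  // prev_layer = L4_UDP
 *           { .type = RTE_FLOW_ITEM_TYPE_MPLS },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */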
2689 * Add META item to matcher and to the value.
2690 *
2691 * @param[in, out] matcher
2692 * Flow matcher.
2693 * @param[in, out] key
2694 * Flow matcher value.
2695 * @param[in] item
2696 * Flow pattern to translate.
2701 flow_dv_translate_item_meta(void *matcher, void *key,
2702 const struct rte_flow_item *item)
2704 const struct rte_flow_item_meta *meta_m;
2705 const struct rte_flow_item_meta *meta_v;
2707 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2709 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2711 meta_m = (const void *)item->mask;
2713 meta_m = &rte_flow_item_meta_mask;
2714 meta_v = (const void *)item->spec;
2716 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2717 rte_be_to_cpu_32(meta_m->data));
2718 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2719 rte_be_to_cpu_32(meta_v->data & meta_m->data));
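/*
 * Editor's note: the META item matches a 32-bit software tag carried in
 * metadata_reg_a; the value is expected to have been attached on the
 * transmit side (e.g. via mbuf Tx metadata, an assumption here). Hedged
 * pattern-side sketch:
 *
 *   struct rte_flow_item_meta spec = { .data = RTE_BE32(0xcafe) };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_META,
 *           .spec = &spec,
 *           .mask = &rte_flow_item_meta_mask,
 *   };
 */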
2723 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2725 #define HEADER_IS_ZERO(match_criteria, headers) \
2726 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2727 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
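/*
 * Editor's note: HEADER_IS_ZERO() compares one block of the matcher mask
 * against the all-zero template above; e.g. a hypothetical check that a
 * mask requests no inner-header match would read:
 *
 *   if (!HEADER_IS_ZERO(match_criteria, inner_headers))
 *           ...; // inner match requested
 */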
2730 * Calculate flow matcher enable bitmap.
2732 * @param match_criteria
2733 * Pointer to flow matcher criteria.
2734 *
2735 * @return
2736 * Bitmap of enabled fields.
2739 flow_dv_matcher_enable(uint32_t *match_criteria)
2741 uint8_t match_criteria_enable;
2743 match_criteria_enable =
2744 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2745 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2746 match_criteria_enable |=
2747 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2748 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2749 match_criteria_enable |=
2750 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2751 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2752 match_criteria_enable |=
2753 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2754 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2755 #ifdef HAVE_MLX5DV_DR
2756 match_criteria_enable |=
2757 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2758 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2759 #endif
2760 return match_criteria_enable;
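/*
 * Editor's note: a worked example of the bitmap above. For a matcher
 * mask touching only outer_headers and misc_parameters_2 (say ETH plus
 * META), HEADER_IS_ZERO() is 0 for those two blocks and 1 elsewhere, so:
 *
 *   match_criteria_enable =
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 */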
2764 * Register the flow matcher.
2766 * @param[in, out] dev
2767 * Pointer to rte_eth_dev structure.
2768 * @param[in, out] matcher
2769 * Pointer to flow matcher.
2770 * @param[in, out] dev_flow
2771 * Pointer to the dev_flow.
2772 * @param[out] error
2773 * Pointer to error structure.
2774 *
2775 * @return
2776 * 0 on success, otherwise -errno and errno is set.
2779 flow_dv_matcher_register(struct rte_eth_dev *dev,
2780 struct mlx5_flow_dv_matcher *matcher,
2781 struct mlx5_flow *dev_flow,
2782 struct rte_flow_error *error)
2784 struct mlx5_priv *priv = dev->data->dev_private;
2785 struct mlx5_flow_dv_matcher *cache_matcher;
2786 struct mlx5dv_flow_matcher_attr dv_attr = {
2787 .type = IBV_FLOW_ATTR_NORMAL,
2788 .match_mask = (void *)&matcher->mask,
2790 struct mlx5_flow_tbl_resource *tbl = NULL;
2792 /* Lookup from cache. */
2793 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
2794 if (matcher->crc == cache_matcher->crc &&
2795 matcher->priority == cache_matcher->priority &&
2796 matcher->egress == cache_matcher->egress &&
2797 matcher->group == cache_matcher->group &&
2798 !memcmp((const void *)matcher->mask.buf,
2799 (const void *)cache_matcher->mask.buf,
2800 cache_matcher->mask.size)) {
2802 "priority %hd use %s matcher %p: refcnt %d++",
2803 cache_matcher->priority,
2804 cache_matcher->egress ? "tx" : "rx",
2805 (void *)cache_matcher,
2806 rte_atomic32_read(&cache_matcher->refcnt));
2807 rte_atomic32_inc(&cache_matcher->refcnt);
2808 dev_flow->dv.matcher = cache_matcher;
2812 #ifdef HAVE_MLX5DV_DR
2813 if (matcher->egress) {
2814 tbl = &priv->tx_tbl[matcher->group];
2816 tbl->obj = mlx5_glue->dr_create_flow_tbl
2818 matcher->group * MLX5_GROUP_FACTOR);
2820 tbl = &priv->rx_tbl[matcher->group];
2822 tbl->obj = mlx5_glue->dr_create_flow_tbl
2824 matcher->group * MLX5_GROUP_FACTOR);
2827 return rte_flow_error_set(error, ENOMEM,
2828 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2829 NULL, "cannot create table");
2831 rte_atomic32_inc(&tbl->refcnt);
2833 /* Register new matcher. */
2834 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
2836 return rte_flow_error_set(error, ENOMEM,
2837 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2838 "cannot allocate matcher memory");
2839 *cache_matcher = *matcher;
2840 dv_attr.match_criteria_enable =
2841 flow_dv_matcher_enable(cache_matcher->mask.buf);
2842 dv_attr.priority = matcher->priority;
2843 if (matcher->egress)
2844 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
2845 cache_matcher->matcher_object =
2846 mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
2848 if (!cache_matcher->matcher_object) {
2849 rte_free(cache_matcher);
2850 #ifdef HAVE_MLX5DV_DR
2851 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2852 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2856 return rte_flow_error_set(error, ENOMEM,
2857 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2858 NULL, "cannot create matcher");
2860 rte_atomic32_inc(&cache_matcher->refcnt);
2861 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
2862 dev_flow->dv.matcher = cache_matcher;
2863 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
2864 cache_matcher->priority,
2865 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
2866 rte_atomic32_read(&cache_matcher->refcnt));
2867 rte_atomic32_inc(&tbl->refcnt);
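/*
 * Editor's note: flow_dv_matcher_register() is one instance of the
 * lookup-or-create caching scheme this file applies to matchers, tags,
 * encap/decap and modify-header resources alike. Schematically:
 *
 *   LIST_FOREACH(entry, &cache, next)
 *           if (match(entry, new)) {
 *                   rte_atomic32_inc(&entry->refcnt); // reuse
 *                   return entry;
 *           }
 *   entry = create(new);                              // first user
 *   rte_atomic32_inc(&entry->refcnt);
 *   LIST_INSERT_HEAD(&cache, entry, next);
 */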
2872 * Add source vport match to the specified matcher.
2874 * @param[in, out] matcher
2875 * Flow matcher.
2876 * @param[in, out] key
2877 * Flow matcher value.
2878 * @param[in] port
2879 * Source vport value to match.
2880 * @param[in] mask
2881 * Mask to apply.
2884 flow_dv_translate_source_vport(void *matcher, void *key,
2885 int16_t port, uint16_t mask)
2887 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2888 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2890 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
2891 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
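/*
 * Editor's note: flow_dv_translate() calls the helper above for ingress
 * rules on E-Switch ports so traffic is matched per source vport; an
 * exact match would look like (the vport number is illustrative):
 *
 *   flow_dv_translate_source_vport(matcher.mask.buf,
 *                                  dev_flow->dv.value.buf,
 *                                  vport_id, 0xffff);
 */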
2895 * Find existing tag resource or create and register a new one.
2897 * @param[in, out] dev
2898 * Pointer to rte_eth_dev structure.
2899 * @param[in, out] resource
2900 * Pointer to tag resource.
2901 * @param[in, out] dev_flow
2902 * Pointer to the dev_flow.
2903 * @param[out] error
2904 * Pointer to error structure.
2905 *
2906 * @return
2907 * 0 on success, otherwise -errno and errno is set.
2910 flow_dv_tag_resource_register
2911 (struct rte_eth_dev *dev,
2912 struct mlx5_flow_dv_tag_resource *resource,
2913 struct mlx5_flow *dev_flow,
2914 struct rte_flow_error *error)
2916 struct mlx5_priv *priv = dev->data->dev_private;
2917 struct mlx5_flow_dv_tag_resource *cache_resource;
2919 /* Lookup a matching resource from cache. */
2920 LIST_FOREACH(cache_resource, &priv->tags, next) {
2921 if (resource->tag == cache_resource->tag) {
2922 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
2923 (void *)cache_resource,
2924 rte_atomic32_read(&cache_resource->refcnt));
2925 rte_atomic32_inc(&cache_resource->refcnt);
2926 dev_flow->flow->tag_resource = cache_resource;
2930 /* Register new resource. */
2931 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2932 if (!cache_resource)
2933 return rte_flow_error_set(error, ENOMEM,
2934 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2935 "cannot allocate resource memory");
2936 *cache_resource = *resource;
2937 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
2939 if (!cache_resource->action) {
2940 rte_free(cache_resource);
2941 return rte_flow_error_set(error, ENOMEM,
2942 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2943 NULL, "cannot create action");
2945 rte_atomic32_init(&cache_resource->refcnt);
2946 rte_atomic32_inc(&cache_resource->refcnt);
2947 LIST_INSERT_HEAD(&priv->tags, cache_resource, next);
2948 dev_flow->flow->tag_resource = cache_resource;
2949 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
2950 (void *)cache_resource,
2951 rte_atomic32_read(&cache_resource->refcnt));
2958 * @param dev
2959 * Pointer to Ethernet device.
2960 * @param tag
2961 * Pointer to the tag resource to release.
2962 *
2963 * @return
2964 * 1 while a reference on it exists, 0 when freed.
2967 flow_dv_tag_release(struct rte_eth_dev *dev,
2968 struct mlx5_flow_dv_tag_resource *tag)
2971 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
2972 dev->data->port_id, (void *)tag,
2973 rte_atomic32_read(&tag->refcnt));
2974 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
2975 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
2976 LIST_REMOVE(tag, next);
2977 DRV_LOG(DEBUG, "port %u tag %p: removed",
2978 dev->data->port_id, (void *)tag);
2986 * Fill the flow with DV spec.
2988 * @param[in] dev
2989 * Pointer to rte_eth_dev structure.
2990 * @param[in, out] dev_flow
2991 * Pointer to the sub flow.
2992 * @param[in] attr
2993 * Pointer to the flow attributes.
2994 * @param[in] items
2995 * Pointer to the list of items.
2996 * @param[in] actions
2997 * Pointer to the list of actions.
2998 * @param[out] error
2999 * Pointer to the error structure.
3000 *
3001 * @return
3002 * 0 on success, a negative errno value otherwise and rte_errno is set.
3005 flow_dv_translate(struct rte_eth_dev *dev,
3006 struct mlx5_flow *dev_flow,
3007 const struct rte_flow_attr *attr,
3008 const struct rte_flow_item items[],
3009 const struct rte_flow_action actions[],
3010 struct rte_flow_error *error)
3012 struct mlx5_priv *priv = dev->data->dev_private;
3013 struct rte_flow *flow = dev_flow->flow;
3014 uint64_t item_flags = 0;
3015 uint64_t last_item = 0;
3016 uint64_t action_flags = 0;
3017 uint64_t priority = attr->priority;
3018 struct mlx5_flow_dv_matcher matcher = {
3020 .size = sizeof(matcher.mask.buf),
3024 bool actions_end = false;
3025 struct mlx5_flow_dv_modify_hdr_resource res = {
3026 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3027 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3029 union flow_dv_attr flow_attr = { .attr = 0 };
3030 struct mlx5_flow_dv_tag_resource tag_resource;
3032 if (priority == MLX5_FLOW_PRIO_RSVD)
3033 priority = priv->config.flow_prio - 1;
3034 for (; !actions_end ; actions++) {
3035 const struct rte_flow_action_queue *queue;
3036 const struct rte_flow_action_rss *rss;
3037 const struct rte_flow_action *action = actions;
3038 const struct rte_flow_action_count *count = action->conf;
3039 const uint8_t *rss_key;
3041 switch (actions->type) {
3042 case RTE_FLOW_ACTION_TYPE_VOID:
3044 case RTE_FLOW_ACTION_TYPE_FLAG:
3046 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3047 if (!flow->tag_resource)
3048 if (flow_dv_tag_resource_register
3049 (dev, &tag_resource, dev_flow, error))
3051 dev_flow->dv.actions[actions_n++] =
3052 flow->tag_resource->action;
3053 action_flags |= MLX5_FLOW_ACTION_FLAG;
3055 case RTE_FLOW_ACTION_TYPE_MARK:
3056 tag_resource.tag = mlx5_flow_mark_set
3057 (((const struct rte_flow_action_mark *)
3058 (actions->conf))->id);
3059 if (!flow->tag_resource)
3060 if (flow_dv_tag_resource_register
3061 (dev, &tag_resource, dev_flow, error))
3063 dev_flow->dv.actions[actions_n++] =
3064 flow->tag_resource->action;
3065 action_flags |= MLX5_FLOW_ACTION_MARK;
3067 case RTE_FLOW_ACTION_TYPE_DROP:
3068 action_flags |= MLX5_FLOW_ACTION_DROP;
3070 case RTE_FLOW_ACTION_TYPE_QUEUE:
3071 queue = actions->conf;
3072 flow->rss.queue_num = 1;
3073 (*flow->queue)[0] = queue->index;
3074 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3076 case RTE_FLOW_ACTION_TYPE_RSS:
3077 rss = actions->conf;
3079 memcpy((*flow->queue), rss->queue,
3080 rss->queue_num * sizeof(uint16_t));
3081 flow->rss.queue_num = rss->queue_num;
3082 /* NULL RSS key indicates default RSS key. */
3083 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3084 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3085 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3086 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3087 flow->rss.level = rss->level;
3088 action_flags |= MLX5_FLOW_ACTION_RSS;
3090 case RTE_FLOW_ACTION_TYPE_COUNT:
3091 if (!priv->config.devx) {
3092 rte_errno = ENOTSUP;
3095 flow->counter = flow_dv_counter_new(dev, count->shared,
3097 if (flow->counter == NULL)
3099 dev_flow->dv.actions[actions_n++] =
3100 flow->counter->action;
3101 action_flags |= MLX5_FLOW_ACTION_COUNT;
3104 if (rte_errno == ENOTSUP)
3105 return rte_flow_error_set
3107 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3109 "count action not supported");
3111 return rte_flow_error_set
3113 RTE_FLOW_ERROR_TYPE_ACTION,
3115 "cannot create counter"
3117 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3118 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3119 if (flow_dv_create_action_l2_encap(dev, actions,
3122 dev_flow->dv.actions[actions_n++] =
3123 dev_flow->dv.encap_decap->verbs_action;
3124 action_flags |= actions->type ==
3125 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3126 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3127 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3129 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3130 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3131 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3134 dev_flow->dv.actions[actions_n++] =
3135 dev_flow->dv.encap_decap->verbs_action;
3136 action_flags |= actions->type ==
3137 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3138 MLX5_FLOW_ACTION_VXLAN_DECAP :
3139 MLX5_FLOW_ACTION_NVGRE_DECAP;
3141 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3142 /* Handle encap with preceding decap. */
3143 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3144 if (flow_dv_create_action_raw_encap
3145 (dev, actions, dev_flow, attr, error))
3147 dev_flow->dv.actions[actions_n++] =
3148 dev_flow->dv.encap_decap->verbs_action;
3150 /* Handle encap without preceding decap. */
3151 if (flow_dv_create_action_l2_encap(dev, actions,
3155 dev_flow->dv.actions[actions_n++] =
3156 dev_flow->dv.encap_decap->verbs_action;
3158 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3160 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3161 /* Check if this decap is followed by encap. */
3162 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3163 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3166 /* Handle decap only if it isn't followed by encap. */
3167 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3168 if (flow_dv_create_action_l2_decap(dev,
3172 dev_flow->dv.actions[actions_n++] =
3173 dev_flow->dv.encap_decap->verbs_action;
3175 /* If decap is followed by encap, handle it at encap. */
3176 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3178 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3179 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3180 if (flow_dv_convert_action_modify_mac(&res, actions,
3183 action_flags |= actions->type ==
3184 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3185 MLX5_FLOW_ACTION_SET_MAC_SRC :
3186 MLX5_FLOW_ACTION_SET_MAC_DST;
3188 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3189 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3190 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3193 action_flags |= actions->type ==
3194 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3195 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3196 MLX5_FLOW_ACTION_SET_IPV4_DST;
3198 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3199 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3200 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3203 action_flags |= actions->type ==
3204 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3205 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3206 MLX5_FLOW_ACTION_SET_IPV6_DST;
3208 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3209 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3210 if (flow_dv_convert_action_modify_tp(&res, actions,
3214 action_flags |= actions->type ==
3215 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3216 MLX5_FLOW_ACTION_SET_TP_SRC :
3217 MLX5_FLOW_ACTION_SET_TP_DST;
3219 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3220 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3224 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3226 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3227 if (flow_dv_convert_action_modify_ttl(&res, actions,
3231 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3233 case RTE_FLOW_ACTION_TYPE_END:
3235 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3236 /* Create modify action if needed. */
3237 if (flow_dv_modify_hdr_resource_register
3242 dev_flow->dv.actions[actions_n++] =
3243 dev_flow->dv.modify_hdr->verbs_action;
3250 dev_flow->dv.actions_n = actions_n;
3251 flow->actions = action_flags;
3252 if (attr->ingress && !attr->transfer &&
3253 (priv->representor || priv->master)) {
3254 /* It was validated - we support unidirectional flows only. */
3255 assert(!attr->egress);
3257 * Add matching on source vport index only
3258 * for ingress rules in E-Switch configurations.
3260 flow_dv_translate_source_vport(matcher.mask.buf,
3261 dev_flow->dv.value.buf,
3265 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3266 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3267 void *match_mask = matcher.mask.buf;
3268 void *match_value = dev_flow->dv.value.buf;
3270 switch (items->type) {
3271 case RTE_FLOW_ITEM_TYPE_ETH:
3272 flow_dv_translate_item_eth(match_mask, match_value,
3274 matcher.priority = MLX5_PRIORITY_MAP_L2;
3275 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3276 MLX5_FLOW_LAYER_OUTER_L2;
3278 case RTE_FLOW_ITEM_TYPE_VLAN:
3279 flow_dv_translate_item_vlan(match_mask, match_value,
3281 matcher.priority = MLX5_PRIORITY_MAP_L2;
3282 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3283 MLX5_FLOW_LAYER_INNER_VLAN) :
3284 (MLX5_FLOW_LAYER_OUTER_L2 |
3285 MLX5_FLOW_LAYER_OUTER_VLAN);
3287 case RTE_FLOW_ITEM_TYPE_IPV4:
3288 flow_dv_translate_item_ipv4(match_mask, match_value,
3289 items, tunnel, attr->group);
3290 matcher.priority = MLX5_PRIORITY_MAP_L3;
3291 dev_flow->dv.hash_fields |=
3292 mlx5_flow_hashfields_adjust
3294 MLX5_IPV4_LAYER_TYPES,
3295 MLX5_IPV4_IBV_RX_HASH);
3296 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3297 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3299 case RTE_FLOW_ITEM_TYPE_IPV6:
3300 flow_dv_translate_item_ipv6(match_mask, match_value,
3301 items, tunnel, attr->group);
3302 matcher.priority = MLX5_PRIORITY_MAP_L3;
3303 dev_flow->dv.hash_fields |=
3304 mlx5_flow_hashfields_adjust
3306 MLX5_IPV6_LAYER_TYPES,
3307 MLX5_IPV6_IBV_RX_HASH);
3308 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3309 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3311 case RTE_FLOW_ITEM_TYPE_TCP:
3312 flow_dv_translate_item_tcp(match_mask, match_value,
3314 matcher.priority = MLX5_PRIORITY_MAP_L4;
3315 dev_flow->dv.hash_fields |=
3316 mlx5_flow_hashfields_adjust
3317 (dev_flow, tunnel, ETH_RSS_TCP,
3318 IBV_RX_HASH_SRC_PORT_TCP |
3319 IBV_RX_HASH_DST_PORT_TCP);
3320 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3321 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3323 case RTE_FLOW_ITEM_TYPE_UDP:
3324 flow_dv_translate_item_udp(match_mask, match_value,
3326 matcher.priority = MLX5_PRIORITY_MAP_L4;
3327 dev_flow->dv.hash_fields |=
3328 mlx5_flow_hashfields_adjust
3329 (dev_flow, tunnel, ETH_RSS_UDP,
3330 IBV_RX_HASH_SRC_PORT_UDP |
3331 IBV_RX_HASH_DST_PORT_UDP);
3332 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3333 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3335 case RTE_FLOW_ITEM_TYPE_GRE:
3336 flow_dv_translate_item_gre(match_mask, match_value,
3338 last_item = MLX5_FLOW_LAYER_GRE;
3340 case RTE_FLOW_ITEM_TYPE_NVGRE:
3341 flow_dv_translate_item_nvgre(match_mask, match_value,
3343 last_item = MLX5_FLOW_LAYER_GRE;
3345 case RTE_FLOW_ITEM_TYPE_VXLAN:
3346 flow_dv_translate_item_vxlan(match_mask, match_value,
3348 last_item = MLX5_FLOW_LAYER_VXLAN;
3350 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3351 flow_dv_translate_item_vxlan(match_mask, match_value,
3353 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3355 case RTE_FLOW_ITEM_TYPE_MPLS:
3356 flow_dv_translate_item_mpls(match_mask, match_value,
3357 items, last_item, tunnel);
3358 last_item = MLX5_FLOW_LAYER_MPLS;
3360 case RTE_FLOW_ITEM_TYPE_META:
3361 flow_dv_translate_item_meta(match_mask, match_value,
3363 last_item = MLX5_FLOW_ITEM_METADATA;
3368 item_flags |= last_item;
3370 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3371 dev_flow->dv.value.buf));
3372 dev_flow->layers = item_flags;
3373 /* Register matcher. */
3374 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3376 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3378 matcher.egress = attr->egress;
3379 matcher.group = attr->group;
3380 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3386 * Apply the flow to the NIC.
3388 * @param[in] dev
3389 * Pointer to the Ethernet device structure.
3390 * @param[in, out] flow
3391 * Pointer to flow structure.
3392 * @param[out] error
3393 * Pointer to error structure.
3394 *
3395 * @return
3396 * 0 on success, a negative errno value otherwise and rte_errno is set.
3399 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3400 struct rte_flow_error *error)
3402 struct mlx5_flow_dv *dv;
3403 struct mlx5_flow *dev_flow;
3407 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3410 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3411 dv->hrxq = mlx5_hrxq_drop_new(dev);
3415 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3416 "cannot get drop hash queue");
3420 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3422 } else if (flow->actions &
3423 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3424 struct mlx5_hrxq *hrxq;
3426 hrxq = mlx5_hrxq_get(dev, flow->key,
3427 MLX5_RSS_HASH_KEY_LEN,
3430 flow->rss.queue_num);
3432 hrxq = mlx5_hrxq_new
3433 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3434 dv->hash_fields, (*flow->queue),
3435 flow->rss.queue_num,
3436 !!(dev_flow->layers &
3437 MLX5_FLOW_LAYER_TUNNEL));
3441 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3442 "cannot get hash queue");
3447 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3451 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3452 (void *)&dv->value, n,
3455 rte_flow_error_set(error, errno,
3456 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3458 "hardware refuses to create flow");
3464 err = rte_errno; /* Save rte_errno before cleanup. */
3465 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3466 struct mlx5_flow_dv *dv = &dev_flow->dv;
3468 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3469 mlx5_hrxq_drop_release(dev);
3471 mlx5_hrxq_release(dev, dv->hrxq);
3475 rte_errno = err; /* Restore rte_errno. */
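/*
 * Editor's note: the error path above follows the usual DPDK idiom of
 * preserving rte_errno across cleanup calls that may overwrite it:
 *
 *   err = rte_errno;         // save the root cause
 *   ...release hrxq / drop queue for every sub-flow...
 *   rte_errno = err;         // restore before returning
 *   return -rte_errno;
 */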
3480 * Release the flow matcher.
3482 * @param dev
3483 * Pointer to Ethernet device.
3484 * @param flow
3485 * Pointer to mlx5_flow.
3486 *
3487 * @return
3488 * 1 while a reference on it exists, 0 when freed.
3491 flow_dv_matcher_release(struct rte_eth_dev *dev,
3492 struct mlx5_flow *flow)
3494 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3495 struct mlx5_priv *priv = dev->data->dev_private;
3496 struct mlx5_flow_tbl_resource *tbl;
3498 assert(matcher->matcher_object);
3499 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3500 dev->data->port_id, (void *)matcher,
3501 rte_atomic32_read(&matcher->refcnt));
3502 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3503 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3504 (matcher->matcher_object));
3505 LIST_REMOVE(matcher, next);
3506 if (matcher->egress)
3507 tbl = &priv->tx_tbl[matcher->group];
3509 tbl = &priv->rx_tbl[matcher->group];
3510 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3511 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3515 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3516 dev->data->port_id, (void *)matcher);
3523 * Release an encap/decap resource.
3525 * @param flow
3526 * Pointer to mlx5_flow.
3527 *
3528 * @return
3529 * 1 while a reference on it exists, 0 when freed.
3532 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3534 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3535 flow->dv.encap_decap;
3537 assert(cache_resource->verbs_action);
3538 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3539 (void *)cache_resource,
3540 rte_atomic32_read(&cache_resource->refcnt));
3541 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3542 claim_zero(mlx5_glue->destroy_flow_action
3543 (cache_resource->verbs_action));
3544 LIST_REMOVE(cache_resource, next);
3545 rte_free(cache_resource);
3546 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3547 (void *)cache_resource);
3554 * Release a modify-header resource.
3556 * @param flow
3557 * Pointer to mlx5_flow.
3558 *
3559 * @return
3560 * 1 while a reference on it exists, 0 when freed.
3563 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3565 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3566 flow->dv.modify_hdr;
3568 assert(cache_resource->verbs_action);
3569 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3570 (void *)cache_resource,
3571 rte_atomic32_read(&cache_resource->refcnt));
3572 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3573 claim_zero(mlx5_glue->destroy_flow_action
3574 (cache_resource->verbs_action));
3575 LIST_REMOVE(cache_resource, next);
3576 rte_free(cache_resource);
3577 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3578 (void *)cache_resource);
3585 * Remove the flow from the NIC but keep it in memory.
3586 *
3587 * @param[in] dev
3588 * Pointer to Ethernet device.
3589 * @param[in, out] flow
3590 * Pointer to flow structure.
3593 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3595 struct mlx5_flow_dv *dv;
3596 struct mlx5_flow *dev_flow;
3600 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3603 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3607 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3608 mlx5_hrxq_drop_release(dev);
3610 mlx5_hrxq_release(dev, dv->hrxq);
3617 * Remove the flow from the NIC and the memory.
3619 * @param[in] dev
3620 * Pointer to the Ethernet device structure.
3621 * @param[in, out] flow
3622 * Pointer to flow structure.
3625 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3627 struct mlx5_flow *dev_flow;
3631 flow_dv_remove(dev, flow);
3632 if (flow->counter) {
3633 flow_dv_counter_release(flow->counter);
3634 flow->counter = NULL;
3636 if (flow->tag_resource) {
3637 flow_dv_tag_release(dev, flow->tag_resource);
3638 flow->tag_resource = NULL;
3640 while (!LIST_EMPTY(&flow->dev_flows)) {
3641 dev_flow = LIST_FIRST(&flow->dev_flows);
3642 LIST_REMOVE(dev_flow, next);
3643 if (dev_flow->dv.matcher)
3644 flow_dv_matcher_release(dev, dev_flow);
3645 if (dev_flow->dv.encap_decap)
3646 flow_dv_encap_decap_resource_release(dev_flow);
3647 if (dev_flow->dv.modify_hdr)
3648 flow_dv_modify_hdr_resource_release(dev_flow);
3654 * Query a dv flow rule for its statistics via devx.
3656 * @param[in] dev
3657 * Pointer to Ethernet device.
3658 * @param[in] flow
3659 * Pointer to the sub flow.
3660 * @param[out] data
3661 * Data retrieved by the query.
3662 * @param[out] error
3663 * Perform verbose error reporting if not NULL.
3664 *
3665 * @return
3666 * 0 on success, a negative errno value otherwise and rte_errno is set.
3669 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3670 void *data, struct rte_flow_error *error)
3672 struct mlx5_priv *priv = dev->data->dev_private;
3673 struct rte_flow_query_count *qc = data;
3678 if (!priv->config.devx)
3679 return rte_flow_error_set(error, ENOTSUP,
3680 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3682 "counters are not supported");
3683 if (flow->counter) {
3684 err = mlx5_devx_cmd_flow_counter_query
3685 (flow->counter->dcs,
3686 qc->reset, &pkts, &bytes);
3688 return rte_flow_error_set
3690 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3692 "cannot read counters");
3695 qc->hits = pkts - flow->counter->hits;
3696 qc->bytes = bytes - flow->counter->bytes;
3698 flow->counter->hits = pkts;
3699 flow->counter->bytes = bytes;
3703 return rte_flow_error_set(error, EINVAL,
3704 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3706 "counters are not available");
3710 * Query a flow.
3711 *
3712 * @see rte_flow_query()
3713 * @see rte_flow_ops
3716 flow_dv_query(struct rte_eth_dev *dev,
3717 struct rte_flow *flow,
3718 const struct rte_flow_action *actions,
3719 void *data,
3720 struct rte_flow_error *error)
3724 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3725 switch (actions->type) {
3726 case RTE_FLOW_ACTION_TYPE_VOID:
3728 case RTE_FLOW_ACTION_TYPE_COUNT:
3729 ret = flow_dv_query_count(dev, flow, data, error);
3732 return rte_flow_error_set(error, ENOTSUP,
3733 RTE_FLOW_ERROR_TYPE_ACTION,
3735 "action not supported");
3742 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
3743 .validate = flow_dv_validate,
3744 .prepare = flow_dv_prepare,
3745 .translate = flow_dv_translate,
3746 .apply = flow_dv_apply,
3747 .remove = flow_dv_remove,
3748 .destroy = flow_dv_destroy,
3749 .query = flow_dv_query,
3750 };
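/*
 * Editor's note: mlx5_flow.c selects this table per device (roughly,
 * when the dv_flow_en devarg enables the DV backend and the kernel
 * exposes flow DV support) and every rte_flow entry point dispatches
 * through it; a condensed, hedged sketch of one flow's life cycle:
 *
 *   const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *
 *   if (!fops->validate(dev, attr, items, actions, error) &&
 *       (dev_flow = fops->prepare(attr, items, actions, error)) &&
 *       !fops->translate(dev, dev_flow, attr, items, actions, error) &&
 *       !fops->apply(dev, flow, error))
 *           ...; // flow is live; destroy via fops->destroy(dev, flow)
 */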
3752 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */