1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
38 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
54 * Initialize flow attributes structure according to flow items' types.
57 * Pointer to item specification.
59 * Pointer to flow attributes structure.
62 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
64 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
66 case RTE_FLOW_ITEM_TYPE_IPV4:
69 case RTE_FLOW_ITEM_TYPE_IPV6:
72 case RTE_FLOW_ITEM_TYPE_UDP:
75 case RTE_FLOW_ITEM_TYPE_TCP:
85 struct field_modify_info {
86 uint32_t size; /* Size of field in protocol header, in bytes. */
87 uint32_t offset; /* Offset of field in protocol header, in bytes. */
88 enum mlx5_modification_field id;
91 struct field_modify_info modify_eth[] = {
92 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
93 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
94 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
95 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
99 struct field_modify_info modify_ipv4[] = {
100 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
101 {4, 12, MLX5_MODI_OUT_SIPV4},
102 {4, 16, MLX5_MODI_OUT_DIPV4},
106 struct field_modify_info modify_ipv6[] = {
107 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
108 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
109 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
110 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
111 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
112 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
113 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
114 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
115 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
119 struct field_modify_info modify_udp[] = {
120 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
121 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
125 struct field_modify_info modify_tcp[] = {
126 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
127 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
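/*
 * The modify_* tables above describe, per protocol header, which fields the
 * device can rewrite: the field width in bytes, the byte offset of the field
 * within the header and the corresponding mlx5_modification_field id. Each
 * table ends with an all-zero terminator entry, which the
 * while (field->size) loop in flow_dv_convert_modify_action() relies on.
 * A minimal illustrative sketch of the convention (not a table used by the
 * driver), describing a single 2-byte field at offset 0:
 *
 *	struct field_modify_info modify_example[] = {
 *		{2, 0, MLX5_MODI_OUT_UDP_SPORT},
 *		{0, 0, 0},
 *	};
 */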
132 * Convert modify-header action to DV specification.
135 * Pointer to item specification.
137 * Pointer to field modification information.
138 * @param[in,out] resource
139 * Pointer to the modify-header resource.
141 * Type of modification.
143 * Pointer to the error structure.
146 * 0 on success, a negative errno value otherwise and rte_errno is set.
149 flow_dv_convert_modify_action(struct rte_flow_item *item,
150 struct field_modify_info *field,
151 struct mlx5_flow_dv_modify_hdr_resource *resource,
153 struct rte_flow_error *error)
155 uint32_t i = resource->actions_num;
156 struct mlx5_modification_cmd *actions = resource->actions;
157 const uint8_t *spec = item->spec;
158 const uint8_t *mask = item->mask;
161 while (field->size) {
163 /* Generate modify command for each mask segment. */
164 memcpy(&set, &mask[field->offset], field->size);
166 if (i >= MLX5_MODIFY_NUM)
167 return rte_flow_error_set(error, EINVAL,
168 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
169 "too many items to modify");
170 actions[i].action_type = type;
171 actions[i].field = field->id;
172 actions[i].length = field->size ==
173 4 ? 0 : field->size * 8;
174 rte_memcpy(&actions[i].data[4 - field->size],
175 &spec[field->offset], field->size);
176 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
179 if (resource->actions_num != i)
180 resource->actions_num = i;
183 if (!resource->actions_num)
184 return rte_flow_error_set(error, EINVAL,
185 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
186 "invalid modification flow item");
191 * Convert modify-header set IPv4 address action to DV specification.
193 * @param[in,out] resource
194 * Pointer to the modify-header resource.
196 * Pointer to action specification.
198 * Pointer to the error structure.
201 * 0 on success, a negative errno value otherwise and rte_errno is set.
204 flow_dv_convert_action_modify_ipv4
205 (struct mlx5_flow_dv_modify_hdr_resource *resource,
206 const struct rte_flow_action *action,
207 struct rte_flow_error *error)
209 const struct rte_flow_action_set_ipv4 *conf =
210 (const struct rte_flow_action_set_ipv4 *)(action->conf);
211 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
212 struct rte_flow_item_ipv4 ipv4;
213 struct rte_flow_item_ipv4 ipv4_mask;
215 memset(&ipv4, 0, sizeof(ipv4));
216 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
217 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
218 ipv4.hdr.src_addr = conf->ipv4_addr;
219 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
221 ipv4.hdr.dst_addr = conf->ipv4_addr;
222 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
225 item.mask = &ipv4_mask;
226 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
227 MLX5_MODIFICATION_TYPE_SET, error);
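/*
 * Illustrative sketch (application side; the address 192.168.1.1 is a made-up
 * example): an RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC action whose conf is
 * consumed by flow_dv_convert_action_modify_ipv4() above.
 *
 *	struct rte_flow_action_set_ipv4 set_src = {
 *		.ipv4_addr = RTE_BE32(0xc0a80101),
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		.conf = &set_src,
 *	};
 */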
231 * Convert modify-header set IPv6 address action to DV specification.
233 * @param[in,out] resource
234 * Pointer to the modify-header resource.
236 * Pointer to action specification.
238 * Pointer to the error structure.
241 * 0 on success, a negative errno value otherwise and rte_errno is set.
244 flow_dv_convert_action_modify_ipv6
245 (struct mlx5_flow_dv_modify_hdr_resource *resource,
246 const struct rte_flow_action *action,
247 struct rte_flow_error *error)
249 const struct rte_flow_action_set_ipv6 *conf =
250 (const struct rte_flow_action_set_ipv6 *)(action->conf);
251 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
252 struct rte_flow_item_ipv6 ipv6;
253 struct rte_flow_item_ipv6 ipv6_mask;
255 memset(&ipv6, 0, sizeof(ipv6));
256 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
257 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
258 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
259 sizeof(ipv6.hdr.src_addr));
260 memcpy(&ipv6_mask.hdr.src_addr,
261 &rte_flow_item_ipv6_mask.hdr.src_addr,
262 sizeof(ipv6.hdr.src_addr));
264 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
265 sizeof(ipv6.hdr.dst_addr));
266 memcpy(&ipv6_mask.hdr.dst_addr,
267 &rte_flow_item_ipv6_mask.hdr.dst_addr,
268 sizeof(ipv6.hdr.dst_addr));
271 item.mask = &ipv6_mask;
272 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
273 MLX5_MODIFICATION_TYPE_SET, error);
277 * Convert modify-header set MAC address action to DV specification.
279 * @param[in,out] resource
280 * Pointer to the modify-header resource.
282 * Pointer to action specification.
284 * Pointer to the error structure.
287 * 0 on success, a negative errno value otherwise and rte_errno is set.
290 flow_dv_convert_action_modify_mac
291 (struct mlx5_flow_dv_modify_hdr_resource *resource,
292 const struct rte_flow_action *action,
293 struct rte_flow_error *error)
295 const struct rte_flow_action_set_mac *conf =
296 (const struct rte_flow_action_set_mac *)(action->conf);
297 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
298 struct rte_flow_item_eth eth;
299 struct rte_flow_item_eth eth_mask;
301 memset(&eth, 0, sizeof(eth));
302 memset(&eth_mask, 0, sizeof(eth_mask));
303 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
304 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
305 sizeof(eth.src.addr_bytes));
306 memcpy(&eth_mask.src.addr_bytes,
307 &rte_flow_item_eth_mask.src.addr_bytes,
308 sizeof(eth_mask.src.addr_bytes));
310 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
311 sizeof(eth.dst.addr_bytes));
312 memcpy(&eth_mask.dst.addr_bytes,
313 &rte_flow_item_eth_mask.dst.addr_bytes,
314 sizeof(eth_mask.dst.addr_bytes));
317 item.mask = &eth_mask;
318 return flow_dv_convert_modify_action(&item, modify_eth, resource,
319 MLX5_MODIFICATION_TYPE_SET, error);
323 * Convert modify-header set TP action to DV specification.
325 * @param[in,out] resource
326 * Pointer to the modify-header resource.
328 * Pointer to action specification.
330 * Pointer to rte_flow_item objects list.
332 * Pointer to flow attributes structure.
334 * Pointer to the error structure.
337 * 0 on success, a negative errno value otherwise and rte_errno is set.
340 flow_dv_convert_action_modify_tp
341 (struct mlx5_flow_dv_modify_hdr_resource *resource,
342 const struct rte_flow_action *action,
343 const struct rte_flow_item *items,
344 union flow_dv_attr *attr,
345 struct rte_flow_error *error)
347 const struct rte_flow_action_set_tp *conf =
348 (const struct rte_flow_action_set_tp *)(action->conf);
349 struct rte_flow_item item;
350 struct rte_flow_item_udp udp;
351 struct rte_flow_item_udp udp_mask;
352 struct rte_flow_item_tcp tcp;
353 struct rte_flow_item_tcp tcp_mask;
354 struct field_modify_info *field;
357 flow_dv_attr_init(items, attr);
359 memset(&udp, 0, sizeof(udp));
360 memset(&udp_mask, 0, sizeof(udp_mask));
361 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
362 udp.hdr.src_port = conf->port;
363 udp_mask.hdr.src_port =
364 rte_flow_item_udp_mask.hdr.src_port;
366 udp.hdr.dst_port = conf->port;
367 udp_mask.hdr.dst_port =
368 rte_flow_item_udp_mask.hdr.dst_port;
370 item.type = RTE_FLOW_ITEM_TYPE_UDP;
372 item.mask = &udp_mask;
376 memset(&tcp, 0, sizeof(tcp));
377 memset(&tcp_mask, 0, sizeof(tcp_mask));
378 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
379 tcp.hdr.src_port = conf->port;
380 tcp_mask.hdr.src_port =
381 rte_flow_item_tcp_mask.hdr.src_port;
383 tcp.hdr.dst_port = conf->port;
384 tcp_mask.hdr.dst_port =
385 rte_flow_item_tcp_mask.hdr.dst_port;
387 item.type = RTE_FLOW_ITEM_TYPE_TCP;
389 item.mask = &tcp_mask;
392 return flow_dv_convert_modify_action(&item, field, resource,
393 MLX5_MODIFICATION_TYPE_SET, error);
397 * Convert modify-header set TTL action to DV specification.
399 * @param[in,out] resource
400 * Pointer to the modify-header resource.
402 * Pointer to action specification.
404 * Pointer to rte_flow_item objects list.
406 * Pointer to flow attributes structure.
408 * Pointer to the error structure.
411 * 0 on success, a negative errno value otherwise and rte_errno is set.
414 flow_dv_convert_action_modify_ttl
415 (struct mlx5_flow_dv_modify_hdr_resource *resource,
416 const struct rte_flow_action *action,
417 const struct rte_flow_item *items,
418 union flow_dv_attr *attr,
419 struct rte_flow_error *error)
421 const struct rte_flow_action_set_ttl *conf =
422 (const struct rte_flow_action_set_ttl *)(action->conf);
423 struct rte_flow_item item;
424 struct rte_flow_item_ipv4 ipv4;
425 struct rte_flow_item_ipv4 ipv4_mask;
426 struct rte_flow_item_ipv6 ipv6;
427 struct rte_flow_item_ipv6 ipv6_mask;
428 struct field_modify_info *field;
431 flow_dv_attr_init(items, attr);
433 memset(&ipv4, 0, sizeof(ipv4));
434 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
435 ipv4.hdr.time_to_live = conf->ttl_value;
436 ipv4_mask.hdr.time_to_live = 0xFF;
437 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
439 item.mask = &ipv4_mask;
443 memset(&ipv6, 0, sizeof(ipv6));
444 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
445 ipv6.hdr.hop_limits = conf->ttl_value;
446 ipv6_mask.hdr.hop_limits = 0xFF;
447 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
449 item.mask = &ipv6_mask;
452 return flow_dv_convert_modify_action(&item, field, resource,
453 MLX5_MODIFICATION_TYPE_SET, error);
457 * Convert modify-header decrement TTL action to DV specification.
459 * @param[in,out] resource
460 * Pointer to the modify-header resource.
462 * Pointer to action specification.
464 * Pointer to rte_flow_item objects list.
466 * Pointer to flow attributes structure.
468 * Pointer to the error structure.
471 * 0 on success, a negative errno value otherwise and rte_errno is set.
474 flow_dv_convert_action_modify_dec_ttl
475 (struct mlx5_flow_dv_modify_hdr_resource *resource,
476 const struct rte_flow_item *items,
477 union flow_dv_attr *attr,
478 struct rte_flow_error *error)
480 struct rte_flow_item item;
481 struct rte_flow_item_ipv4 ipv4;
482 struct rte_flow_item_ipv4 ipv4_mask;
483 struct rte_flow_item_ipv6 ipv6;
484 struct rte_flow_item_ipv6 ipv6_mask;
485 struct field_modify_info *field;
488 flow_dv_attr_init(items, attr);
490 memset(&ipv4, 0, sizeof(ipv4));
491 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
492 ipv4.hdr.time_to_live = 0xFF;
493 ipv4_mask.hdr.time_to_live = 0xFF;
494 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
496 item.mask = &ipv4_mask;
500 memset(&ipv6, 0, sizeof(ipv6));
501 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
502 ipv6.hdr.hop_limits = 0xFF;
503 ipv6_mask.hdr.hop_limits = 0xFF;
504 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
506 item.mask = &ipv6_mask;
509 return flow_dv_convert_modify_action(&item, field, resource,
510 MLX5_MODIFICATION_TYPE_ADD, error);
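/*
 * Note: there is no dedicated decrement primitive here; the TTL/hop-limit
 * decrement above is expressed as a MLX5_MODIFICATION_TYPE_ADD of 0xFF to the
 * 8-bit field, i.e. adding -1 modulo 256.
 */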
514 * Validate META item.
517 * Pointer to the rte_eth_dev structure.
519 * Item specification.
521 * Attributes of flow that includes this item.
523 * Pointer to error structure.
526 * 0 on success, a negative errno value otherwise and rte_errno is set.
529 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
530 const struct rte_flow_item *item,
531 const struct rte_flow_attr *attr,
532 struct rte_flow_error *error)
534 const struct rte_flow_item_meta *spec = item->spec;
535 const struct rte_flow_item_meta *mask = item->mask;
536 const struct rte_flow_item_meta nic_mask = {
537 .data = RTE_BE32(UINT32_MAX)
540 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
542 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
543 return rte_flow_error_set(error, EPERM,
544 RTE_FLOW_ERROR_TYPE_ITEM,
546 "match on metadata offload "
547 "configuration is off for this port");
549 return rte_flow_error_set(error, EINVAL,
550 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
552 "data cannot be empty");
554 return rte_flow_error_set(error, EINVAL,
555 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
557 "data cannot be zero");
559 mask = &rte_flow_item_meta_mask;
560 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
561 (const uint8_t *)&nic_mask,
562 sizeof(struct rte_flow_item_meta),
567 return rte_flow_error_set(error, ENOTSUP,
568 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
570 "pattern not supported for ingress");
575 * Validate count action.
580 * Pointer to error structure.
583 * 0 on success, a negative errno value otherwise and rte_errno is set.
586 flow_dv_validate_action_count(struct rte_eth_dev *dev,
587 struct rte_flow_error *error)
589 struct mlx5_priv *priv = dev->data->dev_private;
591 if (!priv->config.devx)
593 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
597 return rte_flow_error_set
599 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
601 "count action not supported");
605 * Validate the L2 encap action.
607 * @param[in] action_flags
608 * Holds the actions detected until now.
610 * Pointer to the encap action.
612 * Pointer to flow attributes
614 * Pointer to error structure.
617 * 0 on success, a negative errno value otherwise and rte_errno is set.
620 flow_dv_validate_action_l2_encap(uint64_t action_flags,
621 const struct rte_flow_action *action,
622 const struct rte_flow_attr *attr,
623 struct rte_flow_error *error)
626 return rte_flow_error_set(error, EINVAL,
627 RTE_FLOW_ERROR_TYPE_ACTION, action,
628 "configuration cannot be null");
629 if (action_flags & MLX5_FLOW_ACTION_DROP)
630 return rte_flow_error_set(error, EINVAL,
631 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
632 "can't drop and encap in same flow");
633 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
634 return rte_flow_error_set(error, EINVAL,
635 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
636 "can only have a single encap or"
637 " decap action in a flow");
639 return rte_flow_error_set(error, ENOTSUP,
640 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
642 "encap action not supported for "
648 * Validate the L2 decap action.
650 * @param[in] action_flags
651 * Holds the actions detected until now.
653 * Pointer to flow attributes
655 * Pointer to error structure.
658 * 0 on success, a negative errno value otherwise and rte_errno is set.
661 flow_dv_validate_action_l2_decap(uint64_t action_flags,
662 const struct rte_flow_attr *attr,
663 struct rte_flow_error *error)
665 if (action_flags & MLX5_FLOW_ACTION_DROP)
666 return rte_flow_error_set(error, EINVAL,
667 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
668 "can't drop and decap in same flow");
669 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
670 return rte_flow_error_set(error, EINVAL,
671 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
672 "can only have a single encap or"
673 " decap action in a flow");
674 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
675 return rte_flow_error_set(error, EINVAL,
676 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
677 "can't have decap action after"
680 return rte_flow_error_set(error, ENOTSUP,
681 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
683 "decap action not supported for "
689 * Validate the raw encap action.
691 * @param[in] action_flags
692 * Holds the actions detected until now.
694 * Pointer to the encap action.
696 * Pointer to flow attributes
698 * Pointer to error structure.
701 * 0 on success, a negative errno value otherwise and rte_errno is set.
704 flow_dv_validate_action_raw_encap(uint64_t action_flags,
705 const struct rte_flow_action *action,
706 const struct rte_flow_attr *attr,
707 struct rte_flow_error *error)
710 return rte_flow_error_set(error, EINVAL,
711 RTE_FLOW_ERROR_TYPE_ACTION, action,
712 "configuration cannot be null");
713 if (action_flags & MLX5_FLOW_ACTION_DROP)
714 return rte_flow_error_set(error, EINVAL,
715 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
716 "can't drop and encap in same flow");
717 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
718 return rte_flow_error_set(error, EINVAL,
719 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
720 "can only have a single encap"
721 " action in a flow");
722 /* encap without preceding decap is not supported for ingress */
723 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
724 return rte_flow_error_set(error, ENOTSUP,
725 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
727 "encap action not supported for "
733 * Validate the raw decap action.
735 * @param[in] action_flags
736 * Holds the actions detected until now.
738 * Pointer to the encap action.
740 * Pointer to flow attributes
742 * Pointer to error structure.
745 * 0 on success, a negative errno value otherwise and rte_errno is set.
748 flow_dv_validate_action_raw_decap(uint64_t action_flags,
749 const struct rte_flow_action *action,
750 const struct rte_flow_attr *attr,
751 struct rte_flow_error *error)
753 if (action_flags & MLX5_FLOW_ACTION_DROP)
754 return rte_flow_error_set(error, EINVAL,
755 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
756 "can't drop and decap in same flow");
757 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
758 return rte_flow_error_set(error, EINVAL,
759 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
760 "can't have encap action before"
762 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
763 return rte_flow_error_set(error, EINVAL,
764 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
765 "can only have a single decap"
766 " action in a flow");
767 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
768 return rte_flow_error_set(error, EINVAL,
769 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
770 "can't have decap action after"
772 /* decap action is valid on egress only if it is followed by encap */
774 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
775 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
778 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
779 return rte_flow_error_set
781 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
782 NULL, "decap action not supported"
789 * Find existing encap/decap resource or create and register a new one.
791 * @param[in, out] dev
792 * Pointer to rte_eth_dev structure.
793 * @param[in, out] resource
794 * Pointer to encap/decap resource.
795 * @param[in, out] dev_flow
796 * Pointer to the dev_flow.
798 * pointer to error structure.
801 * 0 on success, otherwise -errno and errno is set.
804 flow_dv_encap_decap_resource_register
805 (struct rte_eth_dev *dev,
806 struct mlx5_flow_dv_encap_decap_resource *resource,
807 struct mlx5_flow *dev_flow,
808 struct rte_flow_error *error)
810 struct mlx5_priv *priv = dev->data->dev_private;
811 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
812 struct rte_flow *flow = dev_flow->flow;
813 struct mlx5dv_dr_ns *ns;
815 resource->flags = flow->group ? 0 : 1;
821 /* Lookup a matching resource from cache. */
822 LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
823 if (resource->reformat_type == cache_resource->reformat_type &&
824 resource->ft_type == cache_resource->ft_type &&
825 resource->flags == cache_resource->flags &&
826 resource->size == cache_resource->size &&
827 !memcmp((const void *)resource->buf,
828 (const void *)cache_resource->buf,
830 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
831 (void *)cache_resource,
832 rte_atomic32_read(&cache_resource->refcnt));
833 rte_atomic32_inc(&cache_resource->refcnt);
834 dev_flow->dv.encap_decap = cache_resource;
838 /* Register new encap/decap resource. */
839 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
841 return rte_flow_error_set(error, ENOMEM,
842 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
843 "cannot allocate resource memory");
844 *cache_resource = *resource;
845 cache_resource->verbs_action =
846 mlx5_glue->dv_create_flow_action_packet_reformat
847 (priv->sh->ctx, cache_resource->reformat_type,
848 cache_resource->ft_type, ns, cache_resource->flags,
849 cache_resource->size,
850 (cache_resource->size ? cache_resource->buf : NULL));
851 if (!cache_resource->verbs_action) {
852 rte_free(cache_resource);
853 return rte_flow_error_set(error, ENOMEM,
854 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
855 NULL, "cannot create action");
857 rte_atomic32_init(&cache_resource->refcnt);
858 rte_atomic32_inc(&cache_resource->refcnt);
859 LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
860 dev_flow->dv.encap_decap = cache_resource;
861 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
862 (void *)cache_resource,
863 rte_atomic32_read(&cache_resource->refcnt));
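/*
 * Descriptive note: the *_resource_register() helpers in this file share the
 * same pattern -- look the resource up in a per-port cache list keyed on its
 * defining fields (here reformat type, flow table type, flags, size and raw
 * buffer), take a reference if found, otherwise allocate it, create the verbs
 * action and insert it at the head of the list with refcnt 1. The jump-table
 * and modify-header registrars below do the same for their resources.
 */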
868 * Find existing table jump resource or create and register a new one.
870 * @param[in, out] dev
871 * Pointer to rte_eth_dev structure.
872 * @param[in, out] resource
873 * Pointer to jump table resource.
874 * @param[in, out] dev_flow
875 * Pointer to the dev_flow.
877 * pointer to error structure.
880 * 0 on success, otherwise -errno and errno is set.
883 flow_dv_jump_tbl_resource_register
884 (struct rte_eth_dev *dev,
885 struct mlx5_flow_dv_jump_tbl_resource *resource,
886 struct mlx5_flow *dev_flow,
887 struct rte_flow_error *error)
889 struct mlx5_priv *priv = dev->data->dev_private;
890 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
892 /* Lookup a matching resource from cache. */
893 LIST_FOREACH(cache_resource, &priv->jump_tbl, next) {
894 if (resource->tbl == cache_resource->tbl) {
895 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
896 (void *)cache_resource,
897 rte_atomic32_read(&cache_resource->refcnt));
898 rte_atomic32_inc(&cache_resource->refcnt);
899 dev_flow->dv.jump = cache_resource;
903 /* Register new jump table resource. */
904 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
906 return rte_flow_error_set(error, ENOMEM,
907 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
908 "cannot allocate resource memory");
909 *cache_resource = *resource;
910 cache_resource->action =
911 mlx5_glue->dr_create_flow_action_dest_flow_tbl
912 (resource->tbl->obj);
913 if (!cache_resource->action) {
914 rte_free(cache_resource);
915 return rte_flow_error_set(error, ENOMEM,
916 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
917 NULL, "cannot create action");
919 rte_atomic32_init(&cache_resource->refcnt);
920 rte_atomic32_inc(&cache_resource->refcnt);
921 LIST_INSERT_HEAD(&priv->jump_tbl, cache_resource, next);
922 dev_flow->dv.jump = cache_resource;
923 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
924 (void *)cache_resource,
925 rte_atomic32_read(&cache_resource->refcnt));
930 * Get the size of a specific rte_flow_item_type.
932 * @param[in] item_type
933 * Tested rte_flow_item_type.
936 * sizeof struct item_type, 0 if void or irrelevant.
939 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
944 case RTE_FLOW_ITEM_TYPE_ETH:
945 retval = sizeof(struct rte_flow_item_eth);
947 case RTE_FLOW_ITEM_TYPE_VLAN:
948 retval = sizeof(struct rte_flow_item_vlan);
950 case RTE_FLOW_ITEM_TYPE_IPV4:
951 retval = sizeof(struct rte_flow_item_ipv4);
953 case RTE_FLOW_ITEM_TYPE_IPV6:
954 retval = sizeof(struct rte_flow_item_ipv6);
956 case RTE_FLOW_ITEM_TYPE_UDP:
957 retval = sizeof(struct rte_flow_item_udp);
959 case RTE_FLOW_ITEM_TYPE_TCP:
960 retval = sizeof(struct rte_flow_item_tcp);
962 case RTE_FLOW_ITEM_TYPE_VXLAN:
963 retval = sizeof(struct rte_flow_item_vxlan);
965 case RTE_FLOW_ITEM_TYPE_GRE:
966 retval = sizeof(struct rte_flow_item_gre);
968 case RTE_FLOW_ITEM_TYPE_NVGRE:
969 retval = sizeof(struct rte_flow_item_nvgre);
971 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
972 retval = sizeof(struct rte_flow_item_vxlan_gpe);
974 case RTE_FLOW_ITEM_TYPE_MPLS:
975 retval = sizeof(struct rte_flow_item_mpls);
977 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
985 #define MLX5_ENCAP_IPV4_VERSION 0x40
986 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
987 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
988 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
989 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
990 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
991 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
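/*
 * These constants supply defaults for header fields the application left as
 * zero in an encap definition: IPv4 version/IHL 0x45 and TTL 64, IPv6 version
 * 6 with hop limit 255, the VXLAN "VNI valid" flag and the VXLAN-GPE flags
 * byte. flow_dv_convert_encap_data() below applies them field by field.
 */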
994 * Convert the encap action data from a list of rte_flow_item to a raw buffer
997 * Pointer to rte_flow_item objects list.
999 * Pointer to the output buffer.
1001 * Pointer to the output buffer size.
1003 * Pointer to the error structure.
1006 * 0 on success, a negative errno value otherwise and rte_errno is set.
1009 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1010 size_t *size, struct rte_flow_error *error)
1012 struct ether_hdr *eth = NULL;
1013 struct vlan_hdr *vlan = NULL;
1014 struct ipv4_hdr *ipv4 = NULL;
1015 struct ipv6_hdr *ipv6 = NULL;
1016 struct udp_hdr *udp = NULL;
1017 struct vxlan_hdr *vxlan = NULL;
1018 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
1019 struct gre_hdr *gre = NULL;
1021 size_t temp_size = 0;
1024 return rte_flow_error_set(error, EINVAL,
1025 RTE_FLOW_ERROR_TYPE_ACTION,
1026 NULL, "invalid empty data");
1027 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1028 len = flow_dv_get_item_len(items->type);
1029 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1030 return rte_flow_error_set(error, EINVAL,
1031 RTE_FLOW_ERROR_TYPE_ACTION,
1032 (void *)items->type,
1033 "items total size is too big"
1034 " for encap action");
1035 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1036 switch (items->type) {
1037 case RTE_FLOW_ITEM_TYPE_ETH:
1038 eth = (struct ether_hdr *)&buf[temp_size];
1040 case RTE_FLOW_ITEM_TYPE_VLAN:
1041 vlan = (struct vlan_hdr *)&buf[temp_size];
1043 return rte_flow_error_set(error, EINVAL,
1044 RTE_FLOW_ERROR_TYPE_ACTION,
1045 (void *)items->type,
1046 "eth header not found");
1047 if (!eth->ether_type)
1048 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
1050 case RTE_FLOW_ITEM_TYPE_IPV4:
1051 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
1053 return rte_flow_error_set(error, EINVAL,
1054 RTE_FLOW_ERROR_TYPE_ACTION,
1055 (void *)items->type,
1056 "neither eth nor vlan"
1058 if (vlan && !vlan->eth_proto)
1059 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
1060 else if (eth && !eth->ether_type)
1061 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
1062 if (!ipv4->version_ihl)
1063 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1064 MLX5_ENCAP_IPV4_IHL_MIN;
1065 if (!ipv4->time_to_live)
1066 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1068 case RTE_FLOW_ITEM_TYPE_IPV6:
1069 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1071 return rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ACTION,
1073 (void *)items->type,
1074 "neither eth nor vlan"
1076 if (vlan && !vlan->eth_proto)
1077 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1078 else if (eth && !eth->ether_type)
1079 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1080 if (!ipv6->vtc_flow)
1082 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1083 if (!ipv6->hop_limits)
1084 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1086 case RTE_FLOW_ITEM_TYPE_UDP:
1087 udp = (struct udp_hdr *)&buf[temp_size];
1089 return rte_flow_error_set(error, EINVAL,
1090 RTE_FLOW_ERROR_TYPE_ACTION,
1091 (void *)items->type,
1092 "ip header not found");
1093 if (ipv4 && !ipv4->next_proto_id)
1094 ipv4->next_proto_id = IPPROTO_UDP;
1095 else if (ipv6 && !ipv6->proto)
1096 ipv6->proto = IPPROTO_UDP;
1098 case RTE_FLOW_ITEM_TYPE_VXLAN:
1099 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1101 return rte_flow_error_set(error, EINVAL,
1102 RTE_FLOW_ERROR_TYPE_ACTION,
1103 (void *)items->type,
1104 "udp header not found");
1106 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1107 if (!vxlan->vx_flags)
1109 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1111 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1112 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1114 return rte_flow_error_set(error, EINVAL,
1115 RTE_FLOW_ERROR_TYPE_ACTION,
1116 (void *)items->type,
1117 "udp header not found");
1118 if (!vxlan_gpe->proto)
1119 return rte_flow_error_set(error, EINVAL,
1120 RTE_FLOW_ERROR_TYPE_ACTION,
1121 (void *)items->type,
1122 "next protocol not found");
1125 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1126 if (!vxlan_gpe->vx_flags)
1127 vxlan_gpe->vx_flags =
1128 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1130 case RTE_FLOW_ITEM_TYPE_GRE:
1131 case RTE_FLOW_ITEM_TYPE_NVGRE:
1132 gre = (struct gre_hdr *)&buf[temp_size];
1134 return rte_flow_error_set(error, EINVAL,
1135 RTE_FLOW_ERROR_TYPE_ACTION,
1136 (void *)items->type,
1137 "next protocol not found");
1139 return rte_flow_error_set(error, EINVAL,
1140 RTE_FLOW_ERROR_TYPE_ACTION,
1141 (void *)items->type,
1142 "ip header not found");
1143 if (ipv4 && !ipv4->next_proto_id)
1144 ipv4->next_proto_id = IPPROTO_GRE;
1145 else if (ipv6 && !ipv6->proto)
1146 ipv6->proto = IPPROTO_GRE;
1148 case RTE_FLOW_ITEM_TYPE_VOID:
1151 return rte_flow_error_set(error, EINVAL,
1152 RTE_FLOW_ERROR_TYPE_ACTION,
1153 (void *)items->type,
1154 "unsupported item type");
1164 * Convert L2 encap action to DV specification.
1167 * Pointer to rte_eth_dev structure.
1169 * Pointer to action structure.
1170 * @param[in, out] dev_flow
1171 * Pointer to the mlx5_flow.
1173 * Pointer to the error structure.
1176 * 0 on success, a negative errno value otherwise and rte_errno is set.
1179 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1180 const struct rte_flow_action *action,
1181 struct mlx5_flow *dev_flow,
1182 struct rte_flow_error *error)
1184 const struct rte_flow_item *encap_data;
1185 const struct rte_flow_action_raw_encap *raw_encap_data;
1186 struct mlx5_flow_dv_encap_decap_resource res = {
1188 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1189 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1192 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1194 (const struct rte_flow_action_raw_encap *)action->conf;
1195 res.size = raw_encap_data->size;
1196 memcpy(res.buf, raw_encap_data->data, res.size);
1198 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1200 ((const struct rte_flow_action_vxlan_encap *)
1201 action->conf)->definition;
1204 ((const struct rte_flow_action_nvgre_encap *)
1205 action->conf)->definition;
1206 if (flow_dv_convert_encap_data(encap_data, res.buf,
1210 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1211 return rte_flow_error_set(error, EINVAL,
1212 RTE_FLOW_ERROR_TYPE_ACTION,
1213 NULL, "can't create L2 encap action");
1218 * Convert L2 decap action to DV specification.
1221 * Pointer to rte_eth_dev structure.
1222 * @param[in, out] dev_flow
1223 * Pointer to the mlx5_flow.
1225 * Pointer to the error structure.
1228 * 0 on success, a negative errno value otherwise and rte_errno is set.
1231 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1232 struct mlx5_flow *dev_flow,
1233 struct rte_flow_error *error)
1235 struct mlx5_flow_dv_encap_decap_resource res = {
1238 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1239 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1242 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1243 return rte_flow_error_set(error, EINVAL,
1244 RTE_FLOW_ERROR_TYPE_ACTION,
1245 NULL, "can't create L2 decap action");
1250 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1253 * Pointer to rte_eth_dev structure.
1255 * Pointer to action structure.
1256 * @param[in, out] dev_flow
1257 * Pointer to the mlx5_flow.
1259 * Pointer to the flow attributes.
1261 * Pointer to the error structure.
1264 * 0 on success, a negative errno value otherwise and rte_errno is set.
1267 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1268 const struct rte_flow_action *action,
1269 struct mlx5_flow *dev_flow,
1270 const struct rte_flow_attr *attr,
1271 struct rte_flow_error *error)
1273 const struct rte_flow_action_raw_encap *encap_data;
1274 struct mlx5_flow_dv_encap_decap_resource res;
1276 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1277 res.size = encap_data->size;
1278 memcpy(res.buf, encap_data->data, res.size);
1279 res.reformat_type = attr->egress ?
1280 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1281 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1282 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1283 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1284 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1285 return rte_flow_error_set(error, EINVAL,
1286 RTE_FLOW_ERROR_TYPE_ACTION,
1287 NULL, "can't create encap action");
1292 * Validate the modify-header actions.
1294 * @param[in] action_flags
1295 * Holds the actions detected until now.
1297 * Pointer to the modify action.
1299 * Pointer to error structure.
1302 * 0 on success, a negative errno value otherwise and rte_errno is set.
1305 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1306 const struct rte_flow_action *action,
1307 struct rte_flow_error *error)
1309 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1310 return rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1312 NULL, "action configuration not set");
1313 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1314 return rte_flow_error_set(error, EINVAL,
1315 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1316 "can't have encap action before"
1322 * Validate the modify-header MAC address actions.
1324 * @param[in] action_flags
1325 * Holds the actions detected until now.
1327 * Pointer to the modify action.
1328 * @param[in] item_flags
1329 * Holds the items detected.
1331 * Pointer to error structure.
1334 * 0 on success, a negative errno value otherwise and rte_errno is set.
1337 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1338 const struct rte_flow_action *action,
1339 const uint64_t item_flags,
1340 struct rte_flow_error *error)
1344 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1346 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1347 return rte_flow_error_set(error, EINVAL,
1348 RTE_FLOW_ERROR_TYPE_ACTION,
1350 "no L2 item in pattern");
1356 * Validate the modify-header IPv4 address actions.
1358 * @param[in] action_flags
1359 * Holds the actions detected until now.
1361 * Pointer to the modify action.
1362 * @param[in] item_flags
1363 * Holds the items detected.
1365 * Pointer to error structure.
1368 * 0 on success, a negative errno value otherwise and rte_errno is set.
1371 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1372 const struct rte_flow_action *action,
1373 const uint64_t item_flags,
1374 struct rte_flow_error *error)
1378 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1380 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1381 return rte_flow_error_set(error, EINVAL,
1382 RTE_FLOW_ERROR_TYPE_ACTION,
1384 "no ipv4 item in pattern");
1390 * Validate the modify-header IPv6 address actions.
1392 * @param[in] action_flags
1393 * Holds the actions detected until now.
1395 * Pointer to the modify action.
1396 * @param[in] item_flags
1397 * Holds the items detected.
1399 * Pointer to error structure.
1402 * 0 on success, a negative errno value otherwise and rte_errno is set.
1405 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1406 const struct rte_flow_action *action,
1407 const uint64_t item_flags,
1408 struct rte_flow_error *error)
1412 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1414 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1415 return rte_flow_error_set(error, EINVAL,
1416 RTE_FLOW_ERROR_TYPE_ACTION,
1418 "no ipv6 item in pattern");
1424 * Validate the modify-header TP actions.
1426 * @param[in] action_flags
1427 * Holds the actions detected until now.
1429 * Pointer to the modify action.
1430 * @param[in] item_flags
1431 * Holds the items detected.
1433 * Pointer to error structure.
1436 * 0 on success, a negative errno value otherwise and rte_errno is set.
1439 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1440 const struct rte_flow_action *action,
1441 const uint64_t item_flags,
1442 struct rte_flow_error *error)
1446 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1448 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1449 return rte_flow_error_set(error, EINVAL,
1450 RTE_FLOW_ERROR_TYPE_ACTION,
1451 NULL, "no transport layer "
1458 * Validate the modify-header TTL actions.
1460 * @param[in] action_flags
1461 * Holds the actions detected until now.
1463 * Pointer to the modify action.
1464 * @param[in] item_flags
1465 * Holds the items detected.
1467 * Pointer to error structure.
1470 * 0 on success, a negative errno value otherwise and rte_errno is set.
1473 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1474 const struct rte_flow_action *action,
1475 const uint64_t item_flags,
1476 struct rte_flow_error *error)
1480 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1482 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1483 return rte_flow_error_set(error, EINVAL,
1484 RTE_FLOW_ERROR_TYPE_ACTION,
1486 "no IP protocol in pattern");
1492 * Validate jump action.
1495 * Pointer to the modify action.
1497 * The group of the current flow.
1499 * Pointer to error structure.
1502 * 0 on success, a negative errno value otherwise and rte_errno is set.
1505 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1507 struct rte_flow_error *error)
1509 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1510 return rte_flow_error_set(error, EINVAL,
1511 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1512 NULL, "action configuration not set");
1513 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1514 return rte_flow_error_set(error, EINVAL,
1515 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1516 "target group must be higher then"
1517 " the current flow group");
1523 * Find existing modify-header resource or create and register a new one.
1525 * @param[in, out] dev
1526 * Pointer to rte_eth_dev structure.
1527 * @param[in, out] resource
1528 * Pointer to modify-header resource.
1529 * @param[in, out] dev_flow
1530 * Pointer to the dev_flow.
1532 * pointer to error structure.
1535 * 0 on success, otherwise -errno and errno is set.
1538 flow_dv_modify_hdr_resource_register
1539 (struct rte_eth_dev *dev,
1540 struct mlx5_flow_dv_modify_hdr_resource *resource,
1541 struct mlx5_flow *dev_flow,
1542 struct rte_flow_error *error)
1544 struct mlx5_priv *priv = dev->data->dev_private;
1545 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1547 struct mlx5dv_dr_ns *ns =
1548 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
1549 priv->tx_ns : priv->rx_ns;
1551 /* Lookup a matching resource from cache. */
1552 LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
1553 if (resource->ft_type == cache_resource->ft_type &&
1554 resource->actions_num == cache_resource->actions_num &&
1555 !memcmp((const void *)resource->actions,
1556 (const void *)cache_resource->actions,
1557 (resource->actions_num *
1558 sizeof(resource->actions[0])))) {
1559 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1560 (void *)cache_resource,
1561 rte_atomic32_read(&cache_resource->refcnt));
1562 rte_atomic32_inc(&cache_resource->refcnt);
1563 dev_flow->dv.modify_hdr = cache_resource;
1567 /* Register new modify-header resource. */
1568 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1569 if (!cache_resource)
1570 return rte_flow_error_set(error, ENOMEM,
1571 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1572 "cannot allocate resource memory");
1573 *cache_resource = *resource;
1574 cache_resource->verbs_action =
1575 mlx5_glue->dv_create_flow_action_modify_header
1576 (priv->sh->ctx, cache_resource->ft_type,
1578 cache_resource->actions_num *
1579 sizeof(cache_resource->actions[0]),
1580 (uint64_t *)cache_resource->actions);
1581 if (!cache_resource->verbs_action) {
1582 rte_free(cache_resource);
1583 return rte_flow_error_set(error, ENOMEM,
1584 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1585 NULL, "cannot create action");
1587 rte_atomic32_init(&cache_resource->refcnt);
1588 rte_atomic32_inc(&cache_resource->refcnt);
1589 LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
1590 dev_flow->dv.modify_hdr = cache_resource;
1591 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1592 (void *)cache_resource,
1593 rte_atomic32_read(&cache_resource->refcnt));
1598 * Get or create a flow counter.
1601 * Pointer to the Ethernet device structure.
1603 * Indicate if this counter is shared with other flows.
1605 * Counter identifier.
1608 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
1610 static struct mlx5_flow_counter *
1611 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1613 struct mlx5_priv *priv = dev->data->dev_private;
1614 struct mlx5_flow_counter *cnt = NULL;
1615 struct mlx5_devx_counter_set *dcs = NULL;
1618 if (!priv->config.devx) {
1623 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1624 if (cnt->shared && cnt->id == id) {
1630 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1631 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1636 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1639 struct mlx5_flow_counter tmpl = {
1645 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1651 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1661 * Release a flow counter.
1663 * @param[in] counter
1664 * Pointer to the counter handle.
1667 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1673 if (--counter->ref_cnt == 0) {
1674 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1676 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1677 LIST_REMOVE(counter, next);
1678 rte_free(counter->dcs);
1684 * Verify the @p attributes will be correctly understood by the NIC and store
1685 * them in the @p flow if everything is correct.
1688 * Pointer to dev struct.
1689 * @param[in] attributes
1690 * Pointer to flow attributes
1692 * Pointer to error structure.
1695 * 0 on success, a negative errno value otherwise and rte_errno is set.
1698 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1699 const struct rte_flow_attr *attributes,
1700 struct rte_flow_error *error)
1702 struct mlx5_priv *priv = dev->data->dev_private;
1703 uint32_t priority_max = priv->config.flow_prio - 1;
1705 #ifndef HAVE_MLX5DV_DR
1706 if (attributes->group)
1707 return rte_flow_error_set(error, ENOTSUP,
1708 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1710 "groups is not supported");
1712 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1713 attributes->priority >= priority_max)
1714 return rte_flow_error_set(error, ENOTSUP,
1715 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1717 "priority out of range");
1718 if (attributes->transfer)
1719 return rte_flow_error_set(error, ENOTSUP,
1720 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1722 "transfer is not supported");
1723 if (!(attributes->egress ^ attributes->ingress))
1724 return rte_flow_error_set(error, ENOTSUP,
1725 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1726 "must specify exactly one of "
1727 "ingress or egress");
1732 * Internal validation function. For validating both actions and items.
1735 * Pointer to the rte_eth_dev structure.
1737 * Pointer to the flow attributes.
1739 * Pointer to the list of items.
1740 * @param[in] actions
1741 * Pointer to the list of actions.
1743 * Pointer to the error structure.
1746 * 0 on success, a negative errno value otherwise and rte_errno is set.
1749 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1750 const struct rte_flow_item items[],
1751 const struct rte_flow_action actions[],
1752 struct rte_flow_error *error)
1755 uint64_t action_flags = 0;
1756 uint64_t item_flags = 0;
1757 uint64_t last_item = 0;
1759 uint8_t next_protocol = 0xff;
1764 ret = flow_dv_validate_attributes(dev, attr, error);
1767 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1768 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1769 switch (items->type) {
1770 case RTE_FLOW_ITEM_TYPE_VOID:
1772 case RTE_FLOW_ITEM_TYPE_ETH:
1773 ret = mlx5_flow_validate_item_eth(items, item_flags,
1777 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1778 MLX5_FLOW_LAYER_OUTER_L2;
1780 case RTE_FLOW_ITEM_TYPE_VLAN:
1781 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1785 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1786 MLX5_FLOW_LAYER_OUTER_VLAN;
1788 case RTE_FLOW_ITEM_TYPE_IPV4:
1789 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1793 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1794 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1795 if (items->mask != NULL &&
1796 ((const struct rte_flow_item_ipv4 *)
1797 items->mask)->hdr.next_proto_id) {
1799 ((const struct rte_flow_item_ipv4 *)
1800 (items->spec))->hdr.next_proto_id;
1802 ((const struct rte_flow_item_ipv4 *)
1803 (items->mask))->hdr.next_proto_id;
1805 /* Reset for inner layer. */
1806 next_protocol = 0xff;
1809 case RTE_FLOW_ITEM_TYPE_IPV6:
1810 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1814 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1815 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1816 if (items->mask != NULL &&
1817 ((const struct rte_flow_item_ipv6 *)
1818 items->mask)->hdr.proto) {
1820 ((const struct rte_flow_item_ipv6 *)
1821 items->spec)->hdr.proto;
1823 ((const struct rte_flow_item_ipv6 *)
1824 items->mask)->hdr.proto;
1826 /* Reset for inner layer. */
1827 next_protocol = 0xff;
1830 case RTE_FLOW_ITEM_TYPE_TCP:
1831 ret = mlx5_flow_validate_item_tcp
1834 &rte_flow_item_tcp_mask,
1838 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1839 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1841 case RTE_FLOW_ITEM_TYPE_UDP:
1842 ret = mlx5_flow_validate_item_udp(items, item_flags,
1847 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1848 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1850 case RTE_FLOW_ITEM_TYPE_GRE:
1851 case RTE_FLOW_ITEM_TYPE_NVGRE:
1852 ret = mlx5_flow_validate_item_gre(items, item_flags,
1853 next_protocol, error);
1856 last_item = MLX5_FLOW_LAYER_GRE;
1858 case RTE_FLOW_ITEM_TYPE_VXLAN:
1859 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1863 last_item = MLX5_FLOW_LAYER_VXLAN;
1865 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1866 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1871 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1873 case RTE_FLOW_ITEM_TYPE_MPLS:
1874 ret = mlx5_flow_validate_item_mpls(dev, items,
1879 last_item = MLX5_FLOW_LAYER_MPLS;
1881 case RTE_FLOW_ITEM_TYPE_META:
1882 ret = flow_dv_validate_item_meta(dev, items, attr,
1886 last_item = MLX5_FLOW_ITEM_METADATA;
1889 return rte_flow_error_set(error, ENOTSUP,
1890 RTE_FLOW_ERROR_TYPE_ITEM,
1891 NULL, "item not supported");
1893 item_flags |= last_item;
1895 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1896 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1897 return rte_flow_error_set(error, ENOTSUP,
1898 RTE_FLOW_ERROR_TYPE_ACTION,
1899 actions, "too many actions");
1900 switch (actions->type) {
1901 case RTE_FLOW_ACTION_TYPE_VOID:
1903 case RTE_FLOW_ACTION_TYPE_FLAG:
1904 ret = mlx5_flow_validate_action_flag(action_flags,
1908 action_flags |= MLX5_FLOW_ACTION_FLAG;
1911 case RTE_FLOW_ACTION_TYPE_MARK:
1912 ret = mlx5_flow_validate_action_mark(actions,
1917 action_flags |= MLX5_FLOW_ACTION_MARK;
1920 case RTE_FLOW_ACTION_TYPE_DROP:
1921 ret = mlx5_flow_validate_action_drop(action_flags,
1925 action_flags |= MLX5_FLOW_ACTION_DROP;
1928 case RTE_FLOW_ACTION_TYPE_QUEUE:
1929 ret = mlx5_flow_validate_action_queue(actions,
1934 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1937 case RTE_FLOW_ACTION_TYPE_RSS:
1938 ret = mlx5_flow_validate_action_rss(actions,
1943 action_flags |= MLX5_FLOW_ACTION_RSS;
1946 case RTE_FLOW_ACTION_TYPE_COUNT:
1947 ret = flow_dv_validate_action_count(dev, error);
1950 action_flags |= MLX5_FLOW_ACTION_COUNT;
1953 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1954 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1955 ret = flow_dv_validate_action_l2_encap(action_flags,
1960 action_flags |= actions->type ==
1961 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
1962 MLX5_FLOW_ACTION_VXLAN_ENCAP :
1963 MLX5_FLOW_ACTION_NVGRE_ENCAP;
1966 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1967 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
1968 ret = flow_dv_validate_action_l2_decap(action_flags,
1972 action_flags |= actions->type ==
1973 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
1974 MLX5_FLOW_ACTION_VXLAN_DECAP :
1975 MLX5_FLOW_ACTION_NVGRE_DECAP;
1978 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1979 ret = flow_dv_validate_action_raw_encap(action_flags,
1984 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
1987 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
1988 ret = flow_dv_validate_action_raw_decap(action_flags,
1993 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
1996 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
1997 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
1998 ret = flow_dv_validate_action_modify_mac(action_flags,
2004 /* Count all modify-header actions as one action. */
2005 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2007 action_flags |= actions->type ==
2008 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2009 MLX5_FLOW_ACTION_SET_MAC_SRC :
2010 MLX5_FLOW_ACTION_SET_MAC_DST;
2013 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2014 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2015 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2021 /* Count all modify-header actions as one action. */
2022 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2024 action_flags |= actions->type ==
2025 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2026 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2027 MLX5_FLOW_ACTION_SET_IPV4_DST;
2029 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2030 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2031 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2037 /* Count all modify-header actions as one action. */
2038 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2040 action_flags |= actions->type ==
2041 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2042 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2043 MLX5_FLOW_ACTION_SET_IPV6_DST;
2045 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2046 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2047 ret = flow_dv_validate_action_modify_tp(action_flags,
2053 /* Count all modify-header actions as one action. */
2054 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2056 action_flags |= actions->type ==
2057 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2058 MLX5_FLOW_ACTION_SET_TP_SRC :
2059 MLX5_FLOW_ACTION_SET_TP_DST;
2061 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2062 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2063 ret = flow_dv_validate_action_modify_ttl(action_flags,
2069 /* Count all modify-header actions as one action. */
2070 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2072 action_flags |= actions->type ==
2073 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2074 MLX5_FLOW_ACTION_SET_TTL :
2075 MLX5_FLOW_ACTION_DEC_TTL;
2077 case RTE_FLOW_ACTION_TYPE_JUMP:
2078 ret = flow_dv_validate_action_jump(actions,
2079 attr->group, error);
2083 action_flags |= MLX5_FLOW_ACTION_JUMP;
2086 return rte_flow_error_set(error, ENOTSUP,
2087 RTE_FLOW_ERROR_TYPE_ACTION,
2089 "action not supported");
2092 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2093 return rte_flow_error_set(error, EINVAL,
2094 RTE_FLOW_ERROR_TYPE_ACTION, actions,
2095 "no fate action is found");
2100 * Internal preparation function. Allocates the DV flow size;
2101 * this size is constant.
2104 * Pointer to the flow attributes.
2106 * Pointer to the list of items.
2107 * @param[in] actions
2108 * Pointer to the list of actions.
2110 * Pointer to the error structure.
2113 * Pointer to mlx5_flow object on success,
2114 * otherwise NULL and rte_errno is set.
2116 static struct mlx5_flow *
2117 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2118 const struct rte_flow_item items[] __rte_unused,
2119 const struct rte_flow_action actions[] __rte_unused,
2120 struct rte_flow_error *error)
2122 uint32_t size = sizeof(struct mlx5_flow);
2123 struct mlx5_flow *flow;
2125 flow = rte_calloc(__func__, 1, size, 0);
2127 rte_flow_error_set(error, ENOMEM,
2128 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2129 "not enough memory to create flow");
2132 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2138 * Sanity check for match mask and value. Similar to check_valid_spec() in
2139 * kernel driver. If an unmasked bit is present in the value, it returns failure.
2142 * pointer to match mask buffer.
2143 * @param match_value
2144 * pointer to match value buffer.
2147 * 0 if valid, -EINVAL otherwise.
2150 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2152 uint8_t *m = match_mask;
2153 uint8_t *v = match_value;
2156 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2159 "match_value differs from match_criteria"
2160 " %p[%u] != %p[%u]",
2161 match_value, i, match_mask, i);
2170 * Add Ethernet item to matcher and to the value.
2172 * @param[in, out] matcher
2174 * @param[in, out] key
2175 * Flow matcher value.
2177 * Flow pattern to translate.
2179 * Item is inner pattern.
2182 flow_dv_translate_item_eth(void *matcher, void *key,
2183 const struct rte_flow_item *item, int inner)
2185 const struct rte_flow_item_eth *eth_m = item->mask;
2186 const struct rte_flow_item_eth *eth_v = item->spec;
2187 const struct rte_flow_item_eth nic_mask = {
2188 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2189 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2190 .type = RTE_BE16(0xffff),
2202 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2204 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2206 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2208 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2210 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2211 &eth_m->dst, sizeof(eth_m->dst));
2212 /* The value must be in the range of the mask. */
2213 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2214 for (i = 0; i < sizeof(eth_m->dst); ++i)
2215 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2216 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2217 &eth_m->src, sizeof(eth_m->src));
2218 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2219 /* The value must be in the range of the mask. */
2220 for (i = 0; i < sizeof(eth_m->dst); ++i)
2221 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2222 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2223 rte_be_to_cpu_16(eth_m->type));
2224 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2225 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2229 * Add VLAN item to matcher and to the value.
2231 * @param[in, out] matcher
2233 * @param[in, out] key
2234 * Flow matcher value.
2236 * Flow pattern to translate.
2238 * Item is inner pattern.
2241 flow_dv_translate_item_vlan(void *matcher, void *key,
2242 const struct rte_flow_item *item,
2245 const struct rte_flow_item_vlan *vlan_m = item->mask;
2246 const struct rte_flow_item_vlan *vlan_v = item->spec;
2247 const struct rte_flow_item_vlan nic_mask = {
2248 .tci = RTE_BE16(0x0fff),
2249 .inner_type = RTE_BE16(0xffff),
2261 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2263 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2265 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2267 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2269 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2270 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2271 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2272 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2273 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2274 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2275 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2276 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2277 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2278 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
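/*
 * Note on the TCI split above: the 16-bit VLAN TCI carries the VID in bits
 * 0-11, the CFI/DEI bit in bit 12 and the 3-bit PCP priority in bits 13-15,
 * hence first_vid gets the TCI value (truncated to the field width by
 * MLX5_SET()), first_cfi gets tci >> 12 and first_prio gets tci >> 13.
 */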
2282 * Add IPV4 item to matcher and to the value.
2284 * @param[in, out] matcher
2286 * @param[in, out] key
2287 * Flow matcher value.
2289 * Flow pattern to translate.
2291 * Item is inner pattern.
2293 * The group to insert the rule.
2296 flow_dv_translate_item_ipv4(void *matcher, void *key,
2297 const struct rte_flow_item *item,
2298 int inner, uint32_t group)
2300 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2301 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2302 const struct rte_flow_item_ipv4 nic_mask = {
2304 .src_addr = RTE_BE32(0xffffffff),
2305 .dst_addr = RTE_BE32(0xffffffff),
2306 .type_of_service = 0xff,
2307 .next_proto_id = 0xff,
2317 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2319 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2321 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2323 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2326 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2328 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2329 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2334 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2335 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2336 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2337 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2338 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2339 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2340 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2341 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2342 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2343 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2344 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2345 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2346 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2347 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2348 ipv4_m->hdr.type_of_service);
2349 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2350 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2351 ipv4_m->hdr.type_of_service >> 2);
2352 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2353 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2354 ipv4_m->hdr.next_proto_id);
2355 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2356 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
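/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * IPv4 type-of-service byte is DSCP in its upper six bits and ECN in its
 * lower two, hence ip_dscp is written from "tos >> 2" above while
 * MLX5_SET() truncates the ip_ecn field to its two-bit width.
 */
static inline void
flow_dv_tos_decompose(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
	*dscp = tos >> 2;  /* DSCP: bits 7..2 */
	*ecn = tos & 0x3;  /* ECN: bits 1..0 */
}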
2360 * Add IPV6 item to matcher and to the value.
2362 * @param[in, out] matcher
2364 * @param[in, out] key
2365 * Flow matcher value.
2367 * Flow pattern to translate.
2369 * Item is inner pattern.
2371 * The group to insert the rule.
2374 flow_dv_translate_item_ipv6(void *matcher, void *key,
2375 const struct rte_flow_item *item,
2376 int inner, uint32_t group)
2378 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2379 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2380 const struct rte_flow_item_ipv6 nic_mask = {
2383 "\xff\xff\xff\xff\xff\xff\xff\xff"
2384 "\xff\xff\xff\xff\xff\xff\xff\xff",
2386 "\xff\xff\xff\xff\xff\xff\xff\xff"
2387 "\xff\xff\xff\xff\xff\xff\xff\xff",
2388 .vtc_flow = RTE_BE32(0xffffffff),
2395 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2396 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2405 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2407 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2409 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2411 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2414 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2416 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2417 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2422 size = sizeof(ipv6_m->hdr.dst_addr);
2423 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2424 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2425 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2426 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2427 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2428 for (i = 0; i < size; ++i)
2429 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2430 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2431 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2432 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2433 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2434 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2435 for (i = 0; i < size; ++i)
2436 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2438 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2439 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2440 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2441 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2442 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2443 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2446 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2448 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2451 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2453 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2457 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2459 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2460 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
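/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * IPv6 vtc_flow word holds version (bits 31..28), traffic class
 * (bits 27..20) and flow label (bits 19..0). "vtc >> 20" therefore aligns
 * the traffic class for the ip_ecn field (MLX5_SET() keeps only its two
 * ECN bits) and "vtc >> 22" yields the six DSCP bits.
 */
static inline void
flow_dv_vtc_decompose(uint32_t vtc, uint8_t *tclass, uint32_t *flow_label)
{
	*tclass = (vtc >> 20) & 0xff;  /* traffic class: bits 27..20 */
	*flow_label = vtc & 0xfffff;   /* flow label: bits 19..0 */
}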
2464 * Add TCP item to matcher and to the value.
2466 * @param[in, out] matcher
2468 * @param[in, out] key
2469 * Flow matcher value.
2471 * Flow pattern to translate.
2473 * Item is inner pattern.
2476 flow_dv_translate_item_tcp(void *matcher, void *key,
2477 const struct rte_flow_item *item,
2480 const struct rte_flow_item_tcp *tcp_m = item->mask;
2481 const struct rte_flow_item_tcp *tcp_v = item->spec;
2486 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2488 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2490 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2492 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2494 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2495 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2499 tcp_m = &rte_flow_item_tcp_mask;
2500 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2501 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2502 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2503 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2504 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2505 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2506 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2507 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2511 * Add UDP item to matcher and to the value.
2513 * @param[in, out] matcher
2515 * @param[in, out] key
2516 * Flow matcher value.
2518 * Flow pattern to translate.
2520 * Item is inner pattern.
2523 flow_dv_translate_item_udp(void *matcher, void *key,
2524 const struct rte_flow_item *item,
2527 const struct rte_flow_item_udp *udp_m = item->mask;
2528 const struct rte_flow_item_udp *udp_v = item->spec;
2533 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2535 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2537 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2539 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2541 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2542 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2546 udp_m = &rte_flow_item_udp_mask;
2547 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2548 rte_be_to_cpu_16(udp_m->hdr.src_port));
2549 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2550 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2551 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2552 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2553 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2554 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2558 * Add GRE item to matcher and to the value.
2560 * @param[in, out] matcher
2562 * @param[in, out] key
2563 * Flow matcher value.
2565 * Flow pattern to translate.
2567 * Item is inner pattern.
2570 flow_dv_translate_item_gre(void *matcher, void *key,
2571 const struct rte_flow_item *item,
2574 const struct rte_flow_item_gre *gre_m = item->mask;
2575 const struct rte_flow_item_gre *gre_v = item->spec;
2578 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2579 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2582 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2584 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2586 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2588 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2590 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2591 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2595 gre_m = &rte_flow_item_gre_mask;
2596 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2597 rte_be_to_cpu_16(gre_m->protocol));
2598 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2599 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2603 * Add NVGRE item to matcher and to the value.
2605 * @param[in, out] matcher
2607 * @param[in, out] key
2608 * Flow matcher value.
2610 * Flow pattern to translate.
2612 * Item is inner pattern.
2615 flow_dv_translate_item_nvgre(void *matcher, void *key,
2616 const struct rte_flow_item *item,
2619 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2620 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2621 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2622 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2623 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2624 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2630 flow_dv_translate_item_gre(matcher, key, item, inner);
2634 nvgre_m = &rte_flow_item_nvgre_mask;
2635 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2636 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2637 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2638 memcpy(gre_key_m, tni_flow_id_m, size);
2639 for (i = 0; i < size; ++i)
2640 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
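/*
 * Note: NVGRE reuses the GRE translation above for the IP and GRE protocol
 * fields; the additional 24-bit TNI and 8-bit flow ID are matched together
 * through the high 32 bits of the GRE key (gre_key_h), masked byte by byte.
 */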
2644 * Add VXLAN item to matcher and to the value.
2646 * @param[in, out] matcher
2648 * @param[in, out] key
2649 * Flow matcher value.
2651 * Flow pattern to translate.
2653 * Item is inner pattern.
2656 flow_dv_translate_item_vxlan(void *matcher, void *key,
2657 const struct rte_flow_item *item,
2660 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2661 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2664 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2665 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2673 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2675 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2677 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2679 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2681 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2682 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2683 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2684 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2685 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2690 vxlan_m = &rte_flow_item_vxlan_mask;
2691 size = sizeof(vxlan_m->vni);
2692 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2693 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2694 memcpy(vni_m, vxlan_m->vni, size);
2695 for (i = 0; i < size; ++i)
2696 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
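/*
 * Note: the VNI is a 24-bit field; as in the other item translators the
 * value is ANDed with the mask byte by byte so it never carries bits
 * outside the match criteria.
 */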
2700 * Add MPLS item to matcher and to the value.
2702 * @param[in, out] matcher
2704 * @param[in, out] key
2705 * Flow matcher value.
2707 * Flow pattern to translate.
2708 * @param[in] prev_layer
2709 * The protocol layer indicated by the previous item.
2711 * Item is inner pattern.
2714 flow_dv_translate_item_mpls(void *matcher, void *key,
2715 const struct rte_flow_item *item,
2716 uint64_t prev_layer,
2719 const uint32_t *in_mpls_m = item->mask;
2720 const uint32_t *in_mpls_v = item->spec;
2721 uint32_t *out_mpls_m = NULL;
2722 uint32_t *out_mpls_v = NULL;
2723 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2724 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2725 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2727 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2728 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2729 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2731 switch (prev_layer) {
2732 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2733 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2734 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2735 MLX5_UDP_PORT_MPLS);
2737 case MLX5_FLOW_LAYER_GRE:
2738 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2739 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2743 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2744 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2751 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
2752 switch (prev_layer) {
2753 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2755 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2756 outer_first_mpls_over_udp);
2758 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2759 outer_first_mpls_over_udp);
2761 case MLX5_FLOW_LAYER_GRE:
2763 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2764 outer_first_mpls_over_gre);
2766 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2767 outer_first_mpls_over_gre);
2770 /* Inner MPLS not over GRE is not supported. */
2773 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2777 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2783 if (out_mpls_m && out_mpls_v) {
2784 *out_mpls_m = *in_mpls_m;
2785 *out_mpls_v = *in_mpls_v & *in_mpls_m;
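/*
 * Note: the 32-bit MPLS label stack entry is copied as a whole; which
 * misc_parameters_2 field receives it is selected by prev_layer above
 * (MPLS over UDP vs. MPLS over GRE), and the value is kept within the mask.
 */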
2790 * Add META item to matcher and to the value.
2792 * @param[in, out] matcher
2794 * @param[in, out] key
2795 * Flow matcher value.
2797 * Flow pattern to translate.
2799 * Item is inner pattern.
2802 flow_dv_translate_item_meta(void *matcher, void *key,
2803 const struct rte_flow_item *item)
2805 const struct rte_flow_item_meta *meta_m;
2806 const struct rte_flow_item_meta *meta_v;
2808 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2810 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2812 meta_m = (const void *)item->mask;
2814 meta_m = &rte_flow_item_meta_mask;
2815 meta_v = (const void *)item->spec;
2817 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2818 rte_be_to_cpu_32(meta_m->data));
2819 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2820 rte_be_to_cpu_32(meta_v->data & meta_m->data));
2824 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2826 #define HEADER_IS_ZERO(match_criteria, headers) \
2827 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2828 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2831 * Calculate flow matcher enable bitmap.
2833 * @param match_criteria
2834 * Pointer to flow matcher criteria.
2837 * Bitmap of enabled fields.
2840 flow_dv_matcher_enable(uint32_t *match_criteria)
2842 uint8_t match_criteria_enable;
2844 match_criteria_enable =
2845 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2846 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2847 match_criteria_enable |=
2848 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2849 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2850 match_criteria_enable |=
2851 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2852 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2853 match_criteria_enable |=
2854 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2855 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2856 #ifdef HAVE_MLX5DV_DR
2857 match_criteria_enable |=
2858 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2859 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2861 return match_criteria_enable;
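/*
 * Example (illustrative): a matcher whose mask touches only the outer
 * headers and the tunnel misc section yields
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT)
 * so the device ignores the remaining all-zero criteria sections.
 */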
2868 * @param[in, out] dev
2869 * Pointer to rte_eth_dev structure.
2870 * @param[in] table_id
2873 * Direction of the table.
2875 * Pointer to the error structure.
2878 * Returns the table resource based on the index, or NULL in case of failure.
2880 static struct mlx5_flow_tbl_resource *
2881 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2882 uint32_t table_id, uint8_t egress,
2883 struct rte_flow_error *error)
2885 struct mlx5_priv *priv = dev->data->dev_private;
2886 struct mlx5_flow_tbl_resource *tbl;
2888 #ifdef HAVE_MLX5DV_DR
2890 tbl = &priv->tx_tbl[table_id];
2892 tbl->obj = mlx5_glue->dr_create_flow_tbl
2893 (priv->tx_ns, table_id);
2895 tbl = &priv->rx_tbl[table_id];
2897 tbl->obj = mlx5_glue->dr_create_flow_tbl
2898 (priv->rx_ns, table_id);
2901 rte_flow_error_set(error, ENOMEM,
2902 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2903 NULL, "cannot create table");
2906 rte_atomic32_inc(&tbl->refcnt);
2912 return &priv->tx_tbl[table_id];
2914 return &priv->rx_tbl[table_id];
2919 * Release a flow table.
2922 * Table resource to be released.
2925 * Returns 0 if the table was released, 1 otherwise.
2928 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2932 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2933 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2941 * Register the flow matcher.
2943 * @param[in, out] dev
2944 * Pointer to rte_eth_dev structure.
2945 * @param[in, out] matcher
2946 * Pointer to flow matcher.
2947 * @param[in, out] dev_flow
2948 * Pointer to the dev_flow.
2950 * Pointer to the error structure.
2953 * 0 on success, otherwise -errno and errno is set.
2956 flow_dv_matcher_register(struct rte_eth_dev *dev,
2957 struct mlx5_flow_dv_matcher *matcher,
2958 struct mlx5_flow *dev_flow,
2959 struct rte_flow_error *error)
2961 struct mlx5_priv *priv = dev->data->dev_private;
2962 struct mlx5_flow_dv_matcher *cache_matcher;
2963 struct mlx5dv_flow_matcher_attr dv_attr = {
2964 .type = IBV_FLOW_ATTR_NORMAL,
2965 .match_mask = (void *)&matcher->mask,
2967 struct mlx5_flow_tbl_resource *tbl = NULL;
2969 /* Lookup from cache. */
2970 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
2971 if (matcher->crc == cache_matcher->crc &&
2972 matcher->priority == cache_matcher->priority &&
2973 matcher->egress == cache_matcher->egress &&
2974 matcher->group == cache_matcher->group &&
2975 !memcmp((const void *)matcher->mask.buf,
2976 (const void *)cache_matcher->mask.buf,
2977 cache_matcher->mask.size)) {
2979 "priority %hd use %s matcher %p: refcnt %d++",
2980 cache_matcher->priority,
2981 cache_matcher->egress ? "tx" : "rx",
2982 (void *)cache_matcher,
2983 rte_atomic32_read(&cache_matcher->refcnt));
2984 rte_atomic32_inc(&cache_matcher->refcnt);
2985 dev_flow->dv.matcher = cache_matcher;
2989 /* Register new matcher. */
2990 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
2992 return rte_flow_error_set(error, ENOMEM,
2993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994 "cannot allocate matcher memory");
2995 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
2996 matcher->egress, error);
2998 rte_free(cache_matcher);
2999 return rte_flow_error_set(error, ENOMEM,
3000 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3001 NULL, "cannot create table");
3003 *cache_matcher = *matcher;
3004 dv_attr.match_criteria_enable =
3005 flow_dv_matcher_enable(cache_matcher->mask.buf);
3006 dv_attr.priority = matcher->priority;
3007 if (matcher->egress)
3008 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3009 cache_matcher->matcher_object =
3010 mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
3012 if (!cache_matcher->matcher_object) {
3013 rte_free(cache_matcher);
3014 #ifdef HAVE_MLX5DV_DR
3015 flow_dv_tbl_resource_release(tbl);
3017 return rte_flow_error_set(error, ENOMEM,
3018 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3019 NULL, "cannot create matcher");
3021 rte_atomic32_inc(&cache_matcher->refcnt);
3022 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
3023 dev_flow->dv.matcher = cache_matcher;
3024 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3025 cache_matcher->priority,
3026 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3027 rte_atomic32_read(&cache_matcher->refcnt));
3028 rte_atomic32_inc(&tbl->refcnt);
3033 * Add source vport match to the specified matcher.
3035 * @param[in, out] matcher
3037 * @param[in, out] key
3038 * Flow matcher value.
3040 * Source vport value to match.
3045 flow_dv_translate_source_vport(void *matcher, void *key,
3046 int16_t port, uint16_t mask)
3048 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3049 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3051 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3052 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
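/*
 * Note: only the source_port field of the misc parameters is filled here;
 * flow_dv_translate() below adds this match for ingress rules in E-Switch
 * (representor/master) configurations so that traffic is classified per
 * source vport.
 */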
3056 * Find existing tag resource or create and register a new one.
3058 * @param[in, out] dev
3059 * Pointer to rte_eth_dev structure.
3060 * @param[in, out] resource
3061 * Pointer to tag resource.
3062 * @param[in, out] dev_flow
3063 * Pointer to the dev_flow.
3065 * Pointer to the error structure.
3068 * 0 on success, otherwise -errno and errno is set.
3071 flow_dv_tag_resource_register
3072 (struct rte_eth_dev *dev,
3073 struct mlx5_flow_dv_tag_resource *resource,
3074 struct mlx5_flow *dev_flow,
3075 struct rte_flow_error *error)
3077 struct mlx5_priv *priv = dev->data->dev_private;
3078 struct mlx5_flow_dv_tag_resource *cache_resource;
3080 /* Lookup a matching resource from cache. */
3081 LIST_FOREACH(cache_resource, &priv->tags, next) {
3082 if (resource->tag == cache_resource->tag) {
3083 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3084 (void *)cache_resource,
3085 rte_atomic32_read(&cache_resource->refcnt));
3086 rte_atomic32_inc(&cache_resource->refcnt);
3087 dev_flow->flow->tag_resource = cache_resource;
3091 /* Register new resource. */
3092 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3093 if (!cache_resource)
3094 return rte_flow_error_set(error, ENOMEM,
3095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096 "cannot allocate resource memory");
3097 *cache_resource = *resource;
3098 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3100 if (!cache_resource->action) {
3101 rte_free(cache_resource);
3102 return rte_flow_error_set(error, ENOMEM,
3103 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3104 NULL, "cannot create action");
3106 rte_atomic32_init(&cache_resource->refcnt);
3107 rte_atomic32_inc(&cache_resource->refcnt);
3108 LIST_INSERT_HEAD(&priv->tags, cache_resource, next);
3109 dev_flow->flow->tag_resource = cache_resource;
3110 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3111 (void *)cache_resource,
3112 rte_atomic32_read(&cache_resource->refcnt));
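/*
 * Note: tag resources follow the same caching scheme as matchers and
 * encap/decap resources: they are reference counted and shared between
 * flows requesting the same tag value, and released only when the last
 * reference is dropped (see flow_dv_tag_release() below).
 */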
3120 * Pointer to Ethernet device.
3122 * Pointer to mlx5_flow.
3125 * 1 while a reference on it exists, 0 when freed.
3128 flow_dv_tag_release(struct rte_eth_dev *dev,
3129 struct mlx5_flow_dv_tag_resource *tag)
3132 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3133 dev->data->port_id, (void *)tag,
3134 rte_atomic32_read(&tag->refcnt));
3135 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3136 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3137 LIST_REMOVE(tag, next);
3138 DRV_LOG(DEBUG, "port %u tag %p: removed",
3139 dev->data->port_id, (void *)tag);
3147 * Fill the flow with DV spec.
3150 * Pointer to rte_eth_dev structure.
3151 * @param[in, out] dev_flow
3152 * Pointer to the sub flow.
3154 * Pointer to the flow attributes.
3156 * Pointer to the list of items.
3157 * @param[in] actions
3158 * Pointer to the list of actions.
3160 * Pointer to the error structure.
3163 * 0 on success, a negative errno value otherwise and rte_errno is set.
3166 flow_dv_translate(struct rte_eth_dev *dev,
3167 struct mlx5_flow *dev_flow,
3168 const struct rte_flow_attr *attr,
3169 const struct rte_flow_item items[],
3170 const struct rte_flow_action actions[],
3171 struct rte_flow_error *error)
3173 struct mlx5_priv *priv = dev->data->dev_private;
3174 struct rte_flow *flow = dev_flow->flow;
3175 uint64_t item_flags = 0;
3176 uint64_t last_item = 0;
3177 uint64_t action_flags = 0;
3178 uint64_t priority = attr->priority;
3179 struct mlx5_flow_dv_matcher matcher = {
3181 .size = sizeof(matcher.mask.buf),
3185 bool actions_end = false;
3186 struct mlx5_flow_dv_modify_hdr_resource res = {
3187 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3188 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3190 union flow_dv_attr flow_attr = { .attr = 0 };
3191 struct mlx5_flow_dv_tag_resource tag_resource;
3193 if (priority == MLX5_FLOW_PRIO_RSVD)
3194 priority = priv->config.flow_prio - 1;
3195 for (; !actions_end ; actions++) {
3196 const struct rte_flow_action_queue *queue;
3197 const struct rte_flow_action_rss *rss;
3198 const struct rte_flow_action *action = actions;
3199 const struct rte_flow_action_count *count = action->conf;
3200 const uint8_t *rss_key;
3201 const struct rte_flow_action_jump *jump_data;
3202 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3203 struct mlx5_flow_tbl_resource *tbl;
3205 switch (actions->type) {
3206 case RTE_FLOW_ACTION_TYPE_VOID:
3208 case RTE_FLOW_ACTION_TYPE_FLAG:
3210 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3211 if (!flow->tag_resource)
3212 if (flow_dv_tag_resource_register
3213 (dev, &tag_resource, dev_flow, error))
3215 dev_flow->dv.actions[actions_n++] =
3216 flow->tag_resource->action;
3217 action_flags |= MLX5_FLOW_ACTION_FLAG;
3219 case RTE_FLOW_ACTION_TYPE_MARK:
3220 tag_resource.tag = mlx5_flow_mark_set
3221 (((const struct rte_flow_action_mark *)
3222 (actions->conf))->id);
3223 if (!flow->tag_resource)
3224 if (flow_dv_tag_resource_register
3225 (dev, &tag_resource, dev_flow, error))
3227 dev_flow->dv.actions[actions_n++] =
3228 flow->tag_resource->action;
3229 action_flags |= MLX5_FLOW_ACTION_MARK;
3231 case RTE_FLOW_ACTION_TYPE_DROP:
3232 action_flags |= MLX5_FLOW_ACTION_DROP;
3234 case RTE_FLOW_ACTION_TYPE_QUEUE:
3235 queue = actions->conf;
3236 flow->rss.queue_num = 1;
3237 (*flow->queue)[0] = queue->index;
3238 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3240 case RTE_FLOW_ACTION_TYPE_RSS:
3241 rss = actions->conf;
3243 memcpy((*flow->queue), rss->queue,
3244 rss->queue_num * sizeof(uint16_t));
3245 flow->rss.queue_num = rss->queue_num;
3246 /* NULL RSS key indicates default RSS key. */
3247 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3248 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3249 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3250 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3251 flow->rss.level = rss->level;
3252 action_flags |= MLX5_FLOW_ACTION_RSS;
3254 case RTE_FLOW_ACTION_TYPE_COUNT:
3255 if (!priv->config.devx) {
3256 rte_errno = ENOTSUP;
3259 flow->counter = flow_dv_counter_new(dev, count->shared,
3261 if (flow->counter == NULL)
3263 dev_flow->dv.actions[actions_n++] =
3264 flow->counter->action;
3265 action_flags |= MLX5_FLOW_ACTION_COUNT;
3268 if (rte_errno == ENOTSUP)
3269 return rte_flow_error_set
3271 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3273 "count action not supported");
3275 return rte_flow_error_set
3277 RTE_FLOW_ERROR_TYPE_ACTION,
3279 "cannot create counter"
3281 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3282 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3283 if (flow_dv_create_action_l2_encap(dev, actions,
3286 dev_flow->dv.actions[actions_n++] =
3287 dev_flow->dv.encap_decap->verbs_action;
3288 action_flags |= actions->type ==
3289 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3290 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3291 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3293 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3294 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3295 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3298 dev_flow->dv.actions[actions_n++] =
3299 dev_flow->dv.encap_decap->verbs_action;
3300 action_flags |= actions->type ==
3301 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3302 MLX5_FLOW_ACTION_VXLAN_DECAP :
3303 MLX5_FLOW_ACTION_NVGRE_DECAP;
3305 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3306 /* Handle encap with preceding decap. */
3307 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3308 if (flow_dv_create_action_raw_encap
3309 (dev, actions, dev_flow, attr, error))
3311 dev_flow->dv.actions[actions_n++] =
3312 dev_flow->dv.encap_decap->verbs_action;
3314 /* Handle encap without preceding decap. */
3315 if (flow_dv_create_action_l2_encap(dev, actions,
3319 dev_flow->dv.actions[actions_n++] =
3320 dev_flow->dv.encap_decap->verbs_action;
3322 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3324 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3325 /* Check if this decap is followed by encap. */
3326 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3327 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3330 /* Handle decap only if it isn't followed by encap. */
3331 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3332 if (flow_dv_create_action_l2_decap(dev,
3336 dev_flow->dv.actions[actions_n++] =
3337 dev_flow->dv.encap_decap->verbs_action;
3339 /* If decap is followed by encap, handle it at encap. */
3340 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3342 case RTE_FLOW_ACTION_TYPE_JUMP:
3343 jump_data = action->conf;
3344 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3346 attr->egress, error);
3348 return rte_flow_error_set
3350 RTE_FLOW_ERROR_TYPE_ACTION,
3352 "cannot create jump action.");
3353 jump_tbl_resource.tbl = tbl;
3354 if (flow_dv_jump_tbl_resource_register
3355 (dev, &jump_tbl_resource, dev_flow, error)) {
3356 flow_dv_tbl_resource_release(tbl);
3357 return rte_flow_error_set
3359 RTE_FLOW_ERROR_TYPE_ACTION,
3361 "cannot create jump action.");
3363 dev_flow->dv.actions[actions_n++] =
3364 dev_flow->dv.jump->action;
3365 action_flags |= MLX5_FLOW_ACTION_JUMP;
3367 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3368 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3369 if (flow_dv_convert_action_modify_mac(&res, actions,
3372 action_flags |= actions->type ==
3373 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3374 MLX5_FLOW_ACTION_SET_MAC_SRC :
3375 MLX5_FLOW_ACTION_SET_MAC_DST;
3377 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3378 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3379 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3382 action_flags |= actions->type ==
3383 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3384 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3385 MLX5_FLOW_ACTION_SET_IPV4_DST;
3387 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3388 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3389 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3392 action_flags |= actions->type ==
3393 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3394 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3395 MLX5_FLOW_ACTION_SET_IPV6_DST;
3397 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3398 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3399 if (flow_dv_convert_action_modify_tp(&res, actions,
3403 action_flags |= actions->type ==
3404 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3405 MLX5_FLOW_ACTION_SET_TP_SRC :
3406 MLX5_FLOW_ACTION_SET_TP_DST;
3408 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3409 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3413 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3415 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3416 if (flow_dv_convert_action_modify_ttl(&res, actions,
3420 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3422 case RTE_FLOW_ACTION_TYPE_END:
3424 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3425 /* Create modify header action if needed. */
3426 if (flow_dv_modify_hdr_resource_register
3431 dev_flow->dv.actions[actions_n++] =
3432 dev_flow->dv.modify_hdr->verbs_action;
3439 dev_flow->dv.actions_n = actions_n;
3440 flow->actions = action_flags;
3441 if (attr->ingress && !attr->transfer &&
3442 (priv->representor || priv->master)) {
3443 /* It was validated - we support unidirectional flows only. */
3444 assert(!attr->egress);
3446 * Add matching on source vport index only
3447 * for ingress rules in E-Switch configurations.
3449 flow_dv_translate_source_vport(matcher.mask.buf,
3450 dev_flow->dv.value.buf,
3454 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3455 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3456 void *match_mask = matcher.mask.buf;
3457 void *match_value = dev_flow->dv.value.buf;
3459 switch (items->type) {
3460 case RTE_FLOW_ITEM_TYPE_ETH:
3461 flow_dv_translate_item_eth(match_mask, match_value,
3463 matcher.priority = MLX5_PRIORITY_MAP_L2;
3464 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3465 MLX5_FLOW_LAYER_OUTER_L2;
3467 case RTE_FLOW_ITEM_TYPE_VLAN:
3468 flow_dv_translate_item_vlan(match_mask, match_value,
3470 matcher.priority = MLX5_PRIORITY_MAP_L2;
3471 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3472 MLX5_FLOW_LAYER_INNER_VLAN) :
3473 (MLX5_FLOW_LAYER_OUTER_L2 |
3474 MLX5_FLOW_LAYER_OUTER_VLAN);
3476 case RTE_FLOW_ITEM_TYPE_IPV4:
3477 flow_dv_translate_item_ipv4(match_mask, match_value,
3478 items, tunnel, attr->group);
3479 matcher.priority = MLX5_PRIORITY_MAP_L3;
3480 dev_flow->dv.hash_fields |=
3481 mlx5_flow_hashfields_adjust
3483 MLX5_IPV4_LAYER_TYPES,
3484 MLX5_IPV4_IBV_RX_HASH);
3485 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3486 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3488 case RTE_FLOW_ITEM_TYPE_IPV6:
3489 flow_dv_translate_item_ipv6(match_mask, match_value,
3490 items, tunnel, attr->group);
3491 matcher.priority = MLX5_PRIORITY_MAP_L3;
3492 dev_flow->dv.hash_fields |=
3493 mlx5_flow_hashfields_adjust
3495 MLX5_IPV6_LAYER_TYPES,
3496 MLX5_IPV6_IBV_RX_HASH);
3497 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3498 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3500 case RTE_FLOW_ITEM_TYPE_TCP:
3501 flow_dv_translate_item_tcp(match_mask, match_value,
3503 matcher.priority = MLX5_PRIORITY_MAP_L4;
3504 dev_flow->dv.hash_fields |=
3505 mlx5_flow_hashfields_adjust
3506 (dev_flow, tunnel, ETH_RSS_TCP,
3507 IBV_RX_HASH_SRC_PORT_TCP |
3508 IBV_RX_HASH_DST_PORT_TCP);
3509 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3510 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3512 case RTE_FLOW_ITEM_TYPE_UDP:
3513 flow_dv_translate_item_udp(match_mask, match_value,
3515 matcher.priority = MLX5_PRIORITY_MAP_L4;
3516 dev_flow->dv.hash_fields |=
3517 mlx5_flow_hashfields_adjust
3518 (dev_flow, tunnel, ETH_RSS_UDP,
3519 IBV_RX_HASH_SRC_PORT_UDP |
3520 IBV_RX_HASH_DST_PORT_UDP);
3521 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3522 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3524 case RTE_FLOW_ITEM_TYPE_GRE:
3525 flow_dv_translate_item_gre(match_mask, match_value,
3527 last_item = MLX5_FLOW_LAYER_GRE;
3529 case RTE_FLOW_ITEM_TYPE_NVGRE:
3530 flow_dv_translate_item_nvgre(match_mask, match_value,
3532 last_item = MLX5_FLOW_LAYER_GRE;
3534 case RTE_FLOW_ITEM_TYPE_VXLAN:
3535 flow_dv_translate_item_vxlan(match_mask, match_value,
3537 last_item = MLX5_FLOW_LAYER_VXLAN;
3539 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3540 flow_dv_translate_item_vxlan(match_mask, match_value,
3542 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3544 case RTE_FLOW_ITEM_TYPE_MPLS:
3545 flow_dv_translate_item_mpls(match_mask, match_value,
3546 items, last_item, tunnel);
3547 last_item = MLX5_FLOW_LAYER_MPLS;
3549 case RTE_FLOW_ITEM_TYPE_META:
3550 flow_dv_translate_item_meta(match_mask, match_value,
3552 last_item = MLX5_FLOW_ITEM_METADATA;
3557 item_flags |= last_item;
3559 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3560 dev_flow->dv.value.buf));
3561 dev_flow->layers = item_flags;
3562 /* Register matcher. */
3563 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3565 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3567 matcher.egress = attr->egress;
3568 matcher.group = attr->group;
3569 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3575 * Apply the flow to the NIC.
3578 * Pointer to the Ethernet device structure.
3579 * @param[in, out] flow
3580 * Pointer to flow structure.
3582 * Pointer to error structure.
3585 * 0 on success, a negative errno value otherwise and rte_errno is set.
3588 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3589 struct rte_flow_error *error)
3591 struct mlx5_flow_dv *dv;
3592 struct mlx5_flow *dev_flow;
3596 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3599 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3600 dv->hrxq = mlx5_hrxq_drop_new(dev);
3604 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3605 "cannot get drop hash queue");
3609 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3611 } else if (flow->actions &
3612 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3613 struct mlx5_hrxq *hrxq;
3615 hrxq = mlx5_hrxq_get(dev, flow->key,
3616 MLX5_RSS_HASH_KEY_LEN,
3619 flow->rss.queue_num);
3621 hrxq = mlx5_hrxq_new
3622 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3623 dv->hash_fields, (*flow->queue),
3624 flow->rss.queue_num,
3625 !!(dev_flow->layers &
3626 MLX5_FLOW_LAYER_TUNNEL));
3630 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3631 "cannot get hash queue");
3636 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3640 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3641 (void *)&dv->value, n,
3644 rte_flow_error_set(error, errno,
3645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3647 "hardware refuses to create flow");
3653 err = rte_errno; /* Save rte_errno before cleanup. */
3654 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3655 struct mlx5_flow_dv *dv = &dev_flow->dv;
3657 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3658 mlx5_hrxq_drop_release(dev);
3660 mlx5_hrxq_release(dev, dv->hrxq);
3664 rte_errno = err; /* Restore rte_errno. */
3669 * Release the flow matcher.
3672 * Pointer to Ethernet device.
3674 * Pointer to mlx5_flow.
3677 * 1 while a reference on it exists, 0 when freed.
3680 flow_dv_matcher_release(struct rte_eth_dev *dev,
3681 struct mlx5_flow *flow)
3683 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3684 struct mlx5_priv *priv = dev->data->dev_private;
3685 struct mlx5_flow_tbl_resource *tbl;
3687 assert(matcher->matcher_object);
3688 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3689 dev->data->port_id, (void *)matcher,
3690 rte_atomic32_read(&matcher->refcnt));
3691 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3692 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3693 (matcher->matcher_object));
3694 LIST_REMOVE(matcher, next);
3695 if (matcher->egress)
3696 tbl = &priv->tx_tbl[matcher->group];
3698 tbl = &priv->rx_tbl[matcher->group];
3699 flow_dv_tbl_resource_release(tbl);
3701 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3702 dev->data->port_id, (void *)matcher);
3709 * Release an encap/decap resource.
3712 * Pointer to mlx5_flow.
3715 * 1 while a reference on it exists, 0 when freed.
3718 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3720 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3721 flow->dv.encap_decap;
3723 assert(cache_resource->verbs_action);
3724 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3725 (void *)cache_resource,
3726 rte_atomic32_read(&cache_resource->refcnt));
3727 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3728 claim_zero(mlx5_glue->destroy_flow_action
3729 (cache_resource->verbs_action));
3730 LIST_REMOVE(cache_resource, next);
3731 rte_free(cache_resource);
3732 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3733 (void *)cache_resource);
3740 * Release a jump to table action resource.
3743 * Pointer to mlx5_flow.
3746 * 1 while a reference on it exists, 0 when freed.
3749 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3751 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3754 assert(cache_resource->action);
3755 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3756 (void *)cache_resource,
3757 rte_atomic32_read(&cache_resource->refcnt));
3758 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3759 claim_zero(mlx5_glue->destroy_flow_action
3760 (cache_resource->action));
3761 LIST_REMOVE(cache_resource, next);
3762 flow_dv_tbl_resource_release(cache_resource->tbl);
3763 rte_free(cache_resource);
3764 DRV_LOG(DEBUG, "jump table resource %p: removed",
3765 (void *)cache_resource);
3772 * Release a modify-header resource.
3775 * Pointer to mlx5_flow.
3778 * 1 while a reference on it exists, 0 when freed.
3781 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3783 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3784 flow->dv.modify_hdr;
3786 assert(cache_resource->verbs_action);
3787 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3788 (void *)cache_resource,
3789 rte_atomic32_read(&cache_resource->refcnt));
3790 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3791 claim_zero(mlx5_glue->destroy_flow_action
3792 (cache_resource->verbs_action));
3793 LIST_REMOVE(cache_resource, next);
3794 rte_free(cache_resource);
3795 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3796 (void *)cache_resource);
3803 * Remove the flow from the NIC but keep it in memory.
3806 * Pointer to Ethernet device.
3807 * @param[in, out] flow
3808 * Pointer to flow structure.
3811 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3813 struct mlx5_flow_dv *dv;
3814 struct mlx5_flow *dev_flow;
3818 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3821 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3825 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3826 mlx5_hrxq_drop_release(dev);
3828 mlx5_hrxq_release(dev, dv->hrxq);
3835 * Remove the flow from the NIC and the memory.
3838 * Pointer to the Ethernet device structure.
3839 * @param[in, out] flow
3840 * Pointer to flow structure.
3843 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3845 struct mlx5_flow *dev_flow;
3849 flow_dv_remove(dev, flow);
3850 if (flow->counter) {
3851 flow_dv_counter_release(flow->counter);
3852 flow->counter = NULL;
3854 if (flow->tag_resource) {
3855 flow_dv_tag_release(dev, flow->tag_resource);
3856 flow->tag_resource = NULL;
3858 while (!LIST_EMPTY(&flow->dev_flows)) {
3859 dev_flow = LIST_FIRST(&flow->dev_flows);
3860 LIST_REMOVE(dev_flow, next);
3861 if (dev_flow->dv.matcher)
3862 flow_dv_matcher_release(dev, dev_flow);
3863 if (dev_flow->dv.encap_decap)
3864 flow_dv_encap_decap_resource_release(dev_flow);
3865 if (dev_flow->dv.modify_hdr)
3866 flow_dv_modify_hdr_resource_release(dev_flow);
3867 if (dev_flow->dv.jump)
3868 flow_dv_jump_tbl_resource_release(dev_flow);
3874 * Query a DV flow rule for its statistics via DevX.
3877 * Pointer to Ethernet device.
3879 * Pointer to the sub flow.
3881 * Data retrieved by the query.
3883 * Perform verbose error reporting if not NULL.
3886 * 0 on success, a negative errno value otherwise and rte_errno is set.
3889 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3890 void *data, struct rte_flow_error *error)
3892 struct mlx5_priv *priv = dev->data->dev_private;
3893 struct rte_flow_query_count *qc = data;
3898 if (!priv->config.devx)
3899 return rte_flow_error_set(error, ENOTSUP,
3900 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3902 "counters are not supported");
3903 if (flow->counter) {
3904 err = mlx5_devx_cmd_flow_counter_query
3905 (flow->counter->dcs,
3906 qc->reset, &pkts, &bytes);
3908 return rte_flow_error_set
3910 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3912 "cannot read counters");
3915 qc->hits = pkts - flow->counter->hits;
3916 qc->bytes = bytes - flow->counter->bytes;
3918 flow->counter->hits = pkts;
3919 flow->counter->bytes = bytes;
3923 return rte_flow_error_set(error, EINVAL,
3924 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3926 "counters are not available");
3932 * @see rte_flow_query()
3936 flow_dv_query(struct rte_eth_dev *dev,
3937 struct rte_flow *flow __rte_unused,
3938 const struct rte_flow_action *actions __rte_unused,
3939 void *data __rte_unused,
3940 struct rte_flow_error *error __rte_unused)
3944 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3945 switch (actions->type) {
3946 case RTE_FLOW_ACTION_TYPE_VOID:
3948 case RTE_FLOW_ACTION_TYPE_COUNT:
3949 ret = flow_dv_query_count(dev, flow, data, error);
3952 return rte_flow_error_set(error, ENOTSUP,
3953 RTE_FLOW_ERROR_TYPE_ACTION,
3955 "action not supported");
3962 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
3963 .validate = flow_dv_validate,
3964 .prepare = flow_dv_prepare,
3965 .translate = flow_dv_translate,
3966 .apply = flow_dv_apply,
3967 .remove = flow_dv_remove,
3968 .destroy = flow_dv_destroy,
3969 .query = flow_dv_query,
3972 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */