1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
51 * Initialize flow attributes structure according to flow items' types.
54 * Pointer to item specification.
56 * Pointer to flow attributes structure.
59 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
61 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
63 case RTE_FLOW_ITEM_TYPE_IPV4:
66 case RTE_FLOW_ITEM_TYPE_IPV6:
69 case RTE_FLOW_ITEM_TYPE_UDP:
72 case RTE_FLOW_ITEM_TYPE_TCP:
82 struct field_modify_info {
83 uint32_t size; /* Size of field in protocol header, in bytes. */
84 uint32_t offset; /* Offset of field in protocol header, in bytes. */
85 enum mlx5_modification_field id;
88 struct field_modify_info modify_eth[] = {
89 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
90 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
91 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
92 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
96 struct field_modify_info modify_ipv4[] = {
97 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
98 {4, 12, MLX5_MODI_OUT_SIPV4},
99 {4, 16, MLX5_MODI_OUT_DIPV4},
103 struct field_modify_info modify_ipv6[] = {
104 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
105 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
106 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
107 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
108 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
109 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
110 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
111 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
112 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
116 struct field_modify_info modify_udp[] = {
117 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
118 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
122 struct field_modify_info modify_tcp[] = {
123 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
124 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
129 * Convert modify-header action to DV specification.
132 * Pointer to item specification.
134 * Pointer to field modification information.
135 * @param[in,out] resource
136 * Pointer to the modify-header resource.
138 * Type of modification.
140 * Pointer to the error structure.
143 * 0 on success, a negative errno value otherwise and rte_errno is set.
146 flow_dv_convert_modify_action(struct rte_flow_item *item,
147 struct field_modify_info *field,
148 struct mlx5_flow_dv_modify_hdr_resource *resource,
150 struct rte_flow_error *error)
152 uint32_t i = resource->actions_num;
153 struct mlx5_modification_cmd *actions = resource->actions;
154 const uint8_t *spec = item->spec;
155 const uint8_t *mask = item->mask;
158 while (field->size) {
160 /* Generate modify command for each mask segment. */
161 memcpy(&set, &mask[field->offset], field->size);
163 if (i >= MLX5_MODIFY_NUM)
164 return rte_flow_error_set(error, EINVAL,
165 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
166 "too many items to modify");
167 actions[i].action_type = type;
168 actions[i].field = field->id;
169 actions[i].length = field->size ==
170 4 ? 0 : field->size * 8;
171 rte_memcpy(&actions[i].data[4 - field->size],
172 &spec[field->offset], field->size);
173 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
176 if (resource->actions_num != i)
177 resource->actions_num = i;
180 if (!resource->actions_num)
181 return rte_flow_error_set(error, EINVAL,
182 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
183 "invalid modification flow item");
188 * Convert modify-header set IPv4 address action to DV specification.
190 * @param[in,out] resource
191 * Pointer to the modify-header resource.
193 * Pointer to action specification.
195 * Pointer to the error structure.
198 * 0 on success, a negative errno value otherwise and rte_errno is set.
201 flow_dv_convert_action_modify_ipv4
202 (struct mlx5_flow_dv_modify_hdr_resource *resource,
203 const struct rte_flow_action *action,
204 struct rte_flow_error *error)
206 const struct rte_flow_action_set_ipv4 *conf =
207 (const struct rte_flow_action_set_ipv4 *)(action->conf);
208 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
209 struct rte_flow_item_ipv4 ipv4;
210 struct rte_flow_item_ipv4 ipv4_mask;
212 memset(&ipv4, 0, sizeof(ipv4));
213 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
214 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
215 ipv4.hdr.src_addr = conf->ipv4_addr;
216 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
218 ipv4.hdr.dst_addr = conf->ipv4_addr;
219 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
222 item.mask = &ipv4_mask;
223 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
224 MLX5_MODIFICATION_TYPE_SET, error);
228 * Convert modify-header set IPv6 address action to DV specification.
230 * @param[in,out] resource
231 * Pointer to the modify-header resource.
233 * Pointer to action specification.
235 * Pointer to the error structure.
238 * 0 on success, a negative errno value otherwise and rte_errno is set.
241 flow_dv_convert_action_modify_ipv6
242 (struct mlx5_flow_dv_modify_hdr_resource *resource,
243 const struct rte_flow_action *action,
244 struct rte_flow_error *error)
246 const struct rte_flow_action_set_ipv6 *conf =
247 (const struct rte_flow_action_set_ipv6 *)(action->conf);
248 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
249 struct rte_flow_item_ipv6 ipv6;
250 struct rte_flow_item_ipv6 ipv6_mask;
252 memset(&ipv6, 0, sizeof(ipv6));
253 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
254 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
255 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
256 sizeof(ipv6.hdr.src_addr));
257 memcpy(&ipv6_mask.hdr.src_addr,
258 &rte_flow_item_ipv6_mask.hdr.src_addr,
259 sizeof(ipv6.hdr.src_addr));
261 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
262 sizeof(ipv6.hdr.dst_addr));
263 memcpy(&ipv6_mask.hdr.dst_addr,
264 &rte_flow_item_ipv6_mask.hdr.dst_addr,
265 sizeof(ipv6.hdr.dst_addr));
268 item.mask = &ipv6_mask;
269 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
270 MLX5_MODIFICATION_TYPE_SET, error);
274 * Convert modify-header set MAC address action to DV specification.
276 * @param[in,out] resource
277 * Pointer to the modify-header resource.
279 * Pointer to action specification.
281 * Pointer to the error structure.
284 * 0 on success, a negative errno value otherwise and rte_errno is set.
287 flow_dv_convert_action_modify_mac
288 (struct mlx5_flow_dv_modify_hdr_resource *resource,
289 const struct rte_flow_action *action,
290 struct rte_flow_error *error)
292 const struct rte_flow_action_set_mac *conf =
293 (const struct rte_flow_action_set_mac *)(action->conf);
294 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
295 struct rte_flow_item_eth eth;
296 struct rte_flow_item_eth eth_mask;
298 memset(ð, 0, sizeof(eth));
299 memset(ð_mask, 0, sizeof(eth_mask));
300 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
301 memcpy(ð.src.addr_bytes, &conf->mac_addr,
302 sizeof(eth.src.addr_bytes));
303 memcpy(ð_mask.src.addr_bytes,
304 &rte_flow_item_eth_mask.src.addr_bytes,
305 sizeof(eth_mask.src.addr_bytes));
307 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
308 sizeof(eth.dst.addr_bytes));
309 memcpy(ð_mask.dst.addr_bytes,
310 &rte_flow_item_eth_mask.dst.addr_bytes,
311 sizeof(eth_mask.dst.addr_bytes));
314 item.mask = ð_mask;
315 return flow_dv_convert_modify_action(&item, modify_eth, resource,
316 MLX5_MODIFICATION_TYPE_SET, error);
320 * Convert modify-header set TP action to DV specification.
322 * @param[in,out] resource
323 * Pointer to the modify-header resource.
325 * Pointer to action specification.
327 * Pointer to rte_flow_item objects list.
329 * Pointer to flow attributes structure.
331 * Pointer to the error structure.
334 * 0 on success, a negative errno value otherwise and rte_errno is set.
337 flow_dv_convert_action_modify_tp
338 (struct mlx5_flow_dv_modify_hdr_resource *resource,
339 const struct rte_flow_action *action,
340 const struct rte_flow_item *items,
341 union flow_dv_attr *attr,
342 struct rte_flow_error *error)
344 const struct rte_flow_action_set_tp *conf =
345 (const struct rte_flow_action_set_tp *)(action->conf);
346 struct rte_flow_item item;
347 struct rte_flow_item_udp udp;
348 struct rte_flow_item_udp udp_mask;
349 struct rte_flow_item_tcp tcp;
350 struct rte_flow_item_tcp tcp_mask;
351 struct field_modify_info *field;
354 flow_dv_attr_init(items, attr);
356 memset(&udp, 0, sizeof(udp));
357 memset(&udp_mask, 0, sizeof(udp_mask));
358 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
359 udp.hdr.src_port = conf->port;
360 udp_mask.hdr.src_port =
361 rte_flow_item_udp_mask.hdr.src_port;
363 udp.hdr.dst_port = conf->port;
364 udp_mask.hdr.dst_port =
365 rte_flow_item_udp_mask.hdr.dst_port;
367 item.type = RTE_FLOW_ITEM_TYPE_UDP;
369 item.mask = &udp_mask;
373 memset(&tcp, 0, sizeof(tcp));
374 memset(&tcp_mask, 0, sizeof(tcp_mask));
375 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
376 tcp.hdr.src_port = conf->port;
377 tcp_mask.hdr.src_port =
378 rte_flow_item_tcp_mask.hdr.src_port;
380 tcp.hdr.dst_port = conf->port;
381 tcp_mask.hdr.dst_port =
382 rte_flow_item_tcp_mask.hdr.dst_port;
384 item.type = RTE_FLOW_ITEM_TYPE_TCP;
386 item.mask = &tcp_mask;
389 return flow_dv_convert_modify_action(&item, field, resource,
390 MLX5_MODIFICATION_TYPE_SET, error);
394 * Convert modify-header set TTL action to DV specification.
396 * @param[in,out] resource
397 * Pointer to the modify-header resource.
399 * Pointer to action specification.
401 * Pointer to rte_flow_item objects list.
403 * Pointer to flow attributes structure.
405 * Pointer to the error structure.
408 * 0 on success, a negative errno value otherwise and rte_errno is set.
411 flow_dv_convert_action_modify_ttl
412 (struct mlx5_flow_dv_modify_hdr_resource *resource,
413 const struct rte_flow_action *action,
414 const struct rte_flow_item *items,
415 union flow_dv_attr *attr,
416 struct rte_flow_error *error)
418 const struct rte_flow_action_set_ttl *conf =
419 (const struct rte_flow_action_set_ttl *)(action->conf);
420 struct rte_flow_item item;
421 struct rte_flow_item_ipv4 ipv4;
422 struct rte_flow_item_ipv4 ipv4_mask;
423 struct rte_flow_item_ipv6 ipv6;
424 struct rte_flow_item_ipv6 ipv6_mask;
425 struct field_modify_info *field;
428 flow_dv_attr_init(items, attr);
430 memset(&ipv4, 0, sizeof(ipv4));
431 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
432 ipv4.hdr.time_to_live = conf->ttl_value;
433 ipv4_mask.hdr.time_to_live = 0xFF;
434 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
436 item.mask = &ipv4_mask;
440 memset(&ipv6, 0, sizeof(ipv6));
441 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
442 ipv6.hdr.hop_limits = conf->ttl_value;
443 ipv6_mask.hdr.hop_limits = 0xFF;
444 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
446 item.mask = &ipv6_mask;
449 return flow_dv_convert_modify_action(&item, field, resource,
450 MLX5_MODIFICATION_TYPE_SET, error);
454 * Convert modify-header decrement TTL action to DV specification.
456 * @param[in,out] resource
457 * Pointer to the modify-header resource.
459 * Pointer to action specification.
461 * Pointer to rte_flow_item objects list.
463 * Pointer to flow attributes structure.
465 * Pointer to the error structure.
468 * 0 on success, a negative errno value otherwise and rte_errno is set.
471 flow_dv_convert_action_modify_dec_ttl
472 (struct mlx5_flow_dv_modify_hdr_resource *resource,
473 const struct rte_flow_item *items,
474 union flow_dv_attr *attr,
475 struct rte_flow_error *error)
477 struct rte_flow_item item;
478 struct rte_flow_item_ipv4 ipv4;
479 struct rte_flow_item_ipv4 ipv4_mask;
480 struct rte_flow_item_ipv6 ipv6;
481 struct rte_flow_item_ipv6 ipv6_mask;
482 struct field_modify_info *field;
485 flow_dv_attr_init(items, attr);
487 memset(&ipv4, 0, sizeof(ipv4));
488 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
489 ipv4.hdr.time_to_live = 0xFF;
490 ipv4_mask.hdr.time_to_live = 0xFF;
491 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
493 item.mask = &ipv4_mask;
497 memset(&ipv6, 0, sizeof(ipv6));
498 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
499 ipv6.hdr.hop_limits = 0xFF;
500 ipv6_mask.hdr.hop_limits = 0xFF;
501 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
503 item.mask = &ipv6_mask;
506 return flow_dv_convert_modify_action(&item, field, resource,
507 MLX5_MODIFICATION_TYPE_ADD, error);
511 * Validate META item.
514 * Pointer to the rte_eth_dev structure.
516 * Item specification.
518 * Attributes of flow that includes this item.
520 * Pointer to error structure.
523 * 0 on success, a negative errno value otherwise and rte_errno is set.
526 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
527 const struct rte_flow_item *item,
528 const struct rte_flow_attr *attr,
529 struct rte_flow_error *error)
531 const struct rte_flow_item_meta *spec = item->spec;
532 const struct rte_flow_item_meta *mask = item->mask;
533 const struct rte_flow_item_meta nic_mask = {
534 .data = RTE_BE32(UINT32_MAX)
537 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
539 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
540 return rte_flow_error_set(error, EPERM,
541 RTE_FLOW_ERROR_TYPE_ITEM,
543 "match on metadata offload "
544 "configuration is off for this port");
546 return rte_flow_error_set(error, EINVAL,
547 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
549 "data cannot be empty");
551 return rte_flow_error_set(error, EINVAL,
552 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
554 "data cannot be zero");
556 mask = &rte_flow_item_meta_mask;
557 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
558 (const uint8_t *)&nic_mask,
559 sizeof(struct rte_flow_item_meta),
564 return rte_flow_error_set(error, ENOTSUP,
565 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
567 "pattern not supported for ingress");
572 * Validate the L2 encap action.
574 * @param[in] action_flags
575 * Holds the actions detected until now.
577 * Pointer to the encap action.
579 * Pointer to flow attributes
581 * Pointer to error structure.
584 * 0 on success, a negative errno value otherwise and rte_errno is set.
587 flow_dv_validate_action_l2_encap(uint64_t action_flags,
588 const struct rte_flow_action *action,
589 const struct rte_flow_attr *attr,
590 struct rte_flow_error *error)
593 return rte_flow_error_set(error, EINVAL,
594 RTE_FLOW_ERROR_TYPE_ACTION, action,
595 "configuration cannot be null");
596 if (action_flags & MLX5_FLOW_ACTION_DROP)
597 return rte_flow_error_set(error, EINVAL,
598 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
599 "can't drop and encap in same flow");
600 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
601 return rte_flow_error_set(error, EINVAL,
602 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
603 "can only have a single encap or"
604 " decap action in a flow");
606 return rte_flow_error_set(error, ENOTSUP,
607 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
609 "encap action not supported for "
615 * Validate the L2 decap action.
617 * @param[in] action_flags
618 * Holds the actions detected until now.
620 * Pointer to flow attributes
622 * Pointer to error structure.
625 * 0 on success, a negative errno value otherwise and rte_errno is set.
628 flow_dv_validate_action_l2_decap(uint64_t action_flags,
629 const struct rte_flow_attr *attr,
630 struct rte_flow_error *error)
632 if (action_flags & MLX5_FLOW_ACTION_DROP)
633 return rte_flow_error_set(error, EINVAL,
634 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
635 "can't drop and decap in same flow");
636 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
637 return rte_flow_error_set(error, EINVAL,
638 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
639 "can only have a single encap or"
640 " decap action in a flow");
641 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
642 return rte_flow_error_set(error, EINVAL,
643 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
644 "can't have decap action after"
647 return rte_flow_error_set(error, ENOTSUP,
648 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
650 "decap action not supported for "
656 * Validate the raw encap action.
658 * @param[in] action_flags
659 * Holds the actions detected until now.
661 * Pointer to the encap action.
663 * Pointer to flow attributes
665 * Pointer to error structure.
668 * 0 on success, a negative errno value otherwise and rte_errno is set.
671 flow_dv_validate_action_raw_encap(uint64_t action_flags,
672 const struct rte_flow_action *action,
673 const struct rte_flow_attr *attr,
674 struct rte_flow_error *error)
677 return rte_flow_error_set(error, EINVAL,
678 RTE_FLOW_ERROR_TYPE_ACTION, action,
679 "configuration cannot be null");
680 if (action_flags & MLX5_FLOW_ACTION_DROP)
681 return rte_flow_error_set(error, EINVAL,
682 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
683 "can't drop and encap in same flow");
684 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
685 return rte_flow_error_set(error, EINVAL,
686 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
687 "can only have a single encap"
688 " action in a flow");
689 /* encap without preceding decap is not supported for ingress */
690 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
691 return rte_flow_error_set(error, ENOTSUP,
692 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
694 "encap action not supported for "
700 * Validate the raw decap action.
702 * @param[in] action_flags
703 * Holds the actions detected until now.
705 * Pointer to the encap action.
707 * Pointer to flow attributes
709 * Pointer to error structure.
712 * 0 on success, a negative errno value otherwise and rte_errno is set.
715 flow_dv_validate_action_raw_decap(uint64_t action_flags,
716 const struct rte_flow_action *action,
717 const struct rte_flow_attr *attr,
718 struct rte_flow_error *error)
720 if (action_flags & MLX5_FLOW_ACTION_DROP)
721 return rte_flow_error_set(error, EINVAL,
722 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
723 "can't drop and decap in same flow");
724 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
725 return rte_flow_error_set(error, EINVAL,
726 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
727 "can't have encap action before"
729 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
730 return rte_flow_error_set(error, EINVAL,
731 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
732 "can only have a single decap"
733 " action in a flow");
734 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
735 return rte_flow_error_set(error, EINVAL,
736 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
737 "can't have decap action after"
739 /* decap action is valid on egress only if it is followed by encap */
741 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
742 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
745 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
746 return rte_flow_error_set
748 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
749 NULL, "decap action not supported"
756 * Find existing encap/decap resource or create and register a new one.
758 * @param[in, out] dev
759 * Pointer to rte_eth_dev structure.
760 * @param[in, out] resource
761 * Pointer to encap/decap resource.
762 * @param[in, out] dev_flow
763 * Pointer to the dev_flow.
765 * pointer to error structure.
768 * 0 on success otherwise -errno and errno is set.
771 flow_dv_encap_decap_resource_register
772 (struct rte_eth_dev *dev,
773 struct mlx5_flow_dv_encap_decap_resource *resource,
774 struct mlx5_flow *dev_flow,
775 struct rte_flow_error *error)
777 struct priv *priv = dev->data->dev_private;
778 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
780 /* Lookup a matching resource from cache. */
781 LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
782 if (resource->reformat_type == cache_resource->reformat_type &&
783 resource->ft_type == cache_resource->ft_type &&
784 resource->size == cache_resource->size &&
785 !memcmp((const void *)resource->buf,
786 (const void *)cache_resource->buf,
788 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
789 (void *)cache_resource,
790 rte_atomic32_read(&cache_resource->refcnt));
791 rte_atomic32_inc(&cache_resource->refcnt);
792 dev_flow->dv.encap_decap = cache_resource;
796 /* Register new encap/decap resource. */
797 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
799 return rte_flow_error_set(error, ENOMEM,
800 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
801 "cannot allocate resource memory");
802 *cache_resource = *resource;
803 cache_resource->verbs_action =
804 mlx5_glue->dv_create_flow_action_packet_reformat
805 (priv->ctx, cache_resource->size,
806 (cache_resource->size ? cache_resource->buf : NULL),
807 cache_resource->reformat_type,
808 cache_resource->ft_type);
809 if (!cache_resource->verbs_action) {
810 rte_free(cache_resource);
811 return rte_flow_error_set(error, ENOMEM,
812 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
813 NULL, "cannot create action");
815 rte_atomic32_init(&cache_resource->refcnt);
816 rte_atomic32_inc(&cache_resource->refcnt);
817 LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
818 dev_flow->dv.encap_decap = cache_resource;
819 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
820 (void *)cache_resource,
821 rte_atomic32_read(&cache_resource->refcnt));
826 * Get the size of specific rte_flow_item_type
828 * @param[in] item_type
829 * Tested rte_flow_item_type.
832 * sizeof struct item_type, 0 if void or irrelevant.
835 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
840 case RTE_FLOW_ITEM_TYPE_ETH:
841 retval = sizeof(struct rte_flow_item_eth);
843 case RTE_FLOW_ITEM_TYPE_VLAN:
844 retval = sizeof(struct rte_flow_item_vlan);
846 case RTE_FLOW_ITEM_TYPE_IPV4:
847 retval = sizeof(struct rte_flow_item_ipv4);
849 case RTE_FLOW_ITEM_TYPE_IPV6:
850 retval = sizeof(struct rte_flow_item_ipv6);
852 case RTE_FLOW_ITEM_TYPE_UDP:
853 retval = sizeof(struct rte_flow_item_udp);
855 case RTE_FLOW_ITEM_TYPE_TCP:
856 retval = sizeof(struct rte_flow_item_tcp);
858 case RTE_FLOW_ITEM_TYPE_VXLAN:
859 retval = sizeof(struct rte_flow_item_vxlan);
861 case RTE_FLOW_ITEM_TYPE_GRE:
862 retval = sizeof(struct rte_flow_item_gre);
864 case RTE_FLOW_ITEM_TYPE_NVGRE:
865 retval = sizeof(struct rte_flow_item_nvgre);
867 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
868 retval = sizeof(struct rte_flow_item_vxlan_gpe);
870 case RTE_FLOW_ITEM_TYPE_MPLS:
871 retval = sizeof(struct rte_flow_item_mpls);
873 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
881 #define MLX5_ENCAP_IPV4_VERSION 0x40
882 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
883 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
884 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
885 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
886 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
887 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
890 * Convert the encap action data from list of rte_flow_item to raw buffer
893 * Pointer to rte_flow_item objects list.
895 * Pointer to the output buffer.
897 * Pointer to the output buffer size.
899 * Pointer to the error structure.
902 * 0 on success, a negative errno value otherwise and rte_errno is set.
905 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
906 size_t *size, struct rte_flow_error *error)
908 struct ether_hdr *eth = NULL;
909 struct vlan_hdr *vlan = NULL;
910 struct ipv4_hdr *ipv4 = NULL;
911 struct ipv6_hdr *ipv6 = NULL;
912 struct udp_hdr *udp = NULL;
913 struct vxlan_hdr *vxlan = NULL;
914 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
915 struct gre_hdr *gre = NULL;
917 size_t temp_size = 0;
920 return rte_flow_error_set(error, EINVAL,
921 RTE_FLOW_ERROR_TYPE_ACTION,
922 NULL, "invalid empty data");
923 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
924 len = flow_dv_get_item_len(items->type);
925 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
926 return rte_flow_error_set(error, EINVAL,
927 RTE_FLOW_ERROR_TYPE_ACTION,
929 "items total size is too big"
930 " for encap action");
931 rte_memcpy((void *)&buf[temp_size], items->spec, len);
932 switch (items->type) {
933 case RTE_FLOW_ITEM_TYPE_ETH:
934 eth = (struct ether_hdr *)&buf[temp_size];
936 case RTE_FLOW_ITEM_TYPE_VLAN:
937 vlan = (struct vlan_hdr *)&buf[temp_size];
939 return rte_flow_error_set(error, EINVAL,
940 RTE_FLOW_ERROR_TYPE_ACTION,
942 "eth header not found");
943 if (!eth->ether_type)
944 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
946 case RTE_FLOW_ITEM_TYPE_IPV4:
947 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
949 return rte_flow_error_set(error, EINVAL,
950 RTE_FLOW_ERROR_TYPE_ACTION,
952 "neither eth nor vlan"
954 if (vlan && !vlan->eth_proto)
955 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
956 else if (eth && !eth->ether_type)
957 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
958 if (!ipv4->version_ihl)
959 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
960 MLX5_ENCAP_IPV4_IHL_MIN;
961 if (!ipv4->time_to_live)
962 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
964 case RTE_FLOW_ITEM_TYPE_IPV6:
965 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
967 return rte_flow_error_set(error, EINVAL,
968 RTE_FLOW_ERROR_TYPE_ACTION,
970 "neither eth nor vlan"
972 if (vlan && !vlan->eth_proto)
973 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
974 else if (eth && !eth->ether_type)
975 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
978 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
979 if (!ipv6->hop_limits)
980 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
982 case RTE_FLOW_ITEM_TYPE_UDP:
983 udp = (struct udp_hdr *)&buf[temp_size];
985 return rte_flow_error_set(error, EINVAL,
986 RTE_FLOW_ERROR_TYPE_ACTION,
988 "ip header not found");
989 if (ipv4 && !ipv4->next_proto_id)
990 ipv4->next_proto_id = IPPROTO_UDP;
991 else if (ipv6 && !ipv6->proto)
992 ipv6->proto = IPPROTO_UDP;
994 case RTE_FLOW_ITEM_TYPE_VXLAN:
995 vxlan = (struct vxlan_hdr *)&buf[temp_size];
997 return rte_flow_error_set(error, EINVAL,
998 RTE_FLOW_ERROR_TYPE_ACTION,
1000 "udp header not found");
1002 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1003 if (!vxlan->vx_flags)
1005 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1007 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1008 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1010 return rte_flow_error_set(error, EINVAL,
1011 RTE_FLOW_ERROR_TYPE_ACTION,
1012 (void *)items->type,
1013 "udp header not found");
1014 if (!vxlan_gpe->proto)
1015 return rte_flow_error_set(error, EINVAL,
1016 RTE_FLOW_ERROR_TYPE_ACTION,
1017 (void *)items->type,
1018 "next protocol not found");
1021 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1022 if (!vxlan_gpe->vx_flags)
1023 vxlan_gpe->vx_flags =
1024 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1026 case RTE_FLOW_ITEM_TYPE_GRE:
1027 case RTE_FLOW_ITEM_TYPE_NVGRE:
1028 gre = (struct gre_hdr *)&buf[temp_size];
1030 return rte_flow_error_set(error, EINVAL,
1031 RTE_FLOW_ERROR_TYPE_ACTION,
1032 (void *)items->type,
1033 "next protocol not found");
1035 return rte_flow_error_set(error, EINVAL,
1036 RTE_FLOW_ERROR_TYPE_ACTION,
1037 (void *)items->type,
1038 "ip header not found");
1039 if (ipv4 && !ipv4->next_proto_id)
1040 ipv4->next_proto_id = IPPROTO_GRE;
1041 else if (ipv6 && !ipv6->proto)
1042 ipv6->proto = IPPROTO_GRE;
1044 case RTE_FLOW_ITEM_TYPE_VOID:
1047 return rte_flow_error_set(error, EINVAL,
1048 RTE_FLOW_ERROR_TYPE_ACTION,
1049 (void *)items->type,
1050 "unsupported item type");
1060 * Convert L2 encap action to DV specification.
1063 * Pointer to rte_eth_dev structure.
1065 * Pointer to action structure.
1066 * @param[in, out] dev_flow
1067 * Pointer to the mlx5_flow.
1069 * Pointer to the error structure.
1072 * 0 on success, a negative errno value otherwise and rte_errno is set.
1075 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1076 const struct rte_flow_action *action,
1077 struct mlx5_flow *dev_flow,
1078 struct rte_flow_error *error)
1080 const struct rte_flow_item *encap_data;
1081 const struct rte_flow_action_raw_encap *raw_encap_data;
1082 struct mlx5_flow_dv_encap_decap_resource res = {
1084 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1085 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1088 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1090 (const struct rte_flow_action_raw_encap *)action->conf;
1091 res.size = raw_encap_data->size;
1092 memcpy(res.buf, raw_encap_data->data, res.size);
1094 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1096 ((const struct rte_flow_action_vxlan_encap *)
1097 action->conf)->definition;
1100 ((const struct rte_flow_action_nvgre_encap *)
1101 action->conf)->definition;
1102 if (flow_dv_convert_encap_data(encap_data, res.buf,
1106 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1107 return rte_flow_error_set(error, EINVAL,
1108 RTE_FLOW_ERROR_TYPE_ACTION,
1109 NULL, "can't create L2 encap action");
1114 * Convert L2 decap action to DV specification.
1117 * Pointer to rte_eth_dev structure.
1118 * @param[in, out] dev_flow
1119 * Pointer to the mlx5_flow.
1121 * Pointer to the error structure.
1124 * 0 on success, a negative errno value otherwise and rte_errno is set.
1127 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1128 struct mlx5_flow *dev_flow,
1129 struct rte_flow_error *error)
1131 struct mlx5_flow_dv_encap_decap_resource res = {
1134 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1135 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1138 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1139 return rte_flow_error_set(error, EINVAL,
1140 RTE_FLOW_ERROR_TYPE_ACTION,
1141 NULL, "can't create L2 decap action");
1146 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1149 * Pointer to rte_eth_dev structure.
1151 * Pointer to action structure.
1152 * @param[in, out] dev_flow
1153 * Pointer to the mlx5_flow.
1155 * Pointer to the flow attributes.
1157 * Pointer to the error structure.
1160 * 0 on success, a negative errno value otherwise and rte_errno is set.
1163 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1164 const struct rte_flow_action *action,
1165 struct mlx5_flow *dev_flow,
1166 const struct rte_flow_attr *attr,
1167 struct rte_flow_error *error)
1169 const struct rte_flow_action_raw_encap *encap_data;
1170 struct mlx5_flow_dv_encap_decap_resource res;
1172 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1173 res.size = encap_data->size;
1174 memcpy(res.buf, encap_data->data, res.size);
1175 res.reformat_type = attr->egress ?
1176 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1177 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1178 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1179 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1180 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1181 return rte_flow_error_set(error, EINVAL,
1182 RTE_FLOW_ERROR_TYPE_ACTION,
1183 NULL, "can't create encap action");
1188 * Validate the modify-header actions.
1190 * @param[in] action_flags
1191 * Holds the actions detected until now.
1193 * Pointer to the modify action.
1195 * Pointer to error structure.
1198 * 0 on success, a negative errno value otherwise and rte_errno is set.
1201 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1202 const struct rte_flow_action *action,
1203 struct rte_flow_error *error)
1205 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1206 return rte_flow_error_set(error, EINVAL,
1207 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1208 NULL, "action configuration not set");
1209 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1210 return rte_flow_error_set(error, EINVAL,
1211 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1212 "can't have encap action before"
1218 * Validate the modify-header MAC address actions.
1220 * @param[in] action_flags
1221 * Holds the actions detected until now.
1223 * Pointer to the modify action.
1224 * @param[in] item_flags
1225 * Holds the items detected.
1227 * Pointer to error structure.
1230 * 0 on success, a negative errno value otherwise and rte_errno is set.
1233 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1234 const struct rte_flow_action *action,
1235 const uint64_t item_flags,
1236 struct rte_flow_error *error)
	/* Common modify-header checks first (conf present, no prior encap). */
1240 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Rewriting a MAC address requires an Ethernet item in the pattern. */
1242 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1243 return rte_flow_error_set(error, EINVAL,
1244 RTE_FLOW_ERROR_TYPE_ACTION,
1246 "no L2 item in pattern");
1252 * Validate the modify-header IPv4 address actions.
1254 * @param[in] action_flags
1255 * Holds the actions detected until now.
1257 * Pointer to the modify action.
1258 * @param[in] item_flags
1259 * Holds the items detected.
1261 * Pointer to error structure.
1264 * 0 on success, a negative errno value otherwise and rte_errno is set.
1267 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1268 const struct rte_flow_action *action,
1269 const uint64_t item_flags,
1270 struct rte_flow_error *error)
	/* Common modify-header checks first (conf present, no prior encap). */
1274 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Rewriting an IPv4 address requires an IPv4 item in the pattern. */
1276 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1277 return rte_flow_error_set(error, EINVAL,
1278 RTE_FLOW_ERROR_TYPE_ACTION,
1280 "no ipv4 item in pattern");
1286 * Validate the modify-header IPv6 address actions.
1288 * @param[in] action_flags
1289 * Holds the actions detected until now.
1291 * Pointer to the modify action.
1292 * @param[in] item_flags
1293 * Holds the items detected.
1295 * Pointer to error structure.
1298 * 0 on success, a negative errno value otherwise and rte_errno is set.
1301 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1302 const struct rte_flow_action *action,
1303 const uint64_t item_flags,
1304 struct rte_flow_error *error)
	/* Common modify-header checks first (conf present, no prior encap). */
1308 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Rewriting an IPv6 address requires an IPv6 item in the pattern. */
1310 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1311 return rte_flow_error_set(error, EINVAL,
1312 RTE_FLOW_ERROR_TYPE_ACTION,
1314 "no ipv6 item in pattern");
1320 * Validate the modify-header TP (transport port) actions.
1322 * @param[in] action_flags
1323 * Holds the actions detected until now.
1325 * Pointer to the modify action.
1326 * @param[in] item_flags
1327 * Holds the items detected.
1329 * Pointer to error structure.
1332 * 0 on success, a negative errno value otherwise and rte_errno is set.
1335 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1336 const struct rte_flow_action *action,
1337 const uint64_t item_flags,
1338 struct rte_flow_error *error)
	/* Common modify-header checks first (conf present, no prior encap). */
1342 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* Rewriting a port number requires a TCP/UDP item in the pattern. */
1344 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1345 return rte_flow_error_set(error, EINVAL,
1346 RTE_FLOW_ERROR_TYPE_ACTION,
1347 NULL, "no transport layer "
1354 * Validate the modify-header TTL actions.
1356 * @param[in] action_flags
1357 * Holds the actions detected until now.
1359 * Pointer to the modify action.
1360 * @param[in] item_flags
1361 * Holds the items detected.
1363 * Pointer to error structure.
1366 * 0 on success, a negative errno value otherwise and rte_errno is set.
1369 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1370 const struct rte_flow_action *action,
1371 const uint64_t item_flags,
1372 struct rte_flow_error *error)
	/* Common modify-header checks first (conf present, no prior encap). */
1376 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	/* TTL/hop-limit rewrite requires an IPv4 or IPv6 item in the pattern. */
1378 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1379 return rte_flow_error_set(error, EINVAL,
1380 RTE_FLOW_ERROR_TYPE_ACTION,
1382 "no IP protocol in pattern");
1388 * Find existing modify-header resource or create and register a new one.
1390 * @param dev[in, out]
1391 * Pointer to rte_eth_dev structure.
1392 * @param[in, out] resource
1393 * Pointer to modify-header resource.
1394 * @param[in, out] dev_flow
1395 * Pointer to the dev_flow.
1397 * pointer to error structure.
1400 * 0 on success otherwise -errno and errno is set.
1403 flow_dv_modify_hdr_resource_register
1404 (struct rte_eth_dev *dev,
1405 struct mlx5_flow_dv_modify_hdr_resource *resource,
1406 struct mlx5_flow *dev_flow,
1407 struct rte_flow_error *error)
1409 struct priv *priv = dev->data->dev_private;
1410 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1412 /* Lookup a matching resource from cache. */
1413 LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
	/* A match requires same table type and an identical action list. */
1414 if (resource->ft_type == cache_resource->ft_type &&
1415 resource->actions_num == cache_resource->actions_num &&
1416 !memcmp((const void *)resource->actions,
1417 (const void *)cache_resource->actions,
1418 (resource->actions_num *
1419 sizeof(resource->actions[0])))) {
1420 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1421 (void *)cache_resource,
1422 rte_atomic32_read(&cache_resource->refcnt));
	/* Cache hit: bump refcount and reuse the existing verbs action. */
1423 rte_atomic32_inc(&cache_resource->refcnt);
1424 dev_flow->dv.modify_hdr = cache_resource;
1428 /* Register new modify-header resource. */
1429 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1430 if (!cache_resource)
1431 return rte_flow_error_set(error, ENOMEM,
1432 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1433 "cannot allocate resource memory");
1434 *cache_resource = *resource;
1435 cache_resource->verbs_action =
1436 mlx5_glue->dv_create_flow_action_modify_header
1438 cache_resource->actions_num *
1439 sizeof(cache_resource->actions[0]),
1440 (uint64_t *)cache_resource->actions,
1441 cache_resource->ft_type);
1442 if (!cache_resource->verbs_action) {
	/* Creation failed: free the copy before reporting the error. */
1443 rte_free(cache_resource);
1444 return rte_flow_error_set(error, ENOMEM,
1445 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1446 NULL, "cannot create action");
1448 rte_atomic32_init(&cache_resource->refcnt);
1449 rte_atomic32_inc(&cache_resource->refcnt);
1450 LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
1451 dev_flow->dv.modify_hdr = cache_resource;
1452 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1453 (void *)cache_resource,
1454 rte_atomic32_read(&cache_resource->refcnt));
1459 * Verify the @p attributes will be correctly understood by the NIC and store
1460 * them in the @p flow if everything is correct.
1463 * Pointer to dev struct.
1464 * @param[in] attributes
1465 * Pointer to flow attributes
1467 * Pointer to error structure.
1470 * 0 on success, a negative errno value otherwise and rte_errno is set.
1473 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1474 const struct rte_flow_attr *attributes,
1475 struct rte_flow_error *error)
1477 struct priv *priv = dev->data->dev_private;
1478 uint32_t priority_max = priv->config.flow_prio - 1;
	/* Only the default group (0) is supported by this engine. */
1480 if (attributes->group)
1481 return rte_flow_error_set(error, ENOTSUP,
1482 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1484 "groups is not supported");
	/* MLX5_FLOW_PRIO_RSVD lets the PMD pick the priority itself. */
1485 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1486 attributes->priority >= priority_max)
1487 return rte_flow_error_set(error, ENOTSUP,
1488 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1490 "priority out of range");
1491 if (attributes->transfer)
1492 return rte_flow_error_set(error, ENOTSUP,
1493 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1495 "transfer is not supported");
	/* Exactly one direction bit must be set. */
1496 if (!(attributes->egress ^ attributes->ingress))
1497 return rte_flow_error_set(error, ENOTSUP,
1498 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1499 "must specify exactly one of "
1500 "ingress or egress");
1505 * Internal validation function. For validating both actions and items.
1508 * Pointer to the rte_eth_dev structure.
1510 * Pointer to the flow attributes.
1512 * Pointer to the list of items.
1513 * @param[in] actions
1514 * Pointer to the list of actions.
1516 * Pointer to the error structure.
1519 * 0 on success, a negative errno value otherwise and rte_errno is set.
1522 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1523 const struct rte_flow_item items[],
1524 const struct rte_flow_action actions[],
1525 struct rte_flow_error *error)
1528 uint64_t action_flags = 0;
1529 uint64_t item_flags = 0;
1530 uint64_t last_item = 0;
	/* 0xff means "any protocol" until an L3 item narrows it down. */
1532 uint8_t next_protocol = 0xff;
1537 ret = flow_dv_validate_attributes(dev, attr, error);
	/* First pass: validate each pattern item and accumulate layer flags. */
1540 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
	/* Items after a tunnel item describe the inner packet. */
1541 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1542 switch (items->type) {
1543 case RTE_FLOW_ITEM_TYPE_VOID:
1545 case RTE_FLOW_ITEM_TYPE_ETH:
1546 ret = mlx5_flow_validate_item_eth(items, item_flags,
1550 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1551 MLX5_FLOW_LAYER_OUTER_L2;
1553 case RTE_FLOW_ITEM_TYPE_VLAN:
1554 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1558 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1559 MLX5_FLOW_LAYER_OUTER_VLAN;
1561 case RTE_FLOW_ITEM_TYPE_IPV4:
1562 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1566 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1567 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	/* Track the masked next-protocol for later tunnel item checks. */
1568 if (items->mask != NULL &&
1569 ((const struct rte_flow_item_ipv4 *)
1570 items->mask)->hdr.next_proto_id) {
1572 ((const struct rte_flow_item_ipv4 *)
1573 (items->spec))->hdr.next_proto_id;
1575 ((const struct rte_flow_item_ipv4 *)
1576 (items->mask))->hdr.next_proto_id;
1578 /* Reset for inner layer. */
1579 next_protocol = 0xff;
1582 case RTE_FLOW_ITEM_TYPE_IPV6:
1583 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1587 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1588 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	/* Track the masked next-header for later tunnel item checks. */
1589 if (items->mask != NULL &&
1590 ((const struct rte_flow_item_ipv6 *)
1591 items->mask)->hdr.proto) {
1593 ((const struct rte_flow_item_ipv6 *)
1594 items->spec)->hdr.proto;
1596 ((const struct rte_flow_item_ipv6 *)
1597 items->mask)->hdr.proto;
1599 /* Reset for inner layer. */
1600 next_protocol = 0xff;
1603 case RTE_FLOW_ITEM_TYPE_TCP:
1604 ret = mlx5_flow_validate_item_tcp
1607 &rte_flow_item_tcp_mask,
1611 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1612 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1614 case RTE_FLOW_ITEM_TYPE_UDP:
1615 ret = mlx5_flow_validate_item_udp(items, item_flags,
1620 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1621 MLX5_FLOW_LAYER_OUTER_L4_UDP;
	/* NVGRE shares GRE validation; both mark the GRE tunnel layer. */
1623 case RTE_FLOW_ITEM_TYPE_GRE:
1624 case RTE_FLOW_ITEM_TYPE_NVGRE:
1625 ret = mlx5_flow_validate_item_gre(items, item_flags,
1626 next_protocol, error);
1629 last_item = MLX5_FLOW_LAYER_GRE;
1631 case RTE_FLOW_ITEM_TYPE_VXLAN:
1632 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1636 last_item = MLX5_FLOW_LAYER_VXLAN;
1638 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1639 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1644 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1646 case RTE_FLOW_ITEM_TYPE_MPLS:
1647 ret = mlx5_flow_validate_item_mpls(dev, items,
1652 last_item = MLX5_FLOW_LAYER_MPLS;
1654 case RTE_FLOW_ITEM_TYPE_META:
1655 ret = flow_dv_validate_item_meta(dev, items, attr,
1659 last_item = MLX5_FLOW_ITEM_METADATA;
1662 return rte_flow_error_set(error, ENOTSUP,
1663 RTE_FLOW_ERROR_TYPE_ITEM,
1664 NULL, "item not supported");
1666 item_flags |= last_item;
	/* Second pass: validate each action against items and prior actions. */
1668 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1669 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1670 return rte_flow_error_set(error, ENOTSUP,
1671 RTE_FLOW_ERROR_TYPE_ACTION,
1672 actions, "too many actions");
1673 switch (actions->type) {
1674 case RTE_FLOW_ACTION_TYPE_VOID:
1676 case RTE_FLOW_ACTION_TYPE_FLAG:
1677 ret = mlx5_flow_validate_action_flag(action_flags,
1681 action_flags |= MLX5_FLOW_ACTION_FLAG;
1684 case RTE_FLOW_ACTION_TYPE_MARK:
1685 ret = mlx5_flow_validate_action_mark(actions,
1690 action_flags |= MLX5_FLOW_ACTION_MARK;
1693 case RTE_FLOW_ACTION_TYPE_DROP:
1694 ret = mlx5_flow_validate_action_drop(action_flags,
1698 action_flags |= MLX5_FLOW_ACTION_DROP;
1701 case RTE_FLOW_ACTION_TYPE_QUEUE:
1702 ret = mlx5_flow_validate_action_queue(actions,
1707 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1710 case RTE_FLOW_ACTION_TYPE_RSS:
1711 ret = mlx5_flow_validate_action_rss(actions,
1716 action_flags |= MLX5_FLOW_ACTION_RSS;
1719 case RTE_FLOW_ACTION_TYPE_COUNT:
1720 ret = mlx5_flow_validate_action_count(dev, attr, error);
1723 action_flags |= MLX5_FLOW_ACTION_COUNT;
1726 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1727 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1728 ret = flow_dv_validate_action_l2_encap(action_flags,
1733 action_flags |= actions->type ==
1734 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
1735 MLX5_FLOW_ACTION_VXLAN_ENCAP :
1736 MLX5_FLOW_ACTION_NVGRE_ENCAP;
1739 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1740 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
1741 ret = flow_dv_validate_action_l2_decap(action_flags,
1745 action_flags |= actions->type ==
1746 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
1747 MLX5_FLOW_ACTION_VXLAN_DECAP :
1748 MLX5_FLOW_ACTION_NVGRE_DECAP;
1751 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1752 ret = flow_dv_validate_action_raw_encap(action_flags,
1757 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
1760 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
1761 ret = flow_dv_validate_action_raw_decap(action_flags,
1766 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
1769 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
1770 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
1771 ret = flow_dv_validate_action_modify_mac(action_flags,
1777 /* Count all modify-header actions as one action. */
1778 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
1780 action_flags |= actions->type ==
1781 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
1782 MLX5_FLOW_ACTION_SET_MAC_SRC :
1783 MLX5_FLOW_ACTION_SET_MAC_DST;
1786 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
1787 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
1788 ret = flow_dv_validate_action_modify_ipv4(action_flags,
1794 /* Count all modify-header actions as one action. */
1795 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
1797 action_flags |= actions->type ==
1798 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
1799 MLX5_FLOW_ACTION_SET_IPV4_SRC :
1800 MLX5_FLOW_ACTION_SET_IPV4_DST;
1802 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
1803 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
1804 ret = flow_dv_validate_action_modify_ipv6(action_flags,
1810 /* Count all modify-header actions as one action. */
1811 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
1813 action_flags |= actions->type ==
1814 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
1815 MLX5_FLOW_ACTION_SET_IPV6_SRC :
1816 MLX5_FLOW_ACTION_SET_IPV6_DST;
1818 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
1819 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
1820 ret = flow_dv_validate_action_modify_tp(action_flags,
1826 /* Count all modify-header actions as one action. */
1827 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
1829 action_flags |= actions->type ==
1830 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
1831 MLX5_FLOW_ACTION_SET_TP_SRC :
1832 MLX5_FLOW_ACTION_SET_TP_DST;
1834 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
1835 case RTE_FLOW_ACTION_TYPE_SET_TTL:
1836 ret = flow_dv_validate_action_modify_ttl(action_flags,
1842 /* Count all modify-header actions as one action. */
1843 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
1845 action_flags |= actions->type ==
1846 RTE_FLOW_ACTION_TYPE_SET_TTL ?
1847 MLX5_FLOW_ACTION_SET_TTL :
1848 MLX5_FLOW_ACTION_DEC_TTL;
1851 return rte_flow_error_set(error, ENOTSUP,
1852 RTE_FLOW_ERROR_TYPE_ACTION,
1854 "action not supported");
	/* An ingress flow must decide the packet's fate (queue/rss/drop). */
1857 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
1858 return rte_flow_error_set(error, EINVAL,
1859 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1860 "no fate action is found")
1865 * Internal preparation function. Allocates the DV flow size,
1866 * this size is constant.
1869 * Pointer to the flow attributes.
1871 * Pointer to the list of items.
1872 * @param[in] actions
1873 * Pointer to the list of actions.
1875 * Pointer to the error structure.
1878 * Pointer to mlx5_flow object on success,
1879 * otherwise NULL and rte_errno is set.
1881 static struct mlx5_flow *
1882 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
1883 const struct rte_flow_item items[] __rte_unused,
1884 const struct rte_flow_action actions[] __rte_unused,
1885 struct rte_flow_error *error)
1887 uint32_t size = sizeof(struct mlx5_flow);
1888 struct mlx5_flow *flow;
1890 flow = rte_calloc(__func__, 1, size, 0);
1892 rte_flow_error_set(error, ENOMEM,
1893 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1894 "not enough memory to create flow");
	/* Match value buffer always spans the full fte_match_param layout. */
1897 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
1903 * Sanity check for match mask and value. Similar to check_valid_spec() in
1904 * kernel driver. If unmasked bit is present in value, it returns failure.
1907 * pointer to match mask buffer.
1908 * @param match_value
1909 * pointer to match value buffer.
1912 * 0 if valid, -EINVAL otherwise.
1915 flow_dv_check_valid_spec(void *match_mask, void *match_value)
1917 uint8_t *m = match_mask;
1918 uint8_t *v = match_value;
	/* Scan byte by byte; every value bit must be covered by the mask. */
1921 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
1924 "match_value differs from match_criteria"
1925 " %p[%u] != %p[%u]",
1926 match_value, i, match_mask, i);
1935 * Add Ethernet item to matcher and to the value.
1937 * @param[in, out] matcher
1939 * @param[in, out] key
1940 * Flow matcher value.
1942 * Flow pattern to translate.
1944 * Item is inner pattern.
1947 flow_dv_translate_item_eth(void *matcher, void *key,
1948 const struct rte_flow_item *item, int inner)
1950 const struct rte_flow_item_eth *eth_m = item->mask;
1951 const struct rte_flow_item_eth *eth_v = item->spec;
	/* Default mask when the item carries none: match all ETH fields. */
1952 const struct rte_flow_item_eth nic_mask = {
1953 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1954 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1955 .type = RTE_BE16(0xffff),
	/* Select inner vs. outer header block in the match parameter. */
1967 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1969 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1971 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1973 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1975 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
1976 &eth_m->dst, sizeof(eth_m->dst));
1977 /* The value must be in the range of the mask. */
1978 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
1979 for (i = 0; i < sizeof(eth_m->dst); ++i)
1980 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
1981 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
1982 &eth_m->src, sizeof(eth_m->src));
1983 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
1984 /* The value must be in the range of the mask. */
	/* NOTE(review): bound uses sizeof(eth_m->dst) for the src copy;
	 * both MAC fields are 6 bytes so behavior is identical, but
	 * sizeof(eth_m->src) would read better.
	 */
1985 for (i = 0; i < sizeof(eth_m->dst); ++i)
1986 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
1987 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
1988 rte_be_to_cpu_16(eth_m->type));
1989 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
1990 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
1994 * Add VLAN item to matcher and to the value.
1996 * @param[in, out] matcher
1998 * @param[in, out] key
1999 * Flow matcher value.
2001 * Flow pattern to translate.
2003 * Item is inner pattern.
2006 flow_dv_translate_item_vlan(void *matcher, void *key,
2007 const struct rte_flow_item *item,
2010 const struct rte_flow_item_vlan *vlan_m = item->mask;
2011 const struct rte_flow_item_vlan *vlan_v = item->spec;
	/* Default mask: full 12-bit VID plus full inner ethertype. */
2012 const struct rte_flow_item_vlan nic_mask = {
2013 .tci = RTE_BE16(0x0fff),
2014 .inner_type = RTE_BE16(0xffff),
	/* Select inner vs. outer header block in the match parameter. */
2026 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2028 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2030 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2032 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2034 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2035 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2036 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2037 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	/* TCI layout: VID in bits 0-11, CFI/DEI in bit 12, PCP in 13-15. */
2038 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2039 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2040 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2041 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2042 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2043 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2047 * Add IPV4 item to matcher and to the value.
2049 * @param[in, out] matcher
2051 * @param[in, out] key
2052 * Flow matcher value.
2054 * Flow pattern to translate.
2056 * Item is inner pattern.
2059 flow_dv_translate_item_ipv4(void *matcher, void *key,
2060 const struct rte_flow_item *item,
2063 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2064 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask used when the item does not provide one. */
2065 const struct rte_flow_item_ipv4 nic_mask = {
2067 .src_addr = RTE_BE32(0xffffffff),
2068 .dst_addr = RTE_BE32(0xffffffff),
2069 .type_of_service = 0xff,
2070 .next_proto_id = 0xff,
	/* Select inner vs. outer header block in the match parameter. */
2080 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2082 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2084 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2086 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Always pin the IP version field to 4. */
2088 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2089 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* Addresses are kept big-endian; the value is ANDed with the mask. */
2094 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2095 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2096 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2097 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2098 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2099 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2100 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2101 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2102 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2103 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2104 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2105 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* ToS byte is split: ECN in bits 0-1, DSCP in bits 2-7. */
2106 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2107 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2108 ipv4_m->hdr.type_of_service);
2109 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2110 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2111 ipv4_m->hdr.type_of_service >> 2);
2112 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2113 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2114 ipv4_m->hdr.next_proto_id);
2115 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2116 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2120 * Add IPV6 item to matcher and to the value.
2122 * @param[in, out] matcher
2124 * @param[in, out] key
2125 * Flow matcher value.
2127 * Flow pattern to translate.
2129 * Item is inner pattern.
2132 flow_dv_translate_item_ipv6(void *matcher, void *key,
2133 const struct rte_flow_item *item,
2136 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2137 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask used when the item does not provide one. */
2138 const struct rte_flow_item_ipv6 nic_mask = {
2141 "\xff\xff\xff\xff\xff\xff\xff\xff"
2142 "\xff\xff\xff\xff\xff\xff\xff\xff",
2144 "\xff\xff\xff\xff\xff\xff\xff\xff"
2145 "\xff\xff\xff\xff\xff\xff\xff\xff",
2146 .vtc_flow = RTE_BE32(0xffffffff),
2153 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2154 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* Select inner vs. outer header block in the match parameter. */
2163 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2165 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2167 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2169 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Always pin the IP version field to 6. */
2171 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2172 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* 16-byte addresses: mask copied as-is, value ANDed per byte. */
2177 size = sizeof(ipv6_m->hdr.dst_addr);
2178 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2179 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2180 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2181 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2182 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2183 for (i = 0; i < size; ++i)
2184 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2185 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2186 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2187 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2188 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2189 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2190 for (i = 0; i < size; ++i)
2191 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* vtc_flow packs version(4)/TC(8)/flow-label(20); shift to split. */
2193 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2194 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2195 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2196 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2197 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2198 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Flow label lives in misc parameters, split by inner/outer. */
2201 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2203 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2206 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2208 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2212 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2214 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2215 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2219 * Add TCP item to matcher and to the value.
2221 * @param[in, out] matcher
2223 * @param[in, out] key
2224 * Flow matcher value.
2226 * Flow pattern to translate.
2228 * Item is inner pattern.
2231 flow_dv_translate_item_tcp(void *matcher, void *key,
2232 const struct rte_flow_item *item,
2235 const struct rte_flow_item_tcp *tcp_m = item->mask;
2236 const struct rte_flow_item_tcp *tcp_v = item->spec;
	/* Select inner vs. outer header block in the match parameter. */
2241 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2243 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2245 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2247 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Presence of the TCP item pins ip_protocol to TCP. */
2249 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2250 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
	/* Fall back to the rte_flow default mask when none is given. */
2254 tcp_m = &rte_flow_item_tcp_mask;
2255 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2256 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2257 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2258 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2259 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2260 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2261 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2262 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2266 * Add UDP item to matcher and to the value.
2268 * @param[in, out] matcher
2270 * @param[in, out] key
2271 * Flow matcher value.
2273 * Flow pattern to translate.
2275 * Item is inner pattern.
2278 flow_dv_translate_item_udp(void *matcher, void *key,
2279 const struct rte_flow_item *item,
2282 const struct rte_flow_item_udp *udp_m = item->mask;
2283 const struct rte_flow_item_udp *udp_v = item->spec;
	/* Select inner vs. outer header block in the match parameter. */
2288 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2290 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2292 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2294 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Presence of the UDP item pins ip_protocol to UDP. */
2296 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2297 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
	/* Fall back to the rte_flow default mask when none is given. */
2301 udp_m = &rte_flow_item_udp_mask;
2302 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2303 rte_be_to_cpu_16(udp_m->hdr.src_port));
2304 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2305 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2306 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2307 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2308 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2309 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2313 * Add GRE item to matcher and to the value.
2315 * @param[in, out] matcher
2317 * @param[in, out] key
2318 * Flow matcher value.
2320 * Flow pattern to translate.
2322 * Item is inner pattern.
2325 flow_dv_translate_item_gre(void *matcher, void *key,
2326 const struct rte_flow_item *item,
2329 const struct rte_flow_item_gre *gre_m = item->mask;
2330 const struct rte_flow_item_gre *gre_v = item->spec;
2333 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2334 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* Select inner vs. outer header block in the match parameter. */
2337 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2339 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2341 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2343 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Presence of the GRE item pins ip_protocol to GRE. */
2345 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2346 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	/* Fall back to the rte_flow default mask when none is given. */
2350 gre_m = &rte_flow_item_gre_mask;
2351 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2352 rte_be_to_cpu_16(gre_m->protocol));
2353 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2354 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2358 * Add NVGRE item to matcher and to the value.
2360 * @param[in, out] matcher
2362 * @param[in, out] key
2363 * Flow matcher value.
2365 * Flow pattern to translate.
2367 * Item is inner pattern.
2370 flow_dv_translate_item_nvgre(void *matcher, void *key,
2371 const struct rte_flow_item *item,
2374 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2375 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2376 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2377 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* TNI and flow_id are contiguous and copied together below. */
2378 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2379 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
	/* NVGRE shares the GRE header layout; reuse the GRE translation. */
2385 flow_dv_translate_item_gre(matcher, key, item, inner);
	/* Fall back to the rte_flow default mask when none is given. */
2389 nvgre_m = &rte_flow_item_nvgre_mask;
2390 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2391 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2392 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2393 memcpy(gre_key_m, tni_flow_id_m, size);
2394 for (i = 0; i < size; ++i)
2395 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2399 * Add VXLAN item to matcher and to the value.
2401 * @param[in, out] matcher
2403 * @param[in, out] key
2404 * Flow matcher value.
2406 * Flow pattern to translate.
2408 * Item is inner pattern.
2411 flow_dv_translate_item_vxlan(void *matcher, void *key,
2412 const struct rte_flow_item *item,
2415 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2416 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2419 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2420 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* Select inner vs. outer header block in the match parameter. */
2428 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2430 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2432 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2434 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* This helper serves both VXLAN and VXLAN-GPE items. */
2436 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2437 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Pin the well-known UDP dport only if not already matched on. */
2438 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2439 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2440 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	/* Fall back to the rte_flow default mask when none is given. */
2445 vxlan_m = &rte_flow_item_vxlan_mask;
2446 size = sizeof(vxlan_m->vni);
2447 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2448 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2449 memcpy(vni_m, vxlan_m->vni, size);
2450 for (i = 0; i < size; ++i)
2451 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2455 * Add MPLS item to matcher and to the value.
2457 * @param[in, out] matcher
2459 * @param[in, out] key
2460 * Flow matcher value.
2462 * Flow pattern to translate.
2463 * @param[in] prev_layer
2464 * The protocol layer indicated in previous item.
2466 * Item is inner pattern.
2469 flow_dv_translate_item_mpls(void *matcher, void *key,
2470 const struct rte_flow_item *item,
2471 uint64_t prev_layer,
	/* MPLS label stack entry is a single big-endian 32-bit word. */
2474 const uint32_t *in_mpls_m = item->mask;
2475 const uint32_t *in_mpls_v = item->spec;
2476 uint32_t *out_mpls_m = 0;
2477 uint32_t *out_mpls_v = 0;
2478 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2479 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2480 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2482 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2483 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2484 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Pin the encapsulating protocol according to the previous item. */
2486 switch (prev_layer) {
2487 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2488 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2489 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2490 MLX5_UDP_PORT_MPLS);
2492 case MLX5_FLOW_LAYER_GRE:
2493 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2494 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2498 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2499 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
	/* Fall back to the rte_flow default mask when none is given. */
2506 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Pick the misc2 field that corresponds to the encapsulation. */
2507 switch (prev_layer) {
2508 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2510 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2511 outer_first_mpls_over_udp);
2513 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2514 outer_first_mpls_over_udp);
2516 case MLX5_FLOW_LAYER_GRE:
2518 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2519 outer_first_mpls_over_gre);
2521 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2522 outer_first_mpls_over_gre);
2525 /* Inner MPLS not over GRE is not supported. */
2528 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2532 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
	/* Both pointers are set only for supported encapsulations. */
2538 if (out_mpls_m && out_mpls_v) {
2539 *out_mpls_m = *in_mpls_m;
2540 *out_mpls_v = *in_mpls_v & *in_mpls_m;
/*
 * NOTE(review): mangled paste — lines carry original numbers, some elided
 * (blank lines, `if` guards around the mask default). Code byte-identical.
 */
2545 * Add META item to matcher
2547 * @param[in, out] matcher
2549 * @param[in, out] key
2550 * Flow matcher value.
2552 * Flow pattern to translate.
2554 * Item is inner pattern.
2557 flow_dv_translate_item_meta(void *matcher, void *key,
2558 const struct rte_flow_item *item)
2560 const struct rte_flow_item_meta *meta_m;
2561 const struct rte_flow_item_meta *meta_v;
2563 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2565 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
/* Use the item's mask when supplied, otherwise the rte_flow default mask
 * (the guarding `if`/`spec` checks appear elided in this paste). */
2567 meta_m = (const void *)item->mask;
2569 meta_m = &rte_flow_item_meta_mask;
2570 meta_v = (const void *)item->spec;
/* META data is carried big-endian in the item; convert to CPU order and
 * program metadata_reg_a with mask and masked value. */
2572 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2573 rte_be_to_cpu_32(meta_m->data));
2574 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2575 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/* All-zero reference buffer used to test whether a given header section of
 * the matcher criteria is entirely unset. */
2579 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Evaluates to non-zero when the named header section of match_criteria is
 * all zeroes (i.e. that criteria group is unused). */
2581 #define HEADER_IS_ZERO(match_criteria, headers) \
2582 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2583 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
/*
 * NOTE(review): mangled paste — original line numbers embedded, braces and
 * the `static uint8_t` return-type line elided. Code byte-identical.
 */
2586 * Calculate flow matcher enable bitmap.
2588 * @param match_criteria
2589 * Pointer to flow matcher criteria.
2592 * Bitmap of enabled fields.
2595 flow_dv_matcher_enable(uint32_t *match_criteria)
2597 uint8_t match_criteria_enable;
/* One bit per non-empty criteria group: outer headers, misc, inner
 * headers and misc2, at their respective MLX5_MATCH_CRITERIA bits. */
2599 match_criteria_enable =
2600 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2601 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2602 match_criteria_enable |=
2603 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2604 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2605 match_criteria_enable |=
2606 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2607 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2608 match_criteria_enable |=
2609 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2610 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2612 return match_criteria_enable;
/*
 * NOTE(review): mangled paste — original line numbers embedded; several
 * lines elided (struct-init tail, `return 0;` paths, allocation-failure
 * guard). Code byte-identical; comments only.
 */
2616 * Register the flow matcher.
2618 * @param dev[in, out]
2619 * Pointer to rte_eth_dev structure.
2620 * @param[in, out] matcher
2621 * Pointer to flow matcher.
2622 * @parm[in, out] dev_flow
2623 * Pointer to the dev_flow.
2625 * pointer to error structure.
2628 * 0 on success otherwise -errno and errno is set.
2631 flow_dv_matcher_register(struct rte_eth_dev *dev,
2632 struct mlx5_flow_dv_matcher *matcher,
2633 struct mlx5_flow *dev_flow,
2634 struct rte_flow_error *error)
2636 struct priv *priv = dev->data->dev_private;
2637 struct mlx5_flow_dv_matcher *cache_matcher;
2638 struct mlx5dv_flow_matcher_attr dv_attr = {
2639 .type = IBV_FLOW_ATTR_NORMAL,
2640 .match_mask = (void *)&matcher->mask,
2643 /* Lookup from cache. */
/* A cached matcher is reused when CRC, priority, direction and the full
 * mask buffer all match; reuse just bumps the refcount. */
2644 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
2645 if (matcher->crc == cache_matcher->crc &&
2646 matcher->priority == cache_matcher->priority &&
2647 matcher->egress == cache_matcher->egress &&
2648 !memcmp((const void *)matcher->mask.buf,
2649 (const void *)cache_matcher->mask.buf,
2650 cache_matcher->mask.size)) {
2652 "priority %hd use %s matcher %p: refcnt %d++",
2653 cache_matcher->priority,
2654 cache_matcher->egress ? "tx" : "rx",
2655 (void *)cache_matcher,
2656 rte_atomic32_read(&cache_matcher->refcnt));
2657 rte_atomic32_inc(&cache_matcher->refcnt);
2658 dev_flow->dv.matcher = cache_matcher;
2662 /* Register new matcher. */
2663 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
/* NOTE(review): the NULL-check guard for the allocation appears elided
 * here (line 2664 in the original); the error path below belongs to it. */
2665 return rte_flow_error_set(error, ENOMEM,
2666 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2667 "cannot allocate matcher memory");
2668 *cache_matcher = *matcher;
2669 dv_attr.match_criteria_enable =
2670 flow_dv_matcher_enable(cache_matcher->mask.buf);
2671 dv_attr.priority = matcher->priority;
2672 if (matcher->egress)
2673 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
/* Create the Verbs/DV matcher object; on failure free the cache entry. */
2674 cache_matcher->matcher_object =
2675 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
2676 if (!cache_matcher->matcher_object) {
2677 rte_free(cache_matcher);
2678 return rte_flow_error_set(error, ENOMEM,
2679 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2680 NULL, "cannot create matcher");
2682 rte_atomic32_inc(&cache_matcher->refcnt);
2683 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
2684 dev_flow->dv.matcher = cache_matcher;
2685 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
2686 cache_matcher->priority,
2687 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
2688 rte_atomic32_read(&cache_matcher->refcnt));
/*
 * NOTE(review): mangled paste — original line numbers embedded; many lines
 * elided throughout (break statements, `actions_n` increments, error
 * returns, closing braces). Code byte-identical; comments only.
 */
2693 * Fill the flow with DV spec.
2696 * Pointer to rte_eth_dev structure.
2697 * @param[in, out] dev_flow
2698 * Pointer to the sub flow.
2700 * Pointer to the flow attributes.
2702 * Pointer to the list of items.
2703 * @param[in] actions
2704 * Pointer to the list of actions.
2706 * Pointer to the error structure.
2709 * 0 on success, a negative errno value otherwise and rte_ernno is set.
2712 flow_dv_translate(struct rte_eth_dev *dev,
2713 struct mlx5_flow *dev_flow,
2714 const struct rte_flow_attr *attr,
2715 const struct rte_flow_item items[],
2716 const struct rte_flow_action actions[],
2717 struct rte_flow_error *error)
2719 struct priv *priv = dev->data->dev_private;
2720 struct rte_flow *flow = dev_flow->flow;
2721 uint64_t item_flags = 0;
2722 uint64_t last_item = 0;
2723 uint64_t action_flags = 0;
2724 uint64_t priority = attr->priority;
2725 struct mlx5_flow_dv_matcher matcher = {
2727 .size = sizeof(matcher.mask.buf),
2731 bool actions_end = false;
/* Modify-header resource accumulates all header-rewrite sub-actions and is
 * registered once, at RTE_FLOW_ACTION_TYPE_END. */
2732 struct mlx5_flow_dv_modify_hdr_resource res = {
2733 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2734 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
2736 union flow_dv_attr flow_attr = { .attr = 0 };
/* Reserved priority means "use the lowest configured flow priority". */
2738 if (priority == MLX5_FLOW_PRIO_RSVD)
2739 priority = priv->config.flow_prio - 1;
/* Pass 1: translate actions into dev_flow->dv.actions[] and action_flags. */
2740 for (; !actions_end ; actions++) {
2741 const struct rte_flow_action_queue *queue;
2742 const struct rte_flow_action_rss *rss;
2743 const struct rte_flow_action *action = actions;
2744 const uint8_t *rss_key;
2746 switch (actions->type) {
2747 case RTE_FLOW_ACTION_TYPE_VOID:
2749 case RTE_FLOW_ACTION_TYPE_FLAG:
2750 dev_flow->dv.actions[actions_n].type =
2751 MLX5DV_FLOW_ACTION_TAG;
2752 dev_flow->dv.actions[actions_n].tag_value =
2753 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2755 action_flags |= MLX5_FLOW_ACTION_FLAG;
2757 case RTE_FLOW_ACTION_TYPE_MARK:
2758 dev_flow->dv.actions[actions_n].type =
2759 MLX5DV_FLOW_ACTION_TAG;
2760 dev_flow->dv.actions[actions_n].tag_value =
2762 (((const struct rte_flow_action_mark *)
2763 (actions->conf))->id);
2765 action_flags |= MLX5_FLOW_ACTION_MARK;
2767 case RTE_FLOW_ACTION_TYPE_DROP:
2768 dev_flow->dv.actions[actions_n].type =
2769 MLX5DV_FLOW_ACTION_DROP;
2770 action_flags |= MLX5_FLOW_ACTION_DROP;
/* QUEUE: single destination queue stored in the flow's queue array. */
2772 case RTE_FLOW_ACTION_TYPE_QUEUE:
2773 queue = actions->conf;
2774 flow->rss.queue_num = 1;
2775 (*flow->queue)[0] = queue->index;
2776 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2778 case RTE_FLOW_ACTION_TYPE_RSS:
2779 rss = actions->conf;
2781 memcpy((*flow->queue), rss->queue,
2782 rss->queue_num * sizeof(uint16_t));
2783 flow->rss.queue_num = rss->queue_num;
2784 /* NULL RSS key indicates default RSS key. */
2785 rss_key = !rss->key ? rss_hash_default_key : rss->key;
2786 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
2787 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
2788 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
2789 flow->rss.level = rss->level;
2790 action_flags |= MLX5_FLOW_ACTION_RSS;
2792 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2793 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2794 if (flow_dv_create_action_l2_encap(dev, actions,
2797 dev_flow->dv.actions[actions_n].type =
2798 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2799 dev_flow->dv.actions[actions_n].action =
2800 dev_flow->dv.encap_decap->verbs_action;
2802 action_flags |= actions->type ==
2803 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2804 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2805 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2807 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2808 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2809 if (flow_dv_create_action_l2_decap(dev, dev_flow,
2812 dev_flow->dv.actions[actions_n].type =
2813 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2814 dev_flow->dv.actions[actions_n].action =
2815 dev_flow->dv.encap_decap->verbs_action;
2817 action_flags |= actions->type ==
2818 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2819 MLX5_FLOW_ACTION_VXLAN_DECAP :
2820 MLX5_FLOW_ACTION_NVGRE_DECAP;
2822 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2823 /* Handle encap with preceding decap. */
2824 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
2825 if (flow_dv_create_action_raw_encap
2826 (dev, actions, dev_flow, attr, error))
2828 dev_flow->dv.actions[actions_n].type =
2829 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2830 dev_flow->dv.actions[actions_n].action =
2831 dev_flow->dv.encap_decap->verbs_action;
2833 /* Handle encap without preceding decap. */
2834 if (flow_dv_create_action_l2_encap(dev, actions,
2838 dev_flow->dv.actions[actions_n].type =
2839 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2840 dev_flow->dv.actions[actions_n].action =
2841 dev_flow->dv.encap_decap->verbs_action;
2844 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2846 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2847 /* Check if this decap is followed by encap. */
2848 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
2849 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
2852 /* Handle decap only if it isn't followed by encap. */
2853 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2854 if (flow_dv_create_action_l2_decap(dev,
2858 dev_flow->dv.actions[actions_n].type =
2859 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2860 dev_flow->dv.actions[actions_n].action =
2861 dev_flow->dv.encap_decap->verbs_action;
2864 /* If decap is followed by encap, handle it at encap. */
2865 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
/* Header-modify actions below only accumulate into `res`; the single DV
 * action is created at END once all of them are collected. */
2867 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2868 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2869 if (flow_dv_convert_action_modify_mac(&res, actions,
2872 action_flags |= actions->type ==
2873 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2874 MLX5_FLOW_ACTION_SET_MAC_SRC :
2875 MLX5_FLOW_ACTION_SET_MAC_DST;
2877 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2878 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2879 if (flow_dv_convert_action_modify_ipv4(&res, actions,
2882 action_flags |= actions->type ==
2883 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2884 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2885 MLX5_FLOW_ACTION_SET_IPV4_DST;
2887 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2888 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2889 if (flow_dv_convert_action_modify_ipv6(&res, actions,
2892 action_flags |= actions->type ==
2893 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2894 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2895 MLX5_FLOW_ACTION_SET_IPV6_DST;
2897 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2898 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2899 if (flow_dv_convert_action_modify_tp(&res, actions,
2903 action_flags |= actions->type ==
2904 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2905 MLX5_FLOW_ACTION_SET_TP_SRC :
2906 MLX5_FLOW_ACTION_SET_TP_DST;
2908 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2909 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
2913 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
2915 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2916 if (flow_dv_convert_action_modify_ttl(&res, actions,
2920 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
2922 case RTE_FLOW_ACTION_TYPE_END:
2924 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
2925 /* create modify action if needed. */
2926 if (flow_dv_modify_hdr_resource_register
2931 dev_flow->dv.actions[actions_n].type =
2932 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2933 dev_flow->dv.actions[actions_n].action =
2934 dev_flow->dv.modify_hdr->verbs_action;
2942 dev_flow->dv.actions_n = actions_n;
2943 flow->actions = action_flags;
/* Pass 2: translate pattern items into the matcher mask and flow value,
 * tracking tunnel state so inner vs outer layers are distinguished. */
2944 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2945 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2946 void *match_mask = matcher.mask.buf;
2947 void *match_value = dev_flow->dv.value.buf;
2949 switch (items->type) {
2950 case RTE_FLOW_ITEM_TYPE_ETH:
2951 flow_dv_translate_item_eth(match_mask, match_value,
2953 matcher.priority = MLX5_PRIORITY_MAP_L2;
2954 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2955 MLX5_FLOW_LAYER_OUTER_L2;
2957 case RTE_FLOW_ITEM_TYPE_VLAN:
2958 flow_dv_translate_item_vlan(match_mask, match_value,
2960 matcher.priority = MLX5_PRIORITY_MAP_L2;
2961 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
2962 MLX5_FLOW_LAYER_INNER_VLAN) :
2963 (MLX5_FLOW_LAYER_OUTER_L2 |
2964 MLX5_FLOW_LAYER_OUTER_VLAN);
2966 case RTE_FLOW_ITEM_TYPE_IPV4:
2967 flow_dv_translate_item_ipv4(match_mask, match_value,
2969 matcher.priority = MLX5_PRIORITY_MAP_L3;
2970 dev_flow->dv.hash_fields |=
2971 mlx5_flow_hashfields_adjust
2973 MLX5_IPV4_LAYER_TYPES,
2974 MLX5_IPV4_IBV_RX_HASH);
2975 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2976 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2978 case RTE_FLOW_ITEM_TYPE_IPV6:
2979 flow_dv_translate_item_ipv6(match_mask, match_value,
2981 matcher.priority = MLX5_PRIORITY_MAP_L3;
2982 dev_flow->dv.hash_fields |=
2983 mlx5_flow_hashfields_adjust
2985 MLX5_IPV6_LAYER_TYPES,
2986 MLX5_IPV6_IBV_RX_HASH);
2987 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2988 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2990 case RTE_FLOW_ITEM_TYPE_TCP:
2991 flow_dv_translate_item_tcp(match_mask, match_value,
2993 matcher.priority = MLX5_PRIORITY_MAP_L4;
2994 dev_flow->dv.hash_fields |=
2995 mlx5_flow_hashfields_adjust
2996 (dev_flow, tunnel, ETH_RSS_TCP,
2997 IBV_RX_HASH_SRC_PORT_TCP |
2998 IBV_RX_HASH_DST_PORT_TCP);
2999 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3000 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3002 case RTE_FLOW_ITEM_TYPE_UDP:
3003 flow_dv_translate_item_udp(match_mask, match_value,
3005 matcher.priority = MLX5_PRIORITY_MAP_L4;
3006 dev_flow->dv.hash_fields |=
3007 mlx5_flow_hashfields_adjust
3008 (dev_flow, tunnel, ETH_RSS_UDP,
3009 IBV_RX_HASH_SRC_PORT_UDP |
3010 IBV_RX_HASH_DST_PORT_UDP);
3011 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3012 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3014 case RTE_FLOW_ITEM_TYPE_GRE:
3015 flow_dv_translate_item_gre(match_mask, match_value,
3017 last_item = MLX5_FLOW_LAYER_GRE;
3019 case RTE_FLOW_ITEM_TYPE_NVGRE:
3020 flow_dv_translate_item_nvgre(match_mask, match_value,
3022 last_item = MLX5_FLOW_LAYER_GRE;
3024 case RTE_FLOW_ITEM_TYPE_VXLAN:
3025 flow_dv_translate_item_vxlan(match_mask, match_value,
3027 last_item = MLX5_FLOW_LAYER_VXLAN;
/* VXLAN-GPE reuses the VXLAN translator (matching on VNI/dport). */
3029 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3030 flow_dv_translate_item_vxlan(match_mask, match_value,
3032 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3034 case RTE_FLOW_ITEM_TYPE_MPLS:
3035 flow_dv_translate_item_mpls(match_mask, match_value,
3036 items, last_item, tunnel);
3037 last_item = MLX5_FLOW_LAYER_MPLS;
3039 case RTE_FLOW_ITEM_TYPE_META:
3040 flow_dv_translate_item_meta(match_mask, match_value,
3042 last_item = MLX5_FLOW_ITEM_METADATA;
3047 item_flags |= last_item;
3049 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3050 dev_flow->dv.value.buf));
3051 dev_flow->layers = item_flags;
3052 /* Register matcher. */
/* Matcher identity is (mask CRC, adjusted priority, direction); the
 * register call below dedups against the per-port matcher cache. */
3053 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3055 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3057 matcher.egress = attr->egress;
3058 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
/*
 * NOTE(review): mangled paste — original line numbers embedded; some lines
 * elided (declarations of `dv`/`n`/`err`, NULL checks, `return 0;` and the
 * `error:` label). Code byte-identical; comments only.
 */
3064 * Apply the flow to the NIC.
3067 * Pointer to the Ethernet device structure.
3068 * @param[in, out] flow
3069 * Pointer to flow structure.
3071 * Pointer to error structure.
3074 * 0 on success, a negative errno value otherwise and rte_errno is set.
3077 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3078 struct rte_flow_error *error)
3080 struct mlx5_flow_dv *dv;
3081 struct mlx5_flow *dev_flow;
/* For each sub-flow: resolve a destination (drop queue or hash RX queue),
 * append it as a DEST_IBV_QP action, then create the DV flow rule. */
3085 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3088 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3089 dv->hrxq = mlx5_hrxq_drop_new(dev);
3093 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3094 "cannot get drop hash queue");
3097 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3098 dv->actions[n].qp = dv->hrxq->qp;
3100 } else if (flow->actions &
3101 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3102 struct mlx5_hrxq *hrxq;
/* Reuse an existing hash RX queue when one matches; otherwise create
 * a new one keyed on RSS key, hash fields and queue list. */
3104 hrxq = mlx5_hrxq_get(dev, flow->key,
3105 MLX5_RSS_HASH_KEY_LEN,
3108 flow->rss.queue_num);
3110 hrxq = mlx5_hrxq_new
3111 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3112 dv->hash_fields, (*flow->queue),
3113 flow->rss.queue_num,
3114 !!(dev_flow->layers &
3115 MLX5_FLOW_LAYER_TUNNEL));
3119 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3120 "cannot get hash queue");
3124 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3125 dv->actions[n].qp = hrxq->qp;
3129 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3130 (void *)&dv->value, n,
3133 rte_flow_error_set(error, errno,
3134 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3136 "hardware refuses to create flow");
/* Error path: release any queues acquired so far, preserving rte_errno
 * across the cleanup calls. */
3142 err = rte_errno; /* Save rte_errno before cleanup. */
3143 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3144 struct mlx5_flow_dv *dv = &dev_flow->dv;
3146 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3147 mlx5_hrxq_drop_release(dev);
3149 mlx5_hrxq_release(dev, dv->hrxq);
3153 rte_errno = err; /* Restore rte_errno. */
/*
 * NOTE(review): mangled paste — original line numbers embedded; the
 * rte_free of the matcher and the final return lines appear elided.
 */
3158 * Release the flow matcher.
3161 * Pointer to Ethernet device.
3163 * Pointer to mlx5_flow.
3166 * 1 while a reference on it exists, 0 when freed.
3169 flow_dv_matcher_release(struct rte_eth_dev *dev,
3170 struct mlx5_flow *flow)
3172 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3174 assert(matcher->matcher_object);
3175 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3176 dev->data->port_id, (void *)matcher,
3177 rte_atomic32_read(&matcher->refcnt));
/* Last reference: destroy the DV matcher object and unlink it from the
 * per-port matcher cache. */
3178 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3179 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3180 (matcher->matcher_object));
3181 LIST_REMOVE(matcher, next);
3183 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3184 dev->data->port_id, (void *)matcher);
/*
 * NOTE(review): mangled paste — original line numbers embedded; return
 * statements appear elided. Code byte-identical; comments only.
 */
3191 * Release an encap/decap resource.
3194 * Pointer to mlx5_flow.
3197 * 1 while a reference on it exists, 0 when freed.
3200 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3202 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3203 flow->dv.encap_decap;
3205 assert(cache_resource->verbs_action);
3206 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3207 (void *)cache_resource,
3208 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the Verbs action, unlink and free the cache
 * entry. */
3209 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3210 claim_zero(mlx5_glue->destroy_flow_action
3211 (cache_resource->verbs_action));
3212 LIST_REMOVE(cache_resource, next);
3213 rte_free(cache_resource);
3214 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3215 (void *)cache_resource);
/*
 * NOTE(review): mangled paste — original line numbers embedded; return
 * statements appear elided. Mirrors the encap/decap release above.
 */
3222 * Release a modify-header resource.
3225 * Pointer to mlx5_flow.
3228 * 1 while a reference on it exists, 0 when freed.
3231 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3233 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3234 flow->dv.modify_hdr;
3236 assert(cache_resource->verbs_action);
3237 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3238 (void *)cache_resource,
3239 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the Verbs action, unlink and free the cache
 * entry. */
3240 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3241 claim_zero(mlx5_glue->destroy_flow_action
3242 (cache_resource->verbs_action));
3243 LIST_REMOVE(cache_resource, next);
3244 rte_free(cache_resource);
3245 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3246 (void *)cache_resource);
/*
 * NOTE(review): mangled paste — original line numbers embedded; NULL
 * guards and assignments clearing dv->flow/dv->hrxq appear elided.
 */
3253 * Remove the flow from the NIC but keeps it in memory.
3256 * Pointer to Ethernet device.
3257 * @param[in, out] flow
3258 * Pointer to flow structure.
3261 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3263 struct mlx5_flow_dv *dv;
3264 struct mlx5_flow *dev_flow;
/* Destroy each sub-flow's DV rule and release its RX queue references;
 * the flow object itself stays allocated for possible re-apply. */
3268 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3271 claim_zero(mlx5_glue->destroy_flow(dv->flow));
3275 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3276 mlx5_hrxq_drop_release(dev);
3278 mlx5_hrxq_release(dev, dv->hrxq);
3283 flow->counter = NULL;
/*
 * NOTE(review): mangled paste — original line numbers embedded; the NULL
 * guard on `flow` and the final rte_free of dev_flow appear elided.
 */
3287 * Remove the flow from the NIC and the memory.
3290 * Pointer to the Ethernet device structure.
3291 * @param[in, out] flow
3292 * Pointer to flow structure.
3295 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3297 struct mlx5_flow *dev_flow;
/* First tear down hardware state, then free every sub-flow, dropping the
 * references each one holds (matcher, encap/decap, modify-header). */
3301 flow_dv_remove(dev, flow);
3302 while (!LIST_EMPTY(&flow->dev_flows)) {
3303 dev_flow = LIST_FIRST(&flow->dev_flows);
3304 LIST_REMOVE(dev_flow, next);
3305 if (dev_flow->dv.matcher)
3306 flow_dv_matcher_release(dev, dev_flow);
3307 if (dev_flow->dv.encap_decap)
3308 flow_dv_encap_decap_resource_release(dev_flow);
3309 if (dev_flow->dv.modify_hdr)
3310 flow_dv_modify_hdr_resource_release(dev_flow);
/* Query stub for the DV driver: flow query is not implemented, so always
 * fail with ENOTSUP through the rte_flow error mechanism. */
3318 * @see rte_flow_query()
3322 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
3323 struct rte_flow *flow __rte_unused,
3324 const struct rte_flow_action *actions __rte_unused,
3325 void *data __rte_unused,
3326 struct rte_flow_error *error __rte_unused)
3328 return rte_flow_error_set(error, ENOTSUP,
3329 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3331 "flow query with DV is not supported");
/* Driver-ops vtable exported to the generic mlx5 flow layer: dispatches
 * validate/prepare/translate/apply/remove/destroy/query to the DV
 * implementations in this file. */
3335 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
3336 .validate = flow_dv_validate,
3337 .prepare = flow_dv_prepare,
3338 .translate = flow_dv_translate,
3339 .apply = flow_dv_apply,
3340 .remove = flow_dv_remove,
3341 .destroy = flow_dv_destroy,
3342 .query = flow_dv_query,
3345 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */