1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
38 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
 * Initialize flow attributes structure according to flow items' types.
 *
 * Walks the pattern list once; for each L3/L4 item type seen it marks the
 * corresponding member of @p attr so that later modify-header conversion
 * (set TP/TTL) can pick the matching field table.
 * NOTE(review): the attr assignments per case are not visible in this
 * excerpt — confirm against the full source.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_UDP:
		case RTE_FLOW_ITEM_TYPE_TCP:
85 struct field_modify_info {
86 uint32_t size; /* Size of field in protocol header, in bytes. */
87 uint32_t offset; /* Offset of field in protocol header, in bytes. */
88 enum mlx5_modification_field id;
91 struct field_modify_info modify_eth[] = {
92 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
93 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
94 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
95 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
99 struct field_modify_info modify_ipv4[] = {
100 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
101 {4, 12, MLX5_MODI_OUT_SIPV4},
102 {4, 16, MLX5_MODI_OUT_DIPV4},
106 struct field_modify_info modify_ipv6[] = {
107 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
108 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
109 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
110 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
111 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
112 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
113 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
114 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
115 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
119 struct field_modify_info modify_udp[] = {
120 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
121 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
125 struct field_modify_info modify_tcp[] = {
126 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
127 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * attached (sh->dv_refcnt > 1).
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
flow_d_shared_lock(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	/* Take the mutex only when the IB context is shared among ports. */
	if (sh->dv_refcnt > 1) {
		ret = pthread_mutex_lock(&sh->dv_mutex);
/**
 * Release the synchronizing object protecting the shared dv context.
 * Counterpart of flow_d_shared_lock(): unlocks only when the context is
 * actually shared (sh->dv_refcnt > 1).
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
flow_d_shared_unlock(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	if (sh->dv_refcnt > 1) {
		ret = pthread_mutex_unlock(&sh->dv_mutex);
 * Convert modify-header action to DV specification.
 *
 * Appends one MLX5_MODIFICATION_TYPE_* command per masked field segment
 * to the resource's command array.
 *
 * @param[in] item
 *   Pointer to item specification (spec = values, mask = which fields).
 * @param[in] field
 *   Pointer to field modification information; the table is terminated
 *   by an entry with size == 0.
 * @param[in,out] resource
 *   Pointer to the modify-header resource (commands are appended and
 *   actions_num updated).
 * @param[in] type
 *   Type of modification (SET or ADD).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      struct rte_flow_error *error)
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	/* Zero-size entry terminates the field table. */
	while (field->size) {
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		/* Bound check against the fixed-size command array. */
		if (i >= MLX5_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		actions[i].action_type = type;
		actions[i].field = field->id;
		/* Length is 0 for a full 4-byte word, else size in bits. */
		actions[i].length = field->size ==
				    4 ? 0 : field->size * 8;
		/* Right-align the value inside the 4-byte data word. */
		rte_memcpy(&actions[i].data[4 - field->size],
			   &spec[field->offset], field->size);
		/* HW expects the first command word in big-endian. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	if (resource->actions_num != i)
		resource->actions_num = i;
	/* At least one command must have been produced. */
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification (SET_IPV4_SRC or SET_IPV4_DST).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	/* Build a synthetic IPv4 item carrying the new address + mask. */
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	/* SET_IPV4_SRC rewrites the source address, else the destination. */
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification (SET_IPV6_SRC or SET_IPV6_DST).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	/* Build a synthetic IPv6 item carrying the new address + mask. */
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/* SET_IPV6_SRC rewrites the source address, else the destination. */
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
316 * Convert modify-header set MAC address action to DV specification.
318 * @param[in,out] resource
319 * Pointer to the modify-header resource.
321 * Pointer to action specification.
323 * Pointer to the error structure.
326 * 0 on success, a negative errno value otherwise and rte_errno is set.
329 flow_dv_convert_action_modify_mac
330 (struct mlx5_flow_dv_modify_hdr_resource *resource,
331 const struct rte_flow_action *action,
332 struct rte_flow_error *error)
334 const struct rte_flow_action_set_mac *conf =
335 (const struct rte_flow_action_set_mac *)(action->conf);
336 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
337 struct rte_flow_item_eth eth;
338 struct rte_flow_item_eth eth_mask;
340 memset(ð, 0, sizeof(eth));
341 memset(ð_mask, 0, sizeof(eth_mask));
342 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
343 memcpy(ð.src.addr_bytes, &conf->mac_addr,
344 sizeof(eth.src.addr_bytes));
345 memcpy(ð_mask.src.addr_bytes,
346 &rte_flow_item_eth_mask.src.addr_bytes,
347 sizeof(eth_mask.src.addr_bytes));
349 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
350 sizeof(eth.dst.addr_bytes));
351 memcpy(ð_mask.dst.addr_bytes,
352 &rte_flow_item_eth_mask.dst.addr_bytes,
353 sizeof(eth_mask.dst.addr_bytes));
356 item.mask = ð_mask;
357 return flow_dv_convert_modify_action(&item, modify_eth, resource,
358 MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set TP (transport port) action to DV
 * specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification (SET_TP_SRC or SET_TP_DST).
 * @param[in] items
 *   Pointer to rte_flow_item objects list; used to detect whether the
 *   flow carries UDP or TCP.
 * @param[in,out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;
	/* Scan the pattern to learn which L4 protocol is present. */
	flow_dv_attr_init(items, attr);
	/* UDP variant of the synthetic item. */
	memset(&udp, 0, sizeof(udp));
	memset(&udp_mask, 0, sizeof(udp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
		udp.hdr.src_port = conf->port;
		udp_mask.hdr.src_port =
				rte_flow_item_udp_mask.hdr.src_port;
		udp.hdr.dst_port = conf->port;
		udp_mask.hdr.dst_port =
				rte_flow_item_udp_mask.hdr.dst_port;
	item.type = RTE_FLOW_ITEM_TYPE_UDP;
	item.mask = &udp_mask;
	/* TCP variant of the synthetic item. */
	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
		tcp.hdr.src_port = conf->port;
		tcp_mask.hdr.src_port =
				rte_flow_item_tcp_mask.hdr.src_port;
		tcp.hdr.dst_port = conf->port;
		tcp_mask.hdr.dst_port =
				rte_flow_item_tcp_mask.hdr.dst_port;
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.mask = &tcp_mask;
	/* field points at modify_udp or modify_tcp per detected protocol. */
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification (the new TTL value).
 * @param[in] items
 *   Pointer to rte_flow_item objects list; used to detect whether the
 *   flow carries IPv4 or IPv6.
 * @param[in,out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;
	/* Scan the pattern to learn which L3 protocol is present. */
	flow_dv_attr_init(items, attr);
	/* IPv4 variant: write TTL, full-byte mask. */
	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.time_to_live = conf->ttl_value;
	ipv4_mask.hdr.time_to_live = 0xFF;
	item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	item.mask = &ipv4_mask;
	/* IPv6 variant: write hop limit, full-byte mask. */
	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6.hdr.hop_limits = conf->ttl_value;
	ipv6_mask.hdr.hop_limits = 0xFF;
	item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	item.mask = &ipv6_mask;
	/* field points at modify_ipv4 or modify_ipv6 per detected protocol. */
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header decrement TTL action to DV specification.
 *
 * The decrement is encoded as an ADD command with value 0xFF
 * (i.e. -1 modulo 256 on the 1-byte TTL/hop-limit field).
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list; used to detect whether the
 *   flow carries IPv4 or IPv6.
 * @param[in,out] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;
	/* Scan the pattern to learn which L3 protocol is present. */
	flow_dv_attr_init(items, attr);
	/* IPv4 variant: add 0xFF to TTL (decrement by one). */
	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.time_to_live = 0xFF;
	ipv4_mask.hdr.time_to_live = 0xFF;
	item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	item.mask = &ipv4_mask;
	/* IPv6 variant: add 0xFF to hop limit (decrement by one). */
	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6.hdr.hop_limits = 0xFF;
	ipv6_mask.hdr.hop_limits = 0xFF;
	item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	/* Driver-supported mask: full 32-bit metadata match. */
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
	/* Matching on metadata requires the Tx offload to be enabled. */
	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  "match on metadata offload "
					  "configuration is off for this port");
	/* NOTE(review): guard conditions for the next two returns (empty
	 * spec / zero data) are not visible in this excerpt.
	 */
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be empty");
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be zero");
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
	/* META is set on Tx; matching it in an ingress flow is rejected. */
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "pattern not supported for ingress");
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	/* DevX is required for flow counter allocation. */
	if (!priv->config.devx)
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	/* Without DevX counter support the action cannot be offloaded. */
	return rte_flow_error_set
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			"count action not supported");
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	/* Encap requires a configuration. */
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* Encap is mutually exclusive with drop. */
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	/* Only one encap/decap action is allowed per flow. */
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	/* L2 encap is rejected on the ingress direction. */
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	/* Decap is mutually exclusive with drop. */
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	/* Only one encap/decap action is allowed per flow. */
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	/* Decap must precede any modify-header action. */
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
	/* L2 decap is rejected on the egress direction. */
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  "decap action not supported for "
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	/* Raw encap requires a configuration. */
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* Encap is mutually exclusive with drop. */
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	/* Only one encap action is allowed per flow. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action (also used to scan following actions).
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	/* Decap is mutually exclusive with drop. */
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	/* Decap must not follow an encap action. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
	/* Only one decap action is allowed per flow. */
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	/* Decap must precede any modify-header action. */
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
	/* decap action is valid on egress only if it is followed by encap */
		/* Scan forward to the next RAW_ENCAP or END. */
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					NULL, "decap action not supported"
 * Find existing encap/decap resource or create and register a new one.
 *
 * Looks up a matching reformat action in the per-shared-context cache;
 * on a hit only the reference counter is bumped, otherwise a new action
 * is created through the glue layer and inserted into the cache.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;
	/* Flags = 1 for group 0 (root table), 0 otherwise — TODO confirm
	 * exact flag semantics against the glue API.
	 */
	resource->flags = flow->group ? 0 : 1;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			/* Cache hit: reuse and take a reference. */
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		/* Creation failed: release the cache entry before bailing. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Find existing table jump resource or create and register a new one.
 *
 * Same caching scheme as flow_dv_encap_decap_resource_register(): the
 * key is the destination table pointer; hits only bump the refcount.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_jump_tbl_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
		if (resource->tbl == cache_resource->tbl) {
			/* Cache hit: reuse and take a reference. */
			DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.jump = cache_resource;
	/* Register new jump table resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_flow_tbl
			(resource->tbl->obj);
	if (!cache_resource->action) {
		/* Creation failed: release the cache entry before bailing. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
	dev_flow->dv.jump = cache_resource;
	DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Get the size of specific rte_flow_item_type
 *
 * Maps a flow item type to the size of its corresponding
 * rte_flow_item_* header structure, used when copying item specs into
 * the raw encapsulation buffer.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/* Default header-field values used by flow_dv_convert_encap_data() to
 * fill fields the user left zero in the encapsulation definition.
 */
#define MLX5_ENCAP_IPV4_VERSION 0x40
#define MLX5_ENCAP_IPV4_IHL_MIN 0x05
#define MLX5_ENCAP_IPV4_TTL_DEF 0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Copies each item's spec into @p buf in order and patches fields the
 * user left zero (ether_type, IP version/TTL, next-protocol, UDP
 * destination port, VXLAN flags) with the MLX5_ENCAP_* defaults so the
 * buffer forms a coherent packet header stack.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
	/* Pointers into buf, remembered so later items can patch
	 * earlier headers (e.g. set next-protocol fields).
	 */
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t temp_size = 0;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* Append this item's header bytes to the raw buffer. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			/* VLAN must follow an Ethernet header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			/* IPv4 must follow an Ethernet or VLAN header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"neither eth nor vlan"
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			/* IPv6 must follow an Ethernet or VLAN header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"neither eth nor vlan"
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			/* UDP must follow an IPv4 or IPv6 header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			/* VXLAN must follow a UDP header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"udp header not found");
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			/* VXLAN-GPE must follow a UDP header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"udp header not found");
			/* GPE carries the inner protocol explicitly. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"next protocol not found");
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			/* GRE needs a protocol and a preceding IP header. */
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"next protocol not found");
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
		case RTE_FLOW_ITEM_TYPE_VOID:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
 * Convert L2 encap action to DV specification.
 *
 * Handles RAW_ENCAP (pre-built buffer) as well as VXLAN/NVGRE encap
 * (item list converted through flow_dv_convert_encap_data()), then
 * registers the resulting reformat resource.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	/* L2-to-L2 tunnel reformat on the NIC Tx table. */
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		/* Raw encap: the user supplies the header bytes directly. */
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
		/* VXLAN/NVGRE encap: build the buffer from the item list. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
				((const struct rte_flow_action_vxlan_encap *)
					action->conf)->definition;
				((const struct rte_flow_action_nvgre_encap *)
					action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1259  * Convert L2 decap action to DV specification.
1262  * Pointer to rte_eth_dev structure.
1263  * @param[in, out] dev_flow
1264  * Pointer to the mlx5_flow.
1266  * Pointer to the error structure.
1269  * 0 on success, a negative errno value otherwise and rte_errno is set.
1272 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1273 struct mlx5_flow *dev_flow,
1274 struct rte_flow_error *error)
/* Tunnel-to-L2 reformat on the NIC RX table (ingress decapsulation);
 * no buffer is needed since decap strips the outer headers. */
1276 struct mlx5_flow_dv_encap_decap_resource res = {
1279 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1280 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1283 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1284 return rte_flow_error_set(error, EINVAL,
1285 RTE_FLOW_ERROR_TYPE_ACTION,
1286 NULL, "can't create L2 decap action");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1291  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1294  * Pointer to rte_eth_dev structure.
1296  * Pointer to action structure.
1297  * @param[in, out] dev_flow
1298  * Pointer to the mlx5_flow.
1300  * Pointer to the flow attributes.
1302  * Pointer to the error structure.
1305  * 0 on success, a negative errno value otherwise and rte_errno is set.
1308 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1309 const struct rte_flow_action *action,
1310 struct mlx5_flow *dev_flow,
1311 const struct rte_flow_attr *attr,
1312 struct rte_flow_error *error)
1314 const struct rte_flow_action_raw_encap *encap_data;
1315 struct mlx5_flow_dv_encap_decap_resource res;
1317 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1318 res.size = encap_data->size;
1319 memcpy(res.buf, encap_data->data, res.size);
/* Direction decides the reformat: egress wraps L2 into an L3 tunnel,
 * ingress unwraps the L3 tunnel back to L2; table type follows suit. */
1320 res.reformat_type = attr->egress ?
1321 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1322 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1323 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1324 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1325 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1326 return rte_flow_error_set(error, EINVAL,
1327 RTE_FLOW_ERROR_TYPE_ACTION,
1328 NULL, "can't create encap action");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1333  * Validate the modify-header actions.
1335  * @param[in] action_flags
1336  * Holds the actions detected until now.
1338  * Pointer to the modify action.
1340  * Pointer to error structure.
1343  * 0 on success, a negative errno value otherwise and rte_errno is set.
1346 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1347 const struct rte_flow_action *action,
1348 struct rte_flow_error *error)
/* DEC_TTL is the only modify-header action that takes no configuration. */
1350 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1351 return rte_flow_error_set(error, EINVAL,
1352 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1353 NULL, "action configuration not set");
/* Header rewrites must precede encapsulation in the action list. */
1354 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1355 return rte_flow_error_set(error, EINVAL,
1356 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1357 "can't have encap action before"
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1363  * Validate the modify-header MAC address actions.
1365  * @param[in] action_flags
1366  * Holds the actions detected until now.
1368  * Pointer to the modify action.
1369  * @param[in] item_flags
1370  * Holds the items detected.
1372  * Pointer to error structure.
1375  * 0 on success, a negative errno value otherwise and rte_errno is set.
1378 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1379 const struct rte_flow_action *action,
1380 const uint64_t item_flags,
1381 struct rte_flow_error *error)
/* Common modify-header checks first, then require a matching L2 item. */
1385 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1387 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1388 return rte_flow_error_set(error, EINVAL,
1389 RTE_FLOW_ERROR_TYPE_ACTION,
1391 "no L2 item in pattern");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1397  * Validate the modify-header IPv4 address actions.
1399  * @param[in] action_flags
1400  * Holds the actions detected until now.
1402  * Pointer to the modify action.
1403  * @param[in] item_flags
1404  * Holds the items detected.
1406  * Pointer to error structure.
1409  * 0 on success, a negative errno value otherwise and rte_errno is set.
1412 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1413 const struct rte_flow_action *action,
1414 const uint64_t item_flags,
1415 struct rte_flow_error *error)
/* Common modify-header checks first, then require an IPv4 item. */
1419 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1421 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1422 return rte_flow_error_set(error, EINVAL,
1423 RTE_FLOW_ERROR_TYPE_ACTION,
1425 "no ipv4 item in pattern");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1431  * Validate the modify-header IPv6 address actions.
1433  * @param[in] action_flags
1434  * Holds the actions detected until now.
1436  * Pointer to the modify action.
1437  * @param[in] item_flags
1438  * Holds the items detected.
1440  * Pointer to error structure.
1443  * 0 on success, a negative errno value otherwise and rte_errno is set.
1446 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1447 const struct rte_flow_action *action,
1448 const uint64_t item_flags,
1449 struct rte_flow_error *error)
/* Common modify-header checks first, then require an IPv6 item. */
1453 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1455 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1456 return rte_flow_error_set(error, EINVAL,
1457 RTE_FLOW_ERROR_TYPE_ACTION,
1459 "no ipv6 item in pattern");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 * "TP" = transport-layer port (TCP/UDP source/destination port rewrite).
 */
1465  * Validate the modify-header TP actions.
1467  * @param[in] action_flags
1468  * Holds the actions detected until now.
1470  * Pointer to the modify action.
1471  * @param[in] item_flags
1472  * Holds the items detected.
1474  * Pointer to error structure.
1477  * 0 on success, a negative errno value otherwise and rte_errno is set.
1480 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1481 const struct rte_flow_action *action,
1482 const uint64_t item_flags,
1483 struct rte_flow_error *error)
/* Common modify-header checks first, then require an L4 item. */
1487 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1489 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1490 return rte_flow_error_set(error, EINVAL,
1491 RTE_FLOW_ERROR_TYPE_ACTION,
1492 NULL, "no transport layer "
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1499  * Validate the modify-header TTL actions.
1501  * @param[in] action_flags
1502  * Holds the actions detected until now.
1504  * Pointer to the modify action.
1505  * @param[in] item_flags
1506  * Holds the items detected.
1508  * Pointer to error structure.
1511  * 0 on success, a negative errno value otherwise and rte_errno is set.
1514 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1515 const struct rte_flow_action *action,
1516 const uint64_t item_flags,
1517 struct rte_flow_error *error)
/* Common modify-header checks first, then require any L3 item
 * (TTL/hop-limit exists in both IPv4 and IPv6 headers). */
1521 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1523 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1524 return rte_flow_error_set(error, EINVAL,
1525 RTE_FLOW_ERROR_TYPE_ACTION,
1527 "no IP protocol in pattern");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 * Fix: user-visible error message typo "higher then" -> "higher than".
 */
1533  * Validate jump action.
1536  * Pointer to the modify action.
1538  * The group of the current flow.
1540  * Pointer to error structure.
1543  * 0 on success, a negative errno value otherwise and rte_errno is set.
1546 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1548 struct rte_flow_error *error)
1550 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1551 return rte_flow_error_set(error, EINVAL,
1552 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1553 NULL, "action configuration not set");
/* Jumps may only target a strictly greater group number, which
 * prevents loops in the flow-table graph. */
1554 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1555 return rte_flow_error_set(error, EINVAL,
1556 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1557 "target group must be higher than"
1558 " the current flow group");
/*
 * NOTE(review): line-sampled extract; braces, returns and some statements
 * are elided between the lines below.
 */
1564  * Find existing modify-header resource or create and register a new one.
1566  * @param dev[in, out]
1567  * Pointer to rte_eth_dev structure.
1568  * @param[in, out] resource
1569  * Pointer to modify-header resource.
1570  * @parm[in, out] dev_flow
1571  * Pointer to the dev_flow.
1573  * pointer to error structure.
1576  * 0 on success otherwise -errno and errno is set.
1579 flow_dv_modify_hdr_resource_register
1580 (struct rte_eth_dev *dev,
1581 struct mlx5_flow_dv_modify_hdr_resource *resource,
1582 struct mlx5_flow *dev_flow,
1583 struct rte_flow_error *error)
1585 struct mlx5_priv *priv = dev->data->dev_private;
1586 struct mlx5_ibv_shared *sh = priv->sh;
1587 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
/* ns selected by direction is declared here; its use is not visible in
 * this extract — presumably consumed by the glue create call below. */
1589 struct mlx5dv_dr_ns *ns =
1590 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
1591 sh->tx_ns : sh->rx_ns;
1593 /* Lookup a matching resource from cache. */
1594 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
/* A match requires identical table type, action count and the exact
 * same action array contents. */
1595 if (resource->ft_type == cache_resource->ft_type &&
1596 resource->actions_num == cache_resource->actions_num &&
1597 !memcmp((const void *)resource->actions,
1598 (const void *)cache_resource->actions,
1599 (resource->actions_num *
1600 sizeof(resource->actions[0])))) {
1601 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1602 (void *)cache_resource,
1603 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: bump the refcount and share the existing resource. */
1604 rte_atomic32_inc(&cache_resource->refcnt);
1605 dev_flow->dv.modify_hdr = cache_resource;
1609 /* Register new modify-header resource. */
1610 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1611 if (!cache_resource)
1612 return rte_flow_error_set(error, ENOMEM,
1613 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1614 "cannot allocate resource memory");
1615 *cache_resource = *resource;
1616 cache_resource->verbs_action =
1617 mlx5_glue->dv_create_flow_action_modify_header
1618 (sh->ctx, cache_resource->ft_type,
1620 cache_resource->actions_num *
1621 sizeof(cache_resource->actions[0]),
1622 (uint64_t *)cache_resource->actions);
/* On glue failure free the copy before reporting — no leak. */
1623 if (!cache_resource->verbs_action) {
1624 rte_free(cache_resource);
1625 return rte_flow_error_set(error, ENOMEM,
1626 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1627 NULL, "cannot create action");
1629 rte_atomic32_init(&cache_resource->refcnt);
1630 rte_atomic32_inc(&cache_resource->refcnt);
1631 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1632 dev_flow->dv.modify_hdr = cache_resource;
1633 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1634 (void *)cache_resource,
1635 rte_atomic32_read(&cache_resource->refcnt));
/*
 * NOTE(review): line-sampled extract; error-handling branches and the
 * final return are elided between the lines below.
 */
1640  * Get or create a flow counter.
1643  * Pointer to the Ethernet device structure.
1645  * Indicate if this counter is shared with other flows.
1647  * Counter identifier.
1650  * pointer to flow counter on success, NULL otherwise and rte_errno is set.
1652 static struct mlx5_flow_counter *
1653 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1655 struct mlx5_priv *priv = dev->data->dev_private;
1656 struct mlx5_flow_counter *cnt = NULL;
1657 struct mlx5_devx_counter_set *dcs = NULL;
/* DevX is required for flow counters; bail out early without it. */
1660 if (!priv->config.devx) {
/* Reuse a shared counter with the same id if one already exists. */
1665 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1666 if (cnt->shared && cnt->id == id) {
/* Otherwise allocate a fresh counter + DevX counter set pair. */
1672 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1673 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1678 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1681 struct mlx5_flow_counter tmpl = {
1687 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1693 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1703  * Release a flow counter.
1705  * @param[in] counter
1706  * Pointer to the counter handler.
1709 flow_dv_counter_release(struct mlx5_flow_counter *counter)
/* Drop one reference; on the last one free the DevX counter set,
 * unlink the entry and release its memory. */
1715 if (--counter->ref_cnt == 0) {
1716 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
/* Best effort: a failed DevX free is logged, not propagated. */
1718 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1719 LIST_REMOVE(counter, next);
1720 rte_free(counter->dcs);
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
1726  * Verify the @p attributes will be correctly understood by the NIC and store
1727  * them in the @p flow if everything is correct.
1730  * Pointer to dev struct.
1731  * @param[in] attributes
1732  * Pointer to flow attributes
1734  * Pointer to error structure.
1737  * 0 on success, a negative errno value otherwise and rte_errno is set.
1740 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1741 const struct rte_flow_attr *attributes,
1742 struct rte_flow_error *error)
1744 struct mlx5_priv *priv = dev->data->dev_private;
1745 uint32_t priority_max = priv->config.flow_prio - 1;
/* Without Direct Rules (DR) support only group 0 is available. */
1747 #ifndef HAVE_MLX5DV_DR
1748 if (attributes->group)
1749 return rte_flow_error_set(error, ENOTSUP,
1750 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1752 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a sentinel meaning "PMD picks the priority". */
1754 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1755 attributes->priority >= priority_max)
1756 return rte_flow_error_set(error, ENOTSUP,
1757 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1759 "priority out of range");
1760 if (attributes->transfer)
1761 return rte_flow_error_set(error, ENOTSUP,
1762 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1764 "transfer is not supported");
/* XOR: a flow must be ingress or egress, never both or neither. */
1765 if (!(attributes->egress ^ attributes->ingress))
1766 return rte_flow_error_set(error, ENOTSUP,
1767 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1768 "must specify exactly one of "
1769 "ingress or egress");
/*
 * NOTE(review): line-sampled extract; `break` statements, error checks
 * (`if (ret < 0) return ret;`) and several call arguments are elided
 * between the lines below.
 */
1774  * Internal validation function. For validating both actions and items.
1777  * Pointer to the rte_eth_dev structure.
1779  * Pointer to the flow attributes.
1781  * Pointer to the list of items.
1782  * @param[in] actions
1783  * Pointer to the list of actions.
1785  * Pointer to the error structure.
1788  * 0 on success, a negative errno value otherwise and rte_errno is set.
1791 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1792 const struct rte_flow_item items[],
1793 const struct rte_flow_action actions[],
1794 struct rte_flow_error *error)
1797 uint64_t action_flags = 0;
1798 uint64_t item_flags = 0;
1799 uint64_t last_item = 0;
/* 0xff means "next protocol unknown/unconstrained" for tunnel checks. */
1801 uint8_t next_protocol = 0xff;
1806 ret = flow_dv_validate_attributes(dev, attr, error);
/* Pass 1: walk the pattern, validate each item against the layers
 * accumulated so far and record inner vs. outer placement. */
1809 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1810 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1811 switch (items->type) {
1812 case RTE_FLOW_ITEM_TYPE_VOID:
1814 case RTE_FLOW_ITEM_TYPE_ETH:
1815 ret = mlx5_flow_validate_item_eth(items, item_flags,
1819 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1820 MLX5_FLOW_LAYER_OUTER_L2;
1822 case RTE_FLOW_ITEM_TYPE_VLAN:
1823 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1827 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1828 MLX5_FLOW_LAYER_OUTER_VLAN;
1830 case RTE_FLOW_ITEM_TYPE_IPV4:
1831 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1835 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1836 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Capture the masked next-protocol so a following tunnel item
 * (GRE etc.) can be checked against it. */
1837 if (items->mask != NULL &&
1838 ((const struct rte_flow_item_ipv4 *)
1839 items->mask)->hdr.next_proto_id) {
1841 ((const struct rte_flow_item_ipv4 *)
1842 (items->spec))->hdr.next_proto_id;
1844 ((const struct rte_flow_item_ipv4 *)
1845 (items->mask))->hdr.next_proto_id;
1847 /* Reset for inner layer. */
1848 next_protocol = 0xff;
1851 case RTE_FLOW_ITEM_TYPE_IPV6:
1852 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1856 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1857 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1858 if (items->mask != NULL &&
1859 ((const struct rte_flow_item_ipv6 *)
1860 items->mask)->hdr.proto) {
1862 ((const struct rte_flow_item_ipv6 *)
1863 items->spec)->hdr.proto;
1865 ((const struct rte_flow_item_ipv6 *)
1866 items->mask)->hdr.proto;
1868 /* Reset for inner layer. */
1869 next_protocol = 0xff;
1872 case RTE_FLOW_ITEM_TYPE_TCP:
1873 ret = mlx5_flow_validate_item_tcp
1876 &rte_flow_item_tcp_mask,
1880 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1881 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1883 case RTE_FLOW_ITEM_TYPE_UDP:
1884 ret = mlx5_flow_validate_item_udp(items, item_flags,
1889 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1890 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1892 case RTE_FLOW_ITEM_TYPE_GRE:
1893 case RTE_FLOW_ITEM_TYPE_NVGRE:
1894 ret = mlx5_flow_validate_item_gre(items, item_flags,
1895 next_protocol, error);
1898 last_item = MLX5_FLOW_LAYER_GRE;
1900 case RTE_FLOW_ITEM_TYPE_VXLAN:
1901 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1905 last_item = MLX5_FLOW_LAYER_VXLAN;
1907 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1908 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1913 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1915 case RTE_FLOW_ITEM_TYPE_MPLS:
1916 ret = mlx5_flow_validate_item_mpls(dev, items,
1921 last_item = MLX5_FLOW_LAYER_MPLS;
1923 case RTE_FLOW_ITEM_TYPE_META:
1924 ret = flow_dv_validate_item_meta(dev, items, attr,
1928 last_item = MLX5_FLOW_ITEM_METADATA;
1931 return rte_flow_error_set(error, ENOTSUP,
1932 RTE_FLOW_ERROR_TYPE_ITEM,
1933 NULL, "item not supported");
/* Commit this item's layer bit after its validation passed. */
1935 item_flags |= last_item;
/* Pass 2: walk the action list, validate each action against the
 * accumulated action/item flags and count actions toward the cap. */
1937 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1938 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1939 return rte_flow_error_set(error, ENOTSUP,
1940 RTE_FLOW_ERROR_TYPE_ACTION,
1941 actions, "too many actions");
1942 switch (actions->type) {
1943 case RTE_FLOW_ACTION_TYPE_VOID:
1945 case RTE_FLOW_ACTION_TYPE_FLAG:
1946 ret = mlx5_flow_validate_action_flag(action_flags,
1950 action_flags |= MLX5_FLOW_ACTION_FLAG;
1953 case RTE_FLOW_ACTION_TYPE_MARK:
1954 ret = mlx5_flow_validate_action_mark(actions,
1959 action_flags |= MLX5_FLOW_ACTION_MARK;
1962 case RTE_FLOW_ACTION_TYPE_DROP:
1963 ret = mlx5_flow_validate_action_drop(action_flags,
1967 action_flags |= MLX5_FLOW_ACTION_DROP;
1970 case RTE_FLOW_ACTION_TYPE_QUEUE:
1971 ret = mlx5_flow_validate_action_queue(actions,
1976 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1979 case RTE_FLOW_ACTION_TYPE_RSS:
1980 ret = mlx5_flow_validate_action_rss(actions,
1985 action_flags |= MLX5_FLOW_ACTION_RSS;
1988 case RTE_FLOW_ACTION_TYPE_COUNT:
1989 ret = flow_dv_validate_action_count(dev, error);
1992 action_flags |= MLX5_FLOW_ACTION_COUNT;
1995 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1996 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1997 ret = flow_dv_validate_action_l2_encap(action_flags,
2002 action_flags |= actions->type ==
2003 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2004 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2005 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2008 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2009 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2010 ret = flow_dv_validate_action_l2_decap(action_flags,
2014 action_flags |= actions->type ==
2015 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2016 MLX5_FLOW_ACTION_VXLAN_DECAP :
2017 MLX5_FLOW_ACTION_NVGRE_DECAP;
2020 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2021 ret = flow_dv_validate_action_raw_encap(action_flags,
2026 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2029 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2030 ret = flow_dv_validate_action_raw_decap(action_flags,
2035 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2038 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2039 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2040 ret = flow_dv_validate_action_modify_mac(action_flags,
2046 /* Count all modify-header actions as one action. */
2047 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2049 action_flags |= actions->type ==
2050 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2051 MLX5_FLOW_ACTION_SET_MAC_SRC :
2052 MLX5_FLOW_ACTION_SET_MAC_DST;
2055 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2056 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2057 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2063 /* Count all modify-header actions as one action. */
2064 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2066 action_flags |= actions->type ==
2067 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2068 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2069 MLX5_FLOW_ACTION_SET_IPV4_DST;
2071 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2072 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2073 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2079 /* Count all modify-header actions as one action. */
2080 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2082 action_flags |= actions->type ==
2083 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2084 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2085 MLX5_FLOW_ACTION_SET_IPV6_DST;
2087 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2088 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2089 ret = flow_dv_validate_action_modify_tp(action_flags,
2095 /* Count all modify-header actions as one action. */
2096 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2098 action_flags |= actions->type ==
2099 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2100 MLX5_FLOW_ACTION_SET_TP_SRC :
2101 MLX5_FLOW_ACTION_SET_TP_DST;
2103 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2104 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2105 ret = flow_dv_validate_action_modify_ttl(action_flags,
2111 /* Count all modify-header actions as one action. */
2112 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2114 action_flags |= actions->type ==
2115 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2116 MLX5_FLOW_ACTION_SET_TTL :
2117 MLX5_FLOW_ACTION_DEC_TTL;
2119 case RTE_FLOW_ACTION_TYPE_JUMP:
2120 ret = flow_dv_validate_action_jump(actions,
2121 attr->group, error);
2125 action_flags |= MLX5_FLOW_ACTION_JUMP;
2128 return rte_flow_error_set(error, ENOTSUP,
2129 RTE_FLOW_ERROR_TYPE_ACTION,
2131 "action not supported");
/* Every ingress flow needs a fate action (queue/rss/drop/...). */
2134 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2135 return rte_flow_error_set(error, EINVAL,
2136 RTE_FLOW_ERROR_TYPE_ACTION, actions,
2137 "no fate action is found");
/*
 * NOTE(review): line-sampled extract; intermediate lines are elided.
 */
2142  * Internal preparation function. Allocates the DV flow size,
2143  * this size is constant.
2146  * Pointer to the flow attributes.
2148  * Pointer to the list of items.
2149  * @param[in] actions
2150  * Pointer to the list of actions.
2152  * Pointer to the error structure.
2155  * Pointer to mlx5_flow object on success,
2156  * otherwise NULL and rte_errno is set.
2158 static struct mlx5_flow *
2159 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2160 const struct rte_flow_item items[] __rte_unused,
2161 const struct rte_flow_action actions[] __rte_unused,
2162 struct rte_flow_error *error)
2164 uint32_t size = sizeof(struct mlx5_flow);
2165 struct mlx5_flow *flow;
2167 flow = rte_calloc(__func__, 1, size, 0);
2169 rte_flow_error_set(error, ENOMEM,
2170 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2171 "not enough memory to create flow");
/* Matcher value is always the full fte_match_param size. */
2174 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
/*
 * NOTE(review): line-sampled extract; the comparison and return lines
 * are elided between the lines below.
 */
2180  * Sanity check for match mask and value. Similar to check_valid_spec() in
2181  * kernel driver. If unmasked bit is present in value, it returns failure.
2184  * pointer to match mask buffer.
2185  * @param match_value
2186  * pointer to match value buffer.
2189  * 0 if valid, -EINVAL otherwise.
2192 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2194 uint8_t *m = match_mask;
2195 uint8_t *v = match_value;
/* Byte-wise scan of the whole fte_match_param region. */
2198 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2201 "match_value differs from match_criteria"
2202 " %p[%u] != %p[%u]",
2203 match_value, i, match_mask, i);
/*
 * NOTE(review): line-sampled extract; declarations, the default-mask
 * fallback and braces are elided between the lines below.
 */
2212  * Add Ethernet item to matcher and to the value.
2214  * @param[in, out] matcher
2216  * @param[in, out] key
2217  * Flow matcher value.
2219  * Flow pattern to translate.
2221  * Item is inner pattern.
2224 flow_dv_translate_item_eth(void *matcher, void *key,
2225 const struct rte_flow_item *item, int inner)
2227 const struct rte_flow_item_eth *eth_m = item->mask;
2228 const struct rte_flow_item_eth *eth_v = item->spec;
2229 const struct rte_flow_item_eth nic_mask = {
2230 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2231 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2232 .type = RTE_BE16(0xffff),
/* Inner vs. outer selects which header set of the match param to fill. */
2244 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2246 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2248 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2250 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2252 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2253 &eth_m->dst, sizeof(eth_m->dst));
2254 /* The value must be in the range of the mask. */
2255 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2256 for (i = 0; i < sizeof(eth_m->dst); ++i)
2257 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2258 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2259 &eth_m->src, sizeof(eth_m->src));
2260 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2261 /* The value must be in the range of the mask. */
2262 for (i = 0; i < sizeof(eth_m->dst); ++i)
2263 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2264 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2265 rte_be_to_cpu_16(eth_m->type));
2266 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
/* Ethertype value is stored masked, still in big-endian layout. */
2267 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
/*
 * NOTE(review): line-sampled extract; declarations and braces are elided
 * between the lines below.
 */
2271  * Add VLAN item to matcher and to the value.
2273  * @param[in, out] matcher
2275  * @param[in, out] key
2276  * Flow matcher value.
2278  * Flow pattern to translate.
2280  * Item is inner pattern.
2283 flow_dv_translate_item_vlan(void *matcher, void *key,
2284 const struct rte_flow_item *item,
2287 const struct rte_flow_item_vlan *vlan_m = item->mask;
2288 const struct rte_flow_item_vlan *vlan_v = item->spec;
2289 const struct rte_flow_item_vlan nic_mask = {
2290 .tci = RTE_BE16(0x0fff),
2291 .inner_type = RTE_BE16(0xffff),
2303 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2305 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2307 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2309 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2311 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2312 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2313 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2314 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
/* TCI splits into VID (bits 0-11), CFI (bit 12), PCP (bits 13-15). */
2315 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2317 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2318 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2319 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2320 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/*
 * NOTE(review): line-sampled extract; declarations, the default-mask
 * fallback and braces are elided between the lines below.
 */
2324  * Add IPV4 item to matcher and to the value.
2326  * @param[in, out] matcher
2328  * @param[in, out] key
2329  * Flow matcher value.
2331  * Flow pattern to translate.
2333  * Item is inner pattern.
2335  * The group to insert the rule.
2338 flow_dv_translate_item_ipv4(void *matcher, void *key,
2339 const struct rte_flow_item *item,
2340 int inner, uint32_t group)
2342 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2343 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2344 const struct rte_flow_item_ipv4 nic_mask = {
2346 .src_addr = RTE_BE32(0xffffffff),
2347 .dst_addr = RTE_BE32(0xffffffff),
2348 .type_of_service = 0xff,
2349 .next_proto_id = 0xff,
2359 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2361 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2363 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2365 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask 0xf with value 4 pins the entry to IPv4 packets. */
2368 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2370 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2371 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2376 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2377 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2378 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2379 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
/* Value is always pre-masked so it stays within the mask range. */
2380 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2381 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2382 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2383 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2384 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2385 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2386 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2387 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte splits into ECN (low 2 bits) and DSCP (upper 6 bits). */
2388 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2389 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2390 ipv4_m->hdr.type_of_service);
2391 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2392 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2393 ipv4_m->hdr.type_of_service >> 2);
2394 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2395 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2396 ipv4_m->hdr.next_proto_id);
2397 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2398 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
/*
 * NOTE(review): line-sampled extract; declarations, the default-mask
 * fallback and braces are elided between the lines below.
 */
2402  * Add IPV6 item to matcher and to the value.
2404  * @param[in, out] matcher
2406  * @param[in, out] key
2407  * Flow matcher value.
2409  * Flow pattern to translate.
2411  * Item is inner pattern.
2413  * The group to insert the rule.
2416 flow_dv_translate_item_ipv6(void *matcher, void *key,
2417 const struct rte_flow_item *item,
2418 int inner, uint32_t group)
2420 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2421 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2422 const struct rte_flow_item_ipv6 nic_mask = {
2425 "\xff\xff\xff\xff\xff\xff\xff\xff"
2426 "\xff\xff\xff\xff\xff\xff\xff\xff",
2428 "\xff\xff\xff\xff\xff\xff\xff\xff"
2429 "\xff\xff\xff\xff\xff\xff\xff\xff",
2430 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in misc parameters, not in the L2-L4 header set. */
2437 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2438 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2447 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2449 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2451 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2453 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask 0xf with value 6 pins the entry to IPv6 packets. */
2456 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2458 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2459 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2464 size = sizeof(ipv6_m->hdr.dst_addr);
2465 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2466 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2467 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2468 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
/* Copy the 16-byte address mask, then AND spec into the value. */
2469 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2470 for (i = 0; i < size; ++i)
2471 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2472 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2473 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2474 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2475 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2476 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2477 for (i = 0; i < size; ++i)
2478 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow packs version/traffic-class/flow-label; TC starts at
 * bit 20 (ECN) and bit 22 (DSCP) after the shift below. */
2480 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2481 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2482 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2483 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2484 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2485 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2488 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2490 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2493 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2495 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2499 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2501 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2502 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
/*
 * NOTE(review): line-sampled extract; declarations and the early-return
 * when item->spec is absent are elided between the lines below.
 */
2506  * Add TCP item to matcher and to the value.
2508  * @param[in, out] matcher
2510  * @param[in, out] key
2511  * Flow matcher value.
2513  * Flow pattern to translate.
2515  * Item is inner pattern.
2518 flow_dv_translate_item_tcp(void *matcher, void *key,
2519 const struct rte_flow_item *item,
2522 const struct rte_flow_item_tcp *tcp_m = item->mask;
2523 const struct rte_flow_item_tcp *tcp_v = item->spec;
2528 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2530 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2532 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2534 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Presence of a TCP item always pins ip_protocol to IPPROTO_TCP. */
2536 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2537 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the rte_flow default mask when none is supplied. */
2541 tcp_m = &rte_flow_item_tcp_mask;
2542 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2543 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2544 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2545 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2546 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2547 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2548 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2549 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
/*
 * Translate an rte_flow UDP item into the device matcher mask and value.
 * Mirrors flow_dv_translate_item_tcp() with IPPROTO_UDP and UDP port fields.
 *
 * NOTE(review): elided listing -- braces and the inner/outer selection
 * if/else are not visible; confirm against the full source.
 */
2553 * Add UDP item to matcher and to the value.
2555 * @param[in, out] matcher
2557 * @param[in, out] key
2558 * Flow matcher value.
2560 * Flow pattern to translate.
2562 * Item is inner pattern.
2565 flow_dv_translate_item_udp(void *matcher, void *key,
2566 const struct rte_flow_item *item,
/* User-supplied mask and spec for the UDP item (either may be NULL). */
2569 const struct rte_flow_item_udp *udp_m = item->mask;
2570 const struct rte_flow_item_udp *udp_v = item->spec;
/* Inner vs. outer header block selection. */
2575 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2577 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2579 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2581 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Always match the L4 protocol exactly as UDP. */
2583 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2584 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Missing mask: use the default rte_flow UDP mask. */
2588 udp_m = &rte_flow_item_udp_mask;
/* Convert big-endian ports; value entries are spec & mask. */
2589 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2590 rte_be_to_cpu_16(udp_m->hdr.src_port));
2591 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2592 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2593 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2594 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2595 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2596 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
/*
 * Translate an rte_flow GRE item: match IP protocol GRE and the GRE
 * protocol field (in misc parameters).
 *
 * NOTE(review): elided listing -- braces and inner/outer if/else missing;
 * confirm against the full source.
 */
2600 * Add GRE item to matcher and to the value.
2602 * @param[in, out] matcher
2604 * @param[in, out] key
2605 * Flow matcher value.
2607 * Flow pattern to translate.
2609 * Item is inner pattern.
2612 flow_dv_translate_item_gre(void *matcher, void *key,
2613 const struct rte_flow_item *item,
/* User-supplied mask and spec for the GRE item (either may be NULL). */
2616 const struct rte_flow_item_gre *gre_m = item->mask;
2617 const struct rte_flow_item_gre *gre_v = item->spec;
/* GRE protocol lives in the misc parameters set, not the L2-L4 headers. */
2620 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2621 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2624 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2626 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2628 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2630 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Always match the IP protocol exactly as GRE. */
2632 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2633 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Missing mask: use the default rte_flow GRE mask. */
2637 gre_m = &rte_flow_item_gre_mask;
/* GRE protocol (e.g. inner ethertype), value is spec & mask. */
2638 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2639 rte_be_to_cpu_16(gre_m->protocol));
2640 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2641 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
/*
 * Translate an rte_flow NVGRE item: reuse the GRE translation for the
 * protocol match, then match TNI + flow_id as the high GRE key bytes.
 *
 * NOTE(review): elided listing. Also note tni_flow_id_m is derived from
 * nvgre_m (line 2665) BEFORE the NULL-mask default is applied (line 2676);
 * if item->mask can be NULL here this reads through a NULL-based pointer --
 * verify against the full source / upstream fix.
 */
2645 * Add NVGRE item to matcher and to the value.
2647 * @param[in, out] matcher
2649 * @param[in, out] key
2650 * Flow matcher value.
2652 * Flow pattern to translate.
2654 * Item is inner pattern.
2657 flow_dv_translate_item_nvgre(void *matcher, void *key,
2658 const struct rte_flow_item *item,
2661 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2662 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2663 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2664 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Byte views of the TNI array; flow_id is assumed adjacent in memory. */
2665 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2666 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* Protocol/IP-proto matching is shared with plain GRE. */
2672 flow_dv_translate_item_gre(matcher, key, item, inner);
/* Missing mask: use the default rte_flow NVGRE mask. */
2676 nvgre_m = &rte_flow_item_nvgre_mask;
/* Copy TNI (3B) + flow_id (1B) together into the high GRE key word. */
2677 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2678 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2679 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2680 memcpy(gre_key_m, tni_flow_id_m, size);
/* Value bytes are spec & mask, byte by byte. */
2681 for (i = 0; i < size; ++i)
2682 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
/*
 * Translate an rte_flow VXLAN / VXLAN-GPE item: default the UDP dport if
 * the pattern did not match one, then match the VNI bytes.
 *
 * NOTE(review): elided listing -- braces and inner/outer if/else missing;
 * confirm against the full source.
 */
2686 * Add VXLAN item to matcher and to the value.
2688 * @param[in, out] matcher
2690 * @param[in, out] key
2691 * Flow matcher value.
2693 * Flow pattern to translate.
2695 * Item is inner pattern.
2698 flow_dv_translate_item_vxlan(void *matcher, void *key,
2699 const struct rte_flow_item *item,
2702 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2703 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
/* VNI lives in the misc parameters set. */
2706 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2707 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2715 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2717 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2719 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2721 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Shared with VXLAN-GPE: pick the well-known dport per item type. */
2723 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2724 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the default dport when the UDP item did not set one. */
2725 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2726 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2727 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Missing mask: use the default rte_flow VXLAN mask. */
2732 vxlan_m = &rte_flow_item_vxlan_mask;
/* Match the 3-byte VNI; value bytes are spec & mask. */
2733 size = sizeof(vxlan_m->vni);
2734 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2735 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2736 memcpy(vni_m, vxlan_m->vni, size);
2737 for (i = 0; i < size; ++i)
2738 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
/*
 * Translate an rte_flow MPLS item. The encapsulating protocol match and
 * the destination field for the MPLS label word both depend on prev_layer
 * (MPLS over UDP vs. over GRE vs. other).
 *
 * NOTE(review): elided listing -- the "break" statements, default cases
 * and several assignment left-hand sides in the second switch are not
 * visible; confirm against the full source before relying on this flow.
 */
2742 * Add MPLS item to matcher and to the value.
2744 * @param[in, out] matcher
2746 * @param[in, out] key
2747 * Flow matcher value.
2749 * Flow pattern to translate.
2750 * @param[in] prev_layer
2751 * The protocol layer indicated in previous item.
2753 * Item is inner pattern.
2756 flow_dv_translate_item_mpls(void *matcher, void *key,
2757 const struct rte_flow_item *item,
2758 uint64_t prev_layer,
/* The MPLS label stack entry is treated as one 32-bit word. */
2761 const uint32_t *in_mpls_m = item->mask;
2762 const uint32_t *in_mpls_v = item->spec;
/* Destination pointers; stay NULL if no supported encapsulation matched. */
2763 uint32_t *out_mpls_m = 0;
2764 uint32_t *out_mpls_v = 0;
2765 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2766 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2767 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2769 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2770 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2771 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First switch: pin the carrier protocol to MPLS. */
2773 switch (prev_layer) {
2774 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2775 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2776 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2777 MLX5_UDP_PORT_MPLS);
2779 case MLX5_FLOW_LAYER_GRE:
2780 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2781 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2785 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2786 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Missing mask: use the default rte_flow MPLS mask. */
2793 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second switch: select where in misc2 the MPLS word is matched. */
2794 switch (prev_layer) {
2795 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2797 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2798 outer_first_mpls_over_udp);
2800 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2801 outer_first_mpls_over_udp);
2803 case MLX5_FLOW_LAYER_GRE:
2805 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2806 outer_first_mpls_over_gre);
2808 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2809 outer_first_mpls_over_gre);
2812 /* Inner MPLS not over GRE is not supported. */
2815 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2819 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the label word only if a supported destination was selected. */
2825 if (out_mpls_m && out_mpls_v) {
2826 *out_mpls_m = *in_mpls_m;
2827 *out_mpls_v = *in_mpls_v & *in_mpls_m;
/*
 * Translate an rte_flow META item: match the 32-bit metadata value against
 * device register A (misc parameters 2).
 *
 * NOTE(review): elided listing -- the misc2_m/misc2_v declarations and the
 * NULL-mask/spec checks around lines 2849-2858 are only partially visible.
 */
2832 * Add META item to matcher
2834 * @param[in, out] matcher
2836 * @param[in, out] key
2837 * Flow matcher value.
2839 * Flow pattern to translate.
2841 * Item is inner pattern.
2844 flow_dv_translate_item_meta(void *matcher, void *key,
2845 const struct rte_flow_item *item)
2847 const struct rte_flow_item_meta *meta_m;
2848 const struct rte_flow_item_meta *meta_v;
/* Metadata register A lives in misc parameters 2. */
2850 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2852 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2854 meta_m = (const void *)item->mask;
/* Missing mask: use the default rte_flow META mask. */
2856 meta_m = &rte_flow_item_meta_mask;
2857 meta_v = (const void *)item->spec;
/* Metadata is big-endian in the pattern; value is spec & mask. */
2859 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2860 rte_be_to_cpu_32(meta_m->data));
2861 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2862 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/*
 * All-zero reference buffer sized as a full FTE match parameter; used by
 * HEADER_IS_ZERO to test whether a given header section of the matcher
 * criteria is entirely unused (no comments may be inserted between the
 * macro continuation lines below).
 */
2866 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2868 #define HEADER_IS_ZERO(match_criteria, headers) \
2869 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2870 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
/*
 * Build the match_criteria_enable bitmap: one bit per header section
 * (outer, misc, inner, misc2, and misc3 when DR is available) that
 * contains at least one non-zero mask byte.
 */
2873 * Calculate flow matcher enable bitmap.
2875 * @param match_criteria
2876 * Pointer to flow matcher criteria.
2879 * Bitmap of enabled fields.
2882 flow_dv_matcher_enable(uint32_t *match_criteria)
2884 uint8_t match_criteria_enable;
/* Each section contributes its bit only when its mask is non-zero. */
2886 match_criteria_enable =
2887 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2888 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2889 match_criteria_enable |=
2890 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2891 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2892 match_criteria_enable |=
2893 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2894 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2895 match_criteria_enable |=
2896 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2897 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
/* misc3 exists only with Direct Rules support. */
2898 #ifdef HAVE_MLX5DV_DR
2899 match_criteria_enable |=
2900 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2901 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2903 return match_criteria_enable;
/*
 * Get (and reference) the flow table for table_id in the requested
 * direction. With Direct Rules support the DR table object is created
 * on demand and refcounted; without DR the static table slot is returned.
 *
 * NOTE(review): elided listing -- the egress/ingress if/else, the
 * "already created" short-circuit, the error return and the #else/#endif
 * are not visible; confirm against the full source.
 */
2910 * @param dev[in, out]
2911 * Pointer to rte_eth_dev structure.
2912 * @param[in] table_id
2915 * Direction of the table.
2917 * pointer to error structure.
2920 * Returns tables resource based on the index, NULL in case of failed.
2922 static struct mlx5_flow_tbl_resource *
2923 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2924 uint32_t table_id, uint8_t egress,
2925 struct rte_flow_error *error)
2927 struct mlx5_priv *priv = dev->data->dev_private;
2928 struct mlx5_ibv_shared *sh = priv->sh;
2929 struct mlx5_flow_tbl_resource *tbl;
/* DR path: lazily create the table object in the tx/rx namespace. */
2931 #ifdef HAVE_MLX5DV_DR
2933 tbl = &sh->tx_tbl[table_id];
2935 tbl->obj = mlx5_glue->dr_create_flow_tbl
2936 (sh->tx_ns, table_id);
2938 tbl = &sh->rx_tbl[table_id];
2940 tbl->obj = mlx5_glue->dr_create_flow_tbl
2941 (sh->rx_ns, table_id);
/* Creation failure is reported via rte_flow error. */
2944 rte_flow_error_set(error, ENOMEM,
2945 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2946 NULL, "cannot create table");
/* Each successful get takes a reference, released by _release(). */
2949 rte_atomic32_inc(&tbl->refcnt);
/* Non-DR path: return the static table slot directly. */
2955 return &sh->tx_tbl[table_id];
2957 return &sh->rx_tbl[table_id];
/*
 * Drop one reference on a flow table; destroy the DR table object when
 * the last reference goes away.
 *
 * NOTE(review): elided listing -- the tbl->obj reset and the return
 * statements are not visible here.
 */
2962 * Release a flow table.
2965 * Table resource to be released.
2968 * Returns 0 if table was released, else return 1;
2971 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2975 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2976 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
/*
 * Find an existing matcher in the shared-context cache (same CRC,
 * priority, direction, group and mask) or create and register a new one.
 * On success dev_flow->dv.matcher points at the (ref-counted) matcher.
 *
 * NOTE(review): elided listing -- braces, several returns and parts of
 * the dv_attr initializer are not visible; confirm against full source.
 */
2984 * Register the flow matcher.
2986 * @param dev[in, out]
2987 * Pointer to rte_eth_dev structure.
2988 * @param[in, out] matcher
2989 * Pointer to flow matcher.
2990 * @parm[in, out] dev_flow
2991 * Pointer to the dev_flow.
2993 * pointer to error structure.
2996 * 0 on success otherwise -errno and errno is set.
2999 flow_dv_matcher_register(struct rte_eth_dev *dev,
3000 struct mlx5_flow_dv_matcher *matcher,
3001 struct mlx5_flow *dev_flow,
3002 struct rte_flow_error *error)
3004 struct mlx5_priv *priv = dev->data->dev_private;
3005 struct mlx5_ibv_shared *sh = priv->sh;
3006 struct mlx5_flow_dv_matcher *cache_matcher;
3007 struct mlx5dv_flow_matcher_attr dv_attr = {
3008 .type = IBV_FLOW_ATTR_NORMAL,
3009 .match_mask = (void *)&matcher->mask,
3011 struct mlx5_flow_tbl_resource *tbl = NULL;
/* Cache hit requires identical CRC, priority, direction, group, mask. */
3013 /* Lookup from cache. */
3014 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3015 if (matcher->crc == cache_matcher->crc &&
3016 matcher->priority == cache_matcher->priority &&
3017 matcher->egress == cache_matcher->egress &&
3018 matcher->group == cache_matcher->group &&
3019 !memcmp((const void *)matcher->mask.buf,
3020 (const void *)cache_matcher->mask.buf,
3021 cache_matcher->mask.size)) {
3023 "priority %hd use %s matcher %p: refcnt %d++",
3024 cache_matcher->priority,
3025 cache_matcher->egress ? "tx" : "rx",
3026 (void *)cache_matcher,
3027 rte_atomic32_read(&cache_matcher->refcnt));
/* Reuse: bump the refcount and hand the cached matcher out. */
3028 rte_atomic32_inc(&cache_matcher->refcnt);
3029 dev_flow->dv.matcher = cache_matcher;
3033 /* Register new matcher. */
3034 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3036 return rte_flow_error_set(error, ENOMEM,
3037 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3038 "cannot allocate matcher memory");
/* The matcher is anchored in a table derived from the flow group. */
3039 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3040 matcher->egress, error);
/* Table failure: free the half-built matcher before reporting. */
3042 rte_free(cache_matcher);
3043 return rte_flow_error_set(error, ENOMEM,
3044 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3045 NULL, "cannot create table");
3047 *cache_matcher = *matcher;
3048 dv_attr.match_criteria_enable =
3049 flow_dv_matcher_enable(cache_matcher->mask.buf);
3050 dv_attr.priority = matcher->priority;
3051 if (matcher->egress)
3052 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3053 cache_matcher->matcher_object =
3054 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3055 if (!cache_matcher->matcher_object) {
3056 rte_free(cache_matcher);
/* Table ref taken above is only dropped on the DR build. */
3057 #ifdef HAVE_MLX5DV_DR
3058 flow_dv_tbl_resource_release(tbl);
3060 return rte_flow_error_set(error, ENOMEM,
3061 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3062 NULL, "cannot create matcher");
/* Publish the new matcher in the shared cache. */
3064 rte_atomic32_inc(&cache_matcher->refcnt);
3065 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3066 dev_flow->dv.matcher = cache_matcher;
3067 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3068 cache_matcher->priority,
3069 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3070 rte_atomic32_read(&cache_matcher->refcnt));
3071 rte_atomic32_inc(&tbl->refcnt);
/*
 * Add a source vport (E-Switch source port) match to the matcher mask
 * and value via the misc parameters source_port field.
 *
 * NOTE(review): 'port' is signed int16_t while 'mask' is uint16_t and the
 * destination field is unsigned -- verify the signedness is intentional.
 */
3076 * Add source vport match to the specified matcher.
3078 * @param[in, out] matcher
3080 * @param[in, out] key
3081 * Flow matcher value.
3083 * Source vport value to match
3088 flow_dv_translate_source_vport(void *matcher, void *key,
3089 int16_t port, uint16_t mask)
3091 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3092 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3094 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3095 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
/*
 * Find an existing tag action resource with the same tag value in the
 * shared-context cache, or create/register a new one. The resulting
 * resource is stored in dev_flow->flow->tag_resource and ref-counted.
 *
 * NOTE(review): elided listing -- braces, the return statements and the
 * full argument list of dv_create_flow_action_tag are not visible.
 */
3099 * Find existing tag resource or create and register a new one.
3101 * @param dev[in, out]
3102 * Pointer to rte_eth_dev structure.
3103 * @param[in, out] resource
3104 * Pointer to tag resource.
3105 * @parm[in, out] dev_flow
3106 * Pointer to the dev_flow.
3108 * pointer to error structure.
3111 * 0 on success otherwise -errno and errno is set.
3114 flow_dv_tag_resource_register
3115 (struct rte_eth_dev *dev,
3116 struct mlx5_flow_dv_tag_resource *resource,
3117 struct mlx5_flow *dev_flow,
3118 struct rte_flow_error *error)
3120 struct mlx5_priv *priv = dev->data->dev_private;
3121 struct mlx5_ibv_shared *sh = priv->sh;
3122 struct mlx5_flow_dv_tag_resource *cache_resource;
/* Tag value alone identifies a cached resource. */
3124 /* Lookup a matching resource from cache. */
3125 LIST_FOREACH(cache_resource, &sh->tags, next) {
3126 if (resource->tag == cache_resource->tag) {
3127 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3128 (void *)cache_resource,
3129 rte_atomic32_read(&cache_resource->refcnt));
3130 rte_atomic32_inc(&cache_resource->refcnt);
3131 dev_flow->flow->tag_resource = cache_resource;
3135 /* Register new resource. */
3136 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3137 if (!cache_resource)
3138 return rte_flow_error_set(error, ENOMEM,
3139 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3140 "cannot allocate resource memory");
3141 *cache_resource = *resource;
/* Create the device tag action backing this resource. */
3142 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3144 if (!cache_resource->action) {
3145 rte_free(cache_resource);
3146 return rte_flow_error_set(error, ENOMEM,
3147 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3148 NULL, "cannot create action");
/* Publish with an initial reference held by this flow. */
3150 rte_atomic32_init(&cache_resource->refcnt);
3151 rte_atomic32_inc(&cache_resource->refcnt);
3152 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3153 dev_flow->flow->tag_resource = cache_resource;
3154 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3155 (void *)cache_resource,
3156 rte_atomic32_read(&cache_resource->refcnt));
/*
 * Drop one reference on a tag resource; destroy the tag action and unlink
 * the resource from the shared cache when the last reference goes away.
 *
 * NOTE(review): elided listing -- the rte_free() of the resource and the
 * return statements are not visible here.
 */
3164 * Pointer to Ethernet device.
3166 * Pointer to mlx5_flow.
3169 * 1 while a reference on it exists, 0 when freed.
3172 flow_dv_tag_release(struct rte_eth_dev *dev,
3173 struct mlx5_flow_dv_tag_resource *tag)
3176 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3177 dev->data->port_id, (void *)tag,
3178 rte_atomic32_read(&tag->refcnt));
/* Last reference: destroy the device action and remove from cache. */
3179 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3180 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3181 LIST_REMOVE(tag, next);
3182 DRV_LOG(DEBUG, "port %u tag %p: removed",
3183 dev->data->port_id, (void *)tag);
/*
 * Translate a validated rte_flow (attributes + items + actions) into the
 * DV representation stored in dev_flow: the action array, the matcher
 * mask/value buffers, the RSS hash fields and the layer flags. Runs in
 * three phases: (1) walk the action list, (2) add the E-Switch source
 * vport match if applicable, (3) walk the item list, then register the
 * matcher.
 *
 * NOTE(review): elided listing -- "break" statements, braces, several
 * returns and error paths between the visible lines are missing; confirm
 * control flow against the complete source.
 */
3191 * Fill the flow with DV spec.
3194 * Pointer to rte_eth_dev structure.
3195 * @param[in, out] dev_flow
3196 * Pointer to the sub flow.
3198 * Pointer to the flow attributes.
3200 * Pointer to the list of items.
3201 * @param[in] actions
3202 * Pointer to the list of actions.
3204 * Pointer to the error structure.
3207 * 0 on success, a negative errno value otherwise and rte_errno is set.
3210 flow_dv_translate(struct rte_eth_dev *dev,
3211 struct mlx5_flow *dev_flow,
3212 const struct rte_flow_attr *attr,
3213 const struct rte_flow_item items[],
3214 const struct rte_flow_action actions[],
3215 struct rte_flow_error *error)
3217 struct mlx5_priv *priv = dev->data->dev_private;
3218 struct rte_flow *flow = dev_flow->flow;
3219 uint64_t item_flags = 0;
3220 uint64_t last_item = 0;
3221 uint64_t action_flags = 0;
3222 uint64_t priority = attr->priority;
3223 struct mlx5_flow_dv_matcher matcher = {
3225 .size = sizeof(matcher.mask.buf),
3229 bool actions_end = false;
/* Accumulator for all modify-header actions; flushed at END. */
3230 struct mlx5_flow_dv_modify_hdr_resource res = {
3231 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3232 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3234 union flow_dv_attr flow_attr = { .attr = 0 };
3235 struct mlx5_flow_dv_tag_resource tag_resource;
/* Reserved priority means "use the lowest configured priority". */
3237 if (priority == MLX5_FLOW_PRIO_RSVD)
3238 priority = priv->config.flow_prio - 1;
/* Phase 1: translate actions until the END action is reached. */
3239 for (; !actions_end ; actions++) {
3240 const struct rte_flow_action_queue *queue;
3241 const struct rte_flow_action_rss *rss;
3242 const struct rte_flow_action *action = actions;
3243 const struct rte_flow_action_count *count = action->conf;
3244 const uint8_t *rss_key;
3245 const struct rte_flow_action_jump *jump_data;
3246 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3247 struct mlx5_flow_tbl_resource *tbl;
3249 switch (actions->type) {
3250 case RTE_FLOW_ACTION_TYPE_VOID:
/* FLAG: mark packets with the default mark value. */
3252 case RTE_FLOW_ACTION_TYPE_FLAG:
3254 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3255 if (!flow->tag_resource)
3256 if (flow_dv_tag_resource_register
3257 (dev, &tag_resource, dev_flow, error))
3259 dev_flow->dv.actions[actions_n++] =
3260 flow->tag_resource->action;
3261 action_flags |= MLX5_FLOW_ACTION_FLAG;
/* MARK: like FLAG but with a user-supplied mark id. */
3263 case RTE_FLOW_ACTION_TYPE_MARK:
3264 tag_resource.tag = mlx5_flow_mark_set
3265 (((const struct rte_flow_action_mark *)
3266 (actions->conf))->id);
3267 if (!flow->tag_resource)
3268 if (flow_dv_tag_resource_register
3269 (dev, &tag_resource, dev_flow, error))
3271 dev_flow->dv.actions[actions_n++] =
3272 flow->tag_resource->action;
3273 action_flags |= MLX5_FLOW_ACTION_MARK;
3275 case RTE_FLOW_ACTION_TYPE_DROP:
3276 action_flags |= MLX5_FLOW_ACTION_DROP;
/* QUEUE: single destination queue. */
3278 case RTE_FLOW_ACTION_TYPE_QUEUE:
3279 queue = actions->conf;
3280 flow->rss.queue_num = 1;
3281 (*flow->queue)[0] = queue->index;
3282 action_flags |= MLX5_FLOW_ACTION_QUEUE;
/* RSS: copy queues/key/types; defaults applied for NULL/0. */
3284 case RTE_FLOW_ACTION_TYPE_RSS:
3285 rss = actions->conf;
3287 memcpy((*flow->queue), rss->queue,
3288 rss->queue_num * sizeof(uint16_t));
3289 flow->rss.queue_num = rss->queue_num;
3290 /* NULL RSS key indicates default RSS key. */
3291 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3292 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3293 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3294 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3295 flow->rss.level = rss->level;
3296 action_flags |= MLX5_FLOW_ACTION_RSS;
/* COUNT: requires DevX; ENOTSUP is distinguished from alloc failure. */
3298 case RTE_FLOW_ACTION_TYPE_COUNT:
3299 if (!priv->config.devx) {
3300 rte_errno = ENOTSUP;
3303 flow->counter = flow_dv_counter_new(dev, count->shared,
3305 if (flow->counter == NULL)
3307 dev_flow->dv.actions[actions_n++] =
3308 flow->counter->action;
3309 action_flags |= MLX5_FLOW_ACTION_COUNT;
3312 if (rte_errno == ENOTSUP)
3313 return rte_flow_error_set
3315 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3317 "count action not supported");
3319 return rte_flow_error_set
3321 RTE_FLOW_ERROR_TYPE_ACTION,
3323 "cannot create counter"
/* L2 tunnel encapsulation (VXLAN/NVGRE). */
3325 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3326 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3327 if (flow_dv_create_action_l2_encap(dev, actions,
3330 dev_flow->dv.actions[actions_n++] =
3331 dev_flow->dv.encap_decap->verbs_action;
3332 action_flags |= actions->type ==
3333 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3334 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3335 MLX5_FLOW_ACTION_NVGRE_ENCAP;
/* L2 tunnel decapsulation (VXLAN/NVGRE). */
3337 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3338 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3339 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3342 dev_flow->dv.actions[actions_n++] =
3343 dev_flow->dv.encap_decap->verbs_action;
3344 action_flags |= actions->type ==
3345 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3346 MLX5_FLOW_ACTION_VXLAN_DECAP :
3347 MLX5_FLOW_ACTION_NVGRE_DECAP;
3349 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3350 /* Handle encap with preceding decap. */
3351 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3352 if (flow_dv_create_action_raw_encap
3353 (dev, actions, dev_flow, attr, error))
3355 dev_flow->dv.actions[actions_n++] =
3356 dev_flow->dv.encap_decap->verbs_action;
3358 /* Handle encap without preceding decap. */
3359 if (flow_dv_create_action_l2_encap(dev, actions,
3363 dev_flow->dv.actions[actions_n++] =
3364 dev_flow->dv.encap_decap->verbs_action;
3366 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3368 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3369 /* Check if this decap is followed by encap. */
3370 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3371 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3374 /* Handle decap only if it isn't followed by encap. */
3375 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3376 if (flow_dv_create_action_l2_decap(dev,
3380 dev_flow->dv.actions[actions_n++] =
3381 dev_flow->dv.encap_decap->verbs_action;
3383 /* If decap is followed by encap, handle it at encap. */
3384 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
/* JUMP: resolve the target table, then register the jump resource. */
3386 case RTE_FLOW_ACTION_TYPE_JUMP:
3387 jump_data = action->conf;
3388 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3390 attr->egress, error);
3392 return rte_flow_error_set
3394 RTE_FLOW_ERROR_TYPE_ACTION,
3396 "cannot create jump action.");
3397 jump_tbl_resource.tbl = tbl;
3398 if (flow_dv_jump_tbl_resource_register
3399 (dev, &jump_tbl_resource, dev_flow, error)) {
/* Undo the table reference taken above on failure. */
3400 flow_dv_tbl_resource_release(tbl);
3401 return rte_flow_error_set
3403 RTE_FLOW_ERROR_TYPE_ACTION,
3405 "cannot create jump action.");
3407 dev_flow->dv.actions[actions_n++] =
3408 dev_flow->dv.jump->action;
3409 action_flags |= MLX5_FLOW_ACTION_JUMP;
/* Modify-header actions accumulate into 'res' until END. */
3411 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3412 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3413 if (flow_dv_convert_action_modify_mac(&res, actions,
3416 action_flags |= actions->type ==
3417 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3418 MLX5_FLOW_ACTION_SET_MAC_SRC :
3419 MLX5_FLOW_ACTION_SET_MAC_DST;
3421 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3422 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3423 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3426 action_flags |= actions->type ==
3427 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3428 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3429 MLX5_FLOW_ACTION_SET_IPV4_DST;
3431 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3432 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3433 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3436 action_flags |= actions->type ==
3437 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3438 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3439 MLX5_FLOW_ACTION_SET_IPV6_DST;
3441 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3442 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3443 if (flow_dv_convert_action_modify_tp(&res, actions,
3447 action_flags |= actions->type ==
3448 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3449 MLX5_FLOW_ACTION_SET_TP_SRC :
3450 MLX5_FLOW_ACTION_SET_TP_DST;
/* DEC_TTL consults 'items' to pick IPv4 vs IPv6 TTL field. */
3452 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3453 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3457 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3459 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3460 if (flow_dv_convert_action_modify_ttl(&res, actions,
3464 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
/* END: flush accumulated modify-header actions as one action. */
3466 case RTE_FLOW_ACTION_TYPE_END:
3468 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3469 /* create modify action if needed. */
3470 if (flow_dv_modify_hdr_resource_register
3475 dev_flow->dv.actions[actions_n++] =
3476 dev_flow->dv.modify_hdr->verbs_action;
3483 dev_flow->dv.actions_n = actions_n;
3484 flow->actions = action_flags;
/* Phase 2: E-Switch ingress rules match on the source vport. */
3485 if (attr->ingress && !attr->transfer &&
3486 (priv->representor || priv->master)) {
3487 /* It was validated - we support unidirection flows only. */
3488 assert(!attr->egress);
3490 * Add matching on source vport index only
3491 * for ingress rules in E-Switch configurations.
3493 flow_dv_translate_source_vport(matcher.mask.buf,
3494 dev_flow->dv.value.buf,
/* Phase 3: translate pattern items into matcher mask/value. */
3498 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3499 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3500 void *match_mask = matcher.mask.buf;
3501 void *match_value = dev_flow->dv.value.buf;
3503 switch (items->type) {
3504 case RTE_FLOW_ITEM_TYPE_ETH:
3505 flow_dv_translate_item_eth(match_mask, match_value,
3507 matcher.priority = MLX5_PRIORITY_MAP_L2;
3508 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3509 MLX5_FLOW_LAYER_OUTER_L2;
3511 case RTE_FLOW_ITEM_TYPE_VLAN:
3512 flow_dv_translate_item_vlan(match_mask, match_value,
3514 matcher.priority = MLX5_PRIORITY_MAP_L2;
3515 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3516 MLX5_FLOW_LAYER_INNER_VLAN) :
3517 (MLX5_FLOW_LAYER_OUTER_L2 |
3518 MLX5_FLOW_LAYER_OUTER_VLAN);
/* L3 items also contribute RSS hash fields. */
3520 case RTE_FLOW_ITEM_TYPE_IPV4:
3521 flow_dv_translate_item_ipv4(match_mask, match_value,
3522 items, tunnel, attr->group);
3523 matcher.priority = MLX5_PRIORITY_MAP_L3;
3524 dev_flow->dv.hash_fields |=
3525 mlx5_flow_hashfields_adjust
3527 MLX5_IPV4_LAYER_TYPES,
3528 MLX5_IPV4_IBV_RX_HASH);
3529 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3530 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3532 case RTE_FLOW_ITEM_TYPE_IPV6:
3533 flow_dv_translate_item_ipv6(match_mask, match_value,
3534 items, tunnel, attr->group);
3535 matcher.priority = MLX5_PRIORITY_MAP_L3;
3536 dev_flow->dv.hash_fields |=
3537 mlx5_flow_hashfields_adjust
3539 MLX5_IPV6_LAYER_TYPES,
3540 MLX5_IPV6_IBV_RX_HASH);
3541 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3542 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3544 case RTE_FLOW_ITEM_TYPE_TCP:
3545 flow_dv_translate_item_tcp(match_mask, match_value,
3547 matcher.priority = MLX5_PRIORITY_MAP_L4;
3548 dev_flow->dv.hash_fields |=
3549 mlx5_flow_hashfields_adjust
3550 (dev_flow, tunnel, ETH_RSS_TCP,
3551 IBV_RX_HASH_SRC_PORT_TCP |
3552 IBV_RX_HASH_DST_PORT_TCP);
3553 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3554 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3556 case RTE_FLOW_ITEM_TYPE_UDP:
3557 flow_dv_translate_item_udp(match_mask, match_value,
3559 matcher.priority = MLX5_PRIORITY_MAP_L4;
3560 dev_flow->dv.hash_fields |=
3561 mlx5_flow_hashfields_adjust
3562 (dev_flow, tunnel, ETH_RSS_UDP,
3563 IBV_RX_HASH_SRC_PORT_UDP |
3564 IBV_RX_HASH_DST_PORT_UDP);
3565 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3566 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* Tunnel items: both GRE and NVGRE record the GRE layer flag. */
3568 case RTE_FLOW_ITEM_TYPE_GRE:
3569 flow_dv_translate_item_gre(match_mask, match_value,
3571 last_item = MLX5_FLOW_LAYER_GRE;
3573 case RTE_FLOW_ITEM_TYPE_NVGRE:
3574 flow_dv_translate_item_nvgre(match_mask, match_value,
3576 last_item = MLX5_FLOW_LAYER_GRE;
3578 case RTE_FLOW_ITEM_TYPE_VXLAN:
3579 flow_dv_translate_item_vxlan(match_mask, match_value,
3581 last_item = MLX5_FLOW_LAYER_VXLAN;
/* VXLAN-GPE reuses the VXLAN translation (dport differs). */
3583 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3584 flow_dv_translate_item_vxlan(match_mask, match_value,
3586 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3588 case RTE_FLOW_ITEM_TYPE_MPLS:
3589 flow_dv_translate_item_mpls(match_mask, match_value,
3590 items, last_item, tunnel);
3591 last_item = MLX5_FLOW_LAYER_MPLS;
3593 case RTE_FLOW_ITEM_TYPE_META:
3594 flow_dv_translate_item_meta(match_mask, match_value,
3596 last_item = MLX5_FLOW_ITEM_METADATA;
3601 item_flags |= last_item;
3603 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3604 dev_flow->dv.value.buf));
3605 dev_flow->layers = item_flags;
3606 /* Register matcher. */
/* CRC of the mask is the fast key for the matcher cache lookup. */
3607 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3609 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3611 matcher.egress = attr->egress;
3612 matcher.group = attr->group;
3613 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
/*
 * Apply a translated flow to the NIC: for each sub (dev) flow resolve the
 * destination (drop queue or RSS/queue hrxq), append the destination
 * action, and create the device flow rule. On any failure all hrxq
 * references taken so far are rolled back.
 *
 * NOTE(review): elided listing -- braces, the 'n' bookkeeping, the goto
 * error label and the success return are not visible here.
 */
3619 * Apply the flow to the NIC.
3622 * Pointer to the Ethernet device structure.
3623 * @param[in, out] flow
3624 * Pointer to flow structure.
3626 * Pointer to error structure.
3629 * 0 on success, a negative errno value otherwise and rte_errno is set.
3632 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3633 struct rte_flow_error *error)
3635 struct mlx5_flow_dv *dv;
3636 struct mlx5_flow *dev_flow;
3640 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
/* DROP flows target the dedicated drop hash RX queue. */
3643 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3644 dv->hrxq = mlx5_hrxq_drop_new(dev);
3648 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3649 "cannot get drop hash queue");
/* Destination QP action appended after the translated actions. */
3653 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3655 } else if (flow->actions &
3656 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3657 struct mlx5_hrxq *hrxq;
/* Reuse a cached hrxq if one matches key/fields/queues. */
3659 hrxq = mlx5_hrxq_get(dev, flow->key,
3660 MLX5_RSS_HASH_KEY_LEN,
3663 flow->rss.queue_num);
3665 hrxq = mlx5_hrxq_new
3666 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3667 dv->hash_fields, (*flow->queue),
3668 flow->rss.queue_num,
3669 !!(dev_flow->layers &
3670 MLX5_FLOW_LAYER_TUNNEL));
3674 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3675 "cannot get hash queue");
3680 mlx5_glue->dv_create_flow_action_dest_ibv_qp
/* Create the hardware rule from matcher + value + actions. */
3684 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3685 (void *)&dv->value, n,
3688 rte_flow_error_set(error, errno,
3689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3691 "hardware refuses to create flow");
/* Error path: release every hrxq acquired so far. */
3697 err = rte_errno; /* Save rte_errno before cleanup. */
3698 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3699 struct mlx5_flow_dv *dv = &dev_flow->dv;
3701 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3702 mlx5_hrxq_drop_release(dev);
3704 mlx5_hrxq_release(dev, dv->hrxq);
3708 rte_errno = err; /* Restore rte_errno. */
/*
 * Drop one reference on the flow's matcher. On the last reference the
 * device matcher object is destroyed, the matcher is unlinked from the
 * shared cache, and the owning table's reference is released.
 *
 * NOTE(review): elided listing -- the rte_free() of the matcher and the
 * return statements are not visible here.
 */
3713 * Release the flow matcher.
3716 * Pointer to Ethernet device.
3718 * Pointer to mlx5_flow.
3721 * 1 while a reference on it exists, 0 when freed.
3724 flow_dv_matcher_release(struct rte_eth_dev *dev,
3725 struct mlx5_flow *flow)
3727 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3728 struct mlx5_priv *priv = dev->data->dev_private;
3729 struct mlx5_ibv_shared *sh = priv->sh;
3730 struct mlx5_flow_tbl_resource *tbl;
3732 assert(matcher->matcher_object);
3733 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3734 dev->data->port_id, (void *)matcher,
3735 rte_atomic32_read(&matcher->refcnt));
3736 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3737 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3738 (matcher->matcher_object));
3739 LIST_REMOVE(matcher, next);
/* Also drop the reference on the table the matcher lived in. */
3740 if (matcher->egress)
3741 tbl = &sh->tx_tbl[matcher->group];
3743 tbl = &sh->rx_tbl[matcher->group];
3744 flow_dv_tbl_resource_release(tbl);
3746 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3747 dev->data->port_id, (void *)matcher);
 * Release an encap/decap resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
3763 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3765 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3766 flow->dv.encap_decap;
3768 assert(cache_resource->verbs_action);
3769 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3770 (void *)cache_resource,
3771 rte_atomic32_read(&cache_resource->refcnt));
3772 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3773 claim_zero(mlx5_glue->destroy_flow_action
3774 (cache_resource->verbs_action));
3775 LIST_REMOVE(cache_resource, next);
3776 rte_free(cache_resource);
3777 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3778 (void *)cache_resource);
 * Release a jump to table action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
3794 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3796 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3799 assert(cache_resource->action);
3800 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3801 (void *)cache_resource,
3802 rte_atomic32_read(&cache_resource->refcnt));
3803 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3804 claim_zero(mlx5_glue->destroy_flow_action
3805 (cache_resource->action));
3806 LIST_REMOVE(cache_resource, next);
3807 flow_dv_tbl_resource_release(cache_resource->tbl);
3808 rte_free(cache_resource);
3809 DRV_LOG(DEBUG, "jump table resource %p: removed",
3810 (void *)cache_resource);
 * Release a modify-header resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
3826 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3828 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3829 flow->dv.modify_hdr;
3831 assert(cache_resource->verbs_action);
3832 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3833 (void *)cache_resource,
3834 rte_atomic32_read(&cache_resource->refcnt));
3835 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3836 claim_zero(mlx5_glue->destroy_flow_action
3837 (cache_resource->verbs_action));
3838 LIST_REMOVE(cache_resource, next);
3839 rte_free(cache_resource);
3840 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3841 (void *)cache_resource);
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
3856 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3858 struct mlx5_flow_dv *dv;
3859 struct mlx5_flow *dev_flow;
3863 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3866 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3870 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3871 mlx5_hrxq_drop_release(dev);
3873 mlx5_hrxq_release(dev, dv->hrxq);
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
3888 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3890 struct mlx5_flow *dev_flow;
3894 flow_dv_remove(dev, flow);
3895 if (flow->counter) {
3896 flow_dv_counter_release(flow->counter);
3897 flow->counter = NULL;
3899 if (flow->tag_resource) {
3900 flow_dv_tag_release(dev, flow->tag_resource);
3901 flow->tag_resource = NULL;
3903 while (!LIST_EMPTY(&flow->dev_flows)) {
3904 dev_flow = LIST_FIRST(&flow->dev_flows);
3905 LIST_REMOVE(dev_flow, next);
3906 if (dev_flow->dv.matcher)
3907 flow_dv_matcher_release(dev, dev_flow);
3908 if (dev_flow->dv.encap_decap)
3909 flow_dv_encap_decap_resource_release(dev_flow);
3910 if (dev_flow->dv.modify_hdr)
3911 flow_dv_modify_hdr_resource_release(dev_flow);
3912 if (dev_flow->dv.jump)
3913 flow_dv_jump_tbl_resource_release(dev_flow);
 * Query a dv flow rule for its statistics via devx.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
3934 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3935 void *data, struct rte_flow_error *error)
3937 struct mlx5_priv *priv = dev->data->dev_private;
3938 struct rte_flow_query_count *qc = data;
3943 if (!priv->config.devx)
3944 return rte_flow_error_set(error, ENOTSUP,
3945 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3947 "counters are not supported");
3948 if (flow->counter) {
3949 err = mlx5_devx_cmd_flow_counter_query
3950 (flow->counter->dcs,
3951 qc->reset, &pkts, &bytes);
3953 return rte_flow_error_set
3955 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3957 "cannot read counters");
3960 qc->hits = pkts - flow->counter->hits;
3961 qc->bytes = bytes - flow->counter->bytes;
3963 flow->counter->hits = pkts;
3964 flow->counter->bytes = bytes;
3968 return rte_flow_error_set(error, EINVAL,
3969 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3971 "counters are not available");
3977 * @see rte_flow_query()
3981 flow_dv_query(struct rte_eth_dev *dev,
3982 struct rte_flow *flow __rte_unused,
3983 const struct rte_flow_action *actions __rte_unused,
3984 void *data __rte_unused,
3985 struct rte_flow_error *error __rte_unused)
3989 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3990 switch (actions->type) {
3991 case RTE_FLOW_ACTION_TYPE_VOID:
3993 case RTE_FLOW_ACTION_TYPE_COUNT:
3994 ret = flow_dv_query_count(dev, flow, data, error);
3997 return rte_flow_error_set(error, ENOTSUP,
3998 RTE_FLOW_ERROR_TYPE_ACTION,
4000 "action not supported");
4007 * Mutex-protected thunk to flow_dv_translate().
4010 flow_d_translate(struct rte_eth_dev *dev,
4011 struct mlx5_flow *dev_flow,
4012 const struct rte_flow_attr *attr,
4013 const struct rte_flow_item items[],
4014 const struct rte_flow_action actions[],
4015 struct rte_flow_error *error)
4019 flow_d_shared_lock(dev);
4020 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4021 flow_d_shared_unlock(dev);
4026 * Mutex-protected thunk to flow_dv_apply().
4029 flow_d_apply(struct rte_eth_dev *dev,
4030 struct rte_flow *flow,
4031 struct rte_flow_error *error)
4035 flow_d_shared_lock(dev);
4036 ret = flow_dv_apply(dev, flow, error);
4037 flow_d_shared_unlock(dev);
4042 * Mutex-protected thunk to flow_dv_remove().
4045 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4047 flow_d_shared_lock(dev);
4048 flow_dv_remove(dev, flow);
4049 flow_d_shared_unlock(dev);
4053 * Mutex-protected thunk to flow_dv_destroy().
4056 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4058 flow_d_shared_lock(dev);
4059 flow_dv_destroy(dev, flow);
4060 flow_d_shared_unlock(dev);
4063 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4064 .validate = flow_dv_validate,
4065 .prepare = flow_dv_prepare,
4066 .translate = flow_d_translate,
4067 .apply = flow_d_apply,
4068 .remove = flow_d_remove,
4069 .destroy = flow_d_destroy,
4070 .query = flow_dv_query,
4073 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */