1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
55 * Initialize flow attributes structure according to flow items' types.
58 * Pointer to item specification.
60 * Pointer to flow attributes structure.
/*
 * NOTE(review): partial listing — the switch statement, the attr flag
 * assignments, break statements and closing braces are elided between
 * the numbered lines below.  Walks the pattern until the END item and
 * presumably sets per-protocol flags (ipv4/ipv6/udp/tcp) in *attr for
 * the item types seen — confirm against the full source.
 */
63 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
65 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
67 case RTE_FLOW_ITEM_TYPE_IPV4:
70 case RTE_FLOW_ITEM_TYPE_IPV6:
73 case RTE_FLOW_ITEM_TYPE_UDP:
76 case RTE_FLOW_ITEM_TYPE_TCP:
/*
 * NOTE(review): partial listing — the closing "};" of each definition
 * and the {0, 0, 0} end-of-table sentinel entries (which the consumer
 * loop below relies on via field->size == 0) are elided.
 */
86 struct field_modify_info {
87 uint32_t size; /* Size of field in protocol header, in bytes. */
88 uint32_t offset; /* Offset of field in protocol header, in bytes. */
89 enum mlx5_modification_field id;
/* Map of Ethernet header fields onto HW modification segments: each
 * MAC address is split into a 32-bit 47:16 part and a 16-bit 15:0 part. */
92 struct field_modify_info modify_eth[] = {
93 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
94 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
95 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
96 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* IPv4: TTL byte at offset 8, 32-bit source/destination at 12/16. */
100 struct field_modify_info modify_ipv4[] = {
101 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
102 {4, 12, MLX5_MODI_OUT_SIPV4},
103 {4, 16, MLX5_MODI_OUT_DIPV4},
/* IPv6: hop-limit byte at offset 7; each 128-bit address is split into
 * four 32-bit modification segments, most-significant first. */
107 struct field_modify_info modify_ipv6[] = {
108 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
109 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
110 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
111 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
112 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
113 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
114 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
115 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
116 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP/TCP: 16-bit source and destination ports at offsets 0 and 2. */
120 struct field_modify_info modify_udp[] = {
121 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
122 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
126 struct field_modify_info modify_tcp[] = {
127 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
128 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
133 * Acquire the synchronizing object to protect multithreaded access
134 * to shared dv context. Lock occurs only if context is actually
135 * shared, i.e. we have multiport IB device and representors are
139 * Pointer to the rte_eth_dev structure.
/*
 * NOTE(review): partial listing — the 'ret' declaration, the handling
 * of the pthread_mutex_lock() return value and the closing braces are
 * elided.  Takes sh->dv_mutex only when the shared IB context is
 * referenced by more than one port (dv_refcnt > 1); uncontended
 * single-port devices skip the lock entirely.
 */
142 flow_d_shared_lock(struct rte_eth_dev *dev)
144 struct mlx5_priv *priv = dev->data->dev_private;
145 struct mlx5_ibv_shared *sh = priv->sh;
147 if (sh->dv_refcnt > 1) {
150 ret = pthread_mutex_lock(&sh->dv_mutex);
/*
 * NOTE(review): partial listing — 'ret' declaration, return-value
 * handling and closing braces elided.  Counterpart of
 * flow_d_shared_lock(): releases sh->dv_mutex only when the shared
 * context is actually shared (dv_refcnt > 1).
 */
157 flow_d_shared_unlock(struct rte_eth_dev *dev)
159 struct mlx5_priv *priv = dev->data->dev_private;
160 struct mlx5_ibv_shared *sh = priv->sh;
162 if (sh->dv_refcnt > 1) {
165 ret = pthread_mutex_unlock(&sh->dv_mutex);
172 * Convert modify-header action to DV specification.
175 * Pointer to item specification.
177 * Pointer to field modification information.
178 * @param[in,out] resource
179 * Pointer to the modify-header resource.
181 * Type of modification.
183 * Pointer to the error structure.
186 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the 'set' declaration, the branch
 * that skips zero-mask segments, the loop increment of i/field, the
 * final 'return 0' and closing braces are elided.
 *
 * Translates one rte_flow item (spec + mask) into HW modification
 * commands, one command per non-zero mask segment described by the
 * field_modify_info table.  Appends to resource->actions starting at
 * the current actions_num; fails with EINVAL when MLX5_MODIFY_NUM
 * commands would be exceeded or when the mask produces no command.
 */
189 flow_dv_convert_modify_action(struct rte_flow_item *item,
190 struct field_modify_info *field,
191 struct mlx5_flow_dv_modify_hdr_resource *resource,
193 struct rte_flow_error *error)
195 uint32_t i = resource->actions_num;
196 struct mlx5_modification_cmd *actions = resource->actions;
197 const uint8_t *spec = item->spec;
198 const uint8_t *mask = item->mask;
201 while (field->size) {
203 /* Generate modify command for each mask segment. */
204 memcpy(&set, &mask[field->offset], field->size);
206 if (i >= MLX5_MODIFY_NUM)
207 return rte_flow_error_set(error, EINVAL,
208 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
209 "too many items to modify");
210 actions[i].action_type = type;
211 actions[i].field = field->id;
/* Length 0 encodes a full 32-bit write; otherwise length is in bits. */
212 actions[i].length = field->size ==
213 4 ? 0 : field->size * 8;
/* Right-align sub-word values inside the 4-byte data field. */
214 rte_memcpy(&actions[i].data[4 - field->size],
215 &spec[field->offset], field->size);
216 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
219 if (resource->actions_num != i)
220 resource->actions_num = i;
223 if (!resource->actions_num)
224 return rte_flow_error_set(error, EINVAL,
225 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
226 "invalid modification flow item");
231 * Convert modify-header set IPv4 address action to DV specification.
233 * @param[in,out] resource
234 * Pointer to the modify-header resource.
236 * Pointer to action specification.
238 * Pointer to the error structure.
241 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the 'else' keyword of the SRC/DST
 * branch, the 'item.spec = &ipv4;' assignment and braces are elided.
 *
 * Wraps a SET_IPV4_SRC/SET_IPV4_DST action into a synthetic IPv4 flow
 * item (address field + full per-field mask) and delegates to
 * flow_dv_convert_modify_action() with the modify_ipv4 map and the SET
 * modification type.
 */
244 flow_dv_convert_action_modify_ipv4
245 (struct mlx5_flow_dv_modify_hdr_resource *resource,
246 const struct rte_flow_action *action,
247 struct rte_flow_error *error)
249 const struct rte_flow_action_set_ipv4 *conf =
250 (const struct rte_flow_action_set_ipv4 *)(action->conf);
251 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
252 struct rte_flow_item_ipv4 ipv4;
253 struct rte_flow_item_ipv4 ipv4_mask;
255 memset(&ipv4, 0, sizeof(ipv4));
256 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
257 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
258 ipv4.hdr.src_addr = conf->ipv4_addr;
259 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
261 ipv4.hdr.dst_addr = conf->ipv4_addr;
262 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
265 item.mask = &ipv4_mask;
266 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
267 MLX5_MODIFICATION_TYPE_SET, error);
271 * Convert modify-header set IPv6 address action to DV specification.
273 * @param[in,out] resource
274 * Pointer to the modify-header resource.
276 * Pointer to action specification.
278 * Pointer to the error structure.
281 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the 'else' of the SRC/DST branch,
 * 'item.spec = &ipv6;' and braces are elided.
 *
 * Same pattern as the IPv4 variant: builds a synthetic IPv6 item from
 * a SET_IPV6_SRC/SET_IPV6_DST action and converts it with the
 * modify_ipv6 map (SET type).  The 128-bit address is copied bytewise.
 */
284 flow_dv_convert_action_modify_ipv6
285 (struct mlx5_flow_dv_modify_hdr_resource *resource,
286 const struct rte_flow_action *action,
287 struct rte_flow_error *error)
289 const struct rte_flow_action_set_ipv6 *conf =
290 (const struct rte_flow_action_set_ipv6 *)(action->conf);
291 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
292 struct rte_flow_item_ipv6 ipv6;
293 struct rte_flow_item_ipv6 ipv6_mask;
295 memset(&ipv6, 0, sizeof(ipv6));
296 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
297 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
298 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
299 sizeof(ipv6.hdr.src_addr));
300 memcpy(&ipv6_mask.hdr.src_addr,
301 &rte_flow_item_ipv6_mask.hdr.src_addr,
302 sizeof(ipv6.hdr.src_addr));
304 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
305 sizeof(ipv6.hdr.dst_addr));
306 memcpy(&ipv6_mask.hdr.dst_addr,
307 &rte_flow_item_ipv6_mask.hdr.dst_addr,
308 sizeof(ipv6.hdr.dst_addr));
311 item.mask = &ipv6_mask;
312 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
313 MLX5_MODIFICATION_TYPE_SET, error);
317 * Convert modify-header set MAC address action to DV specification.
319 * @param[in,out] resource
320 * Pointer to the modify-header resource.
322 * Pointer to action specification.
324 * Pointer to the error structure.
327 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the 'else' of the SRC/DST branch,
 * 'item.spec = &eth;' and braces are elided.
 *
 * Builds a synthetic ETH item carrying the new source or destination
 * MAC (per action type) with the library's full per-field mask, then
 * converts it via the modify_eth map with the SET modification type.
 */
330 flow_dv_convert_action_modify_mac
331 (struct mlx5_flow_dv_modify_hdr_resource *resource,
332 const struct rte_flow_action *action,
333 struct rte_flow_error *error)
335 const struct rte_flow_action_set_mac *conf =
336 (const struct rte_flow_action_set_mac *)(action->conf);
337 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
338 struct rte_flow_item_eth eth;
339 struct rte_flow_item_eth eth_mask;
341 memset(&eth, 0, sizeof(eth));
342 memset(&eth_mask, 0, sizeof(eth_mask));
343 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
344 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
345 sizeof(eth.src.addr_bytes));
346 memcpy(&eth_mask.src.addr_bytes,
347 &rte_flow_item_eth_mask.src.addr_bytes,
348 sizeof(eth_mask.src.addr_bytes));
350 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
351 sizeof(eth.dst.addr_bytes));
352 memcpy(&eth_mask.dst.addr_bytes,
353 &rte_flow_item_eth_mask.dst.addr_bytes,
354 sizeof(eth_mask.dst.addr_bytes));
357 item.mask = &eth_mask;
358 return flow_dv_convert_modify_action(&item, modify_eth, resource,
359 MLX5_MODIFICATION_TYPE_SET, error);
363 * Convert modify-header set TP action to DV specification.
365 * @param[in,out] resource
366 * Pointer to the modify-header resource.
368 * Pointer to action specification.
370 * Pointer to rte_flow_item objects list.
372 * Pointer to flow attributes structure.
374 * Pointer to the error structure.
377 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the branch that selects UDP vs TCP
 * (presumably 'if (attr->udp)' / 'else' testing the flags set by
 * flow_dv_attr_init), the 'item.spec' and 'field = modify_udp/tcp'
 * assignments, 'else' keywords and braces are all elided — confirm
 * against the full source.
 *
 * Converts a SET_TP_SRC/SET_TP_DST action into a synthetic UDP or TCP
 * item (chosen from the flow's actual L4 protocol) and delegates to
 * flow_dv_convert_modify_action() with the SET type.
 */
380 flow_dv_convert_action_modify_tp
381 (struct mlx5_flow_dv_modify_hdr_resource *resource,
382 const struct rte_flow_action *action,
383 const struct rte_flow_item *items,
384 union flow_dv_attr *attr,
385 struct rte_flow_error *error)
387 const struct rte_flow_action_set_tp *conf =
388 (const struct rte_flow_action_set_tp *)(action->conf);
389 struct rte_flow_item item;
390 struct rte_flow_item_udp udp;
391 struct rte_flow_item_udp udp_mask;
392 struct rte_flow_item_tcp tcp;
393 struct rte_flow_item_tcp tcp_mask;
394 struct field_modify_info *field;
/* Populate attr from the pattern so the L4 protocol is known. */
397 flow_dv_attr_init(items, attr);
399 memset(&udp, 0, sizeof(udp));
400 memset(&udp_mask, 0, sizeof(udp_mask));
401 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
402 udp.hdr.src_port = conf->port;
403 udp_mask.hdr.src_port =
404 rte_flow_item_udp_mask.hdr.src_port;
406 udp.hdr.dst_port = conf->port;
407 udp_mask.hdr.dst_port =
408 rte_flow_item_udp_mask.hdr.dst_port;
410 item.type = RTE_FLOW_ITEM_TYPE_UDP;
412 item.mask = &udp_mask;
416 memset(&tcp, 0, sizeof(tcp));
417 memset(&tcp_mask, 0, sizeof(tcp_mask));
418 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
419 tcp.hdr.src_port = conf->port;
420 tcp_mask.hdr.src_port =
421 rte_flow_item_tcp_mask.hdr.src_port;
423 tcp.hdr.dst_port = conf->port;
424 tcp_mask.hdr.dst_port =
425 rte_flow_item_tcp_mask.hdr.dst_port;
427 item.type = RTE_FLOW_ITEM_TYPE_TCP;
429 item.mask = &tcp_mask;
432 return flow_dv_convert_modify_action(&item, field, resource,
433 MLX5_MODIFICATION_TYPE_SET, error);
437 * Convert modify-header set TTL action to DV specification.
439 * @param[in,out] resource
440 * Pointer to the modify-header resource.
442 * Pointer to action specification.
444 * Pointer to rte_flow_item objects list.
446 * Pointer to flow attributes structure.
448 * Pointer to the error structure.
451 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the IPv4/IPv6 selection branch
 * (presumably keyed on attr->ipv4), 'item.spec' and 'field =
 * modify_ipv4/ipv6' assignments, 'else' and braces are elided —
 * confirm against the full source.
 *
 * Converts a SET_TTL action into a one-byte SET of either the IPv4
 * time_to_live or the IPv6 hop_limits field, depending on the flow's
 * L3 protocol, with a full 0xFF mask on that byte.
 */
454 flow_dv_convert_action_modify_ttl
455 (struct mlx5_flow_dv_modify_hdr_resource *resource,
456 const struct rte_flow_action *action,
457 const struct rte_flow_item *items,
458 union flow_dv_attr *attr,
459 struct rte_flow_error *error)
461 const struct rte_flow_action_set_ttl *conf =
462 (const struct rte_flow_action_set_ttl *)(action->conf);
463 struct rte_flow_item item;
464 struct rte_flow_item_ipv4 ipv4;
465 struct rte_flow_item_ipv4 ipv4_mask;
466 struct rte_flow_item_ipv6 ipv6;
467 struct rte_flow_item_ipv6 ipv6_mask;
468 struct field_modify_info *field;
/* Populate attr from the pattern so the L3 protocol is known. */
471 flow_dv_attr_init(items, attr);
473 memset(&ipv4, 0, sizeof(ipv4));
474 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
475 ipv4.hdr.time_to_live = conf->ttl_value;
476 ipv4_mask.hdr.time_to_live = 0xFF;
477 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
479 item.mask = &ipv4_mask;
483 memset(&ipv6, 0, sizeof(ipv6));
484 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
485 ipv6.hdr.hop_limits = conf->ttl_value;
486 ipv6_mask.hdr.hop_limits = 0xFF;
487 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
489 item.mask = &ipv6_mask;
492 return flow_dv_convert_modify_action(&item, field, resource,
493 MLX5_MODIFICATION_TYPE_SET, error);
497 * Convert modify-header decrement TTL action to DV specification.
499 * @param[in,out] resource
500 * Pointer to the modify-header resource.
502 * Pointer to action specification.
504 * Pointer to rte_flow_item objects list.
506 * Pointer to flow attributes structure.
508 * Pointer to the error structure.
511 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the IPv4/IPv6 selection branch,
 * 'item.spec' and 'field = ...' assignments and braces are elided —
 * confirm against the full source.
 *
 * Implements DEC_TTL via MLX5_MODIFICATION_TYPE_ADD of 0xFF to the
 * TTL/hop-limit byte — presumably 0xFF acts as adding -1 modulo 256,
 * i.e. a decrement; verify against the PRM/full source.
 */
514 flow_dv_convert_action_modify_dec_ttl
515 (struct mlx5_flow_dv_modify_hdr_resource *resource,
516 const struct rte_flow_item *items,
517 union flow_dv_attr *attr,
518 struct rte_flow_error *error)
520 struct rte_flow_item item;
521 struct rte_flow_item_ipv4 ipv4;
522 struct rte_flow_item_ipv4 ipv4_mask;
523 struct rte_flow_item_ipv6 ipv6;
524 struct rte_flow_item_ipv6 ipv6_mask;
525 struct field_modify_info *field;
527 528 flow_dv_attr_init(items, attr);
530 memset(&ipv4, 0, sizeof(ipv4));
531 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
532 ipv4.hdr.time_to_live = 0xFF;
533 ipv4_mask.hdr.time_to_live = 0xFF;
534 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
536 item.mask = &ipv4_mask;
540 memset(&ipv6, 0, sizeof(ipv6));
541 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
542 ipv6.hdr.hop_limits = 0xFF;
543 ipv6_mask.hdr.hop_limits = 0xFF;
544 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
546 item.mask = &ipv6_mask;
549 return flow_dv_convert_modify_action(&item, field, resource,
550 MLX5_MODIFICATION_TYPE_ADD, error);
554 * Validate META item.
557 * Pointer to the rte_eth_dev structure.
559 * Item specification.
561 * Attributes of flow that includes this item.
563 * Pointer to error structure.
566 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the conditions guarding the
 * "data cannot be empty"/"data cannot be zero" returns (presumably
 * '!spec' and '!spec->data' checks), the 'ret' declaration/return
 * handling, the ingress-attr condition and braces are elided.
 *
 * Validates a META pattern item: requires the port's Tx metadata-match
 * offload to be enabled, a non-empty non-zero spec, an acceptable mask
 * against the 32-bit nic_mask, and (per the final error) rejects
 * ingress flows.
 */
569 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
570 const struct rte_flow_item *item,
571 const struct rte_flow_attr *attr,
572 struct rte_flow_error *error)
574 const struct rte_flow_item_meta *spec = item->spec;
575 const struct rte_flow_item_meta *mask = item->mask;
576 const struct rte_flow_item_meta nic_mask = {
577 .data = RTE_BE32(UINT32_MAX)
580 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
582 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
583 return rte_flow_error_set(error, EPERM,
584 RTE_FLOW_ERROR_TYPE_ITEM,
586 "match on metadata offload "
587 "configuration is off for this port");
589 return rte_flow_error_set(error, EINVAL,
590 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
592 "data cannot be empty");
594 return rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
597 "data cannot be zero");
/* Default mask applies when the user supplied none. */
599 mask = &rte_flow_item_meta_mask;
600 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
601 (const uint8_t *)&nic_mask,
602 sizeof(struct rte_flow_item_meta),
607 return rte_flow_error_set(error, ENOTSUP,
608 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
610 "pattern not supported for ingress");
615 * Validate count action.
620 * Pointer to error structure.
623 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the 'goto notsup_err' (or similar)
 * after the devx check, the '#ifdef' success return, the label and
 * braces are elided.  Accepts the COUNT action only when DevX is
 * available and the driver was built with DevX flow counters
 * (HAVE_IBV_FLOW_DEVX_COUNTERS); otherwise fails with
 * "count action not supported".
 */
626 flow_dv_validate_action_count(struct rte_eth_dev *dev,
627 struct rte_flow_error *error)
629 struct mlx5_priv *priv = dev->data->dev_private;
631 if (!priv->config.devx)
633 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
637 return rte_flow_error_set
639 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
641 "count action not supported");
645 * Validate the L2 encap action.
647 * @param[in] action_flags
648 * Holds the actions detected until now.
650 * Pointer to the encap action.
652 * Pointer to flow attributes
654 * Pointer to error structure.
657 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the '!action->conf' condition before
 * the first return, the ingress-attr condition before the last return
 * and the final success return are elided.
 *
 * Validates an L2 encap action: requires a configuration, rejects
 * combination with DROP, allows at most one encap/decap per flow, and
 * (per the final error type) rejects ingress flows.
 */
660 flow_dv_validate_action_l2_encap(uint64_t action_flags,
661 const struct rte_flow_action *action,
662 const struct rte_flow_attr *attr,
663 struct rte_flow_error *error)
666 return rte_flow_error_set(error, EINVAL,
667 RTE_FLOW_ERROR_TYPE_ACTION, action,
668 "configuration cannot be null");
669 if (action_flags & MLX5_FLOW_ACTION_DROP)
670 return rte_flow_error_set(error, EINVAL,
671 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
672 "can't drop and encap in same flow");
673 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
674 return rte_flow_error_set(error, EINVAL,
675 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
676 "can only have a single encap or"
677 " decap action in a flow");
679 return rte_flow_error_set(error, ENOTSUP,
680 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
682 "encap action not supported for "
688 * Validate the L2 decap action.
690 * @param[in] action_flags
691 * Holds the actions detected until now.
693 * Pointer to flow attributes
695 * Pointer to error structure.
698 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the tail of the "decap after
 * modify-header" message, the egress-attr condition before the last
 * return and the final success return are elided.
 *
 * Validates an L2 decap action: rejects combination with DROP, allows
 * only one encap/decap per flow, forbids decap after a modify-header
 * action, and (per the final error type) rejects egress flows.
 */
701 flow_dv_validate_action_l2_decap(uint64_t action_flags,
702 const struct rte_flow_attr *attr,
703 struct rte_flow_error *error)
705 if (action_flags & MLX5_FLOW_ACTION_DROP)
706 return rte_flow_error_set(error, EINVAL,
707 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
708 "can't drop and decap in same flow");
709 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
710 return rte_flow_error_set(error, EINVAL,
711 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
712 "can only have a single encap or"
713 " decap action in a flow");
714 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
715 return rte_flow_error_set(error, EINVAL,
716 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
717 "can't have decap action after"
720 return rte_flow_error_set(error, ENOTSUP,
721 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
723 "decap action not supported for "
729 * Validate the raw encap action.
731 * @param[in] action_flags
732 * Holds the actions detected until now.
734 * Pointer to the encap action.
736 * Pointer to flow attributes
738 * Pointer to error structure.
741 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the '!action->conf' condition before
 * the first return, the tail of the last error message and the final
 * success return are elided.
 *
 * Validates a RAW_ENCAP action: requires a configuration, rejects
 * combination with DROP, allows at most one encap per flow, and on
 * ingress requires a preceding RAW_DECAP (L3 tunnel rewrite case).
 */
744 flow_dv_validate_action_raw_encap(uint64_t action_flags,
745 const struct rte_flow_action *action,
746 const struct rte_flow_attr *attr,
747 struct rte_flow_error *error)
750 return rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ACTION, action,
752 "configuration cannot be null");
753 if (action_flags & MLX5_FLOW_ACTION_DROP)
754 return rte_flow_error_set(error, EINVAL,
755 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
756 "can't drop and encap in same flow");
757 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
758 return rte_flow_error_set(error, EINVAL,
759 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
760 "can only have a single encap"
761 " action in a flow");
762 /* encap without preceding decap is not supported for ingress */
763 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
764 return rte_flow_error_set(error, ENOTSUP,
765 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
767 "encap action not supported for "
773 * Validate the raw decap action.
775 * @param[in] action_flags
776 * Holds the actions detected until now.
778 * Pointer to the encap action.
780 * Pointer to flow attributes
782 * Pointer to error structure.
785 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — message tails, the 'if (attr->egress)'
 * guard before the forward scan, the scan's 'action++' increment, the
 * error-set argument carrying the errno, and the final success return
 * are elided.
 *
 * Validates a RAW_DECAP action: rejects combination with DROP, forbids
 * encap-before-decap ordering, allows a single decap per flow, forbids
 * decap after modify-header, and on egress requires that a RAW_ENCAP
 * follows later in the action list (scanned below).
 */
788 flow_dv_validate_action_raw_decap(uint64_t action_flags,
789 const struct rte_flow_action *action,
790 const struct rte_flow_attr *attr,
791 struct rte_flow_error *error)
793 if (action_flags & MLX5_FLOW_ACTION_DROP)
794 return rte_flow_error_set(error, EINVAL,
795 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
796 "can't drop and decap in same flow");
797 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
798 return rte_flow_error_set(error, EINVAL,
799 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
800 "can't have encap action before"
802 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
803 return rte_flow_error_set(error, EINVAL,
804 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
805 "can only have a single decap"
806 " action in a flow");
807 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
808 return rte_flow_error_set(error, EINVAL,
809 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
810 "can't have decap action after"
812 /* decap action is valid on egress only if it is followed by encap */
814 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
815 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
818 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
819 return rte_flow_error_set
821 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
822 NULL, "decap action not supported"
829 * Find existing encap/decap resource or create and register a new one.
831 * @param dev[in, out]
832 * Pointer to rte_eth_dev structure.
833 * @param[in, out] resource
834 * Pointer to encap/decap resource.
835 * @parm[in, out] dev_flow
836 * Pointer to the dev_flow.
838 * pointer to error structure.
841 * 0 on success otherwise -errno and errno is set.
/*
 * NOTE(review): partial listing — the block that sets 'ns' (presumably
 * selecting the rx/tx DR namespace under an #ifdef), the memcmp size
 * argument, the cached-hit 'return 0', the allocation-failure check
 * and the final 'return 0' are elided; 'ns' as shown would be used
 * uninitialized — confirm against the full source.
 *
 * Looks up an encap/decap reformat action in the shared-context cache
 * (matched on reformat type, table type, flags and raw buffer); on hit
 * it bumps the refcount and reuses it, otherwise it allocates a new
 * cache entry, creates the verbs packet-reformat action via the glue
 * layer, and inserts it at the head of sh->encaps_decaps.
 */
844 flow_dv_encap_decap_resource_register
845 (struct rte_eth_dev *dev,
846 struct mlx5_flow_dv_encap_decap_resource *resource,
847 struct mlx5_flow *dev_flow,
848 struct rte_flow_error *error)
850 struct mlx5_priv *priv = dev->data->dev_private;
851 struct mlx5_ibv_shared *sh = priv->sh;
852 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
853 struct rte_flow *flow = dev_flow->flow;
854 struct mlx5dv_dr_ns *ns;
/* flags = 1 marks the root table (group 0) variant of the action. */
856 resource->flags = flow->group ? 0 : 1;
862 /* Lookup a matching resource from cache. */
863 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
864 if (resource->reformat_type == cache_resource->reformat_type &&
865 resource->ft_type == cache_resource->ft_type &&
866 resource->flags == cache_resource->flags &&
867 resource->size == cache_resource->size &&
868 !memcmp((const void *)resource->buf,
869 (const void *)cache_resource->buf,
871 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
872 (void *)cache_resource,
873 rte_atomic32_read(&cache_resource->refcnt));
874 rte_atomic32_inc(&cache_resource->refcnt);
875 dev_flow->dv.encap_decap = cache_resource;
879 /* Register new encap/decap resource. */
880 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
882 return rte_flow_error_set(error, ENOMEM,
883 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
884 "cannot allocate resource memory");
885 *cache_resource = *resource;
886 cache_resource->verbs_action =
887 mlx5_glue->dv_create_flow_action_packet_reformat
888 (sh->ctx, cache_resource->reformat_type,
889 cache_resource->ft_type, ns, cache_resource->flags,
890 cache_resource->size,
891 (cache_resource->size ? cache_resource->buf : NULL));
892 if (!cache_resource->verbs_action) {
893 rte_free(cache_resource);
894 return rte_flow_error_set(error, ENOMEM,
895 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896 NULL, "cannot create action");
898 rte_atomic32_init(&cache_resource->refcnt);
899 rte_atomic32_inc(&cache_resource->refcnt);
900 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
901 dev_flow->dv.encap_decap = cache_resource;
902 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
903 (void *)cache_resource,
904 rte_atomic32_read(&cache_resource->refcnt));
909 * Find existing table jump resource or create and register a new one.
911 * @param dev[in, out]
912 * Pointer to rte_eth_dev structure.
913 * @param[in, out] resource
914 * Pointer to jump table resource.
915 * @parm[in, out] dev_flow
916 * Pointer to the dev_flow.
918 * pointer to error structure.
921 * 0 on success otherwise -errno and errno is set.
/*
 * NOTE(review): partial listing — the cached-hit 'return 0', the
 * allocation NULL-check and the final 'return 0' are elided.
 *
 * Same caching pattern as the encap/decap registration but keyed only
 * on the destination table pointer: reuse a cached jump action with a
 * refcount bump, or create one via dr_create_flow_action_dest_flow_tbl
 * and insert it into sh->jump_tbl.
 *
 * NOTE(review): the first DRV_LOG message duplicates the word
 * "resource" ("jump table resource resource %p") — runtime string, so
 * left untouched here; worth fixing upstream.
 */
924 flow_dv_jump_tbl_resource_register
925 (struct rte_eth_dev *dev,
926 struct mlx5_flow_dv_jump_tbl_resource *resource,
927 struct mlx5_flow *dev_flow,
928 struct rte_flow_error *error)
930 struct mlx5_priv *priv = dev->data->dev_private;
931 struct mlx5_ibv_shared *sh = priv->sh;
932 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
934 /* Lookup a matching resource from cache. */
935 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
936 if (resource->tbl == cache_resource->tbl) {
937 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
938 (void *)cache_resource,
939 rte_atomic32_read(&cache_resource->refcnt));
940 rte_atomic32_inc(&cache_resource->refcnt);
941 dev_flow->dv.jump = cache_resource;
945 /* Register new jump table resource. */
946 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
948 return rte_flow_error_set(error, ENOMEM,
949 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
950 "cannot allocate resource memory");
951 *cache_resource = *resource;
952 cache_resource->action =
953 mlx5_glue->dr_create_flow_action_dest_flow_tbl
954 (resource->tbl->obj);
955 if (!cache_resource->action) {
956 rte_free(cache_resource);
957 return rte_flow_error_set(error, ENOMEM,
958 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
959 NULL, "cannot create action");
961 rte_atomic32_init(&cache_resource->refcnt);
962 rte_atomic32_inc(&cache_resource->refcnt);
963 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
964 dev_flow->dv.jump = cache_resource;
965 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
966 (void *)cache_resource,
967 rte_atomic32_read(&cache_resource->refcnt));
972 * Get the size of specific rte_flow_item_type
974 * @param[in] item_type
975 * Tested rte_flow_item_type.
978 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * NOTE(review): partial listing — the 'retval' declaration, the switch
 * header, every 'break', the default case (returning 0 for VOID and
 * unknown types, per the comment on line 1019) and the final
 * 'return retval' are elided.
 *
 * Maps an rte_flow item type to the sizeof of its item struct, used to
 * size the raw encap header buffer.
 */
981 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
986 case RTE_FLOW_ITEM_TYPE_ETH:
987 retval = sizeof(struct rte_flow_item_eth);
989 case RTE_FLOW_ITEM_TYPE_VLAN:
990 retval = sizeof(struct rte_flow_item_vlan);
992 case RTE_FLOW_ITEM_TYPE_IPV4:
993 retval = sizeof(struct rte_flow_item_ipv4);
995 case RTE_FLOW_ITEM_TYPE_IPV6:
996 retval = sizeof(struct rte_flow_item_ipv6);
998 case RTE_FLOW_ITEM_TYPE_UDP:
999 retval = sizeof(struct rte_flow_item_udp);
1001 case RTE_FLOW_ITEM_TYPE_TCP:
1002 retval = sizeof(struct rte_flow_item_tcp);
1004 case RTE_FLOW_ITEM_TYPE_VXLAN:
1005 retval = sizeof(struct rte_flow_item_vxlan);
1007 case RTE_FLOW_ITEM_TYPE_GRE:
1008 retval = sizeof(struct rte_flow_item_gre);
1010 case RTE_FLOW_ITEM_TYPE_NVGRE:
1011 retval = sizeof(struct rte_flow_item_nvgre);
1013 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1014 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1016 case RTE_FLOW_ITEM_TYPE_MPLS:
1017 retval = sizeof(struct rte_flow_item_mpls);
1019 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/* Default header field values filled in by flow_dv_convert_encap_data()
 * when the user's encap pattern leaves them zero. */
1027 #define MLX5_ENCAP_IPV4_VERSION 0x40
1028 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1029 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1030 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1031 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1032 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1033 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
1036 * Convert the encap action data from list of rte_flow_item to raw buffer
1039 * Pointer to rte_flow_item objects list.
1041 * Pointer to the output buffer.
1043 * Pointer to the output buffer size.
1045 * Pointer to the error structure.
1048 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — many guard conditions ('if (!items)',
 * '!eth', '!vlan && !eth', '!ipv4 && !ipv6', '!udp', '!gre->proto'),
 * 'len' declaration, 'break' statements, the temp_size advance, the
 * final '*size = temp_size; return 0;' and closing braces are elided;
 * read the surviving error returns as the bodies of those guards.
 *
 * Serializes a list of flow items into a raw packet-header byte buffer
 * for the HW encap (packet reformat) action.  For each item it copies
 * items->spec into buf (bounded by MLX5_ENCAP_MAX_LEN), then patches
 * protocol-linking fields the user left zero: ether_type/eth_proto for
 * the next L3, IPv4 version/IHL/TTL, IPv6 vtc_flow/hop_limits,
 * next-proto for UDP/GRE, the VXLAN(-GPE) UDP dst port and flags.
 * Items must therefore appear in outer-to-inner header order.
 */
1051 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1052 size_t *size, struct rte_flow_error *error)
1054 struct ether_hdr *eth = NULL;
1055 struct vlan_hdr *vlan = NULL;
1056 struct ipv4_hdr *ipv4 = NULL;
1057 struct ipv6_hdr *ipv6 = NULL;
1058 struct udp_hdr *udp = NULL;
1059 struct vxlan_hdr *vxlan = NULL;
1060 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
1061 struct gre_hdr *gre = NULL;
1063 size_t temp_size = 0;
1066 return rte_flow_error_set(error, EINVAL,
1067 RTE_FLOW_ERROR_TYPE_ACTION,
1068 NULL, "invalid empty data");
1069 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1070 len = flow_dv_get_item_len(items->type);
1071 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1072 return rte_flow_error_set(error, EINVAL,
1073 RTE_FLOW_ERROR_TYPE_ACTION,
1074 (void *)items->type,
1075 "items total size is too big"
1076 " for encap action");
1077 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1078 switch (items->type) {
1079 case RTE_FLOW_ITEM_TYPE_ETH:
1080 eth = (struct ether_hdr *)&buf[temp_size];
1082 case RTE_FLOW_ITEM_TYPE_VLAN:
1083 vlan = (struct vlan_hdr *)&buf[temp_size];
1085 return rte_flow_error_set(error, EINVAL,
1086 RTE_FLOW_ERROR_TYPE_ACTION,
1087 (void *)items->type,
1088 "eth header not found");
1089 if (!eth->ether_type)
1090 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
1092 case RTE_FLOW_ITEM_TYPE_IPV4:
1093 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
1095 return rte_flow_error_set(error, EINVAL,
1096 RTE_FLOW_ERROR_TYPE_ACTION,
1097 (void *)items->type,
1098 "neither eth nor vlan"
1100 if (vlan && !vlan->eth_proto)
1101 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
1102 else if (eth && !eth->ether_type)
1103 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
1104 if (!ipv4->version_ihl)
1105 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1106 MLX5_ENCAP_IPV4_IHL_MIN;
1107 if (!ipv4->time_to_live)
1108 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1110 case RTE_FLOW_ITEM_TYPE_IPV6:
1111 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1113 return rte_flow_error_set(error, EINVAL,
1114 RTE_FLOW_ERROR_TYPE_ACTION,
1115 (void *)items->type,
1116 "neither eth nor vlan"
1118 if (vlan && !vlan->eth_proto)
1119 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1120 else if (eth && !eth->ether_type)
1121 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1122 if (!ipv6->vtc_flow)
1124 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1125 if (!ipv6->hop_limits)
1126 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1128 case RTE_FLOW_ITEM_TYPE_UDP:
1129 udp = (struct udp_hdr *)&buf[temp_size];
1131 return rte_flow_error_set(error, EINVAL,
1132 RTE_FLOW_ERROR_TYPE_ACTION,
1133 (void *)items->type,
1134 "ip header not found");
1135 if (ipv4 && !ipv4->next_proto_id)
1136 ipv4->next_proto_id = IPPROTO_UDP;
1137 else if (ipv6 && !ipv6->proto)
1138 ipv6->proto = IPPROTO_UDP;
1140 case RTE_FLOW_ITEM_TYPE_VXLAN:
1141 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1143 return rte_flow_error_set(error, EINVAL,
1144 RTE_FLOW_ERROR_TYPE_ACTION,
1145 (void *)items->type,
1146 "udp header not found");
1148 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1149 if (!vxlan->vx_flags)
1151 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1153 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1154 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1156 return rte_flow_error_set(error, EINVAL,
1157 RTE_FLOW_ERROR_TYPE_ACTION,
1158 (void *)items->type,
1159 "udp header not found");
1160 if (!vxlan_gpe->proto)
1161 return rte_flow_error_set(error, EINVAL,
1162 RTE_FLOW_ERROR_TYPE_ACTION,
1163 (void *)items->type,
1164 "next protocol not found");
1167 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1168 if (!vxlan_gpe->vx_flags)
1169 vxlan_gpe->vx_flags =
1170 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1172 case RTE_FLOW_ITEM_TYPE_GRE:
1173 case RTE_FLOW_ITEM_TYPE_NVGRE:
1174 gre = (struct gre_hdr *)&buf[temp_size];
1176 return rte_flow_error_set(error, EINVAL,
1177 RTE_FLOW_ERROR_TYPE_ACTION,
1178 (void *)items->type,
1179 "next protocol not found");
1181 return rte_flow_error_set(error, EINVAL,
1182 RTE_FLOW_ERROR_TYPE_ACTION,
1183 (void *)items->type,
1184 "ip header not found");
1185 if (ipv4 && !ipv4->next_proto_id)
1186 ipv4->next_proto_id = IPPROTO_GRE;
1187 else if (ipv6 && !ipv6->proto)
1188 ipv6->proto = IPPROTO_GRE;
1190 case RTE_FLOW_ITEM_TYPE_VOID:
1193 return rte_flow_error_set(error, EINVAL,
1194 RTE_FLOW_ERROR_TYPE_ACTION,
1195 (void *)items->type,
1196 "unsupported item type");
1206 * Convert L2 encap action to DV specification.
1209 * Pointer to rte_eth_dev structure.
1211 * Pointer to action structure.
1212 * @param[in, out] dev_flow
1213 * Pointer to the mlx5_flow.
1215 * Pointer to the error structure.
1218 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): partial listing — the assignments to 'encap_data' and
 * 'raw_encap_data', the 'else' keyword, the error return after
 * flow_dv_convert_encap_data() and the final 'return 0' are elided.
 *
 * Builds an L2-to-L2 tunnel encap resource: either copies the raw
 * buffer of a RAW_ENCAP action directly, or serializes the item list
 * of a VXLAN/NVGRE encap via flow_dv_convert_encap_data(), then
 * registers the resource (cached, refcounted) on the Tx table.
 */
1221 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1222 const struct rte_flow_action *action,
1223 struct mlx5_flow *dev_flow,
1224 struct rte_flow_error *error)
1226 const struct rte_flow_item *encap_data;
1227 const struct rte_flow_action_raw_encap *raw_encap_data;
1228 struct mlx5_flow_dv_encap_decap_resource res = {
1230 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1231 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1234 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1236 (const struct rte_flow_action_raw_encap *)action->conf;
1237 res.size = raw_encap_data->size;
1238 memcpy(res.buf, raw_encap_data->data, res.size);
1240 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1242 ((const struct rte_flow_action_vxlan_encap *)
1243 action->conf)->definition;
1246 ((const struct rte_flow_action_nvgre_encap *)
1247 action->conf)->definition;
1248 if (flow_dv_convert_encap_data(encap_data, res.buf,
1252 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1253 return rte_flow_error_set(error, EINVAL,
1254 RTE_FLOW_ERROR_TYPE_ACTION,
1255 NULL, "can't create L2 encap action");
/*
 * Convert an L2 decap action into a DV packet-reformat resource
 * (tunnel-to-L2 on the NIC RX domain) and register it.
 *
 * NOTE(review): partial extraction — interior lines (brace, reformat_type
 * field name, final return) are missing; code kept byte-identical.
 */
1260 * Convert L2 decap action to DV specification.
1263 * Pointer to rte_eth_dev structure.
1264 * @param[in, out] dev_flow
1265 * Pointer to the mlx5_flow.
1267 * Pointer to the error structure.
1270 * 0 on success, a negative errno value otherwise and rte_errno is set.
1273 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1274 struct mlx5_flow *dev_flow,
1275 struct rte_flow_error *error)
/* Decapsulation happens on ingress, hence the RX flow-table type. */
1277 struct mlx5_flow_dv_encap_decap_resource res = {
1280 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1281 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1284 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1285 return rte_flow_error_set(error, EINVAL,
1286 RTE_FLOW_ERROR_TYPE_ACTION,
1287 NULL, "can't create L2 decap action");
/*
 * Convert a raw encap/decap (L3 tunnel) action into a DV packet-reformat
 * resource; direction (encap on TX vs decap on RX) is derived from the
 * flow attributes.
 *
 * NOTE(review): partial extraction — interior lines missing; code kept
 * byte-identical, only comments added.
 */
1292 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1295 * Pointer to rte_eth_dev structure.
1297 * Pointer to action structure.
1298 * @param[in, out] dev_flow
1299 * Pointer to the mlx5_flow.
1301 * Pointer to the flow attributes.
1303 * Pointer to the error structure.
1306 * 0 on success, a negative errno value otherwise and rte_errno is set.
1309 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1310 const struct rte_flow_action *action,
1311 struct mlx5_flow *dev_flow,
1312 const struct rte_flow_attr *attr,
1313 struct rte_flow_error *error)
1315 const struct rte_flow_action_raw_encap *encap_data;
1316 struct mlx5_flow_dv_encap_decap_resource res;
/* Copy the caller-provided raw header into the resource buffer.
 * NOTE(review): no visible bound check of encap_data->size against
 * sizeof(res.buf) — presumably validated earlier; confirm upstream. */
1318 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1319 res.size = encap_data->size;
1320 memcpy(res.buf, encap_data->data, res.size);
/* Egress => L2-to-L3-tunnel encap on TX; ingress => L3-tunnel-to-L2 decap on RX. */
1321 res.reformat_type = attr->egress ?
1322 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1323 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1324 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1325 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1326 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1327 return rte_flow_error_set(error, EINVAL,
1328 RTE_FLOW_ERROR_TYPE_ACTION,
1329 NULL, "can't create encap action");
/*
 * Common validation for all modify-header actions: the action must carry a
 * configuration (except DEC_TTL, which has none) and must not follow an
 * encap action, since encap changes the headers being modified.
 *
 * NOTE(review): partial extraction — trailing lines (end of the error
 * string, final return) are missing; code kept byte-identical.
 */
1334 * Validate the modify-header actions.
1336 * @param[in] action_flags
1337 * Holds the actions detected until now.
1339 * Pointer to the modify action.
1341 * Pointer to error structure.
1344 * 0 on success, a negative errno value otherwise and rte_errno is set.
1347 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1348 const struct rte_flow_action *action,
1349 struct rte_flow_error *error)
/* DEC_TTL is the only modify-header action with no configuration. */
1351 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1352 return rte_flow_error_set(error, EINVAL,
1353 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1354 NULL, "action configuration not set")
1355 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1356 return rte_flow_error_set(error, EINVAL,
1357 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1358 "can't have encap action before"
/*
 * Validate SET_MAC_SRC/SET_MAC_DST: common modify-header checks plus the
 * requirement that the pattern actually matched an L2 header.
 *
 * NOTE(review): partial extraction — `int ret;` declaration, the
 * `if (!ret)` guard and final return are among the missing lines.
 */
1364 * Validate the modify-header MAC address actions.
1366 * @param[in] action_flags
1367 * Holds the actions detected until now.
1369 * Pointer to the modify action.
1370 * @param[in] item_flags
1371 * Holds the items detected.
1373 * Pointer to error structure.
1376 * 0 on success, a negative errno value otherwise and rte_errno is set.
1379 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1380 const struct rte_flow_action *action,
1381 const uint64_t item_flags,
1382 struct rte_flow_error *error)
1386 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* A MAC rewrite is meaningless unless the flow matches an Ethernet item. */
1388 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1389 return rte_flow_error_set(error, EINVAL,
1390 RTE_FLOW_ERROR_TYPE_ACTION,
1392 "no L2 item in pattern");
/*
 * Validate SET_IPV4_SRC/SET_IPV4_DST: common modify-header checks plus the
 * requirement that the pattern matched an IPv4 header.
 *
 * NOTE(review): partial extraction — declarations and final return missing;
 * code kept byte-identical.
 */
1398 * Validate the modify-header IPv4 address actions.
1400 * @param[in] action_flags
1401 * Holds the actions detected until now.
1403 * Pointer to the modify action.
1404 * @param[in] item_flags
1405 * Holds the items detected.
1407 * Pointer to error structure.
1410 * 0 on success, a negative errno value otherwise and rte_errno is set.
1413 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1414 const struct rte_flow_action *action,
1415 const uint64_t item_flags,
1416 struct rte_flow_error *error)
1420 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* Rewriting an IPv4 address requires an IPv4 item in the pattern. */
1422 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1423 return rte_flow_error_set(error, EINVAL,
1424 RTE_FLOW_ERROR_TYPE_ACTION,
1426 "no ipv4 item in pattern");
/*
 * Validate SET_IPV6_SRC/SET_IPV6_DST: common modify-header checks plus the
 * requirement that the pattern matched an IPv6 header.
 *
 * NOTE(review): partial extraction — declarations and final return missing;
 * code kept byte-identical.
 */
1432 * Validate the modify-header IPv6 address actions.
1434 * @param[in] action_flags
1435 * Holds the actions detected until now.
1437 * Pointer to the modify action.
1438 * @param[in] item_flags
1439 * Holds the items detected.
1441 * Pointer to error structure.
1444 * 0 on success, a negative errno value otherwise and rte_errno is set.
1447 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1448 const struct rte_flow_action *action,
1449 const uint64_t item_flags,
1450 struct rte_flow_error *error)
1454 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* Rewriting an IPv6 address requires an IPv6 item in the pattern. */
1456 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1457 return rte_flow_error_set(error, EINVAL,
1458 RTE_FLOW_ERROR_TYPE_ACTION,
1460 "no ipv6 item in pattern");
/*
 * Validate SET_TP_SRC/SET_TP_DST (transport-port rewrite): common
 * modify-header checks plus the requirement that the pattern matched an
 * L4 (TCP/UDP) header.
 *
 * NOTE(review): partial extraction — declarations, end of the error string
 * and final return missing; code kept byte-identical.
 */
1466 * Validate the modify-header TP actions.
1468 * @param[in] action_flags
1469 * Holds the actions detected until now.
1471 * Pointer to the modify action.
1472 * @param[in] item_flags
1473 * Holds the items detected.
1475 * Pointer to error structure.
1478 * 0 on success, a negative errno value otherwise and rte_errno is set.
1481 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1482 const struct rte_flow_action *action,
1483 const uint64_t item_flags,
1484 struct rte_flow_error *error)
1488 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* Port rewrite requires a transport-layer item in the pattern. */
1490 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1491 return rte_flow_error_set(error, EINVAL,
1492 RTE_FLOW_ERROR_TYPE_ACTION,
1493 NULL, "no transport layer "
/*
 * Validate SET_TTL/DEC_TTL: common modify-header checks plus the
 * requirement that the pattern matched an L3 (IPv4 or IPv6) header.
 *
 * NOTE(review): partial extraction — declarations and final return missing;
 * code kept byte-identical.
 */
1500 * Validate the modify-header TTL actions.
1502 * @param[in] action_flags
1503 * Holds the actions detected until now.
1505 * Pointer to the modify action.
1506 * @param[in] item_flags
1507 * Holds the items detected.
1509 * Pointer to error structure.
1512 * 0 on success, a negative errno value otherwise and rte_errno is set.
1515 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1516 const struct rte_flow_action *action,
1517 const uint64_t item_flags,
1518 struct rte_flow_error *error)
1522 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* TTL/hop-limit lives in the IP header; an L3 item must be present. */
1524 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1525 return rte_flow_error_set(error, EINVAL,
1526 RTE_FLOW_ERROR_TYPE_ACTION,
1528 "no IP protocol in pattern");
1534 * Validate jump action.
1537 * Pointer to the modify action.
1539 * The group of the current flow.
1541 * Pointer to error structure.
1544 * 0 on success, a negative errno value otherwise and rte_errno is set.
1547 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1549 struct rte_flow_error *error)
1551 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1552 return rte_flow_error_set(error, EINVAL,
1553 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1554 NULL, "action configuration not set");
1555 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1556 return rte_flow_error_set(error, EINVAL,
1557 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1558 "target group must be higher then"
1559 " the current flow group");
/*
 * Find an existing modify-header resource in the per-device cache or create
 * and register a new one; the result is attached to dev_flow and
 * reference-counted.
 *
 * NOTE(review): partial extraction — several interior lines (braces,
 * `return 0;`, one argument of the glue call) are missing; code kept
 * byte-identical, only comments added.
 */
1565 * Find existing modify-header resource or create and register a new one.
1567 * @param dev[in, out]
1568 * Pointer to rte_eth_dev structure.
1569 * @param[in, out] resource
1570 * Pointer to modify-header resource.
1571 * @parm[in, out] dev_flow
1572 * Pointer to the dev_flow.
1574 * pointer to error structure.
1577 * 0 on success otherwise -errno and errno is set.
1580 flow_dv_modify_hdr_resource_register
1581 (struct rte_eth_dev *dev,
1582 struct mlx5_flow_dv_modify_hdr_resource *resource,
1583 struct mlx5_flow *dev_flow,
1584 struct rte_flow_error *error)
1586 struct mlx5_priv *priv = dev->data->dev_private;
1587 struct mlx5_ibv_shared *sh = priv->sh;
1588 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
/* Pick the TX or RX namespace to match the resource's flow-table type. */
1590 struct mlx5dv_dr_ns *ns =
1591 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
1592 sh->tx_ns : sh->rx_ns;
1594 /* Lookup a matching resource from cache. */
1595 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
/* A hit requires identical table type and a byte-equal action list. */
1596 if (resource->ft_type == cache_resource->ft_type &&
1597 resource->actions_num == cache_resource->actions_num &&
1598 !memcmp((const void *)resource->actions,
1599 (const void *)cache_resource->actions,
1600 (resource->actions_num *
1601 sizeof(resource->actions[0])))) {
1602 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1603 (void *)cache_resource,
1604 rte_atomic32_read(&cache_resource->refcnt));
1605 rte_atomic32_inc(&cache_resource->refcnt);
1606 dev_flow->dv.modify_hdr = cache_resource;
1610 /* Register new modify-header resource. */
1611 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1612 if (!cache_resource)
1613 return rte_flow_error_set(error, ENOMEM,
1614 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1615 "cannot allocate resource memory");
1616 *cache_resource = *resource;
/* Create the verbs action from the packed modification command array. */
1617 cache_resource->verbs_action =
1618 mlx5_glue->dv_create_flow_action_modify_header
1619 (sh->ctx, cache_resource->ft_type,
1621 cache_resource->actions_num *
1622 sizeof(cache_resource->actions[0]),
1623 (uint64_t *)cache_resource->actions);
1624 if (!cache_resource->verbs_action) {
1625 rte_free(cache_resource);
1626 return rte_flow_error_set(error, ENOMEM,
1627 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1628 NULL, "cannot create action");
/* New entry starts with refcnt 1 and goes to the head of the cache list. */
1630 rte_atomic32_init(&cache_resource->refcnt);
1631 rte_atomic32_inc(&cache_resource->refcnt);
1632 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1633 dev_flow->dv.modify_hdr = cache_resource;
1634 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1635 (void *)cache_resource,
1636 rte_atomic32_read(&cache_resource->refcnt));
/*
 * Get or create a DevX flow counter: shared counters are looked up by id
 * in the per-port list first; otherwise a new counter set is allocated
 * via DevX and inserted.
 *
 * NOTE(review): this block is the most heavily gapped in the extraction —
 * the ref-count handling, error/rollback paths and return statements are
 * missing.  Code kept byte-identical; only comments added.
 */
1641 * Get or create a flow counter.
1644 * Pointer to the Ethernet device structure.
1646 * Indicate if this counter is shared with other flows.
1648 * Counter identifier.
1651 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
1653 static struct mlx5_flow_counter *
1654 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1656 struct mlx5_priv *priv = dev->data->dev_private;
1657 struct mlx5_flow_counter *cnt = NULL;
1658 struct mlx5_devx_counter_set *dcs = NULL;
/* DevX is required for flow counters; bail out early if unavailable. */
1661 if (!priv->config.devx) {
/* Reuse an existing shared counter with a matching id. */
1666 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1667 if (cnt->shared && cnt->id == id) {
1673 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1674 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1679 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1682 struct mlx5_flow_counter tmpl = {
1688 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1694 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
/*
 * Drop one reference on a flow counter; when the last reference goes away
 * the DevX counter set is freed and the entry is unlinked and released.
 *
 * NOTE(review): partial extraction — the final rte_free(counter) and
 * closing brace are among the missing lines; code kept byte-identical.
 */
1704 * Release a flow counter.
1706 * @param[in] counter
1707 * Pointer to the counter handler.
1710 flow_dv_counter_release(struct mlx5_flow_counter *counter)
/* Only the last owner tears the hardware counter down. */
1716 if (--counter->ref_cnt == 0) {
1717 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
/* Freeing failure is logged but not propagated (best effort on release). */
1719 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1720 LIST_REMOVE(counter, next);
1721 rte_free(counter->dcs);
/*
 * Validate flow attributes for the DV engine: group support depends on
 * MLX5DV_DR, priority must be in range, transfer is unsupported, and
 * exactly one of ingress/egress must be set.
 *
 * NOTE(review): partial extraction — the #endif for the HAVE_MLX5DV_DR
 * guard and the final `return 0;` are among the missing lines; code kept
 * byte-identical.
 */
1727 * Verify the @p attributes will be correctly understood by the NIC and store
1728 * them in the @p flow if everything is correct.
1731 * Pointer to dev struct.
1732 * @param[in] attributes
1733 * Pointer to flow attributes
1735 * Pointer to error structure.
1738 * 0 on success, a negative errno value otherwise and rte_errno is set.
1741 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1742 const struct rte_flow_attr *attributes,
1743 struct rte_flow_error *error)
1745 struct mlx5_priv *priv = dev->data->dev_private;
/* flow_prio is the count of priorities; the last usable one is count-1. */
1746 uint32_t priority_max = priv->config.flow_prio - 1;
/* Non-zero groups need the Direct Rules API; reject them without it. */
1748 #ifndef HAVE_MLX5DV_DR
1749 if (attributes->group)
1750 return rte_flow_error_set(error, ENOTSUP,
1751 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1753 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a sentinel meaning "let the PMD choose". */
1755 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1756 attributes->priority >= priority_max)
1757 return rte_flow_error_set(error, ENOTSUP,
1758 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1760 "priority out of range");
1761 if (attributes->transfer)
1762 return rte_flow_error_set(error, ENOTSUP,
1763 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1765 "transfer is not supported");
/* XOR: a rule must be either ingress or egress, never both or neither. */
1766 if (!(attributes->egress ^ attributes->ingress))
1767 return rte_flow_error_set(error, ENOTSUP,
1768 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1769 "must specify exactly one of "
1770 "ingress or egress");
/*
 * Top-level DV validation: checks attributes, then walks the item list
 * accumulating item_flags (tracking the next-protocol hint for tunnel
 * items), then walks the action list accumulating action_flags, and
 * finally requires a fate action on ingress rules.
 *
 * NOTE(review): partial extraction — many interior lines (break
 * statements, `if (ret < 0) return ret;` guards, some call arguments,
 * `actions_n` declaration/increments, final `return 0;`) are missing.
 * Code kept byte-identical; only comments added.
 */
1775 * Internal validation function. For validating both actions and items.
1778 * Pointer to the rte_eth_dev structure.
1780 * Pointer to the flow attributes.
1782 * Pointer to the list of items.
1783 * @param[in] actions
1784 * Pointer to the list of actions.
1786 * Pointer to the error structure.
1789 * 0 on success, a negative errno value otherwise and rte_errno is set.
1792 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1793 const struct rte_flow_item items[],
1794 const struct rte_flow_action actions[],
1795 struct rte_flow_error *error)
1798 uint64_t action_flags = 0;
1799 uint64_t item_flags = 0;
1800 uint64_t last_item = 0;
/* 0xff means "no next-protocol constraint" for the following tunnel item. */
1801 uint8_t next_protocol = 0xff;
1806 ret = flow_dv_validate_attributes(dev, attr, error);
/* --- Pattern (item) validation loop. --- */
1809 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* Once a tunnel item was seen, later L2-L4 items describe the inner packet. */
1810 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1811 switch (items->type) {
1812 case RTE_FLOW_ITEM_TYPE_VOID:
1814 case RTE_FLOW_ITEM_TYPE_ETH:
1815 ret = mlx5_flow_validate_item_eth(items, item_flags,
1819 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1820 MLX5_FLOW_LAYER_OUTER_L2;
1822 case RTE_FLOW_ITEM_TYPE_VLAN:
1823 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1827 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1828 MLX5_FLOW_LAYER_OUTER_VLAN;
1830 case RTE_FLOW_ITEM_TYPE_IPV4:
1831 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1835 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1836 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Remember the masked next-protocol so a following GRE item can be checked. */
1837 if (items->mask != NULL &&
1838 ((const struct rte_flow_item_ipv4 *)
1839 items->mask)->hdr.next_proto_id) {
1841 ((const struct rte_flow_item_ipv4 *)
1842 (items->spec))->hdr.next_proto_id;
1844 ((const struct rte_flow_item_ipv4 *)
1845 (items->mask))->hdr.next_proto_id;
1847 /* Reset for inner layer. */
1848 next_protocol = 0xff;
1851 case RTE_FLOW_ITEM_TYPE_IPV6:
1852 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1856 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1857 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1858 if (items->mask != NULL &&
1859 ((const struct rte_flow_item_ipv6 *)
1860 items->mask)->hdr.proto) {
1862 ((const struct rte_flow_item_ipv6 *)
1863 items->spec)->hdr.proto;
1865 ((const struct rte_flow_item_ipv6 *)
1866 items->mask)->hdr.proto;
1868 /* Reset for inner layer. */
1869 next_protocol = 0xff;
1872 case RTE_FLOW_ITEM_TYPE_TCP:
1873 ret = mlx5_flow_validate_item_tcp
1876 &rte_flow_item_tcp_mask,
1880 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1881 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1883 case RTE_FLOW_ITEM_TYPE_UDP:
1884 ret = mlx5_flow_validate_item_udp(items, item_flags,
1889 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1890 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* Tunnel items: GRE/NVGRE consume the next_protocol hint gathered above. */
1892 case RTE_FLOW_ITEM_TYPE_GRE:
1893 case RTE_FLOW_ITEM_TYPE_NVGRE:
1894 ret = mlx5_flow_validate_item_gre(items, item_flags,
1895 next_protocol, error);
1898 last_item = MLX5_FLOW_LAYER_GRE;
1900 case RTE_FLOW_ITEM_TYPE_VXLAN:
1901 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1905 last_item = MLX5_FLOW_LAYER_VXLAN;
1907 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1908 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1913 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1915 case RTE_FLOW_ITEM_TYPE_MPLS:
1916 ret = mlx5_flow_validate_item_mpls(dev, items,
1921 last_item = MLX5_FLOW_LAYER_MPLS;
1923 case RTE_FLOW_ITEM_TYPE_META:
1924 ret = flow_dv_validate_item_meta(dev, items, attr,
1928 last_item = MLX5_FLOW_ITEM_METADATA;
1931 return rte_flow_error_set(error, ENOTSUP,
1932 RTE_FLOW_ERROR_TYPE_ITEM,
1933 NULL, "item not supported");
/* Commit the per-item layer bit only after the item validated cleanly. */
1935 item_flags |= last_item;
/* --- Action validation loop. --- */
1937 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1938 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1939 return rte_flow_error_set(error, ENOTSUP,
1940 RTE_FLOW_ERROR_TYPE_ACTION,
1941 actions, "too many actions");
1942 switch (actions->type) {
1943 case RTE_FLOW_ACTION_TYPE_VOID:
1945 case RTE_FLOW_ACTION_TYPE_FLAG:
1946 ret = mlx5_flow_validate_action_flag(action_flags,
1950 action_flags |= MLX5_FLOW_ACTION_FLAG;
1953 case RTE_FLOW_ACTION_TYPE_MARK:
1954 ret = mlx5_flow_validate_action_mark(actions,
1959 action_flags |= MLX5_FLOW_ACTION_MARK;
1962 case RTE_FLOW_ACTION_TYPE_DROP:
1963 ret = mlx5_flow_validate_action_drop(action_flags,
1967 action_flags |= MLX5_FLOW_ACTION_DROP;
1970 case RTE_FLOW_ACTION_TYPE_QUEUE:
1971 ret = mlx5_flow_validate_action_queue(actions,
1976 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1979 case RTE_FLOW_ACTION_TYPE_RSS:
1980 ret = mlx5_flow_validate_action_rss(actions,
1986 action_flags |= MLX5_FLOW_ACTION_RSS;
1989 case RTE_FLOW_ACTION_TYPE_COUNT:
1990 ret = flow_dv_validate_action_count(dev, error);
1993 action_flags |= MLX5_FLOW_ACTION_COUNT;
1996 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1997 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1998 ret = flow_dv_validate_action_l2_encap(action_flags,
2003 action_flags |= actions->type ==
2004 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2005 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2006 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2009 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2010 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2011 ret = flow_dv_validate_action_l2_decap(action_flags,
2015 action_flags |= actions->type ==
2016 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2017 MLX5_FLOW_ACTION_VXLAN_DECAP :
2018 MLX5_FLOW_ACTION_NVGRE_DECAP;
2021 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2022 ret = flow_dv_validate_action_raw_encap(action_flags,
2027 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2030 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2031 ret = flow_dv_validate_action_raw_decap(action_flags,
2036 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2039 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2040 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2041 ret = flow_dv_validate_action_modify_mac(action_flags,
2047 /* Count all modify-header actions as one action. */
2048 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2050 action_flags |= actions->type ==
2051 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2052 MLX5_FLOW_ACTION_SET_MAC_SRC :
2053 MLX5_FLOW_ACTION_SET_MAC_DST;
2056 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2057 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2058 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2064 /* Count all modify-header actions as one action. */
2065 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2067 action_flags |= actions->type ==
2068 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2069 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2070 MLX5_FLOW_ACTION_SET_IPV4_DST;
2072 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2073 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2074 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2080 /* Count all modify-header actions as one action. */
2081 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2083 action_flags |= actions->type ==
2084 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2085 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2086 MLX5_FLOW_ACTION_SET_IPV6_DST;
2088 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2089 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2090 ret = flow_dv_validate_action_modify_tp(action_flags,
2096 /* Count all modify-header actions as one action. */
2097 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2099 action_flags |= actions->type ==
2100 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2101 MLX5_FLOW_ACTION_SET_TP_SRC :
2102 MLX5_FLOW_ACTION_SET_TP_DST;
2104 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2105 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2106 ret = flow_dv_validate_action_modify_ttl(action_flags,
2112 /* Count all modify-header actions as one action. */
2113 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2115 action_flags |= actions->type ==
2116 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2117 MLX5_FLOW_ACTION_SET_TTL :
2118 MLX5_FLOW_ACTION_DEC_TTL;
2120 case RTE_FLOW_ACTION_TYPE_JUMP:
2121 ret = flow_dv_validate_action_jump(actions,
2122 attr->group, error);
2126 action_flags |= MLX5_FLOW_ACTION_JUMP;
2129 return rte_flow_error_set(error, ENOTSUP,
2130 RTE_FLOW_ERROR_TYPE_ACTION,
2132 "action not supported");
/* Ingress rules must decide the packet's fate (queue/RSS/drop/...). */
2135 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2136 return rte_flow_error_set(error, EINVAL,
2137 RTE_FLOW_ERROR_TYPE_ACTION, actions,
2138 "no fate action is found")
/*
 * Allocate and minimally initialize the per-device-flow object; the DV
 * flow size is constant, so attr/items/actions are unused here.
 *
 * NOTE(review): partial extraction — the `if (!flow)` guard, the NULL
 * return and the final `return flow;` are among the missing lines.
 */
2143 * Internal preparation function. Allocates the DV flow size,
2144 * this size is constant.
2147 * Pointer to the flow attributes.
2149 * Pointer to the list of items.
2150 * @param[in] actions
2151 * Pointer to the list of actions.
2153 * Pointer to the error structure.
2156 * Pointer to mlx5_flow object on success,
2157 * otherwise NULL and rte_errno is set.
2159 static struct mlx5_flow *
2160 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2161 const struct rte_flow_item items[] __rte_unused,
2162 const struct rte_flow_action actions[] __rte_unused,
2163 struct rte_flow_error *error)
2165 uint32_t size = sizeof(struct mlx5_flow);
2166 struct mlx5_flow *flow;
2168 flow = rte_calloc(__func__, 1, size, 0);
2170 rte_flow_error_set(error, ENOMEM,
2171 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2172 "not enough memory to create flow");
/* Matcher value buffer is always the full fte_match_param size. */
2175 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
/*
 * Debug sanity check mirroring the kernel's check_valid_spec(): every bit
 * set in the match value must also be set in the mask, otherwise the rule
 * would match on unmasked garbage.
 *
 * NOTE(review): partial extraction — the per-byte comparison, DRV_LOG
 * call opening, return statements and closing brace are partially missing.
 */
2181 * Sanity check for match mask and value. Similar to check_valid_spec() in
2182 * kernel driver. If unmasked bit is present in value, it returns failure.
2185 * pointer to match mask buffer.
2186 * @param match_value
2187 * pointer to match value buffer.
2190 * 0 if valid, -EINVAL otherwise.
2193 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2195 uint8_t *m = match_mask;
2196 uint8_t *v = match_value;
2199 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2202 "match_value differs from match_criteria"
2203 " %p[%u] != %p[%u]",
2204 match_value, i, match_mask, i);
/*
 * Translate an ETH pattern item into the DV matcher (mask) and key (value)
 * buffers: MAC addresses and ethertype, with every value byte ANDed with
 * its mask so the value never exceeds the mask.
 *
 * NOTE(review): partial extraction — local declarations (headers_m/v,
 * l24_v, i), the NULL-spec early return and the inner/outer braces are
 * among the missing lines; code kept byte-identical.
 */
2213 * Add Ethernet item to matcher and to the value.
2215 * @param[in, out] matcher
2217 * @param[in, out] key
2218 * Flow matcher value.
2220 * Flow pattern to translate.
2222 * Item is inner pattern.
2225 flow_dv_translate_item_eth(void *matcher, void *key,
2226 const struct rte_flow_item *item, int inner)
2228 const struct rte_flow_item_eth *eth_m = item->mask;
2229 const struct rte_flow_item_eth *eth_v = item->spec;
/* Default mask used when the item carries none: full MAC + ethertype. */
2230 const struct rte_flow_item_eth nic_mask = {
2231 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2232 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2233 .type = RTE_BE16(0xffff),
/* Inner items land in inner_headers, outer items in outer_headers. */
2245 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2247 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2249 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2251 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2253 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2254 &eth_m->dst, sizeof(eth_m->dst));
2255 /* The value must be in the range of the mask. */
2256 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2257 for (i = 0; i < sizeof(eth_m->dst); ++i)
2258 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2259 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2260 &eth_m->src, sizeof(eth_m->src));
2261 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2262 /* The value must be in the range of the mask. */
2263 for (i = 0; i < sizeof(eth_m->dst); ++i)
2264 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2265 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2266 rte_be_to_cpu_16(eth_m->type));
/* Ethertype value is written big-endian, masked, directly into the key. */
2267 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2268 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
/*
 * Translate a VLAN pattern item: splits the TCI into VID (12 bits),
 * CFI (bit 12) and PCP (bits 13-15) fields of the matcher/key, and forces
 * the cvlan_tag bit so only tagged traffic matches.
 *
 * NOTE(review): partial extraction — local declarations (headers_m/v,
 * tci types), the NULL-mask/spec defaulting and braces are among the
 * missing lines; code kept byte-identical.
 */
2272 * Add VLAN item to matcher and to the value.
2274 * @param[in, out] matcher
2276 * @param[in, out] key
2277 * Flow matcher value.
2279 * Flow pattern to translate.
2281 * Item is inner pattern.
2284 flow_dv_translate_item_vlan(void *matcher, void *key,
2285 const struct rte_flow_item *item,
/* Default mask: 12-bit VID plus full inner ethertype. */
2288 const struct rte_flow_item_vlan *vlan_m = item->mask;
2289 const struct rte_flow_item_vlan *vlan_v = item->spec;
2290 const struct rte_flow_item_vlan nic_mask = {
2291 .tci = RTE_BE16(0x0fff),
2292 .inner_type = RTE_BE16(0xffff),
2304 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2306 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2308 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2310 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Convert TCI to host order; value is masked before being split up. */
2312 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2313 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2314 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2315 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2317 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2318 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2319 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2320 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2321 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/*
 * Translate an IPV4 pattern item: IP version, source/destination
 * addresses, TOS (split into ECN + DSCP fields) and next protocol, with
 * each value masked by its corresponding mask byte/word.
 *
 * NOTE(review): partial extraction — local declarations, the group-based
 * ip_version handling branches, NULL-mask defaulting and braces are among
 * the missing lines; code kept byte-identical.
 */
2325 * Add IPV4 item to matcher and to the value.
2327 * @param[in, out] matcher
2329 * @param[in, out] key
2330 * Flow matcher value.
2332 * Flow pattern to translate.
2334 * Item is inner pattern.
2336 * The group to insert the rule.
2339 flow_dv_translate_item_ipv4(void *matcher, void *key,
2340 const struct rte_flow_item *item,
2341 int inner, uint32_t group)
2343 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2344 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Default mask: full addresses, TOS and next-protocol. */
2345 const struct rte_flow_item_ipv4 nic_mask = {
2347 .src_addr = RTE_BE32(0xffffffff),
2348 .dst_addr = RTE_BE32(0xffffffff),
2349 .type_of_service = 0xff,
2350 .next_proto_id = 0xff,
2360 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2362 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2364 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2366 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask 0xf with value 4: presumably the exact-match form is
 * used in non-zero groups — confirm against the missing branch lines. */
2369 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2371 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2372 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2377 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2378 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2379 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2380 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2381 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2382 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2383 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2384 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2385 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2386 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2387 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2388 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte: low 2 bits go to ip_ecn, the upper 6 bits to ip_dscp. */
2389 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2390 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2391 ipv4_m->hdr.type_of_service);
2392 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2393 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2394 ipv4_m->hdr.type_of_service >> 2);
2395 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2396 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2397 ipv4_m->hdr.next_proto_id);
2398 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2399 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
/*
 * Translate an IPV6 pattern item: IP version, 128-bit source/destination
 * addresses (byte-wise masked), traffic class (ECN + DSCP from vtc_flow),
 * flow label (in the misc parameters) and next header.
 *
 * NOTE(review): partial extraction — local declarations, NULL-mask
 * defaulting, the group-based ip_version branches and flow-label argument
 * lines are among the missing lines; code kept byte-identical.
 */
2403 * Add IPV6 item to matcher and to the value.
2405 * @param[in, out] matcher
2407 * @param[in, out] key
2408 * Flow matcher value.
2410 * Flow pattern to translate.
2412 * Item is inner pattern.
2414 * The group to insert the rule.
2417 flow_dv_translate_item_ipv6(void *matcher, void *key,
2418 const struct rte_flow_item *item,
2419 int inner, uint32_t group)
2421 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2422 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Default mask: full 128-bit addresses plus the whole vtc_flow word. */
2423 const struct rte_flow_item_ipv6 nic_mask = {
2426 "\xff\xff\xff\xff\xff\xff\xff\xff"
2427 "\xff\xff\xff\xff\xff\xff\xff\xff",
2429 "\xff\xff\xff\xff\xff\xff\xff\xff"
2430 "\xff\xff\xff\xff\xff\xff\xff\xff",
2431 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label is matched through the misc parameter set, not lyr_2_4. */
2438 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2439 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2448 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2450 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2452 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2454 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2457 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2459 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2460 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2465 size = sizeof(ipv6_m->hdr.dst_addr);
2466 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2467 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2468 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2469 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2470 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
/* Value bytes are ANDed with the mask copied just above. */
2471 for (i = 0; i < size; ++i)
2472 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2473 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2474 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2475 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2476 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2477 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2478 for (i = 0; i < size; ++i)
2479 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow: bits 20-21 are ECN, bits 22-27 DSCP (after the shifts). */
2481 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2482 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2483 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2484 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2485 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2486 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Inner vs outer flow label go to distinct misc fields. */
2489 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2491 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2494 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2496 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2500 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2502 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2503 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
/*
 * Translate a TCP pattern item: pins ip_protocol to IPPROTO_TCP and
 * matches source/destination ports (value masked), defaulting to the
 * generic rte_flow TCP mask when the item carries none.
 *
 * NOTE(review): partial extraction — local declarations, the NULL-spec
 * early return and the NULL-mask guard around the default-mask assignment
 * are among the missing lines; code kept byte-identical.
 */
2507 * Add TCP item to matcher and to the value.
2509 * @param[in, out] matcher
2511 * @param[in, out] key
2512 * Flow matcher value.
2514 * Flow pattern to translate.
2516 * Item is inner pattern.
2519 flow_dv_translate_item_tcp(void *matcher, void *key,
2520 const struct rte_flow_item *item,
2523 const struct rte_flow_item_tcp *tcp_m = item->mask;
2524 const struct rte_flow_item_tcp *tcp_v = item->spec;
2529 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2531 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2533 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2535 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching TCP implies an exact match on the IP protocol field. */
2537 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2538 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the generic rte_flow default mask (ports only). */
2542 tcp_m = &rte_flow_item_tcp_mask;
2543 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2544 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2545 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2546 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2547 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2548 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2549 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2550 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
/**
 * Add UDP item to matcher and to the value.
 *
 * Sets ip_protocol to UDP and matches src/dst ports; value bytes are
 * pre-masked (spec & mask). NOTE(review): return type, braces, the
 * inner/outer selection condition, and NULL guards are elided in this
 * extract; code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
2566 flow_dv_translate_item_udp(void *matcher, void *key,
2567 const struct rte_flow_item *item,
2570 const struct rte_flow_item_udp *udp_m = item->mask;
2571 const struct rte_flow_item_udp *udp_v = item->spec;
/* Point at inner or outer L2-L4 headers of mask and value. */
2576 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2578 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2580 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2582 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2584 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2585 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Default UDP mask is used when the item carries no explicit mask. */
2589 udp_m = &rte_flow_item_udp_mask;
2590 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2591 rte_be_to_cpu_16(udp_m->hdr.src_port));
2592 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2593 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2594 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2595 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2596 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2597 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
/**
 * Add GRE item to matcher and to the value.
 *
 * Sets ip_protocol to GRE and matches the GRE protocol field in the
 * misc parameters. NOTE(review): return type, braces, inner/outer
 * selection condition, and NULL guards are elided in this extract;
 * code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
2613 flow_dv_translate_item_gre(void *matcher, void *key,
2614 const struct rte_flow_item *item,
2617 const struct rte_flow_item_gre *gre_m = item->mask;
2618 const struct rte_flow_item_gre *gre_v = item->spec;
2621 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2622 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Point at inner or outer L2-L4 headers of mask and value. */
2625 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2627 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2629 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2631 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2633 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2634 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Default GRE mask is used when the item carries no explicit mask. */
2638 gre_m = &rte_flow_item_gre_mask;
2639 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2640 rte_be_to_cpu_16(gre_m->protocol));
2641 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2642 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
/**
 * Add NVGRE item to matcher and to the value.
 *
 * Reuses the GRE translation for the outer protocol match, then matches
 * the TNI + flow_id bytes through the GRE key field. The value bytes are
 * pre-masked (mask & spec). NOTE(review): return type, braces, local
 * declarations (gre_key_m/gre_key_v, i, size) and NULL guards are elided
 * in this extract; code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
2658 flow_dv_translate_item_nvgre(void *matcher, void *key,
2659 const struct rte_flow_item *item,
2662 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2663 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2664 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2665 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2666 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2667 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* NVGRE shares the GRE protocol/header matching. */
2673 flow_dv_translate_item_gre(matcher, key, item, inner);
2677 nvgre_m = &rte_flow_item_nvgre_mask;
/* TNI and flow_id are contiguous; copied together via gre_key_h. */
2678 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2679 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2680 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2681 memcpy(gre_key_m, tni_flow_id_m, size);
2682 for (i = 0; i < size; ++i)
2683 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
/**
 * Add VXLAN item to matcher and to the value.
 *
 * Also used for VXLAN-GPE items: the UDP destination port is derived from
 * the item type. The UDP dport match is only forced when no explicit UDP
 * dport value has been set yet. VNI value bytes are pre-masked.
 * NOTE(review): return type, braces, local declarations (dport, vni_m,
 * vni_v, i, size), inner/outer selection condition, and NULL guards are
 * elided in this extract; code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
2699 flow_dv_translate_item_vxlan(void *matcher, void *key,
2700 const struct rte_flow_item *item,
2703 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2704 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2707 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2708 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2716 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2718 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2720 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2722 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port matching the tunnel flavor. */
2724 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2725 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the UDP dport if the user did not match one already. */
2726 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2727 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2728 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Default VXLAN mask is used when the item carries no explicit mask. */
2733 vxlan_m = &rte_flow_item_vxlan_mask;
2734 size = sizeof(vxlan_m->vni);
2735 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2736 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2737 memcpy(vni_m, vxlan_m->vni, size);
2738 for (i = 0; i < size; ++i)
2739 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
/**
 * Add MPLS item to matcher and to the value.
 *
 * The carrier protocol match (UDP dport, GRE protocol, or IP protocol)
 * is chosen from @p prev_layer; the MPLS label itself is matched through
 * the misc2 outer_first_mpls_over_{udp,gre} fields. NOTE(review): return
 * type, braces, `break`s, default cases, and several assignment left-hand
 * sides are elided in this extract; code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
2757 flow_dv_translate_item_mpls(void *matcher, void *key,
2758 const struct rte_flow_item *item,
2759 uint64_t prev_layer,
2762 const uint32_t *in_mpls_m = item->mask;
2763 const uint32_t *in_mpls_v = item->spec;
2764 uint32_t *out_mpls_m = 0;
2765 uint32_t *out_mpls_v = 0;
2766 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2767 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2768 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2770 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2771 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2772 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First switch: match the carrier protocol of the MPLS payload. */
2774 switch (prev_layer) {
2775 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2776 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2777 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2778 MLX5_UDP_PORT_MPLS);
2780 case MLX5_FLOW_LAYER_GRE:
2781 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2782 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2786 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2787 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Default MPLS mask is used when the item carries no explicit mask. */
2794 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second switch: pick the misc2 field receiving the MPLS label. */
2795 switch (prev_layer) {
2796 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2798 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2799 outer_first_mpls_over_udp);
2801 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2802 outer_first_mpls_over_udp);
2804 case MLX5_FLOW_LAYER_GRE:
2806 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2807 outer_first_mpls_over_gre);
2809 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2810 outer_first_mpls_over_gre);
2813 /* Inner MPLS not over GRE is not supported. */
2816 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2820 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the 32-bit MPLS word; value is pre-masked. */
2826 if (out_mpls_m && out_mpls_v) {
2827 *out_mpls_m = *in_mpls_m;
2828 *out_mpls_v = *in_mpls_v & *in_mpls_m;
/**
 * Add META item to matcher and to the value.
 *
 * Matches the 32-bit metadata register (metadata_reg_a) in the misc2
 * parameters; the value is pre-masked (spec & mask). NOTE(review):
 * return type, braces, the NULL checks around mask/spec, and local
 * misc2_m/misc2_v declarations are partially elided in this extract;
 * code lines below are kept verbatim.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
2845 flow_dv_translate_item_meta(void *matcher, void *key,
2846 const struct rte_flow_item *item)
2848 const struct rte_flow_item_meta *meta_m;
2849 const struct rte_flow_item_meta *meta_v;
2851 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2853 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2855 meta_m = (const void *)item->mask;
/* Default META mask is used when the item carries no explicit mask. */
2857 meta_m = &rte_flow_item_meta_mask;
2858 meta_v = (const void *)item->spec;
2860 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2861 rte_be_to_cpu_32(meta_m->data));
2862 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2863 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/* All-zero reference buffer used to test whether a matcher header
 * section is entirely unset. */
2867 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Evaluates to non-zero when the given header section of
 * @p match_criteria is all zeroes (i.e. not used for matching). */
2869 #define HEADER_IS_ZERO(match_criteria, headers) \
2870 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2871 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
/**
 * Calculate flow matcher enable bitmap.
 *
 * Each bit of the result enables one header section of the matcher
 * (outer, misc, inner, misc2 and — under HAVE_MLX5DV_DR — misc3) when
 * that section contains any non-zero mask bytes. NOTE(review): return
 * type, braces and the closing #endif are elided in this extract; code
 * lines below are kept verbatim.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
2883 flow_dv_matcher_enable(uint32_t *match_criteria)
2885 uint8_t match_criteria_enable;
2887 match_criteria_enable =
2888 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2889 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2890 match_criteria_enable |=
2891 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2892 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2893 match_criteria_enable |=
2894 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2895 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2896 match_criteria_enable |=
2897 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2898 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2899 #ifdef HAVE_MLX5DV_DR
2900 match_criteria_enable |=
2901 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2902 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2904 return match_criteria_enable;
/**
 * Get a flow table resource, creating it on first use.
 *
 * With HAVE_MLX5DV_DR the table object is lazily created via the glue
 * layer and reference-counted; without DR support the cached tx/rx
 * table entry is returned directly. NOTE(review): the egress/ingress
 * condition lines, the "already exists" refcnt shortcut, and the #else
 * branch structure are partially elided in this extract; code lines
 * below are kept verbatim.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
2923 static struct mlx5_flow_tbl_resource *
2924 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2925 uint32_t table_id, uint8_t egress,
2926 struct rte_flow_error *error)
2928 struct mlx5_priv *priv = dev->data->dev_private;
2929 struct mlx5_ibv_shared *sh = priv->sh;
2930 struct mlx5_flow_tbl_resource *tbl;
2932 #ifdef HAVE_MLX5DV_DR
2934 tbl = &sh->tx_tbl[table_id];
2936 tbl->obj = mlx5_glue->dr_create_flow_tbl
2937 (sh->tx_ns, table_id);
2939 tbl = &sh->rx_tbl[table_id];
2941 tbl->obj = mlx5_glue->dr_create_flow_tbl
2942 (sh->rx_ns, table_id);
/* Table creation failed; report and return NULL (elided). */
2945 rte_flow_error_set(error, ENOMEM,
2946 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2947 NULL, "cannot create table");
2950 rte_atomic32_inc(&tbl->refcnt);
/* Non-DR build: just hand back the cached table slot. */
2956 return &sh->tx_tbl[table_id];
2958 return &sh->rx_tbl[table_id];
/**
 * Release a flow table.
 *
 * Decrements the reference counter and destroys the underlying DR
 * table object when the last reference is dropped.
 *
 * @param[in] tbl
 *   Table resource to be released.
 *
 * @return
 *   Returns 0 if table was released, else return 1;
 */
2972 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2976 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2977 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
/**
 * Register the flow matcher.
 *
 * Looks up an existing matcher in the shared-context cache by CRC,
 * priority, direction and mask; on a hit only the refcount is bumped.
 * Otherwise a new matcher is allocated, its table resource acquired,
 * the DV matcher object created, and the entry inserted at the head
 * of the cache list. NOTE(review): a few lines (cache-hit return, the
 * NULL checks after rte_calloc/tbl_resource_get, final return) are
 * elided in this extract; code lines below are kept verbatim.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
3000 flow_dv_matcher_register(struct rte_eth_dev *dev,
3001 struct mlx5_flow_dv_matcher *matcher,
3002 struct mlx5_flow *dev_flow,
3003 struct rte_flow_error *error)
3005 struct mlx5_priv *priv = dev->data->dev_private;
3006 struct mlx5_ibv_shared *sh = priv->sh;
3007 struct mlx5_flow_dv_matcher *cache_matcher;
3008 struct mlx5dv_flow_matcher_attr dv_attr = {
3009 .type = IBV_FLOW_ATTR_NORMAL,
3010 .match_mask = (void *)&matcher->mask,
3012 struct mlx5_flow_tbl_resource *tbl = NULL;
3014 /* Lookup from cache. */
3015 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3016 if (matcher->crc == cache_matcher->crc &&
3017 matcher->priority == cache_matcher->priority &&
3018 matcher->egress == cache_matcher->egress &&
3019 matcher->group == cache_matcher->group &&
3020 !memcmp((const void *)matcher->mask.buf,
3021 (const void *)cache_matcher->mask.buf,
3022 cache_matcher->mask.size)) {
3024 "priority %hd use %s matcher %p: refcnt %d++",
3025 cache_matcher->priority,
3026 cache_matcher->egress ? "tx" : "rx",
3027 (void *)cache_matcher,
3028 rte_atomic32_read(&cache_matcher->refcnt));
3029 rte_atomic32_inc(&cache_matcher->refcnt);
3030 dev_flow->dv.matcher = cache_matcher;
3034 /* Register new matcher. */
3035 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3037 return rte_flow_error_set(error, ENOMEM,
3038 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3039 "cannot allocate matcher memory");
3040 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3041 matcher->egress, error);
/* Table acquisition failed: free the half-built matcher. */
3043 rte_free(cache_matcher);
3044 return rte_flow_error_set(error, ENOMEM,
3045 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3046 NULL, "cannot create table");
3048 *cache_matcher = *matcher;
3049 dv_attr.match_criteria_enable =
3050 flow_dv_matcher_enable(cache_matcher->mask.buf);
3051 dv_attr.priority = matcher->priority;
3052 if (matcher->egress)
3053 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3054 cache_matcher->matcher_object =
3055 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3056 if (!cache_matcher->matcher_object) {
3057 rte_free(cache_matcher);
/* Under DR the table refcount must be rolled back as well. */
3058 #ifdef HAVE_MLX5DV_DR
3059 flow_dv_tbl_resource_release(tbl);
3061 return rte_flow_error_set(error, ENOMEM,
3062 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3063 NULL, "cannot create matcher");
3065 rte_atomic32_inc(&cache_matcher->refcnt);
3066 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3067 dev_flow->dv.matcher = cache_matcher;
3068 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3069 cache_matcher->priority,
3070 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3071 rte_atomic32_read(&cache_matcher->refcnt));
3072 rte_atomic32_inc(&tbl->refcnt);
/**
 * Add source vport match to the specified matcher.
 *
 * Used for E-Switch configurations to restrict ingress rules to a
 * given source port.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match
 * @param[in] mask
 *   Mask
 */
3089 flow_dv_translate_source_vport(void *matcher, void *key,
3090 int16_t port, uint16_t mask)
3092 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3093 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3095 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3096 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
/**
 * Find existing tag resource or create and register a new one.
 *
 * Tag resources are cached in the shared IB context, keyed by the tag
 * value, and reference-counted. On cache miss a flow-tag DV action is
 * created through the glue layer. NOTE(review): the cache-hit early
 * return, the rte_be_to_cpu_32 argument of the tag action, and the
 * final return are elided in this extract; code lines below are kept
 * verbatim.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to tag resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
3115 flow_dv_tag_resource_register
3116 (struct rte_eth_dev *dev,
3117 struct mlx5_flow_dv_tag_resource *resource,
3118 struct mlx5_flow *dev_flow,
3119 struct rte_flow_error *error)
3121 struct mlx5_priv *priv = dev->data->dev_private;
3122 struct mlx5_ibv_shared *sh = priv->sh;
3123 struct mlx5_flow_dv_tag_resource *cache_resource;
3125 /* Lookup a matching resource from cache. */
3126 LIST_FOREACH(cache_resource, &sh->tags, next) {
3127 if (resource->tag == cache_resource->tag) {
3128 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3129 (void *)cache_resource,
3130 rte_atomic32_read(&cache_resource->refcnt));
3131 rte_atomic32_inc(&cache_resource->refcnt);
3132 dev_flow->flow->tag_resource = cache_resource;
3136 /* Register new resource. */
3137 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3138 if (!cache_resource)
3139 return rte_flow_error_set(error, ENOMEM,
3140 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3141 "cannot allocate resource memory");
3142 *cache_resource = *resource;
3143 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3145 if (!cache_resource->action) {
3146 rte_free(cache_resource);
3147 return rte_flow_error_set(error, ENOMEM,
3148 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3149 NULL, "cannot create action");
3151 rte_atomic32_init(&cache_resource->refcnt);
3152 rte_atomic32_inc(&cache_resource->refcnt);
3153 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3154 dev_flow->flow->tag_resource = cache_resource;
3155 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3156 (void *)cache_resource,
3157 rte_atomic32_read(&cache_resource->refcnt));
/**
 * Release the flow tag resource.
 *
 * Decrements the tag refcount; on last reference the DV action is
 * destroyed and the resource removed from the shared-context list.
 * NOTE(review): the rte_free of the tag and the return statements are
 * elided in this extract; code lines below are kept verbatim.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param tag
 *   Pointer to the tag resource.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
3173 flow_dv_tag_release(struct rte_eth_dev *dev,
3174 struct mlx5_flow_dv_tag_resource *tag)
3177 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3178 dev->data->port_id, (void *)tag,
3179 rte_atomic32_read(&tag->refcnt));
3180 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3181 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3182 LIST_REMOVE(tag, next);
3183 DRV_LOG(DEBUG, "port %u tag %p: removed",
3184 dev->data->port_id, (void *)tag);
/**
 * Fill the flow with DV spec.
 *
 * Walks the action list first (building DV actions, resolving tag /
 * counter / encap-decap / jump / modify-header resources), then the
 * item list (translating each pattern item into matcher mask and value
 * and accumulating RSS hash fields), and finally registers the matcher.
 * NOTE(review): many lines (break statements, error returns, some
 * conditions) are elided in this extract; code lines below are kept
 * verbatim and exact control flow should be confirmed against the full
 * file.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
3211 flow_dv_translate(struct rte_eth_dev *dev,
3212 struct mlx5_flow *dev_flow,
3213 const struct rte_flow_attr *attr,
3214 const struct rte_flow_item items[],
3215 const struct rte_flow_action actions[],
3216 struct rte_flow_error *error)
3218 struct mlx5_priv *priv = dev->data->dev_private;
3219 struct rte_flow *flow = dev_flow->flow;
3220 uint64_t item_flags = 0;
3221 uint64_t last_item = 0;
3222 uint64_t action_flags = 0;
3223 uint64_t priority = attr->priority;
3224 struct mlx5_flow_dv_matcher matcher = {
3226 .size = sizeof(matcher.mask.buf),
3230 bool actions_end = false;
3231 struct mlx5_flow_dv_modify_hdr_resource res = {
3232 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3233 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3235 union flow_dv_attr flow_attr = { .attr = 0 };
3236 struct mlx5_flow_dv_tag_resource tag_resource;
/* Reserved priority means "use the lowest configured priority". */
3238 if (priority == MLX5_FLOW_PRIO_RSVD)
3239 priority = priv->config.flow_prio - 1;
/* Pass 1: translate the action list into DV actions. */
3240 for (; !actions_end ; actions++) {
3241 const struct rte_flow_action_queue *queue;
3242 const struct rte_flow_action_rss *rss;
3243 const struct rte_flow_action *action = actions;
3244 const struct rte_flow_action_count *count = action->conf;
3245 const uint8_t *rss_key;
3246 const struct rte_flow_action_jump *jump_data;
3247 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3248 struct mlx5_flow_tbl_resource *tbl;
3250 switch (actions->type) {
3251 case RTE_FLOW_ACTION_TYPE_VOID:
3253 case RTE_FLOW_ACTION_TYPE_FLAG:
3255 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3256 if (!flow->tag_resource)
3257 if (flow_dv_tag_resource_register
3258 (dev, &tag_resource, dev_flow, error))
3260 dev_flow->dv.actions[actions_n++] =
3261 flow->tag_resource->action;
3262 action_flags |= MLX5_FLOW_ACTION_FLAG;
3264 case RTE_FLOW_ACTION_TYPE_MARK:
3265 tag_resource.tag = mlx5_flow_mark_set
3266 (((const struct rte_flow_action_mark *)
3267 (actions->conf))->id);
3268 if (!flow->tag_resource)
3269 if (flow_dv_tag_resource_register
3270 (dev, &tag_resource, dev_flow, error))
3272 dev_flow->dv.actions[actions_n++] =
3273 flow->tag_resource->action;
3274 action_flags |= MLX5_FLOW_ACTION_MARK;
3276 case RTE_FLOW_ACTION_TYPE_DROP:
3277 action_flags |= MLX5_FLOW_ACTION_DROP;
3279 case RTE_FLOW_ACTION_TYPE_QUEUE:
3280 queue = actions->conf;
3281 flow->rss.queue_num = 1;
3282 (*flow->queue)[0] = queue->index;
3283 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3285 case RTE_FLOW_ACTION_TYPE_RSS:
3286 rss = actions->conf;
3288 memcpy((*flow->queue), rss->queue,
3289 rss->queue_num * sizeof(uint16_t));
3290 flow->rss.queue_num = rss->queue_num;
3291 /* NULL RSS key indicates default RSS key. */
3292 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3293 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3294 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3295 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3296 flow->rss.level = rss->level;
3297 action_flags |= MLX5_FLOW_ACTION_RSS;
3299 case RTE_FLOW_ACTION_TYPE_COUNT:
3300 if (!priv->config.devx) {
3301 rte_errno = ENOTSUP;
3304 flow->counter = flow_dv_counter_new(dev, count->shared,
3306 if (flow->counter == NULL)
3308 dev_flow->dv.actions[actions_n++] =
3309 flow->counter->action;
3310 action_flags |= MLX5_FLOW_ACTION_COUNT;
/* Counter error path: distinguish missing DevX support. */
3313 if (rte_errno == ENOTSUP)
3314 return rte_flow_error_set
3316 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3318 "count action not supported");
3320 return rte_flow_error_set
3322 RTE_FLOW_ERROR_TYPE_ACTION,
3324 "cannot create counter"
3326 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3327 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3328 if (flow_dv_create_action_l2_encap(dev, actions,
3331 dev_flow->dv.actions[actions_n++] =
3332 dev_flow->dv.encap_decap->verbs_action;
3333 action_flags |= actions->type ==
3334 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3335 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3336 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3338 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3339 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3340 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3343 dev_flow->dv.actions[actions_n++] =
3344 dev_flow->dv.encap_decap->verbs_action;
3345 action_flags |= actions->type ==
3346 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3347 MLX5_FLOW_ACTION_VXLAN_DECAP :
3348 MLX5_FLOW_ACTION_NVGRE_DECAP;
3350 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3351 /* Handle encap with preceding decap. */
3352 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3353 if (flow_dv_create_action_raw_encap
3354 (dev, actions, dev_flow, attr, error))
3356 dev_flow->dv.actions[actions_n++] =
3357 dev_flow->dv.encap_decap->verbs_action;
3359 /* Handle encap without preceding decap. */
3360 if (flow_dv_create_action_l2_encap(dev, actions,
3364 dev_flow->dv.actions[actions_n++] =
3365 dev_flow->dv.encap_decap->verbs_action;
3367 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3369 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3370 /* Check if this decap is followed by encap. */
3371 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3372 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3375 /* Handle decap only if it isn't followed by encap. */
3376 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3377 if (flow_dv_create_action_l2_decap(dev,
3381 dev_flow->dv.actions[actions_n++] =
3382 dev_flow->dv.encap_decap->verbs_action;
3384 /* If decap is followed by encap, handle it at encap. */
3385 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3387 case RTE_FLOW_ACTION_TYPE_JUMP:
3388 jump_data = action->conf;
3389 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3391 attr->egress, error);
3393 return rte_flow_error_set
3395 RTE_FLOW_ERROR_TYPE_ACTION,
3397 "cannot create jump action.");
3398 jump_tbl_resource.tbl = tbl;
3399 if (flow_dv_jump_tbl_resource_register
3400 (dev, &jump_tbl_resource, dev_flow, error)) {
3401 flow_dv_tbl_resource_release(tbl);
3402 return rte_flow_error_set
3404 RTE_FLOW_ERROR_TYPE_ACTION,
3406 "cannot create jump action.");
3408 dev_flow->dv.actions[actions_n++] =
3409 dev_flow->dv.jump->action;
3410 action_flags |= MLX5_FLOW_ACTION_JUMP;
3412 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3413 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3414 if (flow_dv_convert_action_modify_mac(&res, actions,
3417 action_flags |= actions->type ==
3418 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3419 MLX5_FLOW_ACTION_SET_MAC_SRC :
3420 MLX5_FLOW_ACTION_SET_MAC_DST;
3422 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3423 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3424 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3427 action_flags |= actions->type ==
3428 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3429 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3430 MLX5_FLOW_ACTION_SET_IPV4_DST;
3432 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3433 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3434 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3437 action_flags |= actions->type ==
3438 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3439 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3440 MLX5_FLOW_ACTION_SET_IPV6_DST;
3442 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3443 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3444 if (flow_dv_convert_action_modify_tp(&res, actions,
3448 action_flags |= actions->type ==
3449 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3450 MLX5_FLOW_ACTION_SET_TP_SRC :
3451 MLX5_FLOW_ACTION_SET_TP_DST;
3453 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3454 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3458 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3460 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3461 if (flow_dv_convert_action_modify_ttl(&res, actions,
3465 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3467 case RTE_FLOW_ACTION_TYPE_END:
3469 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3470 /* create modify action if needed. */
3471 if (flow_dv_modify_hdr_resource_register
3476 dev_flow->dv.actions[actions_n++] =
3477 dev_flow->dv.modify_hdr->verbs_action;
3484 dev_flow->dv.actions_n = actions_n;
3485 flow->actions = action_flags;
3486 if (attr->ingress && !attr->transfer &&
3487 (priv->representor || priv->master)) {
3488 /* It was validated - we support unidirection flows only. */
3489 assert(!attr->egress);
3491 * Add matching on source vport index only
3492 * for ingress rules in E-Switch configurations.
3494 flow_dv_translate_source_vport(matcher.mask.buf,
3495 dev_flow->dv.value.buf,
/* Pass 2: translate the pattern items into mask/value buffers. */
3499 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3500 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3501 void *match_mask = matcher.mask.buf;
3502 void *match_value = dev_flow->dv.value.buf;
3504 switch (items->type) {
3505 case RTE_FLOW_ITEM_TYPE_ETH:
3506 flow_dv_translate_item_eth(match_mask, match_value,
3508 matcher.priority = MLX5_PRIORITY_MAP_L2;
3509 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3510 MLX5_FLOW_LAYER_OUTER_L2;
3512 case RTE_FLOW_ITEM_TYPE_VLAN:
3513 flow_dv_translate_item_vlan(match_mask, match_value,
3515 matcher.priority = MLX5_PRIORITY_MAP_L2;
3516 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3517 MLX5_FLOW_LAYER_INNER_VLAN) :
3518 (MLX5_FLOW_LAYER_OUTER_L2 |
3519 MLX5_FLOW_LAYER_OUTER_VLAN);
3521 case RTE_FLOW_ITEM_TYPE_IPV4:
3522 flow_dv_translate_item_ipv4(match_mask, match_value,
3523 items, tunnel, attr->group);
3524 matcher.priority = MLX5_PRIORITY_MAP_L3;
3525 dev_flow->dv.hash_fields |=
3526 mlx5_flow_hashfields_adjust
3528 MLX5_IPV4_LAYER_TYPES,
3529 MLX5_IPV4_IBV_RX_HASH);
3530 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3531 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3533 case RTE_FLOW_ITEM_TYPE_IPV6:
3534 flow_dv_translate_item_ipv6(match_mask, match_value,
3535 items, tunnel, attr->group);
3536 matcher.priority = MLX5_PRIORITY_MAP_L3;
3537 dev_flow->dv.hash_fields |=
3538 mlx5_flow_hashfields_adjust
3540 MLX5_IPV6_LAYER_TYPES,
3541 MLX5_IPV6_IBV_RX_HASH);
3542 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3543 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3545 case RTE_FLOW_ITEM_TYPE_TCP:
3546 flow_dv_translate_item_tcp(match_mask, match_value,
3548 matcher.priority = MLX5_PRIORITY_MAP_L4;
3549 dev_flow->dv.hash_fields |=
3550 mlx5_flow_hashfields_adjust
3551 (dev_flow, tunnel, ETH_RSS_TCP,
3552 IBV_RX_HASH_SRC_PORT_TCP |
3553 IBV_RX_HASH_DST_PORT_TCP);
3554 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3555 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3557 case RTE_FLOW_ITEM_TYPE_UDP:
3558 flow_dv_translate_item_udp(match_mask, match_value,
3560 matcher.priority = MLX5_PRIORITY_MAP_L4;
3561 dev_flow->dv.hash_fields |=
3562 mlx5_flow_hashfields_adjust
3563 (dev_flow, tunnel, ETH_RSS_UDP,
3564 IBV_RX_HASH_SRC_PORT_UDP |
3565 IBV_RX_HASH_DST_PORT_UDP);
3566 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3567 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3569 case RTE_FLOW_ITEM_TYPE_GRE:
3570 flow_dv_translate_item_gre(match_mask, match_value,
3572 last_item = MLX5_FLOW_LAYER_GRE;
3574 case RTE_FLOW_ITEM_TYPE_NVGRE:
3575 flow_dv_translate_item_nvgre(match_mask, match_value,
3577 last_item = MLX5_FLOW_LAYER_GRE;
3579 case RTE_FLOW_ITEM_TYPE_VXLAN:
3580 flow_dv_translate_item_vxlan(match_mask, match_value,
3582 last_item = MLX5_FLOW_LAYER_VXLAN;
3584 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3585 flow_dv_translate_item_vxlan(match_mask, match_value,
3587 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3589 case RTE_FLOW_ITEM_TYPE_MPLS:
3590 flow_dv_translate_item_mpls(match_mask, match_value,
3591 items, last_item, tunnel);
3592 last_item = MLX5_FLOW_LAYER_MPLS;
3594 case RTE_FLOW_ITEM_TYPE_META:
3595 flow_dv_translate_item_meta(match_mask, match_value,
3597 last_item = MLX5_FLOW_ITEM_METADATA;
3602 item_flags |= last_item;
3604 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3605 dev_flow->dv.value.buf));
3606 dev_flow->layers = item_flags;
3607 /* Register matcher. */
3608 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3610 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3612 matcher.egress = attr->egress;
3613 matcher.group = attr->group;
3614 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
/**
 * Apply the flow to the NIC.
 *
 * For each sub (dev) flow: resolve the destination — drop queue, or an
 * existing/new hash RX queue for QUEUE/RSS — append the destination QP
 * action, then create the DV flow on the registered matcher. On any
 * failure all hrxq references taken so far are rolled back and
 * rte_errno is preserved across the cleanup. NOTE(review): several
 * lines (dv/n assignments, error gotos, success return) are elided in
 * this extract; code lines below are kept verbatim.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
3633 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3634 struct rte_flow_error *error)
3636 struct mlx5_flow_dv *dv;
3637 struct mlx5_flow *dev_flow;
3641 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3644 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3645 dv->hrxq = mlx5_hrxq_drop_new(dev);
3649 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3650 "cannot get drop hash queue");
3654 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3656 } else if (flow->actions &
3657 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3658 struct mlx5_hrxq *hrxq;
/* Reuse a cached hash RX queue when one matches. */
3660 hrxq = mlx5_hrxq_get(dev, flow->key,
3661 MLX5_RSS_HASH_KEY_LEN,
3664 flow->rss.queue_num);
3666 hrxq = mlx5_hrxq_new
3667 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3668 dv->hash_fields, (*flow->queue),
3669 flow->rss.queue_num,
3670 !!(dev_flow->layers &
3671 MLX5_FLOW_LAYER_TUNNEL));
3675 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3676 "cannot get hash queue");
3681 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3685 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3686 (void *)&dv->value, n,
3689 rte_flow_error_set(error, errno,
3690 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3692 "hardware refuses to create flow");
/* Error path: undo every hrxq reference taken above. */
3698 err = rte_errno; /* Save rte_errno before cleanup. */
3699 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3700 struct mlx5_flow_dv *dv = &dev_flow->dv;
3702 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3703 mlx5_hrxq_drop_release(dev);
3705 mlx5_hrxq_release(dev, dv->hrxq);
3709 rte_errno = err; /* Restore rte_errno. */
/**
 * Release the flow matcher.
 *
 * Decrements the matcher refcount; on last reference destroys the DV
 * matcher object, unlinks it from the shared-context cache and drops
 * the reference on its tx/rx flow table. NOTE(review): the rte_free of
 * the matcher and the return statements are elided in this extract;
 * code lines below are kept verbatim.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
3725 flow_dv_matcher_release(struct rte_eth_dev *dev,
3726 struct mlx5_flow *flow)
3728 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3729 struct mlx5_priv *priv = dev->data->dev_private;
3730 struct mlx5_ibv_shared *sh = priv->sh;
3731 struct mlx5_flow_tbl_resource *tbl;
3733 assert(matcher->matcher_object);
3734 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3735 dev->data->port_id, (void *)matcher,
3736 rte_atomic32_read(&matcher->refcnt));
3737 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3738 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3739 (matcher->matcher_object));
3740 LIST_REMOVE(matcher, next);
/* Drop the table reference taken at registration time. */
3741 if (matcher->egress)
3742 tbl = &sh->tx_tbl[matcher->group];
3744 tbl = &sh->rx_tbl[matcher->group];
3745 flow_dv_tbl_resource_release(tbl);
3747 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3748 dev->data->port_id, (void *)matcher);
3755 * Release an encap/decap resource.
3758 * Pointer to mlx5_flow.
3761 * 1 while a reference on it exists, 0 when freed.
3764 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3766 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3767 flow->dv.encap_decap;
3769 assert(cache_resource->verbs_action);
3770 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3771 (void *)cache_resource,
3772 rte_atomic32_read(&cache_resource->refcnt));
3773 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3774 claim_zero(mlx5_glue->destroy_flow_action
3775 (cache_resource->verbs_action));
3776 LIST_REMOVE(cache_resource, next);
3777 rte_free(cache_resource);
3778 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3779 (void *)cache_resource);
3786 * Release an jump to table action resource.
3789 * Pointer to mlx5_flow.
3792 * 1 while a reference on it exists, 0 when freed.
3795 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3797 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3800 assert(cache_resource->action);
3801 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3802 (void *)cache_resource,
3803 rte_atomic32_read(&cache_resource->refcnt));
3804 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3805 claim_zero(mlx5_glue->destroy_flow_action
3806 (cache_resource->action));
3807 LIST_REMOVE(cache_resource, next);
3808 flow_dv_tbl_resource_release(cache_resource->tbl);
3809 rte_free(cache_resource);
3810 DRV_LOG(DEBUG, "jump table resource %p: removed",
3811 (void *)cache_resource);
3818 * Release a modify-header resource.
3821 * Pointer to mlx5_flow.
3824 * 1 while a reference on it exists, 0 when freed.
3827 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3829 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3830 flow->dv.modify_hdr;
3832 assert(cache_resource->verbs_action);
3833 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3834 (void *)cache_resource,
3835 rte_atomic32_read(&cache_resource->refcnt));
3836 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3837 claim_zero(mlx5_glue->destroy_flow_action
3838 (cache_resource->verbs_action));
3839 LIST_REMOVE(cache_resource, next);
3840 rte_free(cache_resource);
3841 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3842 (void *)cache_resource);
3849 * Remove the flow from the NIC but keeps it in memory.
3852 * Pointer to Ethernet device.
3853 * @param[in, out] flow
3854 * Pointer to flow structure.
3857 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3859 struct mlx5_flow_dv *dv;
3860 struct mlx5_flow *dev_flow;
3864 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3867 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3871 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3872 mlx5_hrxq_drop_release(dev);
3874 mlx5_hrxq_release(dev, dv->hrxq);
3881 * Remove the flow from the NIC and the memory.
3884 * Pointer to the Ethernet device structure.
3885 * @param[in, out] flow
3886 * Pointer to flow structure.
3889 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3891 struct mlx5_flow *dev_flow;
3895 flow_dv_remove(dev, flow);
3896 if (flow->counter) {
3897 flow_dv_counter_release(flow->counter);
3898 flow->counter = NULL;
3900 if (flow->tag_resource) {
3901 flow_dv_tag_release(dev, flow->tag_resource);
3902 flow->tag_resource = NULL;
3904 while (!LIST_EMPTY(&flow->dev_flows)) {
3905 dev_flow = LIST_FIRST(&flow->dev_flows);
3906 LIST_REMOVE(dev_flow, next);
3907 if (dev_flow->dv.matcher)
3908 flow_dv_matcher_release(dev, dev_flow);
3909 if (dev_flow->dv.encap_decap)
3910 flow_dv_encap_decap_resource_release(dev_flow);
3911 if (dev_flow->dv.modify_hdr)
3912 flow_dv_modify_hdr_resource_release(dev_flow);
3913 if (dev_flow->dv.jump)
3914 flow_dv_jump_tbl_resource_release(dev_flow);
3920 * Query a dv flow rule for its statistics via devx.
3923 * Pointer to Ethernet device.
3925 * Pointer to the sub flow.
3927 * data retrieved by the query.
3929 * Perform verbose error reporting if not NULL.
3932 * 0 on success, a negative errno value otherwise and rte_errno is set.
3935 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3936 void *data, struct rte_flow_error *error)
3938 struct mlx5_priv *priv = dev->data->dev_private;
3939 struct rte_flow_query_count *qc = data;
3944 if (!priv->config.devx)
3945 return rte_flow_error_set(error, ENOTSUP,
3946 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3948 "counters are not supported");
3949 if (flow->counter) {
3950 err = mlx5_devx_cmd_flow_counter_query
3951 (flow->counter->dcs,
3952 qc->reset, &pkts, &bytes);
3954 return rte_flow_error_set
3956 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3958 "cannot read counters");
3961 qc->hits = pkts - flow->counter->hits;
3962 qc->bytes = bytes - flow->counter->bytes;
3964 flow->counter->hits = pkts;
3965 flow->counter->bytes = bytes;
3969 return rte_flow_error_set(error, EINVAL,
3970 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3972 "counters are not available");
3978 * @see rte_flow_query()
3982 flow_dv_query(struct rte_eth_dev *dev,
3983 struct rte_flow *flow __rte_unused,
3984 const struct rte_flow_action *actions __rte_unused,
3985 void *data __rte_unused,
3986 struct rte_flow_error *error __rte_unused)
3990 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3991 switch (actions->type) {
3992 case RTE_FLOW_ACTION_TYPE_VOID:
3994 case RTE_FLOW_ACTION_TYPE_COUNT:
3995 ret = flow_dv_query_count(dev, flow, data, error);
3998 return rte_flow_error_set(error, ENOTSUP,
3999 RTE_FLOW_ERROR_TYPE_ACTION,
4001 "action not supported");
4008 * Mutex-protected thunk to flow_dv_translate().
4011 flow_d_translate(struct rte_eth_dev *dev,
4012 struct mlx5_flow *dev_flow,
4013 const struct rte_flow_attr *attr,
4014 const struct rte_flow_item items[],
4015 const struct rte_flow_action actions[],
4016 struct rte_flow_error *error)
4020 flow_d_shared_lock(dev);
4021 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4022 flow_d_shared_unlock(dev);
/*
 * Mutex-protected thunk to flow_dv_apply().
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return ret;
}
/*
 * Mutex-protected thunk to flow_dv_remove().
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
/*
 * Mutex-protected thunk to flow_dv_destroy().
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
4064 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4065 .validate = flow_dv_validate,
4066 .prepare = flow_dv_prepare,
4067 .translate = flow_d_translate,
4068 .apply = flow_d_apply,
4069 .remove = flow_d_remove,
4070 .destroy = flow_d_destroy,
4071 .query = flow_dv_query,
4074 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */