1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #include "mlx5_rxtx.h"
37 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
40 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
56 * Initialize flow attributes structure according to flow items' types.
59 * Pointer to item specification.
61 * Pointer to flow attributes structure.
/*
 * Walks the pattern list up to RTE_FLOW_ITEM_TYPE_END; the visible cases
 * suggest it records which protocol layers (IPv4/IPv6/UDP/TCP) appear in
 * the pattern into @attr.
 * NOTE(review): the switch header, the per-case attr assignments, the break
 * statements and the closing braces were dropped by the extraction — restore
 * from the upstream source before building; do not compile this as-is.
 */
64 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
66 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
68 case RTE_FLOW_ITEM_TYPE_IPV4:
71 case RTE_FLOW_ITEM_TYPE_IPV6:
74 case RTE_FLOW_ITEM_TYPE_UDP:
77 case RTE_FLOW_ITEM_TYPE_TCP:
/*
 * Descriptor mapping a protocol-header field to the device modification-
 * field id used by modify-header commands.  Consumers iterate these tables
 * while field->size != 0 (see flow_dv_convert_modify_action), so upstream
 * each table ends in a zero sentinel entry — the sentinels and the closing
 * "};" lines are missing from this extraction.
 */
87 struct field_modify_info {
88 uint32_t size; /* Size of field in protocol header, in bytes. */
89 uint32_t offset; /* Offset of field in protocol header, in bytes. */
90 enum mlx5_modification_field id;
/* Ethernet header: destination then source MAC, each split 32/16 bits. */
93 struct field_modify_info modify_eth[] = {
94 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
95 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
96 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
97 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* IPv4 header: TTL byte plus source/destination addresses. */
101 struct field_modify_info modify_ipv4[] = {
102 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
103 {4, 12, MLX5_MODI_OUT_SIPV4},
104 {4, 16, MLX5_MODI_OUT_DIPV4},
/* IPv6 header: hop limit plus 128-bit addresses in 32-bit chunks. */
108 struct field_modify_info modify_ipv6[] = {
109 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
110 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
111 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
112 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
113 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
114 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
115 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
116 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
117 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP header: source and destination ports. */
121 struct field_modify_info modify_udp[] = {
122 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
123 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* TCP header: source and destination ports. */
127 struct field_modify_info modify_tcp[] = {
128 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
129 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
134 * Acquire the synchronizing object to protect multithreaded access
135 * to shared dv context. Lock occurs only if context is actually
136 * shared, i.e. we have multiport IB device and representors are
140 * Pointer to the rte_eth_dev structure.
/*
 * Locks sh->dv_mutex only when the shared context has more than one user
 * (dv_refcnt > 1); a single-user context needs no serialization.
 * NOTE(review): the declaration of `ret`, the error handling after the
 * pthread_mutex_lock() call and the closing braces are missing from this
 * extraction — restore from upstream.
 */
143 flow_d_shared_lock(struct rte_eth_dev *dev)
145 struct mlx5_priv *priv = dev->data->dev_private;
146 struct mlx5_ibv_shared *sh = priv->sh;
148 if (sh->dv_refcnt > 1) {
151 ret = pthread_mutex_lock(&sh->dv_mutex);
/*
 * Counterpart of flow_d_shared_lock(): releases sh->dv_mutex, again only
 * when the shared context is actually shared (dv_refcnt > 1).
 * NOTE(review): `ret` declaration, post-unlock error handling and closing
 * braces are missing from this extraction — restore from upstream.
 */
158 flow_d_shared_unlock(struct rte_eth_dev *dev)
160 struct mlx5_priv *priv = dev->data->dev_private;
161 struct mlx5_ibv_shared *sh = priv->sh;
163 if (sh->dv_refcnt > 1) {
166 ret = pthread_mutex_unlock(&sh->dv_mutex);
173 * Convert modify-header action to DV specification.
176 * Pointer to item specification.
178 * Pointer to field modification information.
179 * @param[in,out] resource
180 * Pointer to the modify-header resource.
182 * Type of modification.
184 * Pointer to the error structure.
187 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Appends one mlx5_modification_cmd per non-zero mask segment of @item to
 * resource->actions, starting at the current resource->actions_num.
 * NOTE(review): the declaration of the local `set` (mask accumulator), the
 * loop-advance of `field`/`i`, the final `return 0;` and closing braces are
 * missing from this extraction — restore from upstream.
 */
190 flow_dv_convert_modify_action(struct rte_flow_item *item,
191 struct field_modify_info *field,
192 struct mlx5_flow_dv_modify_hdr_resource *resource,
194 struct rte_flow_error *error)
196 uint32_t i = resource->actions_num;
197 struct mlx5_modification_cmd *actions = resource->actions;
198 const uint8_t *spec = item->spec;
199 const uint8_t *mask = item->mask;
/* Tables are sentinel-terminated: iterate until field->size == 0. */
202 while (field->size) {
204 /* Generate modify command for each mask segment. */
205 memcpy(&set, &mask[field->offset], field->size);
207 if (i >= MLX5_MODIFY_NUM)
208 return rte_flow_error_set(error, EINVAL,
209 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
210 "too many items to modify");
211 actions[i].action_type = type;
212 actions[i].field = field->id;
/* Length 0 encodes a full 32-bit write; otherwise length is in bits. */
213 actions[i].length = field->size ==
214 4 ? 0 : field->size * 8;
/* Right-align sub-dword values inside the 4-byte data field. */
215 rte_memcpy(&actions[i].data[4 - field->size],
216 &spec[field->offset], field->size);
217 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
220 if (resource->actions_num != i)
221 resource->actions_num = i;
/* At least one command must have been produced. */
224 if (!resource->actions_num)
225 return rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
227 "invalid modification flow item");
232 * Convert modify-header set IPv4 address action to DV specification.
234 * @param[in,out] resource
235 * Pointer to the modify-header resource.
237 * Pointer to action specification.
239 * Pointer to the error structure.
242 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds a synthetic IPv4 item carrying the address from the SET_IPV4_SRC
 * or SET_IPV4_DST action and delegates to flow_dv_convert_modify_action().
 * NOTE(review): the `else` line between the src and dst branches, the
 * `item.spec = &ipv4;` assignment and the closing braces are missing from
 * this extraction — restore from upstream.
 */
245 flow_dv_convert_action_modify_ipv4
246 (struct mlx5_flow_dv_modify_hdr_resource *resource,
247 const struct rte_flow_action *action,
248 struct rte_flow_error *error)
250 const struct rte_flow_action_set_ipv4 *conf =
251 (const struct rte_flow_action_set_ipv4 *)(action->conf);
252 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
253 struct rte_flow_item_ipv4 ipv4;
254 struct rte_flow_item_ipv4 ipv4_mask;
256 memset(&ipv4, 0, sizeof(ipv4));
257 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
258 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
259 ipv4.hdr.src_addr = conf->ipv4_addr;
260 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
262 ipv4.hdr.dst_addr = conf->ipv4_addr;
263 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
266 item.mask = &ipv4_mask;
267 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
268 MLX5_MODIFICATION_TYPE_SET, error);
272 * Convert modify-header set IPv6 address action to DV specification.
274 * @param[in,out] resource
275 * Pointer to the modify-header resource.
277 * Pointer to action specification.
279 * Pointer to the error structure.
282 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Same scheme as the IPv4 variant: synthesizes an IPv6 item/mask pair from
 * the SET_IPV6_SRC/SET_IPV6_DST action and converts via
 * flow_dv_convert_modify_action() with the modify_ipv6 table.
 * NOTE(review): the `else` line, the `item.spec = &ipv6;` assignment and
 * closing braces are missing from this extraction — restore from upstream.
 */
285 flow_dv_convert_action_modify_ipv6
286 (struct mlx5_flow_dv_modify_hdr_resource *resource,
287 const struct rte_flow_action *action,
288 struct rte_flow_error *error)
290 const struct rte_flow_action_set_ipv6 *conf =
291 (const struct rte_flow_action_set_ipv6 *)(action->conf);
292 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
293 struct rte_flow_item_ipv6 ipv6;
294 struct rte_flow_item_ipv6 ipv6_mask;
296 memset(&ipv6, 0, sizeof(ipv6));
297 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
298 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
299 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
300 sizeof(ipv6.hdr.src_addr));
301 memcpy(&ipv6_mask.hdr.src_addr,
302 &rte_flow_item_ipv6_mask.hdr.src_addr,
303 sizeof(ipv6.hdr.src_addr));
305 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
306 sizeof(ipv6.hdr.dst_addr));
307 memcpy(&ipv6_mask.hdr.dst_addr,
308 &rte_flow_item_ipv6_mask.hdr.dst_addr,
309 sizeof(ipv6.hdr.dst_addr));
312 item.mask = &ipv6_mask;
313 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
314 MLX5_MODIFICATION_TYPE_SET, error);
318 * Convert modify-header set MAC address action to DV specification.
320 * @param[in,out] resource
321 * Pointer to the modify-header resource.
323 * Pointer to action specification.
325 * Pointer to the error structure.
328 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Synthesizes an ETH item/mask pair from the SET_MAC_SRC/SET_MAC_DST action
 * and converts via flow_dv_convert_modify_action() with the modify_eth table.
 * NOTE(review): the `else` line, the `item.spec = &eth;` assignment and
 * closing braces are missing from this extraction — restore from upstream.
 */
331 flow_dv_convert_action_modify_mac
332 (struct mlx5_flow_dv_modify_hdr_resource *resource,
333 const struct rte_flow_action *action,
334 struct rte_flow_error *error)
336 const struct rte_flow_action_set_mac *conf =
337 (const struct rte_flow_action_set_mac *)(action->conf);
338 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
339 struct rte_flow_item_eth eth;
340 struct rte_flow_item_eth eth_mask;
342 memset(&eth, 0, sizeof(eth));
343 memset(&eth_mask, 0, sizeof(eth_mask));
344 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
345 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
346 sizeof(eth.src.addr_bytes));
347 memcpy(&eth_mask.src.addr_bytes,
348 &rte_flow_item_eth_mask.src.addr_bytes,
349 sizeof(eth_mask.src.addr_bytes));
351 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
352 sizeof(eth.dst.addr_bytes));
353 memcpy(&eth_mask.dst.addr_bytes,
354 &rte_flow_item_eth_mask.dst.addr_bytes,
355 sizeof(eth_mask.dst.addr_bytes));
358 item.mask = &eth_mask;
359 return flow_dv_convert_modify_action(&item, modify_eth, resource,
360 MLX5_MODIFICATION_TYPE_SET, error);
364 * Convert modify-header set TP action to DV specification.
366 * @param[in,out] resource
367 * Pointer to the modify-header resource.
369 * Pointer to action specification.
371 * Pointer to rte_flow_item objects list.
373 * Pointer to flow attributes structure.
375 * Pointer to the error structure.
378 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds a UDP or TCP port-set item depending on the flow attributes
 * (initialized from the pattern when needed) and converts it with the
 * matching modify_udp/modify_tcp table.
 * NOTE(review): the attr-validity check before flow_dv_attr_init(), the
 * `if (attr->udp) { ... } else ...` branch structure, the `item.spec`/`field`
 * assignments and closing braces are missing from this extraction — restore
 * from upstream before building.
 */
381 flow_dv_convert_action_modify_tp
382 (struct mlx5_flow_dv_modify_hdr_resource *resource,
383 const struct rte_flow_action *action,
384 const struct rte_flow_item *items,
385 union flow_dv_attr *attr,
386 struct rte_flow_error *error)
388 const struct rte_flow_action_set_tp *conf =
389 (const struct rte_flow_action_set_tp *)(action->conf);
390 struct rte_flow_item item;
391 struct rte_flow_item_udp udp;
392 struct rte_flow_item_udp udp_mask;
393 struct rte_flow_item_tcp tcp;
394 struct rte_flow_item_tcp tcp_mask;
395 struct field_modify_info *field;
398 flow_dv_attr_init(items, attr);
/* UDP branch: set source or destination port. */
400 memset(&udp, 0, sizeof(udp));
401 memset(&udp_mask, 0, sizeof(udp_mask));
402 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
403 udp.hdr.src_port = conf->port;
404 udp_mask.hdr.src_port =
405 rte_flow_item_udp_mask.hdr.src_port;
407 udp.hdr.dst_port = conf->port;
408 udp_mask.hdr.dst_port =
409 rte_flow_item_udp_mask.hdr.dst_port;
411 item.type = RTE_FLOW_ITEM_TYPE_UDP;
413 item.mask = &udp_mask;
/* TCP branch: same logic with the TCP header and mask. */
417 memset(&tcp, 0, sizeof(tcp));
418 memset(&tcp_mask, 0, sizeof(tcp_mask));
419 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
420 tcp.hdr.src_port = conf->port;
421 tcp_mask.hdr.src_port =
422 rte_flow_item_tcp_mask.hdr.src_port;
424 tcp.hdr.dst_port = conf->port;
425 tcp_mask.hdr.dst_port =
426 rte_flow_item_tcp_mask.hdr.dst_port;
428 item.type = RTE_FLOW_ITEM_TYPE_TCP;
430 item.mask = &tcp_mask;
433 return flow_dv_convert_modify_action(&item, field, resource,
434 MLX5_MODIFICATION_TYPE_SET, error);
438 * Convert modify-header set TTL action to DV specification.
440 * @param[in,out] resource
441 * Pointer to the modify-header resource.
443 * Pointer to action specification.
445 * Pointer to rte_flow_item objects list.
447 * Pointer to flow attributes structure.
449 * Pointer to the error structure.
452 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds an IPv4 TTL or IPv6 hop-limit SET item (mask 0xFF selects the
 * single byte) and converts it with the matching modify table.
 * NOTE(review): the `if (attr->ipv4) { ... } else ...` branch structure,
 * `item.spec`/`field` assignments and closing braces are missing from this
 * extraction — restore from upstream before building.
 */
455 flow_dv_convert_action_modify_ttl
456 (struct mlx5_flow_dv_modify_hdr_resource *resource,
457 const struct rte_flow_action *action,
458 const struct rte_flow_item *items,
459 union flow_dv_attr *attr,
460 struct rte_flow_error *error)
462 const struct rte_flow_action_set_ttl *conf =
463 (const struct rte_flow_action_set_ttl *)(action->conf);
464 struct rte_flow_item item;
465 struct rte_flow_item_ipv4 ipv4;
466 struct rte_flow_item_ipv4 ipv4_mask;
467 struct rte_flow_item_ipv6 ipv6;
468 struct rte_flow_item_ipv6 ipv6_mask;
469 struct field_modify_info *field;
472 flow_dv_attr_init(items, attr);
/* IPv4 branch: write the configured TTL value. */
474 memset(&ipv4, 0, sizeof(ipv4));
475 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
476 ipv4.hdr.time_to_live = conf->ttl_value;
477 ipv4_mask.hdr.time_to_live = 0xFF;
478 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
480 item.mask = &ipv4_mask;
/* IPv6 branch: same value written to the hop-limit byte. */
484 memset(&ipv6, 0, sizeof(ipv6));
485 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
486 ipv6.hdr.hop_limits = conf->ttl_value;
487 ipv6_mask.hdr.hop_limits = 0xFF;
488 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
490 item.mask = &ipv6_mask;
493 return flow_dv_convert_modify_action(&item, field, resource,
494 MLX5_MODIFICATION_TYPE_SET, error);
498 * Convert modify-header decrement TTL action to DV specification.
500 * @param[in,out] resource
501 * Pointer to the modify-header resource.
503 * Pointer to action specification.
505 * Pointer to rte_flow_item objects list.
507 * Pointer to flow attributes structure.
509 * Pointer to the error structure.
512 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Implements TTL decrement as an ADD of 0xFF (i.e. -1 modulo 256) to the
 * TTL/hop-limit byte — note MLX5_MODIFICATION_TYPE_ADD on the final call,
 * unlike the SET used by the other converters.
 * NOTE(review): the `if (attr->ipv4) { ... } else ...` branch structure,
 * `item.spec`/`field` assignments and closing braces are missing from this
 * extraction — restore from upstream before building.
 */
515 flow_dv_convert_action_modify_dec_ttl
516 (struct mlx5_flow_dv_modify_hdr_resource *resource,
517 const struct rte_flow_item *items,
518 union flow_dv_attr *attr,
519 struct rte_flow_error *error)
521 struct rte_flow_item item;
522 struct rte_flow_item_ipv4 ipv4;
523 struct rte_flow_item_ipv4 ipv4_mask;
524 struct rte_flow_item_ipv6 ipv6;
525 struct rte_flow_item_ipv6 ipv6_mask;
526 struct field_modify_info *field;
529 flow_dv_attr_init(items, attr);
/* IPv4 branch: add 0xFF to time_to_live (wraps to TTL-1). */
531 memset(&ipv4, 0, sizeof(ipv4));
532 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
533 ipv4.hdr.time_to_live = 0xFF;
534 ipv4_mask.hdr.time_to_live = 0xFF;
535 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
537 item.mask = &ipv4_mask;
/* IPv6 branch: same trick on hop_limits. */
541 memset(&ipv6, 0, sizeof(ipv6));
542 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
543 ipv6.hdr.hop_limits = 0xFF;
544 ipv6_mask.hdr.hop_limits = 0xFF;
545 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
547 item.mask = &ipv6_mask;
550 return flow_dv_convert_modify_action(&item, field, resource,
551 MLX5_MODIFICATION_TYPE_ADD, error);
555 * Validate META item.
558 * Pointer to the rte_eth_dev structure.
560 * Item specification.
562 * Attributes of flow that includes this item.
564 * Pointer to error structure.
567 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * META matching requires DEV_TX_OFFLOAD_MATCH_METADATA to be enabled on the
 * port and a non-empty, non-zero data value; egress-only (the trailing
 * error path rejects ingress patterns).
 * NOTE(review): the `if (!spec)` / `if (!spec->data)` / `if (!mask)` guard
 * lines, the `ret` declaration and check, the final attr->ingress test and
 * `return 0;` are missing from this extraction — restore from upstream.
 */
570 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
571 const struct rte_flow_item *item,
572 const struct rte_flow_attr *attr,
573 struct rte_flow_error *error)
575 const struct rte_flow_item_meta *spec = item->spec;
576 const struct rte_flow_item_meta *mask = item->mask;
577 const struct rte_flow_item_meta nic_mask = {
578 .data = RTE_BE32(UINT32_MAX)
581 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
583 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
584 return rte_flow_error_set(error, EPERM,
585 RTE_FLOW_ERROR_TYPE_ITEM,
587 "match on metadata offload "
588 "configuration is off for this port");
590 return rte_flow_error_set(error, EINVAL,
591 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
593 "data cannot be empty");
595 return rte_flow_error_set(error, EINVAL,
596 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
598 "data cannot be zero");
/* Default to the generic meta mask when the user supplied none. */
600 mask = &rte_flow_item_meta_mask;
601 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
602 (const uint8_t *)&nic_mask,
603 sizeof(struct rte_flow_item_meta),
608 return rte_flow_error_set(error, ENOTSUP,
609 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
611 "pattern not supported for ingress");
616 * Validate count action.
621 * Pointer to error structure.
624 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * COUNT is supported only when DevX is available (priv->config.devx) and
 * the build has HAVE_IBV_FLOW_DEVX_COUNTERS; otherwise the error path
 * reports "count action not supported".
 * NOTE(review): the `goto notsup_err` target, the `return 0;` inside the
 * #ifdef, the #endif and closing brace are missing from this extraction.
 */
627 flow_dv_validate_action_count(struct rte_eth_dev *dev,
628 struct rte_flow_error *error)
630 struct mlx5_priv *priv = dev->data->dev_private;
632 if (!priv->config.devx)
634 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
638 return rte_flow_error_set
640 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
642 "count action not supported");
646 * Validate the L2 encap action.
648 * @param[in] action_flags
649 * Holds the actions detected until now.
651 * Pointer to the encap action.
653 * Pointer to flow attributes
655 * Pointer to error structure.
658 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: missing conf, combination with DROP, or more than one
 * encap/decap action per flow; the trailing error path rejects ingress
 * flows ("encap action not supported for ...").
 * NOTE(review): the `if (!action->conf)` guard line, the attr->ingress test
 * before the final error, `return 0;` and closing brace are missing from
 * this extraction — restore from upstream.
 */
661 flow_dv_validate_action_l2_encap(uint64_t action_flags,
662 const struct rte_flow_action *action,
663 const struct rte_flow_attr *attr,
664 struct rte_flow_error *error)
667 return rte_flow_error_set(error, EINVAL,
668 RTE_FLOW_ERROR_TYPE_ACTION, action,
669 "configuration cannot be null");
670 if (action_flags & MLX5_FLOW_ACTION_DROP)
671 return rte_flow_error_set(error, EINVAL,
672 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
673 "can't drop and encap in same flow");
674 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
675 return rte_flow_error_set(error, EINVAL,
676 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
677 "can only have a single encap or"
678 " decap action in a flow");
680 return rte_flow_error_set(error, ENOTSUP,
681 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
683 "encap action not supported for "
689 * Validate the L2 decap action.
691 * @param[in] action_flags
692 * Holds the actions detected until now.
694 * Pointer to flow attributes
696 * Pointer to error structure.
699 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: combination with DROP, multiple encap/decap actions, decap
 * after a modify-header action, and (trailing error path) egress flows.
 * NOTE(review): the tail of the "decap action after" message, the
 * attr->egress test before the last error, `return 0;` and closing brace
 * are missing from this extraction — restore from upstream.
 */
702 flow_dv_validate_action_l2_decap(uint64_t action_flags,
703 const struct rte_flow_attr *attr,
704 struct rte_flow_error *error)
706 if (action_flags & MLX5_FLOW_ACTION_DROP)
707 return rte_flow_error_set(error, EINVAL,
708 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
709 "can't drop and decap in same flow");
710 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
711 return rte_flow_error_set(error, EINVAL,
712 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
713 "can only have a single encap or"
714 " decap action in a flow");
715 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
716 return rte_flow_error_set(error, EINVAL,
717 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
718 "can't have decap action after"
721 return rte_flow_error_set(error, ENOTSUP,
722 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
724 "decap action not supported for "
730 * Validate the raw encap action.
732 * @param[in] action_flags
733 * Holds the actions detected until now.
735 * Pointer to the encap action.
737 * Pointer to flow attributes
739 * Pointer to error structure.
742 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: missing conf, combination with DROP, multiple encap actions,
 * and ingress encap without a preceding raw decap.
 * NOTE(review): the `if (!action->conf)` guard line, the tail of the last
 * error message, `return 0;` and closing brace are missing from this
 * extraction — restore from upstream.
 */
745 flow_dv_validate_action_raw_encap(uint64_t action_flags,
746 const struct rte_flow_action *action,
747 const struct rte_flow_attr *attr,
748 struct rte_flow_error *error)
751 return rte_flow_error_set(error, EINVAL,
752 RTE_FLOW_ERROR_TYPE_ACTION, action,
753 "configuration cannot be null");
754 if (action_flags & MLX5_FLOW_ACTION_DROP)
755 return rte_flow_error_set(error, EINVAL,
756 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
757 "can't drop and encap in same flow");
758 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
759 return rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
761 "can only have a single encap"
762 " action in a flow");
763 /* encap without preceding decap is not supported for ingress */
764 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
765 return rte_flow_error_set(error, ENOTSUP,
766 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
768 "encap action not supported for "
774 * Validate the raw decap action.
776 * @param[in] action_flags
777 * Holds the actions detected until now.
779 * Pointer to the encap action.
781 * Pointer to flow attributes
783 * Pointer to error structure.
786 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Rejects: combination with DROP, decap after encap, multiple decaps,
 * decap after a modify-header action.  On egress, raw decap is valid only
 * when followed later in the action list by a raw encap — the for-loop
 * scans forward for RTE_FLOW_ACTION_TYPE_RAW_ENCAP.
 * NOTE(review): the `if (attr->egress) {` guard around the scan, the loop
 * body (`action++`), `return 0;` and closing braces are missing from this
 * extraction — restore from upstream.
 */
789 flow_dv_validate_action_raw_decap(uint64_t action_flags,
790 const struct rte_flow_action *action,
791 const struct rte_flow_attr *attr,
792 struct rte_flow_error *error)
794 if (action_flags & MLX5_FLOW_ACTION_DROP)
795 return rte_flow_error_set(error, EINVAL,
796 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
797 "can't drop and decap in same flow");
798 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
799 return rte_flow_error_set(error, EINVAL,
800 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
801 "can't have encap action before"
803 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
804 return rte_flow_error_set(error, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
806 "can only have a single decap"
807 " action in a flow");
808 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
809 return rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
811 "can't have decap action after"
813 /* decap action is valid on egress only if it is followed by encap */
815 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
816 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
819 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
820 return rte_flow_error_set
822 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
823 NULL, "decap action not supported"
830 * Find existing encap/decap resource or create and register a new one.
832 * @param dev[in, out]
833 * Pointer to rte_eth_dev structure.
834 * @param[in, out] resource
835 * Pointer to encap/decap resource.
836 * @parm[in, out] dev_flow
837 * Pointer to the dev_flow.
839 * pointer to error structure.
842 * 0 on success otherwise -errno and errno is set.
/*
 * Cache lookup keyed on (reformat_type, ft_type, flags, size, buf); a hit
 * bumps the refcount and reuses the cached verbs action, a miss allocates
 * a new entry, creates the packet-reformat action through the glue layer
 * and inserts it at the head of sh->encaps_decaps.
 * NOTE(review): the assignment of `ns` (DR namespace selection), the memcmp
 * size argument, the `return 0;` statements after the cache hit and at the
 * end, and several closing braces are missing from this extraction —
 * restore from upstream.  Also: caller must hold the shared-context lock
 * (flow_d_shared_lock) — presumably, confirm against callers.
 */
845 flow_dv_encap_decap_resource_register
846 (struct rte_eth_dev *dev,
847 struct mlx5_flow_dv_encap_decap_resource *resource,
848 struct mlx5_flow *dev_flow,
849 struct rte_flow_error *error)
851 struct mlx5_priv *priv = dev->data->dev_private;
852 struct mlx5_ibv_shared *sh = priv->sh;
853 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
854 struct rte_flow *flow = dev_flow->flow;
855 struct mlx5dv_dr_ns *ns;
/* Non-root-group flows use flags 0; group-0 flows use 1. */
857 resource->flags = flow->group ? 0 : 1;
863 /* Lookup a matching resource from cache. */
864 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
865 if (resource->reformat_type == cache_resource->reformat_type &&
866 resource->ft_type == cache_resource->ft_type &&
867 resource->flags == cache_resource->flags &&
868 resource->size == cache_resource->size &&
869 !memcmp((const void *)resource->buf,
870 (const void *)cache_resource->buf,
872 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
873 (void *)cache_resource,
874 rte_atomic32_read(&cache_resource->refcnt))
875 rte_atomic32_inc(&cache_resource->refcnt);
876 dev_flow->dv.encap_decap = cache_resource;
880 /* Register new encap/decap resource. */
881 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
883 return rte_flow_error_set(error, ENOMEM,
884 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
885 "cannot allocate resource memory");
886 *cache_resource = *resource;
887 cache_resource->verbs_action =
888 mlx5_glue->dv_create_flow_action_packet_reformat
889 (sh->ctx, cache_resource->reformat_type,
890 cache_resource->ft_type, ns, cache_resource->flags,
891 cache_resource->size,
892 (cache_resource->size ? cache_resource->buf : NULL));
893 if (!cache_resource->verbs_action) {
894 rte_free(cache_resource);
895 return rte_flow_error_set(error, ENOMEM,
896 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
897 NULL, "cannot create action");
899 rte_atomic32_init(&cache_resource->refcnt);
900 rte_atomic32_inc(&cache_resource->refcnt);
901 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
902 dev_flow->dv.encap_decap = cache_resource;
903 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
904 (void *)cache_resource,
905 rte_atomic32_read(&cache_resource->refcnt));
910 * Find existing table jump resource or create and register a new one.
912 * @param dev[in, out]
913 * Pointer to rte_eth_dev structure.
914 * @param[in, out] resource
915 * Pointer to jump table resource.
916 * @parm[in, out] dev_flow
917 * Pointer to the dev_flow.
919 * pointer to error structure.
922 * 0 on success otherwise -errno and errno is set.
/*
 * Same register-or-reuse pattern as the encap/decap cache, keyed solely on
 * the destination table object (resource->tbl): a hit bumps the refcount,
 * a miss creates a dest-flow-table DR action and inserts it into
 * sh->jump_tbl.
 * NOTE(review): the `return 0;` after the cache hit and at the end, plus
 * closing braces, are missing from this extraction — restore from upstream.
 */
925 flow_dv_jump_tbl_resource_register
926 (struct rte_eth_dev *dev,
927 struct mlx5_flow_dv_jump_tbl_resource *resource,
928 struct mlx5_flow *dev_flow,
929 struct rte_flow_error *error)
931 struct mlx5_priv *priv = dev->data->dev_private;
932 struct mlx5_ibv_shared *sh = priv->sh;
933 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
935 /* Lookup a matching resource from cache. */
936 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
937 if (resource->tbl == cache_resource->tbl) {
938 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
939 (void *)cache_resource,
940 rte_atomic32_read(&cache_resource->refcnt));
941 rte_atomic32_inc(&cache_resource->refcnt);
942 dev_flow->dv.jump = cache_resource;
946 /* Register new jump table resource. */
947 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
949 return rte_flow_error_set(error, ENOMEM,
950 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
951 "cannot allocate resource memory");
952 *cache_resource = *resource;
953 cache_resource->action =
954 mlx5_glue->dr_create_flow_action_dest_flow_tbl
955 (resource->tbl->obj);
956 if (!cache_resource->action) {
957 rte_free(cache_resource);
958 return rte_flow_error_set(error, ENOMEM,
959 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
960 NULL, "cannot create action");
962 rte_atomic32_init(&cache_resource->refcnt);
963 rte_atomic32_inc(&cache_resource->refcnt);
964 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
965 dev_flow->dv.jump = cache_resource;
966 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
967 (void *)cache_resource,
968 rte_atomic32_read(&cache_resource->refcnt));
973 * Get the size of specific rte_flow_item_type
975 * @param[in] item_type
976 * Tested rte_flow_item_type.
979 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * Simple type-to-sizeof lookup used when flattening encap pattern items
 * into a raw header buffer (see flow_dv_convert_encap_data).
 * NOTE(review): the `retval` declaration, the switch header, the break
 * statements, the default/return tail and closing braces are missing from
 * this extraction — restore from upstream.
 */
982 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
987 case RTE_FLOW_ITEM_TYPE_ETH:
988 retval = sizeof(struct rte_flow_item_eth);
990 case RTE_FLOW_ITEM_TYPE_VLAN:
991 retval = sizeof(struct rte_flow_item_vlan);
993 case RTE_FLOW_ITEM_TYPE_IPV4:
994 retval = sizeof(struct rte_flow_item_ipv4);
996 case RTE_FLOW_ITEM_TYPE_IPV6:
997 retval = sizeof(struct rte_flow_item_ipv6);
999 case RTE_FLOW_ITEM_TYPE_UDP:
1000 retval = sizeof(struct rte_flow_item_udp);
1002 case RTE_FLOW_ITEM_TYPE_TCP:
1003 retval = sizeof(struct rte_flow_item_tcp);
1005 case RTE_FLOW_ITEM_TYPE_VXLAN:
1006 retval = sizeof(struct rte_flow_item_vxlan);
1008 case RTE_FLOW_ITEM_TYPE_GRE:
1009 retval = sizeof(struct rte_flow_item_gre);
1011 case RTE_FLOW_ITEM_TYPE_NVGRE:
1012 retval = sizeof(struct rte_flow_item_nvgre);
1014 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1015 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1017 case RTE_FLOW_ITEM_TYPE_MPLS:
1018 retval = sizeof(struct rte_flow_item_mpls);
1020 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/* Default header-field values filled in when the user's encap pattern
 * leaves them zero (version/IHL, TTL, VTC flow, hop limit, VXLAN flags). */
1028 #define MLX5_ENCAP_IPV4_VERSION 0x40
1029 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1030 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1031 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1032 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1033 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1034 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
1037 * Convert the encap action data from list of rte_flow_item to raw buffer
1040 * Pointer to rte_flow_item objects list.
1042 * Pointer to the output buffer.
1044 * Pointer to the output buffer size.
1046 * Pointer to the error structure.
1049 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Serializes the item specs back-to-back into @buf (capped at
 * MLX5_ENCAP_MAX_LEN), fixing up cross-layer fields as it goes: ether
 * types / VLAN eth_proto, IPv4/IPv6 defaults, next-protocol bytes, and
 * well-known UDP destination ports for VXLAN / VXLAN-GPE.  Each layer
 * checks that its required outer layer was already seen.
 * NOTE(review): the `len` declaration, the `break` statements, several
 * guard-condition lines (`if (!items)`, `if (!eth)`, `if (!udp)` etc.),
 * the `temp_size += len;` advance, the final `*size = temp_size; return 0;`
 * and closing braces are missing from this extraction — restore from
 * upstream before building.
 */
1052 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1053 size_t *size, struct rte_flow_error *error)
1055 struct ether_hdr *eth = NULL;
1056 struct vlan_hdr *vlan = NULL;
1057 struct ipv4_hdr *ipv4 = NULL;
1058 struct ipv6_hdr *ipv6 = NULL;
1059 struct udp_hdr *udp = NULL;
1060 struct vxlan_hdr *vxlan = NULL;
1061 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
1062 struct gre_hdr *gre = NULL;
1064 size_t temp_size = 0;
1067 return rte_flow_error_set(error, EINVAL,
1068 RTE_FLOW_ERROR_TYPE_ACTION,
1069 NULL, "invalid empty data");
1070 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1071 len = flow_dv_get_item_len(items->type);
1072 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1073 return rte_flow_error_set(error, EINVAL,
1074 RTE_FLOW_ERROR_TYPE_ACTION,
1075 (void *)items->type,
1076 "items total size is too big"
1077 " for encap action");
1078 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1079 switch (items->type) {
1080 case RTE_FLOW_ITEM_TYPE_ETH:
1081 eth = (struct ether_hdr *)&buf[temp_size];
1083 case RTE_FLOW_ITEM_TYPE_VLAN:
1084 vlan = (struct vlan_hdr *)&buf[temp_size];
1086 return rte_flow_error_set(error, EINVAL,
1087 RTE_FLOW_ERROR_TYPE_ACTION,
1088 (void *)items->type,
1089 "eth header not found");
1090 if (!eth->ether_type)
1091 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
1093 case RTE_FLOW_ITEM_TYPE_IPV4:
1094 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
1096 return rte_flow_error_set(error, EINVAL,
1097 RTE_FLOW_ERROR_TYPE_ACTION,
1098 (void *)items->type,
1099 "neither eth nor vlan"
1101 if (vlan && !vlan->eth_proto)
1102 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
1103 else if (eth && !eth->ether_type)
1104 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
1105 if (!ipv4->version_ihl)
1106 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1107 MLX5_ENCAP_IPV4_IHL_MIN;
1108 if (!ipv4->time_to_live)
1109 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1111 case RTE_FLOW_ITEM_TYPE_IPV6:
1112 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1114 return rte_flow_error_set(error, EINVAL,
1115 RTE_FLOW_ERROR_TYPE_ACTION,
1116 (void *)items->type,
1117 "neither eth nor vlan"
1119 if (vlan && !vlan->eth_proto)
1120 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1121 else if (eth && !eth->ether_type)
1122 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1123 if (!ipv6->vtc_flow)
1125 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1126 if (!ipv6->hop_limits)
1127 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1129 case RTE_FLOW_ITEM_TYPE_UDP:
1130 udp = (struct udp_hdr *)&buf[temp_size];
1132 return rte_flow_error_set(error, EINVAL,
1133 RTE_FLOW_ERROR_TYPE_ACTION,
1134 (void *)items->type,
1135 "ip header not found");
1136 if (ipv4 && !ipv4->next_proto_id)
1137 ipv4->next_proto_id = IPPROTO_UDP;
1138 else if (ipv6 && !ipv6->proto)
1139 ipv6->proto = IPPROTO_UDP;
1141 case RTE_FLOW_ITEM_TYPE_VXLAN:
1142 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1144 return rte_flow_error_set(error, EINVAL,
1145 RTE_FLOW_ERROR_TYPE_ACTION,
1146 (void *)items->type,
1147 "udp header not found");
1149 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1150 if (!vxlan->vx_flags)
1152 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1154 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1155 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1157 return rte_flow_error_set(error, EINVAL,
1158 RTE_FLOW_ERROR_TYPE_ACTION,
1159 (void *)items->type,
1160 "udp header not found");
1161 if (!vxlan_gpe->proto)
1162 return rte_flow_error_set(error, EINVAL,
1163 RTE_FLOW_ERROR_TYPE_ACTION,
1164 (void *)items->type,
1165 "next protocol not found");
1168 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1169 if (!vxlan_gpe->vx_flags)
1170 vxlan_gpe->vx_flags =
1171 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1173 case RTE_FLOW_ITEM_TYPE_GRE:
1174 case RTE_FLOW_ITEM_TYPE_NVGRE:
1175 gre = (struct gre_hdr *)&buf[temp_size];
1177 return rte_flow_error_set(error, EINVAL,
1178 RTE_FLOW_ERROR_TYPE_ACTION,
1179 (void *)items->type,
1180 "next protocol not found");
1182 return rte_flow_error_set(error, EINVAL,
1183 RTE_FLOW_ERROR_TYPE_ACTION,
1184 (void *)items->type,
1185 "ip header not found");
1186 if (ipv4 && !ipv4->next_proto_id)
1187 ipv4->next_proto_id = IPPROTO_GRE;
1188 else if (ipv6 && !ipv6->proto)
1189 ipv6->proto = IPPROTO_GRE;
1191 case RTE_FLOW_ITEM_TYPE_VOID:
1194 return rte_flow_error_set(error, EINVAL,
1195 RTE_FLOW_ERROR_TYPE_ACTION,
1196 (void *)items->type,
1197 "unsupported item type");
1207 * Convert L2 encap action to DV specification.
1210 * Pointer to rte_eth_dev structure.
1212 * Pointer to action structure.
1213 * @param[in, out] dev_flow
1214 * Pointer to the mlx5_flow.
1216 * Pointer to the error structure.
1219 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Builds an L2-to-L2-tunnel reformat resource on the NIC TX table: for
 * RAW_ENCAP the buffer is copied as-is, for VXLAN/NVGRE encap the item
 * definition list is flattened via flow_dv_convert_encap_data(), then the
 * resource is registered (cached) via
 * flow_dv_encap_decap_resource_register().
 * NOTE(review): the `else` lines between the action-type branches, the
 * error-propagation after flow_dv_convert_encap_data() (`&res.size, error`
 * arguments and `return -rte_errno;`), the final `return 0;` and closing
 * braces are missing from this extraction — restore from upstream.
 */
1222 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1223 const struct rte_flow_action *action,
1224 struct mlx5_flow *dev_flow,
1225 struct rte_flow_error *error)
1227 const struct rte_flow_item *encap_data;
1228 const struct rte_flow_action_raw_encap *raw_encap_data;
1229 struct mlx5_flow_dv_encap_decap_resource res = {
1231 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1232 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1235 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1237 (const struct rte_flow_action_raw_encap *)action->conf;
1238 res.size = raw_encap_data->size;
1239 memcpy(res.buf, raw_encap_data->data, res.size);
1241 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1243 ((const struct rte_flow_action_vxlan_encap *)
1244 action->conf)->definition;
1247 ((const struct rte_flow_action_nvgre_encap *)
1248 action->conf)->definition;
1249 if (flow_dv_convert_encap_data(encap_data, res.buf,
1253 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1254 return rte_flow_error_set(error, EINVAL,
1255 RTE_FLOW_ERROR_TYPE_ACTION,
1256 NULL, "can't create L2 encap action");
1261 * Convert L2 decap action to DV specification.
1264 * Pointer to rte_eth_dev structure.
1265 * @param[in, out] dev_flow
1266 * Pointer to the mlx5_flow.
1268 * Pointer to the error structure.
1271 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build an L2-tunnel-to-L2 decap reformat resource (no header data needed)
 * and register it on the RX table for this flow.
 * NOTE(review): excerpt elides some original lines; code kept byte-identical.
 */
1274 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1275 struct mlx5_flow *dev_flow,
1276 struct rte_flow_error *error)
1278 struct mlx5_flow_dv_encap_decap_resource res = {
1281 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1282 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1285 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1286 return rte_flow_error_set(error, EINVAL,
1287 RTE_FLOW_ERROR_TYPE_ACTION,
1288 NULL, "can't create L2 decap action");
1293 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1296 * Pointer to rte_eth_dev structure.
1298 * Pointer to action structure.
1299 * @param[in, out] dev_flow
1300 * Pointer to the mlx5_flow.
1302 * Pointer to the flow attributes.
1304 * Pointer to the error structure.
1307 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build an L3-tunnel reformat resource from a RAW encap/decap action:
 * egress -> L2_TO_L3_TUNNEL on the TX table, ingress -> L3_TUNNEL_TO_L2 on RX.
 * NOTE(review): unlike the L2 variants, `res` has no initializer here, so any
 * fields not assigned below stay indeterminate; if the register helper
 * compares whole resources (e.g. memcmp) this could misbehave -- verify
 * against the elided lines / upstream.
 */
1310 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1311 const struct rte_flow_action *action,
1312 struct mlx5_flow *dev_flow,
1313 const struct rte_flow_attr *attr,
1314 struct rte_flow_error *error)
1316 const struct rte_flow_action_raw_encap *encap_data;
1317 struct mlx5_flow_dv_encap_decap_resource res;
1319 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1320 res.size = encap_data->size;
1321 memcpy(res.buf, encap_data->data, res.size);
/* Direction decides both the reformat type and the target flow table. */
1322 res.reformat_type = attr->egress ?
1323 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1324 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1325 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1326 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1327 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1328 return rte_flow_error_set(error, EINVAL,
1329 RTE_FLOW_ERROR_TYPE_ACTION,
1330 NULL, "can't create encap action");
1335 * Validate the modify-header actions.
1337 * @param[in] action_flags
1338 * Holds the actions detected until now.
1340 * Pointer to the modify action.
1342 * Pointer to error structure.
1345 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Common validation for all modify-header actions: conf must be present
 * (DEC_TTL is the one action that legitimately has no conf), and a
 * modify-header may not follow an encap action in the same flow.
 */
1348 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1349 const struct rte_flow_action *action,
1350 struct rte_flow_error *error)
1352 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1353 return rte_flow_error_set(error, EINVAL,
1354 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1355 NULL, "action configuration not set")
1356 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1357 return rte_flow_error_set(error, EINVAL,
1358 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1359 "can't have encap action before"
1365 * Validate the modify-header MAC address actions.
1367 * @param[in] action_flags
1368 * Holds the actions detected until now.
1370 * Pointer to the modify action.
1371 * @param[in] item_flags
1372 * Holds the items detected.
1374 * Pointer to error structure.
1377 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_MAC_SRC/DST: run the common modify-header checks, then
 * require an Ethernet (L2) item in the pattern so the field exists to edit.
 */
1380 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1381 const struct rte_flow_action *action,
1382 const uint64_t item_flags,
1383 struct rte_flow_error *error)
1387 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1389 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1390 return rte_flow_error_set(error, EINVAL,
1391 RTE_FLOW_ERROR_TYPE_ACTION,
1393 "no L2 item in pattern");
1399 * Validate the modify-header IPv4 address actions.
1401 * @param[in] action_flags
1402 * Holds the actions detected until now.
1404 * Pointer to the modify action.
1405 * @param[in] item_flags
1406 * Holds the items detected.
1408 * Pointer to error structure.
1411 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_IPV4_SRC/DST: common modify-header checks plus an IPv4
 * item must be present in the pattern.
 */
1414 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1415 const struct rte_flow_action *action,
1416 const uint64_t item_flags,
1417 struct rte_flow_error *error)
1421 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1423 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1424 return rte_flow_error_set(error, EINVAL,
1425 RTE_FLOW_ERROR_TYPE_ACTION,
1427 "no ipv4 item in pattern");
1433 * Validate the modify-header IPv6 address actions.
1435 * @param[in] action_flags
1436 * Holds the actions detected until now.
1438 * Pointer to the modify action.
1439 * @param[in] item_flags
1440 * Holds the items detected.
1442 * Pointer to error structure.
1445 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_IPV6_SRC/DST: common modify-header checks plus an IPv6
 * item must be present in the pattern.
 */
1448 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1449 const struct rte_flow_action *action,
1450 const uint64_t item_flags,
1451 struct rte_flow_error *error)
1455 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1457 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1458 return rte_flow_error_set(error, EINVAL,
1459 RTE_FLOW_ERROR_TYPE_ACTION,
1461 "no ipv6 item in pattern");
1467 * Validate the modify-header TP actions.
1469 * @param[in] action_flags
1470 * Holds the actions detected until now.
1472 * Pointer to the modify action.
1473 * @param[in] item_flags
1474 * Holds the items detected.
1476 * Pointer to error structure.
1479 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_TP_SRC/DST (transport ports): common modify-header checks
 * plus a TCP/UDP (L4) item must be present in the pattern.
 */
1482 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1483 const struct rte_flow_action *action,
1484 const uint64_t item_flags,
1485 struct rte_flow_error *error)
1489 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1491 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1492 return rte_flow_error_set(error, EINVAL,
1493 RTE_FLOW_ERROR_TYPE_ACTION,
1494 NULL, "no transport layer "
1501 * Validate the modify-header TTL actions.
1503 * @param[in] action_flags
1504 * Holds the actions detected until now.
1506 * Pointer to the modify action.
1507 * @param[in] item_flags
1508 * Holds the items detected.
1510 * Pointer to error structure.
1513 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_TTL/DEC_TTL: common modify-header checks plus an IPv4 or
 * IPv6 (L3) item must be present in the pattern.
 */
1516 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1517 const struct rte_flow_action *action,
1518 const uint64_t item_flags,
1519 struct rte_flow_error *error)
1523 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1525 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1526 return rte_flow_error_set(error, EINVAL,
1527 RTE_FLOW_ERROR_TYPE_ACTION,
1529 "no IP protocol in pattern");
1535 * Validate jump action.
1538 * Pointer to the modify action.
1540 * The group of the current flow.
1542 * Pointer to error structure.
1545 * 0 on success, a negative errno value otherwise and rte_errno is set.
1548 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1550 struct rte_flow_error *error)
1552 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1553 return rte_flow_error_set(error, EINVAL,
1554 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1555 NULL, "action configuration not set");
1556 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1557 return rte_flow_error_set(error, EINVAL,
1558 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1559 "target group must be higher then"
1560 " the current flow group");
1566 * Find existing modify-header resource or create and register a new one.
1568 * @param dev[in, out]
1569 * Pointer to rte_eth_dev structure.
1570 * @param[in, out] resource
1571 * Pointer to modify-header resource.
1572 * @parm[in, out] dev_flow
1573 * Pointer to the dev_flow.
1575 * pointer to error structure.
1578 * 0 on success otherwise -errno and errno is set.
/*
 * Find an existing modify-header resource matching (ft_type, actions) or
 * allocate a new one, create its verbs action, and attach it to @dev_flow.
 * NOTE(review): several original lines are elided (returns, closing braces,
 * one argument of the glue call that presumably passes `ns`); visible code
 * is kept byte-identical.
 */
1581 flow_dv_modify_hdr_resource_register
1582 (struct rte_eth_dev *dev,
1583 struct mlx5_flow_dv_modify_hdr_resource *resource,
1584 struct mlx5_flow *dev_flow,
1585 struct rte_flow_error *error)
1587 struct mlx5_priv *priv = dev->data->dev_private;
1588 struct mlx5_ibv_shared *sh = priv->sh;
1589 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
/* Pick the TX or RX namespace based on the target flow-table type. */
1591 struct mlx5dv_dr_ns *ns =
1592 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
1593 sh->tx_ns : sh->rx_ns;
1595 /* Lookup a matching resource from cache. */
1596 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1597 if (resource->ft_type == cache_resource->ft_type &&
1598 resource->actions_num == cache_resource->actions_num &&
1599 !memcmp((const void *)resource->actions,
1600 (const void *)cache_resource->actions,
1601 (resource->actions_num *
1602 sizeof(resource->actions[0])))) {
/* Cache hit: bump refcount and reuse the existing verbs action. */
1603 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1604 (void *)cache_resource,
1605 rte_atomic32_read(&cache_resource->refcnt));
1606 rte_atomic32_inc(&cache_resource->refcnt);
1607 dev_flow->dv.modify_hdr = cache_resource;
1611 /* Register new modify-header resource. */
1612 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1613 if (!cache_resource)
1614 return rte_flow_error_set(error, ENOMEM,
1615 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1616 "cannot allocate resource memory");
1617 *cache_resource = *resource;
1618 cache_resource->verbs_action =
1619 mlx5_glue->dv_create_flow_action_modify_header
1620 (sh->ctx, cache_resource->ft_type,
1622 cache_resource->actions_num *
1623 sizeof(cache_resource->actions[0]),
1624 (uint64_t *)cache_resource->actions);
1625 if (!cache_resource->verbs_action) {
/* Creation failed: release the allocation before reporting the error. */
1626 rte_free(cache_resource);
1627 return rte_flow_error_set(error, ENOMEM,
1628 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1629 NULL, "cannot create action");
1631 rte_atomic32_init(&cache_resource->refcnt);
1632 rte_atomic32_inc(&cache_resource->refcnt);
1633 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1634 dev_flow->dv.modify_hdr = cache_resource;
1635 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1636 (void *)cache_resource,
1637 rte_atomic32_read(&cache_resource->refcnt));
1642 * Get or create a flow counter.
1645 * Pointer to the Ethernet device structure.
1647 * Indicate if this counter is shared with other flows.
1649 * Counter identifier.
1652 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
/*
 * Get (for shared counters) or create a DevX flow counter and its DV
 * counter action, then link it on the per-port counter list.
 * NOTE(review): heavily elided excerpt -- the shared-lookup guard, error
 * unwinding (freeing cnt/dcs on failure) and the return live in missing
 * lines; visible code is kept byte-identical.
 */
1654 static struct mlx5_flow_counter *
1655 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1657 struct mlx5_priv *priv = dev->data->dev_private;
1658 struct mlx5_flow_counter *cnt = NULL;
1659 struct mlx5_devx_counter_set *dcs = NULL;
/* Counters require DevX support on this device. */
1662 if (!priv->config.devx) {
/* Shared counters are deduplicated by id across flows. */
1667 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1668 if (cnt->shared && cnt->id == id) {
1674 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1675 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1680 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1683 struct mlx5_flow_counter tmpl = {
1689 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1695 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1705 * Release a flow counter.
1707 * @param[in] counter
1708 * Pointer to the counter handler.
/*
 * Drop one reference on a flow counter; on the last reference, free the
 * DevX counter set and unlink it from the per-port list.
 * NOTE(review): the decrement is a plain `--`, not an atomic op -- assumes
 * single-threaded flow management (confirm against callers).  The counter
 * struct itself is presumably freed in an elided line right after
 * `rte_free(counter->dcs)` -- verify no leak.
 */
1711 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1717 if (--counter->ref_cnt == 0) {
1718 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1720 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1721 LIST_REMOVE(counter, next);
1722 rte_free(counter->dcs);
1728 * Verify the @p attributes will be correctly understood by the NIC and store
1729 * them in the @p flow if everything is correct.
1732 * Pointer to dev struct.
1733 * @param[in] attributes
1734 * Pointer to flow attributes
1736 * Pointer to error structure.
1739 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate flow attributes for the DV engine: groups only with DR support,
 * priority within range, no transfer, and exactly one of ingress/egress.
 */
1742 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1743 const struct rte_flow_attr *attributes,
1744 struct rte_flow_error *error)
1746 struct mlx5_priv *priv = dev->data->dev_private;
1747 uint32_t priority_max = priv->config.flow_prio - 1;
/* Non-zero groups need the Direct Rules API. */
1749 #ifndef HAVE_MLX5DV_DR
1750 if (attributes->group)
1751 return rte_flow_error_set(error, ENOTSUP,
1752 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1754 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD means "let the PMD choose". */
1756 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1757 attributes->priority >= priority_max)
1758 return rte_flow_error_set(error, ENOTSUP,
1759 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1761 "priority out of range");
1762 if (attributes->transfer)
1763 return rte_flow_error_set(error, ENOTSUP,
1764 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1766 "transfer is not supported");
/* XOR: exactly one direction must be set. */
1767 if (!(attributes->egress ^ attributes->ingress))
1768 return rte_flow_error_set(error, ENOTSUP,
1769 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1770 "must specify exactly one of "
1771 "ingress or egress");
1776 * Internal validation function. For validating both actions and items.
1779 * Pointer to the rte_eth_dev structure.
1781 * Pointer to the flow attributes.
1783 * Pointer to the list of items.
1784 * @param[in] actions
1785 * Pointer to the list of actions.
1787 * Pointer to the error structure.
1790 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Top-level DV validation: check attributes, then walk the item list
 * accumulating layer flags (and the L3 next-protocol for tunnel checks),
 * then walk the action list accumulating action flags, finally require a
 * fate action on ingress flows.
 * NOTE(review): heavily elided excerpt -- `ret` checks, `break`s and closing
 * braces live in missing lines; visible code is kept byte-identical.
 */
1793 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1794 const struct rte_flow_item items[],
1795 const struct rte_flow_action actions[],
1796 struct rte_flow_error *error)
1799 uint64_t action_flags = 0;
1800 uint64_t item_flags = 0;
1801 uint64_t last_item = 0;
/* 0xff = "next protocol unknown/any". */
1802 uint8_t next_protocol = 0xff;
1807 ret = flow_dv_validate_attributes(dev, attr, error);
/* ---- Pattern items ---- */
1810 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* A tunnel layer already matched means following items are inner. */
1811 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1812 switch (items->type) {
1813 case RTE_FLOW_ITEM_TYPE_VOID:
1815 case RTE_FLOW_ITEM_TYPE_ETH:
1816 ret = mlx5_flow_validate_item_eth(items, item_flags,
1820 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1821 MLX5_FLOW_LAYER_OUTER_L2;
1823 case RTE_FLOW_ITEM_TYPE_VLAN:
1824 ret = mlx5_flow_validate_item_vlan(items, item_flags,
1828 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1829 MLX5_FLOW_LAYER_OUTER_VLAN;
1831 case RTE_FLOW_ITEM_TYPE_IPV4:
1832 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1836 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1837 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Track the masked next-protocol for later GRE validation. */
1838 if (items->mask != NULL &&
1839 ((const struct rte_flow_item_ipv4 *)
1840 items->mask)->hdr.next_proto_id) {
1842 ((const struct rte_flow_item_ipv4 *)
1843 (items->spec))->hdr.next_proto_id;
1845 ((const struct rte_flow_item_ipv4 *)
1846 (items->mask))->hdr.next_proto_id;
1848 /* Reset for inner layer. */
1849 next_protocol = 0xff;
1852 case RTE_FLOW_ITEM_TYPE_IPV6:
1853 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1857 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1858 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1859 if (items->mask != NULL &&
1860 ((const struct rte_flow_item_ipv6 *)
1861 items->mask)->hdr.proto) {
1863 ((const struct rte_flow_item_ipv6 *)
1864 items->spec)->hdr.proto;
1866 ((const struct rte_flow_item_ipv6 *)
1867 items->mask)->hdr.proto;
1869 /* Reset for inner layer. */
1870 next_protocol = 0xff;
1873 case RTE_FLOW_ITEM_TYPE_TCP:
1874 ret = mlx5_flow_validate_item_tcp
1877 &rte_flow_item_tcp_mask,
1881 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1882 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1884 case RTE_FLOW_ITEM_TYPE_UDP:
1885 ret = mlx5_flow_validate_item_udp(items, item_flags,
1890 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1891 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1893 case RTE_FLOW_ITEM_TYPE_GRE:
1894 case RTE_FLOW_ITEM_TYPE_NVGRE:
1895 ret = mlx5_flow_validate_item_gre(items, item_flags,
1896 next_protocol, error);
1899 last_item = MLX5_FLOW_LAYER_GRE;
1901 case RTE_FLOW_ITEM_TYPE_VXLAN:
1902 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1906 last_item = MLX5_FLOW_LAYER_VXLAN;
1908 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1909 ret = mlx5_flow_validate_item_vxlan_gpe(items,
1914 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1916 case RTE_FLOW_ITEM_TYPE_MPLS:
1917 ret = mlx5_flow_validate_item_mpls(dev, items,
1922 last_item = MLX5_FLOW_LAYER_MPLS;
1924 case RTE_FLOW_ITEM_TYPE_META:
1925 ret = flow_dv_validate_item_meta(dev, items, attr,
1929 last_item = MLX5_FLOW_ITEM_METADATA;
1932 return rte_flow_error_set(error, ENOTSUP,
1933 RTE_FLOW_ERROR_TYPE_ITEM,
1934 NULL, "item not supported");
1936 item_flags |= last_item;
/* ---- Actions ---- */
1938 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1939 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1940 return rte_flow_error_set(error, ENOTSUP,
1941 RTE_FLOW_ERROR_TYPE_ACTION,
1942 actions, "too many actions");
1943 switch (actions->type) {
1944 case RTE_FLOW_ACTION_TYPE_VOID:
1946 case RTE_FLOW_ACTION_TYPE_FLAG:
1947 ret = mlx5_flow_validate_action_flag(action_flags,
1951 action_flags |= MLX5_FLOW_ACTION_FLAG;
1954 case RTE_FLOW_ACTION_TYPE_MARK:
1955 ret = mlx5_flow_validate_action_mark(actions,
1960 action_flags |= MLX5_FLOW_ACTION_MARK;
1963 case RTE_FLOW_ACTION_TYPE_DROP:
1964 ret = mlx5_flow_validate_action_drop(action_flags,
1968 action_flags |= MLX5_FLOW_ACTION_DROP;
1971 case RTE_FLOW_ACTION_TYPE_QUEUE:
1972 ret = mlx5_flow_validate_action_queue(actions,
1977 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1980 case RTE_FLOW_ACTION_TYPE_RSS:
1981 ret = mlx5_flow_validate_action_rss(actions,
1987 action_flags |= MLX5_FLOW_ACTION_RSS;
1990 case RTE_FLOW_ACTION_TYPE_COUNT:
1991 ret = flow_dv_validate_action_count(dev, error);
1994 action_flags |= MLX5_FLOW_ACTION_COUNT;
1997 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1998 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1999 ret = flow_dv_validate_action_l2_encap(action_flags,
2004 action_flags |= actions->type ==
2005 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2006 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2007 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2010 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2011 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2012 ret = flow_dv_validate_action_l2_decap(action_flags,
2016 action_flags |= actions->type ==
2017 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2018 MLX5_FLOW_ACTION_VXLAN_DECAP :
2019 MLX5_FLOW_ACTION_NVGRE_DECAP;
2022 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2023 ret = flow_dv_validate_action_raw_encap(action_flags,
2028 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2031 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2032 ret = flow_dv_validate_action_raw_decap(action_flags,
2037 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2040 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2041 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2042 ret = flow_dv_validate_action_modify_mac(action_flags,
2048 /* Count all modify-header actions as one action. */
2049 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2051 action_flags |= actions->type ==
2052 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2053 MLX5_FLOW_ACTION_SET_MAC_SRC :
2054 MLX5_FLOW_ACTION_SET_MAC_DST;
2057 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2058 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2059 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2065 /* Count all modify-header actions as one action. */
2066 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2068 action_flags |= actions->type ==
2069 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2070 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2071 MLX5_FLOW_ACTION_SET_IPV4_DST;
2073 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2074 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2075 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2081 /* Count all modify-header actions as one action. */
2082 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2084 action_flags |= actions->type ==
2085 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2086 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2087 MLX5_FLOW_ACTION_SET_IPV6_DST;
2089 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2090 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2091 ret = flow_dv_validate_action_modify_tp(action_flags,
2097 /* Count all modify-header actions as one action. */
2098 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2100 action_flags |= actions->type ==
2101 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2102 MLX5_FLOW_ACTION_SET_TP_SRC :
2103 MLX5_FLOW_ACTION_SET_TP_DST;
2105 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2106 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2107 ret = flow_dv_validate_action_modify_ttl(action_flags,
2113 /* Count all modify-header actions as one action. */
2114 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2116 action_flags |= actions->type ==
2117 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2118 MLX5_FLOW_ACTION_SET_TTL :
2119 MLX5_FLOW_ACTION_DEC_TTL;
2121 case RTE_FLOW_ACTION_TYPE_JUMP:
2122 ret = flow_dv_validate_action_jump(actions,
2123 attr->group, error);
2127 action_flags |= MLX5_FLOW_ACTION_JUMP;
2130 return rte_flow_error_set(error, ENOTSUP,
2131 RTE_FLOW_ERROR_TYPE_ACTION,
2133 "action not supported");
/* Ingress flows must terminate in a fate action (queue/rss/drop...). */
2136 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2137 return rte_flow_error_set(error, EINVAL,
2138 RTE_FLOW_ERROR_TYPE_ACTION, actions,
2139 "no fate action is found");
2144 * Internal preparation function. Allocates the DV flow size,
2145 * this size is constant.
2148 * Pointer to the flow attributes.
2150 * Pointer to the list of items.
2151 * @param[in] actions
2152 * Pointer to the list of actions.
2154 * Pointer to the error structure.
2157 * Pointer to mlx5_flow object on success,
2158 * otherwise NULL and rte_errno is set.
2160 static struct mlx5_flow *
2161 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2162 const struct rte_flow_item items[] __rte_unused,
2163 const struct rte_flow_action actions[] __rte_unused,
2164 struct rte_flow_error *error)
2166 uint32_t size = sizeof(struct mlx5_flow);
2167 struct mlx5_flow *flow;
2169 flow = rte_calloc(__func__, 1, size, 0);
2171 rte_flow_error_set(error, ENOMEM,
2172 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2173 "not enough memory to create flow");
2176 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2182 * Sanity check for match mask and value. Similar to check_valid_spec() in
2183 * kernel driver. If unmasked bit is present in value, it returns failure.
2186 * pointer to match mask buffer.
2187 * @param match_value
2188 * pointer to match value buffer.
2191 * 0 if valid, -EINVAL otherwise.
/*
 * Byte-wise sanity check mirroring the kernel's check_valid_spec(): every
 * bit set in the match value must also be set in the mask; log the first
 * offending offset.
 */
2194 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2196 uint8_t *m = match_mask;
2197 uint8_t *v = match_value;
2200 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2203 "match_value differs from match_criteria"
2204 " %p[%u] != %p[%u]",
2205 match_value, i, match_mask, i);
2214 * Add Ethernet item to matcher and to the value.
2216 * @param[in, out] matcher
2218 * @param[in, out] key
2219 * Flow matcher value.
2221 * Flow pattern to translate.
2223 * Item is inner pattern.
/*
 * Translate an ETH item into the fte_match_param mask (matcher) and value
 * (key), selecting inner vs outer headers.  Values are ANDed with the mask
 * so they stay within the matched range.
 * NOTE(review): NULL spec/mask handling (default nic_mask substitution)
 * appears to live in elided lines -- verify before relying on eth_m/eth_v
 * being non-NULL here.  The "ð_m" tokens are mojibake of "&eth_m" from
 * extraction; kept byte-identical.
 */
2226 flow_dv_translate_item_eth(void *matcher, void *key,
2227 const struct rte_flow_item *item, int inner)
2229 const struct rte_flow_item_eth *eth_m = item->mask;
2230 const struct rte_flow_item_eth *eth_v = item->spec;
2231 const struct rte_flow_item_eth nic_mask = {
2232 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2233 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2234 .type = RTE_BE16(0xffff),
2246 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2248 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers)
2250 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2252 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2254 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2255 ð_m->dst, sizeof(eth_m->dst));
2256 /* The value must be in the range of the mask. */
2257 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2258 for (i = 0; i < sizeof(eth_m->dst); ++i)
2259 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2260 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2261 ð_m->src, sizeof(eth_m->src));
2262 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2263 /* The value must be in the range of the mask. */
2264 for (i = 0; i < sizeof(eth_m->dst); ++i)
2265 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2266 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2267 rte_be_to_cpu_16(eth_m->type));
2268 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2269 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2273 * Add VLAN item to matcher and to the value.
2275 * @param[in, out] matcher
2277 * @param[in, out] key
2278 * Flow matcher value.
2280 * Flow pattern to translate.
2282 * Item is inner pattern.
/*
 * Translate a VLAN item: set cvlan_tag and split the TCI into VID (12 bits),
 * CFI (bit 12) and priority (bits 13-15) in both mask and value.
 * NOTE(review): NULL spec/mask defaults appear in elided lines; visible code
 * kept byte-identical.
 */
2285 flow_dv_translate_item_vlan(void *matcher, void *key,
2286 const struct rte_flow_item *item,
2289 const struct rte_flow_item_vlan *vlan_m = item->mask;
2290 const struct rte_flow_item_vlan *vlan_v = item->spec;
2291 const struct rte_flow_item_vlan nic_mask = {
2292 .tci = RTE_BE16(0x0fff),
2293 .inner_type = RTE_BE16(0xffff),
2305 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2307 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2309 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2311 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Value is ANDed with the mask to stay within the matched range. */
2313 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2314 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2315 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2317 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2318 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2319 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2320 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2321 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2322 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2326 * Add IPV4 item to matcher and to the value.
2328 * @param[in, out] matcher
2330 * @param[in, out] key
2331 * Flow matcher value.
2333 * Flow pattern to translate.
2335 * Item is inner pattern.
2337 * The group to insert the rule.
/*
 * Translate an IPv4 item: set ip_version (full mask in non-zero groups),
 * then src/dst addresses, TOS (split into ECN + DSCP fields) and the
 * next protocol, ANDing each value with its mask.
 * NOTE(review): NULL spec/mask defaults appear in elided lines; visible code
 * kept byte-identical.
 */
2340 flow_dv_translate_item_ipv4(void *matcher, void *key,
2341 const struct rte_flow_item *item,
2342 int inner, uint32_t group)
2344 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2345 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2346 const struct rte_flow_item_ipv4 nic_mask = {
2348 .src_addr = RTE_BE32(0xffffffff),
2349 .dst_addr = RTE_BE32(0xffffffff),
2350 .type_of_service = 0xff,
2351 .next_proto_id = 0xff,
2361 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2363 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2365 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2367 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Group 0 uses a nibble ip_version mask; other groups match exactly 4. */
2370 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2372 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2373 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2378 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2379 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2380 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2381 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2382 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2383 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2384 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2385 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2386 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2387 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2388 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2389 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte splits into ECN (low 2 bits) and DSCP (high 6 bits). */
2390 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2391 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2392 ipv4_m->hdr.type_of_service);
2393 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2394 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2395 ipv4_m->hdr.type_of_service >> 2);
2396 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2397 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2398 ipv4_m->hdr.next_proto_id);
2399 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2400 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2404 * Add IPV6 item to matcher and to the value.
2406 * @param[in, out] matcher
2408 * @param[in, out] key
2409 * Flow matcher value.
2411 * Flow pattern to translate.
2413 * Item is inner pattern.
2415 * The group to insert the rule.
/*
 * Translate an IPv6 item: ip_version, 128-bit src/dst addresses (byte-wise
 * value = mask & spec), ECN/DSCP from vtc_flow, flow label (in the misc
 * parameters, inner or outer field) and the next header protocol.
 * NOTE(review): NULL spec/mask defaults and some intermediate assignments
 * are elided; visible code kept byte-identical.
 */
2418 flow_dv_translate_item_ipv6(void *matcher, void *key,
2419 const struct rte_flow_item *item,
2420 int inner, uint32_t group)
2422 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2423 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2424 const struct rte_flow_item_ipv6 nic_mask = {
2427 "\xff\xff\xff\xff\xff\xff\xff\xff"
2428 "\xff\xff\xff\xff\xff\xff\xff\xff",
2430 "\xff\xff\xff\xff\xff\xff\xff\xff"
2431 "\xff\xff\xff\xff\xff\xff\xff\xff",
2432 .vtc_flow = RTE_BE32(0xffffffff),
2439 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2440 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2449 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2451 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2453 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2455 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Group 0 uses a nibble ip_version mask; other groups match exactly 6. */
2458 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2460 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2461 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2466 size = sizeof(ipv6_m->hdr.dst_addr);
2467 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2468 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2469 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2470 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2471 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2472 for (i = 0; i < size; ++i)
2473 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2474 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2475 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2476 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2477 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2478 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2479 for (i = 0; i < size; ++i)
2480 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow: version(4) | traffic class(8) | flow label(20). */
2482 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2483 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2484 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2485 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2486 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2487 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label lives in the misc parameters, inner vs outer field. */
2490 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2492 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2495 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2497 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2501 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2503 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2504 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2508 * Add TCP item to matcher and to the value.
2510 * @param[in, out] matcher
2512 * @param[in, out] key
2513 * Flow matcher value.
2515 * Flow pattern to translate.
2517 * Item is inner pattern.
/*
 * Translate a TCP item: pin ip_protocol to IPPROTO_TCP, then match
 * source/destination ports (value ANDed with mask).  A NULL mask falls
 * back to rte_flow_item_tcp_mask.
 */
2520 flow_dv_translate_item_tcp(void *matcher, void *key,
2521 const struct rte_flow_item *item,
2524 const struct rte_flow_item_tcp *tcp_m = item->mask;
2525 const struct rte_flow_item_tcp *tcp_v = item->spec;
2530 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2532 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2534 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2536 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2538 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2539 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2543 tcp_m = &rte_flow_item_tcp_mask;
2544 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2545 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2546 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2547 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2548 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2549 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2550 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2551 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2555 * Add UDP item to matcher and to the value.
2557 * @param[in, out] matcher
2559 * @param[in, out] key
2560 * Flow matcher value.
2562 * Flow pattern to translate.
2564 * Item is inner pattern.
2567 flow_dv_translate_item_udp(void *matcher, void *key,
2568 const struct rte_flow_item *item,
2571 const struct rte_flow_item_udp *udp_m = item->mask;
2572 const struct rte_flow_item_udp *udp_v = item->spec;
2577 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2579 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2581 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2583 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2585 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2586 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2590 udp_m = &rte_flow_item_udp_mask;
2591 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2592 rte_be_to_cpu_16(udp_m->hdr.src_port));
2593 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2594 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2595 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2596 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2597 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2598 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2602 * Add GRE item to matcher and to the value.
2604 * @param[in, out] matcher
2606 * @param[in, out] key
2607 * Flow matcher value.
2609 * Flow pattern to translate.
2611 * Item is inner pattern.
2614 flow_dv_translate_item_gre(void *matcher, void *key,
2615 const struct rte_flow_item *item,
2618 const struct rte_flow_item_gre *gre_m = item->mask;
2619 const struct rte_flow_item_gre *gre_v = item->spec;
2622 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2623 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2626 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2628 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2630 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2632 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2634 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2635 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2639 gre_m = &rte_flow_item_gre_mask;
2640 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2641 rte_be_to_cpu_16(gre_m->protocol));
2642 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2643 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2647 * Add NVGRE item to matcher and to the value.
2649 * @param[in, out] matcher
2651 * @param[in, out] key
2652 * Flow matcher value.
2654 * Flow pattern to translate.
2656 * Item is inner pattern.
2659 flow_dv_translate_item_nvgre(void *matcher, void *key,
2660 const struct rte_flow_item *item,
2663 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2664 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2665 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2666 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2667 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2668 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2674 flow_dv_translate_item_gre(matcher, key, item, inner);
2678 nvgre_m = &rte_flow_item_nvgre_mask;
2679 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2680 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2681 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2682 memcpy(gre_key_m, tni_flow_id_m, size);
2683 for (i = 0; i < size; ++i)
2684 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2688 * Add VXLAN item to matcher and to the value.
2690 * @param[in, out] matcher
2692 * @param[in, out] key
2693 * Flow matcher value.
2695 * Flow pattern to translate.
2697 * Item is inner pattern.
2700 flow_dv_translate_item_vxlan(void *matcher, void *key,
2701 const struct rte_flow_item *item,
2704 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2705 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2708 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2709 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2717 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2719 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2721 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2723 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2725 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2726 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2727 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2728 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2729 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2734 vxlan_m = &rte_flow_item_vxlan_mask;
2735 size = sizeof(vxlan_m->vni);
2736 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2737 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2738 memcpy(vni_m, vxlan_m->vni, size);
2739 for (i = 0; i < size; ++i)
2740 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
/**
 * Add MPLS item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 *
 * NOTE(review): recovered from a mangled extraction; several statements
 * are elided — verify against the pristine source before relying on it.
 */
2758 flow_dv_translate_item_mpls(void *matcher, void *key,
2759 const struct rte_flow_item *item,
2760 uint64_t prev_layer,
2763 const uint32_t *in_mpls_m = item->mask;
2764 const uint32_t *in_mpls_v = item->spec;
2765 uint32_t *out_mpls_m = 0;
2766 uint32_t *out_mpls_v = 0;
2767 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2768 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2769 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2771 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2772 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2773 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First constrain the carrying protocol according to the previous layer. */
2775 switch (prev_layer) {
2776 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2777 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2778 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2779 MLX5_UDP_PORT_MPLS);
2781 case MLX5_FLOW_LAYER_GRE:
2782 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2783 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2787 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2788 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Fall back to the default item mask when none was given. */
2795 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Pick the misc2 MPLS slot that corresponds to the carrying protocol. */
2796 switch (prev_layer) {
2797 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2799 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2800 outer_first_mpls_over_udp);
2802 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2803 outer_first_mpls_over_udp);
2805 case MLX5_FLOW_LAYER_GRE:
2807 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2808 outer_first_mpls_over_gre);
2810 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2811 outer_first_mpls_over_gre);
2814 /* Inner MPLS not over GRE is not supported. */
2817 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2821 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Program the 32-bit MPLS word: mask as-is, value ANDed with the mask. */
2827 if (out_mpls_m && out_mpls_v) {
2828 *out_mpls_m = *in_mpls_m;
2829 *out_mpls_v = *in_mpls_v & *in_mpls_m;
2834 * Add META item to matcher
2836 * @param[in, out] matcher
2838 * @param[in, out] key
2839 * Flow matcher value.
2841 * Flow pattern to translate.
2843 * Item is inner pattern.
2846 flow_dv_translate_item_meta(void *matcher, void *key,
2847 const struct rte_flow_item *item)
2849 const struct rte_flow_item_meta *meta_m;
2850 const struct rte_flow_item_meta *meta_v;
2852 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2854 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2856 meta_m = (const void *)item->mask;
2858 meta_m = &rte_flow_item_meta_mask;
2859 meta_v = (const void *)item->spec;
2861 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2862 rte_be_to_cpu_32(meta_m->data));
2863 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2864 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/* All-zero reference buffer used to test whether a criteria header is unset. */
2868 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/*
 * Evaluate to non-zero when the given headers sub-struct of match_criteria
 * is entirely zero (compared against matcher_zero above).
 */
2870 #define HEADER_IS_ZERO(match_criteria, headers) \
2871 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2872 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2875 * Calculate flow matcher enable bitmap.
2877 * @param match_criteria
2878 * Pointer to flow matcher criteria.
2881 * Bitmap of enabled fields.
2884 flow_dv_matcher_enable(uint32_t *match_criteria)
2886 uint8_t match_criteria_enable;
2888 match_criteria_enable =
2889 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2890 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2891 match_criteria_enable |=
2892 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2893 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2894 match_criteria_enable |=
2895 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2896 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2897 match_criteria_enable |=
2898 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2899 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2900 #ifdef HAVE_MLX5DV_DR
2901 match_criteria_enable |=
2902 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2903 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2905 return match_criteria_enable;
2912 * @param dev[in, out]
2913 * Pointer to rte_eth_dev structure.
2914 * @param[in] table_id
2917 * Direction of the table.
2919 * pointer to error structure.
2922 * Returns tables resource based on the index, NULL in case of failed.
2924 static struct mlx5_flow_tbl_resource *
2925 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2926 uint32_t table_id, uint8_t egress,
2927 struct rte_flow_error *error)
2929 struct mlx5_priv *priv = dev->data->dev_private;
2930 struct mlx5_ibv_shared *sh = priv->sh;
2931 struct mlx5_flow_tbl_resource *tbl;
2933 #ifdef HAVE_MLX5DV_DR
2935 tbl = &sh->tx_tbl[table_id];
2937 tbl->obj = mlx5_glue->dr_create_flow_tbl
2938 (sh->tx_ns, table_id);
2940 tbl = &sh->rx_tbl[table_id];
2942 tbl->obj = mlx5_glue->dr_create_flow_tbl
2943 (sh->rx_ns, table_id);
2946 rte_flow_error_set(error, ENOMEM,
2947 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2948 NULL, "cannot create table");
2951 rte_atomic32_inc(&tbl->refcnt);
2957 return &sh->tx_tbl[table_id];
2959 return &sh->rx_tbl[table_id];
2964 * Release a flow table.
2967 * Table resource to be released.
2970 * Returns 0 if table was released, else return 1;
2973 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2977 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2978 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2986 * Register the flow matcher.
2988 * @param dev[in, out]
2989 * Pointer to rte_eth_dev structure.
2990 * @param[in, out] matcher
2991 * Pointer to flow matcher.
2992 * @parm[in, out] dev_flow
2993 * Pointer to the dev_flow.
2995 * pointer to error structure.
2998 * 0 on success otherwise -errno and errno is set.
3001 flow_dv_matcher_register(struct rte_eth_dev *dev,
3002 struct mlx5_flow_dv_matcher *matcher,
3003 struct mlx5_flow *dev_flow,
3004 struct rte_flow_error *error)
3006 struct mlx5_priv *priv = dev->data->dev_private;
3007 struct mlx5_ibv_shared *sh = priv->sh;
3008 struct mlx5_flow_dv_matcher *cache_matcher;
3009 struct mlx5dv_flow_matcher_attr dv_attr = {
3010 .type = IBV_FLOW_ATTR_NORMAL,
3011 .match_mask = (void *)&matcher->mask,
3013 struct mlx5_flow_tbl_resource *tbl = NULL;
3015 /* Lookup from cache. */
3016 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3017 if (matcher->crc == cache_matcher->crc &&
3018 matcher->priority == cache_matcher->priority &&
3019 matcher->egress == cache_matcher->egress &&
3020 matcher->group == cache_matcher->group &&
3021 !memcmp((const void *)matcher->mask.buf,
3022 (const void *)cache_matcher->mask.buf,
3023 cache_matcher->mask.size)) {
3025 "priority %hd use %s matcher %p: refcnt %d++",
3026 cache_matcher->priority,
3027 cache_matcher->egress ? "tx" : "rx",
3028 (void *)cache_matcher,
3029 rte_atomic32_read(&cache_matcher->refcnt));
3030 rte_atomic32_inc(&cache_matcher->refcnt);
3031 dev_flow->dv.matcher = cache_matcher;
3035 /* Register new matcher. */
3036 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3038 return rte_flow_error_set(error, ENOMEM,
3039 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3040 "cannot allocate matcher memory");
3041 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3042 matcher->egress, error);
3044 rte_free(cache_matcher);
3045 return rte_flow_error_set(error, ENOMEM,
3046 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3047 NULL, "cannot create table");
3049 *cache_matcher = *matcher;
3050 dv_attr.match_criteria_enable =
3051 flow_dv_matcher_enable(cache_matcher->mask.buf);
3052 dv_attr.priority = matcher->priority;
3053 if (matcher->egress)
3054 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3055 cache_matcher->matcher_object =
3056 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3057 if (!cache_matcher->matcher_object) {
3058 rte_free(cache_matcher);
3059 #ifdef HAVE_MLX5DV_DR
3060 flow_dv_tbl_resource_release(tbl);
3062 return rte_flow_error_set(error, ENOMEM,
3063 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3064 NULL, "cannot create matcher");
3066 rte_atomic32_inc(&cache_matcher->refcnt);
3067 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3068 dev_flow->dv.matcher = cache_matcher;
3069 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3070 cache_matcher->priority,
3071 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3072 rte_atomic32_read(&cache_matcher->refcnt));
3073 rte_atomic32_inc(&tbl->refcnt);
3078 * Add source vport match to the specified matcher.
3080 * @param[in, out] matcher
3082 * @param[in, out] key
3083 * Flow matcher value.
3085 * Source vport value to match
3090 flow_dv_translate_source_vport(void *matcher, void *key,
3091 int16_t port, uint16_t mask)
3093 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3094 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3096 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3097 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3101 * Find existing tag resource or create and register a new one.
3103 * @param dev[in, out]
3104 * Pointer to rte_eth_dev structure.
3105 * @param[in, out] resource
3106 * Pointer to tag resource.
3107 * @parm[in, out] dev_flow
3108 * Pointer to the dev_flow.
3110 * pointer to error structure.
3113 * 0 on success otherwise -errno and errno is set.
3116 flow_dv_tag_resource_register
3117 (struct rte_eth_dev *dev,
3118 struct mlx5_flow_dv_tag_resource *resource,
3119 struct mlx5_flow *dev_flow,
3120 struct rte_flow_error *error)
3122 struct mlx5_priv *priv = dev->data->dev_private;
3123 struct mlx5_ibv_shared *sh = priv->sh;
3124 struct mlx5_flow_dv_tag_resource *cache_resource;
3126 /* Lookup a matching resource from cache. */
3127 LIST_FOREACH(cache_resource, &sh->tags, next) {
3128 if (resource->tag == cache_resource->tag) {
3129 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3130 (void *)cache_resource,
3131 rte_atomic32_read(&cache_resource->refcnt));
3132 rte_atomic32_inc(&cache_resource->refcnt);
3133 dev_flow->flow->tag_resource = cache_resource;
3137 /* Register new resource. */
3138 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3139 if (!cache_resource)
3140 return rte_flow_error_set(error, ENOMEM,
3141 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3142 "cannot allocate resource memory");
3143 *cache_resource = *resource;
3144 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3146 if (!cache_resource->action) {
3147 rte_free(cache_resource);
3148 return rte_flow_error_set(error, ENOMEM,
3149 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3150 NULL, "cannot create action");
3152 rte_atomic32_init(&cache_resource->refcnt);
3153 rte_atomic32_inc(&cache_resource->refcnt);
3154 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3155 dev_flow->flow->tag_resource = cache_resource;
3156 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3157 (void *)cache_resource,
3158 rte_atomic32_read(&cache_resource->refcnt));
3166 * Pointer to Ethernet device.
3168 * Pointer to mlx5_flow.
3171 * 1 while a reference on it exists, 0 when freed.
3174 flow_dv_tag_release(struct rte_eth_dev *dev,
3175 struct mlx5_flow_dv_tag_resource *tag)
3178 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3179 dev->data->port_id, (void *)tag,
3180 rte_atomic32_read(&tag->refcnt));
3181 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3182 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3183 LIST_REMOVE(tag, next);
3184 DRV_LOG(DEBUG, "port %u tag %p: removed",
3185 dev->data->port_id, (void *)tag);
/**
 * Fill the flow with DV spec - translate the generic rte_flow actions and
 * items into the device-specific matcher mask/value and action array.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 *
 * NOTE(review): recovered from a mangled extraction; error paths and break
 * statements are elided — verify against the pristine source before use.
 */
3212 flow_dv_translate(struct rte_eth_dev *dev,
3213 struct mlx5_flow *dev_flow,
3214 const struct rte_flow_attr *attr,
3215 const struct rte_flow_item items[],
3216 const struct rte_flow_action actions[],
3217 struct rte_flow_error *error)
3219 struct mlx5_priv *priv = dev->data->dev_private;
3220 struct rte_flow *flow = dev_flow->flow;
3221 uint64_t item_flags = 0;
3222 uint64_t last_item = 0;
3223 uint64_t action_flags = 0;
3224 uint64_t priority = attr->priority;
3225 struct mlx5_flow_dv_matcher matcher = {
3227 .size = sizeof(matcher.mask.buf),
3231 bool actions_end = false;
3232 struct mlx5_flow_dv_modify_hdr_resource res = {
3233 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3234 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3236 union flow_dv_attr flow_attr = { .attr = 0 };
3237 struct mlx5_flow_dv_tag_resource tag_resource;
3239 if (priority == MLX5_FLOW_PRIO_RSVD)
3240 priority = priv->config.flow_prio - 1;
/* First pass: translate every action into dev_flow->dv.actions[]. */
3241 for (; !actions_end ; actions++) {
3242 const struct rte_flow_action_queue *queue;
3243 const struct rte_flow_action_rss *rss;
3244 const struct rte_flow_action *action = actions;
3245 const struct rte_flow_action_count *count = action->conf;
3246 const uint8_t *rss_key;
3247 const struct rte_flow_action_jump *jump_data;
3248 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3249 struct mlx5_flow_tbl_resource *tbl;
3251 switch (actions->type) {
3252 case RTE_FLOW_ACTION_TYPE_VOID:
3254 case RTE_FLOW_ACTION_TYPE_FLAG:
3256 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3257 if (!flow->tag_resource)
3258 if (flow_dv_tag_resource_register
3259 (dev, &tag_resource, dev_flow, error))
3261 dev_flow->dv.actions[actions_n++] =
3262 flow->tag_resource->action;
3263 action_flags |= MLX5_FLOW_ACTION_FLAG;
3265 case RTE_FLOW_ACTION_TYPE_MARK:
3266 tag_resource.tag = mlx5_flow_mark_set
3267 (((const struct rte_flow_action_mark *)
3268 (actions->conf))->id);
3269 if (!flow->tag_resource)
3270 if (flow_dv_tag_resource_register
3271 (dev, &tag_resource, dev_flow, error))
3273 dev_flow->dv.actions[actions_n++] =
3274 flow->tag_resource->action;
3275 action_flags |= MLX5_FLOW_ACTION_MARK;
3277 case RTE_FLOW_ACTION_TYPE_DROP:
3278 action_flags |= MLX5_FLOW_ACTION_DROP;
3280 case RTE_FLOW_ACTION_TYPE_QUEUE:
3281 queue = actions->conf;
3282 flow->rss.queue_num = 1;
3283 (*flow->queue)[0] = queue->index;
3284 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3286 case RTE_FLOW_ACTION_TYPE_RSS:
3287 rss = actions->conf;
3289 memcpy((*flow->queue), rss->queue,
3290 rss->queue_num * sizeof(uint16_t));
3291 flow->rss.queue_num = rss->queue_num;
3292 /* NULL RSS key indicates default RSS key. */
3293 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3294 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3295 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3296 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3297 flow->rss.level = rss->level;
3298 action_flags |= MLX5_FLOW_ACTION_RSS;
3300 case RTE_FLOW_ACTION_TYPE_COUNT:
3301 if (!priv->config.devx) {
3302 rte_errno = ENOTSUP;
3305 flow->counter = flow_dv_counter_new(dev, count->shared,
3307 if (flow->counter == NULL)
3309 dev_flow->dv.actions[actions_n++] =
3310 flow->counter->action;
3311 action_flags |= MLX5_FLOW_ACTION_COUNT;
3314 if (rte_errno == ENOTSUP)
3315 return rte_flow_error_set
3317 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3319 "count action not supported");
3321 return rte_flow_error_set
3323 RTE_FLOW_ERROR_TYPE_ACTION,
3325 "cannot create counter"
3327 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3328 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3329 if (flow_dv_create_action_l2_encap(dev, actions,
3332 dev_flow->dv.actions[actions_n++] =
3333 dev_flow->dv.encap_decap->verbs_action;
3334 action_flags |= actions->type ==
3335 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3336 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3337 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3339 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3340 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3341 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3344 dev_flow->dv.actions[actions_n++] =
3345 dev_flow->dv.encap_decap->verbs_action;
3346 action_flags |= actions->type ==
3347 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3348 MLX5_FLOW_ACTION_VXLAN_DECAP :
3349 MLX5_FLOW_ACTION_NVGRE_DECAP;
3351 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3352 /* Handle encap with preceding decap. */
3353 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3354 if (flow_dv_create_action_raw_encap
3355 (dev, actions, dev_flow, attr, error))
3357 dev_flow->dv.actions[actions_n++] =
3358 dev_flow->dv.encap_decap->verbs_action;
3360 /* Handle encap without preceding decap. */
3361 if (flow_dv_create_action_l2_encap(dev, actions,
3365 dev_flow->dv.actions[actions_n++] =
3366 dev_flow->dv.encap_decap->verbs_action;
3368 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3370 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3371 /* Check if this decap is followed by encap. */
3372 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3373 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3376 /* Handle decap only if it isn't followed by encap. */
3377 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3378 if (flow_dv_create_action_l2_decap(dev,
3382 dev_flow->dv.actions[actions_n++] =
3383 dev_flow->dv.encap_decap->verbs_action;
3385 /* If decap is followed by encap, handle it at encap. */
3386 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3388 case RTE_FLOW_ACTION_TYPE_JUMP:
3389 jump_data = action->conf;
3390 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3392 attr->egress, error);
3394 return rte_flow_error_set
3396 RTE_FLOW_ERROR_TYPE_ACTION,
3398 "cannot create jump action.");
3399 jump_tbl_resource.tbl = tbl;
3400 if (flow_dv_jump_tbl_resource_register
3401 (dev, &jump_tbl_resource, dev_flow, error)) {
3402 flow_dv_tbl_resource_release(tbl);
3403 return rte_flow_error_set
3405 RTE_FLOW_ERROR_TYPE_ACTION,
3407 "cannot create jump action.");
3409 dev_flow->dv.actions[actions_n++] =
3410 dev_flow->dv.jump->action;
3411 action_flags |= MLX5_FLOW_ACTION_JUMP;
3413 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3414 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3415 if (flow_dv_convert_action_modify_mac(&res, actions,
3418 action_flags |= actions->type ==
3419 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3420 MLX5_FLOW_ACTION_SET_MAC_SRC :
3421 MLX5_FLOW_ACTION_SET_MAC_DST;
3423 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3424 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3425 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3428 action_flags |= actions->type ==
3429 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3430 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3431 MLX5_FLOW_ACTION_SET_IPV4_DST;
3433 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3434 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3435 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3438 action_flags |= actions->type ==
3439 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3440 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3441 MLX5_FLOW_ACTION_SET_IPV6_DST;
3443 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3444 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3445 if (flow_dv_convert_action_modify_tp(&res, actions,
3449 action_flags |= actions->type ==
3450 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3451 MLX5_FLOW_ACTION_SET_TP_SRC :
3452 MLX5_FLOW_ACTION_SET_TP_DST;
3454 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3455 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3459 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3461 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3462 if (flow_dv_convert_action_modify_ttl(&res, actions,
3466 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3468 case RTE_FLOW_ACTION_TYPE_END:
3470 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3471 /* create modify action if needed. */
3472 if (flow_dv_modify_hdr_resource_register
3477 dev_flow->dv.actions[actions_n++] =
3478 dev_flow->dv.modify_hdr->verbs_action;
3485 dev_flow->dv.actions_n = actions_n;
3486 flow->actions = action_flags;
3487 if (attr->ingress && !attr->transfer &&
3488 (priv->representor || priv->master)) {
3489 /* It was validated - we support unidirection flows only. */
3490 assert(!attr->egress);
/*
 * Add matching on source vport index only for ingress rules
 * in E-Switch configurations.
 */
3495 flow_dv_translate_source_vport(matcher.mask.buf,
3496 dev_flow->dv.value.buf,
/* Second pass: translate pattern items into the matcher mask and value. */
3500 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3501 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3502 void *match_mask = matcher.mask.buf;
3503 void *match_value = dev_flow->dv.value.buf;
3505 switch (items->type) {
3506 case RTE_FLOW_ITEM_TYPE_ETH:
3507 flow_dv_translate_item_eth(match_mask, match_value,
3509 matcher.priority = MLX5_PRIORITY_MAP_L2;
3510 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3511 MLX5_FLOW_LAYER_OUTER_L2;
3513 case RTE_FLOW_ITEM_TYPE_VLAN:
3514 flow_dv_translate_item_vlan(match_mask, match_value,
3516 matcher.priority = MLX5_PRIORITY_MAP_L2;
3517 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3518 MLX5_FLOW_LAYER_INNER_VLAN) :
3519 (MLX5_FLOW_LAYER_OUTER_L2 |
3520 MLX5_FLOW_LAYER_OUTER_VLAN);
3522 case RTE_FLOW_ITEM_TYPE_IPV4:
3523 flow_dv_translate_item_ipv4(match_mask, match_value,
3524 items, tunnel, attr->group);
3525 matcher.priority = MLX5_PRIORITY_MAP_L3;
3526 dev_flow->dv.hash_fields |=
3527 mlx5_flow_hashfields_adjust
3529 MLX5_IPV4_LAYER_TYPES,
3530 MLX5_IPV4_IBV_RX_HASH);
3531 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3532 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3534 case RTE_FLOW_ITEM_TYPE_IPV6:
3535 flow_dv_translate_item_ipv6(match_mask, match_value,
3536 items, tunnel, attr->group);
3537 matcher.priority = MLX5_PRIORITY_MAP_L3;
3538 dev_flow->dv.hash_fields |=
3539 mlx5_flow_hashfields_adjust
3541 MLX5_IPV6_LAYER_TYPES,
3542 MLX5_IPV6_IBV_RX_HASH);
3543 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3544 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3546 case RTE_FLOW_ITEM_TYPE_TCP:
3547 flow_dv_translate_item_tcp(match_mask, match_value,
3549 matcher.priority = MLX5_PRIORITY_MAP_L4;
3550 dev_flow->dv.hash_fields |=
3551 mlx5_flow_hashfields_adjust
3552 (dev_flow, tunnel, ETH_RSS_TCP,
3553 IBV_RX_HASH_SRC_PORT_TCP |
3554 IBV_RX_HASH_DST_PORT_TCP);
3555 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3556 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3558 case RTE_FLOW_ITEM_TYPE_UDP:
3559 flow_dv_translate_item_udp(match_mask, match_value,
3561 matcher.priority = MLX5_PRIORITY_MAP_L4;
3562 dev_flow->dv.hash_fields |=
3563 mlx5_flow_hashfields_adjust
3564 (dev_flow, tunnel, ETH_RSS_UDP,
3565 IBV_RX_HASH_SRC_PORT_UDP |
3566 IBV_RX_HASH_DST_PORT_UDP);
3567 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3568 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3570 case RTE_FLOW_ITEM_TYPE_GRE:
3571 flow_dv_translate_item_gre(match_mask, match_value,
3573 last_item = MLX5_FLOW_LAYER_GRE;
3575 case RTE_FLOW_ITEM_TYPE_NVGRE:
3576 flow_dv_translate_item_nvgre(match_mask, match_value,
3578 last_item = MLX5_FLOW_LAYER_GRE;
3580 case RTE_FLOW_ITEM_TYPE_VXLAN:
3581 flow_dv_translate_item_vxlan(match_mask, match_value,
3583 last_item = MLX5_FLOW_LAYER_VXLAN;
3585 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3586 flow_dv_translate_item_vxlan(match_mask, match_value,
3588 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3590 case RTE_FLOW_ITEM_TYPE_MPLS:
3591 flow_dv_translate_item_mpls(match_mask, match_value,
3592 items, last_item, tunnel);
3593 last_item = MLX5_FLOW_LAYER_MPLS;
3595 case RTE_FLOW_ITEM_TYPE_META:
3596 flow_dv_translate_item_meta(match_mask, match_value,
3598 last_item = MLX5_FLOW_ITEM_METADATA;
3603 item_flags |= last_item;
3605 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3606 dev_flow->dv.value.buf));
3607 dev_flow->layers = item_flags;
3608 /* Register matcher. */
3609 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3611 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3613 matcher.egress = attr->egress;
3614 matcher.group = attr->group;
3615 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 *
 * NOTE(review): recovered from a mangled extraction; error-handling lines
 * are elided — verify against the pristine source before relying on it.
 */
3634 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3635 struct rte_flow_error *error)
3637 struct mlx5_flow_dv *dv;
3638 struct mlx5_flow *dev_flow;
/* Create a destination action plus the HW flow for each sub (dev) flow. */
3642 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3645 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3646 dv->hrxq = mlx5_hrxq_drop_new(dev);
3650 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3651 "cannot get drop hash queue");
3655 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3657 } else if (flow->actions &
3658 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3659 struct mlx5_hrxq *hrxq;
/* Reuse an existing hash Rx queue when one matches, else create one. */
3661 hrxq = mlx5_hrxq_get(dev, flow->key,
3662 MLX5_RSS_HASH_KEY_LEN,
3665 flow->rss.queue_num);
3667 hrxq = mlx5_hrxq_new
3668 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3669 dv->hash_fields, (*flow->queue),
3670 flow->rss.queue_num,
3671 !!(dev_flow->layers &
3672 MLX5_FLOW_LAYER_TUNNEL));
3676 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3677 "cannot get hash queue");
3682 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3686 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3687 (void *)&dv->value, n,
3690 rte_flow_error_set(error, errno,
3691 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3693 "hardware refuses to create flow");
/* Error path: release every hash Rx queue acquired above. */
3699 err = rte_errno; /* Save rte_errno before cleanup. */
3700 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3701 struct mlx5_flow_dv *dv = &dev_flow->dv;
3703 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3704 mlx5_hrxq_drop_release(dev);
3706 mlx5_hrxq_release(dev, dv->hrxq);
3710 rte_errno = err; /* Restore rte_errno. */
3715 * Release the flow matcher.
3718 * Pointer to Ethernet device.
3720 * Pointer to mlx5_flow.
3723 * 1 while a reference on it exists, 0 when freed.
3726 flow_dv_matcher_release(struct rte_eth_dev *dev,
3727 struct mlx5_flow *flow)
3729 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3730 struct mlx5_priv *priv = dev->data->dev_private;
3731 struct mlx5_ibv_shared *sh = priv->sh;
3732 struct mlx5_flow_tbl_resource *tbl;
3734 assert(matcher->matcher_object);
3735 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3736 dev->data->port_id, (void *)matcher,
3737 rte_atomic32_read(&matcher->refcnt));
3738 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3739 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3740 (matcher->matcher_object));
3741 LIST_REMOVE(matcher, next);
3742 if (matcher->egress)
3743 tbl = &sh->tx_tbl[matcher->group];
3745 tbl = &sh->rx_tbl[matcher->group];
3746 flow_dv_tbl_resource_release(tbl);
3748 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3749 dev->data->port_id, (void *)matcher);
3756 * Release an encap/decap resource.
3759 * Pointer to mlx5_flow.
3762 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached encap/decap resource; on the
 * last reference destroy the verbs action, unlink the cache entry and
 * free it.  Per the header comment: returns 1 while a reference exists,
 * 0 when freed.
 *
 * NOTE(review): opening brace and trailing return/closing brace are
 * missing from this extraction.
 */
3765 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3767 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3768 flow->dv.encap_decap;
3770 assert(cache_resource->verbs_action);
3771 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3772 (void *)cache_resource,
3773 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the verbs action and free the cache entry. */
3774 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3775 claim_zero(mlx5_glue->destroy_flow_action
3776 (cache_resource->verbs_action));
3777 LIST_REMOVE(cache_resource, next);
3778 rte_free(cache_resource);
3779 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3780 (void *)cache_resource);
3787 * Release a jump-to-table action resource.
3790 * Pointer to mlx5_flow.
3793 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's jump-to-table resource; on the last
 * reference destroy the action, unlink the cache entry, release the
 * destination table and free the entry.  Per the header comment: returns
 * 1 while a reference exists, 0 when freed.
 *
 * NOTE(review): the initializer's right-hand side (orig. line ~3799,
 * presumably flow->dv.jump — confirm against upstream) and the
 * opening/closing braces are missing from this extraction.
 */
3796 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3798 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3801 assert(cache_resource->action);
3802 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3803 (void *)cache_resource,
3804 rte_atomic32_read(&cache_resource->refcnt));
3805 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3806 claim_zero(mlx5_glue->destroy_flow_action
3807 (cache_resource->action));
3808 LIST_REMOVE(cache_resource, next);
/* The jump target table holds its own refcount; release it too. */
3809 flow_dv_tbl_resource_release(cache_resource->tbl);
3810 rte_free(cache_resource);
3811 DRV_LOG(DEBUG, "jump table resource %p: removed",
3812 (void *)cache_resource);
3819 * Release a modify-header resource.
3822 * Pointer to mlx5_flow.
3825 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's modify-header resource; on the last
 * reference destroy the verbs action, unlink the cache entry and free
 * it.  Per the header comment: returns 1 while a reference exists, 0
 * when freed.
 *
 * NOTE(review): opening brace and trailing return/closing brace are
 * missing from this extraction.
 */
3828 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3830 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3831 flow->dv.modify_hdr;
3833 assert(cache_resource->verbs_action);
3834 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3835 (void *)cache_resource,
3836 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the verbs action and free the cache entry. */
3837 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3838 claim_zero(mlx5_glue->destroy_flow_action
3839 (cache_resource->verbs_action));
3840 LIST_REMOVE(cache_resource, next);
3841 rte_free(cache_resource);
3842 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3843 (void *)cache_resource);
3850 * Remove the flow from the NIC but keep it in memory.
3853 * Pointer to Ethernet device.
3854 * @param[in, out] flow
3855 * Pointer to flow structure.
/*
 * Detach the flow from the NIC: for every device sub-flow destroy the
 * installed DV flow rule and release its hash Rx queue (the drop queue
 * when the flow carries MLX5_FLOW_ACTION_DROP, the regular hrxq
 * otherwise).  The rte_flow structure itself stays allocated.
 *
 * NOTE(review): heavily extraction-garbled — the guards around
 * dv_destroy_flow/hrxq release (orig. lines 3866-3871, 3874) and the
 * pointer resets are missing from this view.
 */
3858 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3860 struct mlx5_flow_dv *dv;
3861 struct mlx5_flow *dev_flow;
3865 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3868 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3872 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3873 mlx5_hrxq_drop_release(dev);
3875 mlx5_hrxq_release(dev, dv->hrxq);
3882 * Remove the flow from the NIC and the memory.
3885 * Pointer to the Ethernet device structure.
3886 * @param[in, out] flow
3887 * Pointer to flow structure.
/*
 * Fully destroy the flow: first detach it from the NIC via
 * flow_dv_remove(), then release the counter and tag resources, and
 * finally unlink and release every device sub-flow together with its
 * matcher, encap/decap, modify-header and jump resources.
 *
 * NOTE(review): extraction gaps — opening brace, the closing braces of
 * the if/while bodies and the rte_free of each dev_flow (orig. line
 * ~3916) are missing from this view.
 */
3890 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3892 struct mlx5_flow *dev_flow;
3896 flow_dv_remove(dev, flow);
/* Release per-flow resources; NULL the pointers to prevent reuse. */
3897 if (flow->counter) {
3898 flow_dv_counter_release(flow->counter);
3899 flow->counter = NULL;
3901 if (flow->tag_resource) {
3902 flow_dv_tag_release(dev, flow->tag_resource);
3903 flow->tag_resource = NULL;
/* Drain the sub-flow list, releasing each cached DV resource. */
3905 while (!LIST_EMPTY(&flow->dev_flows)) {
3906 dev_flow = LIST_FIRST(&flow->dev_flows);
3907 LIST_REMOVE(dev_flow, next);
3908 if (dev_flow->dv.matcher)
3909 flow_dv_matcher_release(dev, dev_flow);
3910 if (dev_flow->dv.encap_decap)
3911 flow_dv_encap_decap_resource_release(dev_flow);
3912 if (dev_flow->dv.modify_hdr)
3913 flow_dv_modify_hdr_resource_release(dev_flow);
3914 if (dev_flow->dv.jump)
3915 flow_dv_jump_tbl_resource_release(dev_flow);
3921 * Query a dv flow rule for its statistics via devx.
3924 * Pointer to Ethernet device.
3926 * Pointer to the sub flow.
3928 * data retrieved by the query.
3930 * Perform verbose error reporting if not NULL.
3933 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query the flow's DevX counter and report hits/bytes deltas since the
 * last query into *data (a struct rte_flow_query_count).  Requires DevX
 * support (priv->config.devx); errors out with ENOTSUP otherwise, and
 * with EINVAL when the flow has no counter attached.
 *
 * NOTE(review): extraction gaps — the declarations of err/pkts/bytes
 * (orig. lines ~3941-3943), several braces and the success return are
 * missing from this view.
 */
3936 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3937 void *data, struct rte_flow_error *error)
3939 struct mlx5_priv *priv = dev->data->dev_private;
3940 struct rte_flow_query_count *qc = data;
3945 if (!priv->config.devx)
3946 return rte_flow_error_set(error, ENOTSUP,
3947 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3949 "counters are not supported");
3950 if (flow->counter) {
/* Read absolute HW counters; qc->reset asks HW to clear them. */
3951 err = mlx5_devx_cmd_flow_counter_query
3952 (flow->counter->dcs,
3953 qc->reset, &pkts, &bytes);
3955 return rte_flow_error_set
3957 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3959 "cannot read counters");
/* Report deltas since the last snapshot, then refresh the snapshot. */
3962 qc->hits = pkts - flow->counter->hits;
3963 qc->bytes = bytes - flow->counter->bytes;
3965 flow->counter->hits = pkts;
3966 flow->counter->bytes = bytes;
3970 return rte_flow_error_set(error, EINVAL,
3971 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3973 "counters are not available");
3979 * @see rte_flow_query()
/*
 * rte_flow query entry point for the DV driver: walk the action list,
 * dispatch COUNT actions to flow_dv_query_count(), skip VOID actions and
 * reject anything else with ENOTSUP.
 *
 * NOTE(review): extraction gaps — the declaration of ret, the break
 * statements and the final return (orig. lines ~3988-3990, 3994, 3997-
 * 3998, 4003-4006) are missing from this view.
 */
3983 flow_dv_query(struct rte_eth_dev *dev,
3984 struct rte_flow *flow __rte_unused,
3985 const struct rte_flow_action *actions __rte_unused,
3986 void *data __rte_unused,
3987 struct rte_flow_error *error __rte_unused)
3991 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3992 switch (actions->type) {
3993 case RTE_FLOW_ACTION_TYPE_VOID:
3995 case RTE_FLOW_ACTION_TYPE_COUNT:
3996 ret = flow_dv_query_count(dev, flow, data, error);
3999 return rte_flow_error_set(error, ENOTSUP,
4000 RTE_FLOW_ERROR_TYPE_ACTION,
4002 "action not supported");
4009 * Mutex-protected thunk to flow_dv_translate().
/*
 * Mutex-protected thunk to flow_dv_translate(): takes the shared-context
 * lock around the translation call so concurrent ports sharing one IB
 * context do not race.
 *
 * NOTE(review): the declaration of ret and the return statement are
 * missing from this extraction.
 */
4012 flow_d_translate(struct rte_eth_dev *dev,
4013 struct mlx5_flow *dev_flow,
4014 const struct rte_flow_attr *attr,
4015 const struct rte_flow_item items[],
4016 const struct rte_flow_action actions[],
4017 struct rte_flow_error *error)
4021 flow_d_shared_lock(dev);
4022 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4023 flow_d_shared_unlock(dev);
4028 * Mutex-protected thunk to flow_dv_apply().
/*
 * Mutex-protected thunk to flow_dv_apply(): serializes HW flow
 * installation under the shared-context lock.
 *
 * NOTE(review): the declaration of ret and the return statement are
 * missing from this extraction.
 */
4031 flow_d_apply(struct rte_eth_dev *dev,
4032 struct rte_flow *flow,
4033 struct rte_flow_error *error)
4037 flow_d_shared_lock(dev);
4038 ret = flow_dv_apply(dev, flow, error);
4039 flow_d_shared_unlock(dev);
4044 * Mutex-protected thunk to flow_dv_remove().
/*
 * Mutex-protected thunk to flow_dv_remove(): serializes HW flow removal
 * under the shared-context lock.  Void return — nothing to propagate.
 */
4047 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4049 flow_d_shared_lock(dev);
4050 flow_dv_remove(dev, flow);
4051 flow_d_shared_unlock(dev);
4055 * Mutex-protected thunk to flow_dv_destroy().
/*
 * Mutex-protected thunk to flow_dv_destroy(): serializes full flow
 * teardown under the shared-context lock.  Void return.
 */
4058 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4060 flow_d_shared_lock(dev);
4061 flow_dv_destroy(dev, flow);
4062 flow_d_shared_unlock(dev);
/*
 * Driver-ops vtable exported by the DV flow engine.  validate/prepare/
 * query go straight to the flow_dv_* implementations; translate/apply/
 * remove/destroy go through the flow_d_* shared-lock thunks above.
 *
 * NOTE(review): the closing "};" (orig. line ~4073) is missing from this
 * extraction.
 */
4065 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4066 .validate = flow_dv_validate,
4067 .prepare = flow_dv_prepare,
4068 .translate = flow_d_translate,
4069 .apply = flow_d_apply,
4070 .remove = flow_d_remove,
4071 .destroy = flow_d_destroy,
4072 .query = flow_dv_query,
4075 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */