1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
65 * Initialize flow attributes structure according to flow items' types.
68 * Pointer to item specification.
70 * Pointer to flow attributes structure.
/*
 * Walk the pattern item list and record which L3/L4 headers it contains
 * in the flow attributes union, so later modify-header conversion can pick
 * the matching field table.
 * NOTE(review): the case bodies that set the attr bits, and the switch
 * statement itself, are elided in this excerpt — confirm against the full
 * source.
 */
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
75 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
77 case RTE_FLOW_ITEM_TYPE_IPV4:
80 case RTE_FLOW_ITEM_TYPE_IPV6:
83 case RTE_FLOW_ITEM_TYPE_UDP:
86 case RTE_FLOW_ITEM_TYPE_TCP:
/*
 * Descriptor for one header field that a modify-header command can touch.
 * Arrays of these (modify_eth/ipv4/ipv6/udp/tcp below) are terminated by a
 * zero-size entry.
 */
96 struct field_modify_info {
97 uint32_t size; /* Size of field in protocol header, in bytes. */
98 uint32_t offset; /* Offset of field in protocol header, in bytes. */
99 enum mlx5_modification_field id; /* Hardware field id for the command. */
/* Modifiable fields of the Ethernet header: DMAC and SMAC, each split into
 * a 4-byte high part and a 2-byte low part as required by the hardware. */
102 struct field_modify_info modify_eth[] = {
103 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
104 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
105 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
106 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* Modifiable fields of the IPv4 header: TTL (1 byte at offset 8),
 * source and destination addresses. */
110 struct field_modify_info modify_ipv4[] = {
111 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
112 {4, 12, MLX5_MODI_OUT_SIPV4},
113 {4, 16, MLX5_MODI_OUT_DIPV4},
/* Modifiable fields of the IPv6 header: hop limit, then the 128-bit source
 * and destination addresses each split into four 32-bit segments. */
117 struct field_modify_info modify_ipv6[] = {
118 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
119 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
120 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
121 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
122 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
123 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
124 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
125 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
126 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* Modifiable fields of the UDP header: source and destination ports. */
130 struct field_modify_info modify_udp[] = {
131 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
132 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* Modifiable fields of the TCP header: source and destination ports. */
136 struct field_modify_info modify_tcp[] = {
137 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
138 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
143 * Acquire the synchronizing object to protect multithreaded access
144 * to shared dv context. Lock occurs only if context is actually
145 * shared, i.e. we have multiport IB device and representors are
149 * Pointer to the rte_eth_dev structure.
/*
 * Take the shared-context mutex, but only when the IB context is actually
 * shared between ports (dv_refcnt > 1); single-port devices skip the lock.
 * NOTE(review): the handling of pthread_mutex_lock()'s return value is
 * elided in this excerpt.
 */
152 flow_d_shared_lock(struct rte_eth_dev *dev)
154 struct mlx5_priv *priv = dev->data->dev_private;
155 struct mlx5_ibv_shared *sh = priv->sh;
157 if (sh->dv_refcnt > 1) {
160 ret = pthread_mutex_lock(&sh->dv_mutex);
/*
 * Counterpart of flow_d_shared_lock(): release the shared-context mutex
 * when the context is shared (dv_refcnt > 1).
 * NOTE(review): the handling of pthread_mutex_unlock()'s return value is
 * elided in this excerpt.
 */
167 flow_d_shared_unlock(struct rte_eth_dev *dev)
169 struct mlx5_priv *priv = dev->data->dev_private;
170 struct mlx5_ibv_shared *sh = priv->sh;
172 if (sh->dv_refcnt > 1) {
175 ret = pthread_mutex_unlock(&sh->dv_mutex);
182 * Convert modify-header action to DV specification.
185 * Pointer to item specification.
187 * Pointer to field modification information.
188 * @param[in,out] resource
189 * Pointer to the modify-header resource.
191 * Type of modification.
193 * Pointer to the error structure.
196 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Translate an (spec, mask) item pair into hardware modification commands,
 * appending one command per non-zero mask segment of the field table to
 * resource->actions.  Fails with EINVAL when the command array would
 * overflow (MLX5_MODIFY_NUM) or when the mask selects nothing.
 * NOTE(review): the loop increments (field++/++i), the mask-segment test,
 * the loop closing braces and the final "return 0" are elided in this
 * excerpt.
 */
199 flow_dv_convert_modify_action(struct rte_flow_item *item,
200 struct field_modify_info *field,
201 struct mlx5_flow_dv_modify_hdr_resource *resource,
203 struct rte_flow_error *error)
205 uint32_t i = resource->actions_num;
206 struct mlx5_modification_cmd *actions = resource->actions;
207 const uint8_t *spec = item->spec;
208 const uint8_t *mask = item->mask;
211 while (field->size) {
213 /* Generate modify command for each mask segment. */
214 memcpy(&set, &mask[field->offset], field->size);
216 if (i >= MLX5_MODIFY_NUM)
217 return rte_flow_error_set(error, EINVAL,
218 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
219 "too many items to modify");
220 actions[i].action_type = type;
221 actions[i].field = field->id;
/* Length 0 encodes a full 4-byte field; otherwise length is in bits. */
222 actions[i].length = field->size ==
223 4 ? 0 : field->size * 8;
/* Data is right-aligned within the 4-byte command data word. */
224 rte_memcpy(&actions[i].data[4 - field->size],
225 &spec[field->offset], field->size);
226 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
229 if (resource->actions_num != i)
230 resource->actions_num = i;
233 if (!resource->actions_num)
234 return rte_flow_error_set(error, EINVAL,
235 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
236 "invalid modification flow item");
241 * Convert modify-header set IPv4 address action to DV specification.
243 * @param[in,out] resource
244 * Pointer to the modify-header resource.
246 * Pointer to action specification.
248 * Pointer to the error structure.
251 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv4 item (spec + mask) from a set-IPv4-address action
 * and feed it to flow_dv_convert_modify_action() with the IPv4 field table.
 * SET_IPV4_SRC writes the source address; any other type (the else branch
 * is elided here) writes the destination address.
 */
254 flow_dv_convert_action_modify_ipv4
255 (struct mlx5_flow_dv_modify_hdr_resource *resource,
256 const struct rte_flow_action *action,
257 struct rte_flow_error *error)
259 const struct rte_flow_action_set_ipv4 *conf =
260 (const struct rte_flow_action_set_ipv4 *)(action->conf);
261 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
262 struct rte_flow_item_ipv4 ipv4;
263 struct rte_flow_item_ipv4 ipv4_mask;
265 memset(&ipv4, 0, sizeof(ipv4));
266 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
267 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
268 ipv4.hdr.src_addr = conf->ipv4_addr;
269 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
271 ipv4.hdr.dst_addr = conf->ipv4_addr;
272 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
275 item.mask = &ipv4_mask;
276 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
277 MLX5_MODIFICATION_TYPE_SET, error);
281 * Convert modify-header set IPv6 address action to DV specification.
283 * @param[in,out] resource
284 * Pointer to the modify-header resource.
286 * Pointer to action specification.
288 * Pointer to the error structure.
291 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv6 item (spec + mask) from a set-IPv6-address action
 * and feed it to flow_dv_convert_modify_action() with the IPv6 field table.
 * SET_IPV6_SRC writes the source address; any other type (the else branch
 * is elided here) writes the destination address.
 */
294 flow_dv_convert_action_modify_ipv6
295 (struct mlx5_flow_dv_modify_hdr_resource *resource,
296 const struct rte_flow_action *action,
297 struct rte_flow_error *error)
299 const struct rte_flow_action_set_ipv6 *conf =
300 (const struct rte_flow_action_set_ipv6 *)(action->conf);
301 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
302 struct rte_flow_item_ipv6 ipv6;
303 struct rte_flow_item_ipv6 ipv6_mask;
305 memset(&ipv6, 0, sizeof(ipv6));
306 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
307 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
308 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
309 sizeof(ipv6.hdr.src_addr));
310 memcpy(&ipv6_mask.hdr.src_addr,
311 &rte_flow_item_ipv6_mask.hdr.src_addr,
312 sizeof(ipv6.hdr.src_addr));
314 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
315 sizeof(ipv6.hdr.dst_addr));
316 memcpy(&ipv6_mask.hdr.dst_addr,
317 &rte_flow_item_ipv6_mask.hdr.dst_addr,
318 sizeof(ipv6.hdr.dst_addr));
321 item.mask = &ipv6_mask;
322 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
323 MLX5_MODIFICATION_TYPE_SET, error);
327 * Convert modify-header set MAC address action to DV specification.
329 * @param[in,out] resource
330 * Pointer to the modify-header resource.
332 * Pointer to action specification.
334 * Pointer to the error structure.
337 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic ETH item (spec + mask) from a set-MAC-address action
 * and feed it to flow_dv_convert_modify_action() with the Ethernet field
 * table.  SET_MAC_SRC writes the source MAC; any other type (the else
 * branch is elided here) writes the destination MAC.
 * FIX(review): the "&eth" / "&eth_mask" address-of expressions had been
 * corrupted into the mojibake character "ð" (a mis-decoded "&eth" HTML
 * entity); restored to valid C.
 */
340 flow_dv_convert_action_modify_mac
341 (struct mlx5_flow_dv_modify_hdr_resource *resource,
342 const struct rte_flow_action *action,
343 struct rte_flow_error *error)
345 const struct rte_flow_action_set_mac *conf =
346 (const struct rte_flow_action_set_mac *)(action->conf);
347 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
348 struct rte_flow_item_eth eth;
349 struct rte_flow_item_eth eth_mask;
351 memset(&eth, 0, sizeof(eth));
352 memset(&eth_mask, 0, sizeof(eth_mask));
353 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
354 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
355 sizeof(eth.src.addr_bytes));
356 memcpy(&eth_mask.src.addr_bytes,
357 &rte_flow_item_eth_mask.src.addr_bytes,
358 sizeof(eth_mask.src.addr_bytes));
360 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
361 sizeof(eth.dst.addr_bytes));
362 memcpy(&eth_mask.dst.addr_bytes,
363 &rte_flow_item_eth_mask.dst.addr_bytes,
364 sizeof(eth_mask.dst.addr_bytes));
367 item.mask = &eth_mask;
368 return flow_dv_convert_modify_action(&item, modify_eth, resource,
369 MLX5_MODIFICATION_TYPE_SET, error);
373 * Convert modify-header set TP action to DV specification.
375 * @param[in,out] resource
376 * Pointer to the modify-header resource.
378 * Pointer to action specification.
380 * Pointer to rte_flow_item objects list.
382 * Pointer to flow attributes structure.
384 * Pointer to the error structure.
387 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic UDP or TCP item from a set-transport-port action and
 * feed it to flow_dv_convert_modify_action().  The pattern attributes
 * (initialized from @items via flow_dv_attr_init()) decide whether the UDP
 * or the TCP field table is used; the branch selecting between the two is
 * elided in this excerpt.  SET_TP_SRC writes the source port, otherwise
 * the destination port is written (else branches elided).
 */
390 flow_dv_convert_action_modify_tp
391 (struct mlx5_flow_dv_modify_hdr_resource *resource,
392 const struct rte_flow_action *action,
393 const struct rte_flow_item *items,
394 union flow_dv_attr *attr,
395 struct rte_flow_error *error)
397 const struct rte_flow_action_set_tp *conf =
398 (const struct rte_flow_action_set_tp *)(action->conf);
399 struct rte_flow_item item;
400 struct rte_flow_item_udp udp;
401 struct rte_flow_item_udp udp_mask;
402 struct rte_flow_item_tcp tcp;
403 struct rte_flow_item_tcp tcp_mask;
404 struct field_modify_info *field;
407 flow_dv_attr_init(items, attr);
/* UDP variant. */
409 memset(&udp, 0, sizeof(udp));
410 memset(&udp_mask, 0, sizeof(udp_mask));
411 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
412 udp.hdr.src_port = conf->port;
413 udp_mask.hdr.src_port =
414 rte_flow_item_udp_mask.hdr.src_port;
416 udp.hdr.dst_port = conf->port;
417 udp_mask.hdr.dst_port =
418 rte_flow_item_udp_mask.hdr.dst_port;
420 item.type = RTE_FLOW_ITEM_TYPE_UDP;
422 item.mask = &udp_mask;
/* TCP variant. */
426 memset(&tcp, 0, sizeof(tcp));
427 memset(&tcp_mask, 0, sizeof(tcp_mask));
428 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
429 tcp.hdr.src_port = conf->port;
430 tcp_mask.hdr.src_port =
431 rte_flow_item_tcp_mask.hdr.src_port;
433 tcp.hdr.dst_port = conf->port;
434 tcp_mask.hdr.dst_port =
435 rte_flow_item_tcp_mask.hdr.dst_port;
437 item.type = RTE_FLOW_ITEM_TYPE_TCP;
439 item.mask = &tcp_mask;
442 return flow_dv_convert_modify_action(&item, field, resource,
443 MLX5_MODIFICATION_TYPE_SET, error);
447 * Convert modify-header set TTL action to DV specification.
449 * @param[in,out] resource
450 * Pointer to the modify-header resource.
452 * Pointer to action specification.
454 * Pointer to rte_flow_item objects list.
456 * Pointer to flow attributes structure.
458 * Pointer to the error structure.
461 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv4 or IPv6 item from a set-TTL action and feed it to
 * flow_dv_convert_modify_action() as a SET command on TTL/hop-limit.  The
 * pattern attributes (set via flow_dv_attr_init()) choose between the IPv4
 * and IPv6 variants; the selecting branch is elided in this excerpt.
 */
464 flow_dv_convert_action_modify_ttl
465 (struct mlx5_flow_dv_modify_hdr_resource *resource,
466 const struct rte_flow_action *action,
467 const struct rte_flow_item *items,
468 union flow_dv_attr *attr,
469 struct rte_flow_error *error)
471 const struct rte_flow_action_set_ttl *conf =
472 (const struct rte_flow_action_set_ttl *)(action->conf);
473 struct rte_flow_item item;
474 struct rte_flow_item_ipv4 ipv4;
475 struct rte_flow_item_ipv4 ipv4_mask;
476 struct rte_flow_item_ipv6 ipv6;
477 struct rte_flow_item_ipv6 ipv6_mask;
478 struct field_modify_info *field;
481 flow_dv_attr_init(items, attr);
/* IPv4 variant: set time_to_live, mask the full byte. */
483 memset(&ipv4, 0, sizeof(ipv4));
484 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
485 ipv4.hdr.time_to_live = conf->ttl_value;
486 ipv4_mask.hdr.time_to_live = 0xFF;
487 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
489 item.mask = &ipv4_mask;
/* IPv6 variant: set hop_limits, mask the full byte. */
493 memset(&ipv6, 0, sizeof(ipv6));
494 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
495 ipv6.hdr.hop_limits = conf->ttl_value;
496 ipv6_mask.hdr.hop_limits = 0xFF;
497 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
499 item.mask = &ipv6_mask;
502 return flow_dv_convert_modify_action(&item, field, resource,
503 MLX5_MODIFICATION_TYPE_SET, error);
507 * Convert modify-header decrement TTL action to DV specification.
509 * @param[in,out] resource
510 * Pointer to the modify-header resource.
512 * Pointer to action specification.
514 * Pointer to rte_flow_item objects list.
516 * Pointer to flow attributes structure.
518 * Pointer to the error structure.
521 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv4 or IPv6 item for a decrement-TTL action.  Unlike
 * the set-TTL variant this emits an ADD command: the 0xFF "value" acts as
 * a -1 addend on the TTL/hop-limit byte.  The pattern attributes choose
 * between IPv4 and IPv6; the selecting branch is elided in this excerpt.
 */
524 flow_dv_convert_action_modify_dec_ttl
525 (struct mlx5_flow_dv_modify_hdr_resource *resource,
526 const struct rte_flow_item *items,
527 union flow_dv_attr *attr,
528 struct rte_flow_error *error)
530 struct rte_flow_item item;
531 struct rte_flow_item_ipv4 ipv4;
532 struct rte_flow_item_ipv4 ipv4_mask;
533 struct rte_flow_item_ipv6 ipv6;
534 struct rte_flow_item_ipv6 ipv6_mask;
535 struct field_modify_info *field;
538 flow_dv_attr_init(items, attr);
/* IPv4 variant: 0xFF spec = add -1 to time_to_live. */
540 memset(&ipv4, 0, sizeof(ipv4));
541 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
542 ipv4.hdr.time_to_live = 0xFF;
543 ipv4_mask.hdr.time_to_live = 0xFF;
544 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
546 item.mask = &ipv4_mask;
/* IPv6 variant: 0xFF spec = add -1 to hop_limits. */
550 memset(&ipv6, 0, sizeof(ipv6));
551 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
552 ipv6.hdr.hop_limits = 0xFF;
553 ipv6_mask.hdr.hop_limits = 0xFF;
554 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
556 item.mask = &ipv6_mask;
559 return flow_dv_convert_modify_action(&item, field, resource,
560 MLX5_MODIFICATION_TYPE_ADD, error);
564 * Validate META item.
567 * Pointer to the rte_eth_dev structure.
569 * Item specification.
571 * Attributes of flow that includes this item.
573 * Pointer to error structure.
576 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a META pattern item: the port must have the Tx metadata-match
 * offload enabled, the spec/data must be present and non-zero, and the
 * mask must be acceptable against the full 32-bit NIC mask.
 * NOTE(review): the guarding if-conditions in front of several error
 * returns (missing spec, zero data, the mlx5_flow_item_acceptable result
 * check, the egress-only attribute check) are elided in this excerpt.
 */
579 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
580 const struct rte_flow_item *item,
581 const struct rte_flow_attr *attr,
582 struct rte_flow_error *error)
584 const struct rte_flow_item_meta *spec = item->spec;
585 const struct rte_flow_item_meta *mask = item->mask;
586 const struct rte_flow_item_meta nic_mask = {
587 .data = RTE_BE32(UINT32_MAX)
590 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
592 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
593 return rte_flow_error_set(error, EPERM,
594 RTE_FLOW_ERROR_TYPE_ITEM,
596 "match on metadata offload "
597 "configuration is off for this port");
599 return rte_flow_error_set(error, EINVAL,
600 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
602 "data cannot be empty");
604 return rte_flow_error_set(error, EINVAL,
605 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
607 "data cannot be zero");
/* Default to the generic META mask when the user supplied none. */
609 mask = &rte_flow_item_meta_mask;
610 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
611 (const uint8_t *)&nic_mask,
612 sizeof(struct rte_flow_item_meta),
617 return rte_flow_error_set(error, ENOTSUP,
618 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
620 "pattern not supported for ingress");
625 * Validate vport item.
628 * Pointer to the rte_eth_dev structure.
630 * Item specification.
632 * Attributes of flow that includes this item.
633 * @param[in] item_flags
634 * Bit-fields that holds the items detected until now.
636 * Pointer to error structure.
639 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a PORT_ID pattern item: allowed only on transfer (E-Switch)
 * flows, at most once per flow, with a full (0xffffffff) id mask, and the
 * referenced port must belong to the same E-Switch domain as @dev.
 * NOTE(review): several guarding conditions (transfer check, mask/spec
 * defaulting, mlx5_flow_item_acceptable result check) are elided in this
 * excerpt.
 */
642 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
643 const struct rte_flow_item *item,
644 const struct rte_flow_attr *attr,
646 struct rte_flow_error *error)
648 const struct rte_flow_item_port_id *spec = item->spec;
649 const struct rte_flow_item_port_id *mask = item->mask;
650 const struct rte_flow_item_port_id switch_mask = {
653 uint16_t esw_domain_id;
654 uint16_t item_port_esw_domain_id;
658 return rte_flow_error_set(error, EINVAL,
659 RTE_FLOW_ERROR_TYPE_ITEM,
661 "match on port id is valid only"
662 " when transfer flag is enabled");
663 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
664 return rte_flow_error_set(error, ENOTSUP,
665 RTE_FLOW_ERROR_TYPE_ITEM, item,
666 "multiple source ports are not"
670 if (mask->id != 0xffffffff)
671 return rte_flow_error_set(error, ENOTSUP,
672 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
674 "no support for partial mask on"
676 ret = mlx5_flow_item_acceptable
677 (item, (const uint8_t *)mask,
678 (const uint8_t *)&rte_flow_item_port_id_mask,
679 sizeof(struct rte_flow_item_port_id),
/* Resolve E-Switch domains of both the matched port and this device. */
685 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
688 return rte_flow_error_set(error, -ret,
689 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
690 "failed to obtain E-Switch info for"
692 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
693 &esw_domain_id, NULL);
695 return rte_flow_error_set(error, -ret,
696 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
698 "failed to obtain E-Switch info");
699 if (item_port_esw_domain_id != esw_domain_id)
700 return rte_flow_error_set(error, -ret,
701 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
702 "cannot match on a port from a"
703 " different E-Switch");
708 * Validate count action.
713 * Pointer to error structure.
716 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a COUNT action: supported only when DevX is enabled in the
 * device configuration and the build has DevX flow counters
 * (HAVE_IBV_FLOW_DEVX_COUNTERS); otherwise fail with "count action not
 * supported".  NOTE(review): the goto/return wiring between the devx
 * check, the #ifdef branch and the error return is elided in this excerpt.
 */
719 flow_dv_validate_action_count(struct rte_eth_dev *dev,
720 struct rte_flow_error *error)
722 struct mlx5_priv *priv = dev->data->dev_private;
724 if (!priv->config.devx)
726 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
730 return rte_flow_error_set
732 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
734 "count action not supported");
738 * Validate the L2 encap action.
740 * @param[in] action_flags
741 * Holds the actions detected until now.
743 * Pointer to the encap action.
745 * Pointer to flow attributes
747 * Pointer to error structure.
750 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an L2 encap action against the actions seen so far: requires a
 * configuration, cannot coexist with DROP, only one encap/decap per flow,
 * and plain ingress (non-transfer) flows cannot encap.
 * NOTE(review): the if-condition guarding the "configuration cannot be
 * null" return (presumably !action->conf) is elided in this excerpt.
 */
753 flow_dv_validate_action_l2_encap(uint64_t action_flags,
754 const struct rte_flow_action *action,
755 const struct rte_flow_attr *attr,
756 struct rte_flow_error *error)
759 return rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ACTION, action,
761 "configuration cannot be null");
762 if (action_flags & MLX5_FLOW_ACTION_DROP)
763 return rte_flow_error_set(error, EINVAL,
764 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
765 "can't drop and encap in same flow");
766 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
767 return rte_flow_error_set(error, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
769 "can only have a single encap or"
770 " decap action in a flow");
771 if (!attr->transfer && attr->ingress)
772 return rte_flow_error_set(error, ENOTSUP,
773 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
775 "encap action not supported for "
781 * Validate the L2 decap action.
783 * @param[in] action_flags
784 * Holds the actions detected until now.
786 * Pointer to flow attributes
788 * Pointer to error structure.
791 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an L2 decap action against the actions seen so far: cannot
 * coexist with DROP, only one encap/decap per flow, cannot follow a
 * modify-header action, and egress flows cannot decap.
 * NOTE(review): the if-condition guarding the final egress error return is
 * elided in this excerpt.
 */
794 flow_dv_validate_action_l2_decap(uint64_t action_flags,
795 const struct rte_flow_attr *attr,
796 struct rte_flow_error *error)
798 if (action_flags & MLX5_FLOW_ACTION_DROP)
799 return rte_flow_error_set(error, EINVAL,
800 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
801 "can't drop and decap in same flow");
802 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
803 return rte_flow_error_set(error, EINVAL,
804 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
805 "can only have a single encap or"
806 " decap action in a flow");
807 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
808 return rte_flow_error_set(error, EINVAL,
809 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
810 "can't have decap action after"
813 return rte_flow_error_set(error, ENOTSUP,
814 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
816 "decap action not supported for "
822 * Validate the raw encap action.
824 * @param[in] action_flags
825 * Holds the actions detected until now.
827 * Pointer to the encap action.
829 * Pointer to flow attributes
831 * Pointer to error structure.
834 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a RAW_ENCAP action: requires a configuration, cannot coexist
 * with DROP, at most one encap per flow, and on plain ingress flows raw
 * encap is only allowed when preceded by a raw decap.
 * NOTE(review): the if-condition guarding the "configuration cannot be
 * null" return (presumably !action->conf) is elided in this excerpt.
 */
837 flow_dv_validate_action_raw_encap(uint64_t action_flags,
838 const struct rte_flow_action *action,
839 const struct rte_flow_attr *attr,
840 struct rte_flow_error *error)
843 return rte_flow_error_set(error, EINVAL,
844 RTE_FLOW_ERROR_TYPE_ACTION, action,
845 "configuration cannot be null");
846 if (action_flags & MLX5_FLOW_ACTION_DROP)
847 return rte_flow_error_set(error, EINVAL,
848 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
849 "can't drop and encap in same flow");
850 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
851 return rte_flow_error_set(error, EINVAL,
852 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
853 "can only have a single encap"
854 " action in a flow");
855 /* encap without preceding decap is not supported for ingress */
856 if (!attr->transfer && attr->ingress &&
857 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
858 return rte_flow_error_set(error, ENOTSUP,
859 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
861 "encap action not supported for "
867 * Validate the raw decap action.
869 * @param[in] action_flags
870 * Holds the actions detected until now.
872 * Pointer to the encap action.
874 * Pointer to flow attributes
876 * Pointer to error structure.
879 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a RAW_DECAP action: cannot coexist with DROP, cannot come after
 * an encap or another decap, cannot follow a modify-header action, and on
 * egress is only valid when a RAW_ENCAP follows later in the action list
 * (the list is scanned forward to check).
 * NOTE(review): the egress guard in front of the scan loop and the loop's
 * increment/closing are elided in this excerpt.
 */
882 flow_dv_validate_action_raw_decap(uint64_t action_flags,
883 const struct rte_flow_action *action,
884 const struct rte_flow_attr *attr,
885 struct rte_flow_error *error)
887 if (action_flags & MLX5_FLOW_ACTION_DROP)
888 return rte_flow_error_set(error, EINVAL,
889 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
890 "can't drop and decap in same flow");
891 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
892 return rte_flow_error_set(error, EINVAL,
893 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
894 "can't have encap action before"
896 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
897 return rte_flow_error_set(error, EINVAL,
898 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
899 "can only have a single decap"
900 " action in a flow");
901 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
902 return rte_flow_error_set(error, EINVAL,
903 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
904 "can't have decap action after"
906 /* decap action is valid on egress only if it is followed by encap */
908 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
909 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
912 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
913 return rte_flow_error_set
915 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
916 NULL, "decap action not supported"
923 * Find existing encap/decap resource or create and register a new one.
925 * @param dev[in, out]
926 * Pointer to rte_eth_dev structure.
927 * @param[in, out] resource
928 * Pointer to encap/decap resource.
929 * @parm[in, out] dev_flow
930 * Pointer to the dev_flow.
932 * pointer to error structure.
935 * 0 on success otherwise -errno and errno is set.
/*
 * Look up an encap/decap reformat resource matching @resource in the
 * shared-context cache; on a hit, bump its refcount and attach it to
 * @dev_flow.  On a miss, allocate a copy, create the packet-reformat
 * action via the glue layer in the proper DR domain (FDB / NIC-Rx /
 * NIC-Tx, chosen by ft_type), and insert it into the cache list.
 * NOTE(review): the "return 0" statements after the cache hit and after
 * registration, and the buf memcmp closing, are elided in this excerpt.
 */
938 flow_dv_encap_decap_resource_register
939 (struct rte_eth_dev *dev,
940 struct mlx5_flow_dv_encap_decap_resource *resource,
941 struct mlx5_flow *dev_flow,
942 struct rte_flow_error *error)
944 struct mlx5_priv *priv = dev->data->dev_private;
945 struct mlx5_ibv_shared *sh = priv->sh;
946 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
947 struct rte_flow *flow = dev_flow->flow;
948 struct mlx5dv_dr_domain *domain;
/* Flag 1 marks root-table (group 0) placement. */
950 resource->flags = flow->group ? 0 : 1;
951 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
952 domain = sh->fdb_domain;
953 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
954 domain = sh->rx_domain;
956 domain = sh->tx_domain;
958 /* Lookup a matching resource from cache. */
959 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
960 if (resource->reformat_type == cache_resource->reformat_type &&
961 resource->ft_type == cache_resource->ft_type &&
962 resource->flags == cache_resource->flags &&
963 resource->size == cache_resource->size &&
964 !memcmp((const void *)resource->buf,
965 (const void *)cache_resource->buf,
967 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
968 (void *)cache_resource,
969 rte_atomic32_read(&cache_resource->refcnt));
970 rte_atomic32_inc(&cache_resource->refcnt);
971 dev_flow->dv.encap_decap = cache_resource;
975 /* Register new encap/decap resource. */
976 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
978 return rte_flow_error_set(error, ENOMEM,
979 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
980 "cannot allocate resource memory");
981 *cache_resource = *resource;
982 cache_resource->verbs_action =
983 mlx5_glue->dv_create_flow_action_packet_reformat
984 (sh->ctx, cache_resource->reformat_type,
985 cache_resource->ft_type, domain, cache_resource->flags,
986 cache_resource->size,
987 (cache_resource->size ? cache_resource->buf : NULL));
988 if (!cache_resource->verbs_action) {
989 rte_free(cache_resource);
990 return rte_flow_error_set(error, ENOMEM,
991 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
992 NULL, "cannot create action");
994 rte_atomic32_init(&cache_resource->refcnt);
995 rte_atomic32_inc(&cache_resource->refcnt);
996 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
997 dev_flow->dv.encap_decap = cache_resource;
998 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
999 (void *)cache_resource,
1000 rte_atomic32_read(&cache_resource->refcnt));
1005 * Find existing table jump resource or create and register a new one.
1007 * @param dev[in, out]
1008 * Pointer to rte_eth_dev structure.
1009 * @param[in, out] resource
1010 * Pointer to jump table resource.
1011 * @parm[in, out] dev_flow
1012 * Pointer to the dev_flow.
1014 * pointer to error structure.
1017 * 0 on success otherwise -errno and errno is set.
/*
 * Look up a jump-to-table resource (keyed by destination table object) in
 * the shared-context cache; on a hit, take a reference and attach it to
 * @dev_flow.  On a miss, allocate a copy, create the DR dest-flow-table
 * action, and insert it into the cache list.
 * NOTE(review): the "return 0" statements after the cache hit and after
 * registration are elided in this excerpt.
 */
1020 flow_dv_jump_tbl_resource_register
1021 (struct rte_eth_dev *dev,
1022 struct mlx5_flow_dv_jump_tbl_resource *resource,
1023 struct mlx5_flow *dev_flow,
1024 struct rte_flow_error *error)
1026 struct mlx5_priv *priv = dev->data->dev_private;
1027 struct mlx5_ibv_shared *sh = priv->sh;
1028 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1030 /* Lookup a matching resource from cache. */
1031 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1032 if (resource->tbl == cache_resource->tbl) {
1033 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1034 (void *)cache_resource,
1035 rte_atomic32_read(&cache_resource->refcnt));
1036 rte_atomic32_inc(&cache_resource->refcnt);
1037 dev_flow->dv.jump = cache_resource;
1041 /* Register new jump table resource. */
1042 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1043 if (!cache_resource)
1044 return rte_flow_error_set(error, ENOMEM,
1045 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1046 "cannot allocate resource memory");
1047 *cache_resource = *resource;
1048 cache_resource->action =
1049 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1050 (resource->tbl->obj);
1051 if (!cache_resource->action) {
1052 rte_free(cache_resource);
1053 return rte_flow_error_set(error, ENOMEM,
1054 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1055 NULL, "cannot create action");
1057 rte_atomic32_init(&cache_resource->refcnt);
1058 rte_atomic32_inc(&cache_resource->refcnt);
1059 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1060 dev_flow->dv.jump = cache_resource;
1061 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1062 (void *)cache_resource,
1063 rte_atomic32_read(&cache_resource->refcnt));
1068 * Find existing table port ID resource or create and register a new one.
1070 * @param dev[in, out]
1071 * Pointer to rte_eth_dev structure.
1072 * @param[in, out] resource
1073 * Pointer to port ID action resource.
1074 * @parm[in, out] dev_flow
1075 * Pointer to the dev_flow.
1077 * pointer to error structure.
1080 * 0 on success otherwise -errno and errno is set.
/*
 * Look up a dest-vport (port-id) action resource, keyed by port_id, in the
 * shared-context cache; on a hit, take a reference and attach it to
 * @dev_flow.  On a miss, allocate a copy, create the DR dest-vport action
 * in the FDB domain, and insert it into the cache list.
 * NOTE(review): the "return 0" statements after the cache hit and after
 * registration are elided in this excerpt.
 */
1083 flow_dv_port_id_action_resource_register
1084 (struct rte_eth_dev *dev,
1085 struct mlx5_flow_dv_port_id_action_resource *resource,
1086 struct mlx5_flow *dev_flow,
1087 struct rte_flow_error *error)
1089 struct mlx5_priv *priv = dev->data->dev_private;
1090 struct mlx5_ibv_shared *sh = priv->sh;
1091 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1093 /* Lookup a matching resource from cache. */
1094 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1095 if (resource->port_id == cache_resource->port_id) {
1096 DRV_LOG(DEBUG, "port id action resource resource %p: "
1098 (void *)cache_resource,
1099 rte_atomic32_read(&cache_resource->refcnt));
1100 rte_atomic32_inc(&cache_resource->refcnt);
1101 dev_flow->dv.port_id_action = cache_resource;
1105 /* Register new port id action resource. */
1106 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1107 if (!cache_resource)
1108 return rte_flow_error_set(error, ENOMEM,
1109 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1110 "cannot allocate resource memory");
1111 *cache_resource = *resource;
1112 cache_resource->action =
1113 mlx5_glue->dr_create_flow_action_dest_vport
1114 (priv->sh->fdb_domain, resource->port_id);
1115 if (!cache_resource->action) {
1116 rte_free(cache_resource);
1117 return rte_flow_error_set(error, ENOMEM,
1118 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1119 NULL, "cannot create action");
1121 rte_atomic32_init(&cache_resource->refcnt);
1122 rte_atomic32_inc(&cache_resource->refcnt);
1123 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1124 dev_flow->dv.port_id_action = cache_resource;
1125 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1126 (void *)cache_resource,
1127 rte_atomic32_read(&cache_resource->refcnt));
1132 * Get the size of specific rte_flow_item_type
1134 * @param[in] item_type
1135 * Tested rte_flow_item_type.
1138 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * Map a pattern item type to the sizeof() of its rte_flow_item_* struct,
 * used to size encap header buffers; 0 for VOID or unsupported types.
 * NOTE(review): the per-case "break" statements, the default case and the
 * final "return retval" are elided in this excerpt.
 */
1141 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1145 switch (item_type) {
1146 case RTE_FLOW_ITEM_TYPE_ETH:
1147 retval = sizeof(struct rte_flow_item_eth);
1149 case RTE_FLOW_ITEM_TYPE_VLAN:
1150 retval = sizeof(struct rte_flow_item_vlan);
1152 case RTE_FLOW_ITEM_TYPE_IPV4:
1153 retval = sizeof(struct rte_flow_item_ipv4);
1155 case RTE_FLOW_ITEM_TYPE_IPV6:
1156 retval = sizeof(struct rte_flow_item_ipv6);
1158 case RTE_FLOW_ITEM_TYPE_UDP:
1159 retval = sizeof(struct rte_flow_item_udp);
1161 case RTE_FLOW_ITEM_TYPE_TCP:
1162 retval = sizeof(struct rte_flow_item_tcp);
1164 case RTE_FLOW_ITEM_TYPE_VXLAN:
1165 retval = sizeof(struct rte_flow_item_vxlan);
1167 case RTE_FLOW_ITEM_TYPE_GRE:
1168 retval = sizeof(struct rte_flow_item_gre);
1170 case RTE_FLOW_ITEM_TYPE_NVGRE:
1171 retval = sizeof(struct rte_flow_item_nvgre);
1173 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1174 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1176 case RTE_FLOW_ITEM_TYPE_MPLS:
1177 retval = sizeof(struct rte_flow_item_mpls);
1179 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1187 #define MLX5_ENCAP_IPV4_VERSION 0x40
1188 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1189 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1190 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1191 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1192 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1193 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
/*
 * NOTE(review): flow_dv_convert_encap_data() serializes an rte_flow_item
 * list into a raw encap header buffer, defaulting unset fields (ethertype,
 * TTL/hop limit, next protocol, UDP dst port) layer by layer. This extract
 * appears to have lost several lines (doxygen comment delimiters, some
 * guard conditions, `break;` statements and closing braces, and the final
 * `*size = temp_size; return 0;`) — confirm against the original file
 * before merging.
 */
1196 * Convert the encap action data from list of rte_flow_item to raw buffer
1199 * Pointer to rte_flow_item objects list.
1201 * Pointer to the output buffer.
1203 * Pointer to the output buffer size.
1205 * Pointer to the error structure.
1208 * 0 on success, a negative errno value otherwise and rte_errno is set.
1211 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1212 size_t *size, struct rte_flow_error *error)
/* Pointers into buf[] at the position each header was copied to; used for
 * the cross-layer fix-ups below. */
1214 struct rte_ether_hdr *eth = NULL;
1215 struct rte_vlan_hdr *vlan = NULL;
1216 struct rte_ipv4_hdr *ipv4 = NULL;
1217 struct rte_ipv6_hdr *ipv6 = NULL;
1218 struct rte_udp_hdr *udp = NULL;
1219 struct rte_vxlan_hdr *vxlan = NULL;
1220 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1221 struct rte_gre_hdr *gre = NULL;
1223 size_t temp_size = 0;
/* NOTE(review): the condition guarding this error (presumably `if (!items)`)
 * is missing from this extract. */
1226 return rte_flow_error_set(error, EINVAL,
1227 RTE_FLOW_ERROR_TYPE_ACTION,
1228 NULL, "invalid empty data");
/* Append each item's spec to buf, tracking the running length. */
1229 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1230 len = flow_dv_get_item_len(items->type);
/* Reject encap data exceeding the device's reformat buffer limit. */
1231 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1232 return rte_flow_error_set(error, EINVAL,
1233 RTE_FLOW_ERROR_TYPE_ACTION,
1234 (void *)items->type,
1235 "items total size is too big"
1236 " for encap action");
1237 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1238 switch (items->type) {
1239 case RTE_FLOW_ITEM_TYPE_ETH:
1240 eth = (struct rte_ether_hdr *)&buf[temp_size];
/* VLAN requires a preceding ETH item; default its ethertype to VLAN. */
1242 case RTE_FLOW_ITEM_TYPE_VLAN:
1243 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1245 return rte_flow_error_set(error, EINVAL,
1246 RTE_FLOW_ERROR_TYPE_ACTION,
1247 (void *)items->type,
1248 "eth header not found");
1249 if (!eth->ether_type)
1250 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
/* L3: patch the enclosing L2 ethertype and default version/TTL fields. */
1252 case RTE_FLOW_ITEM_TYPE_IPV4:
1253 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1255 return rte_flow_error_set(error, EINVAL,
1256 RTE_FLOW_ERROR_TYPE_ACTION,
1257 (void *)items->type,
1258 "neither eth nor vlan"
1260 if (vlan && !vlan->eth_proto)
1261 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1262 else if (eth && !eth->ether_type)
1263 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1264 if (!ipv4->version_ihl)
1265 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1266 MLX5_ENCAP_IPV4_IHL_MIN;
1267 if (!ipv4->time_to_live)
1268 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1270 case RTE_FLOW_ITEM_TYPE_IPV6:
1271 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1273 return rte_flow_error_set(error, EINVAL,
1274 RTE_FLOW_ERROR_TYPE_ACTION,
1275 (void *)items->type,
1276 "neither eth nor vlan"
1278 if (vlan && !vlan->eth_proto)
1279 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1280 else if (eth && !eth->ether_type)
1281 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1282 if (!ipv6->vtc_flow)
1284 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1285 if (!ipv6->hop_limits)
1286 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
/* L4: default the enclosing IP header's next-protocol field. */
1288 case RTE_FLOW_ITEM_TYPE_UDP:
1289 udp = (struct rte_udp_hdr *)&buf[temp_size];
1291 return rte_flow_error_set(error, EINVAL,
1292 RTE_FLOW_ERROR_TYPE_ACTION,
1293 (void *)items->type,
1294 "ip header not found");
1295 if (ipv4 && !ipv4->next_proto_id)
1296 ipv4->next_proto_id = IPPROTO_UDP;
1297 else if (ipv6 && !ipv6->proto)
1298 ipv6->proto = IPPROTO_UDP;
/* Tunnels: require UDP and default its dst port / the tunnel flags. */
1300 case RTE_FLOW_ITEM_TYPE_VXLAN:
1301 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1303 return rte_flow_error_set(error, EINVAL,
1304 RTE_FLOW_ERROR_TYPE_ACTION,
1305 (void *)items->type,
1306 "udp header not found");
1308 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1309 if (!vxlan->vx_flags)
1311 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1313 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1314 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1316 return rte_flow_error_set(error, EINVAL,
1317 RTE_FLOW_ERROR_TYPE_ACTION,
1318 (void *)items->type,
1319 "udp header not found");
/* VXLAN-GPE cannot default the next protocol: the user must supply it. */
1320 if (!vxlan_gpe->proto)
1321 return rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ACTION,
1323 (void *)items->type,
1324 "next protocol not found");
1327 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1328 if (!vxlan_gpe->vx_flags)
1329 vxlan_gpe->vx_flags =
1330 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1332 case RTE_FLOW_ITEM_TYPE_GRE:
1333 case RTE_FLOW_ITEM_TYPE_NVGRE:
1334 gre = (struct rte_gre_hdr *)&buf[temp_size];
/* GRE likewise requires an explicit next protocol and an IP header. */
1336 return rte_flow_error_set(error, EINVAL,
1337 RTE_FLOW_ERROR_TYPE_ACTION,
1338 (void *)items->type,
1339 "next protocol not found");
1341 return rte_flow_error_set(error, EINVAL,
1342 RTE_FLOW_ERROR_TYPE_ACTION,
1343 (void *)items->type,
1344 "ip header not found");
1345 if (ipv4 && !ipv4->next_proto_id)
1346 ipv4->next_proto_id = IPPROTO_GRE;
1347 else if (ipv6 && !ipv6->proto)
1348 ipv6->proto = IPPROTO_GRE;
1350 case RTE_FLOW_ITEM_TYPE_VOID:
1353 return rte_flow_error_set(error, EINVAL,
1354 RTE_FLOW_ERROR_TYPE_ACTION,
1355 (void *)items->type,
1356 "unsupported item type");
/*
 * NOTE(review): flow_dv_create_action_l2_encap() builds an L2-to-L2-tunnel
 * packet-reformat resource from a VXLAN/NVGRE/RAW encap action and registers
 * it via flow_dv_encap_decap_resource_register(). Several lines are missing
 * from this extract (comment delimiters, `encap_data =` assignment lines,
 * braces and the trailing `return 0;`) — confirm against the original file.
 */
1366 * Convert L2 encap action to DV specification.
1369 * Pointer to rte_eth_dev structure.
1371 * Pointer to action structure.
1372 * @param[in, out] dev_flow
1373 * Pointer to the mlx5_flow.
1374 * @param[in] transfer
1375 * Mark if the flow is E-Switch flow.
1377 * Pointer to the error structure.
1380 * 0 on success, a negative errno value otherwise and rte_errno is set.
1383 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1384 const struct rte_flow_action *action,
1385 struct mlx5_flow *dev_flow,
1387 struct rte_flow_error *error)
1389 const struct rte_flow_item *encap_data;
1390 const struct rte_flow_action_raw_encap *raw_encap_data;
/* FDB table for E-Switch (transfer) flows, NIC TX otherwise. */
1391 struct mlx5_flow_dv_encap_decap_resource res = {
1393 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1394 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1395 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
/* RAW encap supplies a pre-built byte buffer ... */
1398 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1400 (const struct rte_flow_action_raw_encap *)action->conf;
1401 res.size = raw_encap_data->size;
1402 memcpy(res.buf, raw_encap_data->data, res.size);
/* ... while VXLAN/NVGRE encap supply an item list to be serialized. */
1404 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1406 ((const struct rte_flow_action_vxlan_encap *)
1407 action->conf)->definition;
1410 ((const struct rte_flow_action_nvgre_encap *)
1411 action->conf)->definition;
1412 if (flow_dv_convert_encap_data(encap_data, res.buf,
1416 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1417 return rte_flow_error_set(error, EINVAL,
1418 RTE_FLOW_ERROR_TYPE_ACTION,
1419 NULL, "can't create L2 encap action");
/*
 * NOTE(review): flow_dv_create_action_l2_decap() registers an
 * L2-tunnel-to-L2 packet-reformat (decap) resource; decap needs no buffer,
 * only the reformat type and table type. The function's opening/closing
 * braces and trailing `return 0;` are missing from this extract.
 */
1424 * Convert L2 decap action to DV specification.
1427 * Pointer to rte_eth_dev structure.
1428 * @param[in, out] dev_flow
1429 * Pointer to the mlx5_flow.
1430 * @param[in] transfer
1431 * Mark if the flow is E-Switch flow.
1433 * Pointer to the error structure.
1436 * 0 on success, a negative errno value otherwise and rte_errno is set.
1439 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1440 struct mlx5_flow *dev_flow,
1442 struct rte_flow_error *error)
/* FDB table for E-Switch (transfer) flows, NIC RX otherwise (decap happens
 * on receive). */
1444 struct mlx5_flow_dv_encap_decap_resource res = {
1447 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1448 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1449 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1452 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1453 return rte_flow_error_set(error, EINVAL,
1454 RTE_FLOW_ERROR_TYPE_ACTION,
1455 NULL, "can't create L2 decap action");
/*
 * NOTE(review): flow_dv_create_action_raw_encap() registers an L3-tunnel
 * reformat resource: egress flows get L2-to-L3-tunnel (encap), ingress
 * flows get L3-tunnel-to-L2 (decap). The `if (attr->transfer)` / `else`
 * lines around the ft_type selection and the trailing `return 0;` are
 * missing from this extract.
 */
1460 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1463 * Pointer to rte_eth_dev structure.
1465 * Pointer to action structure.
1466 * @param[in, out] dev_flow
1467 * Pointer to the mlx5_flow.
1469 * Pointer to the flow attributes.
1471 * Pointer to the error structure.
1474 * 0 on success, a negative errno value otherwise and rte_errno is set.
1477 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1478 const struct rte_flow_action *action,
1479 struct mlx5_flow *dev_flow,
1480 const struct rte_flow_attr *attr,
1481 struct rte_flow_error *error)
1483 const struct rte_flow_action_raw_encap *encap_data;
1484 struct mlx5_flow_dv_encap_decap_resource res;
1486 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1487 res.size = encap_data->size;
1488 memcpy(res.buf, encap_data->data, res.size);
/* Direction decides encap vs decap semantics of the raw buffer. */
1489 res.reformat_type = attr->egress ?
1490 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1491 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1493 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1495 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1496 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1497 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1498 return rte_flow_error_set(error, EINVAL,
1499 RTE_FLOW_ERROR_TYPE_ACTION,
1500 NULL, "can't create encap action");
1505 * Validate the modify-header actions.
1507 * @param[in] action_flags
1508 * Holds the actions detected until now.
1510 * Pointer to the modify action.
1512 * Pointer to error structure.
1515 * 0 on success, a negative errno value otherwise and rte_errno is set.
1518 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1519 const struct rte_flow_action *action,
1520 struct rte_flow_error *error)
1522 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1523 return rte_flow_error_set(error, EINVAL,
1524 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1525 NULL, "action configuration not set");
1526 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1527 return rte_flow_error_set(error, EINVAL,
1528 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1529 "can't have encap action before"
1535 * Validate the modify-header MAC address actions.
1537 * @param[in] action_flags
1538 * Holds the actions detected until now.
1540 * Pointer to the modify action.
1541 * @param[in] item_flags
1542 * Holds the items detected.
1544 * Pointer to error structure.
1547 * 0 on success, a negative errno value otherwise and rte_errno is set.
1550 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1551 const struct rte_flow_action *action,
1552 const uint64_t item_flags,
1553 struct rte_flow_error *error)
1557 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1559 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1560 return rte_flow_error_set(error, EINVAL,
1561 RTE_FLOW_ERROR_TYPE_ACTION,
1563 "no L2 item in pattern");
1569 * Validate the modify-header IPv4 address actions.
1571 * @param[in] action_flags
1572 * Holds the actions detected until now.
1574 * Pointer to the modify action.
1575 * @param[in] item_flags
1576 * Holds the items detected.
1578 * Pointer to error structure.
1581 * 0 on success, a negative errno value otherwise and rte_errno is set.
1584 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1585 const struct rte_flow_action *action,
1586 const uint64_t item_flags,
1587 struct rte_flow_error *error)
1591 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1593 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1594 return rte_flow_error_set(error, EINVAL,
1595 RTE_FLOW_ERROR_TYPE_ACTION,
1597 "no ipv4 item in pattern");
1603 * Validate the modify-header IPv6 address actions.
1605 * @param[in] action_flags
1606 * Holds the actions detected until now.
1608 * Pointer to the modify action.
1609 * @param[in] item_flags
1610 * Holds the items detected.
1612 * Pointer to error structure.
1615 * 0 on success, a negative errno value otherwise and rte_errno is set.
1618 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1619 const struct rte_flow_action *action,
1620 const uint64_t item_flags,
1621 struct rte_flow_error *error)
1625 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1627 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1628 return rte_flow_error_set(error, EINVAL,
1629 RTE_FLOW_ERROR_TYPE_ACTION,
1631 "no ipv6 item in pattern");
1637 * Validate the modify-header TP actions.
1639 * @param[in] action_flags
1640 * Holds the actions detected until now.
1642 * Pointer to the modify action.
1643 * @param[in] item_flags
1644 * Holds the items detected.
1646 * Pointer to error structure.
1649 * 0 on success, a negative errno value otherwise and rte_errno is set.
1652 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1653 const struct rte_flow_action *action,
1654 const uint64_t item_flags,
1655 struct rte_flow_error *error)
1659 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1661 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1662 return rte_flow_error_set(error, EINVAL,
1663 RTE_FLOW_ERROR_TYPE_ACTION,
1664 NULL, "no transport layer "
1671 * Validate the modify-header TTL actions.
1673 * @param[in] action_flags
1674 * Holds the actions detected until now.
1676 * Pointer to the modify action.
1677 * @param[in] item_flags
1678 * Holds the items detected.
1680 * Pointer to error structure.
1683 * 0 on success, a negative errno value otherwise and rte_errno is set.
1686 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1687 const struct rte_flow_action *action,
1688 const uint64_t item_flags,
1689 struct rte_flow_error *error)
1693 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1695 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1696 return rte_flow_error_set(error, EINVAL,
1697 RTE_FLOW_ERROR_TYPE_ACTION,
1699 "no IP protocol in pattern");
1705 * Validate jump action.
1708 * Pointer to the modify action.
1710 * The group of the current flow.
1712 * Pointer to error structure.
1715 * 0 on success, a negative errno value otherwise and rte_errno is set.
1718 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1720 struct rte_flow_error *error)
1722 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1723 return rte_flow_error_set(error, EINVAL,
1724 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1725 NULL, "action configuration not set");
1726 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1727 return rte_flow_error_set(error, EINVAL,
1728 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1729 "target group must be higher then"
1730 " the current flow group");
/*
 * NOTE(review): flow_dv_validate_action_port_id() checks that a PORT_ID
 * (redirect-to-port) action is used in a transfer flow, has a conf, is the
 * only fate action, and that both this device and the target port belong to
 * the same E-Switch domain. Several lines are missing from this extract
 * (declarations of `port`/`ret`, NULL arguments, string continuations,
 * braces and the trailing `return 0;`).
 */
1735 * Validate the port_id action.
1738 * Pointer to rte_eth_dev structure.
1739 * @param[in] action_flags
1740 * Bit-fields that holds the actions detected until now.
1742 * Port_id RTE action structure.
1744 * Attributes of flow that includes this action.
1746 * Pointer to error structure.
1749 * 0 on success, a negative errno value otherwise and rte_errno is set.
1752 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1753 uint64_t action_flags,
1754 const struct rte_flow_action *action,
1755 const struct rte_flow_attr *attr,
1756 struct rte_flow_error *error)
1758 const struct rte_flow_action_port_id *port_id;
1760 uint16_t esw_domain_id;
1761 uint16_t act_port_domain_id;
/* PORT_ID is an E-Switch action: only valid on transfer flows. */
1764 if (!attr->transfer)
1765 return rte_flow_error_set(error, ENOTSUP,
1766 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1768 "port id action is valid in transfer"
1770 if (!action || !action->conf)
1771 return rte_flow_error_set(error, ENOTSUP,
1772 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1774 "port id action parameters must be"
/* Reject a second fate action in the same flow. */
1776 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1777 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1778 return rte_flow_error_set(error, EINVAL,
1779 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1780 "can have only one fate actions in"
/* Resolve this device's E-Switch domain ... */
1782 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1783 &esw_domain_id, NULL);
1785 return rte_flow_error_set(error, -ret,
1786 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1788 "failed to obtain E-Switch info");
1789 port_id = action->conf;
/* `original` means "this port itself", otherwise the explicit port id. */
1790 port = port_id->original ? dev->data->port_id : port_id->id;
/* ... and the target port's domain; both must match. */
1791 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1793 return rte_flow_error_set
1795 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1796 "failed to obtain E-Switch port id for port");
1797 if (act_port_domain_id != esw_domain_id)
1798 return rte_flow_error_set
1800 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1801 "port does not belong to"
1802 " E-Switch being configured");
/*
 * NOTE(review): flow_dv_modify_hdr_resource_register() deduplicates
 * modify-header resources through the per-shared-context cache
 * (sh->modify_cmds): a cache hit bumps the refcount, a miss allocates,
 * creates the verbs action and inserts the new entry. Missing from this
 * extract: the remaining `ns = sh->tx_domain / rx_domain` branches, the
 * `return 0;` on cache hit, loop/function closing braces and the final
 * `return 0;`.
 */
1807 * Find existing modify-header resource or create and register a new one.
1809 * @param dev[in, out]
1810 * Pointer to rte_eth_dev structure.
1811 * @param[in, out] resource
1812 * Pointer to modify-header resource.
1813 * @parm[in, out] dev_flow
1814 * Pointer to the dev_flow.
1816 * pointer to error structure.
1819 * 0 on success otherwise -errno and errno is set.
1822 flow_dv_modify_hdr_resource_register
1823 (struct rte_eth_dev *dev,
1824 struct mlx5_flow_dv_modify_hdr_resource *resource,
1825 struct mlx5_flow *dev_flow,
1826 struct rte_flow_error *error)
1828 struct mlx5_priv *priv = dev->data->dev_private;
1829 struct mlx5_ibv_shared *sh = priv->sh;
1830 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1831 struct mlx5dv_dr_domain *ns;
/* Select the DR domain matching the resource's table type. */
1833 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1834 ns = sh->fdb_domain;
1835 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
/* Group 0 is the root table and needs the ROOT_LEVEL flag. */
1840 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
1841 /* Lookup a matching resource from cache. */
1842 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1843 if (resource->ft_type == cache_resource->ft_type &&
1844 resource->actions_num == cache_resource->actions_num &&
1845 resource->flags == cache_resource->flags &&
1846 !memcmp((const void *)resource->actions,
1847 (const void *)cache_resource->actions,
1848 (resource->actions_num *
1849 sizeof(resource->actions[0])))) {
1850 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1851 (void *)cache_resource,
1852 rte_atomic32_read(&cache_resource->refcnt))
1853 rte_atomic32_inc(&cache_resource->refcnt);
1854 dev_flow->dv.modify_hdr = cache_resource;
1858 /* Register new modify-header resource. */
1859 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1860 if (!cache_resource)
1861 return rte_flow_error_set(error, ENOMEM,
1862 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1863 "cannot allocate resource memory");
1864 *cache_resource = *resource;
1865 cache_resource->verbs_action =
1866 mlx5_glue->dv_create_flow_action_modify_header
1867 (sh->ctx, cache_resource->ft_type,
1868 ns, cache_resource->flags,
1869 cache_resource->actions_num *
1870 sizeof(cache_resource->actions[0]),
1871 (uint64_t *)cache_resource->actions);
/* On failure free the copy before reporting; no list insertion happened. */
1872 if (!cache_resource->verbs_action) {
1873 rte_free(cache_resource);
1874 return rte_flow_error_set(error, ENOMEM,
1875 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1876 NULL, "cannot create action");
1878 rte_atomic32_init(&cache_resource->refcnt);
1879 rte_atomic32_inc(&cache_resource->refcnt);
1880 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1881 dev_flow->dv.modify_hdr = cache_resource;
1882 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1883 (void *)cache_resource,
1884 rte_atomic32_read(&cache_resource->refcnt));
/*
 * NOTE(review): flow_dv_counter_new() returns an existing shared counter
 * with a matching id (refcount bump) or allocates a new DevX counter set
 * and its DV counter action. Most of the error-unwind path, the tmpl
 * initializer fields and the final `return cnt;` are missing from this
 * extract — do not modify without consulting the original file.
 */
1889 * Get or create a flow counter.
1892 * Pointer to the Ethernet device structure.
1894 * Indicate if this counter is shared with other flows.
1896 * Counter identifier.
1899 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
1901 static struct mlx5_flow_counter *
1902 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1904 struct mlx5_priv *priv = dev->data->dev_private;
1905 struct mlx5_flow_counter *cnt = NULL;
1906 struct mlx5_devx_counter_set *dcs = NULL;
/* DevX is required for flow counters; bail out when unavailable. */
1909 if (!priv->config.devx) {
/* Reuse a shared counter with the same user-visible id if present. */
1914 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1915 if (cnt->shared && cnt->id == id) {
1921 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1922 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1927 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1930 struct mlx5_flow_counter tmpl = {
1936 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1942 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1952 * Release a flow counter.
1954 * @param[in] counter
1955 * Pointer to the counter handler.
1958 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1964 if (--counter->ref_cnt == 0) {
1965 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1967 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1968 LIST_REMOVE(counter, next);
1969 rte_free(counter->dcs);
/*
 * NOTE(review): flow_dv_validate_attributes() checks group, priority and
 * direction attributes against device capabilities. Missing from this
 * extract: the `#endif` closing the HAVE_MLX5DV_DR conditional, several
 * `(error, ...)` argument lines, NULL arguments, braces and the final
 * `return 0;`.
 */
1975 * Verify the @p attributes will be correctly understood by the NIC and store
1976 * them in the @p flow if everything is correct.
1979 * Pointer to dev struct.
1980 * @param[in] attributes
1981 * Pointer to flow attributes
1983 * Pointer to error structure.
1986 * 0 on success, a negative errno value otherwise and rte_errno is set.
1989 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1990 const struct rte_flow_attr *attributes,
1991 struct rte_flow_error *error)
1993 struct mlx5_priv *priv = dev->data->dev_private;
1994 uint32_t priority_max = priv->config.flow_prio - 1;
/* Without DR support only the default group (0) exists. */
1996 #ifndef HAVE_MLX5DV_DR
1997 if (attributes->group)
1998 return rte_flow_error_set(error, ENOTSUP,
1999 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2001 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is the "let the PMD pick" sentinel. */
2003 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2004 attributes->priority >= priority_max)
2005 return rte_flow_error_set(error, ENOTSUP,
2006 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2008 "priority out of range");
2009 if (attributes->transfer) {
2010 if (!priv->config.dv_esw_en)
2011 return rte_flow_error_set
2013 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2014 "E-Switch dr is not supported");
2015 if (!(priv->representor || priv->master))
2016 return rte_flow_error_set
2017 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
/* NOTE(review): "configurationd" is a typo in this user-visible message;
 * left untouched here since error strings are runtime behavior. */
2018 NULL, "E-Switch configurationd can only be"
2019 " done by a master or a representor device");
2020 if (attributes->egress)
2021 return rte_flow_error_set
2023 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2024 "egress is not supported");
/* NOTE(review): the check uses MLX5_MAX_TABLES_FDB but the message
 * stringifies MLX5_MAX_FDB_TABLES — confirm both names denote the same
 * constant or fix the RTE_STR() argument. */
2025 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2026 return rte_flow_error_set
2028 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2029 NULL, "group must be smaller than "
2030 RTE_STR(MLX5_MAX_FDB_TABLES));
/* Exactly one direction must be set for non-transfer flows. */
2032 if (!(attributes->egress ^ attributes->ingress))
2033 return rte_flow_error_set(error, ENOTSUP,
2034 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2035 "must specify exactly one of "
2036 "ingress or egress");
/*
 * NOTE(review): flow_dv_validate() is the DV engine's combined item/action
 * validator: first pass walks the pattern accumulating item_flags, second
 * pass walks the actions accumulating action_flags, then applies E-Switch
 * (transfer) restrictions. This extract is heavily gappy — missing `break;`
 * statements, `if (ret < 0) return ret;` checks, `actions_n`/`ret`
 * declarations, argument continuation lines, closing braces and the final
 * `return 0;` — so treat the control flow below as indicative only.
 */
2041 * Internal validation function. For validating both actions and items.
2044 * Pointer to the rte_eth_dev structure.
2046 * Pointer to the flow attributes.
2048 * Pointer to the list of items.
2049 * @param[in] actions
2050 * Pointer to the list of actions.
2052 * Pointer to the error structure.
2055 * 0 on success, a negative errno value otherwise and rte_errno is set.
2058 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2059 const struct rte_flow_item items[],
2060 const struct rte_flow_action actions[],
2061 struct rte_flow_error *error)
2064 uint64_t action_flags = 0;
2065 uint64_t item_flags = 0;
2066 uint64_t last_item = 0;
/* 0xff = "next protocol unknown/any" until an IP item narrows it. */
2067 uint8_t next_protocol = 0xff;
2069 struct rte_flow_item_tcp nic_tcp_mask = {
2072 .src_port = RTE_BE16(UINT16_MAX),
2073 .dst_port = RTE_BE16(UINT16_MAX),
2079 ret = flow_dv_validate_attributes(dev, attr, error);
/* First pass: validate pattern items and accumulate layer flags. */
2082 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2083 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2084 switch (items->type) {
2085 case RTE_FLOW_ITEM_TYPE_VOID:
2087 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2088 ret = flow_dv_validate_item_port_id
2089 (dev, items, attr, item_flags, error);
2092 last_item |= MLX5_FLOW_ITEM_PORT_ID;
2094 case RTE_FLOW_ITEM_TYPE_ETH:
2095 ret = mlx5_flow_validate_item_eth(items, item_flags,
2099 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2100 MLX5_FLOW_LAYER_OUTER_L2;
2102 case RTE_FLOW_ITEM_TYPE_VLAN:
2103 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2107 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2108 MLX5_FLOW_LAYER_OUTER_VLAN;
2110 case RTE_FLOW_ITEM_TYPE_IPV4:
2111 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2115 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2116 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Capture masked next_proto_id so later L4/tunnel items can be checked. */
2117 if (items->mask != NULL &&
2118 ((const struct rte_flow_item_ipv4 *)
2119 items->mask)->hdr.next_proto_id) {
2121 ((const struct rte_flow_item_ipv4 *)
2122 (items->spec))->hdr.next_proto_id;
2124 ((const struct rte_flow_item_ipv4 *)
2125 (items->mask))->hdr.next_proto_id;
2127 /* Reset for inner layer. */
2128 next_protocol = 0xff;
2131 case RTE_FLOW_ITEM_TYPE_IPV6:
2132 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2136 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2137 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2138 if (items->mask != NULL &&
2139 ((const struct rte_flow_item_ipv6 *)
2140 items->mask)->hdr.proto) {
2142 ((const struct rte_flow_item_ipv6 *)
2143 items->spec)->hdr.proto;
2145 ((const struct rte_flow_item_ipv6 *)
2146 items->mask)->hdr.proto;
2148 /* Reset for inner layer. */
2149 next_protocol = 0xff;
2152 case RTE_FLOW_ITEM_TYPE_TCP:
2153 ret = mlx5_flow_validate_item_tcp
2160 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2161 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2163 case RTE_FLOW_ITEM_TYPE_UDP:
2164 ret = mlx5_flow_validate_item_udp(items, item_flags,
2169 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2170 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2172 case RTE_FLOW_ITEM_TYPE_GRE:
2173 case RTE_FLOW_ITEM_TYPE_NVGRE:
2174 ret = mlx5_flow_validate_item_gre(items, item_flags,
2175 next_protocol, error);
2178 last_item = MLX5_FLOW_LAYER_GRE;
2180 case RTE_FLOW_ITEM_TYPE_VXLAN:
2181 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2185 last_item = MLX5_FLOW_LAYER_VXLAN;
2187 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2188 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2193 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2195 case RTE_FLOW_ITEM_TYPE_MPLS:
2196 ret = mlx5_flow_validate_item_mpls(dev, items,
2201 last_item = MLX5_FLOW_LAYER_MPLS;
2203 case RTE_FLOW_ITEM_TYPE_META:
2204 ret = flow_dv_validate_item_meta(dev, items, attr,
2208 last_item = MLX5_FLOW_ITEM_METADATA;
2211 return rte_flow_error_set(error, ENOTSUP,
2212 RTE_FLOW_ERROR_TYPE_ITEM,
2213 NULL, "item not supported");
2215 item_flags |= last_item;
/* Second pass: validate actions against item_flags and each other. */
2217 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2218 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2219 return rte_flow_error_set(error, ENOTSUP,
2220 RTE_FLOW_ERROR_TYPE_ACTION,
2221 actions, "too many actions");
2222 switch (actions->type) {
2223 case RTE_FLOW_ACTION_TYPE_VOID:
2225 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2226 ret = flow_dv_validate_action_port_id(dev,
2233 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2236 case RTE_FLOW_ACTION_TYPE_FLAG:
2237 ret = mlx5_flow_validate_action_flag(action_flags,
2241 action_flags |= MLX5_FLOW_ACTION_FLAG;
2244 case RTE_FLOW_ACTION_TYPE_MARK:
2245 ret = mlx5_flow_validate_action_mark(actions,
2250 action_flags |= MLX5_FLOW_ACTION_MARK;
2253 case RTE_FLOW_ACTION_TYPE_DROP:
2254 ret = mlx5_flow_validate_action_drop(action_flags,
2258 action_flags |= MLX5_FLOW_ACTION_DROP;
2261 case RTE_FLOW_ACTION_TYPE_QUEUE:
2262 ret = mlx5_flow_validate_action_queue(actions,
2267 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2270 case RTE_FLOW_ACTION_TYPE_RSS:
2271 ret = mlx5_flow_validate_action_rss(actions,
2277 action_flags |= MLX5_FLOW_ACTION_RSS;
2280 case RTE_FLOW_ACTION_TYPE_COUNT:
2281 ret = flow_dv_validate_action_count(dev, error);
2284 action_flags |= MLX5_FLOW_ACTION_COUNT;
2287 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2288 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2289 ret = flow_dv_validate_action_l2_encap(action_flags,
2294 action_flags |= actions->type ==
2295 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2296 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2297 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2300 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2301 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2302 ret = flow_dv_validate_action_l2_decap(action_flags,
2306 action_flags |= actions->type ==
2307 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2308 MLX5_FLOW_ACTION_VXLAN_DECAP :
2309 MLX5_FLOW_ACTION_NVGRE_DECAP;
2312 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2313 ret = flow_dv_validate_action_raw_encap(action_flags,
2318 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2321 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2322 ret = flow_dv_validate_action_raw_decap(action_flags,
2327 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2330 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2331 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2332 ret = flow_dv_validate_action_modify_mac(action_flags,
2338 /* Count all modify-header actions as one action. */
2339 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2341 action_flags |= actions->type ==
2342 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2343 MLX5_FLOW_ACTION_SET_MAC_SRC :
2344 MLX5_FLOW_ACTION_SET_MAC_DST;
2347 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2348 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2349 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2355 /* Count all modify-header actions as one action. */
2356 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2358 action_flags |= actions->type ==
2359 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2360 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2361 MLX5_FLOW_ACTION_SET_IPV4_DST;
2363 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2364 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2365 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2371 /* Count all modify-header actions as one action. */
2372 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2374 action_flags |= actions->type ==
2375 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2376 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2377 MLX5_FLOW_ACTION_SET_IPV6_DST;
2379 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2380 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2381 ret = flow_dv_validate_action_modify_tp(action_flags,
2387 /* Count all modify-header actions as one action. */
2388 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2390 action_flags |= actions->type ==
2391 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2392 MLX5_FLOW_ACTION_SET_TP_SRC :
2393 MLX5_FLOW_ACTION_SET_TP_DST;
2395 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2396 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2397 ret = flow_dv_validate_action_modify_ttl(action_flags,
2403 /* Count all modify-header actions as one action. */
2404 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2406 action_flags |= actions->type ==
2407 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2408 MLX5_FLOW_ACTION_SET_TTL :
2409 MLX5_FLOW_ACTION_DEC_TTL;
2411 case RTE_FLOW_ACTION_TYPE_JUMP:
2412 ret = flow_dv_validate_action_jump(actions,
2413 attr->group, error);
2417 action_flags |= MLX5_FLOW_ACTION_JUMP;
2420 return rte_flow_error_set(error, ENOTSUP,
2421 RTE_FLOW_ERROR_TYPE_ACTION,
2423 "action not supported");
2426 /* Eswitch has few restrictions on using items and actions */
2427 if (attr->transfer) {
2428 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2429 return rte_flow_error_set(error, ENOTSUP,
2430 RTE_FLOW_ERROR_TYPE_ACTION,
2432 "unsupported action FLAG");
2433 if (action_flags & MLX5_FLOW_ACTION_MARK)
2434 return rte_flow_error_set(error, ENOTSUP,
2435 RTE_FLOW_ERROR_TYPE_ACTION,
2437 "unsupported action MARK");
2438 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2439 return rte_flow_error_set(error, ENOTSUP,
2440 RTE_FLOW_ERROR_TYPE_ACTION,
2442 "unsupported action QUEUE");
2443 if (action_flags & MLX5_FLOW_ACTION_RSS)
2444 return rte_flow_error_set(error, ENOTSUP,
2445 RTE_FLOW_ERROR_TYPE_ACTION,
2447 "unsupported action RSS");
/* Transfer flows must carry an E-Switch fate action (drop/port-id). */
2448 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2449 return rte_flow_error_set(error, EINVAL,
2450 RTE_FLOW_ERROR_TYPE_ACTION,
2452 "no fate action is found");
/* Ingress NIC flows likewise need a fate action. */
2454 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2455 return rte_flow_error_set(error, EINVAL,
2456 RTE_FLOW_ERROR_TYPE_ACTION,
2458 "no fate action is found");
2464 * Internal preparation function. Allocates the DV flow size,
2465 * this size is constant.
2468 * Pointer to the flow attributes.
2470 * Pointer to the list of items.
2471 * @param[in] actions
2472 * Pointer to the list of actions.
2474 * Pointer to the error structure.
2477 * Pointer to mlx5_flow object on success,
2478 * otherwise NULL and rte_errno is set.
2480 static struct mlx5_flow *
2481 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2482 const struct rte_flow_item items[] __rte_unused,
2483 const struct rte_flow_action actions[] __rte_unused,
2484 struct rte_flow_error *error)
2486 uint32_t size = sizeof(struct mlx5_flow);
2487 struct mlx5_flow *flow;
2489 flow = rte_calloc(__func__, 1, size, 0);
2491 rte_flow_error_set(error, ENOMEM,
2492 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2493 "not enough memory to create flow");
2496 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2502 * Sanity check for match mask and value. Similar to check_valid_spec() in
2503 * kernel driver. If unmasked bit is present in value, it returns failure.
2506 * pointer to match mask buffer.
2507 * @param match_value
2508 * pointer to match value buffer.
2511 * 0 if valid, -EINVAL otherwise.
2514 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2516 uint8_t *m = match_mask;
2517 uint8_t *v = match_value;
2520 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2523 "match_value differs from match_criteria"
2524 " %p[%u] != %p[%u]",
2525 match_value, i, match_mask, i);
/*
 * NOTE(review): flow_dv_translate_item_eth() writes the ETH item's MAC
 * addresses and ethertype into the matcher (mask) and key (value) buffers,
 * masking every value byte by its mask byte. Missing from this extract:
 * the `if (!eth_v) return;` style guards, the `if (inner)`/`else` lines
 * around the headers_m/headers_v selection, local declarations and braces.
 * Also, `ð_m` at two memcpy call sites below is mojibake of `&eth_m`
 * (the HTML entity "&eth") — must be repaired before this compiles.
 */
2534 * Add Ethernet item to matcher and to the value.
2536 * @param[in, out] matcher
2538 * @param[in, out] key
2539 * Flow matcher value.
2541 * Flow pattern to translate.
2543 * Item is inner pattern.
2546 flow_dv_translate_item_eth(void *matcher, void *key,
2547 const struct rte_flow_item *item, int inner)
2549 const struct rte_flow_item_eth *eth_m = item->mask;
2550 const struct rte_flow_item_eth *eth_v = item->spec;
/* Default mask: match full MACs and the whole ethertype. */
2551 const struct rte_flow_item_eth nic_mask = {
2552 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2553 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2554 .type = RTE_BE16(0xffff),
/* Inner vs outer header blocks of fte_match_param. */
2566 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2568 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2570 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2572 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2574 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2575 ð_m->dst, sizeof(eth_m->dst));
2576 /* The value must be in the range of the mask. */
2577 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2578 for (i = 0; i < sizeof(eth_m->dst); ++i)
2579 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2580 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2581 ð_m->src, sizeof(eth_m->src));
2582 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2583 /* The value must be in the range of the mask. */
/* NOTE(review): the bound uses sizeof(eth_m->dst) while copying src bytes;
 * harmless only if dst and src have the same size - confirm. */
2584 for (i = 0; i < sizeof(eth_m->dst); ++i)
2585 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2586 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2587 rte_be_to_cpu_16(eth_m->type));
2588 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
/* Ethertype stays big-endian in the value; mask & value combined here. */
2589 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2593 * Add VLAN item to matcher and to the value.
2595 * @param[in, out] matcher
2597 * @param[in, out] key
2598 * Flow matcher value.
2600 * Flow pattern to translate.
2602 * Item is inner pattern.
/*
 * Translate a VLAN pattern item: TCI is split into VID (12 bits),
 * CFI/DEI (bit 12) and PCP priority (bits 13-15) matcher fields.
 * NOTE(review): lossy extract -- spec/mask defaulting and local
 * declarations are missing from this view.
 */
2605 flow_dv_translate_item_vlan(void *matcher, void *key,
2606 const struct rte_flow_item *item,
2609 const struct rte_flow_item_vlan *vlan_m = item->mask;
2610 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* Default mask: VID only (0x0fff) plus full inner ethertype. */
2611 const struct rte_flow_item_vlan nic_mask = {
2612 .tci = RTE_BE16(0x0fff),
2613 .inner_type = RTE_BE16(0xffff),
2625 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2627 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2629 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2631 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Work on TCI in CPU byte order; value is pre-ANDed with the mask. */
2633 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2634 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of a VLAN tag is always required when this item appears. */
2635 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2636 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2637 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2638 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2639 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2640 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2641 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2642 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2646 * Add IPV4 item to matcher and to the value.
2648 * @param[in, out] matcher
2650 * @param[in, out] key
2651 * Flow matcher value.
2653 * Flow pattern to translate.
2655 * Item is inner pattern.
2657 * The group to insert the rule.
/*
 * Translate an IPv4 pattern item: addresses, ToS (ECN + DSCP split)
 * and next protocol.  'group' selects how ip_version is masked (full
 * nibble vs 0x4 -- extract is missing the branch condition here).
 * NOTE(review): lossy extract -- spec/mask defaulting against
 * nic_mask and local declarations are missing from this view.
 */
2660 flow_dv_translate_item_ipv4(void *matcher, void *key,
2661 const struct rte_flow_item *item,
2662 int inner, uint32_t group)
2664 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2665 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2666 const struct rte_flow_item_ipv4 nic_mask = {
2668 .src_addr = RTE_BE32(0xffffffff),
2669 .dst_addr = RTE_BE32(0xffffffff),
2670 .type_of_service = 0xff,
2671 .next_proto_id = 0xff,
2681 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2683 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2685 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2687 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version match: value is always 4; mask nibble depends on group. */
2690 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2692 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2693 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Addresses stay big-endian; value bytes are pre-ANDed with mask. */
2698 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2699 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2700 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2701 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2702 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2703 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2704 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2705 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2706 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2707 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2708 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2709 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* ToS byte is split: low 2 bits -> ip_ecn, high 6 bits -> ip_dscp. */
2710 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2711 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2712 ipv4_m->hdr.type_of_service);
2713 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2714 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2715 ipv4_m->hdr.type_of_service >> 2);
2716 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2717 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2718 ipv4_m->hdr.next_proto_id);
2719 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2720 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2724 * Add IPV6 item to matcher and to the value.
2726 * @param[in, out] matcher
2728 * @param[in, out] key
2729 * Flow matcher value.
2731 * Flow pattern to translate.
2733 * Item is inner pattern.
2735 * The group to insert the rule.
/*
 * Translate an IPv6 pattern item: 128-bit addresses, vtc_flow split
 * into ECN/DSCP (headers) and flow label (misc), plus next header.
 * NOTE(review): lossy extract -- spec/mask defaulting, local
 * declarations and the inner/outer flow-label shift operands are
 * missing from this view.
 */
2738 flow_dv_translate_item_ipv6(void *matcher, void *key,
2739 const struct rte_flow_item *item,
2740 int inner, uint32_t group)
2742 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2743 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2744 const struct rte_flow_item_ipv6 nic_mask = {
2747 "\xff\xff\xff\xff\xff\xff\xff\xff"
2748 "\xff\xff\xff\xff\xff\xff\xff\xff",
2750 "\xff\xff\xff\xff\xff\xff\xff\xff"
2751 "\xff\xff\xff\xff\xff\xff\xff\xff",
2752 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in misc parameters, not in the L2/L4 headers. */
2759 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2760 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2769 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2771 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2773 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2775 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version match: value is always 6; mask nibble depends on group. */
2778 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2780 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2781 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Copy both 16-byte addresses; value bytes pre-ANDed with the mask. */
2786 size = sizeof(ipv6_m->hdr.dst_addr);
2787 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2788 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2789 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2790 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2791 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2792 for (i = 0; i < size; ++i)
2793 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2794 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2795 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2796 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2797 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2798 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2799 for (i = 0; i < size; ++i)
2800 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow (CPU order): >>20 isolates ECN+, >>22 isolates DSCP. */
2802 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2803 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2804 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2805 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2806 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to inner_/outer_ipv6_flow_label in misc params. */
2810 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2812 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2815 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2817 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2821 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2823 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2824 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2828 * Add TCP item to matcher and to the value.
2830 * @param[in, out] matcher
2832 * @param[in, out] key
2833 * Flow matcher value.
2835 * Flow pattern to translate.
2837 * Item is inner pattern.
/*
 * Translate a TCP pattern item: pins ip_protocol to IPPROTO_TCP, then
 * matches source/destination ports and TCP flags.
 * NOTE(review): lossy extract -- the early-return on NULL spec and
 * local declarations are missing from this view.
 */
2840 flow_dv_translate_item_tcp(void *matcher, void *key,
2841 const struct rte_flow_item *item,
2844 const struct rte_flow_item_tcp *tcp_m = item->mask;
2845 const struct rte_flow_item_tcp *tcp_v = item->spec;
2850 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2852 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2854 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2856 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* A TCP item implies an exact ip_protocol match. */
2858 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2859 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* NULL mask falls back to the rte_flow default TCP mask. */
2863 tcp_m = &rte_flow_item_tcp_mask;
/* Ports are matched in CPU byte order; values pre-ANDed with mask. */
2864 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2865 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2866 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2867 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2868 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2869 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2870 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2871 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2872 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
2873 tcp_m->hdr.tcp_flags);
2874 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2875 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
2879 * Add UDP item to matcher and to the value.
2881 * @param[in, out] matcher
2883 * @param[in, out] key
2884 * Flow matcher value.
2886 * Flow pattern to translate.
2888 * Item is inner pattern.
/*
 * Translate a UDP pattern item: pins ip_protocol to IPPROTO_UDP, then
 * matches source/destination ports.
 * NOTE(review): lossy extract -- the early-return on NULL spec and
 * local declarations are missing from this view.
 */
2891 flow_dv_translate_item_udp(void *matcher, void *key,
2892 const struct rte_flow_item *item,
2895 const struct rte_flow_item_udp *udp_m = item->mask;
2896 const struct rte_flow_item_udp *udp_v = item->spec;
2901 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2903 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2905 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2907 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* A UDP item implies an exact ip_protocol match. */
2909 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2910 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* NULL mask falls back to the rte_flow default UDP mask. */
2914 udp_m = &rte_flow_item_udp_mask;
2915 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2916 rte_be_to_cpu_16(udp_m->hdr.src_port));
2917 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2918 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2919 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2920 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2921 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2922 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2926 * Add GRE item to matcher and to the value.
2928 * @param[in, out] matcher
2930 * @param[in, out] key
2931 * Flow matcher value.
2933 * Flow pattern to translate.
2935 * Item is inner pattern.
/*
 * Translate a GRE pattern item: pins ip_protocol to IPPROTO_GRE and
 * matches the GRE protocol field in misc parameters.
 * NOTE(review): lossy extract -- the early-return on NULL spec and
 * local declarations are missing from this view.
 */
2938 flow_dv_translate_item_gre(void *matcher, void *key,
2939 const struct rte_flow_item *item,
2942 const struct rte_flow_item_gre *gre_m = item->mask;
2943 const struct rte_flow_item_gre *gre_v = item->spec;
/* GRE protocol is carried in misc parameters, not the headers set. */
2946 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2947 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2950 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2952 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2954 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2956 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2958 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2959 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* NULL mask falls back to the rte_flow default GRE mask. */
2963 gre_m = &rte_flow_item_gre_mask;
2964 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2965 rte_be_to_cpu_16(gre_m->protocol));
2966 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2967 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2971 * Add NVGRE item to matcher and to the value.
2973 * @param[in, out] matcher
2975 * @param[in, out] key
2976 * Flow matcher value.
2978 * Flow pattern to translate.
2980 * Item is inner pattern.
/*
 * Translate an NVGRE pattern item: reuses the GRE translation, then
 * matches TNI + flow_id as the high GRE key bytes.
 * NOTE(review): tni_flow_id_m/v dereference nvgre_m/nvgre_v before the
 * default-mask fallback at line 3002 -- if the item has a NULL mask or
 * spec this is a NULL dereference; verify against the full source
 * (upstream later fixed this ordering).
 */
2983 flow_dv_translate_item_nvgre(void *matcher, void *key,
2984 const struct rte_flow_item *item,
2987 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2988 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2989 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2990 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2991 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2992 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* NVGRE shares GRE's ip_protocol/gre_protocol matching. */
2998 flow_dv_translate_item_gre(matcher, key, item, inner);
3002 nvgre_m = &rte_flow_item_nvgre_mask;
/* TNI (3 bytes) + flow_id (1 byte) map onto gre_key_h contiguously. */
3003 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3004 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3005 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3006 memcpy(gre_key_m, tni_flow_id_m, size);
3007 for (i = 0; i < size; ++i)
3008 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3012 * Add VXLAN item to matcher and to the value.
3014 * @param[in, out] matcher
3016 * @param[in, out] key
3017 * Flow matcher value.
3019 * Flow pattern to translate.
3021 * Item is inner pattern.
/*
 * Translate a VXLAN / VXLAN-GPE pattern item: defaults the UDP dport
 * if not already matched, then matches the 3-byte VNI.
 * NOTE(review): lossy extract -- local declarations and the NULL-spec
 * early return are missing from this view.
 */
3024 flow_dv_translate_item_vxlan(void *matcher, void *key,
3025 const struct rte_flow_item *item,
3028 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3029 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3032 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3033 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3041 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3043 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3045 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3047 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known port matching the exact item type. */
3049 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3050 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the dport if a preceding UDP item didn't set one. */
3051 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3052 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3053 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* NULL mask falls back to the rte_flow default VXLAN mask. */
3058 vxlan_m = &rte_flow_item_vxlan_mask;
3059 size = sizeof(vxlan_m->vni);
3060 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3061 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3062 memcpy(vni_m, vxlan_m->vni, size);
3063 for (i = 0; i < size; ++i)
3064 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3068 * Add MPLS item to matcher and to the value.
3070 * @param[in, out] matcher
3072 * @param[in, out] key
3073 * Flow matcher value.
3075 * Flow pattern to translate.
3076 * @param[in] prev_layer
3077 * The protocol layer indicated in previous item.
3079 * Item is inner pattern.
/*
 * Translate an MPLS pattern item.  The carrier protocol (prev_layer)
 * decides both the tunnel-type match (UDP dport / GRE protocol /
 * ip_protocol) and which misc2 MPLS field receives the label.
 * NOTE(review): lossy extract -- break statements, default-case bodies
 * and the inner-MPLS field operands are missing from this view.
 */
3082 flow_dv_translate_item_mpls(void *matcher, void *key,
3083 const struct rte_flow_item *item,
3084 uint64_t prev_layer,
3087 const uint32_t *in_mpls_m = item->mask;
3088 const uint32_t *in_mpls_v = item->spec;
/* NOTE(review): pointer init with 0 -- NULL would be clearer. */
3089 uint32_t *out_mpls_m = 0;
3090 uint32_t *out_mpls_v = 0;
3091 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3092 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3093 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3095 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3096 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3097 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First switch: pin the carrier protocol/port for this MPLS layer. */
3099 switch (prev_layer) {
3100 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3101 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3102 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3103 MLX5_UDP_PORT_MPLS);
3105 case MLX5_FLOW_LAYER_GRE:
3106 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3107 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3108 RTE_ETHER_TYPE_MPLS);
3111 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3112 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* NULL mask falls back to the rte_flow default MPLS mask. */
3119 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second switch: pick the misc2 destination field for the label. */
3120 switch (prev_layer) {
3121 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3123 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3124 outer_first_mpls_over_udp);
3126 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3127 outer_first_mpls_over_udp);
3129 case MLX5_FLOW_LAYER_GRE:
3131 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3132 outer_first_mpls_over_gre);
3134 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3135 outer_first_mpls_over_gre);
3138 /* Inner MPLS not over GRE is not supported. */
3141 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3145 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the 32-bit label word; value pre-ANDed with the mask. */
3151 if (out_mpls_m && out_mpls_v) {
3152 *out_mpls_m = *in_mpls_m;
3153 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3158 * Add META item to matcher
3160 * @param[in, out] matcher
3162 * @param[in, out] key
3163 * Flow matcher value.
3165 * Flow pattern to translate.
3167 * Item is inner pattern.
/*
 * Translate a META pattern item: matches the application-provided
 * metadata against register A in misc parameters 2.
 * NOTE(review): lossy extract -- the surrounding NULL-spec guard is
 * missing from this view; the MLX5_SETs presumably run only when
 * meta_v is non-NULL -- confirm against the full source.
 */
3170 flow_dv_translate_item_meta(void *matcher, void *key,
3171 const struct rte_flow_item *item)
3173 const struct rte_flow_item_meta *meta_m;
3174 const struct rte_flow_item_meta *meta_v;
3176 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3178 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3180 meta_m = (const void *)item->mask;
/* NULL mask falls back to the rte_flow default META mask. */
3182 meta_m = &rte_flow_item_meta_mask;
3183 meta_v = (const void *)item->spec;
3185 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3186 rte_be_to_cpu_32(meta_m->data));
3187 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3188 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3193 * Add source vport match to the specified matcher.
3195 * @param[in, out] matcher
3197 * @param[in, out] key
3198 * Flow matcher value.
3200 * Source vport value to match
/*
 * Match on the packet's source vport (e-switch port) in misc
 * parameters: 'mask' into the matcher, 'port' into the value.
 */
3205 flow_dv_translate_item_source_vport(void *matcher, void *key,
3206 int16_t port, uint16_t mask)
3208 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3209 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3211 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3212 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3216 * Translate port-id item to eswitch match on port-id.
3219 * The devich to configure through.
3220 * @param[in, out] matcher
3222 * @param[in, out] key
3223 * Flow matcher value.
3225 * Flow pattern to translate.
3228 * 0 on success, a negative errno value otherwise.
/*
 * Translate a PORT_ID item to a source-vport match.  A NULL item (or
 * NULL spec) defaults to this device's own port; the DPDK port id is
 * resolved to an e-switch vport via mlx5_port_to_eswitch_info().
 * NOTE(review): lossy extract -- the error return path for 'ret' is
 * missing from this view.
 */
3231 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3232 void *key, const struct rte_flow_item *item)
3234 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3235 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3236 uint16_t mask, val, id;
/* No mask means exact match on the full 16-bit port id. */
3239 mask = pid_m ? pid_m->id : 0xffff;
3240 id = pid_v ? pid_v->id : dev->data->port_id;
3241 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3244 flow_dv_translate_item_source_vport(matcher, key, val, mask);
/* All-zero reference buffer used to test whether a matcher header
 * section is entirely unset. */
3248 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* True when the given header section of match_criteria is all zero
 * (i.e. that criteria group need not be enabled). */
3250 #define HEADER_IS_ZERO(match_criteria, headers) \
3251 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3252 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3255 * Calculate flow matcher enable bitmap.
3257 * @param match_criteria
3258 * Pointer to flow matcher criteria.
3261 * Bitmap of enabled fields.
/*
 * Compute the match_criteria_enable bitmap: one bit per non-zero
 * section of the match criteria (outer, misc, inner, misc2, and --
 * only with DR support compiled in -- misc3).
 */
3264 flow_dv_matcher_enable(uint32_t *match_criteria)
3266 uint8_t match_criteria_enable;
3268 match_criteria_enable =
3269 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3270 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3271 match_criteria_enable |=
3272 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3273 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3274 match_criteria_enable |=
3275 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3276 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3277 match_criteria_enable |=
3278 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3279 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
/* misc_parameters_3 exists only when built against mlx5dv DR. */
3280 #ifdef HAVE_MLX5DV_DR
3281 match_criteria_enable |=
3282 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3283 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3285 return match_criteria_enable;
3292 * @param dev[in, out]
3293 * Pointer to rte_eth_dev structure.
3294 * @param[in] table_id
3297 * Direction of the table.
3298 * @param[in] transfer
3299 * E-Switch or NIC flow.
3301 * pointer to error structure.
3304 * Returns tables resource based on the index, NULL in case of failed.
/*
 * Get (or lazily create, with DR support) the flow table for the
 * given id.  Domain selection: transfer -> FDB, else egress -> TX,
 * else RX.  Takes a reference on the returned table.  Without
 * HAVE_MLX5DV_DR the per-domain array entry is returned directly.
 * NOTE(review): lossy extract -- the transfer/egress branch guards,
 * 'if (!tbl->obj)' checks and returns are missing from this view.
 */
3306 static struct mlx5_flow_tbl_resource *
3307 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3308 uint32_t table_id, uint8_t egress,
3310 struct rte_flow_error *error)
3312 struct mlx5_priv *priv = dev->data->dev_private;
3313 struct mlx5_ibv_shared *sh = priv->sh;
3314 struct mlx5_flow_tbl_resource *tbl;
3316 #ifdef HAVE_MLX5DV_DR
/* Tables live in the shared IB context, one array per domain. */
3318 tbl = &sh->fdb_tbl[table_id];
3320 tbl->obj = mlx5_glue->dr_create_flow_tbl
3321 (sh->fdb_domain, table_id);
3322 } else if (egress) {
3323 tbl = &sh->tx_tbl[table_id];
3325 tbl->obj = mlx5_glue->dr_create_flow_tbl
3326 (sh->tx_domain, table_id);
3328 tbl = &sh->rx_tbl[table_id];
3330 tbl->obj = mlx5_glue->dr_create_flow_tbl
3331 (sh->rx_domain, table_id);
3334 rte_flow_error_set(error, ENOMEM,
3335 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3336 NULL, "cannot create table");
/* Caller owns one reference on the table. */
3339 rte_atomic32_inc(&tbl->refcnt);
/* Non-DR build: no object creation, just hand back the slot. */
3345 return &sh->fdb_tbl[table_id];
3347 return &sh->tx_tbl[table_id];
3349 return &sh->rx_tbl[table_id];
3354 * Release a flow table.
3357 * Table resource to be released.
3360 * Returns 0 if table was released, else return 1;
/*
 * Drop one reference on a flow table; destroys the underlying DR
 * object when the count reaches zero.  Returns 0 when released,
 * 1 while references remain (per the doc comment above).
 * NOTE(review): lossy extract -- NULL guard and returns are missing.
 */
3363 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3367 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3368 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3376 * Register the flow matcher.
3378 * @param dev[in, out]
3379 * Pointer to rte_eth_dev structure.
3380 * @param[in, out] matcher
3381 * Pointer to flow matcher.
3382 * @parm[in, out] dev_flow
3383 * Pointer to the dev_flow.
3385 * pointer to error structure.
3388 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached flow matcher identical to 'matcher' (same crc,
 * priority, direction, group, transfer flag and mask bytes) or create
 * and cache a new one.  On success dev_flow->dv.matcher points at the
 * (ref-counted) cache entry.
 * NOTE(review): lossy extract -- the 'return 0' after a cache hit,
 * the cache_matcher NULL check and several braces are missing from
 * this view; original file line numbers are fused into the text.
 */
3391 flow_dv_matcher_register(struct rte_eth_dev *dev,
3392 struct mlx5_flow_dv_matcher *matcher,
3393 struct mlx5_flow *dev_flow,
3394 struct rte_flow_error *error)
3396 struct mlx5_priv *priv = dev->data->dev_private;
3397 struct mlx5_ibv_shared *sh = priv->sh;
3398 struct mlx5_flow_dv_matcher *cache_matcher;
3399 struct mlx5dv_flow_matcher_attr dv_attr = {
3400 .type = IBV_FLOW_ATTR_NORMAL,
3401 .match_mask = (void *)&matcher->mask,
3403 struct mlx5_flow_tbl_resource *tbl = NULL;
3405 /* Lookup from cache. */
3406 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
/* Cheap fields first; full mask memcmp only if they all agree. */
3407 if (matcher->crc == cache_matcher->crc &&
3408 matcher->priority == cache_matcher->priority &&
3409 matcher->egress == cache_matcher->egress &&
3410 matcher->group == cache_matcher->group &&
3411 matcher->transfer == cache_matcher->transfer &&
3412 !memcmp((const void *)matcher->mask.buf,
3413 (const void *)cache_matcher->mask.buf,
3414 cache_matcher->mask.size)) {
3416 "priority %hd use %s matcher %p: refcnt %d++",
3417 cache_matcher->priority,
3418 cache_matcher->egress ? "tx" : "rx",
3419 (void *)cache_matcher,
3420 rte_atomic32_read(&cache_matcher->refcnt));
3421 rte_atomic32_inc(&cache_matcher->refcnt);
3422 dev_flow->dv.matcher = cache_matcher;
3426 /* Register new matcher. */
3427 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3429 return rte_flow_error_set(error, ENOMEM,
3430 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3431 "cannot allocate matcher memory");
/* The matcher needs its flow table; group id is scaled by factor. */
3432 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3433 matcher->egress, matcher->transfer,
/* Table get failed: free the half-built cache entry. */
3436 rte_free(cache_matcher);
3437 return rte_flow_error_set(error, ENOMEM,
3438 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3439 NULL, "cannot create table");
3441 *cache_matcher = *matcher;
3442 dv_attr.match_criteria_enable =
3443 flow_dv_matcher_enable(cache_matcher->mask.buf);
3444 dv_attr.priority = matcher->priority;
3445 if (matcher->egress)
3446 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3447 cache_matcher->matcher_object =
3448 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3449 if (!cache_matcher->matcher_object) {
3450 rte_free(cache_matcher);
/* Only the DR build took a table reference worth releasing here. */
3451 #ifdef HAVE_MLX5DV_DR
3452 flow_dv_tbl_resource_release(tbl);
3454 return rte_flow_error_set(error, ENOMEM,
3455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3456 NULL, "cannot create matcher");
3458 rte_atomic32_inc(&cache_matcher->refcnt);
3459 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3460 dev_flow->dv.matcher = cache_matcher;
3461 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3462 cache_matcher->priority,
3463 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3464 rte_atomic32_read(&cache_matcher->refcnt));
3465 rte_atomic32_inc(&tbl->refcnt);
3470 * Find existing tag resource or create and register a new one.
3472 * @param dev[in, out]
3473 * Pointer to rte_eth_dev structure.
3474 * @param[in, out] resource
3475 * Pointer to tag resource.
3476 * @parm[in, out] dev_flow
3477 * Pointer to the dev_flow.
3479 * pointer to error structure.
3482 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached tag action with the same tag value, or create and
 * cache a new one.  On success flow->tag_resource points at the
 * (ref-counted) cache entry.
 * NOTE(review): lossy extract -- the 'return 0' statements and the
 * resource->tag argument at line 3514 are missing from this view.
 */
3485 flow_dv_tag_resource_register
3486 (struct rte_eth_dev *dev,
3487 struct mlx5_flow_dv_tag_resource *resource,
3488 struct mlx5_flow *dev_flow,
3489 struct rte_flow_error *error)
3491 struct mlx5_priv *priv = dev->data->dev_private;
3492 struct mlx5_ibv_shared *sh = priv->sh;
3493 struct mlx5_flow_dv_tag_resource *cache_resource;
3495 /* Lookup a matching resource from cache. */
3496 LIST_FOREACH(cache_resource, &sh->tags, next) {
/* Tag value alone identifies the resource. */
3497 if (resource->tag == cache_resource->tag) {
3498 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3499 (void *)cache_resource,
3500 rte_atomic32_read(&cache_resource->refcnt));
3501 rte_atomic32_inc(&cache_resource->refcnt);
3502 dev_flow->flow->tag_resource = cache_resource;
3506 /* Register new resource. */
3507 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3508 if (!cache_resource)
3509 return rte_flow_error_set(error, ENOMEM,
3510 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3511 "cannot allocate resource memory");
3512 *cache_resource = *resource;
3513 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3515 if (!cache_resource->action) {
3516 rte_free(cache_resource);
3517 return rte_flow_error_set(error, ENOMEM,
3518 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3519 NULL, "cannot create action");
/* New entry starts at refcnt 1 held by this flow. */
3521 rte_atomic32_init(&cache_resource->refcnt);
3522 rte_atomic32_inc(&cache_resource->refcnt);
3523 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3524 dev_flow->flow->tag_resource = cache_resource;
3525 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3526 (void *)cache_resource,
3527 rte_atomic32_read(&cache_resource->refcnt));
3535 * Pointer to Ethernet device.
3537 * Pointer to mlx5_flow.
3540 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on a cached tag resource; on the last reference
 * destroy the verbs action and unlink it from the shared tag list.
 * Returns 1 while references remain, 0 when freed (per the doc
 * comment above).  NOTE(review): lossy extract -- the rte_free and
 * return statements are missing from this view.
 */
3543 flow_dv_tag_release(struct rte_eth_dev *dev,
3544 struct mlx5_flow_dv_tag_resource *tag)
3547 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3548 dev->data->port_id, (void *)tag,
3549 rte_atomic32_read(&tag->refcnt));
3550 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3551 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3552 LIST_REMOVE(tag, next);
3553 DRV_LOG(DEBUG, "port %u tag %p: removed",
3554 dev->data->port_id, (void *)tag);
3562 * Translate port ID action to vport.
3565 * Pointer to rte_eth_dev structure.
3567 * Pointer to the port ID action.
3568 * @param[out] dst_port_id
3569 * The target port ID.
3571 * Pointer to the error structure.
3574 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Resolve a PORT_ID action to the target e-switch vport id.  With
 * conf->original set, the device's own port is used instead of
 * conf->id.  Writes the vport into *dst_port_id.
 * NOTE(review): lossy extract -- the 'if (ret)' guard before the
 * error return and the final 'return 0' are missing from this view.
 */
3577 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3578 const struct rte_flow_action *action,
3579 uint32_t *dst_port_id,
3580 struct rte_flow_error *error)
3585 const struct rte_flow_action_port_id *conf =
3586 (const struct rte_flow_action_port_id *)action->conf;
3588 port = conf->original ? dev->data->port_id : conf->id;
3589 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3591 return rte_flow_error_set(error, -ret,
3592 RTE_FLOW_ERROR_TYPE_ACTION,
3594 "No eswitch info was found for port");
3595 *dst_port_id = port_id;
3600 * Fill the flow with DV spec.
3603 * Pointer to rte_eth_dev structure.
3604 * @param[in, out] dev_flow
3605 * Pointer to the sub flow.
3607 * Pointer to the flow attributes.
3609 * Pointer to the list of items.
3610 * @param[in] actions
3611 * Pointer to the list of actions.
3613 * Pointer to the error structure.
3616 * 0 on success, a negative errno value otherwise and rte_errno is set.
3619 flow_dv_translate(struct rte_eth_dev *dev,
3620 struct mlx5_flow *dev_flow,
3621 const struct rte_flow_attr *attr,
3622 const struct rte_flow_item items[],
3623 const struct rte_flow_action actions[],
3624 struct rte_flow_error *error)
3626 struct mlx5_priv *priv = dev->data->dev_private;
3627 struct rte_flow *flow = dev_flow->flow;
3628 uint64_t item_flags = 0;
3629 uint64_t last_item = 0;
3630 uint64_t action_flags = 0;
3631 uint64_t priority = attr->priority;
3632 struct mlx5_flow_dv_matcher matcher = {
3634 .size = sizeof(matcher.mask.buf),
3638 bool actions_end = false;
3639 struct mlx5_flow_dv_modify_hdr_resource res = {
3640 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3641 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3643 union flow_dv_attr flow_attr = { .attr = 0 };
3644 struct mlx5_flow_dv_tag_resource tag_resource;
3645 uint32_t modify_action_position = UINT32_MAX;
3646 void *match_mask = matcher.mask.buf;
3647 void *match_value = dev_flow->dv.value.buf;
3649 flow->group = attr->group;
3651 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3652 if (priority == MLX5_FLOW_PRIO_RSVD)
3653 priority = priv->config.flow_prio - 1;
3654 for (; !actions_end ; actions++) {
3655 const struct rte_flow_action_queue *queue;
3656 const struct rte_flow_action_rss *rss;
3657 const struct rte_flow_action *action = actions;
3658 const struct rte_flow_action_count *count = action->conf;
3659 const uint8_t *rss_key;
3660 const struct rte_flow_action_jump *jump_data;
3661 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3662 struct mlx5_flow_tbl_resource *tbl;
3663 uint32_t port_id = 0;
3664 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3666 switch (actions->type) {
3667 case RTE_FLOW_ACTION_TYPE_VOID:
3669 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3670 if (flow_dv_translate_action_port_id(dev, action,
3673 port_id_resource.port_id = port_id;
3674 if (flow_dv_port_id_action_resource_register
3675 (dev, &port_id_resource, dev_flow, error))
3677 dev_flow->dv.actions[actions_n++] =
3678 dev_flow->dv.port_id_action->action;
3679 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3681 case RTE_FLOW_ACTION_TYPE_FLAG:
3683 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3684 if (!flow->tag_resource)
3685 if (flow_dv_tag_resource_register
3686 (dev, &tag_resource, dev_flow, error))
3688 dev_flow->dv.actions[actions_n++] =
3689 flow->tag_resource->action;
3690 action_flags |= MLX5_FLOW_ACTION_FLAG;
3692 case RTE_FLOW_ACTION_TYPE_MARK:
3693 tag_resource.tag = mlx5_flow_mark_set
3694 (((const struct rte_flow_action_mark *)
3695 (actions->conf))->id);
3696 if (!flow->tag_resource)
3697 if (flow_dv_tag_resource_register
3698 (dev, &tag_resource, dev_flow, error))
3700 dev_flow->dv.actions[actions_n++] =
3701 flow->tag_resource->action;
3702 action_flags |= MLX5_FLOW_ACTION_MARK;
3704 case RTE_FLOW_ACTION_TYPE_DROP:
3705 action_flags |= MLX5_FLOW_ACTION_DROP;
3707 case RTE_FLOW_ACTION_TYPE_QUEUE:
3708 queue = actions->conf;
3709 flow->rss.queue_num = 1;
3710 (*flow->queue)[0] = queue->index;
3711 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3713 case RTE_FLOW_ACTION_TYPE_RSS:
3714 rss = actions->conf;
3716 memcpy((*flow->queue), rss->queue,
3717 rss->queue_num * sizeof(uint16_t));
3718 flow->rss.queue_num = rss->queue_num;
3719 /* NULL RSS key indicates default RSS key. */
3720 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3721 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3722 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3723 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3724 flow->rss.level = rss->level;
3725 action_flags |= MLX5_FLOW_ACTION_RSS;
3727 case RTE_FLOW_ACTION_TYPE_COUNT:
3728 if (!priv->config.devx) {
3729 rte_errno = ENOTSUP;
3732 flow->counter = flow_dv_counter_new(dev, count->shared,
3734 if (flow->counter == NULL)
3736 dev_flow->dv.actions[actions_n++] =
3737 flow->counter->action;
3738 action_flags |= MLX5_FLOW_ACTION_COUNT;
3741 if (rte_errno == ENOTSUP)
3742 return rte_flow_error_set
3744 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3746 "count action not supported");
3748 return rte_flow_error_set
3750 RTE_FLOW_ERROR_TYPE_ACTION,
3752 "cannot create counter"
3754 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3755 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3756 if (flow_dv_create_action_l2_encap(dev, actions,
3761 dev_flow->dv.actions[actions_n++] =
3762 dev_flow->dv.encap_decap->verbs_action;
3763 action_flags |= actions->type ==
3764 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3765 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3766 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3768 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3769 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3770 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3774 dev_flow->dv.actions[actions_n++] =
3775 dev_flow->dv.encap_decap->verbs_action;
3776 action_flags |= actions->type ==
3777 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3778 MLX5_FLOW_ACTION_VXLAN_DECAP :
3779 MLX5_FLOW_ACTION_NVGRE_DECAP;
3781 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3782 /* Handle encap with preceding decap. */
3783 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3784 if (flow_dv_create_action_raw_encap
3785 (dev, actions, dev_flow, attr, error))
3787 dev_flow->dv.actions[actions_n++] =
3788 dev_flow->dv.encap_decap->verbs_action;
3790 /* Handle encap without preceding decap. */
3791 if (flow_dv_create_action_l2_encap
3792 (dev, actions, dev_flow, attr->transfer,
3795 dev_flow->dv.actions[actions_n++] =
3796 dev_flow->dv.encap_decap->verbs_action;
3798 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3800 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3801 /* Check if this decap is followed by encap. */
3802 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3803 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3806 /* Handle decap only if it isn't followed by encap. */
3807 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3808 if (flow_dv_create_action_l2_decap
3809 (dev, dev_flow, attr->transfer, error))
3811 dev_flow->dv.actions[actions_n++] =
3812 dev_flow->dv.encap_decap->verbs_action;
3814 /* If decap is followed by encap, handle it at encap. */
3815 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3817 case RTE_FLOW_ACTION_TYPE_JUMP:
3818 jump_data = action->conf;
3819 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3822 attr->transfer, error);
3824 return rte_flow_error_set
3826 RTE_FLOW_ERROR_TYPE_ACTION,
3828 "cannot create jump action.");
3829 jump_tbl_resource.tbl = tbl;
3830 if (flow_dv_jump_tbl_resource_register
3831 (dev, &jump_tbl_resource, dev_flow, error)) {
3832 flow_dv_tbl_resource_release(tbl);
3833 return rte_flow_error_set
3835 RTE_FLOW_ERROR_TYPE_ACTION,
3837 "cannot create jump action.");
3839 dev_flow->dv.actions[actions_n++] =
3840 dev_flow->dv.jump->action;
3841 action_flags |= MLX5_FLOW_ACTION_JUMP;
3843 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3844 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3845 if (flow_dv_convert_action_modify_mac(&res, actions,
3848 action_flags |= actions->type ==
3849 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3850 MLX5_FLOW_ACTION_SET_MAC_SRC :
3851 MLX5_FLOW_ACTION_SET_MAC_DST;
3853 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3854 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3855 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3858 action_flags |= actions->type ==
3859 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3860 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3861 MLX5_FLOW_ACTION_SET_IPV4_DST;
3863 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3864 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3865 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3868 action_flags |= actions->type ==
3869 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3870 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3871 MLX5_FLOW_ACTION_SET_IPV6_DST;
3873 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3874 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3875 if (flow_dv_convert_action_modify_tp(&res, actions,
3879 action_flags |= actions->type ==
3880 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3881 MLX5_FLOW_ACTION_SET_TP_SRC :
3882 MLX5_FLOW_ACTION_SET_TP_DST;
3884 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3885 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3889 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3891 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3892 if (flow_dv_convert_action_modify_ttl(&res, actions,
3896 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3898 case RTE_FLOW_ACTION_TYPE_END:
3900 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3901 /* create modify action if needed. */
3902 if (flow_dv_modify_hdr_resource_register
3907 dev_flow->dv.actions[modify_action_position] =
3908 dev_flow->dv.modify_hdr->verbs_action;
3914 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3915 modify_action_position == UINT32_MAX)
3916 modify_action_position = actions_n++;
3918 dev_flow->dv.actions_n = actions_n;
3919 flow->actions = action_flags;
3920 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3921 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3923 switch (items->type) {
3924 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3925 flow_dv_translate_item_port_id(dev, match_mask,
3926 match_value, items);
3927 last_item = MLX5_FLOW_ITEM_PORT_ID;
3929 case RTE_FLOW_ITEM_TYPE_ETH:
3930 flow_dv_translate_item_eth(match_mask, match_value,
3932 matcher.priority = MLX5_PRIORITY_MAP_L2;
3933 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3934 MLX5_FLOW_LAYER_OUTER_L2;
3936 case RTE_FLOW_ITEM_TYPE_VLAN:
3937 flow_dv_translate_item_vlan(match_mask, match_value,
3939 matcher.priority = MLX5_PRIORITY_MAP_L2;
3940 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3941 MLX5_FLOW_LAYER_INNER_VLAN) :
3942 (MLX5_FLOW_LAYER_OUTER_L2 |
3943 MLX5_FLOW_LAYER_OUTER_VLAN);
3945 case RTE_FLOW_ITEM_TYPE_IPV4:
3946 flow_dv_translate_item_ipv4(match_mask, match_value,
3947 items, tunnel, attr->group);
3948 matcher.priority = MLX5_PRIORITY_MAP_L3;
3949 dev_flow->dv.hash_fields |=
3950 mlx5_flow_hashfields_adjust
3952 MLX5_IPV4_LAYER_TYPES,
3953 MLX5_IPV4_IBV_RX_HASH);
3954 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3955 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3957 case RTE_FLOW_ITEM_TYPE_IPV6:
3958 flow_dv_translate_item_ipv6(match_mask, match_value,
3959 items, tunnel, attr->group);
3960 matcher.priority = MLX5_PRIORITY_MAP_L3;
3961 dev_flow->dv.hash_fields |=
3962 mlx5_flow_hashfields_adjust
3964 MLX5_IPV6_LAYER_TYPES,
3965 MLX5_IPV6_IBV_RX_HASH);
3966 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3967 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3969 case RTE_FLOW_ITEM_TYPE_TCP:
3970 flow_dv_translate_item_tcp(match_mask, match_value,
3972 matcher.priority = MLX5_PRIORITY_MAP_L4;
3973 dev_flow->dv.hash_fields |=
3974 mlx5_flow_hashfields_adjust
3975 (dev_flow, tunnel, ETH_RSS_TCP,
3976 IBV_RX_HASH_SRC_PORT_TCP |
3977 IBV_RX_HASH_DST_PORT_TCP);
3978 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3979 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3981 case RTE_FLOW_ITEM_TYPE_UDP:
3982 flow_dv_translate_item_udp(match_mask, match_value,
3984 matcher.priority = MLX5_PRIORITY_MAP_L4;
3985 dev_flow->dv.hash_fields |=
3986 mlx5_flow_hashfields_adjust
3987 (dev_flow, tunnel, ETH_RSS_UDP,
3988 IBV_RX_HASH_SRC_PORT_UDP |
3989 IBV_RX_HASH_DST_PORT_UDP);
3990 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3991 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3993 case RTE_FLOW_ITEM_TYPE_GRE:
3994 flow_dv_translate_item_gre(match_mask, match_value,
3996 last_item = MLX5_FLOW_LAYER_GRE;
3998 case RTE_FLOW_ITEM_TYPE_NVGRE:
3999 flow_dv_translate_item_nvgre(match_mask, match_value,
4001 last_item = MLX5_FLOW_LAYER_GRE;
4003 case RTE_FLOW_ITEM_TYPE_VXLAN:
4004 flow_dv_translate_item_vxlan(match_mask, match_value,
4006 last_item = MLX5_FLOW_LAYER_VXLAN;
4008 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4009 flow_dv_translate_item_vxlan(match_mask, match_value,
4011 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4013 case RTE_FLOW_ITEM_TYPE_MPLS:
4014 flow_dv_translate_item_mpls(match_mask, match_value,
4015 items, last_item, tunnel);
4016 last_item = MLX5_FLOW_LAYER_MPLS;
4018 case RTE_FLOW_ITEM_TYPE_META:
4019 flow_dv_translate_item_meta(match_mask, match_value,
4021 last_item = MLX5_FLOW_ITEM_METADATA;
4026 item_flags |= last_item;
4029 * In case of ingress traffic when E-Switch mode is enabled,
4030 * we have two cases where we need to set the source port manually.
4031 * The first one, is in case of Nic steering rule, and the second is
4032 * E-Switch rule where no port_id item was found. In both cases
4033 * the source port is set according the current port in use.
4035 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4036 (priv->representor || priv->master)) {
4037 if (flow_dv_translate_item_port_id(dev, match_mask,
4041 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4042 dev_flow->dv.value.buf));
4043 dev_flow->layers = item_flags;
4044 /* Register matcher. */
4045 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4047 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4049 matcher.egress = attr->egress;
4050 matcher.group = attr->group;
4051 matcher.transfer = attr->transfer;
4052 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4058 * Apply the flow to the NIC.
4061 * Pointer to the Ethernet device structure.
4062 * @param[in, out] flow
4063 * Pointer to flow structure.
4065 * Pointer to error structure.
4068 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): this excerpt elides several original lines (the declarations
 * of 'n', 'err', 'dv' assignment, and the error-unwind labels); comments
 * below describe only what the visible code shows.
 */
4071 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4072 struct rte_flow_error *error)
4074 struct mlx5_flow_dv *dv;
4075 struct mlx5_flow *dev_flow;
4076 struct mlx5_priv *priv = dev->data->dev_private;
/* Create a HW rule for every device sub-flow attached to this rte_flow. */
4080 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4083 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Transfer (E-Switch) drop uses the shared esw drop action; otherwise a
 * drop hash Rx queue object is created per device flow. */
4084 if (flow->transfer) {
4085 dv->actions[n++] = priv->sh->esw_drop_action;
4087 dv->hrxq = mlx5_hrxq_drop_new(dev);
4091 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4093 "cannot get drop hash queue");
4096 dv->actions[n++] = dv->hrxq->action;
4098 } else if (flow->actions &
4099 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4100 struct mlx5_hrxq *hrxq;
/* Reuse an existing hash Rx queue matching key/fields/queues ... */
4102 hrxq = mlx5_hrxq_get(dev, flow->key,
4103 MLX5_RSS_HASH_KEY_LEN,
4106 flow->rss.queue_num);
/* ... or create a new one; tunnel bit derived from matched layers. */
4108 hrxq = mlx5_hrxq_new
4109 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4110 dv->hash_fields, (*flow->queue),
4111 flow->rss.queue_num,
4112 !!(dev_flow->layers &
4113 MLX5_FLOW_LAYER_TUNNEL));
4117 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4118 "cannot get hash queue");
4122 dv->actions[n++] = dv->hrxq->action;
/* Instantiate the rule in HW via the matcher object and 'n' actions. */
4125 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4126 (void *)&dv->value, n,
4129 rte_flow_error_set(error, errno,
4130 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4132 "hardware refuses to create flow");
/* Error path: undo the hrxq references taken above for every sub-flow. */
4138 err = rte_errno; /* Save rte_errno before cleanup. */
4139 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4140 struct mlx5_flow_dv *dv = &dev_flow->dv;
4142 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4143 mlx5_hrxq_drop_release(dev);
4145 mlx5_hrxq_release(dev, dv->hrxq);
4149 rte_errno = err; /* Restore rte_errno. */
4154 * Release the flow matcher.
4157 * Pointer to Ethernet device.
4159 * Pointer to mlx5_flow.
4162 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on the flow's matcher; destroy it on last release.
 * NOTE(review): the 'else' keyword line and the return statements are
 * elided in this excerpt. */
4165 flow_dv_matcher_release(struct rte_eth_dev *dev,
4166 struct mlx5_flow *flow)
4168 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4169 struct mlx5_priv *priv = dev->data->dev_private;
4170 struct mlx5_ibv_shared *sh = priv->sh;
4171 struct mlx5_flow_tbl_resource *tbl;
4173 assert(matcher->matcher_object);
4174 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4175 dev->data->port_id, (void *)matcher,
4176 rte_atomic32_read(&matcher->refcnt));
/* Last reference: destroy the HW matcher, unlink it, and release the
 * owning table (egress matchers live in tx_tbl, others in rx_tbl). */
4177 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4178 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4179 (matcher->matcher_object));
4180 LIST_REMOVE(matcher, next);
4181 if (matcher->egress)
4182 tbl = &sh->tx_tbl[matcher->group];
4184 tbl = &sh->rx_tbl[matcher->group];
4185 flow_dv_tbl_resource_release(tbl);
4187 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4188 dev->data->port_id, (void *)matcher);
4195 * Release an encap/decap resource.
4198 * Pointer to mlx5_flow.
4201 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on the cached encap/decap action; on the last
 * reference destroy the verbs action, unlink the cache entry and free it.
 * NOTE(review): the return statements are elided in this excerpt. */
4204 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4206 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4207 flow->dv.encap_decap;
4209 assert(cache_resource->verbs_action);
4210 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4211 (void *)cache_resource,
4212 rte_atomic32_read(&cache_resource->refcnt));
4213 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4214 claim_zero(mlx5_glue->destroy_flow_action
4215 (cache_resource->verbs_action));
4216 LIST_REMOVE(cache_resource, next);
4217 rte_free(cache_resource);
4218 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4219 (void *)cache_resource);
4226 * Release an jump to table action resource.
4229 * Pointer to mlx5_flow.
4232 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on the cached jump-to-table action; on the last
 * reference destroy the action, unlink the entry, release the target
 * table reference, and free the cache entry.
 * NOTE(review): the initializer of 'cache_resource' (presumably
 * flow->dv.jump) and the return statements are elided in this excerpt. */
4235 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4237 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4240 assert(cache_resource->action);
4241 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4242 (void *)cache_resource,
4243 rte_atomic32_read(&cache_resource->refcnt));
4244 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4245 claim_zero(mlx5_glue->destroy_flow_action
4246 (cache_resource->action));
4247 LIST_REMOVE(cache_resource, next);
/* Also release the table the jump action pointed at. */
4248 flow_dv_tbl_resource_release(cache_resource->tbl);
4249 rte_free(cache_resource);
4250 DRV_LOG(DEBUG, "jump table resource %p: removed",
4251 (void *)cache_resource);
4258 * Release a modify-header resource.
4261 * Pointer to mlx5_flow.
4264 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on the cached modify-header action; on the last
 * reference destroy the verbs action, unlink and free the cache entry.
 * NOTE(review): the return statements are elided in this excerpt. */
4267 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4269 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4270 flow->dv.modify_hdr;
4272 assert(cache_resource->verbs_action);
4273 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4274 (void *)cache_resource,
4275 rte_atomic32_read(&cache_resource->refcnt));
4276 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4277 claim_zero(mlx5_glue->destroy_flow_action
4278 (cache_resource->verbs_action));
4279 LIST_REMOVE(cache_resource, next);
4280 rte_free(cache_resource);
4281 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4282 (void *)cache_resource);
4289 * Release port ID action resource.
4292 * Pointer to mlx5_flow.
4295 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on the cached port-ID action; on the last reference
 * destroy the action, unlink and free the cache entry.
 * NOTE(review): the return statements are elided in this excerpt. */
4298 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4300 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4301 flow->dv.port_id_action;
4303 assert(cache_resource->action);
4304 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4305 (void *)cache_resource,
4306 rte_atomic32_read(&cache_resource->refcnt));
4307 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4308 claim_zero(mlx5_glue->destroy_flow_action
4309 (cache_resource->action));
4310 LIST_REMOVE(cache_resource, next);
4311 rte_free(cache_resource);
4312 DRV_LOG(DEBUG, "port id action resource %p: removed",
4313 (void *)cache_resource);
4320 * Remove the flow from the NIC but keeps it in memory.
4323 * Pointer to Ethernet device.
4324 * @param[in, out] flow
4325 * Pointer to flow structure.
/* Remove the flow's HW rules and release its hash Rx queue references,
 * keeping the flow structures in memory so it can be re-applied.
 * NOTE(review): the 'dv = &dev_flow->dv;' assignment and some null
 * checks are elided in this excerpt. */
4328 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4330 struct mlx5_flow_dv *dv;
4331 struct mlx5_flow *dev_flow;
4335 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4338 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
/* Drop flows use the shared drop hrxq; others release their own. */
4342 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4343 mlx5_hrxq_drop_release(dev);
4345 mlx5_hrxq_release(dev, dv->hrxq);
4352 * Remove the flow from the NIC and the memory.
4355 * Pointer to the Ethernet device structure.
4356 * @param[in, out] flow
4357 * Pointer to flow structure.
/* Fully destroy the flow: detach from HW, release the counter and tag
 * resources, then free every device sub-flow and its cached resources. */
4360 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4362 struct mlx5_flow *dev_flow;
/* First detach the rules from HW (keeps structures alive). */
4366 flow_dv_remove(dev, flow);
4367 if (flow->counter) {
4368 flow_dv_counter_release(flow->counter);
4369 flow->counter = NULL;
4371 if (flow->tag_resource) {
4372 flow_dv_tag_release(dev, flow->tag_resource);
4373 flow->tag_resource = NULL;
/* Unlink each sub-flow and release whichever cached resources it holds. */
4375 while (!LIST_EMPTY(&flow->dev_flows)) {
4376 dev_flow = LIST_FIRST(&flow->dev_flows);
4377 LIST_REMOVE(dev_flow, next);
4378 if (dev_flow->dv.matcher)
4379 flow_dv_matcher_release(dev, dev_flow);
4380 if (dev_flow->dv.encap_decap)
4381 flow_dv_encap_decap_resource_release(dev_flow);
4382 if (dev_flow->dv.modify_hdr)
4383 flow_dv_modify_hdr_resource_release(dev_flow);
4384 if (dev_flow->dv.jump)
4385 flow_dv_jump_tbl_resource_release(dev_flow);
4386 if (dev_flow->dv.port_id_action)
4387 flow_dv_port_id_action_resource_release(dev_flow);
4393 * Query a dv flow rule for its statistics via devx.
4396 * Pointer to Ethernet device.
4398 * Pointer to the sub flow.
4400 * data retrieved by the query.
4402 * Perform verbose error reporting if not NULL.
4405 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Query the flow's DevX counter and report delta hits/bytes since the
 * last reset into the rte_flow_query_count structure in 'data'.
 * NOTE(review): declarations of 'err', 'pkts', 'bytes' and some braces
 * are elided in this excerpt. */
4408 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4409 void *data, struct rte_flow_error *error)
4411 struct mlx5_priv *priv = dev->data->dev_private;
4412 struct rte_flow_query_count *qc = data;
/* Counters require DevX support on this device. */
4417 if (!priv->config.devx)
4418 return rte_flow_error_set(error, ENOTSUP,
4419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4421 "counters are not supported");
4422 if (flow->counter) {
4423 err = mlx5_devx_cmd_flow_counter_query
4424 (flow->counter->dcs,
4425 qc->reset, &pkts, &bytes);
4427 return rte_flow_error_set
4429 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4431 "cannot read counters");
/* Report deltas relative to the values saved at the last reset. */
4434 qc->hits = pkts - flow->counter->hits;
4435 qc->bytes = bytes - flow->counter->bytes;
/* On reset, remember the absolute HW values as the new baseline. */
4437 flow->counter->hits = pkts;
4438 flow->counter->bytes = bytes;
/* No counter attached to this flow. */
4442 return rte_flow_error_set(error, EINVAL,
4443 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4445 "counters are not available");
4451 * @see rte_flow_query()
/* rte_flow query entry point for the DV engine; only the COUNT action is
 * handled, any other non-VOID action is rejected with ENOTSUP.
 * NOTE(review): parameters are marked __rte_unused yet used below —
 * presumably a leftover; 'ret' declaration and break/return lines are
 * elided in this excerpt. */
4455 flow_dv_query(struct rte_eth_dev *dev,
4456 struct rte_flow *flow __rte_unused,
4457 const struct rte_flow_action *actions __rte_unused,
4458 void *data __rte_unused,
4459 struct rte_flow_error *error __rte_unused)
4463 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4464 switch (actions->type) {
4465 case RTE_FLOW_ACTION_TYPE_VOID:
4467 case RTE_FLOW_ACTION_TYPE_COUNT:
4468 ret = flow_dv_query_count(dev, flow, data, error);
4471 return rte_flow_error_set(error, ENOTSUP,
4472 RTE_FLOW_ERROR_TYPE_ACTION,
4474 "action not supported");
4481 * Mutex-protected thunk to flow_dv_translate().
/* Mutex-protected thunk: serialize flow_dv_translate() under the shared
 * context lock. NOTE(review): 'ret' declaration and the return statement
 * are elided in this excerpt. */
4484 flow_d_translate(struct rte_eth_dev *dev,
4485 struct mlx5_flow *dev_flow,
4486 const struct rte_flow_attr *attr,
4487 const struct rte_flow_item items[],
4488 const struct rte_flow_action actions[],
4489 struct rte_flow_error *error)
4493 flow_d_shared_lock(dev);
4494 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4495 flow_d_shared_unlock(dev);
4500 * Mutex-protected thunk to flow_dv_apply().
/* Mutex-protected thunk: serialize flow_dv_apply() under the shared
 * context lock. NOTE(review): 'ret' declaration and the return statement
 * are elided in this excerpt. */
4503 flow_d_apply(struct rte_eth_dev *dev,
4504 struct rte_flow *flow,
4505 struct rte_flow_error *error)
4509 flow_d_shared_lock(dev);
4510 ret = flow_dv_apply(dev, flow, error);
4511 flow_d_shared_unlock(dev);
4516 * Mutex-protected thunk to flow_dv_remove().
/* Mutex-protected thunk: serialize flow_dv_remove() under the shared
 * context lock. */
4519 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4521 flow_d_shared_lock(dev);
4522 flow_dv_remove(dev, flow);
4523 flow_d_shared_unlock(dev);
4527 * Mutex-protected thunk to flow_dv_destroy().
/* Mutex-protected thunk: serialize flow_dv_destroy() under the shared
 * context lock. */
4530 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4532 flow_d_shared_lock(dev);
4533 flow_dv_destroy(dev, flow);
4534 flow_d_shared_unlock(dev);
/*
 * Driver-ops vtable for the Direct Verbs flow engine. Callbacks that
 * touch shared (multi-port) state go through the flow_d_* thunks above,
 * which take the shared context lock; validate/prepare/query do not.
 */
4537 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4538 .validate = flow_dv_validate,
4539 .prepare = flow_dv_prepare,
4540 .translate = flow_d_translate,
4541 .apply = flow_d_apply,
4542 .remove = flow_d_remove,
4543 .destroy = flow_d_destroy,
4544 .query = flow_dv_query,
4547 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */