/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
 * Initialize flow attributes structure according to flow items' types.
 *   Pointer to item specification.
 *   Pointer to flow attributes structure.
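 *
 * A minimal usage sketch (illustrative only; assumes the anonymous
 * bit-field layout of union flow_dv_attr from mlx5_flow.h and an
 * RTE_FLOW_ITEM_TYPE_END-terminated "items" array):
 *
 * @code
 * union flow_dv_attr attr = { .attr = 0 };
 *
 * flow_dv_attr_init(items, &attr);
 * if (attr.udp)
 *	field = modify_udp;
 * @endcode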
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_UDP:
		case RTE_FLOW_ITEM_TYPE_TCP:
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
struct field_modify_info modify_eth[] = {
	{4, 0, MLX5_MODI_OUT_DMAC_47_16},
	{2, 4, MLX5_MODI_OUT_DMAC_15_0},
	{4, 6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
struct field_modify_info modify_ipv4[] = {
	{1, 8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
struct field_modify_info modify_ipv6[] = {
	{1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4, 8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * attached.
 *   Pointer to the rte_eth_dev structure.
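 *
 * A typical (hypothetical) caller brackets updates to shared resources
 * with the lock/unlock pair defined below:
 *
 * @code
 * flow_d_shared_lock(dev);
 * ret = update_shared_resources(priv->sh); // hypothetical helper
 * flow_d_shared_unlock(dev);
 * @endcode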
flow_d_shared_lock(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	if (sh->dv_refcnt > 1) {
		ret = pthread_mutex_lock(&sh->dv_mutex);
flow_d_shared_unlock(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	if (sh->dv_refcnt > 1) {
		ret = pthread_mutex_unlock(&sh->dv_mutex);
 * Convert modify-header action to DV specification.
 *   Pointer to item specification.
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Type of modification.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
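 *
 * For instance (a hypothetical caller), a SET of only the IPv4 source
 * address produces a single 32-bit modification command via modify_ipv4:
 *
 * @code
 * struct rte_flow_item_ipv4 v = { .hdr.src_addr = RTE_BE32(0x0a000001) };
 * struct rte_flow_item_ipv4 m = { .hdr.src_addr = RTE_BE32(0xffffffff) };
 * struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &v, .mask = &m,
 * };
 *
 * ret = flow_dv_convert_modify_action(&item, modify_ipv4, resource,
 *				       MLX5_MODIFICATION_TYPE_SET, error);
 * @endcode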
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      struct rte_flow_error *error)
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	while (field->size) {
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (resource->actions_num != i)
			resource->actions_num = i;
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"invalid modification flow item");
 * Convert modify-header set IPv4 address action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
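 *
 * The expected action layout, sketched for reference (values are
 * hypothetical; the address is given in network byte order):
 *
 * @code
 * struct rte_flow_action_set_ipv4 conf = {
 *	.ipv4_addr = RTE_BE32(0xc0a80101), // 192.168.1.1
 * };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *	.conf = &conf,
 * };
 * @endcode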
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set IPv6 address action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set MAC address action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
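 *
 * The matching action layout, for reference (the address below is a
 * hypothetical locally administered MAC):
 *
 * @code
 * struct rte_flow_action_set_mac conf = {
 *	.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 * };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
 *	.conf = &conf,
 * };
 * @endcode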
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;
	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set TP action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to rte_flow_item objects list.
 *   Pointer to flow attributes structure.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
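 *
 * The port is expected in network byte order; a hypothetical layout:
 *
 * @code
 * struct rte_flow_action_set_tp conf = { .port = RTE_BE16(4789) };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_SET_TP_DST,
 *	.conf = &conf,
 * };
 * @endcode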
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;
		flow_dv_attr_init(items, attr);
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.mask = &udp_mask;
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header set TTL action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to rte_flow_item objects list.
 *   Pointer to flow attributes structure.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
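 *
 * A reference action layout (the TTL value is hypothetical):
 *
 * @code
 * struct rte_flow_action_set_ttl conf = { .ttl_value = 64 };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_SET_TTL,
 *	.conf = &conf,
 * };
 * @endcode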
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;
		flow_dv_attr_init(items, attr);
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.mask = &ipv4_mask;
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
 * Convert modify-header decrement TTL action to DV specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 *   Pointer to action specification.
 *   Pointer to rte_flow_item objects list.
 *   Pointer to flow attributes structure.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;
		flow_dv_attr_init(items, attr);
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.mask = &ipv4_mask;
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
 * Validate META item.
 *   Pointer to the rte_eth_dev structure.
 *   Item specification.
 *   Attributes of flow that includes this item.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
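 *
 * A pattern item this function accepts, sketched with hypothetical data
 * (requires DEV_TX_OFFLOAD_MATCH_METADATA and a non-ingress attribute):
 *
 * @code
 * struct rte_flow_item_meta spec = { .data = RTE_BE32(0xcafe) };
 * struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_META,
 *	.spec = &spec,
 *	.mask = &rte_flow_item_meta_mask,
 * };
 * @endcode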
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  "match on metadata offload "
					  "configuration is off for this port");
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be empty");
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be zero");
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "pattern not supported for ingress");
 * Validate vport item.
 *   Pointer to the rte_eth_dev structure.
 *   Item specification.
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-field that holds the items detected until now.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
	uint16_t esw_domain_id;
	uint16_t item_port_esw_domain_id;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  "no support for partial mask on"
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
	ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "failed to obtain E-Switch info");
	if (item_port_esw_domain_id != esw_domain_id)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
 * Validate count action.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	if (!priv->config.devx)
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return rte_flow_error_set
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       "count action not supported");
 * Validate the L2 encap action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the encap action.
 *   Pointer to flow attributes.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
 * Validate the L2 decap action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to flow attributes.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  "decap action not supported for "
 * Validate the raw encap action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the encap action.
 *   Pointer to flow attributes.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (!attr->transfer && attr->ingress &&
	    !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
 * Validate the raw decap action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the decap action.
 *   Pointer to flow attributes.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
	/* decap action is valid on egress only if it is followed by encap */
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					NULL, "decap action not supported"
 * Find existing encap/decap resource or create and register a new one.
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 *   Pointer to error structure.
 *   0 on success otherwise -errno and errno is set.
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_domain *domain;
	resource->flags = flow->group ? 0 : 1;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
		domain = sh->tx_domain;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Find existing table jump resource or create and register a new one.
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 *   Pointer to error structure.
 *   0 on success otherwise -errno and errno is set.
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_jump_tbl_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
		if (resource->tbl == cache_resource->tbl) {
			DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.jump = cache_resource;
	/* Register new jump table resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_flow_tbl
		(resource->tbl->obj);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
	dev_flow->dv.jump = cache_resource;
	DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Find existing table port ID resource or create and register a new one.
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 *   Pointer to error structure.
 *   0 on success otherwise -errno and errno is set.
flow_dv_port_id_action_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_port_id_action_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_port_id_action_resource *cache_resource;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
		if (resource->port_id == cache_resource->port_id) {
			DRV_LOG(DEBUG, "port id action resource %p: "
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.port_id_action = cache_resource;
	/* Register new port id action resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_vport
		(priv->sh->fdb_domain, resource->port_id);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
	dev_flow->dv.port_id_action = cache_resource;
	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Get the size of a specific rte_flow_item_type.
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *   Size of the corresponding item spec structure, 0 if void or irrelevant.
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
 * Convert the encap action data from a list of rte_flow_item to a raw buffer.
 *   Pointer to rte_flow_item objects list.
 *   Pointer to the output buffer.
 *   Pointer to the output buffer size.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
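 *
 * A sketch of a caller-side pattern describing a VXLAN tunnel header;
 * the eth/ipv4/udp/vxlan specs are assumed to be filled by the caller:
 *
 * @code
 * struct rte_flow_item items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * uint8_t buf[MLX5_ENCAP_MAX_LEN];
 * size_t size = 0;
 *
 * ret = flow_dv_convert_encap_data(items, buf, &size, error);
 * @endcode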
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t temp_size = 0;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"neither eth nor vlan"
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"neither eth nor vlan"
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"udp header not found");
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"udp header not found");
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"next protocol not found");
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
					MLX5_ENCAP_VXLAN_GPE_FLAGS;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"next protocol not found");
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
		case RTE_FLOW_ITEM_TYPE_VOID:
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					(void *)items->type,
					"unsupported item type");
 * Convert L2 encap action to DV specification.
 *   Pointer to rte_eth_dev structure.
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is an E-Switch flow.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
				((const struct rte_flow_action_vxlan_encap *)
				 action->conf)->definition;
				((const struct rte_flow_action_nvgre_encap *)
				 action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
 * Convert L2 decap action to DV specification.
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is an E-Switch flow.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *   Pointer to rte_eth_dev structure.
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 *   Pointer to the flow attributes.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;
	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	res.reformat_type = attr->egress ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
 * Validate the modify-header actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
 * Validate the modify-header MAC address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no L2 item in pattern");
 * Validate the modify-header IPv4 address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv4 item in pattern");
 * Validate the modify-header IPv6 address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv6 item in pattern");
 * Validate the modify-header TP actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
 * Validate the modify-header TTL actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no IP protocol in pattern");
 * Validate jump action.
 *   Pointer to the jump action.
 *   The group of the current flow.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
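 *
 * The validated action shape, for reference (the target group below is
 * hypothetical and must exceed the current flow group):
 *
 * @code
 * struct rte_flow_action_jump conf = { .group = 2 };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_JUMP,
 *	.conf = &conf,
 * };
 * @endcode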
flow_dv_validate_action_jump(const struct rte_flow_action *action,
			     struct rte_flow_error *error)
	if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be higher than"
					  " the current flow group");
 * Validate the port_id action.
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 *   Port_id RTE action structure.
 *   Attributes of flow that includes this action.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
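 *
 * The action layout checked here, sketched with a hypothetical
 * "peer_port_id" (both ports must share one E-Switch domain):
 *
 * @code
 * struct rte_flow_action_port_id conf = { .id = peer_port_id };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *	.conf = &conf,
 * };
 * @endcode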
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_port_id *port_id;
	uint16_t esw_domain_id;
	uint16_t act_port_domain_id;
	if (!attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "port id action is valid in transfer"
	if (!action || !action->conf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "port id action parameters must be"
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate action in"
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "failed to obtain E-Switch info");
	port_id = action->conf;
	port = port_id->original ? dev->data->port_id : port_id->id;
	ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
		return rte_flow_error_set
			 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
			 "failed to obtain E-Switch port id for port");
	if (act_port_domain_id != esw_domain_id)
		return rte_flow_error_set
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "port does not belong to"
			 " E-Switch being configured");
 * Find existing modify-header resource or create and register a new one.
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 *   Pointer to error structure.
 *   0 on success otherwise -errno and errno is set.
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_domain *ns;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
			(sh->ctx, cache_resource->ft_type,
			 ns, cache_resource->flags,
			 cache_resource->actions_num *
			 sizeof(cache_resource->actions[0]),
			 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Get or create a flow counter.
 *   Pointer to the Ethernet device structure.
 *   Indicate if this counter is shared with other flows.
 *   Counter identifier.
 *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
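 *
 * A usage sketch pairing allocation with the release helper defined
 * below (the shared flag and counter id 42 are arbitrary):
 *
 * @code
 * struct mlx5_flow_counter *cnt = flow_dv_counter_new(dev, 1, 42);
 *
 * if (cnt)
 *	flow_dv_counter_release(cnt);
 * @endcode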
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	if (!priv->config.devx) {
		LIST_FOREACH(cnt, &priv->flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	struct mlx5_flow_counter tmpl = {
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
 * Release a flow counter.
 * @param[in] counter
 *   Pointer to the counter handler.
flow_dv_counter_release(struct mlx5_flow_counter *counter)
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;
#ifndef HAVE_MLX5DV_DR
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
		if (attributes->group >= MLX5_MAX_TABLES_FDB)
			return rte_flow_error_set
				 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				 NULL, "group must be smaller than "
				 RTE_STR(MLX5_MAX_TABLES_FDB));
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
 * Internal validation function. For validating both actions and items.
 *   Pointer to the rte_eth_dev structure.
 *   Pointer to the flow attributes.
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
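 *
 * A minimal self-contained invocation, for illustration only:
 *
 * @code
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action act[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 *
 * ret = flow_dv_validate(dev, &attr, pattern, act, error);
 * @endcode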
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	ret = flow_dv_validate_attributes(dev, attr, error);
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			ret = flow_dv_validate_item_port_id
					(dev, items, attr, item_flags, error);
			last_item |= MLX5_FLOW_ITEM_PORT_ID;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
				/* Reset for inner layer. */
				next_protocol = 0xff;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
				/* Reset for inner layer. */
				next_protocol = 0xff;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						 &rte_flow_item_tcp_mask,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			last_item = MLX5_FLOW_LAYER_GRE;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
			last_item = MLX5_FLOW_LAYER_VXLAN;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
			last_item = MLX5_FLOW_LAYER_MPLS;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
			last_item = MLX5_FLOW_ITEM_METADATA;
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		item_flags |= last_item;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
			action_flags |= MLX5_FLOW_ACTION_FLAG;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
			action_flags |= MLX5_FLOW_ACTION_MARK;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
			action_flags |= MLX5_FLOW_ACTION_DROP;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
			action_flags |= MLX5_FLOW_ACTION_RSS;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			action_flags |= MLX5_FLOW_ACTION_COUNT;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
					MLX5_FLOW_ACTION_SET_MAC_SRC :
					MLX5_FLOW_ACTION_SET_MAC_DST;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
					MLX5_FLOW_ACTION_SET_IPV4_SRC :
					MLX5_FLOW_ACTION_SET_IPV4_DST;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
					MLX5_FLOW_ACTION_SET_IPV6_SRC :
					MLX5_FLOW_ACTION_SET_IPV6_DST;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
					MLX5_FLOW_ACTION_SET_TP_SRC :
					MLX5_FLOW_ACTION_SET_TP_DST;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
2401 MLX5_FLOW_ACTION_SET_TTL :
2402 MLX5_FLOW_ACTION_DEC_TTL;
2404 case RTE_FLOW_ACTION_TYPE_JUMP:
2405 ret = flow_dv_validate_action_jump(actions,
2406 attr->group, error);
2410 action_flags |= MLX5_FLOW_ACTION_JUMP;
2413 return rte_flow_error_set(error, ENOTSUP,
2414 RTE_FLOW_ERROR_TYPE_ACTION,
2416 "action not supported");
2419 /* E-Switch has a few restrictions on using items and actions. */
2420 if (attr->transfer) {
2421 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2422 return rte_flow_error_set(error, ENOTSUP,
2423 RTE_FLOW_ERROR_TYPE_ACTION,
2425 "unsupported action FLAG");
2426 if (action_flags & MLX5_FLOW_ACTION_MARK)
2427 return rte_flow_error_set(error, ENOTSUP,
2428 RTE_FLOW_ERROR_TYPE_ACTION,
2430 "unsupported action MARK");
2431 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2432 return rte_flow_error_set(error, ENOTSUP,
2433 RTE_FLOW_ERROR_TYPE_ACTION,
2435 "unsupported action QUEUE");
2436 if (action_flags & MLX5_FLOW_ACTION_RSS)
2437 return rte_flow_error_set(error, ENOTSUP,
2438 RTE_FLOW_ERROR_TYPE_ACTION,
2440 "unsupported action RSS");
2441 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2442 return rte_flow_error_set(error, EINVAL,
2443 RTE_FLOW_ERROR_TYPE_ACTION,
2445 "no fate action is found");
2447 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2448 return rte_flow_error_set(error, EINVAL,
2449 RTE_FLOW_ERROR_TYPE_ACTION,
2451 "no fate action is found");
2457 * Internal preparation function. Allocates the DV flow structure;
2458 * its size is constant.
2461 * Pointer to the flow attributes.
2463 * Pointer to the list of items.
2464 * @param[in] actions
2465 * Pointer to the list of actions.
2467 * Pointer to the error structure.
2470 * Pointer to mlx5_flow object on success,
2471 * otherwise NULL and rte_errno is set.
2473 static struct mlx5_flow *
2474 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2475 const struct rte_flow_item items[] __rte_unused,
2476 const struct rte_flow_action actions[] __rte_unused,
2477 struct rte_flow_error *error)
2479 uint32_t size = sizeof(struct mlx5_flow);
2480 struct mlx5_flow *flow;
2482 flow = rte_calloc(__func__, 1, size, 0);
2484 rte_flow_error_set(error, ENOMEM,
2485 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2486 "not enough memory to create flow");
2489 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2495 * Sanity check for match mask and value. Similar to check_valid_spec() in
2496 * the kernel driver. If an unmasked bit is present in the value, it fails.
2499 * Pointer to match mask buffer.
2500 * @param match_value
2501 * Pointer to match value buffer.
2504 * 0 if valid, -EINVAL otherwise.
2507 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2509 uint8_t *m = match_mask;
2510 uint8_t *v = match_value;
2513 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2516 "match_value differs from match_criteria"
2517 " %p[%u] != %p[%u]",
2518 match_value, i, match_mask, i);
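/*
 * Illustrative sketch (not part of the driver): the invariant checked
 * above is that no bit may be set in the value outside the mask, i.e.
 * value == (value & mask) byte-wise. A generic form over an arbitrary
 * buffer size would look like this:
 */
static inline int
example_value_within_mask(const uint8_t *mask, const uint8_t *value,
			  size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		if (value[i] & ~mask[i])
			return -EINVAL; /* Unmasked bit present in value. */
	return 0;
}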
2527 * Add Ethernet item to matcher and to the value.
2529 * @param[in, out] matcher
2531 * @param[in, out] key
2532 * Flow matcher value.
2534 * Flow pattern to translate.
2536 * Item is inner pattern.
2539 flow_dv_translate_item_eth(void *matcher, void *key,
2540 const struct rte_flow_item *item, int inner)
2542 const struct rte_flow_item_eth *eth_m = item->mask;
2543 const struct rte_flow_item_eth *eth_v = item->spec;
2544 const struct rte_flow_item_eth nic_mask = {
2545 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2546 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2547 .type = RTE_BE16(0xffff),
2559 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2561 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2563 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2565 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2567 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2568 &eth_m->dst, sizeof(eth_m->dst));
2569 /* The value must be in the range of the mask. */
2570 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2571 for (i = 0; i < sizeof(eth_m->dst); ++i)
2572 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2573 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2574 &eth_m->src, sizeof(eth_m->src));
2575 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2576 /* The value must be in the range of the mask. */
2577 for (i = 0; i < sizeof(eth_m->src); ++i)
2578 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2579 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2580 rte_be_to_cpu_16(eth_m->type));
2581 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2582 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2586 * Add VLAN item to matcher and to the value.
2588 * @param[in, out] matcher
2590 * @param[in, out] key
2591 * Flow matcher value.
2593 * Flow pattern to translate.
2595 * Item is inner pattern.
2598 flow_dv_translate_item_vlan(void *matcher, void *key,
2599 const struct rte_flow_item *item,
2602 const struct rte_flow_item_vlan *vlan_m = item->mask;
2603 const struct rte_flow_item_vlan *vlan_v = item->spec;
2604 const struct rte_flow_item_vlan nic_mask = {
2605 .tci = RTE_BE16(0x0fff),
2606 .inner_type = RTE_BE16(0xffff),
2618 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2620 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2622 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2624 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2626 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2627 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2628 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2629 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2630 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2631 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2632 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2633 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2634 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2635 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
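/*
 * Worked example (illustrative only): the 16-bit VLAN TCI packs
 * PCP(3) | CFI(1) | VID(12), which is why the code above derives the
 * three matcher fields from one value with shifts. For tci == 0xE00A
 * the split yields priority 7, CFI 0 and VLAN ID 10:
 */
static inline void
example_vlan_tci_split(uint16_t tci, uint16_t *prio, uint16_t *cfi,
		       uint16_t *vid)
{
	*vid = tci & 0x0fff;		/* 0xE00A -> 10 */
	*cfi = (tci >> 12) & 0x1;	/* 0xE00A -> 0 */
	*prio = tci >> 13;		/* 0xE00A -> 7 */
}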
2639 * Add IPV4 item to matcher and to the value.
2641 * @param[in, out] matcher
2643 * @param[in, out] key
2644 * Flow matcher value.
2646 * Flow pattern to translate.
2648 * Item is inner pattern.
2650 * The group to insert the rule.
2653 flow_dv_translate_item_ipv4(void *matcher, void *key,
2654 const struct rte_flow_item *item,
2655 int inner, uint32_t group)
2657 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2658 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2659 const struct rte_flow_item_ipv4 nic_mask = {
2661 .src_addr = RTE_BE32(0xffffffff),
2662 .dst_addr = RTE_BE32(0xffffffff),
2663 .type_of_service = 0xff,
2664 .next_proto_id = 0xff,
2674 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2676 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2678 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2680 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2683 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2685 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2686 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2691 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2692 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2693 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2694 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2695 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2696 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2697 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2698 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2699 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2700 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2701 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2702 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2703 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2704 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2705 ipv4_m->hdr.type_of_service);
2706 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2707 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2708 ipv4_m->hdr.type_of_service >> 2);
2709 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2710 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2711 ipv4_m->hdr.next_proto_id);
2712 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2713 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
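/*
 * Worked example (illustrative only): the IPv4 TOS byte is
 * DSCP(6) | ECN(2), so the code above programs ip_ecn from the low
 * bits and ip_dscp from tos >> 2. For type_of_service == 0x2E the
 * split yields DSCP 11 and ECN 2:
 */
static inline void
example_ipv4_tos_split(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
	*ecn = tos & 0x3;	/* 0x2E -> 2 */
	*dscp = tos >> 2;	/* 0x2E -> 11 */
}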
2717 * Add IPV6 item to matcher and to the value.
2719 * @param[in, out] matcher
2721 * @param[in, out] key
2722 * Flow matcher value.
2724 * Flow pattern to translate.
2726 * Item is inner pattern.
2728 * The group to insert the rule.
2731 flow_dv_translate_item_ipv6(void *matcher, void *key,
2732 const struct rte_flow_item *item,
2733 int inner, uint32_t group)
2735 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2736 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2737 const struct rte_flow_item_ipv6 nic_mask = {
2740 "\xff\xff\xff\xff\xff\xff\xff\xff"
2741 "\xff\xff\xff\xff\xff\xff\xff\xff",
2743 "\xff\xff\xff\xff\xff\xff\xff\xff"
2744 "\xff\xff\xff\xff\xff\xff\xff\xff",
2745 .vtc_flow = RTE_BE32(0xffffffff),
2752 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2753 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2762 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2764 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2766 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2768 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2771 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2773 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2774 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2779 size = sizeof(ipv6_m->hdr.dst_addr);
2780 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2781 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2782 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2783 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2784 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2785 for (i = 0; i < size; ++i)
2786 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2787 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2788 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2789 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2790 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2791 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2792 for (i = 0; i < size; ++i)
2793 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2795 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2796 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2797 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2798 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2799 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2800 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2803 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2805 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2808 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2810 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2814 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2816 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2817 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
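/*
 * Worked example (illustrative only): the 32-bit IPv6 vtc_flow word is
 * version(4) | traffic class(8) | flow label(20), so after the
 * byte-order swap above ECN, DSCP and the flow label fall out with
 * plain shifts and masks:
 */
static inline void
example_ipv6_vtc_split(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
		       uint32_t *flow_label)
{
	*ecn = (vtc >> 20) & 0x3;	/* Low 2 bits of traffic class. */
	*dscp = (vtc >> 22) & 0x3f;	/* High 6 bits of traffic class. */
	*flow_label = vtc & 0xfffff;	/* 20-bit flow label. */
}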
2821 * Add TCP item to matcher and to the value.
2823 * @param[in, out] matcher
2825 * @param[in, out] key
2826 * Flow matcher value.
2828 * Flow pattern to translate.
2830 * Item is inner pattern.
2833 flow_dv_translate_item_tcp(void *matcher, void *key,
2834 const struct rte_flow_item *item,
2837 const struct rte_flow_item_tcp *tcp_m = item->mask;
2838 const struct rte_flow_item_tcp *tcp_v = item->spec;
2843 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2845 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2847 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2849 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2851 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2852 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2856 tcp_m = &rte_flow_item_tcp_mask;
2857 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2858 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2859 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2860 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2861 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2862 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2863 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2864 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2868 * Add UDP item to matcher and to the value.
2870 * @param[in, out] matcher
2872 * @param[in, out] key
2873 * Flow matcher value.
2875 * Flow pattern to translate.
2877 * Item is inner pattern.
2880 flow_dv_translate_item_udp(void *matcher, void *key,
2881 const struct rte_flow_item *item,
2884 const struct rte_flow_item_udp *udp_m = item->mask;
2885 const struct rte_flow_item_udp *udp_v = item->spec;
2890 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2892 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2894 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2896 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2898 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2899 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2903 udp_m = &rte_flow_item_udp_mask;
2904 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2905 rte_be_to_cpu_16(udp_m->hdr.src_port));
2906 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2907 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2908 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2909 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2910 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2911 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2915 * Add GRE item to matcher and to the value.
2917 * @param[in, out] matcher
2919 * @param[in, out] key
2920 * Flow matcher value.
2922 * Flow pattern to translate.
2924 * Item is inner pattern.
2927 flow_dv_translate_item_gre(void *matcher, void *key,
2928 const struct rte_flow_item *item,
2931 const struct rte_flow_item_gre *gre_m = item->mask;
2932 const struct rte_flow_item_gre *gre_v = item->spec;
2935 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2936 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2939 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2941 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2943 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2945 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2947 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2948 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2952 gre_m = &rte_flow_item_gre_mask;
2953 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2954 rte_be_to_cpu_16(gre_m->protocol));
2955 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2956 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2960 * Add NVGRE item to matcher and to the value.
2962 * @param[in, out] matcher
2964 * @param[in, out] key
2965 * Flow matcher value.
2967 * Flow pattern to translate.
2969 * Item is inner pattern.
2972 flow_dv_translate_item_nvgre(void *matcher, void *key,
2973 const struct rte_flow_item *item,
2976 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2977 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2978 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2979 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2980 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2981 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2987 flow_dv_translate_item_gre(matcher, key, item, inner);
2991 nvgre_m = &rte_flow_item_nvgre_mask;
2992 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2993 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2994 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2995 memcpy(gre_key_m, tni_flow_id_m, size);
2996 for (i = 0; i < size; ++i)
2997 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3001 * Add VXLAN item to matcher and to the value.
3003 * @param[in, out] matcher
3005 * @param[in, out] key
3006 * Flow matcher value.
3008 * Flow pattern to translate.
3010 * Item is inner pattern.
3013 flow_dv_translate_item_vxlan(void *matcher, void *key,
3014 const struct rte_flow_item *item,
3017 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3018 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3021 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3022 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3030 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3032 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3034 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3036 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3038 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3039 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3040 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3041 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3042 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3047 vxlan_m = &rte_flow_item_vxlan_mask;
3048 size = sizeof(vxlan_m->vni);
3049 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3050 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3051 memcpy(vni_m, vxlan_m->vni, size);
3052 for (i = 0; i < size; ++i)
3053 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
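/*
 * Illustrative sketch (not part of the driver): the VNI travels as
 * three bytes in network order, so VNI 0x123456 is expressed in an
 * rte_flow VXLAN item as the byte array below; the loop above then
 * clamps the value to the mask byte by byte.
 */
static const struct rte_flow_item_vxlan example_vxlan_spec = {
	.vni = { 0x12, 0x34, 0x56 },
};
static const struct rte_flow_item example_vxlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	.spec = &example_vxlan_spec,
	.mask = &rte_flow_item_vxlan_mask,
};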
3057 * Add MPLS item to matcher and to the value.
3059 * @param[in, out] matcher
3061 * @param[in, out] key
3062 * Flow matcher value.
3064 * Flow pattern to translate.
3065 * @param[in] prev_layer
3066 * The protocol layer indicated in previous item.
3068 * Item is inner pattern.
3071 flow_dv_translate_item_mpls(void *matcher, void *key,
3072 const struct rte_flow_item *item,
3073 uint64_t prev_layer,
3076 const uint32_t *in_mpls_m = item->mask;
3077 const uint32_t *in_mpls_v = item->spec;
3078 uint32_t *out_mpls_m = NULL;
3079 uint32_t *out_mpls_v = NULL;
3080 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3081 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3082 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3084 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3085 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3086 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3088 switch (prev_layer) {
3089 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3090 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3091 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3092 MLX5_UDP_PORT_MPLS);
3094 case MLX5_FLOW_LAYER_GRE:
3095 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3096 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3100 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3101 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3108 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3109 switch (prev_layer) {
3110 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3112 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3113 outer_first_mpls_over_udp);
3115 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3116 outer_first_mpls_over_udp);
3118 case MLX5_FLOW_LAYER_GRE:
3120 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3121 outer_first_mpls_over_gre);
3123 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3124 outer_first_mpls_over_gre);
3127 /* Inner MPLS not over GRE is not supported. */
3130 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3134 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3140 if (out_mpls_m && out_mpls_v) {
3141 *out_mpls_m = *in_mpls_m;
3142 *out_mpls_v = *in_mpls_v & *in_mpls_m;
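/*
 * Illustrative sketch (not part of the driver): the MPLS label stack
 * entry packs label(20) | TC(3) | S(1) | TTL(8), matching the 32-bit
 * copy performed above. Label 42 with TC 0 and the bottom-of-stack bit
 * set is therefore encoded as below; the TTL is an assumption for the
 * example.
 */
static const struct rte_flow_item_mpls example_mpls_spec = {
	.label_tc_s = { 0x00, 0x02, 0xa1 },	/* (42 << 4) | (0 << 1) | 1 */
	.ttl = 64,
};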
3147 * Add META item to matcher and to the value.
3149 * @param[in, out] matcher
3151 * @param[in, out] key
3152 * Flow matcher value.
3154 * Flow pattern to translate.
3156 * Item is inner pattern.
3159 flow_dv_translate_item_meta(void *matcher, void *key,
3160 const struct rte_flow_item *item)
3162 const struct rte_flow_item_meta *meta_m;
3163 const struct rte_flow_item_meta *meta_v;
3165 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3167 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3169 meta_m = (const void *)item->mask;
3171 meta_m = &rte_flow_item_meta_mask;
3172 meta_v = (const void *)item->spec;
3174 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3175 rte_be_to_cpu_32(meta_m->data));
3176 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3177 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3182 * Add source vport match to the specified matcher.
3184 * @param[in, out] matcher
3186 * @param[in, out] key
3187 * Flow matcher value.
3189 * Source vport value to match.
3194 flow_dv_translate_item_source_vport(void *matcher, void *key,
3195 int16_t port, uint16_t mask)
3197 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3198 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3200 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3201 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
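/*
 * Usage sketch (illustrative only): an exact match on E-Switch source
 * vport 1 writes the full 16-bit mask into the matcher and the vport
 * number into the value. The vport number is an assumption for the
 * example; flow_dv_translate_item_port_id() below obtains the real one
 * from the port ID.
 */
static inline void
example_match_source_vport(void *matcher, void *key)
{
	flow_dv_translate_item_source_vport(matcher, key, 1, 0xffff);
}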
3205 * Translate port-id item to eswitch match on port-id.
3208 * The device to configure through.
3209 * @param[in, out] matcher
3211 * @param[in, out] key
3212 * Flow matcher value.
3214 * Flow pattern to translate.
3217 * 0 on success, a negative errno value otherwise.
3220 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3221 void *key, const struct rte_flow_item *item)
3223 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3224 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3225 uint16_t mask, val, id;
3228 mask = pid_m ? pid_m->id : 0xffff;
3229 id = pid_v ? pid_v->id : dev->data->port_id;
3230 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3233 flow_dv_translate_item_source_vport(matcher, key, val, mask);
3237 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
3239 #define HEADER_IS_ZERO(match_criteria, headers) \
3240 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3241 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3244 * Calculate flow matcher enable bitmap.
3246 * @param match_criteria
3247 * Pointer to flow matcher criteria.
3250 * Bitmap of enabled fields.
3253 flow_dv_matcher_enable(uint32_t *match_criteria)
3255 uint8_t match_criteria_enable;
3257 match_criteria_enable =
3258 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3259 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3260 match_criteria_enable |=
3261 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3262 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3263 match_criteria_enable |=
3264 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3265 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3266 match_criteria_enable |=
3267 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3268 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3269 #ifdef HAVE_MLX5DV_DR
3270 match_criteria_enable |=
3271 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3272 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3274 return match_criteria_enable;
3281 * @param[in, out] dev
3282 * Pointer to rte_eth_dev structure.
3283 * @param[in] table_id
3286 * Direction of the table.
3287 * @param[in] transfer
3288 * E-Switch or NIC flow.
3290 * Pointer to error structure.
3293 * Returns the table resource based on the index, NULL in case of failure.
3295 static struct mlx5_flow_tbl_resource *
3296 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3297 uint32_t table_id, uint8_t egress,
3299 struct rte_flow_error *error)
3301 struct mlx5_priv *priv = dev->data->dev_private;
3302 struct mlx5_ibv_shared *sh = priv->sh;
3303 struct mlx5_flow_tbl_resource *tbl;
3305 #ifdef HAVE_MLX5DV_DR
3307 tbl = &sh->fdb_tbl[table_id];
3309 tbl->obj = mlx5_glue->dr_create_flow_tbl
3310 (sh->fdb_domain, table_id);
3311 } else if (egress) {
3312 tbl = &sh->tx_tbl[table_id];
3314 tbl->obj = mlx5_glue->dr_create_flow_tbl
3315 (sh->tx_domain, table_id);
3317 tbl = &sh->rx_tbl[table_id];
3319 tbl->obj = mlx5_glue->dr_create_flow_tbl
3320 (sh->rx_domain, table_id);
3323 rte_flow_error_set(error, ENOMEM,
3324 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3325 NULL, "cannot create table");
3328 rte_atomic32_inc(&tbl->refcnt);
3334 return &sh->fdb_tbl[table_id];
3336 return &sh->tx_tbl[table_id];
3338 return &sh->rx_tbl[table_id];
3343 * Release a flow table.
3346 * Table resource to be released.
3349 * Returns 0 if the table was released, 1 otherwise.
3352 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3356 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3357 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3365 * Register the flow matcher.
3367 * @param[in, out] dev
3368 * Pointer to rte_eth_dev structure.
3369 * @param[in, out] matcher
3370 * Pointer to flow matcher.
3371 * @param[in, out] dev_flow
3372 * Pointer to the dev_flow.
3374 * Pointer to error structure.
3377 * 0 on success, otherwise -errno and errno is set.
3380 flow_dv_matcher_register(struct rte_eth_dev *dev,
3381 struct mlx5_flow_dv_matcher *matcher,
3382 struct mlx5_flow *dev_flow,
3383 struct rte_flow_error *error)
3385 struct mlx5_priv *priv = dev->data->dev_private;
3386 struct mlx5_ibv_shared *sh = priv->sh;
3387 struct mlx5_flow_dv_matcher *cache_matcher;
3388 struct mlx5dv_flow_matcher_attr dv_attr = {
3389 .type = IBV_FLOW_ATTR_NORMAL,
3390 .match_mask = (void *)&matcher->mask,
3392 struct mlx5_flow_tbl_resource *tbl = NULL;
3394 /* Lookup from cache. */
3395 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3396 if (matcher->crc == cache_matcher->crc &&
3397 matcher->priority == cache_matcher->priority &&
3398 matcher->egress == cache_matcher->egress &&
3399 matcher->group == cache_matcher->group &&
3400 matcher->transfer == cache_matcher->transfer &&
3401 !memcmp((const void *)matcher->mask.buf,
3402 (const void *)cache_matcher->mask.buf,
3403 cache_matcher->mask.size)) {
3405 "priority %hd use %s matcher %p: refcnt %d++",
3406 cache_matcher->priority,
3407 cache_matcher->egress ? "tx" : "rx",
3408 (void *)cache_matcher,
3409 rte_atomic32_read(&cache_matcher->refcnt));
3410 rte_atomic32_inc(&cache_matcher->refcnt);
3411 dev_flow->dv.matcher = cache_matcher;
3415 /* Register new matcher. */
3416 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3418 return rte_flow_error_set(error, ENOMEM,
3419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3420 "cannot allocate matcher memory");
3421 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3422 matcher->egress, matcher->transfer,
3425 rte_free(cache_matcher);
3426 return rte_flow_error_set(error, ENOMEM,
3427 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3428 NULL, "cannot create table");
3430 *cache_matcher = *matcher;
3431 dv_attr.match_criteria_enable =
3432 flow_dv_matcher_enable(cache_matcher->mask.buf);
3433 dv_attr.priority = matcher->priority;
3434 if (matcher->egress)
3435 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3436 cache_matcher->matcher_object =
3437 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3438 if (!cache_matcher->matcher_object) {
3439 rte_free(cache_matcher);
3440 #ifdef HAVE_MLX5DV_DR
3441 flow_dv_tbl_resource_release(tbl);
3443 return rte_flow_error_set(error, ENOMEM,
3444 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3445 NULL, "cannot create matcher");
3447 rte_atomic32_inc(&cache_matcher->refcnt);
3448 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3449 dev_flow->dv.matcher = cache_matcher;
3450 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3451 cache_matcher->priority,
3452 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3453 rte_atomic32_read(&cache_matcher->refcnt));
3454 rte_atomic32_inc(&tbl->refcnt);
3459 * Find existing tag resource or create and register a new one.
3461 * @param[in, out] dev
3462 * Pointer to rte_eth_dev structure.
3463 * @param[in, out] resource
3464 * Pointer to tag resource.
3465 * @param[in, out] dev_flow
3466 * Pointer to the dev_flow.
3468 * Pointer to error structure.
3471 * 0 on success, otherwise -errno and errno is set.
3474 flow_dv_tag_resource_register
3475 (struct rte_eth_dev *dev,
3476 struct mlx5_flow_dv_tag_resource *resource,
3477 struct mlx5_flow *dev_flow,
3478 struct rte_flow_error *error)
3480 struct mlx5_priv *priv = dev->data->dev_private;
3481 struct mlx5_ibv_shared *sh = priv->sh;
3482 struct mlx5_flow_dv_tag_resource *cache_resource;
3484 /* Lookup a matching resource from cache. */
3485 LIST_FOREACH(cache_resource, &sh->tags, next) {
3486 if (resource->tag == cache_resource->tag) {
3487 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3488 (void *)cache_resource,
3489 rte_atomic32_read(&cache_resource->refcnt));
3490 rte_atomic32_inc(&cache_resource->refcnt);
3491 dev_flow->flow->tag_resource = cache_resource;
3495 /* Register new resource. */
3496 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3497 if (!cache_resource)
3498 return rte_flow_error_set(error, ENOMEM,
3499 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3500 "cannot allocate resource memory");
3501 *cache_resource = *resource;
3502 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3504 if (!cache_resource->action) {
3505 rte_free(cache_resource);
3506 return rte_flow_error_set(error, ENOMEM,
3507 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3508 NULL, "cannot create action");
3510 rte_atomic32_init(&cache_resource->refcnt);
3511 rte_atomic32_inc(&cache_resource->refcnt);
3512 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3513 dev_flow->flow->tag_resource = cache_resource;
3514 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3515 (void *)cache_resource,
3516 rte_atomic32_read(&cache_resource->refcnt));
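/*
 * Illustrative sketch (not part of the driver): every *_register helper
 * in this file follows the same caching pattern seen above: scan a
 * shared list for an equal entry and bump its refcount, otherwise
 * allocate, create the action object, set the refcount to one and
 * insert at the head. Stripped of the mlx5 specifics it reduces to:
 */
struct example_entry {
	LIST_ENTRY(example_entry) next;
	uint32_t tag;
	rte_atomic32_t refcnt;
};
LIST_HEAD(example_head, example_entry);

static struct example_entry *
example_entry_get_or_create(struct example_head *head, uint32_t tag)
{
	struct example_entry *e;

	LIST_FOREACH(e, head, next)
		if (e->tag == tag) {
			/* Cache hit: reuse the entry, take a reference. */
			rte_atomic32_inc(&e->refcnt);
			return e;
		}
	e = rte_calloc(__func__, 1, sizeof(*e), 0);
	if (!e)
		return NULL;
	e->tag = tag;
	rte_atomic32_init(&e->refcnt);
	rte_atomic32_inc(&e->refcnt);
	LIST_INSERT_HEAD(head, e, next);
	return e;
}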
3524 * Pointer to Ethernet device.
3526 * Pointer to mlx5_flow.
3529 * 1 while a reference on it exists, 0 when freed.
3532 flow_dv_tag_release(struct rte_eth_dev *dev,
3533 struct mlx5_flow_dv_tag_resource *tag)
3536 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3537 dev->data->port_id, (void *)tag,
3538 rte_atomic32_read(&tag->refcnt));
3539 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3540 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3541 LIST_REMOVE(tag, next);
3542 DRV_LOG(DEBUG, "port %u tag %p: removed",
3543 dev->data->port_id, (void *)tag);
3551 * Translate port ID action to vport.
3554 * Pointer to rte_eth_dev structure.
3556 * Pointer to the port ID action.
3557 * @param[out] dst_port_id
3558 * The target port ID.
3560 * Pointer to the error structure.
3563 * 0 on success, a negative errno value otherwise and rte_errno is set.
3566 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3567 const struct rte_flow_action *action,
3568 uint32_t *dst_port_id,
3569 struct rte_flow_error *error)
3574 const struct rte_flow_action_port_id *conf =
3575 (const struct rte_flow_action_port_id *)action->conf;
3577 port = conf->original ? dev->data->port_id : conf->id;
3578 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3580 return rte_flow_error_set(error, -ret,
3581 RTE_FLOW_ERROR_TYPE_ACTION,
3583 "No eswitch info was found for port");
3584 *dst_port_id = port_id;
3589 * Fill the flow with DV spec.
3592 * Pointer to rte_eth_dev structure.
3593 * @param[in, out] dev_flow
3594 * Pointer to the sub flow.
3596 * Pointer to the flow attributes.
3598 * Pointer to the list of items.
3599 * @param[in] actions
3600 * Pointer to the list of actions.
3602 * Pointer to the error structure.
3605 * 0 on success, a negative errno value otherwise and rte_errno is set.
3608 flow_dv_translate(struct rte_eth_dev *dev,
3609 struct mlx5_flow *dev_flow,
3610 const struct rte_flow_attr *attr,
3611 const struct rte_flow_item items[],
3612 const struct rte_flow_action actions[],
3613 struct rte_flow_error *error)
3615 struct mlx5_priv *priv = dev->data->dev_private;
3616 struct rte_flow *flow = dev_flow->flow;
3617 uint64_t item_flags = 0;
3618 uint64_t last_item = 0;
3619 uint64_t action_flags = 0;
3620 uint64_t priority = attr->priority;
3621 struct mlx5_flow_dv_matcher matcher = {
3623 .size = sizeof(matcher.mask.buf),
3627 bool actions_end = false;
3628 struct mlx5_flow_dv_modify_hdr_resource res = {
3629 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3630 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3632 union flow_dv_attr flow_attr = { .attr = 0 };
3633 struct mlx5_flow_dv_tag_resource tag_resource;
3634 uint32_t modify_action_position = UINT32_MAX;
3635 void *match_mask = matcher.mask.buf;
3636 void *match_value = dev_flow->dv.value.buf;
3638 flow->group = attr->group;
3640 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3641 if (priority == MLX5_FLOW_PRIO_RSVD)
3642 priority = priv->config.flow_prio - 1;
3643 for (; !actions_end ; actions++) {
3644 const struct rte_flow_action_queue *queue;
3645 const struct rte_flow_action_rss *rss;
3646 const struct rte_flow_action *action = actions;
3647 const struct rte_flow_action_count *count = action->conf;
3648 const uint8_t *rss_key;
3649 const struct rte_flow_action_jump *jump_data;
3650 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3651 struct mlx5_flow_tbl_resource *tbl;
3652 uint32_t port_id = 0;
3653 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3655 switch (actions->type) {
3656 case RTE_FLOW_ACTION_TYPE_VOID:
3658 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3659 if (flow_dv_translate_action_port_id(dev, action,
3662 port_id_resource.port_id = port_id;
3663 if (flow_dv_port_id_action_resource_register
3664 (dev, &port_id_resource, dev_flow, error))
3666 dev_flow->dv.actions[actions_n++] =
3667 dev_flow->dv.port_id_action->action;
3668 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3670 case RTE_FLOW_ACTION_TYPE_FLAG:
3672 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3673 if (!flow->tag_resource)
3674 if (flow_dv_tag_resource_register
3675 (dev, &tag_resource, dev_flow, error))
3677 dev_flow->dv.actions[actions_n++] =
3678 flow->tag_resource->action;
3679 action_flags |= MLX5_FLOW_ACTION_FLAG;
3681 case RTE_FLOW_ACTION_TYPE_MARK:
3682 tag_resource.tag = mlx5_flow_mark_set
3683 (((const struct rte_flow_action_mark *)
3684 (actions->conf))->id);
3685 if (!flow->tag_resource)
3686 if (flow_dv_tag_resource_register
3687 (dev, &tag_resource, dev_flow, error))
3689 dev_flow->dv.actions[actions_n++] =
3690 flow->tag_resource->action;
3691 action_flags |= MLX5_FLOW_ACTION_MARK;
3693 case RTE_FLOW_ACTION_TYPE_DROP:
3694 action_flags |= MLX5_FLOW_ACTION_DROP;
3696 case RTE_FLOW_ACTION_TYPE_QUEUE:
3697 queue = actions->conf;
3698 flow->rss.queue_num = 1;
3699 (*flow->queue)[0] = queue->index;
3700 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3702 case RTE_FLOW_ACTION_TYPE_RSS:
3703 rss = actions->conf;
3705 memcpy((*flow->queue), rss->queue,
3706 rss->queue_num * sizeof(uint16_t));
3707 flow->rss.queue_num = rss->queue_num;
3708 /* NULL RSS key indicates default RSS key. */
3709 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3710 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3711 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3712 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3713 flow->rss.level = rss->level;
3714 action_flags |= MLX5_FLOW_ACTION_RSS;
3716 case RTE_FLOW_ACTION_TYPE_COUNT:
3717 if (!priv->config.devx) {
3718 rte_errno = ENOTSUP;
3721 flow->counter = flow_dv_counter_new(dev, count->shared,
3723 if (flow->counter == NULL)
3725 dev_flow->dv.actions[actions_n++] =
3726 flow->counter->action;
3727 action_flags |= MLX5_FLOW_ACTION_COUNT;
3730 if (rte_errno == ENOTSUP)
3731 return rte_flow_error_set
3733 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3735 "count action not supported");
3737 return rte_flow_error_set
3739 RTE_FLOW_ERROR_TYPE_ACTION,
3741 "cannot create counter"
3743 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3744 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3745 if (flow_dv_create_action_l2_encap(dev, actions,
3750 dev_flow->dv.actions[actions_n++] =
3751 dev_flow->dv.encap_decap->verbs_action;
3752 action_flags |= actions->type ==
3753 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3754 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3755 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3757 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3758 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3759 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3763 dev_flow->dv.actions[actions_n++] =
3764 dev_flow->dv.encap_decap->verbs_action;
3765 action_flags |= actions->type ==
3766 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3767 MLX5_FLOW_ACTION_VXLAN_DECAP :
3768 MLX5_FLOW_ACTION_NVGRE_DECAP;
3770 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3771 /* Handle encap with preceding decap. */
3772 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3773 if (flow_dv_create_action_raw_encap
3774 (dev, actions, dev_flow, attr, error))
3776 dev_flow->dv.actions[actions_n++] =
3777 dev_flow->dv.encap_decap->verbs_action;
3779 /* Handle encap without preceding decap. */
3780 if (flow_dv_create_action_l2_encap
3781 (dev, actions, dev_flow, attr->transfer,
3784 dev_flow->dv.actions[actions_n++] =
3785 dev_flow->dv.encap_decap->verbs_action;
3787 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3789 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3790 /* Check if this decap is followed by encap. */
3791 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3792 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3795 /* Handle decap only if it isn't followed by encap. */
3796 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3797 if (flow_dv_create_action_l2_decap
3798 (dev, dev_flow, attr->transfer, error))
3800 dev_flow->dv.actions[actions_n++] =
3801 dev_flow->dv.encap_decap->verbs_action;
3803 /* If decap is followed by encap, handle it at encap. */
3804 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3806 case RTE_FLOW_ACTION_TYPE_JUMP:
3807 jump_data = action->conf;
3808 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3811 attr->transfer, error);
3813 return rte_flow_error_set
3815 RTE_FLOW_ERROR_TYPE_ACTION,
3817 "cannot create jump action.");
3818 jump_tbl_resource.tbl = tbl;
3819 if (flow_dv_jump_tbl_resource_register
3820 (dev, &jump_tbl_resource, dev_flow, error)) {
3821 flow_dv_tbl_resource_release(tbl);
3822 return rte_flow_error_set
3824 RTE_FLOW_ERROR_TYPE_ACTION,
3826 "cannot create jump action.");
3828 dev_flow->dv.actions[actions_n++] =
3829 dev_flow->dv.jump->action;
3830 action_flags |= MLX5_FLOW_ACTION_JUMP;
3832 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3833 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3834 if (flow_dv_convert_action_modify_mac(&res, actions,
3837 action_flags |= actions->type ==
3838 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3839 MLX5_FLOW_ACTION_SET_MAC_SRC :
3840 MLX5_FLOW_ACTION_SET_MAC_DST;
3842 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3843 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3844 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3847 action_flags |= actions->type ==
3848 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3849 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3850 MLX5_FLOW_ACTION_SET_IPV4_DST;
3852 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3853 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3854 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3857 action_flags |= actions->type ==
3858 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3859 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3860 MLX5_FLOW_ACTION_SET_IPV6_DST;
3862 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3863 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3864 if (flow_dv_convert_action_modify_tp(&res, actions,
3868 action_flags |= actions->type ==
3869 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3870 MLX5_FLOW_ACTION_SET_TP_SRC :
3871 MLX5_FLOW_ACTION_SET_TP_DST;
3873 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3874 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3878 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3880 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3881 if (flow_dv_convert_action_modify_ttl(&res, actions,
3885 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3887 case RTE_FLOW_ACTION_TYPE_END:
3889 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3890 /* Create modify header action if needed. */
3891 if (flow_dv_modify_hdr_resource_register
3896 dev_flow->dv.actions[modify_action_position] =
3897 dev_flow->dv.modify_hdr->verbs_action;
3903 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3904 modify_action_position == UINT32_MAX)
3905 modify_action_position = actions_n++;
3907 dev_flow->dv.actions_n = actions_n;
3908 flow->actions = action_flags;
3909 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3910 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3912 switch (items->type) {
3913 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3914 flow_dv_translate_item_port_id(dev, match_mask,
3915 match_value, items);
3916 last_item = MLX5_FLOW_ITEM_PORT_ID;
3918 case RTE_FLOW_ITEM_TYPE_ETH:
3919 flow_dv_translate_item_eth(match_mask, match_value,
3921 matcher.priority = MLX5_PRIORITY_MAP_L2;
3922 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3923 MLX5_FLOW_LAYER_OUTER_L2;
3925 case RTE_FLOW_ITEM_TYPE_VLAN:
3926 flow_dv_translate_item_vlan(match_mask, match_value,
3928 matcher.priority = MLX5_PRIORITY_MAP_L2;
3929 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3930 MLX5_FLOW_LAYER_INNER_VLAN) :
3931 (MLX5_FLOW_LAYER_OUTER_L2 |
3932 MLX5_FLOW_LAYER_OUTER_VLAN);
3934 case RTE_FLOW_ITEM_TYPE_IPV4:
3935 flow_dv_translate_item_ipv4(match_mask, match_value,
3936 items, tunnel, attr->group);
3937 matcher.priority = MLX5_PRIORITY_MAP_L3;
3938 dev_flow->dv.hash_fields |=
3939 mlx5_flow_hashfields_adjust
3941 MLX5_IPV4_LAYER_TYPES,
3942 MLX5_IPV4_IBV_RX_HASH);
3943 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3944 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3946 case RTE_FLOW_ITEM_TYPE_IPV6:
3947 flow_dv_translate_item_ipv6(match_mask, match_value,
3948 items, tunnel, attr->group);
3949 matcher.priority = MLX5_PRIORITY_MAP_L3;
3950 dev_flow->dv.hash_fields |=
3951 mlx5_flow_hashfields_adjust
3953 MLX5_IPV6_LAYER_TYPES,
3954 MLX5_IPV6_IBV_RX_HASH);
3955 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3956 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3958 case RTE_FLOW_ITEM_TYPE_TCP:
3959 flow_dv_translate_item_tcp(match_mask, match_value,
3961 matcher.priority = MLX5_PRIORITY_MAP_L4;
3962 dev_flow->dv.hash_fields |=
3963 mlx5_flow_hashfields_adjust
3964 (dev_flow, tunnel, ETH_RSS_TCP,
3965 IBV_RX_HASH_SRC_PORT_TCP |
3966 IBV_RX_HASH_DST_PORT_TCP);
3967 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3968 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3970 case RTE_FLOW_ITEM_TYPE_UDP:
3971 flow_dv_translate_item_udp(match_mask, match_value,
3973 matcher.priority = MLX5_PRIORITY_MAP_L4;
3974 dev_flow->dv.hash_fields |=
3975 mlx5_flow_hashfields_adjust
3976 (dev_flow, tunnel, ETH_RSS_UDP,
3977 IBV_RX_HASH_SRC_PORT_UDP |
3978 IBV_RX_HASH_DST_PORT_UDP);
3979 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3980 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3982 case RTE_FLOW_ITEM_TYPE_GRE:
3983 flow_dv_translate_item_gre(match_mask, match_value,
3985 last_item = MLX5_FLOW_LAYER_GRE;
3987 case RTE_FLOW_ITEM_TYPE_NVGRE:
3988 flow_dv_translate_item_nvgre(match_mask, match_value,
3990 last_item = MLX5_FLOW_LAYER_GRE;
3992 case RTE_FLOW_ITEM_TYPE_VXLAN:
3993 flow_dv_translate_item_vxlan(match_mask, match_value,
3995 last_item = MLX5_FLOW_LAYER_VXLAN;
3997 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3998 flow_dv_translate_item_vxlan(match_mask, match_value,
4000 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4002 case RTE_FLOW_ITEM_TYPE_MPLS:
4003 flow_dv_translate_item_mpls(match_mask, match_value,
4004 items, last_item, tunnel);
4005 last_item = MLX5_FLOW_LAYER_MPLS;
4007 case RTE_FLOW_ITEM_TYPE_META:
4008 flow_dv_translate_item_meta(match_mask, match_value,
4010 last_item = MLX5_FLOW_ITEM_METADATA;
4015 item_flags |= last_item;
4018 * In case of ingress traffic when E-Switch mode is enabled,
4019 * there are two cases where the source port must be set manually:
4020 * a NIC steering rule, and an E-Switch rule where no port_id item
4021 * was found. In both cases the source port is set according to the
4022 * current port in use.
4024 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4025 (priv->representor || priv->master)) {
4026 if (flow_dv_translate_item_port_id(dev, match_mask,
4030 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4031 dev_flow->dv.value.buf));
4032 dev_flow->layers = item_flags;
4033 /* Register matcher. */
4034 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4036 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4038 matcher.egress = attr->egress;
4039 matcher.group = attr->group;
4040 matcher.transfer = attr->transfer;
4041 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4047 * Apply the flow to the NIC.
4050 * Pointer to the Ethernet device structure.
4051 * @param[in, out] flow
4052 * Pointer to flow structure.
4054 * Pointer to error structure.
4057 * 0 on success, a negative errno value otherwise and rte_errno is set.
4060 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4061 struct rte_flow_error *error)
4063 struct mlx5_flow_dv *dv;
4064 struct mlx5_flow *dev_flow;
4065 struct mlx5_priv *priv = dev->data->dev_private;
4069 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4072 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
4073 if (flow->transfer) {
4074 dv->actions[n++] = priv->sh->esw_drop_action;
4076 dv->hrxq = mlx5_hrxq_drop_new(dev);
4080 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4082 "cannot get drop hash queue");
4085 dv->actions[n++] = dv->hrxq->action;
4087 } else if (flow->actions &
4088 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4089 struct mlx5_hrxq *hrxq;
4091 hrxq = mlx5_hrxq_get(dev, flow->key,
4092 MLX5_RSS_HASH_KEY_LEN,
4095 flow->rss.queue_num);
4097 hrxq = mlx5_hrxq_new
4098 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4099 dv->hash_fields, (*flow->queue),
4100 flow->rss.queue_num,
4101 !!(dev_flow->layers &
4102 MLX5_FLOW_LAYER_TUNNEL));
4106 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4107 "cannot get hash queue");
4111 dv->actions[n++] = dv->hrxq->action;
4114 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4115 (void *)&dv->value, n,
4118 rte_flow_error_set(error, errno,
4119 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4121 "hardware refuses to create flow");
4127 err = rte_errno; /* Save rte_errno before cleanup. */
4128 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4129 struct mlx5_flow_dv *dv = &dev_flow->dv;
4131 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4132 mlx5_hrxq_drop_release(dev);
4134 mlx5_hrxq_release(dev, dv->hrxq);
4138 rte_errno = err; /* Restore rte_errno. */
4143 * Release the flow matcher.
4146 * Pointer to Ethernet device.
4148 * Pointer to mlx5_flow.
4151 * 1 while a reference on it exists, 0 when freed.
4154 flow_dv_matcher_release(struct rte_eth_dev *dev,
4155 struct mlx5_flow *flow)
4157 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4158 struct mlx5_priv *priv = dev->data->dev_private;
4159 struct mlx5_ibv_shared *sh = priv->sh;
4160 struct mlx5_flow_tbl_resource *tbl;
4162 assert(matcher->matcher_object);
4163 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4164 dev->data->port_id, (void *)matcher,
4165 rte_atomic32_read(&matcher->refcnt));
4166 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4167 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4168 (matcher->matcher_object));
4169 LIST_REMOVE(matcher, next);
4170 if (matcher->egress)
4171 tbl = &sh->tx_tbl[matcher->group];
4173 tbl = &sh->rx_tbl[matcher->group];
4174 flow_dv_tbl_resource_release(tbl);
4176 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4177 dev->data->port_id, (void *)matcher);
4184 * Release an encap/decap resource.
4187 * Pointer to mlx5_flow.
4190 * 1 while a reference on it exists, 0 when freed.
4193 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4195 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4196 flow->dv.encap_decap;
4198 assert(cache_resource->verbs_action);
4199 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4200 (void *)cache_resource,
4201 rte_atomic32_read(&cache_resource->refcnt));
4202 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4203 claim_zero(mlx5_glue->destroy_flow_action
4204 (cache_resource->verbs_action));
4205 LIST_REMOVE(cache_resource, next);
4206 rte_free(cache_resource);
4207 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4208 (void *)cache_resource);
4215 * Release a jump-to-table action resource.
4218 * Pointer to mlx5_flow.
4221 * 1 while a reference on it exists, 0 when freed.
4224 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4226 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4229 assert(cache_resource->action);
4230 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4231 (void *)cache_resource,
4232 rte_atomic32_read(&cache_resource->refcnt));
4233 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4234 claim_zero(mlx5_glue->destroy_flow_action
4235 (cache_resource->action));
4236 LIST_REMOVE(cache_resource, next);
4237 flow_dv_tbl_resource_release(cache_resource->tbl);
4238 rte_free(cache_resource);
4239 DRV_LOG(DEBUG, "jump table resource %p: removed",
4240 (void *)cache_resource);
4247 * Release a modify-header resource.
4250 * Pointer to mlx5_flow.
4253 * 1 while a reference on it exists, 0 when freed.
4256 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4258 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4259 flow->dv.modify_hdr;
4261 assert(cache_resource->verbs_action);
4262 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4263 (void *)cache_resource,
4264 rte_atomic32_read(&cache_resource->refcnt));
4265 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4266 claim_zero(mlx5_glue->destroy_flow_action
4267 (cache_resource->verbs_action));
4268 LIST_REMOVE(cache_resource, next);
4269 rte_free(cache_resource);
4270 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4271 (void *)cache_resource);
4278 * Release port ID action resource.
4281 * Pointer to mlx5_flow.
4284 * 1 while a reference on it exists, 0 when freed.
4287 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4289 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4290 flow->dv.port_id_action;
4292 assert(cache_resource->action);
4293 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4294 (void *)cache_resource,
4295 rte_atomic32_read(&cache_resource->refcnt));
4296 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4297 claim_zero(mlx5_glue->destroy_flow_action
4298 (cache_resource->action));
4299 LIST_REMOVE(cache_resource, next);
4300 rte_free(cache_resource);
4301 DRV_LOG(DEBUG, "port id action resource %p: removed",
4302 (void *)cache_resource);
4309 * Remove the flow from the NIC but keep it in memory.
4312 * Pointer to Ethernet device.
4313 * @param[in, out] flow
4314 * Pointer to flow structure.
4317 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4319 struct mlx5_flow_dv *dv;
4320 struct mlx5_flow *dev_flow;
4324 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4327 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4331 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4332 mlx5_hrxq_drop_release(dev);
4334 mlx5_hrxq_release(dev, dv->hrxq);
4341 * Remove the flow from the NIC and the memory.
4344 * Pointer to the Ethernet device structure.
4345 * @param[in, out] flow
4346 * Pointer to flow structure.
4349 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4351 struct mlx5_flow *dev_flow;
4355 flow_dv_remove(dev, flow);
4356 if (flow->counter) {
4357 flow_dv_counter_release(flow->counter);
4358 flow->counter = NULL;
4360 if (flow->tag_resource) {
4361 flow_dv_tag_release(dev, flow->tag_resource);
4362 flow->tag_resource = NULL;
4364 while (!LIST_EMPTY(&flow->dev_flows)) {
4365 dev_flow = LIST_FIRST(&flow->dev_flows);
4366 LIST_REMOVE(dev_flow, next);
4367 if (dev_flow->dv.matcher)
4368 flow_dv_matcher_release(dev, dev_flow);
4369 if (dev_flow->dv.encap_decap)
4370 flow_dv_encap_decap_resource_release(dev_flow);
4371 if (dev_flow->dv.modify_hdr)
4372 flow_dv_modify_hdr_resource_release(dev_flow);
4373 if (dev_flow->dv.jump)
4374 flow_dv_jump_tbl_resource_release(dev_flow);
4375 if (dev_flow->dv.port_id_action)
4376 flow_dv_port_id_action_resource_release(dev_flow);
4382 * Query a dv flow rule for its statistics via devx.
4385 * Pointer to Ethernet device.
4387 * Pointer to the sub flow.
4389 * Data retrieved by the query.
4391 * Perform verbose error reporting if not NULL.
4394 * 0 on success, a negative errno value otherwise and rte_errno is set.
4397 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4398 void *data, struct rte_flow_error *error)
4400 struct mlx5_priv *priv = dev->data->dev_private;
4401 struct rte_flow_query_count *qc = data;
4406 if (!priv->config.devx)
4407 return rte_flow_error_set(error, ENOTSUP,
4408 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4410 "counters are not supported");
4411 if (flow->counter) {
4412 err = mlx5_devx_cmd_flow_counter_query
4413 (flow->counter->dcs,
4414 qc->reset, &pkts, &bytes);
4416 return rte_flow_error_set
4418 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4420 "cannot read counters");
4423 qc->hits = pkts - flow->counter->hits;
4424 qc->bytes = bytes - flow->counter->bytes;
4426 flow->counter->hits = pkts;
4427 flow->counter->bytes = bytes;
4431 return rte_flow_error_set(error, EINVAL,
4432 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4434 "counters are not available");
4440 * @see rte_flow_query()
4444 flow_dv_query(struct rte_eth_dev *dev,
4445 struct rte_flow *flow __rte_unused,
4446 const struct rte_flow_action *actions __rte_unused,
4447 void *data __rte_unused,
4448 struct rte_flow_error *error __rte_unused)
4452 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4453 switch (actions->type) {
4454 case RTE_FLOW_ACTION_TYPE_VOID:
4456 case RTE_FLOW_ACTION_TYPE_COUNT:
4457 ret = flow_dv_query_count(dev, flow, data, error);
4460 return rte_flow_error_set(error, ENOTSUP,
4461 RTE_FLOW_ERROR_TYPE_ACTION,
4463 "action not supported");
4470 * Mutex-protected thunk to flow_dv_translate().
4473 flow_d_translate(struct rte_eth_dev *dev,
4474 struct mlx5_flow *dev_flow,
4475 const struct rte_flow_attr *attr,
4476 const struct rte_flow_item items[],
4477 const struct rte_flow_action actions[],
4478 struct rte_flow_error *error)
4482 flow_d_shared_lock(dev);
4483 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4484 flow_d_shared_unlock(dev);
4489 * Mutex-protected thunk to flow_dv_apply().
4492 flow_d_apply(struct rte_eth_dev *dev,
4493 struct rte_flow *flow,
4494 struct rte_flow_error *error)
4498 flow_d_shared_lock(dev);
4499 ret = flow_dv_apply(dev, flow, error);
4500 flow_d_shared_unlock(dev);
4505 * Mutex-protected thunk to flow_dv_remove().
4508 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4510 flow_d_shared_lock(dev);
4511 flow_dv_remove(dev, flow);
4512 flow_d_shared_unlock(dev);
4516 * Mutex-protected thunk to flow_dv_destroy().
4519 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4521 flow_d_shared_lock(dev);
4522 flow_dv_destroy(dev, flow);
4523 flow_d_shared_unlock(dev);
4526 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4527 .validate = flow_dv_validate,
4528 .prepare = flow_dv_prepare,
4529 .translate = flow_d_translate,
4530 .apply = flow_d_apply,
4531 .remove = flow_d_remove,
4532 .destroy = flow_d_destroy,
4533 .query = flow_dv_query,
4536 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */