/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0},
};
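
/*
 * Illustrative note (not part of the driver): each table above is a
 * zero-terminated byte map from a protocol header to firmware
 * modification fields. A hypothetical single-field table for the IPv4
 * TOS byte (offset 1 in the header) would look like this, assuming a
 * matching MLX5_MODI_* identifier exists:
 *
 * @code
 *	struct field_modify_info modify_ipv4_tos[] = {
 *		{1, 1, MLX5_MODI_OUT_IP_DSCP}, // hypothetical identifier
 *		{0, 0, 0},
 *	};
 * @endcode
 *
 * flow_dv_convert_modify_action() below walks such a table and emits
 * one modification command per segment whose mask bytes are non-zero.
 */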
/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * in use.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					    4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
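
/*
 * Usage sketch (illustrative, not part of the driver): rewriting the
 * IPv4 source address to 192.0.2.1. The "res" and "error" variables are
 * assumed to exist as in the previous sketch:
 *
 * @code
 *	struct rte_flow_action_set_ipv4 conf = {
 *		.ipv4_addr = RTE_BE32(0xc0000201), // 192.0.2.1
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		.conf = &conf,
 *	};
 *	ret = flow_dv_convert_action_modify_ipv4(&res, &action, &error);
 * @endcode
 */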
/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
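
/*
 * Usage sketch (illustrative, not part of the driver): for a pattern
 * eth / ipv4 / udp, flow_dv_attr_init() sets attr->udp, so a
 * RTE_FLOW_ACTION_TYPE_SET_TP_DST action is translated against the UDP
 * header. "pattern", "res" and "error" are assumed to exist, and the
 * zeroed-union initializer assumes the union's raw .attr view:
 *
 * @code
 *	union flow_dv_attr attr = { .attr = 0 };
 *	const struct rte_flow_action_set_tp conf = {
 *		.port = RTE_BE16(4789),
 *	};
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TP_DST,
 *		.conf = &conf,
 *	};
 *	ret = flow_dv_convert_action_modify_tp(&res, &action, pattern,
 *					       &attr, &error);
 *	// Emits one MLX5_MODI_OUT_UDP_DPORT SET command.
 * @endcode
 */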
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
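
/*
 * Note (illustrative): the decrement is expressed as an ADD command with
 * operand 0xFF. The device adds modulo 2^8, so for any TTL value t:
 *
 *	(t + 0xFF) mod 256 == (t - 1) mod 256
 *
 * e.g. 64 + 255 = 319 and 319 mod 256 = 63, i.e. TTL 64 becomes 63.
 */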
/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "pattern not supported for ingress");
	return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
		.id = 0xffffffff,
	};
	uint16_t esw_domain_id;
	uint16_t item_port_esw_domain_id;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  mask,
					  "no support for partial mask on"
					  " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 error);
	if (ret)
		return ret;
	if (!spec)
		return 0;
	ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
					NULL);
	if (ret)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
	if (ret < 0)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	if (item_port_esw_domain_id != esw_domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (!attr->transfer && attr->ingress &&
	    !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " decap action");
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	/* decap action is valid on egress only if it is followed by encap */
	if (attr->egress) {
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		       action++)
			;
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					 NULL, "decap action not supported"
					 " for egress");
	}
	return 0;
}
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;

	resource->flags = flow->group ? 0 : 1;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_ns;
	else
		ns = sh->rx_ns;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
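
/*
 * Design note (illustrative): all the *_resource_register() helpers in
 * this file follow the same reference-counted caching pattern: linear
 * lookup in a per-device list, refcount bump on hit, allocate and
 * create the verbs/DR action and insert at the list head on miss. The
 * shape, with field and helper names assumed for the sketch:
 *
 * @code
 *	LIST_FOREACH(entry, &cache, next)
 *		if (key_equal(entry, key)) {
 *			rte_atomic32_inc(&entry->refcnt);
 *			return entry;
 *		}
 *	entry = create(key);	// refcnt initialized to 1
 *	LIST_INSERT_HEAD(&cache, entry, next);
 * @endcode
 */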
/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_jump_tbl_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
		if (resource->tbl == cache_resource->tbl) {
			DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.jump = cache_resource;
			return 0;
		}
	}
	/* Register new jump table resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_flow_tbl
			(resource->tbl->obj);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
	dev_flow->dv.jump = cache_resource;
	DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}

/**
 * Get the size of specific rte_flow_item_type
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
	size_t retval;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
		break;
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
	default:
		retval = 0;
		break;
	}
	return retval;
}

#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
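
/*
 * Note (illustrative): the IPv4 defaults above combine into the usual
 * first header byte: MLX5_ENCAP_IPV4_VERSION | MLX5_ENCAP_IPV4_IHL_MIN
 * == 0x40 | 0x05 == 0x45, i.e. version 4 with a 5-word (20-byte)
 * header, plus a default TTL of 0x40 (64).
 */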
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
					MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	};

	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		raw_encap_data =
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
	} else {
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
			encap_data =
				((const struct rte_flow_action_vxlan_encap *)
						action->conf)->definition;
		else
			encap_data =
				((const struct rte_flow_action_nvgre_encap *)
						action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
					       &res.size, error))
			return -rte_errno;
	}
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
	return 0;
}

/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
{
	struct mlx5_flow_dv_encap_decap_resource res = {
		.size = 0,
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
	};

	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
	return 0;
}

/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;

	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	res.reformat_type = attr->egress ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
	res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
				     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
	return 0;
}
/**
 * Validate the modify-header actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
{
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " modify action");
	return 0;
}

/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no L2 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv4 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no ipv6 item in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
						  "in pattern");
	}
	return ret;
}

/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	int ret = 0;

	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (!ret) {
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "no IP protocol in pattern");
	}
	return ret;
}

/**
 * Validate jump action.
 *
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] group
 *   The group of the current flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_jump(const struct rte_flow_action *action,
			     uint32_t group,
			     struct rte_flow_error *error)
{
	if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be higher than"
					  " the current flow group");
	return 0;
}
/**
 * Validate the port_id action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Port_id RTE action structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_port_id *port_id;
	uint16_t port;
	uint16_t esw_domain_id;
	uint16_t act_port_domain_id;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "port id action is valid in transfer"
					  " mode only");
	if (!action || !action->conf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL,
					  "port id action parameters must be"
					  " specified");
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate action in"
					  " a flow");
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
	if (ret < 0)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	port_id = action->conf;
	port = port_id->original ? dev->data->port_id : port_id->id;
	ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
	if (ret < 0)
		return rte_flow_error_set
				(error, -ret,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
				 "failed to obtain E-Switch port id for port");
	if (act_port_domain_id != esw_domain_id)
		return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "port does not belong to"
				 " E-Switch being configured");
	return 0;
}

/**
 * Find existing modify-header resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_ns *ns =
		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
		sh->tx_ns : sh->rx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
			     sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, 0,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}

/**
 * Get or create a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	int ret;

	if (!priv->config.devx) {
		ret = -ENOTSUP;
		goto error_exit;
	}
	if (shared) {
		LIST_FOREACH(cnt, &priv->flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	if (!dcs || !cnt) {
		ret = -ENOMEM;
		goto error_exit;
	}
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	if (ret)
		goto error_exit;
	struct mlx5_flow_counter tmpl = {
		.shared = shared,
		.ref_cnt = 1,
		.id = id,
		.dcs = dcs,
	};
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	if (!tmpl.action) {
		ret = errno;
		goto error_exit;
	}
	*cnt = tmpl;
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
	return cnt;
error_exit:
	rte_free(cnt);
	rte_free(dcs);
	rte_errno = -ret;
	return NULL;
}

/**
 * Release a flow counter.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_release(struct mlx5_flow_counter *counter)
{
	int ret;

	if (!counter)
		return;
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
		if (ret)
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
		rte_free(counter);
	}
}
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

#ifndef HAVE_MLX5DV_DR
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#endif
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
		if (attributes->group >= MLX5_MAX_TABLES_FDB)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				 NULL, "group must be smaller than "
				 RTE_STR(MLX5_MAX_TABLES_FDB));
	}
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return 0;
}
1948 * Internal validation function. For validating both actions and items.
1951 * Pointer to the rte_eth_dev structure.
1953 * Pointer to the flow attributes.
1955 * Pointer to the list of items.
1956 * @param[in] actions
1957 * Pointer to the list of actions.
1959 * Pointer to the error structure.
1962 * 0 on success, a negative errno value otherwise and rte_errno is set.
1965 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1966 const struct rte_flow_item items[],
1967 const struct rte_flow_action actions[],
1968 struct rte_flow_error *error)
1971 uint64_t action_flags = 0;
1972 uint64_t item_flags = 0;
1973 uint64_t last_item = 0;
1974 uint8_t next_protocol = 0xff;
1979 ret = flow_dv_validate_attributes(dev, attr, error);
1982 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1983 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1984 switch (items->type) {
1985 case RTE_FLOW_ITEM_TYPE_VOID:
1987 case RTE_FLOW_ITEM_TYPE_PORT_ID:
1988 ret = flow_dv_validate_item_port_id
1989 (dev, items, attr, item_flags, error);
1992 last_item |= MLX5_FLOW_ITEM_PORT_ID;
1994 case RTE_FLOW_ITEM_TYPE_ETH:
1995 ret = mlx5_flow_validate_item_eth(items, item_flags,
1999 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2000 MLX5_FLOW_LAYER_OUTER_L2;
2002 case RTE_FLOW_ITEM_TYPE_VLAN:
2003 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2007 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2008 MLX5_FLOW_LAYER_OUTER_VLAN;
2010 case RTE_FLOW_ITEM_TYPE_IPV4:
2011 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2015 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2016 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2017 if (items->mask != NULL &&
2018 ((const struct rte_flow_item_ipv4 *)
2019 items->mask)->hdr.next_proto_id) {
2021 ((const struct rte_flow_item_ipv4 *)
2022 (items->spec))->hdr.next_proto_id;
2024 ((const struct rte_flow_item_ipv4 *)
2025 (items->mask))->hdr.next_proto_id;
2027 /* Reset for inner layer. */
2028 next_protocol = 0xff;
2031 case RTE_FLOW_ITEM_TYPE_IPV6:
2032 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2036 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2037 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2038 if (items->mask != NULL &&
2039 ((const struct rte_flow_item_ipv6 *)
2040 items->mask)->hdr.proto) {
2042 ((const struct rte_flow_item_ipv6 *)
2043 items->spec)->hdr.proto;
2045 ((const struct rte_flow_item_ipv6 *)
2046 items->mask)->hdr.proto;
2048 /* Reset for inner layer. */
2049 next_protocol = 0xff;
2052 case RTE_FLOW_ITEM_TYPE_TCP:
2053 ret = mlx5_flow_validate_item_tcp
2056 &rte_flow_item_tcp_mask,
2060 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2061 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2063 case RTE_FLOW_ITEM_TYPE_UDP:
2064 ret = mlx5_flow_validate_item_udp(items, item_flags,
2069 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2070 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2072 case RTE_FLOW_ITEM_TYPE_GRE:
2073 case RTE_FLOW_ITEM_TYPE_NVGRE:
2074 ret = mlx5_flow_validate_item_gre(items, item_flags,
2075 next_protocol, error);
2078 last_item = MLX5_FLOW_LAYER_GRE;
2080 case RTE_FLOW_ITEM_TYPE_VXLAN:
2081 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2085 last_item = MLX5_FLOW_LAYER_VXLAN;
2087 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2088 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2093 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2095 case RTE_FLOW_ITEM_TYPE_MPLS:
2096 ret = mlx5_flow_validate_item_mpls(dev, items,
2101 last_item = MLX5_FLOW_LAYER_MPLS;
2103 case RTE_FLOW_ITEM_TYPE_META:
2104 ret = flow_dv_validate_item_meta(dev, items, attr,
2108 last_item = MLX5_FLOW_ITEM_METADATA;
2111 return rte_flow_error_set(error, ENOTSUP,
2112 RTE_FLOW_ERROR_TYPE_ITEM,
2113 NULL, "item not supported");
2115 item_flags |= last_item;
2117 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2118 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2119 return rte_flow_error_set(error, ENOTSUP,
2120 RTE_FLOW_ERROR_TYPE_ACTION,
2121 actions, "too many actions");
2122 switch (actions->type) {
2123 case RTE_FLOW_ACTION_TYPE_VOID:
2125 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2126 ret = flow_dv_validate_action_port_id(dev,
2133 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2136 case RTE_FLOW_ACTION_TYPE_FLAG:
2137 ret = mlx5_flow_validate_action_flag(action_flags,
2141 action_flags |= MLX5_FLOW_ACTION_FLAG;
2144 case RTE_FLOW_ACTION_TYPE_MARK:
2145 ret = mlx5_flow_validate_action_mark(actions,
2150 action_flags |= MLX5_FLOW_ACTION_MARK;
2153 case RTE_FLOW_ACTION_TYPE_DROP:
2154 ret = mlx5_flow_validate_action_drop(action_flags,
2158 action_flags |= MLX5_FLOW_ACTION_DROP;
2161 case RTE_FLOW_ACTION_TYPE_QUEUE:
2162 ret = mlx5_flow_validate_action_queue(actions,
2167 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2170 case RTE_FLOW_ACTION_TYPE_RSS:
2171 ret = mlx5_flow_validate_action_rss(actions,
2177 action_flags |= MLX5_FLOW_ACTION_RSS;
2180 case RTE_FLOW_ACTION_TYPE_COUNT:
2181 ret = flow_dv_validate_action_count(dev, error);
2184 action_flags |= MLX5_FLOW_ACTION_COUNT;
2187 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2188 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2189 ret = flow_dv_validate_action_l2_encap(action_flags,
2194 action_flags |= actions->type ==
2195 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2196 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2197 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2200 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2201 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2202 ret = flow_dv_validate_action_l2_decap(action_flags,
2206 action_flags |= actions->type ==
2207 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2208 MLX5_FLOW_ACTION_VXLAN_DECAP :
2209 MLX5_FLOW_ACTION_NVGRE_DECAP;
2212 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2213 ret = flow_dv_validate_action_raw_encap(action_flags,
2218 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2221 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2222 ret = flow_dv_validate_action_raw_decap(action_flags,
2227 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2230 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2231 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2232 ret = flow_dv_validate_action_modify_mac(action_flags,
2238 /* Count all modify-header actions as one action. */
2239 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2241 action_flags |= actions->type ==
2242 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2243 MLX5_FLOW_ACTION_SET_MAC_SRC :
2244 MLX5_FLOW_ACTION_SET_MAC_DST;
2247 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2248 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2249 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2255 /* Count all modify-header actions as one action. */
2256 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2258 action_flags |= actions->type ==
2259 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2260 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2261 MLX5_FLOW_ACTION_SET_IPV4_DST;
2263 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2264 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2265 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2271 /* Count all modify-header actions as one action. */
2272 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2274 action_flags |= actions->type ==
2275 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2276 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2277 MLX5_FLOW_ACTION_SET_IPV6_DST;
2279 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2280 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2281 ret = flow_dv_validate_action_modify_tp(action_flags,
2287 /* Count all modify-header actions as one action. */
2288 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2290 action_flags |= actions->type ==
2291 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2292 MLX5_FLOW_ACTION_SET_TP_SRC :
2293 MLX5_FLOW_ACTION_SET_TP_DST;
2295 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2296 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2297 ret = flow_dv_validate_action_modify_ttl(action_flags,
2303 /* Count all modify-header actions as one action. */
2304 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2306 action_flags |= actions->type ==
2307 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2308 MLX5_FLOW_ACTION_SET_TTL :
2309 MLX5_FLOW_ACTION_DEC_TTL;
2311 case RTE_FLOW_ACTION_TYPE_JUMP:
2312 ret = flow_dv_validate_action_jump(actions,
2313 attr->group, error);
2317 action_flags |= MLX5_FLOW_ACTION_JUMP;
2320 return rte_flow_error_set(error, ENOTSUP,
2321 RTE_FLOW_ERROR_TYPE_ACTION,
2323 "action not supported");
2326 /* E-Switch has a few restrictions on using items and actions. */
2327 if (attr->transfer) {
2328 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2329 return rte_flow_error_set(error, ENOTSUP,
2330 RTE_FLOW_ERROR_TYPE_ACTION,
2332 "unsupported action FLAG");
2333 if (action_flags & MLX5_FLOW_ACTION_MARK)
2334 return rte_flow_error_set(error, ENOTSUP,
2335 RTE_FLOW_ERROR_TYPE_ACTION,
2337 "unsupported action MARK");
2338 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2339 return rte_flow_error_set(error, ENOTSUP,
2340 RTE_FLOW_ERROR_TYPE_ACTION,
2342 "unsupported action QUEUE");
2343 if (action_flags & MLX5_FLOW_ACTION_RSS)
2344 return rte_flow_error_set(error, ENOTSUP,
2345 RTE_FLOW_ERROR_TYPE_ACTION,
2347 "unsupported action RSS");
2348 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2349 return rte_flow_error_set(error, EINVAL,
2350 RTE_FLOW_ERROR_TYPE_ACTION,
2352 "no fate action is found");
2354 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2355 return rte_flow_error_set(error, EINVAL,
2356 RTE_FLOW_ERROR_TYPE_ACTION,
2358 "no fate action is found");
2364 * Internal preparation function. Allocates the DV flow size;
2365 * this size is constant.
2368 * Pointer to the flow attributes.
2370 * Pointer to the list of items.
2371 * @param[in] actions
2372 * Pointer to the list of actions.
2374 * Pointer to the error structure.
2377 * Pointer to mlx5_flow object on success,
2378 * otherwise NULL and rte_errno is set.
2380 static struct mlx5_flow *
2381 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2382 const struct rte_flow_item items[] __rte_unused,
2383 const struct rte_flow_action actions[] __rte_unused,
2384 struct rte_flow_error *error)
2386 uint32_t size = sizeof(struct mlx5_flow);
2387 struct mlx5_flow *flow;
2389 flow = rte_calloc(__func__, 1, size, 0);
2391 rte_flow_error_set(error, ENOMEM,
2392 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2393 "not enough memory to create flow");
2396 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2402 * Sanity check for match mask and value. Similar to check_valid_spec() in
2403 * the kernel driver. If an unmasked bit is present in the value, it returns failure.
2406 * Pointer to match mask buffer.
2407 * @param match_value
2408 * Pointer to match value buffer.
2411 * 0 if valid, -EINVAL otherwise.
2414 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2416 uint8_t *m = match_mask;
2417 uint8_t *v = match_value;
2420 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2423 "match_value differs from match_criteria"
2424 " %p[%u] != %p[%u]",
2425 match_value, i, match_mask, i);
2434 * Add Ethernet item to matcher and to the value.
2436 * @param[in, out] matcher
2438 * @param[in, out] key
2439 * Flow matcher value.
2441 * Flow pattern to translate.
2443 * Item is inner pattern.
2446 flow_dv_translate_item_eth(void *matcher, void *key,
2447 const struct rte_flow_item *item, int inner)
2449 const struct rte_flow_item_eth *eth_m = item->mask;
2450 const struct rte_flow_item_eth *eth_v = item->spec;
2451 const struct rte_flow_item_eth nic_mask = {
2452 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2453 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2454 .type = RTE_BE16(0xffff),
2466 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2468 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2470 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2472 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2474 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2475 &eth_m->dst, sizeof(eth_m->dst));
2476 /* The value must be in the range of the mask. */
2477 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2478 for (i = 0; i < sizeof(eth_m->dst); ++i)
2479 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2480 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2481 &eth_m->src, sizeof(eth_m->src));
2482 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2483 /* The value must be in the range of the mask. */
2484 for (i = 0; i < sizeof(eth_m->src); ++i)
2485 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2486 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2487 rte_be_to_cpu_16(eth_m->type));
2488 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2489 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2493 * Add VLAN item to matcher and to the value.
2495 * @param[in, out] matcher
2497 * @param[in, out] key
2498 * Flow matcher value.
2500 * Flow pattern to translate.
2502 * Item is inner pattern.
2505 flow_dv_translate_item_vlan(void *matcher, void *key,
2506 const struct rte_flow_item *item,
2509 const struct rte_flow_item_vlan *vlan_m = item->mask;
2510 const struct rte_flow_item_vlan *vlan_v = item->spec;
2511 const struct rte_flow_item_vlan nic_mask = {
2512 .tci = RTE_BE16(0x0fff),
2513 .inner_type = RTE_BE16(0xffff),
2525 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2527 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2529 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2531 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2533 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2534 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2535 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2536 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2537 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2538 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2539 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2540 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2541 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2542 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2546 * Add IPV4 item to matcher and to the value.
2548 * @param[in, out] matcher
2550 * @param[in, out] key
2551 * Flow matcher value.
2553 * Flow pattern to translate.
2555 * Item is inner pattern.
2557 * The group to insert the rule.
2560 flow_dv_translate_item_ipv4(void *matcher, void *key,
2561 const struct rte_flow_item *item,
2562 int inner, uint32_t group)
2564 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2565 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2566 const struct rte_flow_item_ipv4 nic_mask = {
2568 .src_addr = RTE_BE32(0xffffffff),
2569 .dst_addr = RTE_BE32(0xffffffff),
2570 .type_of_service = 0xff,
2571 .next_proto_id = 0xff,
2581 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2583 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2585 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2587 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2590 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2592 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2593 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2598 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2599 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2600 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2601 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2602 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2603 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2604 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2605 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2606 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2607 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2608 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2609 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2610 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2611 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2612 ipv4_m->hdr.type_of_service);
2613 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2614 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2615 ipv4_m->hdr.type_of_service >> 2);
2616 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2617 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2618 ipv4_m->hdr.next_proto_id);
2619 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2620 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2624 * Add IPV6 item to matcher and to the value.
2626 * @param[in, out] matcher
2628 * @param[in, out] key
2629 * Flow matcher value.
2631 * Flow pattern to translate.
2633 * Item is inner pattern.
2635 * The group to insert the rule.
2638 flow_dv_translate_item_ipv6(void *matcher, void *key,
2639 const struct rte_flow_item *item,
2640 int inner, uint32_t group)
2642 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2643 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2644 const struct rte_flow_item_ipv6 nic_mask = {
2647 "\xff\xff\xff\xff\xff\xff\xff\xff"
2648 "\xff\xff\xff\xff\xff\xff\xff\xff",
2650 "\xff\xff\xff\xff\xff\xff\xff\xff"
2651 "\xff\xff\xff\xff\xff\xff\xff\xff",
2652 .vtc_flow = RTE_BE32(0xffffffff),
2659 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2660 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2669 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2671 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2673 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2675 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2678 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2680 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2681 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2686 size = sizeof(ipv6_m->hdr.dst_addr);
2687 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2688 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2689 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2690 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2691 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2692 for (i = 0; i < size; ++i)
2693 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2694 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2695 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2696 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2697 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2698 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2699 for (i = 0; i < size; ++i)
2700 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2702 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2703 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2704 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2705 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2706 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2707 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2710 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2712 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2715 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2717 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2721 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2723 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2724 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2728 * Add TCP item to matcher and to the value.
2730 * @param[in, out] matcher
2732 * @param[in, out] key
2733 * Flow matcher value.
2735 * Flow pattern to translate.
2737 * Item is inner pattern.
2740 flow_dv_translate_item_tcp(void *matcher, void *key,
2741 const struct rte_flow_item *item,
2744 const struct rte_flow_item_tcp *tcp_m = item->mask;
2745 const struct rte_flow_item_tcp *tcp_v = item->spec;
2750 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2752 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2754 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2756 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2758 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2759 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2763 tcp_m = &rte_flow_item_tcp_mask;
2764 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2765 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2766 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2767 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2768 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2769 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2770 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2771 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2775 * Add UDP item to matcher and to the value.
2777 * @param[in, out] matcher
2779 * @param[in, out] key
2780 * Flow matcher value.
2782 * Flow pattern to translate.
2784 * Item is inner pattern.
2787 flow_dv_translate_item_udp(void *matcher, void *key,
2788 const struct rte_flow_item *item,
2791 const struct rte_flow_item_udp *udp_m = item->mask;
2792 const struct rte_flow_item_udp *udp_v = item->spec;
2797 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2799 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2801 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2803 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2805 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2806 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2810 udp_m = &rte_flow_item_udp_mask;
2811 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2812 rte_be_to_cpu_16(udp_m->hdr.src_port));
2813 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2814 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2815 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2816 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2817 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2818 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2822 * Add GRE item to matcher and to the value.
2824 * @param[in, out] matcher
2826 * @param[in, out] key
2827 * Flow matcher value.
2829 * Flow pattern to translate.
2831 * Item is inner pattern.
2834 flow_dv_translate_item_gre(void *matcher, void *key,
2835 const struct rte_flow_item *item,
2838 const struct rte_flow_item_gre *gre_m = item->mask;
2839 const struct rte_flow_item_gre *gre_v = item->spec;
2842 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2843 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2846 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2848 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2850 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2852 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2854 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2855 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2859 gre_m = &rte_flow_item_gre_mask;
2860 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2861 rte_be_to_cpu_16(gre_m->protocol));
2862 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2863 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2867 * Add NVGRE item to matcher and to the value.
2869 * @param[in, out] matcher
2871 * @param[in, out] key
2872 * Flow matcher value.
2874 * Flow pattern to translate.
2876 * Item is inner pattern.
2879 flow_dv_translate_item_nvgre(void *matcher, void *key,
2880 const struct rte_flow_item *item,
2883 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2884 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2885 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2886 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2887 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2888 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2894 flow_dv_translate_item_gre(matcher, key, item, inner);
2898 nvgre_m = &rte_flow_item_nvgre_mask;
2899 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2900 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2901 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2902 memcpy(gre_key_m, tni_flow_id_m, size);
2903 for (i = 0; i < size; ++i)
2904 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2908 * Add VXLAN item to matcher and to the value.
2910 * @param[in, out] matcher
2912 * @param[in, out] key
2913 * Flow matcher value.
2915 * Flow pattern to translate.
2917 * Item is inner pattern.
2920 flow_dv_translate_item_vxlan(void *matcher, void *key,
2921 const struct rte_flow_item *item,
2924 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2925 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2928 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2929 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2937 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2939 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2941 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2943 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2945 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2946 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2947 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2948 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2949 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2954 vxlan_m = &rte_flow_item_vxlan_mask;
2955 size = sizeof(vxlan_m->vni);
2956 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2957 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2958 memcpy(vni_m, vxlan_m->vni, size);
2959 for (i = 0; i < size; ++i)
2960 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2964 * Add MPLS item to matcher and to the value.
2966 * @param[in, out] matcher
2968 * @param[in, out] key
2969 * Flow matcher value.
2971 * Flow pattern to translate.
2972 * @param[in] prev_layer
2973 * The protocol layer indicated in previous item.
2975 * Item is inner pattern.
2978 flow_dv_translate_item_mpls(void *matcher, void *key,
2979 const struct rte_flow_item *item,
2980 uint64_t prev_layer,
2983 const uint32_t *in_mpls_m = item->mask;
2984 const uint32_t *in_mpls_v = item->spec;
2985 uint32_t *out_mpls_m = NULL;
2986 uint32_t *out_mpls_v = NULL;
2987 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2988 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2989 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2991 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2992 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2993 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2995 switch (prev_layer) {
2996 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2997 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2998 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2999 MLX5_UDP_PORT_MPLS);
3001 case MLX5_FLOW_LAYER_GRE:
3002 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3003 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3007 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3008 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3015 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3016 switch (prev_layer) {
3017 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3019 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3020 outer_first_mpls_over_udp);
3022 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3023 outer_first_mpls_over_udp);
3025 case MLX5_FLOW_LAYER_GRE:
3027 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3028 outer_first_mpls_over_gre);
3030 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3031 outer_first_mpls_over_gre);
3034 /* Inner MPLS not over GRE is not supported. */
3037 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3041 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3047 if (out_mpls_m && out_mpls_v) {
3048 *out_mpls_m = *in_mpls_m;
3049 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3054 * Add META item to matcher and to the value.
3056 * @param[in, out] matcher
3058 * @param[in, out] key
3059 * Flow matcher value.
3061 * Flow pattern to translate.
3066 flow_dv_translate_item_meta(void *matcher, void *key,
3067 const struct rte_flow_item *item)
3069 const struct rte_flow_item_meta *meta_m;
3070 const struct rte_flow_item_meta *meta_v;
3072 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3074 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3076 meta_m = (const void *)item->mask;
3078 meta_m = &rte_flow_item_meta_mask;
3079 meta_v = (const void *)item->spec;
3081 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3082 rte_be_to_cpu_32(meta_m->data));
3083 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3084 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3089 * Add source vport match to the specified matcher.
3091 * @param[in, out] matcher
3093 * @param[in, out] key
3094 * Flow matcher value.
3096 * Source vport value to match.
3101 flow_dv_translate_item_source_vport(void *matcher, void *key,
3102 int16_t port, uint16_t mask)
3104 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3105 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3107 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3108 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3112 * Translate port-id item to eswitch match on port-id.
3115 * The device to configure through.
3116 * @param[in, out] matcher
3118 * @param[in, out] key
3119 * Flow matcher value.
3121 * Flow pattern to translate.
3124 * 0 on success, a negative errno value otherwise.
3127 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3128 void *key, const struct rte_flow_item *item)
3130 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3131 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3132 uint16_t mask, val, id;
3135 mask = pid_m ? pid_m->id : 0xffff;
3136 id = pid_v ? pid_v->id : dev->data->port_id;
3137 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3140 flow_dv_translate_item_source_vport(matcher, key, val, mask);
3144 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
3146 #define HEADER_IS_ZERO(match_criteria, headers) \
3147 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3148 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3151 * Calculate flow matcher enable bitmap.
3153 * @param match_criteria
3154 * Pointer to flow matcher criteria.
3157 * Bitmap of enabled fields.
3160 flow_dv_matcher_enable(uint32_t *match_criteria)
3162 uint8_t match_criteria_enable;
3164 match_criteria_enable =
3165 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3166 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3167 match_criteria_enable |=
3168 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3169 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3170 match_criteria_enable |=
3171 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3172 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3173 match_criteria_enable |=
3174 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3175 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3176 #ifdef HAVE_MLX5DV_DR
3177 match_criteria_enable |=
3178 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3179 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3181 return match_criteria_enable;
3188 * @param[in, out] dev
3189 * Pointer to rte_eth_dev structure.
3190 * @param[in] table_id
3193 * Direction of the table.
3194 * @param[in] transfer
3195 * E-Switch or NIC flow.
3197 * Pointer to the error structure.
3200 * Returns the table resource based on the index, NULL in case of failure.
3202 static struct mlx5_flow_tbl_resource *
3203 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3204 uint32_t table_id, uint8_t egress,
3206 struct rte_flow_error *error)
3208 struct mlx5_priv *priv = dev->data->dev_private;
3209 struct mlx5_ibv_shared *sh = priv->sh;
3210 struct mlx5_flow_tbl_resource *tbl;
3212 #ifdef HAVE_MLX5DV_DR
3214 tbl = &sh->fdb_tbl[table_id];
3216 tbl->obj = mlx5_glue->dr_create_flow_tbl
3217 (sh->fdb_ns, table_id);
3218 } else if (egress) {
3219 tbl = &sh->tx_tbl[table_id];
3221 tbl->obj = mlx5_glue->dr_create_flow_tbl
3222 (sh->tx_ns, table_id);
3224 tbl = &sh->rx_tbl[table_id];
3226 tbl->obj = mlx5_glue->dr_create_flow_tbl
3227 (sh->rx_ns, table_id);
3230 rte_flow_error_set(error, ENOMEM,
3231 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3232 NULL, "cannot create table");
3235 rte_atomic32_inc(&tbl->refcnt);
3241 return &sh->fdb_tbl[table_id];
3243 return &sh->tx_tbl[table_id];
3245 return &sh->rx_tbl[table_id];
3250 * Release a flow table.
3253 * Table resource to be released.
3256 * Returns 0 if the table was released, 1 otherwise.
3259 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3263 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3264 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3272 * Register the flow matcher.
3274 * @param[in, out] dev
3275 * Pointer to rte_eth_dev structure.
3276 * @param[in, out] matcher
3277 * Pointer to flow matcher.
3278 * @param[in, out] dev_flow
3279 * Pointer to the dev_flow.
3281 * Pointer to the error structure.
3284 * 0 on success, otherwise -errno and errno is set.
3287 flow_dv_matcher_register(struct rte_eth_dev *dev,
3288 struct mlx5_flow_dv_matcher *matcher,
3289 struct mlx5_flow *dev_flow,
3290 struct rte_flow_error *error)
3292 struct mlx5_priv *priv = dev->data->dev_private;
3293 struct mlx5_ibv_shared *sh = priv->sh;
3294 struct mlx5_flow_dv_matcher *cache_matcher;
3295 struct mlx5dv_flow_matcher_attr dv_attr = {
3296 .type = IBV_FLOW_ATTR_NORMAL,
3297 .match_mask = (void *)&matcher->mask,
3299 struct mlx5_flow_tbl_resource *tbl = NULL;
3301 /* Lookup from cache. */
3302 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3303 if (matcher->crc == cache_matcher->crc &&
3304 matcher->priority == cache_matcher->priority &&
3305 matcher->egress == cache_matcher->egress &&
3306 matcher->group == cache_matcher->group &&
3307 matcher->transfer == cache_matcher->transfer &&
3308 !memcmp((const void *)matcher->mask.buf,
3309 (const void *)cache_matcher->mask.buf,
3310 cache_matcher->mask.size)) {
3312 "priority %hd use %s matcher %p: refcnt %d++",
3313 cache_matcher->priority,
3314 cache_matcher->egress ? "tx" : "rx",
3315 (void *)cache_matcher,
3316 rte_atomic32_read(&cache_matcher->refcnt));
3317 rte_atomic32_inc(&cache_matcher->refcnt);
3318 dev_flow->dv.matcher = cache_matcher;
3322 /* Register new matcher. */
3323 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3325 return rte_flow_error_set(error, ENOMEM,
3326 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3327 "cannot allocate matcher memory");
3328 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3329 matcher->egress, matcher->transfer,
3332 rte_free(cache_matcher);
3333 return rte_flow_error_set(error, ENOMEM,
3334 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3335 NULL, "cannot create table");
3337 *cache_matcher = *matcher;
3338 dv_attr.match_criteria_enable =
3339 flow_dv_matcher_enable(cache_matcher->mask.buf);
3340 dv_attr.priority = matcher->priority;
3341 if (matcher->egress)
3342 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3343 cache_matcher->matcher_object =
3344 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3345 if (!cache_matcher->matcher_object) {
3346 rte_free(cache_matcher);
3347 #ifdef HAVE_MLX5DV_DR
3348 flow_dv_tbl_resource_release(tbl);
3350 return rte_flow_error_set(error, ENOMEM,
3351 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3352 NULL, "cannot create matcher");
3354 rte_atomic32_inc(&cache_matcher->refcnt);
3355 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3356 dev_flow->dv.matcher = cache_matcher;
3357 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3358 cache_matcher->priority,
3359 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3360 rte_atomic32_read(&cache_matcher->refcnt));
3361 rte_atomic32_inc(&tbl->refcnt);
3366 * Find existing tag resource or create and register a new one.
3368 * @param[in, out] dev
3369 * Pointer to rte_eth_dev structure.
3370 * @param[in, out] resource
3371 * Pointer to tag resource.
3372 * @param[in, out] dev_flow
3373 * Pointer to the dev_flow.
3375 * Pointer to the error structure.
3378 * 0 on success, otherwise -errno and errno is set.
3381 flow_dv_tag_resource_register
3382 (struct rte_eth_dev *dev,
3383 struct mlx5_flow_dv_tag_resource *resource,
3384 struct mlx5_flow *dev_flow,
3385 struct rte_flow_error *error)
3387 struct mlx5_priv *priv = dev->data->dev_private;
3388 struct mlx5_ibv_shared *sh = priv->sh;
3389 struct mlx5_flow_dv_tag_resource *cache_resource;
3391 /* Lookup a matching resource from cache. */
3392 LIST_FOREACH(cache_resource, &sh->tags, next) {
3393 if (resource->tag == cache_resource->tag) {
3394 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3395 (void *)cache_resource,
3396 rte_atomic32_read(&cache_resource->refcnt));
3397 rte_atomic32_inc(&cache_resource->refcnt);
3398 dev_flow->flow->tag_resource = cache_resource;
3402 /* Register new resource. */
3403 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3404 if (!cache_resource)
3405 return rte_flow_error_set(error, ENOMEM,
3406 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3407 "cannot allocate resource memory");
3408 *cache_resource = *resource;
3409 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3411 if (!cache_resource->action) {
3412 rte_free(cache_resource);
3413 return rte_flow_error_set(error, ENOMEM,
3414 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3415 NULL, "cannot create action");
3417 rte_atomic32_init(&cache_resource->refcnt);
3418 rte_atomic32_inc(&cache_resource->refcnt);
3419 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3420 dev_flow->flow->tag_resource = cache_resource;
3421 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3422 (void *)cache_resource,
3423 rte_atomic32_read(&cache_resource->refcnt));
3431 * Pointer to Ethernet device.
3433 * Pointer to the tag resource.
3436 * 1 while a reference on it exists, 0 when freed.
3439 flow_dv_tag_release(struct rte_eth_dev *dev,
3440 struct mlx5_flow_dv_tag_resource *tag)
3443 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3444 dev->data->port_id, (void *)tag,
3445 rte_atomic32_read(&tag->refcnt));
3446 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3447 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3448 LIST_REMOVE(tag, next);
3449 DRV_LOG(DEBUG, "port %u tag %p: removed",
3450 dev->data->port_id, (void *)tag);
3458 * Fill the flow with DV spec.
3461 * Pointer to rte_eth_dev structure.
3462 * @param[in, out] dev_flow
3463 * Pointer to the sub flow.
3465 * Pointer to the flow attributes.
3467 * Pointer to the list of items.
3468 * @param[in] actions
3469 * Pointer to the list of actions.
3471 * Pointer to the error structure.
3474 * 0 on success, a negative errno value otherwise and rte_errno is set.
3477 flow_dv_translate(struct rte_eth_dev *dev,
3478 struct mlx5_flow *dev_flow,
3479 const struct rte_flow_attr *attr,
3480 const struct rte_flow_item items[],
3481 const struct rte_flow_action actions[],
3482 struct rte_flow_error *error)
3484 struct mlx5_priv *priv = dev->data->dev_private;
3485 struct rte_flow *flow = dev_flow->flow;
3486 uint64_t item_flags = 0;
3487 uint64_t last_item = 0;
3488 uint64_t action_flags = 0;
3489 uint64_t priority = attr->priority;
3490 struct mlx5_flow_dv_matcher matcher = {
3492 .size = sizeof(matcher.mask.buf),
3496 bool actions_end = false;
3497 struct mlx5_flow_dv_modify_hdr_resource res = {
3498 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3499 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3501 union flow_dv_attr flow_attr = { .attr = 0 };
3502 struct mlx5_flow_dv_tag_resource tag_resource;
3503 uint32_t modify_action_position = UINT32_MAX;
3505 if (priority == MLX5_FLOW_PRIO_RSVD)
3506 priority = priv->config.flow_prio - 1;
3507 for (; !actions_end; actions++) {
3508 const struct rte_flow_action_queue *queue;
3509 const struct rte_flow_action_rss *rss;
3510 const struct rte_flow_action *action = actions;
3511 const struct rte_flow_action_count *count = action->conf;
3512 const uint8_t *rss_key;
3513 const struct rte_flow_action_jump *jump_data;
3514 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3515 struct mlx5_flow_tbl_resource *tbl;
3517 switch (actions->type) {
3518 case RTE_FLOW_ACTION_TYPE_VOID:
3520 case RTE_FLOW_ACTION_TYPE_FLAG:
3522 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3523 if (!flow->tag_resource)
3524 if (flow_dv_tag_resource_register
3525 (dev, &tag_resource, dev_flow, error))
3527 dev_flow->dv.actions[actions_n++] =
3528 flow->tag_resource->action;
3529 action_flags |= MLX5_FLOW_ACTION_FLAG;
3531 case RTE_FLOW_ACTION_TYPE_MARK:
3532 tag_resource.tag = mlx5_flow_mark_set
3533 (((const struct rte_flow_action_mark *)
3534 (actions->conf))->id);
3535 if (!flow->tag_resource)
3536 if (flow_dv_tag_resource_register
3537 (dev, &tag_resource, dev_flow, error))
3539 dev_flow->dv.actions[actions_n++] =
3540 flow->tag_resource->action;
3541 action_flags |= MLX5_FLOW_ACTION_MARK;
3543 case RTE_FLOW_ACTION_TYPE_DROP:
3544 action_flags |= MLX5_FLOW_ACTION_DROP;
3546 case RTE_FLOW_ACTION_TYPE_QUEUE:
3547 queue = actions->conf;
3548 flow->rss.queue_num = 1;
3549 (*flow->queue)[0] = queue->index;
3550 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3552 case RTE_FLOW_ACTION_TYPE_RSS:
3553 rss = actions->conf;
3555 memcpy((*flow->queue), rss->queue,
3556 rss->queue_num * sizeof(uint16_t));
3557 flow->rss.queue_num = rss->queue_num;
3558 /* NULL RSS key indicates default RSS key. */
3559 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3560 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3561 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3562 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3563 flow->rss.level = rss->level;
3564 action_flags |= MLX5_FLOW_ACTION_RSS;
3566 case RTE_FLOW_ACTION_TYPE_COUNT:
3567 if (!priv->config.devx) {
3568 rte_errno = ENOTSUP;
3571 flow->counter = flow_dv_counter_new(dev, count->shared,
3573 if (flow->counter == NULL)
3575 dev_flow->dv.actions[actions_n++] =
3576 flow->counter->action;
3577 action_flags |= MLX5_FLOW_ACTION_COUNT;
3580 if (rte_errno == ENOTSUP)
3581 return rte_flow_error_set
3583 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3585 "count action not supported");
3587 return rte_flow_error_set
3589 RTE_FLOW_ERROR_TYPE_ACTION,
3591 "cannot create counter"
3593 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3594 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3595 if (flow_dv_create_action_l2_encap(dev, actions,
3598 dev_flow->dv.actions[actions_n++] =
3599 dev_flow->dv.encap_decap->verbs_action;
3600 action_flags |= actions->type ==
3601 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3602 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3603 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3605 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3606 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3607 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3610 dev_flow->dv.actions[actions_n++] =
3611 dev_flow->dv.encap_decap->verbs_action;
3612 action_flags |= actions->type ==
3613 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3614 MLX5_FLOW_ACTION_VXLAN_DECAP :
3615 MLX5_FLOW_ACTION_NVGRE_DECAP;
3617 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3618 /* Handle encap with preceding decap. */
3619 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3620 if (flow_dv_create_action_raw_encap
3621 (dev, actions, dev_flow, attr, error))
3623 dev_flow->dv.actions[actions_n++] =
3624 dev_flow->dv.encap_decap->verbs_action;
3626 /* Handle encap without preceding decap. */
3627 if (flow_dv_create_action_l2_encap(dev, actions,
3631 dev_flow->dv.actions[actions_n++] =
3632 dev_flow->dv.encap_decap->verbs_action;
3634 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3636 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3637 /* Check if this decap is followed by encap. */
3638 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3639 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3642 /* Handle decap only if it isn't followed by encap. */
3643 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3644 if (flow_dv_create_action_l2_decap(dev,
3648 dev_flow->dv.actions[actions_n++] =
3649 dev_flow->dv.encap_decap->verbs_action;
3651 /* If decap is followed by encap, handle it at encap. */
3652 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3654 case RTE_FLOW_ACTION_TYPE_JUMP:
3655 jump_data = action->conf;
3656 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3659 attr->transfer, error);
3661 return rte_flow_error_set
3663 RTE_FLOW_ERROR_TYPE_ACTION,
3665 "cannot create jump action.");
3666 jump_tbl_resource.tbl = tbl;
3667 if (flow_dv_jump_tbl_resource_register
3668 (dev, &jump_tbl_resource, dev_flow, error)) {
3669 flow_dv_tbl_resource_release(tbl);
3670 return rte_flow_error_set
3672 RTE_FLOW_ERROR_TYPE_ACTION,
3674 "cannot create jump action.");
3676 dev_flow->dv.actions[actions_n++] =
3677 dev_flow->dv.jump->action;
3678 action_flags |= MLX5_FLOW_ACTION_JUMP;
3680 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3681 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3682 if (flow_dv_convert_action_modify_mac(&res, actions,
3685 action_flags |= actions->type ==
3686 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3687 MLX5_FLOW_ACTION_SET_MAC_SRC :
3688 MLX5_FLOW_ACTION_SET_MAC_DST;
3690 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3691 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3692 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3695 action_flags |= actions->type ==
3696 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3697 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3698 MLX5_FLOW_ACTION_SET_IPV4_DST;
3700 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3701 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3702 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3705 action_flags |= actions->type ==
3706 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3707 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3708 MLX5_FLOW_ACTION_SET_IPV6_DST;
3710 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3711 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3712 if (flow_dv_convert_action_modify_tp(&res, actions,
3716 action_flags |= actions->type ==
3717 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3718 MLX5_FLOW_ACTION_SET_TP_SRC :
3719 MLX5_FLOW_ACTION_SET_TP_DST;
3721 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3722 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3726 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3728 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3729 if (flow_dv_convert_action_modify_ttl(&res, actions,
3733 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3735 case RTE_FLOW_ACTION_TYPE_END:
3737 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3738 /* Create modify action if needed. */
3739 if (flow_dv_modify_hdr_resource_register
3744 dev_flow->dv.actions[modify_action_position] =
3745 dev_flow->dv.modify_hdr->verbs_action;
3751 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3752 modify_action_position == UINT32_MAX)
3753 modify_action_position = actions_n++;
3755 dev_flow->dv.actions_n = actions_n;
3756 flow->actions = action_flags;
3757 if (attr->ingress && !attr->transfer &&
3758 (priv->representor || priv->master)) {
3759 /* It was validated; we support unidirectional flows only. */
3760 assert(!attr->egress);
3762 * Add matching on source vport index only
3763 * for ingress rules in E-Switch configurations.
3765 flow_dv_translate_item_source_vport(matcher.mask.buf,
3766 dev_flow->dv.value.buf,
3770 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3771 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3772 void *match_mask = matcher.mask.buf;
3773 void *match_value = dev_flow->dv.value.buf;
3775 switch (items->type) {
3776 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3777 flow_dv_translate_item_port_id(dev, match_mask,
3778 match_value, items);
3779 last_item = MLX5_FLOW_ITEM_PORT_ID;
3781 case RTE_FLOW_ITEM_TYPE_ETH:
3782 flow_dv_translate_item_eth(match_mask, match_value,
3784 matcher.priority = MLX5_PRIORITY_MAP_L2;
3785 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3786 MLX5_FLOW_LAYER_OUTER_L2;
3788 case RTE_FLOW_ITEM_TYPE_VLAN:
3789 flow_dv_translate_item_vlan(match_mask, match_value,
3791 matcher.priority = MLX5_PRIORITY_MAP_L2;
3792 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3793 MLX5_FLOW_LAYER_INNER_VLAN) :
3794 (MLX5_FLOW_LAYER_OUTER_L2 |
3795 MLX5_FLOW_LAYER_OUTER_VLAN);
3797 case RTE_FLOW_ITEM_TYPE_IPV4:
3798 flow_dv_translate_item_ipv4(match_mask, match_value,
3799 items, tunnel, attr->group);
3800 matcher.priority = MLX5_PRIORITY_MAP_L3;
3801 dev_flow->dv.hash_fields |=
3802 mlx5_flow_hashfields_adjust
3804 MLX5_IPV4_LAYER_TYPES,
3805 MLX5_IPV4_IBV_RX_HASH);
3806 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3807 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3809 case RTE_FLOW_ITEM_TYPE_IPV6:
3810 flow_dv_translate_item_ipv6(match_mask, match_value,
3811 items, tunnel, attr->group);
3812 matcher.priority = MLX5_PRIORITY_MAP_L3;
3813 dev_flow->dv.hash_fields |=
3814 mlx5_flow_hashfields_adjust
3816 MLX5_IPV6_LAYER_TYPES,
3817 MLX5_IPV6_IBV_RX_HASH);
3818 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3819 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3821 case RTE_FLOW_ITEM_TYPE_TCP:
3822 flow_dv_translate_item_tcp(match_mask, match_value,
3824 matcher.priority = MLX5_PRIORITY_MAP_L4;
3825 dev_flow->dv.hash_fields |=
3826 mlx5_flow_hashfields_adjust
3827 (dev_flow, tunnel, ETH_RSS_TCP,
3828 IBV_RX_HASH_SRC_PORT_TCP |
3829 IBV_RX_HASH_DST_PORT_TCP);
3830 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3831 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3833 case RTE_FLOW_ITEM_TYPE_UDP:
3834 flow_dv_translate_item_udp(match_mask, match_value,
3836 matcher.priority = MLX5_PRIORITY_MAP_L4;
3837 dev_flow->dv.hash_fields |=
3838 mlx5_flow_hashfields_adjust
3839 (dev_flow, tunnel, ETH_RSS_UDP,
3840 IBV_RX_HASH_SRC_PORT_UDP |
3841 IBV_RX_HASH_DST_PORT_UDP);
3842 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3843 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3845 case RTE_FLOW_ITEM_TYPE_GRE:
3846 flow_dv_translate_item_gre(match_mask, match_value,
3848 last_item = MLX5_FLOW_LAYER_GRE;
3850 case RTE_FLOW_ITEM_TYPE_NVGRE:
3851 flow_dv_translate_item_nvgre(match_mask, match_value,
3853 last_item = MLX5_FLOW_LAYER_GRE;
3855 case RTE_FLOW_ITEM_TYPE_VXLAN:
3856 flow_dv_translate_item_vxlan(match_mask, match_value,
3858 last_item = MLX5_FLOW_LAYER_VXLAN;
3860 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3861 flow_dv_translate_item_vxlan(match_mask, match_value,
3863 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3865 case RTE_FLOW_ITEM_TYPE_MPLS:
3866 flow_dv_translate_item_mpls(match_mask, match_value,
3867 items, last_item, tunnel);
3868 last_item = MLX5_FLOW_LAYER_MPLS;
3870 case RTE_FLOW_ITEM_TYPE_META:
3871 flow_dv_translate_item_meta(match_mask, match_value,
3873 last_item = MLX5_FLOW_ITEM_METADATA;
3878 item_flags |= last_item;
3880 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3881 dev_flow->dv.value.buf));
3882 dev_flow->layers = item_flags;
3883 /* Register matcher. */
3884 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3886 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3888 matcher.egress = attr->egress;
3889 matcher.group = attr->group;
3890 matcher.transfer = attr->transfer;
3891 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3897 * Apply the flow to the NIC.
3900 * Pointer to the Ethernet device structure.
3901 * @param[in, out] flow
3902 * Pointer to flow structure.
3904 * Pointer to error structure.
3907 * 0 on success, a negative errno value otherwise and rte_errno is set.
3910 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3911 struct rte_flow_error *error)
3913 struct mlx5_flow_dv *dv;
3914 struct mlx5_flow *dev_flow;
3918 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3921 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3922 dv->hrxq = mlx5_hrxq_drop_new(dev);
3926 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3927 "cannot get drop hash queue");
3930 dv->actions[n++] = dv->hrxq->action;
3931 } else if (flow->actions &
3932 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3933 struct mlx5_hrxq *hrxq;
3935 hrxq = mlx5_hrxq_get(dev, flow->key,
3936 MLX5_RSS_HASH_KEY_LEN,
3939 flow->rss.queue_num);
3941 hrxq = mlx5_hrxq_new
3942 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3943 dv->hash_fields, (*flow->queue),
3944 flow->rss.queue_num,
3945 !!(dev_flow->layers &
3946 MLX5_FLOW_LAYER_TUNNEL));
3950 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3951 "cannot get hash queue");
3955 dv->actions[n++] = dv->hrxq->action;
3958 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3959 (void *)&dv->value, n,
3962 rte_flow_error_set(error, errno,
3963 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3965 "hardware refuses to create flow");
3971 err = rte_errno; /* Save rte_errno before cleanup. */
3972 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3973 struct mlx5_flow_dv *dv = &dev_flow->dv;
3975 if (flow->actions & MLX5_FLOW_ACTION_DROP)
3976 mlx5_hrxq_drop_release(dev);
3978 mlx5_hrxq_release(dev, dv->hrxq);
3982 rte_errno = err; /* Restore rte_errno. */
3987 * Release the flow matcher.
3990 * Pointer to Ethernet device.
3992 * Pointer to mlx5_flow.
3995 * 1 while a reference on it exists, 0 when freed.
3998 flow_dv_matcher_release(struct rte_eth_dev *dev,
3999 struct mlx5_flow *flow)
4001 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4002 struct mlx5_priv *priv = dev->data->dev_private;
4003 struct mlx5_ibv_shared *sh = priv->sh;
4004 struct mlx5_flow_tbl_resource *tbl;
4006 assert(matcher->matcher_object);
4007 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4008 dev->data->port_id, (void *)matcher,
4009 rte_atomic32_read(&matcher->refcnt));
4010 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4011 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4012 (matcher->matcher_object));
4013 LIST_REMOVE(matcher, next);
4014 if (matcher->egress)
4015 tbl = &sh->tx_tbl[matcher->group];
4017 tbl = &sh->rx_tbl[matcher->group];
4018 flow_dv_tbl_resource_release(tbl);
4020 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4021 dev->data->port_id, (void *)matcher);
4028 * Release an encap/decap resource.
4031 * Pointer to mlx5_flow.
4034 * 1 while a reference on it exists, 0 when freed.
4037 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4039 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4040 flow->dv.encap_decap;
4042 assert(cache_resource->verbs_action);
4043 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4044 (void *)cache_resource,
4045 rte_atomic32_read(&cache_resource->refcnt));
4046 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4047 claim_zero(mlx5_glue->destroy_flow_action
4048 (cache_resource->verbs_action));
4049 LIST_REMOVE(cache_resource, next);
4050 rte_free(cache_resource);
4051 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4052 (void *)cache_resource);
4059 * Release a jump-to-table action resource.
4062 * Pointer to mlx5_flow.
4065 * 1 while a reference on it exists, 0 when freed.
4068 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4070 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4073 assert(cache_resource->action);
4074 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4075 (void *)cache_resource,
4076 rte_atomic32_read(&cache_resource->refcnt));
4077 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4078 claim_zero(mlx5_glue->destroy_flow_action
4079 (cache_resource->action));
4080 LIST_REMOVE(cache_resource, next);
4081 flow_dv_tbl_resource_release(cache_resource->tbl);
4082 rte_free(cache_resource);
4083 DRV_LOG(DEBUG, "jump table resource %p: removed",
4084 (void *)cache_resource);
4091 * Release a modify-header resource.
4094 * Pointer to mlx5_flow.
4097 * 1 while a reference on it exists, 0 when freed.
4100 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4102 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4103 flow->dv.modify_hdr;
4105 assert(cache_resource->verbs_action);
4106 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4107 (void *)cache_resource,
4108 rte_atomic32_read(&cache_resource->refcnt));
4109 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4110 claim_zero(mlx5_glue->destroy_flow_action
4111 (cache_resource->verbs_action));
4112 LIST_REMOVE(cache_resource, next);
4113 rte_free(cache_resource);
4114 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4115 (void *)cache_resource);
4122 * Remove the flow from the NIC, but keep it in memory.
4125 * Pointer to Ethernet device.
4126 * @param[in, out] flow
4127 * Pointer to flow structure.
4130 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4132 struct mlx5_flow_dv *dv;
4133 struct mlx5_flow *dev_flow;
4137 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4140 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4144 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4145 mlx5_hrxq_drop_release(dev);
4147 mlx5_hrxq_release(dev, dv->hrxq);
4154 * Remove the flow from the NIC and the memory.
4157 * Pointer to the Ethernet device structure.
4158 * @param[in, out] flow
4159 * Pointer to flow structure.
4162 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4164 struct mlx5_flow *dev_flow;
4168 flow_dv_remove(dev, flow);
4169 if (flow->counter) {
4170 flow_dv_counter_release(flow->counter);
4171 flow->counter = NULL;
4173 if (flow->tag_resource) {
4174 flow_dv_tag_release(dev, flow->tag_resource);
4175 flow->tag_resource = NULL;
4177 while (!LIST_EMPTY(&flow->dev_flows)) {
4178 dev_flow = LIST_FIRST(&flow->dev_flows);
4179 LIST_REMOVE(dev_flow, next);
4180 if (dev_flow->dv.matcher)
4181 flow_dv_matcher_release(dev, dev_flow);
4182 if (dev_flow->dv.encap_decap)
4183 flow_dv_encap_decap_resource_release(dev_flow);
4184 if (dev_flow->dv.modify_hdr)
4185 flow_dv_modify_hdr_resource_release(dev_flow);
4186 if (dev_flow->dv.jump)
4187 flow_dv_jump_tbl_resource_release(dev_flow);
4193 * Query a dv flow rule for its statistics via devx.
4196 * Pointer to Ethernet device.
4198 * Pointer to the sub flow.
4200 * Data retrieved by the query.
4202 * Perform verbose error reporting if not NULL.
4205 * 0 on success, a negative errno value otherwise and rte_errno is set.
4208 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4209 void *data, struct rte_flow_error *error)
4211 struct mlx5_priv *priv = dev->data->dev_private;
4212 struct rte_flow_query_count *qc = data;
4217 if (!priv->config.devx)
4218 return rte_flow_error_set(error, ENOTSUP,
4219 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4221 "counters are not supported");
4222 if (flow->counter) {
4223 err = mlx5_devx_cmd_flow_counter_query
4224 (flow->counter->dcs,
4225 qc->reset, &pkts, &bytes);
4227 return rte_flow_error_set
4229 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4231 "cannot read counters");
4234 qc->hits = pkts - flow->counter->hits;
4235 qc->bytes = bytes - flow->counter->bytes;
4237 flow->counter->hits = pkts;
4238 flow->counter->bytes = bytes;
4242 return rte_flow_error_set(error, EINVAL,
4243 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4245 "counters are not available");
4251 * @see rte_flow_query()
4255 flow_dv_query(struct rte_eth_dev *dev,
4256 struct rte_flow *flow,
4257 const struct rte_flow_action *actions,
4258 void *data,
4259 struct rte_flow_error *error)
4263 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4264 switch (actions->type) {
4265 case RTE_FLOW_ACTION_TYPE_VOID:
4267 case RTE_FLOW_ACTION_TYPE_COUNT:
4268 ret = flow_dv_query_count(dev, flow, data, error);
4271 return rte_flow_error_set(error, ENOTSUP,
4272 RTE_FLOW_ERROR_TYPE_ACTION,
4274 "action not supported");
4281 * Mutex-protected thunk to flow_dv_translate().
4284 flow_d_translate(struct rte_eth_dev *dev,
4285 struct mlx5_flow *dev_flow,
4286 const struct rte_flow_attr *attr,
4287 const struct rte_flow_item items[],
4288 const struct rte_flow_action actions[],
4289 struct rte_flow_error *error)
4293 flow_d_shared_lock(dev);
4294 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4295 flow_d_shared_unlock(dev);
4300 * Mutex-protected thunk to flow_dv_apply().
4303 flow_d_apply(struct rte_eth_dev *dev,
4304 struct rte_flow *flow,
4305 struct rte_flow_error *error)
4309 flow_d_shared_lock(dev);
4310 ret = flow_dv_apply(dev, flow, error);
4311 flow_d_shared_unlock(dev);
4316 * Mutex-protected thunk to flow_dv_remove().
4319 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4321 flow_d_shared_lock(dev);
4322 flow_dv_remove(dev, flow);
4323 flow_d_shared_unlock(dev);
4327 * Mutex-protected thunk to flow_dv_destroy().
4330 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4332 flow_d_shared_lock(dev);
4333 flow_dv_destroy(dev, flow);
4334 flow_d_shared_unlock(dev);
4337 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4338 .validate = flow_dv_validate,
4339 .prepare = flow_dv_prepare,
4340 .translate = flow_d_translate,
4341 .apply = flow_d_apply,
4342 .remove = flow_d_remove,
4343 .destroy = flow_d_destroy,
4344 .query = flow_dv_query,
4347 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */