1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
46 #ifndef HAVE_MLX5DV_DR
47 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
63 * Initialize flow attributes structure according to flow items' types.
66 * Pointer to item specification.
68 * Pointer to flow attributes structure.
71 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
73 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
75 case RTE_FLOW_ITEM_TYPE_IPV4:
78 case RTE_FLOW_ITEM_TYPE_IPV6:
81 case RTE_FLOW_ITEM_TYPE_UDP:
84 case RTE_FLOW_ITEM_TYPE_TCP:
94 struct field_modify_info {
95 uint32_t size; /* Size of field in protocol header, in bytes. */
96 uint32_t offset; /* Offset of field in protocol header, in bytes. */
97 enum mlx5_modification_field id;
100 struct field_modify_info modify_eth[] = {
101 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
102 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
103 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
104 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
108 struct field_modify_info modify_ipv4[] = {
109 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
110 {4, 12, MLX5_MODI_OUT_SIPV4},
111 {4, 16, MLX5_MODI_OUT_DIPV4},
115 struct field_modify_info modify_ipv6[] = {
116 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
117 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
118 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
119 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
120 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
121 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
122 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
123 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
124 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
128 struct field_modify_info modify_udp[] = {
129 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
130 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
134 struct field_modify_info modify_tcp[] = {
135 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
136 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
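/*
 * Illustrative sketch added for clarity (not part of the driver): each table
 * above describes one protocol header as (size, offset, id) triples, where
 * size and offset are in bytes within that header and id names the hardware
 * modification target. The tables end with a zero-size entry, which is what
 * the conversion loop in flow_dv_convert_modify_action() keys on. A
 * hypothetical one-field table for the IPv4 TTL alone would be:
 *
 *	static struct field_modify_info example_ipv4_ttl[] = {
 *		{1, 8, MLX5_MODI_OUT_IPV4_TTL},
 *		{0, 0, 0},
 *	};
 */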
141 * Acquire the synchronizing object to protect multithreaded access
142 * to shared dv context. Lock occurs only if context is actually
143 * shared, i.e. we have multiport IB device and representors are
147 * Pointer to the rte_eth_dev structure.
150 flow_d_shared_lock(struct rte_eth_dev *dev)
152 struct mlx5_priv *priv = dev->data->dev_private;
153 struct mlx5_ibv_shared *sh = priv->sh;
155 if (sh->dv_refcnt > 1) {
158 ret = pthread_mutex_lock(&sh->dv_mutex);
165 flow_d_shared_unlock(struct rte_eth_dev *dev)
167 struct mlx5_priv *priv = dev->data->dev_private;
168 struct mlx5_ibv_shared *sh = priv->sh;
170 if (sh->dv_refcnt > 1) {
173 ret = pthread_mutex_unlock(&sh->dv_mutex);
180 * Convert modify-header action to DV specification.
183 * Pointer to item specification.
185 * Pointer to field modification information.
186 * @param[in,out] resource
187 * Pointer to the modify-header resource.
189 * Type of modification.
191 * Pointer to the error structure.
194 * 0 on success, a negative errno value otherwise and rte_errno is set.
197 flow_dv_convert_modify_action(struct rte_flow_item *item,
198 struct field_modify_info *field,
199 struct mlx5_flow_dv_modify_hdr_resource *resource,
201 struct rte_flow_error *error)
203 uint32_t i = resource->actions_num;
204 struct mlx5_modification_cmd *actions = resource->actions;
205 const uint8_t *spec = item->spec;
206 const uint8_t *mask = item->mask;
209 while (field->size) {
211 /* Generate modify command for each mask segment. */
212 memcpy(&set, &mask[field->offset], field->size);
214 if (i >= MLX5_MODIFY_NUM)
215 return rte_flow_error_set(error, EINVAL,
216 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
217 "too many items to modify");
218 actions[i].action_type = type;
219 actions[i].field = field->id;
220 actions[i].length = field->size ==
221 4 ? 0 : field->size * 8;
222 rte_memcpy(&actions[i].data[4 - field->size],
223 &spec[field->offset], field->size);
224 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
227 if (resource->actions_num != i)
228 resource->actions_num = i;
231 if (!resource->actions_num)
232 return rte_flow_error_set(error, EINVAL,
233 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
234 "invalid modification flow item");
239 * Convert modify-header set IPv4 address action to DV specification.
241 * @param[in,out] resource
242 * Pointer to the modify-header resource.
244 * Pointer to action specification.
246 * Pointer to the error structure.
249 * 0 on success, a negative errno value otherwise and rte_errno is set.
252 flow_dv_convert_action_modify_ipv4
253 (struct mlx5_flow_dv_modify_hdr_resource *resource,
254 const struct rte_flow_action *action,
255 struct rte_flow_error *error)
257 const struct rte_flow_action_set_ipv4 *conf =
258 (const struct rte_flow_action_set_ipv4 *)(action->conf);
259 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
260 struct rte_flow_item_ipv4 ipv4;
261 struct rte_flow_item_ipv4 ipv4_mask;
263 memset(&ipv4, 0, sizeof(ipv4));
264 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
265 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
266 ipv4.hdr.src_addr = conf->ipv4_addr;
267 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
269 ipv4.hdr.dst_addr = conf->ipv4_addr;
270 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
273 item.mask = &ipv4_mask;
274 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
275 MLX5_MODIFICATION_TYPE_SET, error);
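/*
 * Usage sketch added for clarity (hypothetical values, not part of the
 * driver): a SET_IPV4_SRC action writing 192.0.2.1 is converted as follows,
 * appending one MLX5_MODIFICATION_TYPE_SET command that targets
 * MLX5_MODI_OUT_SIPV4:
 *
 *	struct mlx5_flow_dv_modify_hdr_resource res;
 *	struct rte_flow_error err;
 *	struct rte_flow_action_set_ipv4 conf = {
 *		.ipv4_addr = RTE_BE32(0xc0000201),
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		.conf = &conf,
 *	};
 *
 *	memset(&res, 0, sizeof(res));
 *	flow_dv_convert_action_modify_ipv4(&res, &act, &err);
 */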
279 * Convert modify-header set IPv6 address action to DV specification.
281 * @param[in,out] resource
282 * Pointer to the modify-header resource.
284 * Pointer to action specification.
286 * Pointer to the error structure.
289 * 0 on success, a negative errno value otherwise and rte_errno is set.
292 flow_dv_convert_action_modify_ipv6
293 (struct mlx5_flow_dv_modify_hdr_resource *resource,
294 const struct rte_flow_action *action,
295 struct rte_flow_error *error)
297 const struct rte_flow_action_set_ipv6 *conf =
298 (const struct rte_flow_action_set_ipv6 *)(action->conf);
299 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
300 struct rte_flow_item_ipv6 ipv6;
301 struct rte_flow_item_ipv6 ipv6_mask;
303 memset(&ipv6, 0, sizeof(ipv6));
304 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
305 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
306 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
307 sizeof(ipv6.hdr.src_addr));
308 memcpy(&ipv6_mask.hdr.src_addr,
309 &rte_flow_item_ipv6_mask.hdr.src_addr,
310 sizeof(ipv6.hdr.src_addr));
312 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
313 sizeof(ipv6.hdr.dst_addr));
314 memcpy(&ipv6_mask.hdr.dst_addr,
315 &rte_flow_item_ipv6_mask.hdr.dst_addr,
316 sizeof(ipv6.hdr.dst_addr));
319 item.mask = &ipv6_mask;
320 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
321 MLX5_MODIFICATION_TYPE_SET, error);
325 * Convert modify-header set MAC address action to DV specification.
327 * @param[in,out] resource
328 * Pointer to the modify-header resource.
330 * Pointer to action specification.
332 * Pointer to the error structure.
335 * 0 on success, a negative errno value otherwise and rte_errno is set.
338 flow_dv_convert_action_modify_mac
339 (struct mlx5_flow_dv_modify_hdr_resource *resource,
340 const struct rte_flow_action *action,
341 struct rte_flow_error *error)
343 const struct rte_flow_action_set_mac *conf =
344 (const struct rte_flow_action_set_mac *)(action->conf);
345 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
346 struct rte_flow_item_eth eth;
347 struct rte_flow_item_eth eth_mask;
349 memset(&eth, 0, sizeof(eth));
350 memset(&eth_mask, 0, sizeof(eth_mask));
351 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
352 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
353 sizeof(eth.src.addr_bytes));
354 memcpy(&eth_mask.src.addr_bytes,
355 &rte_flow_item_eth_mask.src.addr_bytes,
356 sizeof(eth_mask.src.addr_bytes));
358 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
359 sizeof(eth.dst.addr_bytes));
360 memcpy(&eth_mask.dst.addr_bytes,
361 &rte_flow_item_eth_mask.dst.addr_bytes,
362 sizeof(eth_mask.dst.addr_bytes));
365 item.mask = &eth_mask;
366 return flow_dv_convert_modify_action(&item, modify_eth, resource,
367 MLX5_MODIFICATION_TYPE_SET, error);
371 * Convert modify-header set TP action to DV specification.
373 * @param[in,out] resource
374 * Pointer to the modify-header resource.
376 * Pointer to action specification.
378 * Pointer to rte_flow_item objects list.
380 * Pointer to flow attributes structure.
382 * Pointer to the error structure.
385 * 0 on success, a negative errno value otherwise and rte_errno is set.
388 flow_dv_convert_action_modify_tp
389 (struct mlx5_flow_dv_modify_hdr_resource *resource,
390 const struct rte_flow_action *action,
391 const struct rte_flow_item *items,
392 union flow_dv_attr *attr,
393 struct rte_flow_error *error)
395 const struct rte_flow_action_set_tp *conf =
396 (const struct rte_flow_action_set_tp *)(action->conf);
397 struct rte_flow_item item;
398 struct rte_flow_item_udp udp;
399 struct rte_flow_item_udp udp_mask;
400 struct rte_flow_item_tcp tcp;
401 struct rte_flow_item_tcp tcp_mask;
402 struct field_modify_info *field;
405 flow_dv_attr_init(items, attr);
407 memset(&udp, 0, sizeof(udp));
408 memset(&udp_mask, 0, sizeof(udp_mask));
409 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
410 udp.hdr.src_port = conf->port;
411 udp_mask.hdr.src_port =
412 rte_flow_item_udp_mask.hdr.src_port;
414 udp.hdr.dst_port = conf->port;
415 udp_mask.hdr.dst_port =
416 rte_flow_item_udp_mask.hdr.dst_port;
418 item.type = RTE_FLOW_ITEM_TYPE_UDP;
420 item.mask = &udp_mask;
424 memset(&tcp, 0, sizeof(tcp));
425 memset(&tcp_mask, 0, sizeof(tcp_mask));
426 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
427 tcp.hdr.src_port = conf->port;
428 tcp_mask.hdr.src_port =
429 rte_flow_item_tcp_mask.hdr.src_port;
431 tcp.hdr.dst_port = conf->port;
432 tcp_mask.hdr.dst_port =
433 rte_flow_item_tcp_mask.hdr.dst_port;
435 item.type = RTE_FLOW_ITEM_TYPE_TCP;
437 item.mask = &tcp_mask;
440 return flow_dv_convert_modify_action(&item, field, resource,
441 MLX5_MODIFICATION_TYPE_SET, error);
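/*
 * Note added for clarity: whether the command targets the UDP or the TCP
 * port fields is decided by the pattern, not by the action. The attributes
 * computed by flow_dv_attr_init() from the flow items select modify_udp or
 * modify_tcp above, so for example a RTE_FLOW_ACTION_TYPE_SET_TP_DST action
 * applied to an eth/ipv4/udp pattern ends up setting MLX5_MODI_OUT_UDP_DPORT,
 * while the same action over a TCP pattern sets MLX5_MODI_OUT_TCP_DPORT.
 */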
445 * Convert modify-header set TTL action to DV specification.
447 * @param[in,out] resource
448 * Pointer to the modify-header resource.
450 * Pointer to action specification.
452 * Pointer to rte_flow_item objects list.
454 * Pointer to flow attributes structure.
456 * Pointer to the error structure.
459 * 0 on success, a negative errno value otherwise and rte_errno is set.
462 flow_dv_convert_action_modify_ttl
463 (struct mlx5_flow_dv_modify_hdr_resource *resource,
464 const struct rte_flow_action *action,
465 const struct rte_flow_item *items,
466 union flow_dv_attr *attr,
467 struct rte_flow_error *error)
469 const struct rte_flow_action_set_ttl *conf =
470 (const struct rte_flow_action_set_ttl *)(action->conf);
471 struct rte_flow_item item;
472 struct rte_flow_item_ipv4 ipv4;
473 struct rte_flow_item_ipv4 ipv4_mask;
474 struct rte_flow_item_ipv6 ipv6;
475 struct rte_flow_item_ipv6 ipv6_mask;
476 struct field_modify_info *field;
479 flow_dv_attr_init(items, attr);
481 memset(&ipv4, 0, sizeof(ipv4));
482 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
483 ipv4.hdr.time_to_live = conf->ttl_value;
484 ipv4_mask.hdr.time_to_live = 0xFF;
485 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
487 item.mask = &ipv4_mask;
491 memset(&ipv6, 0, sizeof(ipv6));
492 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
493 ipv6.hdr.hop_limits = conf->ttl_value;
494 ipv6_mask.hdr.hop_limits = 0xFF;
495 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
497 item.mask = &ipv6_mask;
500 return flow_dv_convert_modify_action(&item, field, resource,
501 MLX5_MODIFICATION_TYPE_SET, error);
505 * Convert modify-header decrement TTL action to DV specification.
507 * @param[in,out] resource
508 * Pointer to the modify-header resource.
510 * Pointer to action specification.
512 * Pointer to rte_flow_item objects list.
514 * Pointer to flow attributes structure.
516 * Pointer to the error structure.
519 * 0 on success, a negative errno value otherwise and rte_errno is set.
522 flow_dv_convert_action_modify_dec_ttl
523 (struct mlx5_flow_dv_modify_hdr_resource *resource,
524 const struct rte_flow_item *items,
525 union flow_dv_attr *attr,
526 struct rte_flow_error *error)
528 struct rte_flow_item item;
529 struct rte_flow_item_ipv4 ipv4;
530 struct rte_flow_item_ipv4 ipv4_mask;
531 struct rte_flow_item_ipv6 ipv6;
532 struct rte_flow_item_ipv6 ipv6_mask;
533 struct field_modify_info *field;
536 flow_dv_attr_init(items, attr);
538 memset(&ipv4, 0, sizeof(ipv4));
539 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
540 ipv4.hdr.time_to_live = 0xFF;
541 ipv4_mask.hdr.time_to_live = 0xFF;
542 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
544 item.mask = &ipv4_mask;
548 memset(&ipv6, 0, sizeof(ipv6));
549 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
550 ipv6.hdr.hop_limits = 0xFF;
551 ipv6_mask.hdr.hop_limits = 0xFF;
552 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
554 item.mask = &ipv6_mask;
557 return flow_dv_convert_modify_action(&item, field, resource,
558 MLX5_MODIFICATION_TYPE_ADD, error);
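/*
 * Note added for clarity: unlike the set-TTL conversion above, the decrement
 * is encoded as an ADD command with the value 0xFF. For the 8-bit TTL /
 * hop-limit field this is effectively an addition of -1 with wrap-around,
 * e.g. an incoming TTL of 64 becomes (64 + 0xFF) mod 256 = 63.
 */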
562 * Validate META item.
565 * Pointer to the rte_eth_dev structure.
567 * Item specification.
569 * Attributes of flow that includes this item.
571 * Pointer to error structure.
574 * 0 on success, a negative errno value otherwise and rte_errno is set.
577 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
578 const struct rte_flow_item *item,
579 const struct rte_flow_attr *attr,
580 struct rte_flow_error *error)
582 const struct rte_flow_item_meta *spec = item->spec;
583 const struct rte_flow_item_meta *mask = item->mask;
584 const struct rte_flow_item_meta nic_mask = {
585 .data = RTE_BE32(UINT32_MAX)
588 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
590 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
591 return rte_flow_error_set(error, EPERM,
592 RTE_FLOW_ERROR_TYPE_ITEM,
594 "match on metadata offload "
595 "configuration is off for this port");
597 return rte_flow_error_set(error, EINVAL,
598 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
600 "data cannot be empty");
602 return rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
605 "data cannot be zero");
607 mask = &rte_flow_item_meta_mask;
608 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
609 (const uint8_t *)&nic_mask,
610 sizeof(struct rte_flow_item_meta),
615 return rte_flow_error_set(error, ENOTSUP,
616 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
618 "pattern not supported for ingress");
623 * Validate port ID item.
626 * Pointer to the rte_eth_dev structure.
628 * Item specification.
630 * Attributes of flow that includes this item.
631 * @param[in] item_flags
632 * Bit-fields that hold the items detected until now.
634 * Pointer to error structure.
637 * 0 on success, a negative errno value otherwise and rte_errno is set.
640 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
641 const struct rte_flow_item *item,
642 const struct rte_flow_attr *attr,
644 struct rte_flow_error *error)
646 const struct rte_flow_item_port_id *spec = item->spec;
647 const struct rte_flow_item_port_id *mask = item->mask;
648 const struct rte_flow_item_port_id switch_mask = {
651 uint16_t esw_domain_id;
652 uint16_t item_port_esw_domain_id;
656 return rte_flow_error_set(error, EINVAL,
657 RTE_FLOW_ERROR_TYPE_ITEM,
659 "match on port id is valid only"
660 " when transfer flag is enabled");
661 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
662 return rte_flow_error_set(error, ENOTSUP,
663 RTE_FLOW_ERROR_TYPE_ITEM, item,
664 "multiple source ports are not"
668 if (mask->id != 0xffffffff)
669 return rte_flow_error_set(error, ENOTSUP,
670 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
672 "no support for partial mask on"
674 ret = mlx5_flow_item_acceptable
675 (item, (const uint8_t *)mask,
676 (const uint8_t *)&rte_flow_item_port_id_mask,
677 sizeof(struct rte_flow_item_port_id),
683 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
686 return rte_flow_error_set(error, -ret,
687 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
688 "failed to obtain E-Switch info for"
690 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
691 &esw_domain_id, NULL);
693 return rte_flow_error_set(error, -ret,
694 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
696 "failed to obtain E-Switch info");
697 if (item_port_esw_domain_id != esw_domain_id)
698 return rte_flow_error_set(error, -ret,
699 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
700 "cannot match on a port from a"
701 " different E-Switch");
706 * Validate count action.
711 * Pointer to error structure.
714 * 0 on success, a negative errno value otherwise and rte_errno is set.
717 flow_dv_validate_action_count(struct rte_eth_dev *dev,
718 struct rte_flow_error *error)
720 struct mlx5_priv *priv = dev->data->dev_private;
722 if (!priv->config.devx)
724 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
728 return rte_flow_error_set
730 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
732 "count action not supported");
736 * Validate the L2 encap action.
738 * @param[in] action_flags
739 * Holds the actions detected until now.
741 * Pointer to the encap action.
743 * Pointer to flow attributes
745 * Pointer to error structure.
748 * 0 on success, a negative errno value otherwise and rte_errno is set.
751 flow_dv_validate_action_l2_encap(uint64_t action_flags,
752 const struct rte_flow_action *action,
753 const struct rte_flow_attr *attr,
754 struct rte_flow_error *error)
757 return rte_flow_error_set(error, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ACTION, action,
759 "configuration cannot be null");
760 if (action_flags & MLX5_FLOW_ACTION_DROP)
761 return rte_flow_error_set(error, EINVAL,
762 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
763 "can't drop and encap in same flow");
764 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
765 return rte_flow_error_set(error, EINVAL,
766 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
767 "can only have a single encap or"
768 " decap action in a flow");
769 if (!attr->transfer && attr->ingress)
770 return rte_flow_error_set(error, ENOTSUP,
771 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
773 "encap action not supported for "
779 * Validate the L2 decap action.
781 * @param[in] action_flags
782 * Holds the actions detected until now.
784 * Pointer to flow attributes
786 * Pointer to error structure.
789 * 0 on success, a negative errno value otherwise and rte_errno is set.
792 flow_dv_validate_action_l2_decap(uint64_t action_flags,
793 const struct rte_flow_attr *attr,
794 struct rte_flow_error *error)
796 if (action_flags & MLX5_FLOW_ACTION_DROP)
797 return rte_flow_error_set(error, EINVAL,
798 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
799 "can't drop and decap in same flow");
800 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
801 return rte_flow_error_set(error, EINVAL,
802 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
803 "can only have a single encap or"
804 " decap action in a flow");
805 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
806 return rte_flow_error_set(error, EINVAL,
807 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
808 "can't have decap action after"
811 return rte_flow_error_set(error, ENOTSUP,
812 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
814 "decap action not supported for "
820 * Validate the raw encap action.
822 * @param[in] action_flags
823 * Holds the actions detected until now.
825 * Pointer to the encap action.
827 * Pointer to flow attributes
829 * Pointer to error structure.
832 * 0 on success, a negative errno value otherwise and rte_errno is set.
835 flow_dv_validate_action_raw_encap(uint64_t action_flags,
836 const struct rte_flow_action *action,
837 const struct rte_flow_attr *attr,
838 struct rte_flow_error *error)
841 return rte_flow_error_set(error, EINVAL,
842 RTE_FLOW_ERROR_TYPE_ACTION, action,
843 "configuration cannot be null");
844 if (action_flags & MLX5_FLOW_ACTION_DROP)
845 return rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
847 "can't drop and encap in same flow");
848 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
849 return rte_flow_error_set(error, EINVAL,
850 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
851 "can only have a single encap"
852 " action in a flow");
853 /* Encap without preceding decap is not supported for ingress. */
854 if (!attr->transfer && attr->ingress &&
855 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
856 return rte_flow_error_set(error, ENOTSUP,
857 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
859 "encap action not supported for "
865 * Validate the raw decap action.
867 * @param[in] action_flags
868 * Holds the actions detected until now.
870 * Pointer to the encap action.
872 * Pointer to flow attributes
874 * Pointer to error structure.
877 * 0 on success, a negative errno value otherwise and rte_errno is set.
880 flow_dv_validate_action_raw_decap(uint64_t action_flags,
881 const struct rte_flow_action *action,
882 const struct rte_flow_attr *attr,
883 struct rte_flow_error *error)
885 if (action_flags & MLX5_FLOW_ACTION_DROP)
886 return rte_flow_error_set(error, EINVAL,
887 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
888 "can't drop and decap in same flow");
889 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
890 return rte_flow_error_set(error, EINVAL,
891 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
892 "can't have encap action before"
894 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
895 return rte_flow_error_set(error, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
897 "can only have a single decap"
898 " action in a flow");
899 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
900 return rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
902 "can't have decap action after"
904 /* Decap action is valid on egress only if it is followed by encap. */
906 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
907 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
910 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
911 return rte_flow_error_set
913 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
914 NULL, "decap action not supported"
921 * Find existing encap/decap resource or create and register a new one.
923 * @param[in, out] dev
924 * Pointer to rte_eth_dev structure.
925 * @param[in, out] resource
926 * Pointer to encap/decap resource.
927 * @param[in, out] dev_flow
928 * Pointer to the dev_flow.
930 * Pointer to error structure.
933 * 0 on success, a negative errno value otherwise and rte_errno is set.
936 flow_dv_encap_decap_resource_register
937 (struct rte_eth_dev *dev,
938 struct mlx5_flow_dv_encap_decap_resource *resource,
939 struct mlx5_flow *dev_flow,
940 struct rte_flow_error *error)
942 struct mlx5_priv *priv = dev->data->dev_private;
943 struct mlx5_ibv_shared *sh = priv->sh;
944 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
945 struct rte_flow *flow = dev_flow->flow;
946 struct mlx5dv_dr_ns *ns;
948 resource->flags = flow->group ? 0 : 1;
949 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
951 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
956 /* Lookup a matching resource from cache. */
957 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
958 if (resource->reformat_type == cache_resource->reformat_type &&
959 resource->ft_type == cache_resource->ft_type &&
960 resource->flags == cache_resource->flags &&
961 resource->size == cache_resource->size &&
962 !memcmp((const void *)resource->buf,
963 (const void *)cache_resource->buf,
965 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
966 (void *)cache_resource,
967 rte_atomic32_read(&cache_resource->refcnt));
968 rte_atomic32_inc(&cache_resource->refcnt);
969 dev_flow->dv.encap_decap = cache_resource;
973 /* Register new encap/decap resource. */
974 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
976 return rte_flow_error_set(error, ENOMEM,
977 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
978 "cannot allocate resource memory");
979 *cache_resource = *resource;
980 cache_resource->verbs_action =
981 mlx5_glue->dv_create_flow_action_packet_reformat
982 (sh->ctx, cache_resource->reformat_type,
983 cache_resource->ft_type, ns, cache_resource->flags,
984 cache_resource->size,
985 (cache_resource->size ? cache_resource->buf : NULL));
986 if (!cache_resource->verbs_action) {
987 rte_free(cache_resource);
988 return rte_flow_error_set(error, ENOMEM,
989 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
990 NULL, "cannot create action");
992 rte_atomic32_init(&cache_resource->refcnt);
993 rte_atomic32_inc(&cache_resource->refcnt);
994 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
995 dev_flow->dv.encap_decap = cache_resource;
996 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
997 (void *)cache_resource,
998 rte_atomic32_read(&cache_resource->refcnt));
1003 * Find existing table jump resource or create and register a new one.
1005 * @param[in, out] dev
1006 * Pointer to rte_eth_dev structure.
1007 * @param[in, out] resource
1008 * Pointer to jump table resource.
1009 * @param[in, out] dev_flow
1010 * Pointer to the dev_flow.
1012 * Pointer to error structure.
1015 * 0 on success, a negative errno value otherwise and rte_errno is set.
1018 flow_dv_jump_tbl_resource_register
1019 (struct rte_eth_dev *dev,
1020 struct mlx5_flow_dv_jump_tbl_resource *resource,
1021 struct mlx5_flow *dev_flow,
1022 struct rte_flow_error *error)
1024 struct mlx5_priv *priv = dev->data->dev_private;
1025 struct mlx5_ibv_shared *sh = priv->sh;
1026 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1028 /* Lookup a matching resource from cache. */
1029 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1030 if (resource->tbl == cache_resource->tbl) {
1031 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1032 (void *)cache_resource,
1033 rte_atomic32_read(&cache_resource->refcnt));
1034 rte_atomic32_inc(&cache_resource->refcnt);
1035 dev_flow->dv.jump = cache_resource;
1039 /* Register new jump table resource. */
1040 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1041 if (!cache_resource)
1042 return rte_flow_error_set(error, ENOMEM,
1043 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1044 "cannot allocate resource memory");
1045 *cache_resource = *resource;
1046 cache_resource->action =
1047 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1048 (resource->tbl->obj);
1049 if (!cache_resource->action) {
1050 rte_free(cache_resource);
1051 return rte_flow_error_set(error, ENOMEM,
1052 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1053 NULL, "cannot create action");
1055 rte_atomic32_init(&cache_resource->refcnt);
1056 rte_atomic32_inc(&cache_resource->refcnt);
1057 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1058 dev_flow->dv.jump = cache_resource;
1059 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1060 (void *)cache_resource,
1061 rte_atomic32_read(&cache_resource->refcnt));
1066 * Find existing port ID action resource or create and register a new one.
1068 * @param[in, out] dev
1069 * Pointer to rte_eth_dev structure.
1070 * @param[in, out] resource
1071 * Pointer to port ID action resource.
1072 * @param[in, out] dev_flow
1073 * Pointer to the dev_flow.
1075 * Pointer to error structure.
1078 * 0 on success, a negative errno value otherwise and rte_errno is set.
1081 flow_dv_port_id_action_resource_register
1082 (struct rte_eth_dev *dev,
1083 struct mlx5_flow_dv_port_id_action_resource *resource,
1084 struct mlx5_flow *dev_flow,
1085 struct rte_flow_error *error)
1087 struct mlx5_priv *priv = dev->data->dev_private;
1088 struct mlx5_ibv_shared *sh = priv->sh;
1089 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1091 /* Lookup a matching resource from cache. */
1092 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1093 if (resource->port_id == cache_resource->port_id) {
1094 DRV_LOG(DEBUG, "port id action resource resource %p: "
1096 (void *)cache_resource,
1097 rte_atomic32_read(&cache_resource->refcnt));
1098 rte_atomic32_inc(&cache_resource->refcnt);
1099 dev_flow->dv.port_id_action = cache_resource;
1103 /* Register new port id action resource. */
1104 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1105 if (!cache_resource)
1106 return rte_flow_error_set(error, ENOMEM,
1107 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1108 "cannot allocate resource memory");
1109 *cache_resource = *resource;
1110 cache_resource->action =
1111 mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
1113 if (!cache_resource->action) {
1114 rte_free(cache_resource);
1115 return rte_flow_error_set(error, ENOMEM,
1116 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1117 NULL, "cannot create action");
1119 rte_atomic32_init(&cache_resource->refcnt);
1120 rte_atomic32_inc(&cache_resource->refcnt);
1121 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1122 dev_flow->dv.port_id_action = cache_resource;
1123 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1124 (void *)cache_resource,
1125 rte_atomic32_read(&cache_resource->refcnt));
1130 * Get the size of a specific rte_flow_item_type.
1132 * @param[in] item_type
1133 * Tested rte_flow_item_type.
1136 * sizeof struct item_type, 0 if void or irrelevant.
1139 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1143 switch (item_type) {
1144 case RTE_FLOW_ITEM_TYPE_ETH:
1145 retval = sizeof(struct rte_flow_item_eth);
1147 case RTE_FLOW_ITEM_TYPE_VLAN:
1148 retval = sizeof(struct rte_flow_item_vlan);
1150 case RTE_FLOW_ITEM_TYPE_IPV4:
1151 retval = sizeof(struct rte_flow_item_ipv4);
1153 case RTE_FLOW_ITEM_TYPE_IPV6:
1154 retval = sizeof(struct rte_flow_item_ipv6);
1156 case RTE_FLOW_ITEM_TYPE_UDP:
1157 retval = sizeof(struct rte_flow_item_udp);
1159 case RTE_FLOW_ITEM_TYPE_TCP:
1160 retval = sizeof(struct rte_flow_item_tcp);
1162 case RTE_FLOW_ITEM_TYPE_VXLAN:
1163 retval = sizeof(struct rte_flow_item_vxlan);
1165 case RTE_FLOW_ITEM_TYPE_GRE:
1166 retval = sizeof(struct rte_flow_item_gre);
1168 case RTE_FLOW_ITEM_TYPE_NVGRE:
1169 retval = sizeof(struct rte_flow_item_nvgre);
1171 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1172 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1174 case RTE_FLOW_ITEM_TYPE_MPLS:
1175 retval = sizeof(struct rte_flow_item_mpls);
1177 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1185 #define MLX5_ENCAP_IPV4_VERSION 0x40
1186 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1187 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1188 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1189 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1190 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1191 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
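/*
 * Note added for clarity: these are the defaults used below to complete
 * header fields the application left at zero in an encapsulation definition,
 * e.g. IPv4 version/IHL 0x45, IPv4 TTL 64, IPv6 hop limit 255 and the VXLAN
 * "VNI valid" flag bit.
 */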
1194 * Convert the encap action data from a list of rte_flow_item to a raw buffer
1197 * Pointer to rte_flow_item objects list.
1199 * Pointer to the output buffer.
1201 * Pointer to the output buffer size.
1203 * Pointer to the error structure.
1206 * 0 on success, a negative errno value otherwise and rte_errno is set.
1209 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1210 size_t *size, struct rte_flow_error *error)
1212 struct ether_hdr *eth = NULL;
1213 struct vlan_hdr *vlan = NULL;
1214 struct ipv4_hdr *ipv4 = NULL;
1215 struct ipv6_hdr *ipv6 = NULL;
1216 struct udp_hdr *udp = NULL;
1217 struct vxlan_hdr *vxlan = NULL;
1218 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
1219 struct gre_hdr *gre = NULL;
1221 size_t temp_size = 0;
1224 return rte_flow_error_set(error, EINVAL,
1225 RTE_FLOW_ERROR_TYPE_ACTION,
1226 NULL, "invalid empty data");
1227 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1228 len = flow_dv_get_item_len(items->type);
1229 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1230 return rte_flow_error_set(error, EINVAL,
1231 RTE_FLOW_ERROR_TYPE_ACTION,
1232 (void *)items->type,
1233 "items total size is too big"
1234 " for encap action");
1235 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1236 switch (items->type) {
1237 case RTE_FLOW_ITEM_TYPE_ETH:
1238 eth = (struct ether_hdr *)&buf[temp_size];
1240 case RTE_FLOW_ITEM_TYPE_VLAN:
1241 vlan = (struct vlan_hdr *)&buf[temp_size];
1243 return rte_flow_error_set(error, EINVAL,
1244 RTE_FLOW_ERROR_TYPE_ACTION,
1245 (void *)items->type,
1246 "eth header not found");
1247 if (!eth->ether_type)
1248 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
1250 case RTE_FLOW_ITEM_TYPE_IPV4:
1251 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
1253 return rte_flow_error_set(error, EINVAL,
1254 RTE_FLOW_ERROR_TYPE_ACTION,
1255 (void *)items->type,
1256 "neither eth nor vlan"
1258 if (vlan && !vlan->eth_proto)
1259 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
1260 else if (eth && !eth->ether_type)
1261 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
1262 if (!ipv4->version_ihl)
1263 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1264 MLX5_ENCAP_IPV4_IHL_MIN;
1265 if (!ipv4->time_to_live)
1266 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1268 case RTE_FLOW_ITEM_TYPE_IPV6:
1269 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1271 return rte_flow_error_set(error, EINVAL,
1272 RTE_FLOW_ERROR_TYPE_ACTION,
1273 (void *)items->type,
1274 "neither eth nor vlan"
1276 if (vlan && !vlan->eth_proto)
1277 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1278 else if (eth && !eth->ether_type)
1279 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1280 if (!ipv6->vtc_flow)
1282 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1283 if (!ipv6->hop_limits)
1284 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1286 case RTE_FLOW_ITEM_TYPE_UDP:
1287 udp = (struct udp_hdr *)&buf[temp_size];
1289 return rte_flow_error_set(error, EINVAL,
1290 RTE_FLOW_ERROR_TYPE_ACTION,
1291 (void *)items->type,
1292 "ip header not found");
1293 if (ipv4 && !ipv4->next_proto_id)
1294 ipv4->next_proto_id = IPPROTO_UDP;
1295 else if (ipv6 && !ipv6->proto)
1296 ipv6->proto = IPPROTO_UDP;
1298 case RTE_FLOW_ITEM_TYPE_VXLAN:
1299 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1301 return rte_flow_error_set(error, EINVAL,
1302 RTE_FLOW_ERROR_TYPE_ACTION,
1303 (void *)items->type,
1304 "udp header not found");
1306 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1307 if (!vxlan->vx_flags)
1309 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1311 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1312 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1314 return rte_flow_error_set(error, EINVAL,
1315 RTE_FLOW_ERROR_TYPE_ACTION,
1316 (void *)items->type,
1317 "udp header not found");
1318 if (!vxlan_gpe->proto)
1319 return rte_flow_error_set(error, EINVAL,
1320 RTE_FLOW_ERROR_TYPE_ACTION,
1321 (void *)items->type,
1322 "next protocol not found");
1325 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1326 if (!vxlan_gpe->vx_flags)
1327 vxlan_gpe->vx_flags =
1328 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1330 case RTE_FLOW_ITEM_TYPE_GRE:
1331 case RTE_FLOW_ITEM_TYPE_NVGRE:
1332 gre = (struct gre_hdr *)&buf[temp_size];
1334 return rte_flow_error_set(error, EINVAL,
1335 RTE_FLOW_ERROR_TYPE_ACTION,
1336 (void *)items->type,
1337 "next protocol not found");
1339 return rte_flow_error_set(error, EINVAL,
1340 RTE_FLOW_ERROR_TYPE_ACTION,
1341 (void *)items->type,
1342 "ip header not found");
1343 if (ipv4 && !ipv4->next_proto_id)
1344 ipv4->next_proto_id = IPPROTO_GRE;
1345 else if (ipv6 && !ipv6->proto)
1346 ipv6->proto = IPPROTO_GRE;
1348 case RTE_FLOW_ITEM_TYPE_VOID:
1351 return rte_flow_error_set(error, EINVAL,
1352 RTE_FLOW_ERROR_TYPE_ACTION,
1353 (void *)items->type,
1354 "unsupported item type");
1364 * Convert L2 encap action to DV specification.
1367 * Pointer to rte_eth_dev structure.
1369 * Pointer to action structure.
1370 * @param[in, out] dev_flow
1371 * Pointer to the mlx5_flow.
1372 * @param[in] transfer
1373 * Mark if the flow is E-Switch flow.
1375 * Pointer to the error structure.
1378 * 0 on success, a negative errno value otherwise and rte_errno is set.
1381 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1382 const struct rte_flow_action *action,
1383 struct mlx5_flow *dev_flow,
1385 struct rte_flow_error *error)
1387 const struct rte_flow_item *encap_data;
1388 const struct rte_flow_action_raw_encap *raw_encap_data;
1389 struct mlx5_flow_dv_encap_decap_resource res = {
1391 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1392 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1393 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1396 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1398 (const struct rte_flow_action_raw_encap *)action->conf;
1399 res.size = raw_encap_data->size;
1400 memcpy(res.buf, raw_encap_data->data, res.size);
1402 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1404 ((const struct rte_flow_action_vxlan_encap *)
1405 action->conf)->definition;
1408 ((const struct rte_flow_action_nvgre_encap *)
1409 action->conf)->definition;
1410 if (flow_dv_convert_encap_data(encap_data, res.buf,
1414 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1415 return rte_flow_error_set(error, EINVAL,
1416 RTE_FLOW_ERROR_TYPE_ACTION,
1417 NULL, "can't create L2 encap action");
1422 * Convert L2 decap action to DV specification.
1425 * Pointer to rte_eth_dev structure.
1426 * @param[in, out] dev_flow
1427 * Pointer to the mlx5_flow.
1428 * @param[in] transfer
1429 * Mark if the flow is E-Switch flow.
1431 * Pointer to the error structure.
1434 * 0 on success, a negative errno value otherwise and rte_errno is set.
1437 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1438 struct mlx5_flow *dev_flow,
1440 struct rte_flow_error *error)
1442 struct mlx5_flow_dv_encap_decap_resource res = {
1445 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1446 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1447 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1450 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1451 return rte_flow_error_set(error, EINVAL,
1452 RTE_FLOW_ERROR_TYPE_ACTION,
1453 NULL, "can't create L2 decap action");
1458 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1461 * Pointer to rte_eth_dev structure.
1463 * Pointer to action structure.
1464 * @param[in, out] dev_flow
1465 * Pointer to the mlx5_flow.
1467 * Pointer to the flow attributes.
1469 * Pointer to the error structure.
1472 * 0 on success, a negative errno value otherwise and rte_errno is set.
1475 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1476 const struct rte_flow_action *action,
1477 struct mlx5_flow *dev_flow,
1478 const struct rte_flow_attr *attr,
1479 struct rte_flow_error *error)
1481 const struct rte_flow_action_raw_encap *encap_data;
1482 struct mlx5_flow_dv_encap_decap_resource res;
1484 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1485 res.size = encap_data->size;
1486 memcpy(res.buf, encap_data->data, res.size);
1487 res.reformat_type = attr->egress ?
1488 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1489 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1491 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1493 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1494 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1495 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1496 return rte_flow_error_set(error, EINVAL,
1497 RTE_FLOW_ERROR_TYPE_ACTION,
1498 NULL, "can't create encap action");
1503 * Validate the modify-header actions.
1505 * @param[in] action_flags
1506 * Holds the actions detected until now.
1508 * Pointer to the modify action.
1510 * Pointer to error structure.
1513 * 0 on success, a negative errno value otherwise and rte_errno is set.
1516 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1517 const struct rte_flow_action *action,
1518 struct rte_flow_error *error)
1520 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1521 return rte_flow_error_set(error, EINVAL,
1522 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1523 NULL, "action configuration not set");
1524 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1525 return rte_flow_error_set(error, EINVAL,
1526 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1527 "can't have encap action before"
1533 * Validate the modify-header MAC address actions.
1535 * @param[in] action_flags
1536 * Holds the actions detected until now.
1538 * Pointer to the modify action.
1539 * @param[in] item_flags
1540 * Holds the items detected.
1542 * Pointer to error structure.
1545 * 0 on success, a negative errno value otherwise and rte_errno is set.
1548 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1549 const struct rte_flow_action *action,
1550 const uint64_t item_flags,
1551 struct rte_flow_error *error)
1555 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1557 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1558 return rte_flow_error_set(error, EINVAL,
1559 RTE_FLOW_ERROR_TYPE_ACTION,
1561 "no L2 item in pattern");
1567 * Validate the modify-header IPv4 address actions.
1569 * @param[in] action_flags
1570 * Holds the actions detected until now.
1572 * Pointer to the modify action.
1573 * @param[in] item_flags
1574 * Holds the items detected.
1576 * Pointer to error structure.
1579 * 0 on success, a negative errno value otherwise and rte_errno is set.
1582 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1583 const struct rte_flow_action *action,
1584 const uint64_t item_flags,
1585 struct rte_flow_error *error)
1589 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1591 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1592 return rte_flow_error_set(error, EINVAL,
1593 RTE_FLOW_ERROR_TYPE_ACTION,
1595 "no ipv4 item in pattern");
1601 * Validate the modify-header IPv6 address actions.
1603 * @param[in] action_flags
1604 * Holds the actions detected until now.
1606 * Pointer to the modify action.
1607 * @param[in] item_flags
1608 * Holds the items detected.
1610 * Pointer to error structure.
1613 * 0 on success, a negative errno value otherwise and rte_errno is set.
1616 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1617 const struct rte_flow_action *action,
1618 const uint64_t item_flags,
1619 struct rte_flow_error *error)
1623 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1625 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1626 return rte_flow_error_set(error, EINVAL,
1627 RTE_FLOW_ERROR_TYPE_ACTION,
1629 "no ipv6 item in pattern");
1635 * Validate the modify-header TP actions.
1637 * @param[in] action_flags
1638 * Holds the actions detected until now.
1640 * Pointer to the modify action.
1641 * @param[in] item_flags
1642 * Holds the items detected.
1644 * Pointer to error structure.
1647 * 0 on success, a negative errno value otherwise and rte_errno is set.
1650 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1651 const struct rte_flow_action *action,
1652 const uint64_t item_flags,
1653 struct rte_flow_error *error)
1657 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1659 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1660 return rte_flow_error_set(error, EINVAL,
1661 RTE_FLOW_ERROR_TYPE_ACTION,
1662 NULL, "no transport layer "
1669 * Validate the modify-header TTL actions.
1671 * @param[in] action_flags
1672 * Holds the actions detected until now.
1674 * Pointer to the modify action.
1675 * @param[in] item_flags
1676 * Holds the items detected.
1678 * Pointer to error structure.
1681 * 0 on success, a negative errno value otherwise and rte_errno is set.
1684 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1685 const struct rte_flow_action *action,
1686 const uint64_t item_flags,
1687 struct rte_flow_error *error)
1691 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1693 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1694 return rte_flow_error_set(error, EINVAL,
1695 RTE_FLOW_ERROR_TYPE_ACTION,
1697 "no IP protocol in pattern");
1703 * Validate jump action.
1706 * Pointer to the modify action.
1708 * The group of the current flow.
1710 * Pointer to error structure.
1713 * 0 on success, a negative errno value otherwise and rte_errno is set.
1716 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1718 struct rte_flow_error *error)
1720 if (!action->conf)
1721 return rte_flow_error_set(error, EINVAL,
1722 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1723 NULL, "action configuration not set");
1724 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1725 return rte_flow_error_set(error, EINVAL,
1726 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1727 "target group must be higher then"
1728 " the current flow group");
1733 * Validate the port_id action.
1736 * Pointer to rte_eth_dev structure.
1737 * @param[in] action_flags
1738 * Bit-fields that hold the actions detected until now.
1740 * Port_id RTE action structure.
1742 * Attributes of flow that includes this action.
1744 * Pointer to error structure.
1747 * 0 on success, a negative errno value otherwise and rte_errno is set.
1750 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1751 uint64_t action_flags,
1752 const struct rte_flow_action *action,
1753 const struct rte_flow_attr *attr,
1754 struct rte_flow_error *error)
1756 const struct rte_flow_action_port_id *port_id;
1758 uint16_t esw_domain_id;
1759 uint16_t act_port_domain_id;
1762 if (!attr->transfer)
1763 return rte_flow_error_set(error, ENOTSUP,
1764 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1766 "port id action is valid in transfer"
1768 if (!action || !action->conf)
1769 return rte_flow_error_set(error, ENOTSUP,
1770 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1772 "port id action parameters must be"
1774 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1775 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1776 return rte_flow_error_set(error, EINVAL,
1777 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1778 "can have only one fate actions in"
1780 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1781 &esw_domain_id, NULL);
1783 return rte_flow_error_set(error, -ret,
1784 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786 "failed to obtain E-Switch info");
1787 port_id = action->conf;
1788 port = port_id->original ? dev->data->port_id : port_id->id;
1789 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1791 return rte_flow_error_set
1793 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1794 "failed to obtain E-Switch port id for port");
1795 if (act_port_domain_id != esw_domain_id)
1796 return rte_flow_error_set
1798 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1799 "port does not belong to"
1800 " E-Switch being configured");
1805 * Find existing modify-header resource or create and register a new one.
1807 * @param[in, out] dev
1808 * Pointer to rte_eth_dev structure.
1809 * @param[in, out] resource
1810 * Pointer to modify-header resource.
1811 * @param[in, out] dev_flow
1812 * Pointer to the dev_flow.
1814 * Pointer to error structure.
1817 * 0 on success, a negative errno value otherwise and rte_errno is set.
1820 flow_dv_modify_hdr_resource_register
1821 (struct rte_eth_dev *dev,
1822 struct mlx5_flow_dv_modify_hdr_resource *resource,
1823 struct mlx5_flow *dev_flow,
1824 struct rte_flow_error *error)
1826 struct mlx5_priv *priv = dev->data->dev_private;
1827 struct mlx5_ibv_shared *sh = priv->sh;
1828 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1829 struct mlx5dv_dr_ns *ns;
1831 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1833 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
1838 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
1839 /* Lookup a matching resource from cache. */
1840 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1841 if (resource->ft_type == cache_resource->ft_type &&
1842 resource->actions_num == cache_resource->actions_num &&
1843 resource->flags == cache_resource->flags &&
1844 !memcmp((const void *)resource->actions,
1845 (const void *)cache_resource->actions,
1846 (resource->actions_num *
1847 sizeof(resource->actions[0])))) {
1848 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1849 (void *)cache_resource,
1850 rte_atomic32_read(&cache_resource->refcnt));
1851 rte_atomic32_inc(&cache_resource->refcnt);
1852 dev_flow->dv.modify_hdr = cache_resource;
1856 /* Register new modify-header resource. */
1857 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1858 if (!cache_resource)
1859 return rte_flow_error_set(error, ENOMEM,
1860 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1861 "cannot allocate resource memory");
1862 *cache_resource = *resource;
1863 cache_resource->verbs_action =
1864 mlx5_glue->dv_create_flow_action_modify_header
1865 (sh->ctx, cache_resource->ft_type,
1866 ns, cache_resource->flags,
1867 cache_resource->actions_num *
1868 sizeof(cache_resource->actions[0]),
1869 (uint64_t *)cache_resource->actions);
1870 if (!cache_resource->verbs_action) {
1871 rte_free(cache_resource);
1872 return rte_flow_error_set(error, ENOMEM,
1873 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1874 NULL, "cannot create action");
1876 rte_atomic32_init(&cache_resource->refcnt);
1877 rte_atomic32_inc(&cache_resource->refcnt);
1878 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1879 dev_flow->dv.modify_hdr = cache_resource;
1880 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1881 (void *)cache_resource,
1882 rte_atomic32_read(&cache_resource->refcnt));
1887 * Get or create a flow counter.
1890 * Pointer to the Ethernet device structure.
1892 * Indicate if this counter is shared with other flows.
1894 * Counter identifier.
1897 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
1899 static struct mlx5_flow_counter *
1900 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1902 struct mlx5_priv *priv = dev->data->dev_private;
1903 struct mlx5_flow_counter *cnt = NULL;
1904 struct mlx5_devx_counter_set *dcs = NULL;
1907 if (!priv->config.devx) {
1912 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1913 if (cnt->shared && cnt->id == id) {
1919 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1920 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1925 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1928 struct mlx5_flow_counter tmpl = {
1934 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1940 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1950 * Release a flow counter.
1952 * @param[in] counter
1953 * Pointer to the counter handler.
1956 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1962 if (--counter->ref_cnt == 0) {
1963 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1965 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1966 LIST_REMOVE(counter, next);
1967 rte_free(counter->dcs);
1973 * Verify the @p attributes will be correctly understood by the NIC and store
1974 * them in the @p flow if everything is correct.
1977 * Pointer to dev struct.
1978 * @param[in] attributes
1979 * Pointer to flow attributes
1981 * Pointer to error structure.
1984 * 0 on success, a negative errno value otherwise and rte_errno is set.
1987 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1988 const struct rte_flow_attr *attributes,
1989 struct rte_flow_error *error)
1991 struct mlx5_priv *priv = dev->data->dev_private;
1992 uint32_t priority_max = priv->config.flow_prio - 1;
1994 #ifndef HAVE_MLX5DV_DR
1995 if (attributes->group)
1996 return rte_flow_error_set(error, ENOTSUP,
1997 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1999 "groups is not supported");
2001 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2002 attributes->priority >= priority_max)
2003 return rte_flow_error_set(error, ENOTSUP,
2004 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2006 "priority out of range");
2007 if (attributes->transfer) {
2008 if (!priv->config.dv_esw_en)
2009 return rte_flow_error_set
2011 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2012 "E-Switch dr is not supported");
2013 if (!(priv->representor || priv->master))
2014 return rte_flow_error_set
2015 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2016 NULL, "E-Switch configurationd can only be"
2017 " done by a master or a representor device");
2018 if (attributes->egress)
2019 return rte_flow_error_set
2021 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2022 "egress is not supported");
2023 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2024 return rte_flow_error_set
2026 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2027 NULL, "group must be smaller than "
2028 RTE_STR(MLX5_MAX_TABLES_FDB));
2030 if (!(attributes->egress ^ attributes->ingress))
2031 return rte_flow_error_set(error, ENOTSUP,
2032 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2033 "must specify exactly one of "
2034 "ingress or egress");
2039 * Internal validation function. For validating both actions and items.
2042 * Pointer to the rte_eth_dev structure.
2044 * Pointer to the flow attributes.
2046 * Pointer to the list of items.
2047 * @param[in] actions
2048 * Pointer to the list of actions.
2050 * Pointer to the error structure.
2053 * 0 on success, a negative errno value otherwise and rte_errno is set.
2056 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2057 const struct rte_flow_item items[],
2058 const struct rte_flow_action actions[],
2059 struct rte_flow_error *error)
2062 uint64_t action_flags = 0;
2063 uint64_t item_flags = 0;
2064 uint64_t last_item = 0;
2065 uint8_t next_protocol = 0xff;
2070 ret = flow_dv_validate_attributes(dev, attr, error);
2073 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2074 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2075 switch (items->type) {
2076 case RTE_FLOW_ITEM_TYPE_VOID:
2078 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2079 ret = flow_dv_validate_item_port_id
2080 (dev, items, attr, item_flags, error);
2083 last_item |= MLX5_FLOW_ITEM_PORT_ID;
2085 case RTE_FLOW_ITEM_TYPE_ETH:
2086 ret = mlx5_flow_validate_item_eth(items, item_flags,
2090 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2091 MLX5_FLOW_LAYER_OUTER_L2;
2093 case RTE_FLOW_ITEM_TYPE_VLAN:
2094 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2098 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2099 MLX5_FLOW_LAYER_OUTER_VLAN;
2101 case RTE_FLOW_ITEM_TYPE_IPV4:
2102 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2106 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2107 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2108 if (items->mask != NULL &&
2109 ((const struct rte_flow_item_ipv4 *)
2110 items->mask)->hdr.next_proto_id) {
2112 ((const struct rte_flow_item_ipv4 *)
2113 (items->spec))->hdr.next_proto_id;
2115 ((const struct rte_flow_item_ipv4 *)
2116 (items->mask))->hdr.next_proto_id;
2118 /* Reset for inner layer. */
2119 next_protocol = 0xff;
2122 case RTE_FLOW_ITEM_TYPE_IPV6:
2123 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2127 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2128 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2129 if (items->mask != NULL &&
2130 ((const struct rte_flow_item_ipv6 *)
2131 items->mask)->hdr.proto) {
2133 ((const struct rte_flow_item_ipv6 *)
2134 items->spec)->hdr.proto;
2136 ((const struct rte_flow_item_ipv6 *)
2137 items->mask)->hdr.proto;
2139 /* Reset for inner layer. */
2140 next_protocol = 0xff;
2143 case RTE_FLOW_ITEM_TYPE_TCP:
2144 ret = mlx5_flow_validate_item_tcp
2147 &rte_flow_item_tcp_mask,
2151 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2152 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2154 case RTE_FLOW_ITEM_TYPE_UDP:
2155 ret = mlx5_flow_validate_item_udp(items, item_flags,
2160 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2161 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2163 case RTE_FLOW_ITEM_TYPE_GRE:
2164 case RTE_FLOW_ITEM_TYPE_NVGRE:
2165 ret = mlx5_flow_validate_item_gre(items, item_flags,
2166 next_protocol, error);
2169 last_item = MLX5_FLOW_LAYER_GRE;
2171 case RTE_FLOW_ITEM_TYPE_VXLAN:
2172 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2176 last_item = MLX5_FLOW_LAYER_VXLAN;
2178 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2179 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2184 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2186 case RTE_FLOW_ITEM_TYPE_MPLS:
2187 ret = mlx5_flow_validate_item_mpls(dev, items,
2192 last_item = MLX5_FLOW_LAYER_MPLS;
2194 case RTE_FLOW_ITEM_TYPE_META:
2195 ret = flow_dv_validate_item_meta(dev, items, attr,
2199 last_item = MLX5_FLOW_ITEM_METADATA;
2202 return rte_flow_error_set(error, ENOTSUP,
2203 RTE_FLOW_ERROR_TYPE_ITEM,
2204 NULL, "item not supported");
2206 item_flags |= last_item;
2208 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2209 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2210 return rte_flow_error_set(error, ENOTSUP,
2211 RTE_FLOW_ERROR_TYPE_ACTION,
2212 actions, "too many actions");
2213 switch (actions->type) {
2214 case RTE_FLOW_ACTION_TYPE_VOID:
2216 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2217 ret = flow_dv_validate_action_port_id(dev,
2224 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2227 case RTE_FLOW_ACTION_TYPE_FLAG:
2228 ret = mlx5_flow_validate_action_flag(action_flags,
2232 action_flags |= MLX5_FLOW_ACTION_FLAG;
2235 case RTE_FLOW_ACTION_TYPE_MARK:
2236 ret = mlx5_flow_validate_action_mark(actions,
2241 action_flags |= MLX5_FLOW_ACTION_MARK;
2244 case RTE_FLOW_ACTION_TYPE_DROP:
2245 ret = mlx5_flow_validate_action_drop(action_flags,
2249 action_flags |= MLX5_FLOW_ACTION_DROP;
2252 case RTE_FLOW_ACTION_TYPE_QUEUE:
2253 ret = mlx5_flow_validate_action_queue(actions,
2258 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2261 case RTE_FLOW_ACTION_TYPE_RSS:
2262 ret = mlx5_flow_validate_action_rss(actions,
2268 action_flags |= MLX5_FLOW_ACTION_RSS;
2271 case RTE_FLOW_ACTION_TYPE_COUNT:
2272 ret = flow_dv_validate_action_count(dev, error);
2275 action_flags |= MLX5_FLOW_ACTION_COUNT;
2278 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2279 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2280 ret = flow_dv_validate_action_l2_encap(action_flags,
2285 action_flags |= actions->type ==
2286 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2287 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2288 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2291 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2292 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2293 ret = flow_dv_validate_action_l2_decap(action_flags,
2297 action_flags |= actions->type ==
2298 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2299 MLX5_FLOW_ACTION_VXLAN_DECAP :
2300 MLX5_FLOW_ACTION_NVGRE_DECAP;
2303 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2304 ret = flow_dv_validate_action_raw_encap(action_flags,
2309 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2312 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2313 ret = flow_dv_validate_action_raw_decap(action_flags,
2318 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2321 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2322 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2323 ret = flow_dv_validate_action_modify_mac(action_flags,
2329 /* Count all modify-header actions as one action. */
2330 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2331 ++actions_n;
2332 action_flags |= actions->type ==
2333 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2334 MLX5_FLOW_ACTION_SET_MAC_SRC :
2335 MLX5_FLOW_ACTION_SET_MAC_DST;
2338 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2339 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2340 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2346 /* Count all modify-header actions as one action. */
2347 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2348 ++actions_n;
2349 action_flags |= actions->type ==
2350 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2351 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2352 MLX5_FLOW_ACTION_SET_IPV4_DST;
2354 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2355 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2356 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2362 /* Count all modify-header actions as one action. */
2363 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2364 ++actions_n;
2365 action_flags |= actions->type ==
2366 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2367 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2368 MLX5_FLOW_ACTION_SET_IPV6_DST;
2370 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2371 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2372 ret = flow_dv_validate_action_modify_tp(action_flags,
2378 /* Count all modify-header actions as one action. */
2379 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2380 ++actions_n;
2381 action_flags |= actions->type ==
2382 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2383 MLX5_FLOW_ACTION_SET_TP_SRC :
2384 MLX5_FLOW_ACTION_SET_TP_DST;
2386 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2387 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2388 ret = flow_dv_validate_action_modify_ttl(action_flags,
2394 /* Count all modify-header actions as one action. */
2395 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2396 ++actions_n;
2397 action_flags |= actions->type ==
2398 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2399 MLX5_FLOW_ACTION_SET_TTL :
2400 MLX5_FLOW_ACTION_DEC_TTL;
2402 case RTE_FLOW_ACTION_TYPE_JUMP:
2403 ret = flow_dv_validate_action_jump(actions,
2404 attr->group, error);
2408 action_flags |= MLX5_FLOW_ACTION_JUMP;
2411 return rte_flow_error_set(error, ENOTSUP,
2412 RTE_FLOW_ERROR_TYPE_ACTION,
2414 "action not supported");
2417 /* E-Switch has a few restrictions on using items and actions. */
2418 if (attr->transfer) {
2419 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2420 return rte_flow_error_set(error, ENOTSUP,
2421 RTE_FLOW_ERROR_TYPE_ACTION,
2423 "unsupported action FLAG");
2424 if (action_flags & MLX5_FLOW_ACTION_MARK)
2425 return rte_flow_error_set(error, ENOTSUP,
2426 RTE_FLOW_ERROR_TYPE_ACTION,
2428 "unsupported action MARK");
2429 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2430 return rte_flow_error_set(error, ENOTSUP,
2431 RTE_FLOW_ERROR_TYPE_ACTION,
2433 "unsupported action QUEUE");
2434 if (action_flags & MLX5_FLOW_ACTION_RSS)
2435 return rte_flow_error_set(error, ENOTSUP,
2436 RTE_FLOW_ERROR_TYPE_ACTION,
2438 "unsupported action RSS");
2439 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2440 return rte_flow_error_set(error, EINVAL,
2441 RTE_FLOW_ERROR_TYPE_ACTION,
2443 "no fate action is found");
2445 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2446 return rte_flow_error_set(error, EINVAL,
2447 RTE_FLOW_ERROR_TYPE_ACTION,
2449 "no fate action is found");
2455 * Internal preparation function. Allocates the DV flow size,
2456 * which is constant.
2459 * Pointer to the flow attributes.
2461 * Pointer to the list of items.
2462 * @param[in] actions
2463 * Pointer to the list of actions.
2465 * Pointer to the error structure.
2468 * Pointer to mlx5_flow object on success,
2469 * otherwise NULL and rte_errno is set.
2471 static struct mlx5_flow *
2472 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2473 const struct rte_flow_item items[] __rte_unused,
2474 const struct rte_flow_action actions[] __rte_unused,
2475 struct rte_flow_error *error)
2477 uint32_t size = sizeof(struct mlx5_flow);
2478 struct mlx5_flow *flow;
2480 flow = rte_calloc(__func__, 1, size, 0);
2482 rte_flow_error_set(error, ENOMEM,
2483 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2484 "not enough memory to create flow");
2487 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2493 * Sanity check for match mask and value. Similar to check_valid_spec() in
2494 * the kernel driver. If a bit not covered by the mask is set in the value, it fails.
2497 * pointer to match mask buffer.
2498 * @param match_value
2499 * pointer to match value buffer.
2502 * 0 if valid, -EINVAL otherwise.
2505 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2507 uint8_t *m = match_mask;
2508 uint8_t *v = match_value;
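/*
 * A bit set in the value but cleared in the mask can never match
 * and indicates a malformed spec.
 */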
2511 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2514 "match_value differs from match_criteria"
2515 " %p[%u] != %p[%u]",
2516 match_value, i, match_mask, i);
2525 * Add Ethernet item to matcher and to the value.
2527 * @param[in, out] matcher
2529 * @param[in, out] key
2530 * Flow matcher value.
2532 * Flow pattern to translate.
2534 * Item is inner pattern.
2537 flow_dv_translate_item_eth(void *matcher, void *key,
2538 const struct rte_flow_item *item, int inner)
2540 const struct rte_flow_item_eth *eth_m = item->mask;
2541 const struct rte_flow_item_eth *eth_v = item->spec;
2542 const struct rte_flow_item_eth nic_mask = {
2543 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2544 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2545 .type = RTE_BE16(0xffff),
2557 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2559 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2561 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2563 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2565 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2566 &eth_m->dst, sizeof(eth_m->dst));
2567 /* The value must be in the range of the mask. */
2568 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2569 for (i = 0; i < sizeof(eth_m->dst); ++i)
2570 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2571 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2572 &eth_m->src, sizeof(eth_m->src));
2573 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2574 /* The value must be in the range of the mask. */
2575 for (i = 0; i < sizeof(eth_m->src); ++i)
2576 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2577 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2578 rte_be_to_cpu_16(eth_m->type));
2579 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2580 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2584 * Add VLAN item to matcher and to the value.
2586 * @param[in, out] matcher
2588 * @param[in, out] key
2589 * Flow matcher value.
2591 * Flow pattern to translate.
2593 * Item is inner pattern.
2596 flow_dv_translate_item_vlan(void *matcher, void *key,
2597 const struct rte_flow_item *item,
2600 const struct rte_flow_item_vlan *vlan_m = item->mask;
2601 const struct rte_flow_item_vlan *vlan_v = item->spec;
2602 const struct rte_flow_item_vlan nic_mask = {
2603 .tci = RTE_BE16(0x0fff),
2604 .inner_type = RTE_BE16(0xffff),
2616 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2618 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2620 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2622 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2624 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2625 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2626 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2627 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
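/*
 * TCI layout: PCP (3 bits) | CFI (1 bit) | VID (12 bits),
 * e.g. tci 0x2005 -> prio 1, cfi 0, vid 5.
 */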
2628 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2629 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2630 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2631 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2632 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2633 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2637 * Add IPV4 item to matcher and to the value.
2639 * @param[in, out] matcher
2641 * @param[in, out] key
2642 * Flow matcher value.
2644 * Flow pattern to translate.
2646 * Item is inner pattern.
2648 * The group to insert the rule.
2651 flow_dv_translate_item_ipv4(void *matcher, void *key,
2652 const struct rte_flow_item *item,
2653 int inner, uint32_t group)
2655 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2656 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2657 const struct rte_flow_item_ipv4 nic_mask = {
2659 .src_addr = RTE_BE32(0xffffffff),
2660 .dst_addr = RTE_BE32(0xffffffff),
2661 .type_of_service = 0xff,
2662 .next_proto_id = 0xff,
2672 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2674 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2676 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2678 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2680 if (group == 0)
2681 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2682 else
2683 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2684 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2689 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2690 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2691 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2692 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2693 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2694 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2695 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2696 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2697 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2698 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2699 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2700 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2701 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
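/*
 * The ToS byte is split for matching: the low two bits map to ip_ecn,
 * the upper six (>> 2) to ip_dscp.
 */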
2702 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2703 ipv4_m->hdr.type_of_service);
2704 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2705 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2706 ipv4_m->hdr.type_of_service >> 2);
2707 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2708 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2709 ipv4_m->hdr.next_proto_id);
2710 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2711 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2715 * Add IPV6 item to matcher and to the value.
2717 * @param[in, out] matcher
2719 * @param[in, out] key
2720 * Flow matcher value.
2722 * Flow pattern to translate.
2724 * Item is inner pattern.
2726 * The group to insert the rule.
2729 flow_dv_translate_item_ipv6(void *matcher, void *key,
2730 const struct rte_flow_item *item,
2731 int inner, uint32_t group)
2733 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2734 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2735 const struct rte_flow_item_ipv6 nic_mask = {
2738 "\xff\xff\xff\xff\xff\xff\xff\xff"
2739 "\xff\xff\xff\xff\xff\xff\xff\xff",
2741 "\xff\xff\xff\xff\xff\xff\xff\xff"
2742 "\xff\xff\xff\xff\xff\xff\xff\xff",
2743 .vtc_flow = RTE_BE32(0xffffffff),
2750 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2751 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2760 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2762 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2764 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2766 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2768 if (group == 0)
2769 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2770 else
2771 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2772 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2777 size = sizeof(ipv6_m->hdr.dst_addr);
2778 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2779 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2780 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2781 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2782 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2783 for (i = 0; i < size; ++i)
2784 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2785 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2786 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2787 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2788 src_ipv4_src_ipv6.ipv6_layout.ipv6);
2789 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2790 for (i = 0; i < size; ++i)
2791 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
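/*
 * vtc_flow layout: version (4 bits) | traffic class (8 bits) |
 * flow label (20 bits); >> 20 extracts the traffic class (ECN in its
 * low two bits), >> 22 the DSCP.
 */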
2793 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2794 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2795 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2796 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2797 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2798 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2801 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2803 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2806 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2808 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2812 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2814 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2815 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2819 * Add TCP item to matcher and to the value.
2821 * @param[in, out] matcher
2823 * @param[in, out] key
2824 * Flow matcher value.
2826 * Flow pattern to translate.
2828 * Item is inner pattern.
2831 flow_dv_translate_item_tcp(void *matcher, void *key,
2832 const struct rte_flow_item *item,
2835 const struct rte_flow_item_tcp *tcp_m = item->mask;
2836 const struct rte_flow_item_tcp *tcp_v = item->spec;
2841 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2843 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2845 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2847 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2849 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2850 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2851 if (!tcp_v)
2852 return;
2853 if (!tcp_m)
2854 tcp_m = &rte_flow_item_tcp_mask;
2855 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2856 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2857 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2858 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2859 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2860 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2861 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2862 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2866 * Add UDP item to matcher and to the value.
2868 * @param[in, out] matcher
2870 * @param[in, out] key
2871 * Flow matcher value.
2873 * Flow pattern to translate.
2875 * Item is inner pattern.
2878 flow_dv_translate_item_udp(void *matcher, void *key,
2879 const struct rte_flow_item *item,
2882 const struct rte_flow_item_udp *udp_m = item->mask;
2883 const struct rte_flow_item_udp *udp_v = item->spec;
2888 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2890 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2892 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2894 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2896 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2897 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2898 if (!udp_v)
2899 return;
2900 if (!udp_m)
2901 udp_m = &rte_flow_item_udp_mask;
2902 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2903 rte_be_to_cpu_16(udp_m->hdr.src_port));
2904 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2905 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2906 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2907 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2908 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2909 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2913 * Add GRE item to matcher and to the value.
2915 * @param[in, out] matcher
2917 * @param[in, out] key
2918 * Flow matcher value.
2920 * Flow pattern to translate.
2922 * Item is inner pattern.
2925 flow_dv_translate_item_gre(void *matcher, void *key,
2926 const struct rte_flow_item *item,
2929 const struct rte_flow_item_gre *gre_m = item->mask;
2930 const struct rte_flow_item_gre *gre_v = item->spec;
2933 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2934 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2937 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2939 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2941 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2943 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2945 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2946 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2947 if (!gre_v)
2948 return;
2949 if (!gre_m)
2950 gre_m = &rte_flow_item_gre_mask;
2951 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2952 rte_be_to_cpu_16(gre_m->protocol));
2953 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2954 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2958 * Add NVGRE item to matcher and to the value.
2960 * @param[in, out] matcher
2962 * @param[in, out] key
2963 * Flow matcher value.
2965 * Flow pattern to translate.
2967 * Item is inner pattern.
2970 flow_dv_translate_item_nvgre(void *matcher, void *key,
2971 const struct rte_flow_item *item,
2974 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2975 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2976 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2977 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2978 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2979 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2985 flow_dv_translate_item_gre(matcher, key, item, inner);
2986 if (!nvgre_v)
2987 return;
2988 if (!nvgre_m)
2989 nvgre_m = &rte_flow_item_nvgre_mask;
2990 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
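/*
 * NVGRE reuses the GRE match and additionally packs the 24-bit TNI and
 * the 8-bit flow_id into the upper GRE key word.
 */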
2991 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2992 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2993 memcpy(gre_key_m, tni_flow_id_m, size);
2994 for (i = 0; i < size; ++i)
2995 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2999 * Add VXLAN item to matcher and to the value.
3001 * @param[in, out] matcher
3003 * @param[in, out] key
3004 * Flow matcher value.
3006 * Flow pattern to translate.
3008 * Item is inner pattern.
3011 flow_dv_translate_item_vxlan(void *matcher, void *key,
3012 const struct rte_flow_item *item,
3015 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3016 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3019 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3020 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3028 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3030 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3032 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3034 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3036 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3037 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
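/*
 * If the pattern did not already constrain the UDP destination port,
 * match the well-known VXLAN/VXLAN-GPE port implied by the item type.
 */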
3038 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3039 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3040 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3041 }
3042 if (!vxlan_v)
3043 return;
3044 if (!vxlan_m)
3045 vxlan_m = &rte_flow_item_vxlan_mask;
3046 size = sizeof(vxlan_m->vni);
3047 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3048 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3049 memcpy(vni_m, vxlan_m->vni, size);
3050 for (i = 0; i < size; ++i)
3051 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3055 * Add MPLS item to matcher and to the value.
3057 * @param[in, out] matcher
3059 * @param[in, out] key
3060 * Flow matcher value.
3062 * Flow pattern to translate.
3063 * @param[in] prev_layer
3064 * The protocol layer indicated in previous item.
3066 * Item is inner pattern.
3069 flow_dv_translate_item_mpls(void *matcher, void *key,
3070 const struct rte_flow_item *item,
3071 uint64_t prev_layer,
3074 const uint32_t *in_mpls_m = item->mask;
3075 const uint32_t *in_mpls_v = item->spec;
3076 uint32_t *out_mpls_m = NULL;
3077 uint32_t *out_mpls_v = NULL;
3078 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3079 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3080 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3082 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3083 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3084 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
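/*
 * First pin the encapsulating protocol (UDP destination port, GRE protocol
 * or IP protocol) according to the previous layer, then match the MPLS
 * label stack entry itself.
 */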
3086 switch (prev_layer) {
3087 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3088 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3089 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3090 MLX5_UDP_PORT_MPLS);
3092 case MLX5_FLOW_LAYER_GRE:
3093 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3094 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3096 break;
3097 default:
3098 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3099 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3100 IPPROTO_MPLS);
3101 break;
3102 }
3103 if (!in_mpls_v)
3104 return;
3105 if (!in_mpls_m)
3106 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3107 switch (prev_layer) {
3108 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3110 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3111 outer_first_mpls_over_udp);
3113 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3114 outer_first_mpls_over_udp);
3116 case MLX5_FLOW_LAYER_GRE:
3118 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3119 outer_first_mpls_over_gre);
3121 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3122 outer_first_mpls_over_gre);
3125 /* Inner MPLS not over GRE is not supported. */
3128 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3132 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3138 if (out_mpls_m && out_mpls_v) {
3139 *out_mpls_m = *in_mpls_m;
3140 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3145 * Add META item to matcher
3147 * @param[in, out] matcher
3149 * @param[in, out] key
3150 * Flow matcher value.
3152 * Flow pattern to translate.
3154 * Item is inner pattern.
3157 flow_dv_translate_item_meta(void *matcher, void *key,
3158 const struct rte_flow_item *item)
3160 const struct rte_flow_item_meta *meta_m;
3161 const struct rte_flow_item_meta *meta_v;
3163 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3165 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3167 meta_m = (const void *)item->mask;
3168 if (!meta_m)
3169 meta_m = &rte_flow_item_meta_mask;
3170 meta_v = (const void *)item->spec;
3172 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3173 rte_be_to_cpu_32(meta_m->data));
3174 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3175 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3180 * Add source vport match to the specified matcher.
3182 * @param[in, out] matcher
3184 * @param[in, out] key
3185 * Flow matcher value.
3187 * Source vport value to match
3192 flow_dv_translate_item_source_vport(void *matcher, void *key,
3193 int16_t port, uint16_t mask)
3195 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3196 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3198 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3199 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3203 * Translate port-id item to eswitch match on port-id.
3206 * The device to configure through.
3207 * @param[in, out] matcher
3209 * @param[in, out] key
3210 * Flow matcher value.
3212 * Flow pattern to translate.
3215 * 0 on success, a negative errno value otherwise.
3218 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3219 void *key, const struct rte_flow_item *item)
3221 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3222 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3223 uint16_t mask, val, id;
3226 mask = pid_m ? pid_m->id : 0xffff;
3227 id = pid_v ? pid_v->id : dev->data->port_id;
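/* Convert the DPDK port id to the E-Switch vport number before matching. */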
3228 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3231 flow_dv_translate_item_source_vport(matcher, key, val, mask);
3235 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
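/*
 * Compare a headers sub-struct of the match criteria against an all-zero
 * block to detect whether any of its fields is actually used.
 */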
3237 #define HEADER_IS_ZERO(match_criteria, headers) \
3238 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3239 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3242 * Calculate flow matcher enable bitmap.
3244 * @param match_criteria
3245 * Pointer to flow matcher criteria.
3248 * Bitmap of enabled fields.
3251 flow_dv_matcher_enable(uint32_t *match_criteria)
3253 uint8_t match_criteria_enable;
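/*
 * Each criteria-enable bit tells the device which part of the match
 * parameter (outer, misc, inner, misc2, misc3) carries meaningful fields.
 */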
3255 match_criteria_enable =
3256 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3257 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3258 match_criteria_enable |=
3259 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3260 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3261 match_criteria_enable |=
3262 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3263 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3264 match_criteria_enable |=
3265 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3266 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3267 #ifdef HAVE_MLX5DV_DR
3268 match_criteria_enable |=
3269 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3270 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3272 return match_criteria_enable;
3279 * @param dev[in, out]
3280 * Pointer to rte_eth_dev structure.
3281 * @param[in] table_id
3284 * Direction of the table.
3285 * @param[in] transfer
3286 * E-Switch or NIC flow.
3288 * pointer to error structure.
3291 * Returns the table resource based on the index, NULL in case of failure.
3293 static struct mlx5_flow_tbl_resource *
3294 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3295 uint32_t table_id, uint8_t egress,
3297 struct rte_flow_error *error)
3299 struct mlx5_priv *priv = dev->data->dev_private;
3300 struct mlx5_ibv_shared *sh = priv->sh;
3301 struct mlx5_flow_tbl_resource *tbl;
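/*
 * With DR support the table object is created on first use and reference
 * counted; otherwise the shared table entry is returned as-is.
 */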
3303 #ifdef HAVE_MLX5DV_DR
3305 tbl = &sh->fdb_tbl[table_id];
3307 tbl->obj = mlx5_glue->dr_create_flow_tbl
3308 (sh->fdb_ns, table_id);
3309 } else if (egress) {
3310 tbl = &sh->tx_tbl[table_id];
3312 tbl->obj = mlx5_glue->dr_create_flow_tbl
3313 (sh->tx_ns, table_id);
3315 tbl = &sh->rx_tbl[table_id];
3317 tbl->obj = mlx5_glue->dr_create_flow_tbl
3318 (sh->rx_ns, table_id);
3321 rte_flow_error_set(error, ENOMEM,
3322 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3323 NULL, "cannot create table");
3326 rte_atomic32_inc(&tbl->refcnt);
3332 return &sh->fdb_tbl[table_id];
3334 return &sh->tx_tbl[table_id];
3336 return &sh->rx_tbl[table_id];
3341 * Release a flow table.
3344 * Table resource to be released.
3347 * Returns 0 if the table was released, else returns 1.
3350 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3354 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3355 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3363 * Register the flow matcher.
3365 * @param dev[in, out]
3366 * Pointer to rte_eth_dev structure.
3367 * @param[in, out] matcher
3368 * Pointer to flow matcher.
3369 @param[in, out] dev_flow
3370 * Pointer to the dev_flow.
3372 * pointer to error structure.
3375 * 0 on success otherwise -errno and errno is set.
3378 flow_dv_matcher_register(struct rte_eth_dev *dev,
3379 struct mlx5_flow_dv_matcher *matcher,
3380 struct mlx5_flow *dev_flow,
3381 struct rte_flow_error *error)
3383 struct mlx5_priv *priv = dev->data->dev_private;
3384 struct mlx5_ibv_shared *sh = priv->sh;
3385 struct mlx5_flow_dv_matcher *cache_matcher;
3386 struct mlx5dv_flow_matcher_attr dv_attr = {
3387 .type = IBV_FLOW_ATTR_NORMAL,
3388 .match_mask = (void *)&matcher->mask,
3390 struct mlx5_flow_tbl_resource *tbl = NULL;
3392 /* Lookup from cache. */
3393 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3394 if (matcher->crc == cache_matcher->crc &&
3395 matcher->priority == cache_matcher->priority &&
3396 matcher->egress == cache_matcher->egress &&
3397 matcher->group == cache_matcher->group &&
3398 matcher->transfer == cache_matcher->transfer &&
3399 !memcmp((const void *)matcher->mask.buf,
3400 (const void *)cache_matcher->mask.buf,
3401 cache_matcher->mask.size)) {
3403 "priority %hd use %s matcher %p: refcnt %d++",
3404 cache_matcher->priority,
3405 cache_matcher->egress ? "tx" : "rx",
3406 (void *)cache_matcher,
3407 rte_atomic32_read(&cache_matcher->refcnt));
3408 rte_atomic32_inc(&cache_matcher->refcnt);
3409 dev_flow->dv.matcher = cache_matcher;
3413 /* Register new matcher. */
3414 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3416 return rte_flow_error_set(error, ENOMEM,
3417 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3418 "cannot allocate matcher memory");
3419 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3420 matcher->egress, matcher->transfer,
3423 rte_free(cache_matcher);
3424 return rte_flow_error_set(error, ENOMEM,
3425 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3426 NULL, "cannot create table");
3428 *cache_matcher = *matcher;
3429 dv_attr.match_criteria_enable =
3430 flow_dv_matcher_enable(cache_matcher->mask.buf);
3431 dv_attr.priority = matcher->priority;
3432 if (matcher->egress)
3433 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3434 cache_matcher->matcher_object =
3435 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3436 if (!cache_matcher->matcher_object) {
3437 rte_free(cache_matcher);
3438 #ifdef HAVE_MLX5DV_DR
3439 flow_dv_tbl_resource_release(tbl);
3441 return rte_flow_error_set(error, ENOMEM,
3442 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3443 NULL, "cannot create matcher");
3445 rte_atomic32_inc(&cache_matcher->refcnt);
3446 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3447 dev_flow->dv.matcher = cache_matcher;
3448 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3449 cache_matcher->priority,
3450 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3451 rte_atomic32_read(&cache_matcher->refcnt));
3452 rte_atomic32_inc(&tbl->refcnt);
3457 * Find existing tag resource or create and register a new one.
3459 * @param dev[in, out]
3460 * Pointer to rte_eth_dev structure.
3461 * @param[in, out] resource
3462 * Pointer to tag resource.
3463 @param[in, out] dev_flow
3464 * Pointer to the dev_flow.
3466 * pointer to error structure.
3469 * 0 on success otherwise -errno and errno is set.
3472 flow_dv_tag_resource_register
3473 (struct rte_eth_dev *dev,
3474 struct mlx5_flow_dv_tag_resource *resource,
3475 struct mlx5_flow *dev_flow,
3476 struct rte_flow_error *error)
3478 struct mlx5_priv *priv = dev->data->dev_private;
3479 struct mlx5_ibv_shared *sh = priv->sh;
3480 struct mlx5_flow_dv_tag_resource *cache_resource;
3482 /* Lookup a matching resource from cache. */
3483 LIST_FOREACH(cache_resource, &sh->tags, next) {
3484 if (resource->tag == cache_resource->tag) {
3485 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3486 (void *)cache_resource,
3487 rte_atomic32_read(&cache_resource->refcnt));
3488 rte_atomic32_inc(&cache_resource->refcnt);
3489 dev_flow->flow->tag_resource = cache_resource;
3493 /* Register new resource. */
3494 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3495 if (!cache_resource)
3496 return rte_flow_error_set(error, ENOMEM,
3497 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3498 "cannot allocate resource memory");
3499 *cache_resource = *resource;
3500 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3502 if (!cache_resource->action) {
3503 rte_free(cache_resource);
3504 return rte_flow_error_set(error, ENOMEM,
3505 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3506 NULL, "cannot create action");
3508 rte_atomic32_init(&cache_resource->refcnt);
3509 rte_atomic32_inc(&cache_resource->refcnt);
3510 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3511 dev_flow->flow->tag_resource = cache_resource;
3512 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3513 (void *)cache_resource,
3514 rte_atomic32_read(&cache_resource->refcnt));
3522 * Pointer to Ethernet device.
3524 * Pointer to the tag resource.
3527 * 1 while a reference on it exists, 0 when freed.
3530 flow_dv_tag_release(struct rte_eth_dev *dev,
3531 struct mlx5_flow_dv_tag_resource *tag)
3534 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3535 dev->data->port_id, (void *)tag,
3536 rte_atomic32_read(&tag->refcnt));
3537 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3538 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3539 LIST_REMOVE(tag, next);
3540 DRV_LOG(DEBUG, "port %u tag %p: removed",
3541 dev->data->port_id, (void *)tag);
3549 * Translate port ID action to vport.
3552 * Pointer to rte_eth_dev structure.
3554 * Pointer to the port ID action.
3555 * @param[out] dst_port_id
3556 * The target port ID.
3558 * Pointer to the error structure.
3561 * 0 on success, a negative errno value otherwise and rte_errno is set.
3564 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3565 const struct rte_flow_action *action,
3566 uint32_t *dst_port_id,
3567 struct rte_flow_error *error)
3572 const struct rte_flow_action_port_id *conf =
3573 (const struct rte_flow_action_port_id *)action->conf;
3575 port = conf->original ? dev->data->port_id : conf->id;
3576 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3578 return rte_flow_error_set(error, -ret,
3579 RTE_FLOW_ERROR_TYPE_ACTION,
3581 "No eswitch info was found for port");
3582 *dst_port_id = port_id;
3587 * Fill the flow with DV spec.
3590 * Pointer to rte_eth_dev structure.
3591 * @param[in, out] dev_flow
3592 * Pointer to the sub flow.
3594 * Pointer to the flow attributes.
3596 * Pointer to the list of items.
3597 * @param[in] actions
3598 * Pointer to the list of actions.
3600 * Pointer to the error structure.
3603 * 0 on success, a negative errno value otherwise and rte_errno is set.
3606 flow_dv_translate(struct rte_eth_dev *dev,
3607 struct mlx5_flow *dev_flow,
3608 const struct rte_flow_attr *attr,
3609 const struct rte_flow_item items[],
3610 const struct rte_flow_action actions[],
3611 struct rte_flow_error *error)
3613 struct mlx5_priv *priv = dev->data->dev_private;
3614 struct rte_flow *flow = dev_flow->flow;
3615 uint64_t item_flags = 0;
3616 uint64_t last_item = 0;
3617 uint64_t action_flags = 0;
3618 uint64_t priority = attr->priority;
3619 struct mlx5_flow_dv_matcher matcher = {
3621 .size = sizeof(matcher.mask.buf),
3625 bool actions_end = false;
3626 struct mlx5_flow_dv_modify_hdr_resource res = {
3627 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3628 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3630 union flow_dv_attr flow_attr = { .attr = 0 };
3631 struct mlx5_flow_dv_tag_resource tag_resource;
3632 uint32_t modify_action_position = UINT32_MAX;
3633 void *match_mask = matcher.mask.buf;
3634 void *match_value = dev_flow->dv.value.buf;
3636 flow->group = attr->group;
3638 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3639 if (priority == MLX5_FLOW_PRIO_RSVD)
3640 priority = priv->config.flow_prio - 1;
3641 for (; !actions_end ; actions++) {
3642 const struct rte_flow_action_queue *queue;
3643 const struct rte_flow_action_rss *rss;
3644 const struct rte_flow_action *action = actions;
3645 const struct rte_flow_action_count *count = action->conf;
3646 const uint8_t *rss_key;
3647 const struct rte_flow_action_jump *jump_data;
3648 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3649 struct mlx5_flow_tbl_resource *tbl;
3650 uint32_t port_id = 0;
3651 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3653 switch (actions->type) {
3654 case RTE_FLOW_ACTION_TYPE_VOID:
3656 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3657 if (flow_dv_translate_action_port_id(dev, action,
3660 port_id_resource.port_id = port_id;
3661 if (flow_dv_port_id_action_resource_register
3662 (dev, &port_id_resource, dev_flow, error))
3664 dev_flow->dv.actions[actions_n++] =
3665 dev_flow->dv.port_id_action->action;
3666 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3668 case RTE_FLOW_ACTION_TYPE_FLAG:
3669 tag_resource.tag =
3670 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3671 if (!flow->tag_resource)
3672 if (flow_dv_tag_resource_register
3673 (dev, &tag_resource, dev_flow, error))
3675 dev_flow->dv.actions[actions_n++] =
3676 flow->tag_resource->action;
3677 action_flags |= MLX5_FLOW_ACTION_FLAG;
3679 case RTE_FLOW_ACTION_TYPE_MARK:
3680 tag_resource.tag = mlx5_flow_mark_set
3681 (((const struct rte_flow_action_mark *)
3682 (actions->conf))->id);
3683 if (!flow->tag_resource)
3684 if (flow_dv_tag_resource_register
3685 (dev, &tag_resource, dev_flow, error))
3687 dev_flow->dv.actions[actions_n++] =
3688 flow->tag_resource->action;
3689 action_flags |= MLX5_FLOW_ACTION_MARK;
3691 case RTE_FLOW_ACTION_TYPE_DROP:
3692 action_flags |= MLX5_FLOW_ACTION_DROP;
3694 case RTE_FLOW_ACTION_TYPE_QUEUE:
3695 queue = actions->conf;
3696 flow->rss.queue_num = 1;
3697 (*flow->queue)[0] = queue->index;
3698 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3700 case RTE_FLOW_ACTION_TYPE_RSS:
3701 rss = actions->conf;
3703 memcpy((*flow->queue), rss->queue,
3704 rss->queue_num * sizeof(uint16_t));
3705 flow->rss.queue_num = rss->queue_num;
3706 /* NULL RSS key indicates default RSS key. */
3707 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3708 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3709 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3710 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3711 flow->rss.level = rss->level;
3712 action_flags |= MLX5_FLOW_ACTION_RSS;
3714 case RTE_FLOW_ACTION_TYPE_COUNT:
3715 if (!priv->config.devx) {
3716 rte_errno = ENOTSUP;
3719 flow->counter = flow_dv_counter_new(dev, count->shared,
3721 if (flow->counter == NULL)
3723 dev_flow->dv.actions[actions_n++] =
3724 flow->counter->action;
3725 action_flags |= MLX5_FLOW_ACTION_COUNT;
3728 if (rte_errno == ENOTSUP)
3729 return rte_flow_error_set
3731 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3733 "count action not supported");
3735 return rte_flow_error_set
3737 RTE_FLOW_ERROR_TYPE_ACTION,
3739 "cannot create counter"
3741 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3742 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3743 if (flow_dv_create_action_l2_encap(dev, actions,
3748 dev_flow->dv.actions[actions_n++] =
3749 dev_flow->dv.encap_decap->verbs_action;
3750 action_flags |= actions->type ==
3751 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3752 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3753 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3755 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3756 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3757 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3761 dev_flow->dv.actions[actions_n++] =
3762 dev_flow->dv.encap_decap->verbs_action;
3763 action_flags |= actions->type ==
3764 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3765 MLX5_FLOW_ACTION_VXLAN_DECAP :
3766 MLX5_FLOW_ACTION_NVGRE_DECAP;
3768 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3769 /* Handle encap with preceding decap. */
3770 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3771 if (flow_dv_create_action_raw_encap
3772 (dev, actions, dev_flow, attr, error))
3774 dev_flow->dv.actions[actions_n++] =
3775 dev_flow->dv.encap_decap->verbs_action;
3777 /* Handle encap without preceding decap. */
3778 if (flow_dv_create_action_l2_encap
3779 (dev, actions, dev_flow, attr->transfer,
3782 dev_flow->dv.actions[actions_n++] =
3783 dev_flow->dv.encap_decap->verbs_action;
3785 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3787 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3788 /* Check if this decap is followed by encap. */
3789 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3790 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3793 /* Handle decap only if it isn't followed by encap. */
3794 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3795 if (flow_dv_create_action_l2_decap
3796 (dev, dev_flow, attr->transfer, error))
3798 dev_flow->dv.actions[actions_n++] =
3799 dev_flow->dv.encap_decap->verbs_action;
3801 /* If decap is followed by encap, handle it at encap. */
3802 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3804 case RTE_FLOW_ACTION_TYPE_JUMP:
3805 jump_data = action->conf;
3806 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3809 attr->transfer, error);
3811 return rte_flow_error_set
3813 RTE_FLOW_ERROR_TYPE_ACTION,
3815 "cannot create jump action.");
3816 jump_tbl_resource.tbl = tbl;
3817 if (flow_dv_jump_tbl_resource_register
3818 (dev, &jump_tbl_resource, dev_flow, error)) {
3819 flow_dv_tbl_resource_release(tbl);
3820 return rte_flow_error_set
3822 RTE_FLOW_ERROR_TYPE_ACTION,
3824 "cannot create jump action.");
3826 dev_flow->dv.actions[actions_n++] =
3827 dev_flow->dv.jump->action;
3828 action_flags |= MLX5_FLOW_ACTION_JUMP;
3830 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3831 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3832 if (flow_dv_convert_action_modify_mac(&res, actions,
3835 action_flags |= actions->type ==
3836 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3837 MLX5_FLOW_ACTION_SET_MAC_SRC :
3838 MLX5_FLOW_ACTION_SET_MAC_DST;
3840 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3841 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3842 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3845 action_flags |= actions->type ==
3846 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3847 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3848 MLX5_FLOW_ACTION_SET_IPV4_DST;
3850 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3851 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3852 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3855 action_flags |= actions->type ==
3856 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3857 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3858 MLX5_FLOW_ACTION_SET_IPV6_DST;
3860 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3861 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3862 if (flow_dv_convert_action_modify_tp(&res, actions,
3866 action_flags |= actions->type ==
3867 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3868 MLX5_FLOW_ACTION_SET_TP_SRC :
3869 MLX5_FLOW_ACTION_SET_TP_DST;
3871 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3872 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3876 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3878 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3879 if (flow_dv_convert_action_modify_ttl(&res, actions,
3883 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3885 case RTE_FLOW_ACTION_TYPE_END:
3887 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3888 /* create modify action if needed. */
3889 if (flow_dv_modify_hdr_resource_register
3894 dev_flow->dv.actions[modify_action_position] =
3895 dev_flow->dv.modify_hdr->verbs_action;
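/*
 * Reserve one action slot the first time a modify-header action is met;
 * the combined modify-header object is created once END is reached.
 */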
3901 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3902 modify_action_position == UINT32_MAX)
3903 modify_action_position = actions_n++;
3905 dev_flow->dv.actions_n = actions_n;
3906 flow->actions = action_flags;
3907 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3908 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3910 switch (items->type) {
3911 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3912 flow_dv_translate_item_port_id(dev, match_mask,
3913 match_value, items);
3914 last_item = MLX5_FLOW_ITEM_PORT_ID;
3916 case RTE_FLOW_ITEM_TYPE_ETH:
3917 flow_dv_translate_item_eth(match_mask, match_value,
3919 matcher.priority = MLX5_PRIORITY_MAP_L2;
3920 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3921 MLX5_FLOW_LAYER_OUTER_L2;
3923 case RTE_FLOW_ITEM_TYPE_VLAN:
3924 flow_dv_translate_item_vlan(match_mask, match_value,
3926 matcher.priority = MLX5_PRIORITY_MAP_L2;
3927 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3928 MLX5_FLOW_LAYER_INNER_VLAN) :
3929 (MLX5_FLOW_LAYER_OUTER_L2 |
3930 MLX5_FLOW_LAYER_OUTER_VLAN);
3932 case RTE_FLOW_ITEM_TYPE_IPV4:
3933 flow_dv_translate_item_ipv4(match_mask, match_value,
3934 items, tunnel, attr->group);
3935 matcher.priority = MLX5_PRIORITY_MAP_L3;
3936 dev_flow->dv.hash_fields |=
3937 mlx5_flow_hashfields_adjust
3939 MLX5_IPV4_LAYER_TYPES,
3940 MLX5_IPV4_IBV_RX_HASH);
3941 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3942 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3944 case RTE_FLOW_ITEM_TYPE_IPV6:
3945 flow_dv_translate_item_ipv6(match_mask, match_value,
3946 items, tunnel, attr->group);
3947 matcher.priority = MLX5_PRIORITY_MAP_L3;
3948 dev_flow->dv.hash_fields |=
3949 mlx5_flow_hashfields_adjust
3951 MLX5_IPV6_LAYER_TYPES,
3952 MLX5_IPV6_IBV_RX_HASH);
3953 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3954 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3956 case RTE_FLOW_ITEM_TYPE_TCP:
3957 flow_dv_translate_item_tcp(match_mask, match_value,
3959 matcher.priority = MLX5_PRIORITY_MAP_L4;
3960 dev_flow->dv.hash_fields |=
3961 mlx5_flow_hashfields_adjust
3962 (dev_flow, tunnel, ETH_RSS_TCP,
3963 IBV_RX_HASH_SRC_PORT_TCP |
3964 IBV_RX_HASH_DST_PORT_TCP);
3965 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3966 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3968 case RTE_FLOW_ITEM_TYPE_UDP:
3969 flow_dv_translate_item_udp(match_mask, match_value,
3971 matcher.priority = MLX5_PRIORITY_MAP_L4;
3972 dev_flow->dv.hash_fields |=
3973 mlx5_flow_hashfields_adjust
3974 (dev_flow, tunnel, ETH_RSS_UDP,
3975 IBV_RX_HASH_SRC_PORT_UDP |
3976 IBV_RX_HASH_DST_PORT_UDP);
3977 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3978 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3980 case RTE_FLOW_ITEM_TYPE_GRE:
3981 flow_dv_translate_item_gre(match_mask, match_value,
3983 last_item = MLX5_FLOW_LAYER_GRE;
3985 case RTE_FLOW_ITEM_TYPE_NVGRE:
3986 flow_dv_translate_item_nvgre(match_mask, match_value,
3988 last_item = MLX5_FLOW_LAYER_GRE;
3990 case RTE_FLOW_ITEM_TYPE_VXLAN:
3991 flow_dv_translate_item_vxlan(match_mask, match_value,
3993 last_item = MLX5_FLOW_LAYER_VXLAN;
3995 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3996 flow_dv_translate_item_vxlan(match_mask, match_value,
3998 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4000 case RTE_FLOW_ITEM_TYPE_MPLS:
4001 flow_dv_translate_item_mpls(match_mask, match_value,
4002 items, last_item, tunnel);
4003 last_item = MLX5_FLOW_LAYER_MPLS;
4005 case RTE_FLOW_ITEM_TYPE_META:
4006 flow_dv_translate_item_meta(match_mask, match_value,
4008 last_item = MLX5_FLOW_ITEM_METADATA;
4013 item_flags |= last_item;
4016 * In case of ingress traffic when E-Switch mode is enabled,
4017 * we have two cases where we need to set the source port manually.
4018 * The first one is the case of a NIC steering rule, and the second is
4019 * an E-Switch rule where no port_id item was found. In both cases
4020 * the source port is set according to the current port in use.
4022 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4023 (priv->representor || priv->master)) {
4024 if (flow_dv_translate_item_port_id(dev, match_mask,
4028 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4029 dev_flow->dv.value.buf));
4030 dev_flow->layers = item_flags;
4031 /* Register matcher. */
4032 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4034 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4036 matcher.egress = attr->egress;
4037 matcher.group = attr->group;
4038 matcher.transfer = attr->transfer;
4039 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4045 * Apply the flow to the NIC.
4048 * Pointer to the Ethernet device structure.
4049 * @param[in, out] flow
4050 * Pointer to flow structure.
4052 * Pointer to error structure.
4055 * 0 on success, a negative errno value otherwise and rte_errno is set.
4058 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4059 struct rte_flow_error *error)
4061 struct mlx5_flow_dv *dv;
4062 struct mlx5_flow *dev_flow;
4063 struct mlx5_priv *priv = dev->data->dev_private;
4067 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
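/*
 * Resolve the fate object per sub-flow: E-Switch drop action, drop hash
 * queue, or a hash Rx queue for QUEUE/RSS.
 */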
4070 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
4071 if (flow->transfer) {
4072 dv->actions[n++] = priv->sh->esw_drop_action;
4074 dv->hrxq = mlx5_hrxq_drop_new(dev);
4078 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4080 "cannot get drop hash queue");
4083 dv->actions[n++] = dv->hrxq->action;
4085 } else if (flow->actions &
4086 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4087 struct mlx5_hrxq *hrxq;
4089 hrxq = mlx5_hrxq_get(dev, flow->key,
4090 MLX5_RSS_HASH_KEY_LEN,
4093 flow->rss.queue_num);
4095 hrxq = mlx5_hrxq_new
4096 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4097 dv->hash_fields, (*flow->queue),
4098 flow->rss.queue_num,
4099 !!(dev_flow->layers &
4100 MLX5_FLOW_LAYER_TUNNEL));
4104 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4105 "cannot get hash queue");
4109 dv->actions[n++] = dv->hrxq->action;
4110 }
4111 dv->flow =
4112 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4113 (void *)&dv->value, n,
4116 rte_flow_error_set(error, errno,
4117 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4119 "hardware refuses to create flow");
4125 err = rte_errno; /* Save rte_errno before cleanup. */
4126 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4127 struct mlx5_flow_dv *dv = &dev_flow->dv;
4129 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4130 mlx5_hrxq_drop_release(dev);
4132 mlx5_hrxq_release(dev, dv->hrxq);
4136 rte_errno = err; /* Restore rte_errno. */
4141 * Release the flow matcher.
4144 * Pointer to Ethernet device.
4146 * Pointer to mlx5_flow.
4149 * 1 while a reference on it exists, 0 when freed.
4152 flow_dv_matcher_release(struct rte_eth_dev *dev,
4153 struct mlx5_flow *flow)
4155 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4156 struct mlx5_priv *priv = dev->data->dev_private;
4157 struct mlx5_ibv_shared *sh = priv->sh;
4158 struct mlx5_flow_tbl_resource *tbl;
4160 assert(matcher->matcher_object);
4161 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4162 dev->data->port_id, (void *)matcher,
4163 rte_atomic32_read(&matcher->refcnt));
4164 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4165 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4166 (matcher->matcher_object));
4167 LIST_REMOVE(matcher, next);
4168 if (matcher->egress)
4169 tbl = &sh->tx_tbl[matcher->group];
4171 tbl = &sh->rx_tbl[matcher->group];
4172 flow_dv_tbl_resource_release(tbl);
4174 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4175 dev->data->port_id, (void *)matcher);
4182 * Release an encap/decap resource.
4185 * Pointer to mlx5_flow.
4188 * 1 while a reference on it exists, 0 when freed.
4191 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4193 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4194 flow->dv.encap_decap;
4196 assert(cache_resource->verbs_action);
4197 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4198 (void *)cache_resource,
4199 rte_atomic32_read(&cache_resource->refcnt));
4200 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4201 claim_zero(mlx5_glue->destroy_flow_action
4202 (cache_resource->verbs_action));
4203 LIST_REMOVE(cache_resource, next);
4204 rte_free(cache_resource);
4205 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4206 (void *)cache_resource);
4213 * Release a jump to table action resource.
4216 * Pointer to mlx5_flow.
4219 * 1 while a reference on it exists, 0 when freed.
4222 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4224 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4227 assert(cache_resource->action);
4228 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4229 (void *)cache_resource,
4230 rte_atomic32_read(&cache_resource->refcnt));
4231 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4232 claim_zero(mlx5_glue->destroy_flow_action
4233 (cache_resource->action));
4234 LIST_REMOVE(cache_resource, next);
4235 flow_dv_tbl_resource_release(cache_resource->tbl);
4236 rte_free(cache_resource);
4237 DRV_LOG(DEBUG, "jump table resource %p: removed",
4238 (void *)cache_resource);
4245 * Release a modify-header resource.
4248 * Pointer to mlx5_flow.
4251 * 1 while a reference on it exists, 0 when freed.
4254 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4256 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4257 flow->dv.modify_hdr;
4259 assert(cache_resource->verbs_action);
4260 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4261 (void *)cache_resource,
4262 rte_atomic32_read(&cache_resource->refcnt));
4263 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4264 claim_zero(mlx5_glue->destroy_flow_action
4265 (cache_resource->verbs_action));
4266 LIST_REMOVE(cache_resource, next);
4267 rte_free(cache_resource);
4268 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4269 (void *)cache_resource);
4276 * Release port ID action resource.
4279 * Pointer to mlx5_flow.
4282 * 1 while a reference on it exists, 0 when freed.
4285 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4287 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4288 flow->dv.port_id_action;
4290 assert(cache_resource->action);
4291 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4292 (void *)cache_resource,
4293 rte_atomic32_read(&cache_resource->refcnt));
4294 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4295 claim_zero(mlx5_glue->destroy_flow_action
4296 (cache_resource->action));
4297 LIST_REMOVE(cache_resource, next);
4298 rte_free(cache_resource);
4299 DRV_LOG(DEBUG, "port id action resource %p: removed",
4300 (void *)cache_resource);
4307 * Remove the flow from the NIC but keep it in memory.
4310 * Pointer to Ethernet device.
4311 * @param[in, out] flow
4312 * Pointer to flow structure.
4315 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4317 struct mlx5_flow_dv *dv;
4318 struct mlx5_flow *dev_flow;
4322 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4325 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4329 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4330 mlx5_hrxq_drop_release(dev);
4332 mlx5_hrxq_release(dev, dv->hrxq);
4339 * Remove the flow from the NIC and the memory.
4342 * Pointer to the Ethernet device structure.
4343 * @param[in, out] flow
4344 * Pointer to flow structure.
4347 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4349 struct mlx5_flow *dev_flow;
4353 flow_dv_remove(dev, flow);
4354 if (flow->counter) {
4355 flow_dv_counter_release(flow->counter);
4356 flow->counter = NULL;
4358 if (flow->tag_resource) {
4359 flow_dv_tag_release(dev, flow->tag_resource);
4360 flow->tag_resource = NULL;
4362 while (!LIST_EMPTY(&flow->dev_flows)) {
4363 dev_flow = LIST_FIRST(&flow->dev_flows);
4364 LIST_REMOVE(dev_flow, next);
4365 if (dev_flow->dv.matcher)
4366 flow_dv_matcher_release(dev, dev_flow);
4367 if (dev_flow->dv.encap_decap)
4368 flow_dv_encap_decap_resource_release(dev_flow);
4369 if (dev_flow->dv.modify_hdr)
4370 flow_dv_modify_hdr_resource_release(dev_flow);
4371 if (dev_flow->dv.jump)
4372 flow_dv_jump_tbl_resource_release(dev_flow);
4373 if (dev_flow->dv.port_id_action)
4374 flow_dv_port_id_action_resource_release(dev_flow);
4380 * Query a dv flow rule for its statistics via devx.
4383 * Pointer to Ethernet device.
4385 * Pointer to the sub flow.
4387 * data retrieved by the query.
4389 * Perform verbose error reporting if not NULL.
4392 * 0 on success, a negative errno value otherwise and rte_errno is set.
4395 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4396 void *data, struct rte_flow_error *error)
4398 struct mlx5_priv *priv = dev->data->dev_private;
4399 struct rte_flow_query_count *qc = data;
4404 if (!priv->config.devx)
4405 return rte_flow_error_set(error, ENOTSUP,
4406 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4408 "counters are not supported");
4409 if (flow->counter) {
4410 err = mlx5_devx_cmd_flow_counter_query
4411 (flow->counter->dcs,
4412 qc->reset, &pkts, &bytes);
4414 return rte_flow_error_set
4416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4418 "cannot read counters");
4421 qc->hits = pkts - flow->counter->hits;
4422 qc->bytes = bytes - flow->counter->bytes;
4423 if (qc->reset) {
4424 flow->counter->hits = pkts;
4425 flow->counter->bytes = bytes;
4429 return rte_flow_error_set(error, EINVAL,
4430 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4432 "counters are not available");
4438 * @see rte_flow_query()
4442 flow_dv_query(struct rte_eth_dev *dev,
4443 struct rte_flow *flow,
4444 const struct rte_flow_action *actions,
4445 void *data,
4446 struct rte_flow_error *error)
4450 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4451 switch (actions->type) {
4452 case RTE_FLOW_ACTION_TYPE_VOID:
4454 case RTE_FLOW_ACTION_TYPE_COUNT:
4455 ret = flow_dv_query_count(dev, flow, data, error);
4458 return rte_flow_error_set(error, ENOTSUP,
4459 RTE_FLOW_ERROR_TYPE_ACTION,
4461 "action not supported");
4468 * Mutex-protected thunk to flow_dv_translate().
4471 flow_d_translate(struct rte_eth_dev *dev,
4472 struct mlx5_flow *dev_flow,
4473 const struct rte_flow_attr *attr,
4474 const struct rte_flow_item items[],
4475 const struct rte_flow_action actions[],
4476 struct rte_flow_error *error)
4480 flow_d_shared_lock(dev);
4481 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4482 flow_d_shared_unlock(dev);
4487 * Mutex-protected thunk to flow_dv_apply().
4490 flow_d_apply(struct rte_eth_dev *dev,
4491 struct rte_flow *flow,
4492 struct rte_flow_error *error)
4496 flow_d_shared_lock(dev);
4497 ret = flow_dv_apply(dev, flow, error);
4498 flow_d_shared_unlock(dev);
4503 * Mutex-protected thunk to flow_dv_remove().
4506 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4508 flow_d_shared_lock(dev);
4509 flow_dv_remove(dev, flow);
4510 flow_d_shared_unlock(dev);
4514 * Mutex-protected thunk to flow_dv_destroy().
4517 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4519 flow_d_shared_lock(dev);
4520 flow_dv_destroy(dev, flow);
4521 flow_d_shared_unlock(dev);
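/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * request that is served by the ops table below, assuming port 0 is bound
 * to this PMD with DV flows enabled:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 *
 * The generic mlx5 flow layer then invokes .validate, .prepare, .translate
 * and .apply from this table.
 */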
4524 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4525 .validate = flow_dv_validate,
4526 .prepare = flow_dv_prepare,
4527 .translate = flow_d_translate,
4528 .apply = flow_d_apply,
4529 .remove = flow_d_remove,
4530 .destroy = flow_d_destroy,
4531 .query = flow_dv_query,
4534 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */