1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
59 * Initialize flow attributes structure according to flow items' types.
62 * Pointer to item specification.
64 * Pointer to flow attributes structure.
/* Walk the item list and record which header types the pattern contains. */
67 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
69 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* NOTE(review): the switch body is elided in this listing; each case
 * presumably sets the corresponding flag in *attr — confirm in full source. */
71 case RTE_FLOW_ITEM_TYPE_IPV4:
74 case RTE_FLOW_ITEM_TYPE_IPV6:
77 case RTE_FLOW_ITEM_TYPE_UDP:
80 case RTE_FLOW_ITEM_TYPE_TCP:
/* Describes one modifiable sub-field of a protocol header: how wide it is,
 * where it sits in the header, and the device modification-field id. */
90 struct field_modify_info {
91 uint32_t size; /* Size of field in protocol header, in bytes. */
92 uint32_t offset; /* Offset of field in protocol header, in bytes. */
93 enum mlx5_modification_field id;
/* Ethernet header fields: DMAC/SMAC split into 32-bit high and 16-bit
 * low parts to match the device modification-field granularity. */
96 struct field_modify_info modify_eth[] = {
97 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
98 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
99 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
100 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* IPv4 header fields: TTL (1 byte at offset 8), source and destination
 * addresses (4 bytes at offsets 12 and 16). */
104 struct field_modify_info modify_ipv4[] = {
105 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
106 {4, 12, MLX5_MODI_OUT_SIPV4},
107 {4, 16, MLX5_MODI_OUT_DIPV4},
/* IPv6 header fields: hop limit plus the 128-bit source/destination
 * addresses split into four 32-bit segments each. */
111 struct field_modify_info modify_ipv6[] = {
112 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
113 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
114 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
115 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
116 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
117 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
118 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
119 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
120 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP source/destination ports (2 bytes each). */
124 struct field_modify_info modify_udp[] = {
125 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
126 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* TCP source/destination ports (2 bytes each). */
130 struct field_modify_info modify_tcp[] = {
131 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
132 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
137 * Acquire the synchronizing object to protect multithreaded access
138 * to shared dv context. Lock occurs only if context is actually
139 * shared, i.e. we have multiport IB device and representors are
143 * Pointer to the rte_eth_dev structure.
/* Take sh->dv_mutex only when the IB context is actually shared
 * (dv_refcnt > 1, i.e. multiport device with representors); a
 * non-shared context needs no serialization. */
146 flow_d_shared_lock(struct rte_eth_dev *dev)
148 struct mlx5_priv *priv = dev->data->dev_private;
149 struct mlx5_ibv_shared *sh = priv->sh;
151 if (sh->dv_refcnt > 1) {
154 ret = pthread_mutex_lock(&sh->dv_mutex);
/* Release sh->dv_mutex; mirrors flow_d_shared_lock() and only unlocks
 * when the context is shared (dv_refcnt > 1). */
161 flow_d_shared_unlock(struct rte_eth_dev *dev)
163 struct mlx5_priv *priv = dev->data->dev_private;
164 struct mlx5_ibv_shared *sh = priv->sh;
166 if (sh->dv_refcnt > 1) {
169 ret = pthread_mutex_unlock(&sh->dv_mutex);
176 * Convert modify-header action to DV specification.
179 * Pointer to item specification.
181 * Pointer to field modification information.
182 * @param[in,out] resource
183 * Pointer to the modify-header resource.
185 * Type of modification.
187 * Pointer to the error structure.
190 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Translate a synthetic flow item (spec + mask) into device modification
 * commands appended to @resource, one command per masked field segment
 * described by the @field table. */
193 flow_dv_convert_modify_action(struct rte_flow_item *item,
194 struct field_modify_info *field,
195 struct mlx5_flow_dv_modify_hdr_resource *resource,
197 struct rte_flow_error *error)
/* Continue appending after any commands already in the resource. */
199 uint32_t i = resource->actions_num;
200 struct mlx5_modification_cmd *actions = resource->actions;
201 const uint8_t *spec = item->spec;
202 const uint8_t *mask = item->mask;
/* The field table is terminated by an entry with size == 0. */
205 while (field->size) {
207 /* Generate modify command for each mask segment. */
208 memcpy(&set, &mask[field->offset], field->size);
/* Bound check: the resource holds at most MLX5_MODIFY_NUM commands. */
210 if (i >= MLX5_MODIFY_NUM)
211 return rte_flow_error_set(error, EINVAL,
212 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
213 "too many items to modify");
214 actions[i].action_type = type;
215 actions[i].field = field->id;
/* Length is in bits; 0 apparently encodes a full 32-bit field —
 * confirm against the device command format. */
216 actions[i].length = field->size ==
217 4 ? 0 : field->size * 8;
/* Right-align sub-32-bit values inside the 4-byte data area. */
218 rte_memcpy(&actions[i].data[4 - field->size],
219 &spec[field->offset], field->size);
220 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
223 if (resource->actions_num != i)
224 resource->actions_num = i;
/* A mask that selected no field at all is a caller error. */
227 if (!resource->actions_num)
228 return rte_flow_error_set(error, EINVAL,
229 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
230 "invalid modification flow item");
235 * Convert modify-header set IPv4 address action to DV specification.
237 * @param[in,out] resource
238 * Pointer to the modify-header resource.
240 * Pointer to action specification.
242 * Pointer to the error structure.
245 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Build a synthetic IPv4 item for a set-src/set-dst address action and
 * hand it to flow_dv_convert_modify_action() as a SET modification. */
248 flow_dv_convert_action_modify_ipv4
249 (struct mlx5_flow_dv_modify_hdr_resource *resource,
250 const struct rte_flow_action *action,
251 struct rte_flow_error *error)
253 const struct rte_flow_action_set_ipv4 *conf =
254 (const struct rte_flow_action_set_ipv4 *)(action->conf);
255 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
256 struct rte_flow_item_ipv4 ipv4;
257 struct rte_flow_item_ipv4 ipv4_mask;
259 memset(&ipv4, 0, sizeof(ipv4));
260 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
/* Fill only the address (src or dst) named by the action type; the
 * default item mask limits the modification to that field. */
261 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
262 ipv4.hdr.src_addr = conf->ipv4_addr;
263 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
265 ipv4.hdr.dst_addr = conf->ipv4_addr;
266 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
269 item.mask = &ipv4_mask;
270 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
271 MLX5_MODIFICATION_TYPE_SET, error);
275 * Convert modify-header set IPv6 address action to DV specification.
277 * @param[in,out] resource
278 * Pointer to the modify-header resource.
280 * Pointer to action specification.
282 * Pointer to the error structure.
285 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Build a synthetic IPv6 item for a set-src/set-dst address action and
 * hand it to flow_dv_convert_modify_action() as a SET modification. */
288 flow_dv_convert_action_modify_ipv6
289 (struct mlx5_flow_dv_modify_hdr_resource *resource,
290 const struct rte_flow_action *action,
291 struct rte_flow_error *error)
293 const struct rte_flow_action_set_ipv6 *conf =
294 (const struct rte_flow_action_set_ipv6 *)(action->conf);
295 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
296 struct rte_flow_item_ipv6 ipv6;
297 struct rte_flow_item_ipv6 ipv6_mask;
299 memset(&ipv6, 0, sizeof(ipv6));
300 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
/* Copy the 128-bit address into src or dst according to the action
 * type, mirroring the default item mask for that field. */
301 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
302 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
303 sizeof(ipv6.hdr.src_addr));
304 memcpy(&ipv6_mask.hdr.src_addr,
305 &rte_flow_item_ipv6_mask.hdr.src_addr,
306 sizeof(ipv6.hdr.src_addr));
308 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
309 sizeof(ipv6.hdr.dst_addr));
310 memcpy(&ipv6_mask.hdr.dst_addr,
311 &rte_flow_item_ipv6_mask.hdr.dst_addr,
312 sizeof(ipv6.hdr.dst_addr));
315 item.mask = &ipv6_mask;
316 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
317 MLX5_MODIFICATION_TYPE_SET, error);
321 * Convert modify-header set MAC address action to DV specification.
323 * @param[in,out] resource
324 * Pointer to the modify-header resource.
326 * Pointer to action specification.
328 * Pointer to the error structure.
331 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Build a synthetic ETH item for a set-src/set-dst MAC action and hand
 * it to flow_dv_convert_modify_action() as a SET modification. */
334 flow_dv_convert_action_modify_mac
335 (struct mlx5_flow_dv_modify_hdr_resource *resource,
336 const struct rte_flow_action *action,
337 struct rte_flow_error *error)
339 const struct rte_flow_action_set_mac *conf =
340 (const struct rte_flow_action_set_mac *)(action->conf);
341 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
342 struct rte_flow_item_eth eth;
343 struct rte_flow_item_eth eth_mask;
345 memset(&eth, 0, sizeof(eth));
346 memset(&eth_mask, 0, sizeof(eth_mask));
/* Copy the 6-byte MAC into src or dst according to the action type,
 * mirroring the default ETH item mask for that field. */
347 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
348 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
349 sizeof(eth.src.addr_bytes));
350 memcpy(&eth_mask.src.addr_bytes,
351 &rte_flow_item_eth_mask.src.addr_bytes,
352 sizeof(eth_mask.src.addr_bytes));
354 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
355 sizeof(eth.dst.addr_bytes));
356 memcpy(&eth_mask.dst.addr_bytes,
357 &rte_flow_item_eth_mask.dst.addr_bytes,
358 sizeof(eth_mask.dst.addr_bytes));
361 item.mask = &eth_mask;
362 return flow_dv_convert_modify_action(&item, modify_eth, resource,
363 MLX5_MODIFICATION_TYPE_SET, error);
367 * Convert modify-header set TP action to DV specification.
369 * @param[in,out] resource
370 * Pointer to the modify-header resource.
372 * Pointer to action specification.
374 * Pointer to rte_flow_item objects list.
376 * Pointer to flow attributes structure.
378 * Pointer to the error structure.
381 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Convert a set-TP-port action into a SET modification. The transport
 * protocol (UDP vs TCP) is taken from the pattern via the attr flags,
 * so a matching synthetic item and field table are chosen. */
384 flow_dv_convert_action_modify_tp
385 (struct mlx5_flow_dv_modify_hdr_resource *resource,
386 const struct rte_flow_action *action,
387 const struct rte_flow_item *items,
388 union flow_dv_attr *attr,
389 struct rte_flow_error *error)
391 const struct rte_flow_action_set_tp *conf =
392 (const struct rte_flow_action_set_tp *)(action->conf);
393 struct rte_flow_item item;
394 struct rte_flow_item_udp udp;
395 struct rte_flow_item_udp udp_mask;
396 struct rte_flow_item_tcp tcp;
397 struct rte_flow_item_tcp tcp_mask;
398 struct field_modify_info *field;
/* Classify the pattern first so the right L4 branch is taken below. */
401 flow_dv_attr_init(items, attr);
/* UDP branch — NOTE(review): the guarding condition (attr->udp?) is
 * elided in this listing. */
403 memset(&udp, 0, sizeof(udp));
404 memset(&udp_mask, 0, sizeof(udp_mask));
405 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
406 udp.hdr.src_port = conf->port;
407 udp_mask.hdr.src_port =
408 rte_flow_item_udp_mask.hdr.src_port;
410 udp.hdr.dst_port = conf->port;
411 udp_mask.hdr.dst_port =
412 rte_flow_item_udp_mask.hdr.dst_port;
414 item.type = RTE_FLOW_ITEM_TYPE_UDP;
416 item.mask = &udp_mask;
/* TCP branch — same structure with the TCP item and field table. */
420 memset(&tcp, 0, sizeof(tcp));
421 memset(&tcp_mask, 0, sizeof(tcp_mask));
422 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
423 tcp.hdr.src_port = conf->port;
424 tcp_mask.hdr.src_port =
425 rte_flow_item_tcp_mask.hdr.src_port;
427 tcp.hdr.dst_port = conf->port;
428 tcp_mask.hdr.dst_port =
429 rte_flow_item_tcp_mask.hdr.dst_port;
431 item.type = RTE_FLOW_ITEM_TYPE_TCP;
433 item.mask = &tcp_mask;
436 return flow_dv_convert_modify_action(&item, field, resource,
437 MLX5_MODIFICATION_TYPE_SET, error);
441 * Convert modify-header set TTL action to DV specification.
443 * @param[in,out] resource
444 * Pointer to the modify-header resource.
446 * Pointer to action specification.
448 * Pointer to rte_flow_item objects list.
450 * Pointer to flow attributes structure.
452 * Pointer to the error structure.
455 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Convert a set-TTL action into a SET modification. The IP version
 * (IPv4 TTL vs IPv6 hop limit) is chosen from the pattern via the
 * attr flags. */
458 flow_dv_convert_action_modify_ttl
459 (struct mlx5_flow_dv_modify_hdr_resource *resource,
460 const struct rte_flow_action *action,
461 const struct rte_flow_item *items,
462 union flow_dv_attr *attr,
463 struct rte_flow_error *error)
465 const struct rte_flow_action_set_ttl *conf =
466 (const struct rte_flow_action_set_ttl *)(action->conf);
467 struct rte_flow_item item;
468 struct rte_flow_item_ipv4 ipv4;
469 struct rte_flow_item_ipv4 ipv4_mask;
470 struct rte_flow_item_ipv6 ipv6;
471 struct rte_flow_item_ipv6 ipv6_mask;
472 struct field_modify_info *field;
/* Classify the pattern first so the right L3 branch is taken below. */
475 flow_dv_attr_init(items, attr);
/* IPv4 branch — NOTE(review): guard (attr->ipv4?) elided in listing. */
477 memset(&ipv4, 0, sizeof(ipv4));
478 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
479 ipv4.hdr.time_to_live = conf->ttl_value;
480 ipv4_mask.hdr.time_to_live = 0xFF;
481 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
483 item.mask = &ipv4_mask;
/* IPv6 branch — hop limit plays the TTL role. */
487 memset(&ipv6, 0, sizeof(ipv6));
488 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
489 ipv6.hdr.hop_limits = conf->ttl_value;
490 ipv6_mask.hdr.hop_limits = 0xFF;
491 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
493 item.mask = &ipv6_mask;
496 return flow_dv_convert_modify_action(&item, field, resource,
497 MLX5_MODIFICATION_TYPE_SET, error);
501 * Convert modify-header decrement TTL action to DV specification.
503 * @param[in,out] resource
504 * Pointer to the modify-header resource.
506 * Pointer to action specification.
508 * Pointer to rte_flow_item objects list.
510 * Pointer to flow attributes structure.
512 * Pointer to the error structure.
515 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Convert a decrement-TTL action. Unlike the set variant above it uses
 * MLX5_MODIFICATION_TYPE_ADD with value 0xFF (i.e. add -1 modulo 256)
 * on the TTL/hop-limit field. */
518 flow_dv_convert_action_modify_dec_ttl
519 (struct mlx5_flow_dv_modify_hdr_resource *resource,
520 const struct rte_flow_item *items,
521 union flow_dv_attr *attr,
522 struct rte_flow_error *error)
524 struct rte_flow_item item;
525 struct rte_flow_item_ipv4 ipv4;
526 struct rte_flow_item_ipv4 ipv4_mask;
527 struct rte_flow_item_ipv6 ipv6;
528 struct rte_flow_item_ipv6 ipv6_mask;
529 struct field_modify_info *field;
/* Classify the pattern first so the right L3 branch is taken below. */
532 flow_dv_attr_init(items, attr);
/* IPv4 branch — NOTE(review): guard (attr->ipv4?) elided in listing. */
534 memset(&ipv4, 0, sizeof(ipv4));
535 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
536 ipv4.hdr.time_to_live = 0xFF;
537 ipv4_mask.hdr.time_to_live = 0xFF;
538 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
540 item.mask = &ipv4_mask;
/* IPv6 branch. */
544 memset(&ipv6, 0, sizeof(ipv6));
545 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
546 ipv6.hdr.hop_limits = 0xFF;
547 ipv6_mask.hdr.hop_limits = 0xFF;
548 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
550 item.mask = &ipv6_mask;
553 return flow_dv_convert_modify_action(&item, field, resource,
554 MLX5_MODIFICATION_TYPE_ADD, error);
558 * Validate META item.
561 * Pointer to the rte_eth_dev structure.
563 * Item specification.
565 * Attributes of flow that includes this item.
567 * Pointer to error structure.
570 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a META item: requires the Tx metadata-match offload to be
 * enabled, a non-empty non-zero spec, an acceptable mask, and (per the
 * final check) an egress-side attribute. */
573 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
574 const struct rte_flow_item *item,
575 const struct rte_flow_attr *attr,
576 struct rte_flow_error *error)
578 const struct rte_flow_item_meta *spec = item->spec;
579 const struct rte_flow_item_meta *mask = item->mask;
/* Full 32-bit match is the widest mask the NIC supports here. */
580 const struct rte_flow_item_meta nic_mask = {
581 .data = RTE_BE32(UINT32_MAX)
584 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
586 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
587 return rte_flow_error_set(error, EPERM,
588 RTE_FLOW_ERROR_TYPE_ITEM,
590 "match on metadata offload "
591 "configuration is off for this port")
/* NOTE(review): the guards for the next two errors (!spec / !spec->data?)
 * are elided in this listing. */
593 return rte_flow_error_set(error, EINVAL,
594 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
596 "data cannot be empty");
598 return rte_flow_error_set(error, EINVAL,
599 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
601 "data cannot be zero");
/* Fall back to the generic META mask when none was supplied. */
603 mask = &rte_flow_item_meta_mask;
604 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
605 (const uint8_t *)&nic_mask,
606 sizeof(struct rte_flow_item_meta),
611 return rte_flow_error_set(error, ENOTSUP,
612 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
614 "pattern not supported for ingress");
619 * Validate vport item.
622 * Pointer to the rte_eth_dev structure.
624 * Item specification.
626 * Attributes of flow that includes this item.
627 * @param[in] item_flags
628 * Bit-fields that holds the items detected until now.
630 * Pointer to error structure.
633 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a PORT_ID item: only legal on transfer (E-Switch) flows,
 * at most once per pattern, with a full-id mask, and the referenced
 * port must share an E-Switch domain with this device. */
636 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
637 const struct rte_flow_item *item,
638 const struct rte_flow_attr *attr,
640 struct rte_flow_error *error)
642 const struct rte_flow_item_port_id *spec = item->spec;
643 const struct rte_flow_item_port_id *mask = item->mask;
644 const struct rte_flow_item_port_id switch_mask = {
647 uint16_t esw_domain_id;
648 uint16_t item_port_esw_domain_id;
/* NOTE(review): the !attr->transfer guard is elided in this listing. */
652 return rte_flow_error_set(error, EINVAL,
653 RTE_FLOW_ERROR_TYPE_ITEM,
655 "match on port id is valid only"
656 " when transfer flag is enabled");
657 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
658 return rte_flow_error_set(error, ENOTSUP,
659 RTE_FLOW_ERROR_TYPE_ITEM, item,
660 "multiple source ports are not"
/* Only an exact (all-ones) id mask is supported by the hardware path. */
664 if (mask->id != 0xffffffff)
665 return rte_flow_error_set(error, ENOTSUP,
666 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
668 "no support for partial mask on"
670 ret = mlx5_flow_item_acceptable
671 (item, (const uint8_t *)mask,
672 (const uint8_t *)&rte_flow_item_port_id_mask,
673 sizeof(struct rte_flow_item_port_id),
/* Resolve the E-Switch domain of the matched port... */
679 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
682 return rte_flow_error_set(error, -ret,
683 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
684 "failed to obtain E-Switch info for"
/* ...and of this device, then require they match. */
686 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
687 &esw_domain_id, NULL);
689 return rte_flow_error_set(error, -ret,
690 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
692 "failed to obtain E-Switch info");
693 if (item_port_esw_domain_id != esw_domain_id)
694 return rte_flow_error_set(error, -ret,
695 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
696 "cannot match on a port from a"
697 " different E-Switch");
702 * Validate count action.
707 * Pointer to error structure.
710 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a COUNT action: supported only when DevX is available
 * (priv->config.devx) and the build has DevX flow counters. */
713 flow_dv_validate_action_count(struct rte_eth_dev *dev,
714 struct rte_flow_error *error)
716 struct mlx5_priv *priv = dev->data->dev_private;
718 if (!priv->config.devx)
720 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
/* Without DevX counter support the action is rejected. */
724 return rte_flow_error_set
726 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
728 "count action not supported");
732 * Validate the L2 encap action.
734 * @param[in] action_flags
735 * Holds the actions detected until now.
737 * Pointer to the encap action.
739 * Pointer to flow attributes
741 * Pointer to error structure.
744 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate an L2 encap action: needs a configuration, must not be
 * combined with DROP or another encap/decap, and is not supported on
 * plain (non-transfer) ingress. */
747 flow_dv_validate_action_l2_encap(uint64_t action_flags,
748 const struct rte_flow_action *action,
749 const struct rte_flow_attr *attr,
750 struct rte_flow_error *error)
/* NOTE(review): the !action->conf guard is elided in this listing. */
753 return rte_flow_error_set(error, EINVAL,
754 RTE_FLOW_ERROR_TYPE_ACTION, action,
755 "configuration cannot be null");
756 if (action_flags & MLX5_FLOW_ACTION_DROP)
757 return rte_flow_error_set(error, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
759 "can't drop and encap in same flow");
760 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
761 return rte_flow_error_set(error, EINVAL,
762 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
763 "can only have a single encap or"
764 " decap action in a flow");
765 if (!attr->transfer && attr->ingress)
766 return rte_flow_error_set(error, ENOTSUP,
767 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
769 "encap action not supported for "
775 * Validate the L2 decap action.
777 * @param[in] action_flags
778 * Holds the actions detected until now.
780 * Pointer to flow attributes
782 * Pointer to error structure.
785 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate an L2 decap action: must not be combined with DROP, another
 * encap/decap, or follow a modify-header action; rejected on egress
 * per the final error. */
788 flow_dv_validate_action_l2_decap(uint64_t action_flags,
789 const struct rte_flow_attr *attr,
790 struct rte_flow_error *error)
792 if (action_flags & MLX5_FLOW_ACTION_DROP)
793 return rte_flow_error_set(error, EINVAL,
794 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
795 "can't drop and decap in same flow");
796 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
797 return rte_flow_error_set(error, EINVAL,
798 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
799 "can only have a single encap or"
800 " decap action in a flow");
/* Header rewrites must come after decap, not before it. */
801 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
802 return rte_flow_error_set(error, EINVAL,
803 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
804 "can't have decap action after"
807 return rte_flow_error_set(error, ENOTSUP,
808 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
810 "decap action not supported for "
816 * Validate the raw encap action.
818 * @param[in] action_flags
819 * Holds the actions detected until now.
821 * Pointer to the encap action.
823 * Pointer to flow attributes
825 * Pointer to error structure.
828 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a RAW_ENCAP action: needs a configuration, must not combine
 * with DROP or another encap; on non-transfer ingress it is only legal
 * after a RAW_DECAP (decap-then-encap rewrite). */
831 flow_dv_validate_action_raw_encap(uint64_t action_flags,
832 const struct rte_flow_action *action,
833 const struct rte_flow_attr *attr,
834 struct rte_flow_error *error)
/* NOTE(review): the !action->conf guard is elided in this listing. */
837 return rte_flow_error_set(error, EINVAL,
838 RTE_FLOW_ERROR_TYPE_ACTION, action,
839 "configuration cannot be null");
840 if (action_flags & MLX5_FLOW_ACTION_DROP)
841 return rte_flow_error_set(error, EINVAL,
842 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
843 "can't drop and encap in same flow");
844 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
845 return rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
847 "can only have a single encap"
848 " action in a flow");
849 /* encap without preceding decap is not supported for ingress */
850 if (!attr->transfer && attr->ingress &&
851 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
852 return rte_flow_error_set(error, ENOTSUP,
853 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
855 "encap action not supported for "
861 * Validate the raw decap action.
863 * @param[in] action_flags
864 * Holds the actions detected until now.
866 * Pointer to the encap action.
868 * Pointer to flow attributes
870 * Pointer to error structure.
873 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Validate a RAW_DECAP action: must not combine with DROP, must precede
 * any encap, be unique, and not follow a modify-header action; on
 * egress it is only accepted when a RAW_ENCAP follows in the list. */
876 flow_dv_validate_action_raw_decap(uint64_t action_flags,
877 const struct rte_flow_action *action,
878 const struct rte_flow_attr *attr,
879 struct rte_flow_error *error)
881 if (action_flags & MLX5_FLOW_ACTION_DROP)
882 return rte_flow_error_set(error, EINVAL,
883 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
884 "can't drop and decap in same flow");
885 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
886 return rte_flow_error_set(error, EINVAL,
887 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
888 "can't have encap action before"
890 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
891 return rte_flow_error_set(error, EINVAL,
892 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
893 "can only have a single decap"
894 " action in a flow");
895 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
896 return rte_flow_error_set(error, EINVAL,
897 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
898 "can't have decap action after"
900 /* decap action is valid on egress only if it is followed by encap */
/* Scan forward through the remaining actions looking for RAW_ENCAP. */
902 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
903 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
906 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
907 return rte_flow_error_set
909 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
910 NULL, "decap action not supported"
917 * Find existing encap/decap resource or create and register a new one.
919 * @param dev[in, out]
920 * Pointer to rte_eth_dev structure.
921 * @param[in, out] resource
922 * Pointer to encap/decap resource.
923 * @parm[in, out] dev_flow
924 * Pointer to the dev_flow.
926 * pointer to error structure.
929 * 0 on success otherwise -errno and errno is set.
/* Look up a matching packet-reformat (encap/decap) resource in the
 * shared-context cache; on a hit, bump its refcount and reuse it,
 * otherwise create the DV action, insert it, and attach it to the
 * dev_flow. */
932 flow_dv_encap_decap_resource_register
933 (struct rte_eth_dev *dev,
934 struct mlx5_flow_dv_encap_decap_resource *resource,
935 struct mlx5_flow *dev_flow,
936 struct rte_flow_error *error)
938 struct mlx5_priv *priv = dev->data->dev_private;
939 struct mlx5_ibv_shared *sh = priv->sh;
940 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
941 struct rte_flow *flow = dev_flow->flow;
942 struct mlx5dv_dr_ns *ns;
/* Group 0 flows get flags = 1, others 0 (root-table distinction). */
944 resource->flags = flow->group ? 0 : 1;
/* Pick the DR namespace matching the table type (FDB/Rx/Tx). */
945 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
947 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
952 /* Lookup a matching resource from cache. */
953 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
954 if (resource->reformat_type == cache_resource->reformat_type &&
955 resource->ft_type == cache_resource->ft_type &&
956 resource->flags == cache_resource->flags &&
957 resource->size == cache_resource->size &&
958 !memcmp((const void *)resource->buf,
959 (const void *)cache_resource->buf,
961 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
962 (void *)cache_resource,
963 rte_atomic32_read(&cache_resource->refcnt));
964 rte_atomic32_inc(&cache_resource->refcnt);
965 dev_flow->dv.encap_decap = cache_resource;
969 /* Register new encap/decap resource. */
970 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
972 return rte_flow_error_set(error, ENOMEM,
973 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
974 "cannot allocate resource memory");
975 *cache_resource = *resource;
976 cache_resource->verbs_action =
977 mlx5_glue->dv_create_flow_action_packet_reformat
978 (sh->ctx, cache_resource->reformat_type,
979 cache_resource->ft_type, ns, cache_resource->flags,
980 cache_resource->size,
981 (cache_resource->size ? cache_resource->buf : NULL));
/* On creation failure, free the cache entry before reporting. */
982 if (!cache_resource->verbs_action) {
983 rte_free(cache_resource);
984 return rte_flow_error_set(error, ENOMEM,
985 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
986 NULL, "cannot create action");
988 rte_atomic32_init(&cache_resource->refcnt);
989 rte_atomic32_inc(&cache_resource->refcnt);
990 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
991 dev_flow->dv.encap_decap = cache_resource;
992 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
993 (void *)cache_resource,
994 rte_atomic32_read(&cache_resource->refcnt));
999 * Find existing table jump resource or create and register a new one.
1001 * @param dev[in, out]
1002 * Pointer to rte_eth_dev structure.
1003 * @param[in, out] resource
1004 * Pointer to jump table resource.
1005 * @parm[in, out] dev_flow
1006 * Pointer to the dev_flow.
1008 * pointer to error structure.
1011 * 0 on success otherwise -errno and errno is set.
/* Look up a jump-to-table resource in the shared-context cache keyed by
 * the destination table; reuse with a refcount bump on hit, otherwise
 * create the DR dest-flow-table action and cache it. */
1014 flow_dv_jump_tbl_resource_register
1015 (struct rte_eth_dev *dev,
1016 struct mlx5_flow_dv_jump_tbl_resource *resource,
1017 struct mlx5_flow *dev_flow,
1018 struct rte_flow_error *error)
1020 struct mlx5_priv *priv = dev->data->dev_private;
1021 struct mlx5_ibv_shared *sh = priv->sh;
1022 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1024 /* Lookup a matching resource from cache. */
1025 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1026 if (resource->tbl == cache_resource->tbl) {
1027 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1028 (void *)cache_resource,
1029 rte_atomic32_read(&cache_resource->refcnt));
1030 rte_atomic32_inc(&cache_resource->refcnt);
1031 dev_flow->dv.jump = cache_resource;
1035 /* Register new jump table resource. */
1036 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1037 if (!cache_resource)
1038 return rte_flow_error_set(error, ENOMEM,
1039 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1040 "cannot allocate resource memory");
1041 *cache_resource = *resource;
1042 cache_resource->action =
1043 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1044 (resource->tbl->obj);
/* On creation failure, free the cache entry before reporting. */
1045 if (!cache_resource->action) {
1046 rte_free(cache_resource);
1047 return rte_flow_error_set(error, ENOMEM,
1048 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1049 NULL, "cannot create action");
1051 rte_atomic32_init(&cache_resource->refcnt);
1052 rte_atomic32_inc(&cache_resource->refcnt);
1053 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1054 dev_flow->dv.jump = cache_resource;
1055 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1056 (void *)cache_resource,
1057 rte_atomic32_read(&cache_resource->refcnt));
1062 * Find existing table port ID resource or create and register a new one.
1064 * @param dev[in, out]
1065 * Pointer to rte_eth_dev structure.
1066 * @param[in, out] resource
1067 * Pointer to port ID action resource.
1068 * @parm[in, out] dev_flow
1069 * Pointer to the dev_flow.
1071 * pointer to error structure.
1074 * 0 on success otherwise -errno and errno is set.
/* Look up a dest-vport (port-id) action resource in the shared-context
 * cache keyed by port id; reuse with a refcount bump on hit, otherwise
 * create the DR dest-vport action in the FDB namespace and cache it. */
1077 flow_dv_port_id_action_resource_register
1078 (struct rte_eth_dev *dev,
1079 struct mlx5_flow_dv_port_id_action_resource *resource,
1080 struct mlx5_flow *dev_flow,
1081 struct rte_flow_error *error)
1083 struct mlx5_priv *priv = dev->data->dev_private;
1084 struct mlx5_ibv_shared *sh = priv->sh;
1085 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1087 /* Lookup a matching resource from cache. */
1088 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1089 if (resource->port_id == cache_resource->port_id) {
1090 DRV_LOG(DEBUG, "port id action resource resource %p: "
1092 (void *)cache_resource,
1093 rte_atomic32_read(&cache_resource->refcnt));
1094 rte_atomic32_inc(&cache_resource->refcnt);
1095 dev_flow->dv.port_id_action = cache_resource;
1099 /* Register new port id action resource. */
1100 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1101 if (!cache_resource)
1102 return rte_flow_error_set(error, ENOMEM,
1103 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1104 "cannot allocate resource memory");
1105 *cache_resource = *resource;
1106 cache_resource->action =
1107 mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
/* On creation failure, free the cache entry before reporting. */
1109 if (!cache_resource->action) {
1110 rte_free(cache_resource);
1111 return rte_flow_error_set(error, ENOMEM,
1112 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1113 NULL, "cannot create action");
1115 rte_atomic32_init(&cache_resource->refcnt);
1116 rte_atomic32_inc(&cache_resource->refcnt);
1117 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1118 dev_flow->dv.port_id_action = cache_resource;
1119 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1120 (void *)cache_resource,
1121 rte_atomic32_read(&cache_resource->refcnt));
1126 * Get the size of specific rte_flow_item_type
1128 * @param[in] item_type
1129 * Tested rte_flow_item_type.
1132 * sizeof struct item_type, 0 if void or irrelevant.
/* Map an rte_flow item type to the byte size of its spec structure;
 * VOID (and, per the doc above, irrelevant types) yield 0. */
1135 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1139 switch (item_type) {
1140 case RTE_FLOW_ITEM_TYPE_ETH:
1141 retval = sizeof(struct rte_flow_item_eth);
1143 case RTE_FLOW_ITEM_TYPE_VLAN:
1144 retval = sizeof(struct rte_flow_item_vlan);
1146 case RTE_FLOW_ITEM_TYPE_IPV4:
1147 retval = sizeof(struct rte_flow_item_ipv4);
1149 case RTE_FLOW_ITEM_TYPE_IPV6:
1150 retval = sizeof(struct rte_flow_item_ipv6);
1152 case RTE_FLOW_ITEM_TYPE_UDP:
1153 retval = sizeof(struct rte_flow_item_udp);
1155 case RTE_FLOW_ITEM_TYPE_TCP:
1156 retval = sizeof(struct rte_flow_item_tcp);
1158 case RTE_FLOW_ITEM_TYPE_VXLAN:
1159 retval = sizeof(struct rte_flow_item_vxlan);
1161 case RTE_FLOW_ITEM_TYPE_GRE:
1162 retval = sizeof(struct rte_flow_item_gre);
1164 case RTE_FLOW_ITEM_TYPE_NVGRE:
1165 retval = sizeof(struct rte_flow_item_nvgre);
1167 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1168 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1170 case RTE_FLOW_ITEM_TYPE_MPLS:
1171 retval = sizeof(struct rte_flow_item_mpls);
1173 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1181 #define MLX5_ENCAP_IPV4_VERSION 0x40
1182 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1183 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1184 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1185 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1186 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1187 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Walks the item list, copies each item's spec into @p buf back to back,
 * and back-fills header fields the user left as zero (ether type, IP
 * version/TTL, next-protocol, UDP destination port, VXLAN flags) so the
 * resulting buffer is a valid encapsulation header chain.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1205 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1206 size_t *size, struct rte_flow_error *error)
/* Pointers into buf for headers already emitted; used for back-filling. */
1208 struct ether_hdr *eth = NULL;
1209 struct vlan_hdr *vlan = NULL;
1210 struct ipv4_hdr *ipv4 = NULL;
1211 struct ipv6_hdr *ipv6 = NULL;
1212 struct udp_hdr *udp = NULL;
1213 struct vxlan_hdr *vxlan = NULL;
1214 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
1215 struct gre_hdr *gre = NULL;
1217 size_t temp_size = 0;
1220 return rte_flow_error_set(error, EINVAL,
1221 RTE_FLOW_ERROR_TYPE_ACTION,
1222 NULL, "invalid empty data");
/* Append each item's header to the buffer, bounded by MLX5_ENCAP_MAX_LEN. */
1223 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1224 len = flow_dv_get_item_len(items->type);
1225 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1226 return rte_flow_error_set(error, EINVAL,
1227 RTE_FLOW_ERROR_TYPE_ACTION,
1228 (void *)items->type,
1229 "items total size is too big"
1230 " for encap action");
1231 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1232 switch (items->type) {
1233 case RTE_FLOW_ITEM_TYPE_ETH:
1234 eth = (struct ether_hdr *)&buf[temp_size];
/* VLAN must follow an ETH header; default its ether_type to VLAN TPID. */
1236 case RTE_FLOW_ITEM_TYPE_VLAN:
1237 vlan = (struct vlan_hdr *)&buf[temp_size];
1239 return rte_flow_error_set(error, EINVAL,
1240 RTE_FLOW_ERROR_TYPE_ACTION,
1241 (void *)items->type,
1242 "eth header not found");
1243 if (!eth->ether_type)
1244 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
/* Back-fill the preceding L2 header's type and default IPv4 fields. */
1246 case RTE_FLOW_ITEM_TYPE_IPV4:
1247 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
1249 return rte_flow_error_set(error, EINVAL,
1250 RTE_FLOW_ERROR_TYPE_ACTION,
1251 (void *)items->type,
1252 "neither eth nor vlan"
1254 if (vlan && !vlan->eth_proto)
1255 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
1256 else if (eth && !eth->ether_type)
1257 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
1258 if (!ipv4->version_ihl)
1259 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1260 MLX5_ENCAP_IPV4_IHL_MIN;
1261 if (!ipv4->time_to_live)
1262 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1264 case RTE_FLOW_ITEM_TYPE_IPV6:
1265 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
1267 return rte_flow_error_set(error, EINVAL,
1268 RTE_FLOW_ERROR_TYPE_ACTION,
1269 (void *)items->type,
1270 "neither eth nor vlan"
1272 if (vlan && !vlan->eth_proto)
1273 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
1274 else if (eth && !eth->ether_type)
1275 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
1276 if (!ipv6->vtc_flow)
1278 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1279 if (!ipv6->hop_limits)
1280 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
/* UDP needs a preceding IP header; default its next-protocol to UDP. */
1282 case RTE_FLOW_ITEM_TYPE_UDP:
1283 udp = (struct udp_hdr *)&buf[temp_size];
1285 return rte_flow_error_set(error, EINVAL,
1286 RTE_FLOW_ERROR_TYPE_ACTION,
1287 (void *)items->type,
1288 "ip header not found");
1289 if (ipv4 && !ipv4->next_proto_id)
1290 ipv4->next_proto_id = IPPROTO_UDP;
1291 else if (ipv6 && !ipv6->proto)
1292 ipv6->proto = IPPROTO_UDP;
/* VXLAN rides on UDP; force the well-known destination port and flags. */
1294 case RTE_FLOW_ITEM_TYPE_VXLAN:
1295 vxlan = (struct vxlan_hdr *)&buf[temp_size];
1297 return rte_flow_error_set(error, EINVAL,
1298 RTE_FLOW_ERROR_TYPE_ACTION,
1299 (void *)items->type,
1300 "udp header not found");
1302 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1303 if (!vxlan->vx_flags)
1305 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1307 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1308 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1310 return rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_ACTION,
1312 (void *)items->type,
1313 "udp header not found");
/* GPE carries an explicit next-protocol; user must supply it. */
1314 if (!vxlan_gpe->proto)
1315 return rte_flow_error_set(error, EINVAL,
1316 RTE_FLOW_ERROR_TYPE_ACTION,
1317 (void *)items->type,
1318 "next protocol not found");
1321 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1322 if (!vxlan_gpe->vx_flags)
1323 vxlan_gpe->vx_flags =
1324 MLX5_ENCAP_VXLAN_GPE_FLAGS;
/* GRE/NVGRE goes directly over IP; default IP next-protocol to GRE. */
1326 case RTE_FLOW_ITEM_TYPE_GRE:
1327 case RTE_FLOW_ITEM_TYPE_NVGRE:
1328 gre = (struct gre_hdr *)&buf[temp_size];
1330 return rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ACTION,
1332 (void *)items->type,
1333 "next protocol not found");
1335 return rte_flow_error_set(error, EINVAL,
1336 RTE_FLOW_ERROR_TYPE_ACTION,
1337 (void *)items->type,
1338 "ip header not found");
1339 if (ipv4 && !ipv4->next_proto_id)
1340 ipv4->next_proto_id = IPPROTO_GRE;
1341 else if (ipv6 && !ipv6->proto)
1342 ipv6->proto = IPPROTO_GRE;
1344 case RTE_FLOW_ITEM_TYPE_VOID:
1347 return rte_flow_error_set(error, EINVAL,
1348 RTE_FLOW_ERROR_TYPE_ACTION,
1349 (void *)items->type,
1350 "unsupported item type");
/**
 * Convert L2 encap action to DV specification.
 *
 * Extracts the encapsulation data (from a raw buffer for RAW_ENCAP, or by
 * converting the item list for VXLAN/NVGRE encap) and registers an
 * encap/decap resource attached to @p dev_flow.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1377 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1378 const struct rte_flow_action *action,
1379 struct mlx5_flow *dev_flow,
1381 struct rte_flow_error *error)
1383 const struct rte_flow_item *encap_data;
1384 const struct rte_flow_action_raw_encap *raw_encap_data;
/* E-Switch flows go to the FDB table type, others to NIC TX. */
1385 struct mlx5_flow_dv_encap_decap_resource res = {
1387 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1388 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1389 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
/* RAW_ENCAP supplies a ready-made buffer; other encaps need conversion. */
1392 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1394 (const struct rte_flow_action_raw_encap *)action->conf;
1395 res.size = raw_encap_data->size;
1396 memcpy(res.buf, raw_encap_data->data, res.size);
1398 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1400 ((const struct rte_flow_action_vxlan_encap *)
1401 action->conf)->definition;
1404 ((const struct rte_flow_action_nvgre_encap *)
1405 action->conf)->definition;
1406 if (flow_dv_convert_encap_data(encap_data, res.buf,
1410 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1411 return rte_flow_error_set(error, EINVAL,
1412 RTE_FLOW_ERROR_TYPE_ACTION,
1413 NULL, "can't create L2 encap action");
/**
 * Convert L2 decap action to DV specification.
 *
 * Registers an L2-tunnel-to-L2 reformat (decap) resource attached to
 * @p dev_flow; no data buffer is needed for plain L2 decap.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1433 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1434 struct mlx5_flow *dev_flow,
1436 struct rte_flow_error *error)
/* Decap applies on RX (or FDB for E-Switch), unlike encap which is TX. */
1438 struct mlx5_flow_dv_encap_decap_resource res = {
1441 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1442 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1443 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1446 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1447 return rte_flow_error_set(error, EINVAL,
1448 RTE_FLOW_ERROR_TYPE_ACTION,
1449 NULL, "can't create L2 decap action");
/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * The reformat direction is derived from the flow attributes: egress means
 * L2-to-L3-tunnel encap, ingress means L3-tunnel-to-L2 decap.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1471 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1472 const struct rte_flow_action *action,
1473 struct mlx5_flow *dev_flow,
1474 const struct rte_flow_attr *attr,
1475 struct rte_flow_error *error)
1477 const struct rte_flow_action_raw_encap *encap_data;
1478 struct mlx5_flow_dv_encap_decap_resource res;
1480 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1481 res.size = encap_data->size;
1482 memcpy(res.buf, encap_data->data, res.size);
/* Direction decides reformat type: egress encaps, ingress decaps. */
1483 res.reformat_type = attr->egress ?
1484 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1485 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1487 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1489 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1490 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1491 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1492 return rte_flow_error_set(error, EINVAL,
1493 RTE_FLOW_ERROR_TYPE_ACTION,
1494 NULL, "can't create encap action");
/**
 * Validate the modify-header actions.
 *
 * Common checks shared by all modify-header validators: the action
 * configuration must be present and no encap action may precede it.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1512 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1513 const struct rte_flow_action *action,
1514 struct rte_flow_error *error)
/* DEC_TTL is the only modify action that legitimately has no conf. */
1516 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1517 return rte_flow_error_set(error, EINVAL,
1518 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1519 NULL, "action configuration not set");
/* Header rewrites must be ordered before any encapsulation. */
1520 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1521 return rte_flow_error_set(error, EINVAL,
1522 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1523 "can't have encap action before"
/**
 * Validate the modify-header MAC address actions.
 *
 * Runs the generic modify-header checks, then requires an L2 item in the
 * pattern so the MAC rewrite has a header to act on.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1544 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1545 const struct rte_flow_action *action,
1546 const uint64_t item_flags,
1547 struct rte_flow_error *error)
1551 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1553 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1554 return rte_flow_error_set(error, EINVAL,
1555 RTE_FLOW_ERROR_TYPE_ACTION,
1557 "no L2 item in pattern");
/**
 * Validate the modify-header IPv4 address actions.
 *
 * Runs the generic modify-header checks, then requires an IPv4 item in
 * the pattern so the address rewrite has a header to act on.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1578 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1579 const struct rte_flow_action *action,
1580 const uint64_t item_flags,
1581 struct rte_flow_error *error)
1585 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1587 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1588 return rte_flow_error_set(error, EINVAL,
1589 RTE_FLOW_ERROR_TYPE_ACTION,
1591 "no ipv4 item in pattern");
/**
 * Validate the modify-header IPv6 address actions.
 *
 * Runs the generic modify-header checks, then requires an IPv6 item in
 * the pattern so the address rewrite has a header to act on.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1612 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1613 const struct rte_flow_action *action,
1614 const uint64_t item_flags,
1615 struct rte_flow_error *error)
1619 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1621 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1622 return rte_flow_error_set(error, EINVAL,
1623 RTE_FLOW_ERROR_TYPE_ACTION,
1625 "no ipv6 item in pattern");
/**
 * Validate the modify-header TP (transport port) actions.
 *
 * Runs the generic modify-header checks, then requires an L4 item
 * (TCP/UDP) in the pattern so the port rewrite has a header to act on.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1646 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1647 const struct rte_flow_action *action,
1648 const uint64_t item_flags,
1649 struct rte_flow_error *error)
1653 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1655 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1656 return rte_flow_error_set(error, EINVAL,
1657 RTE_FLOW_ERROR_TYPE_ACTION,
1658 NULL, "no transport layer "
/**
 * Validate the modify-header TTL actions.
 *
 * Runs the generic modify-header checks, then requires an L3 item in the
 * pattern so the TTL/hop-limit rewrite has a header to act on.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1680 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1681 const struct rte_flow_action *action,
1682 const uint64_t item_flags,
1683 struct rte_flow_error *error)
1687 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1689 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1690 return rte_flow_error_set(error, EINVAL,
1691 RTE_FLOW_ERROR_TYPE_ACTION,
1693 "no IP protocol in pattern");
1699 * Validate jump action.
1702 * Pointer to the modify action.
1704 * The group of the current flow.
1706 * Pointer to error structure.
1709 * 0 on success, a negative errno value otherwise and rte_errno is set.
1712 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1714 struct rte_flow_error *error)
1716 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1717 return rte_flow_error_set(error, EINVAL,
1718 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1719 NULL, "action configuration not set");
1720 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1721 return rte_flow_error_set(error, EINVAL,
1722 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1723 "target group must be higher then"
1724 " the current flow group");
/**
 * Validate the port_id action.
 *
 * Only valid for transfer (E-Switch) flows; checks that the action
 * configuration is present, that no other fate action was already seen,
 * and that the target port belongs to the same E-Switch domain as the
 * device programming the rule.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Port_id RTE action structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1746 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1747 uint64_t action_flags,
1748 const struct rte_flow_action *action,
1749 const struct rte_flow_attr *attr,
1750 struct rte_flow_error *error)
1752 const struct rte_flow_action_port_id *port_id;
1754 uint16_t esw_domain_id;
1755 uint16_t act_port_domain_id;
1758 if (!attr->transfer)
1759 return rte_flow_error_set(error, ENOTSUP,
1760 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1762 "port id action is valid in transfer"
1764 if (!action || !action->conf)
1765 return rte_flow_error_set(error, ENOTSUP,
1766 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1768 "port id action parameters must be"
/* PORT_ID is a fate action; it cannot coexist with another fate action. */
1770 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1771 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1772 return rte_flow_error_set(error, EINVAL,
1773 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1774 "can have only one fate actions in"
1776 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1777 &esw_domain_id, NULL);
1779 return rte_flow_error_set(error, -ret,
1780 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1782 "failed to obtain E-Switch info");
1783 port_id = action->conf;
/* "original" redirects back to the device issuing this rule. */
1784 port = port_id->original ? dev->data->port_id : port_id->id;
1785 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1787 return rte_flow_error_set
1789 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1790 "failed to obtain E-Switch port id for port");
/* Target port must live in the same E-Switch domain. */
1791 if (act_port_domain_id != esw_domain_id)
1792 return rte_flow_error_set
1794 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1795 "port does not belong to"
1796 " E-Switch being configured");
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Modify-header verbs actions are shared: the per-device cache in
 * sh->modify_cmds is searched first and only a miss allocates a new
 * resource, reference counted for release.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
1816 flow_dv_modify_hdr_resource_register
1817 (struct rte_eth_dev *dev,
1818 struct mlx5_flow_dv_modify_hdr_resource *resource,
1819 struct mlx5_flow *dev_flow,
1820 struct rte_flow_error *error)
1822 struct mlx5_priv *priv = dev->data->dev_private;
1823 struct mlx5_ibv_shared *sh = priv->sh;
1824 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1825 struct mlx5dv_dr_ns *ns;
/* Pick the DR namespace matching the resource's table type. */
1827 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1829 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
1833 /* Lookup a matching resource from cache. */
1834 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1835 if (resource->ft_type == cache_resource->ft_type &&
1836 resource->actions_num == cache_resource->actions_num &&
1837 !memcmp((const void *)resource->actions,
1838 (const void *)cache_resource->actions,
1839 (resource->actions_num *
1840 sizeof(resource->actions[0])))) {
1841 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1842 (void *)cache_resource,
1843 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: just bump the reference count and reuse. */
1844 rte_atomic32_inc(&cache_resource->refcnt);
1845 dev_flow->dv.modify_hdr = cache_resource;
1849 /* Register new modify-header resource. */
1850 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1851 if (!cache_resource)
1852 return rte_flow_error_set(error, ENOMEM,
1853 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1854 "cannot allocate resource memory");
1855 *cache_resource = *resource;
1856 cache_resource->verbs_action =
1857 mlx5_glue->dv_create_flow_action_modify_header
1858 (sh->ctx, cache_resource->ft_type,
1860 cache_resource->actions_num *
1861 sizeof(cache_resource->actions[0]),
1862 (uint64_t *)cache_resource->actions);
/* Creation failed: free the copy before reporting the error. */
1863 if (!cache_resource->verbs_action) {
1864 rte_free(cache_resource);
1865 return rte_flow_error_set(error, ENOMEM,
1866 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1867 NULL, "cannot create action");
1869 rte_atomic32_init(&cache_resource->refcnt);
1870 rte_atomic32_inc(&cache_resource->refcnt);
1871 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1872 dev_flow->dv.modify_hdr = cache_resource;
1873 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1874 (void *)cache_resource,
1875 rte_atomic32_read(&cache_resource->refcnt));
/**
 * Get or create a flow counter.
 *
 * Requires DevX support.  Shared counters are looked up by @p id in the
 * per-device list and reused; otherwise a new DevX counter set and its
 * verbs counter action are allocated and inserted into the list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
1892 static struct mlx5_flow_counter *
1893 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1895 struct mlx5_priv *priv = dev->data->dev_private;
1896 struct mlx5_flow_counter *cnt = NULL;
1897 struct mlx5_devx_counter_set *dcs = NULL;
/* DevX is mandatory for flow counters in the DV path. */
1900 if (!priv->config.devx) {
/* Reuse an existing shared counter with a matching id. */
1905 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1906 if (cnt->shared && cnt->id == id) {
1912 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1913 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1918 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1921 struct mlx5_flow_counter tmpl = {
1927 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1933 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
/**
 * Release a flow counter.
 *
 * Drops one reference; when the last reference is gone the DevX counter
 * is freed and the entry is removed from the per-device list.
 *
 * @param[in] counter
 *   Pointer to the counter handler.
 */
1949 flow_dv_counter_release(struct mlx5_flow_counter *counter)
/* Free the underlying DevX object only on the last reference. */
1955 if (--counter->ref_cnt == 0) {
1956 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1958 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1959 LIST_REMOVE(counter, next);
1960 rte_free(counter->dcs);
1966 * Verify the @p attributes will be correctly understood by the NIC and store
1967 * them in the @p flow if everything is correct.
1970 * Pointer to dev struct.
1971 * @param[in] attributes
1972 * Pointer to flow attributes
1974 * Pointer to error structure.
1977 * 0 on success, a negative errno value otherwise and rte_errno is set.
1980 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1981 const struct rte_flow_attr *attributes,
1982 struct rte_flow_error *error)
1984 struct mlx5_priv *priv = dev->data->dev_private;
1985 uint32_t priority_max = priv->config.flow_prio - 1;
1987 #ifndef HAVE_MLX5DV_DR
1988 if (attributes->group)
1989 return rte_flow_error_set(error, ENOTSUP,
1990 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1992 "groups is not supported");
1994 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1995 attributes->priority >= priority_max)
1996 return rte_flow_error_set(error, ENOTSUP,
1997 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1999 "priority out of range");
2000 if (attributes->transfer) {
2001 if (!priv->config.dv_esw_en)
2002 return rte_flow_error_set
2004 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2005 "E-Switch dr is not supported");
2006 if (!(priv->representor || priv->master))
2007 return rte_flow_error_set
2008 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2009 NULL, "E-Switch configurationd can only be"
2010 " done by a master or a representor device");
2011 if (attributes->egress)
2012 return rte_flow_error_set
2014 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2015 "egress is not supported");
2016 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2017 return rte_flow_error_set
2019 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2020 NULL, "group must be smaller than "
2021 RTE_STR(MLX5_MAX_FDB_TABLES));
2023 if (!(attributes->egress ^ attributes->ingress))
2024 return rte_flow_error_set(error, ENOTSUP,
2025 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2026 "must specify exactly one of "
2027 "ingress or egress");
/**
 * Internal validation function. For validating both actions and items.
 *
 * Two passes: first every pattern item is validated and its layer flag is
 * accumulated in item_flags; then every action is validated against those
 * item flags, with extra E-Switch (transfer) restrictions applied at the
 * end.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2049 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2050 const struct rte_flow_item items[],
2051 const struct rte_flow_action actions[],
2052 struct rte_flow_error *error)
2055 uint64_t action_flags = 0;
2056 uint64_t item_flags = 0;
2057 uint64_t last_item = 0;
/* 0xff means "any" until an IP item pins the next protocol. */
2058 uint8_t next_protocol = 0xff;
2063 ret = flow_dv_validate_attributes(dev, attr, error);
/* Pass 1: validate items and accumulate the detected layer flags. */
2066 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2067 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2068 switch (items->type) {
2069 case RTE_FLOW_ITEM_TYPE_VOID:
2071 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2072 ret = flow_dv_validate_item_port_id
2073 (dev, items, attr, item_flags, error);
2076 last_item |= MLX5_FLOW_ITEM_PORT_ID;
2078 case RTE_FLOW_ITEM_TYPE_ETH:
2079 ret = mlx5_flow_validate_item_eth(items, item_flags,
2083 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2084 MLX5_FLOW_LAYER_OUTER_L2;
2086 case RTE_FLOW_ITEM_TYPE_VLAN:
2087 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2091 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2092 MLX5_FLOW_LAYER_OUTER_VLAN;
2094 case RTE_FLOW_ITEM_TYPE_IPV4:
2095 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2099 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2100 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Record next protocol only when the mask actually covers it. */
2101 if (items->mask != NULL &&
2102 ((const struct rte_flow_item_ipv4 *)
2103 items->mask)->hdr.next_proto_id) {
2105 ((const struct rte_flow_item_ipv4 *)
2106 (items->spec))->hdr.next_proto_id;
2108 ((const struct rte_flow_item_ipv4 *)
2109 (items->mask))->hdr.next_proto_id;
2111 /* Reset for inner layer. */
2112 next_protocol = 0xff;
2115 case RTE_FLOW_ITEM_TYPE_IPV6:
2116 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2120 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2121 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2122 if (items->mask != NULL &&
2123 ((const struct rte_flow_item_ipv6 *)
2124 items->mask)->hdr.proto) {
2126 ((const struct rte_flow_item_ipv6 *)
2127 items->spec)->hdr.proto;
2129 ((const struct rte_flow_item_ipv6 *)
2130 items->mask)->hdr.proto;
2132 /* Reset for inner layer. */
2133 next_protocol = 0xff;
2136 case RTE_FLOW_ITEM_TYPE_TCP:
2137 ret = mlx5_flow_validate_item_tcp
2140 &rte_flow_item_tcp_mask,
2144 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2145 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2147 case RTE_FLOW_ITEM_TYPE_UDP:
2148 ret = mlx5_flow_validate_item_udp(items, item_flags,
2153 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2154 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2156 case RTE_FLOW_ITEM_TYPE_GRE:
2157 case RTE_FLOW_ITEM_TYPE_NVGRE:
2158 ret = mlx5_flow_validate_item_gre(items, item_flags,
2159 next_protocol, error);
2162 last_item = MLX5_FLOW_LAYER_GRE;
2164 case RTE_FLOW_ITEM_TYPE_VXLAN:
2165 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2169 last_item = MLX5_FLOW_LAYER_VXLAN;
2171 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2172 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2177 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2179 case RTE_FLOW_ITEM_TYPE_MPLS:
2180 ret = mlx5_flow_validate_item_mpls(dev, items,
2185 last_item = MLX5_FLOW_LAYER_MPLS;
2187 case RTE_FLOW_ITEM_TYPE_META:
2188 ret = flow_dv_validate_item_meta(dev, items, attr,
2192 last_item = MLX5_FLOW_ITEM_METADATA;
2195 return rte_flow_error_set(error, ENOTSUP,
2196 RTE_FLOW_ERROR_TYPE_ITEM,
2197 NULL, "item not supported");
2199 item_flags |= last_item;
/* Pass 2: validate actions against the accumulated item layers. */
2201 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2202 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2203 return rte_flow_error_set(error, ENOTSUP,
2204 RTE_FLOW_ERROR_TYPE_ACTION,
2205 actions, "too many actions");
2206 switch (actions->type) {
2207 case RTE_FLOW_ACTION_TYPE_VOID:
2209 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2210 ret = flow_dv_validate_action_port_id(dev,
2217 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2220 case RTE_FLOW_ACTION_TYPE_FLAG:
2221 ret = mlx5_flow_validate_action_flag(action_flags,
2225 action_flags |= MLX5_FLOW_ACTION_FLAG;
2228 case RTE_FLOW_ACTION_TYPE_MARK:
2229 ret = mlx5_flow_validate_action_mark(actions,
2234 action_flags |= MLX5_FLOW_ACTION_MARK;
2237 case RTE_FLOW_ACTION_TYPE_DROP:
2238 ret = mlx5_flow_validate_action_drop(action_flags,
2242 action_flags |= MLX5_FLOW_ACTION_DROP;
2245 case RTE_FLOW_ACTION_TYPE_QUEUE:
2246 ret = mlx5_flow_validate_action_queue(actions,
2251 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2254 case RTE_FLOW_ACTION_TYPE_RSS:
2255 ret = mlx5_flow_validate_action_rss(actions,
2261 action_flags |= MLX5_FLOW_ACTION_RSS;
2264 case RTE_FLOW_ACTION_TYPE_COUNT:
2265 ret = flow_dv_validate_action_count(dev, error);
2268 action_flags |= MLX5_FLOW_ACTION_COUNT;
2271 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2272 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2273 ret = flow_dv_validate_action_l2_encap(action_flags,
2278 action_flags |= actions->type ==
2279 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2280 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2281 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2284 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2285 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2286 ret = flow_dv_validate_action_l2_decap(action_flags,
2290 action_flags |= actions->type ==
2291 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2292 MLX5_FLOW_ACTION_VXLAN_DECAP :
2293 MLX5_FLOW_ACTION_NVGRE_DECAP;
2296 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2297 ret = flow_dv_validate_action_raw_encap(action_flags,
2302 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2305 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2306 ret = flow_dv_validate_action_raw_decap(action_flags,
2311 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2314 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2315 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2316 ret = flow_dv_validate_action_modify_mac(action_flags,
2322 /* Count all modify-header actions as one action. */
2323 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2325 action_flags |= actions->type ==
2326 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2327 MLX5_FLOW_ACTION_SET_MAC_SRC :
2328 MLX5_FLOW_ACTION_SET_MAC_DST;
2331 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2332 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2333 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2339 /* Count all modify-header actions as one action. */
2340 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2342 action_flags |= actions->type ==
2343 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2344 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2345 MLX5_FLOW_ACTION_SET_IPV4_DST;
2347 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2348 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2349 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2355 /* Count all modify-header actions as one action. */
2356 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2358 action_flags |= actions->type ==
2359 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2360 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2361 MLX5_FLOW_ACTION_SET_IPV6_DST;
2363 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2364 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2365 ret = flow_dv_validate_action_modify_tp(action_flags,
2371 /* Count all modify-header actions as one action. */
2372 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2374 action_flags |= actions->type ==
2375 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2376 MLX5_FLOW_ACTION_SET_TP_SRC :
2377 MLX5_FLOW_ACTION_SET_TP_DST;
2379 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2380 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2381 ret = flow_dv_validate_action_modify_ttl(action_flags,
2387 /* Count all modify-header actions as one action. */
2388 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2390 action_flags |= actions->type ==
2391 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2392 MLX5_FLOW_ACTION_SET_TTL :
2393 MLX5_FLOW_ACTION_DEC_TTL;
2395 case RTE_FLOW_ACTION_TYPE_JUMP:
2396 ret = flow_dv_validate_action_jump(actions,
2397 attr->group, error);
2401 action_flags |= MLX5_FLOW_ACTION_JUMP;
2404 return rte_flow_error_set(error, ENOTSUP,
2405 RTE_FLOW_ERROR_TYPE_ACTION,
2407 "action not supported");
2410 /* Eswitch has few restrictions on using items and actions */
2411 if (attr->transfer) {
2412 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2413 return rte_flow_error_set(error, ENOTSUP,
2414 RTE_FLOW_ERROR_TYPE_ACTION,
2416 "unsupported action FLAG");
2417 if (action_flags & MLX5_FLOW_ACTION_MARK)
2418 return rte_flow_error_set(error, ENOTSUP,
2419 RTE_FLOW_ERROR_TYPE_ACTION,
2421 "unsupported action MARK");
2422 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2423 return rte_flow_error_set(error, ENOTSUP,
2424 RTE_FLOW_ERROR_TYPE_ACTION,
2426 "unsupported action QUEUE");
2427 if (action_flags & MLX5_FLOW_ACTION_RSS)
2428 return rte_flow_error_set(error, ENOTSUP,
2429 RTE_FLOW_ERROR_TYPE_ACTION,
2431 "unsupported action RSS");
2432 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2433 return rte_flow_error_set(error, EINVAL,
2434 RTE_FLOW_ERROR_TYPE_ACTION,
2436 "no fate action is found");
2438 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2439 return rte_flow_error_set(error, EINVAL,
2440 RTE_FLOW_ERROR_TYPE_ACTION,
2442 "no fate action is found");
/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
2464 static struct mlx5_flow *
2465 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2466 const struct rte_flow_item items[] __rte_unused,
2467 const struct rte_flow_action actions[] __rte_unused,
2468 struct rte_flow_error *error)
/* The DV flow layout is fixed, so a constant-size zeroed allocation. */
2470 uint32_t size = sizeof(struct mlx5_flow);
2471 struct mlx5_flow *flow;
2473 flow = rte_calloc(__func__, 1, size, 0);
2475 rte_flow_error_set(error, ENOMEM,
2476 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2477 "not enough memory to create flow");
/* Matcher value buffer is always a full fte_match_param. */
2480 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
2498 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2500 uint8_t *m = match_mask;
2501 uint8_t *v = match_value;
/* Byte-wise scan: every set value bit must also be set in the mask. */
2504 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2507 "match_value differs from match_criteria"
2508 " %p[%u] != %p[%u]",
2509 match_value, i, match_mask, i);
/**
 * Add Ethernet item to matcher and to the value.
 *
 * Writes the MAC addresses and ether type of the ETH item into the
 * inner or outer headers section of the matcher mask and value buffers;
 * value bytes are ANDed with the mask so the value never exceeds it.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
2530 flow_dv_translate_item_eth(void *matcher, void *key,
2531 const struct rte_flow_item *item, int inner)
2533 const struct rte_flow_item_eth *eth_m = item->mask;
2534 const struct rte_flow_item_eth *eth_v = item->spec;
/* Default mask used when the item carries none: match everything. */
2535 const struct rte_flow_item_eth nic_mask = {
2536 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2537 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2538 .type = RTE_BE16(0xffff),
2550 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2552 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers)
2554 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2556 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/*
 * Fixed mojibake: "&eth_m" had been corrupted to the character
 * sequence "ð_m" (HTML-entity damage) on the two memcpy lines.
 */
2558 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2559 &eth_m->dst, sizeof(eth_m->dst));
2560 /* The value must be in the range of the mask. */
2561 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2562 for (i = 0; i < sizeof(eth_m->dst); ++i)
2563 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2564 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2565 &eth_m->src, sizeof(eth_m->src));
2566 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2567 /* The value must be in the range of the mask. */
2568 for (i = 0; i < sizeof(eth_m->dst); ++i)
2569 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2570 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2571 rte_be_to_cpu_16(eth_m->type));
2572 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
/* Both operands are big-endian, so the AND result stays big-endian. */
2573 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2577 * Add VLAN item to matcher and to the value.
2579 * @param[in, out] matcher
2581 * @param[in, out] key
2582 * Flow matcher value.
2584 * Flow pattern to translate.
2586 * Item is inner pattern.
2589 flow_dv_translate_item_vlan(void *matcher, void *key,
2590			    const struct rte_flow_item *item,
/*
 * Translate a VLAN flow item: mask goes to "matcher", masked value to
 * "key". TCI is decomposed into VID / CFI / PCP sub-fields below.
 */
2593	const struct rte_flow_item_vlan *vlan_m = item->mask;
2594	const struct rte_flow_item_vlan *vlan_v = item->spec;
/* NIC-supported mask: 12-bit VID plus full inner EtherType. */
2595	const struct rte_flow_item_vlan nic_mask = {
2596		.tci = RTE_BE16(0x0fff),
2597		.inner_type = RTE_BE16(0xffff),
/* Select inner vs. outer header area of fte_match_param. */
2609		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2611		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2613		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2615		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Convert TCI to CPU order; value is pre-masked with the TCI mask. */
2617	tci_m = rte_be_to_cpu_16(vlan_m->tci);
2618	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of a C-VLAN tag is always required when this item is used. */
2619	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2620	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
/* TCI layout: VID = bits [11:0], CFI = bit 12, PCP = bits [15:13]. */
2621	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2622	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2623	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2624	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2625	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2626	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2630 * Add IPV4 item to matcher and to the value.
2632 * @param[in, out] matcher
2634 * @param[in, out] key
2635 * Flow matcher value.
2637 * Flow pattern to translate.
2639 * Item is inner pattern.
2641 * The group to insert the rule.
2644 flow_dv_translate_item_ipv4(void *matcher, void *key,
2645			    const struct rte_flow_item *item,
2646			    int inner, uint32_t group)
/*
 * Translate an IPv4 flow item; "group" selects how strictly the IP
 * version nibble is masked (see the 0xf vs. 0x4 settings below).
 */
2648	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2649	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* NIC-supported mask: full src/dst addresses, ToS and protocol. */
2650	const struct rte_flow_item_ipv4 nic_mask = {
2652			.src_addr = RTE_BE32(0xffffffff),
2653			.dst_addr = RTE_BE32(0xffffffff),
2654			.type_of_service = 0xff,
2655			.next_proto_id = 0xff,
/* Select inner vs. outer header area of fte_match_param. */
2665		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2667		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2669		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2671		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask: full nibble (0xf) in one branch, 0x4 in the other;
 * the guarding conditional is not visible in this extract — presumably
 * keyed on "group" per the doc comment. Value is always version 4. */
2674		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2676		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2677	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Addresses are stored raw (big-endian) with the value pre-masked. */
2682	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2683			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2684	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2685			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2686	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2687	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2688	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2689			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
2690	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2691			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
2692	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2693	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* Split ToS into the ECN field (low bits, truncated by MLX5_SET) and
 * DSCP (upper 6 bits, hence the >> 2). */
2694	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2695	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2696		 ipv4_m->hdr.type_of_service);
2697	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2698	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2699		 ipv4_m->hdr.type_of_service >> 2);
2700	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
/* L4 protocol match; value pre-masked as elsewhere. */
2701	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2702		 ipv4_m->hdr.next_proto_id);
2703	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2704		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2708 * Add IPV6 item to matcher and to the value.
2710 * @param[in, out] matcher
2712 * @param[in, out] key
2713 * Flow matcher value.
2715 * Flow pattern to translate.
2717 * Item is inner pattern.
2719 * The group to insert the rule.
2722 flow_dv_translate_item_ipv6(void *matcher, void *key,
2723			    const struct rte_flow_item *item,
2724			    int inner, uint32_t group)
/*
 * Translate an IPv6 flow item. Addresses and the vtc_flow word (version /
 * traffic class / flow label) are split over the L3 header area and the
 * misc parameters (flow label) of fte_match_param.
 */
2726	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2727	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* NIC-supported mask: full 128-bit src/dst plus full vtc_flow. */
2728	const struct rte_flow_item_ipv6 nic_mask = {
2731			"\xff\xff\xff\xff\xff\xff\xff\xff"
2732			"\xff\xff\xff\xff\xff\xff\xff\xff",
2734			"\xff\xff\xff\xff\xff\xff\xff\xff"
2735			"\xff\xff\xff\xff\xff\xff\xff\xff",
2736		.vtc_flow = RTE_BE32(0xffffffff),
2743	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2744	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header area of fte_match_param. */
2753		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2755		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2757		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2759		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask: 0xf in one branch, 0x6 in the other; the guarding
 * conditional is elided here — presumably keyed on "group". */
2762		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2764		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2765	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Copy both 16-byte addresses; value buffer gets mask & spec per byte. */
2770	size = sizeof(ipv6_m->hdr.dst_addr);
2771	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2772			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2773	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2774			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2775	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2776	for (i = 0; i < size; ++i)
2777		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2778	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2779			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
2780	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2781			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
2782	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2783	for (i = 0; i < size; ++i)
2784		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* Traffic class occupies bits [27:20] of vtc_flow: >> 20 exposes the
 * whole TC (ECN field truncates it to its low 2 bits), >> 22 the DSCP. */
2786	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2787	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2788	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2789	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2790	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2791	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label lives in misc params; inner/outer field selected by the
 * (elided) inner check. */
2794		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2796		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2799		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2801		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
/* Next-header (L4 protocol) match, value pre-masked. */
2805	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2807	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2808		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2812 * Add TCP item to matcher and to the value.
2814 * @param[in, out] matcher
2816 * @param[in, out] key
2817 * Flow matcher value.
2819 * Flow pattern to translate.
2821 * Item is inner pattern.
2824 flow_dv_translate_item_tcp(void *matcher, void *key,
2825			   const struct rte_flow_item *item,
/*
 * Translate a TCP flow item: pin ip_protocol to TCP and match the
 * source/destination ports.
 */
2828	const struct rte_flow_item_tcp *tcp_m = item->mask;
2829	const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs. outer header area of fte_match_param. */
2834		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2836		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2838		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2840		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Force an exact-protocol match on TCP regardless of the item mask. */
2842	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2843	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the rte_flow default TCP mask (guard elided — presumably
 * taken when item->mask is NULL). */
2847		tcp_m = &rte_flow_item_tcp_mask;
/* Ports are big-endian in the item; MLX5_SET takes CPU-order values. */
2848	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2849		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
2850	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2851		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2852	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2853		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2854	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2855		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2859 * Add UDP item to matcher and to the value.
2861 * @param[in, out] matcher
2863 * @param[in, out] key
2864 * Flow matcher value.
2866 * Flow pattern to translate.
2868 * Item is inner pattern.
2871 flow_dv_translate_item_udp(void *matcher, void *key,
2872			   const struct rte_flow_item *item,
/*
 * Translate a UDP flow item: pin ip_protocol to UDP and match the
 * source/destination ports. Mirrors flow_dv_translate_item_tcp().
 */
2875	const struct rte_flow_item_udp *udp_m = item->mask;
2876	const struct rte_flow_item_udp *udp_v = item->spec;
/* Select inner vs. outer header area of fte_match_param. */
2881		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2883		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2885		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2887		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Force an exact-protocol match on UDP regardless of the item mask. */
2889	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2890	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Fall back to the rte_flow default UDP mask (guard elided — presumably
 * taken when item->mask is NULL). */
2894		udp_m = &rte_flow_item_udp_mask;
/* Ports are big-endian in the item; MLX5_SET takes CPU-order values. */
2895	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2896		 rte_be_to_cpu_16(udp_m->hdr.src_port));
2897	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2898		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2899	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2900		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
2901	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2902		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2906 * Add GRE item to matcher and to the value.
2908 * @param[in, out] matcher
2910 * @param[in, out] key
2911 * Flow matcher value.
2913 * Flow pattern to translate.
2915 * Item is inner pattern.
2918 flow_dv_translate_item_gre(void *matcher, void *key,
2919			   const struct rte_flow_item *item,
/*
 * Translate a GRE flow item: pin ip_protocol to GRE and match the GRE
 * protocol field in the misc parameters.
 */
2922	const struct rte_flow_item_gre *gre_m = item->mask;
2923	const struct rte_flow_item_gre *gre_v = item->spec;
2926	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2927	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header area of fte_match_param. */
2930		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2932		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2934		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2936		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Force an exact-protocol match on GRE regardless of the item mask. */
2938	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2939	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Fall back to the rte_flow default GRE mask (guard elided — presumably
 * taken when item->mask is NULL). */
2943		gre_m = &rte_flow_item_gre_mask;
/* GRE protocol (encapsulated EtherType) match; value pre-masked. */
2944	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2945		 rte_be_to_cpu_16(gre_m->protocol));
2946	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2947		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2951 * Add NVGRE item to matcher and to the value.
2953 * @param[in, out] matcher
2955 * @param[in, out] key
2956 * Flow matcher value.
2958 * Flow pattern to translate.
2960 * Item is inner pattern.
2963 flow_dv_translate_item_nvgre(void *matcher, void *key,
2964			     const struct rte_flow_item *item,
/*
 * Translate an NVGRE flow item. Delegates the GRE protocol/IP-protocol
 * matching to flow_dv_translate_item_gre(), then matches the NVGRE
 * TNI + flow_id bytes through the high GRE key field.
 */
2967	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2968	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2969	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2970	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* View TNI (+ adjacent flow_id) as a flat byte stream for the copy. */
2971	const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2972	const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* Reuse the plain-GRE translation for the common header matching. */
2978	flow_dv_translate_item_gre(matcher, key, item, inner);
/* Fall back to the rte_flow default NVGRE mask (guard elided —
 * presumably taken when item->mask is NULL). */
2982		nvgre_m = &rte_flow_item_nvgre_mask;
/* TNI is 3 bytes, flow_id 1 byte; together they fill gre_key_h. */
2983	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2984	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2985	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2986	memcpy(gre_key_m, tni_flow_id_m, size);
2987	for (i = 0; i < size; ++i)
2988		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2992 * Add VXLAN item to matcher and to the value.
2994 * @param[in, out] matcher
2996 * @param[in, out] key
2997 * Flow matcher value.
2999 * Flow pattern to translate.
3001 * Item is inner pattern.
3004 flow_dv_translate_item_vxlan(void *matcher, void *key,
3005			     const struct rte_flow_item *item,
/*
 * Translate a VXLAN / VXLAN-GPE flow item: ensure the UDP destination
 * port is matched (unless the caller already set one) and match the
 * 24-bit VNI in the misc parameters.
 */
3008	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3009	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3012	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3013	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header area of fte_match_param. */
3021		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3023		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3025		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3027		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port matching the tunnel flavor. */
3029	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3030		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the dport match when no UDP item set one already. */
3031	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3032		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3033		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Fall back to the rte_flow default VXLAN mask (guard elided —
 * presumably taken when item->mask is NULL). */
3038		vxlan_m = &rte_flow_item_vxlan_mask;
/* VNI is a 3-byte field; value is pre-masked per byte. */
3039	size = sizeof(vxlan_m->vni);
3040	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3041	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3042	memcpy(vni_m, vxlan_m->vni, size);
3043	for (i = 0; i < size; ++i)
3044		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3048 * Add MPLS item to matcher and to the value.
3050 * @param[in, out] matcher
3052 * @param[in, out] key
3053 * Flow matcher value.
3055 * Flow pattern to translate.
3056 * @param[in] prev_layer
3057 * The protocol layer indicated in previous item.
3059 * Item is inner pattern.
3062 flow_dv_translate_item_mpls(void *matcher, void *key,
3063			    const struct rte_flow_item *item,
3064			    uint64_t prev_layer,
/*
 * Translate an MPLS flow item. "prev_layer" (the protocol layer of the
 * preceding item) decides both which encapsulating protocol field to
 * pin to MPLS and where in misc_parameters_2 the label is matched.
 */
3067	const uint32_t *in_mpls_m = item->mask;
3068	const uint32_t *in_mpls_v = item->spec;
/* Destination slots for the 32-bit label word; stay NULL when
 * unsupported so the final guarded copy is skipped. */
3069	uint32_t *out_mpls_m = 0;
3070	uint32_t *out_mpls_v = 0;
3071	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3072	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3073	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3075	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3076	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3077	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First: pin the carrier protocol (UDP dport / GRE protocol / default
 * IP protocol) to MPLS according to the previous layer. */
3079	switch (prev_layer) {
3080	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3081		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3082		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3083			 MLX5_UDP_PORT_MPLS);
3085	case MLX5_FLOW_LAYER_GRE:
3086		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3087		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3091		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3092		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Fall back to the rte_flow default MPLS mask (guard elided —
 * presumably taken when item->mask is NULL). */
3099		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second: select where the first MPLS label is matched. */
3100	switch (prev_layer) {
3101	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3103			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3104						 outer_first_mpls_over_udp);
3106			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3107						 outer_first_mpls_over_udp);
3109	case MLX5_FLOW_LAYER_GRE:
3111			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3112						 outer_first_mpls_over_gre);
3114			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3115						 outer_first_mpls_over_gre);
3118		/* Inner MPLS not over GRE is not supported. */
3121			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3125			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Copy mask and pre-masked value only when a destination was chosen. */
3131	if (out_mpls_m && out_mpls_v) {
3132		*out_mpls_m = *in_mpls_m;
3133		*out_mpls_v = *in_mpls_v & *in_mpls_m;
3138 * Add META item to matcher and to the value.
3140 * @param[in, out] matcher
3142 * @param[in, out] key
3143 * Flow matcher value.
3145 * Flow pattern to translate.
3147 * Item is inner pattern.
3150 flow_dv_translate_item_meta(void *matcher, void *key,
3151			    const struct rte_flow_item *item)
/*
 * Translate a META flow item: the application-set metadata is matched
 * through metadata_reg_a in misc_parameters_2.
 */
3153	const struct rte_flow_item_meta *meta_m;
3154	const struct rte_flow_item_meta *meta_v;
3156		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3158		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
/* Use the item mask, falling back to the rte_flow default META mask
 * (guard elided — presumably taken when item->mask is NULL). */
3160	meta_m = (const void *)item->mask;
3162		meta_m = &rte_flow_item_meta_mask;
3163	meta_v = (const void *)item->spec;
/* Metadata is big-endian in the item; MLX5_SET wants CPU order.
 * Value is pre-masked as elsewhere in this file. */
3165		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3166			 rte_be_to_cpu_32(meta_m->data));
3167		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3168			 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3173 * Add source vport match to the specified matcher.
3175 * @param[in, out] matcher
3177 * @param[in, out] key
3178 * Flow matcher value.
3180 * Source vport value to match
3185 flow_dv_translate_item_source_vport(void *matcher, void *key,
3186				    int16_t port, uint16_t mask)
/*
 * Match on the source vport: the mask goes into the matcher's misc
 * parameters, the (already resolved) vport number into the key.
 */
3188	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3189	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3191	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3192	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3196 * Translate port-id item to eswitch match on port-id.
3199 *   The device to configure through.
3200 * @param[in, out] matcher
3202 * @param[in, out] key
3203 * Flow matcher value.
3205 * Flow pattern to translate.
3208 * 0 on success, a negative errno value otherwise.
3211 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3212			       void *key, const struct rte_flow_item *item)
/*
 * Translate a PORT_ID item into a source-vport match. A NULL item (or
 * missing mask/spec) falls back to a full mask and the device's own
 * port ID.
 */
3214	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3215	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3216	uint16_t mask, val, id;
/* Defaults: match-all mask, this device's DPDK port ID. */
3219	mask = pid_m ? pid_m->id : 0xffff;
3220	id = pid_v ? pid_v->id : dev->data->port_id;
/* Resolve the DPDK port ID to its e-switch vport number. */
3221	ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3224	flow_dv_translate_item_source_vport(matcher, key, val, mask);
/* All-zero reference buffer used to test whether a match-criteria
 * sub-header carries any bits at all. */
3228 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Non-zero (true) when the given sub-header of match_criteria is
 * entirely zero, i.e. nothing is matched in it. */
3230 #define HEADER_IS_ZERO(match_criteria, headers)				     \
3231	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
3232		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3235 * Calculate flow matcher enable bitmap.
3237 * @param match_criteria
3238 * Pointer to flow matcher criteria.
3241 * Bitmap of enabled fields.
3244 flow_dv_matcher_enable(uint32_t *match_criteria)
/*
 * Build the match_criteria_enable bitmap: one bit per fte_match_param
 * sub-header that actually carries mask bits (outer, misc, inner,
 * misc2, and — with DR support — misc3).
 */
3246	uint8_t match_criteria_enable;
3248	match_criteria_enable =
3249		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3250		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3251	match_criteria_enable |=
3252		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3253		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3254	match_criteria_enable |=
3255		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3256		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3257	match_criteria_enable |=
3258		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3259		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
/* misc_parameters_3 only exists when built with mlx5dv DR support. */
3260 #ifdef HAVE_MLX5DV_DR
3261	match_criteria_enable |=
3262		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3263		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3265	return match_criteria_enable;
3272 * @param dev[in, out]
3273 * Pointer to rte_eth_dev structure.
3274 * @param[in] table_id
3277 * Direction of the table.
3278 * @param[in] transfer
3279 * E-Switch or NIC flow.
3281 * pointer to error structure.
3284 * Returns tables resource based on the index, NULL in case of failed.
3286 static struct mlx5_flow_tbl_resource *
3287 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3288			 uint32_t table_id, uint8_t egress,
3290			 struct rte_flow_error *error)
/*
 * Return the flow table for the given index, creating the DR table
 * object on first use when built with mlx5dv DR support. The table
 * entry lives in the shared IB context (priv->sh), so it is shared
 * across ports on the same device.
 */
3292	struct mlx5_priv *priv = dev->data->dev_private;
3293	struct mlx5_ibv_shared *sh = priv->sh;
3294	struct mlx5_flow_tbl_resource *tbl;
3296 #ifdef HAVE_MLX5DV_DR
/* Pick the FDB / TX / RX domain table array; the FDB/egress guards are
 * elided in this extract — presumably keyed on the (elided) transfer
 * parameter and "egress". Creation is lazy (presumably guarded on a
 * NULL tbl->obj). */
3298		tbl = &sh->fdb_tbl[table_id];
3300			tbl->obj = mlx5_glue->dr_create_flow_tbl
3301				(sh->fdb_ns, table_id);
3302	} else if (egress) {
3303		tbl = &sh->tx_tbl[table_id];
3305			tbl->obj = mlx5_glue->dr_create_flow_tbl
3306				(sh->tx_ns, table_id);
3308		tbl = &sh->rx_tbl[table_id];
3310			tbl->obj = mlx5_glue->dr_create_flow_tbl
3311				(sh->rx_ns, table_id);
/* Creation failed: report and (per the doc comment) return NULL. */
3314		rte_flow_error_set(error, ENOMEM,
3315				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3316				   NULL, "cannot create table");
/* Each successful get takes a reference on the table. */
3319	rte_atomic32_inc(&tbl->refcnt);
/* Non-DR build: no object to create, just hand back the static entry. */
3325		return &sh->fdb_tbl[table_id];
3327		return &sh->tx_tbl[table_id];
3329		return &sh->rx_tbl[table_id];
3334 * Release a flow table.
3337 * Table resource to be released.
3340 *   Returns 0 if the table was released, 1 otherwise.
3343 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
/* Drop one reference; destroy the DR table object when it was the
 * last one (see doc comment: returns 0 on release, 1 otherwise). */
3347	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3348		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3356 * Register the flow matcher.
3358 * @param dev[in, out]
3359 * Pointer to rte_eth_dev structure.
3360 * @param[in, out] matcher
3361 * Pointer to flow matcher.
3362 * @parm[in, out] dev_flow
3363 * Pointer to the dev_flow.
3365 * pointer to error structure.
3368 * 0 on success otherwise -errno and errno is set.
3371 flow_dv_matcher_register(struct rte_eth_dev *dev,
3372			 struct mlx5_flow_dv_matcher *matcher,
3373			 struct mlx5_flow *dev_flow,
3374			 struct rte_flow_error *error)
/*
 * Find a cached flow matcher identical to "matcher" or create and
 * register a new one; on success dev_flow->dv.matcher points at the
 * (ref-counted) cache entry.
 */
3376	struct mlx5_priv *priv = dev->data->dev_private;
3377	struct mlx5_ibv_shared *sh = priv->sh;
3378	struct mlx5_flow_dv_matcher *cache_matcher;
3379	struct mlx5dv_flow_matcher_attr dv_attr = {
3380		.type = IBV_FLOW_ATTR_NORMAL,
3381		.match_mask = (void *)&matcher->mask,
3383	struct mlx5_flow_tbl_resource *tbl = NULL;
3385	/* Lookup from cache. */
/* A matcher is reusable only when every identity field (crc of the
 * mask, priority, direction, group, transfer) and the mask bytes all
 * match. */
3386	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3387		if (matcher->crc == cache_matcher->crc &&
3388		    matcher->priority == cache_matcher->priority &&
3389		    matcher->egress == cache_matcher->egress &&
3390		    matcher->group == cache_matcher->group &&
3391		    matcher->transfer == cache_matcher->transfer &&
3392		    !memcmp((const void *)matcher->mask.buf,
3393			    (const void *)cache_matcher->mask.buf,
3394			    cache_matcher->mask.size)) {
3396				"priority %hd use %s matcher %p: refcnt %d++",
3397				cache_matcher->priority,
3398				cache_matcher->egress ? "tx" : "rx",
3399				(void *)cache_matcher,
3400				rte_atomic32_read(&cache_matcher->refcnt));
/* Cache hit: take a reference and hand the entry to the flow. */
3401			rte_atomic32_inc(&cache_matcher->refcnt);
3402			dev_flow->dv.matcher = cache_matcher;
3406	/* Register new matcher. */
3407	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3409		return rte_flow_error_set(error, ENOMEM,
3410					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3411					  "cannot allocate matcher memory");
/* The matcher lives in a flow table chosen by scaled group number,
 * direction and transfer attribute. */
3412	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3413				       matcher->egress, matcher->transfer,
/* Table lookup/creation failed: free the half-built entry. */
3416		rte_free(cache_matcher);
3417		return rte_flow_error_set(error, ENOMEM,
3418					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3419					  NULL, "cannot create table");
3421	*cache_matcher = *matcher;
3422	dv_attr.match_criteria_enable =
3423		flow_dv_matcher_enable(cache_matcher->mask.buf);
3424	dv_attr.priority = matcher->priority;
3425	if (matcher->egress)
3426		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3427	cache_matcher->matcher_object =
3428		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3429	if (!cache_matcher->matcher_object) {
3430		rte_free(cache_matcher);
/* Only a DR build took a table object reference to give back here. */
3431 #ifdef HAVE_MLX5DV_DR
3432		flow_dv_tbl_resource_release(tbl);
3434		return rte_flow_error_set(error, ENOMEM,
3435					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3436					  NULL, "cannot create matcher");
/* Publish the new entry and hand a reference to the flow. */
3438	rte_atomic32_inc(&cache_matcher->refcnt);
3439	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3440	dev_flow->dv.matcher = cache_matcher;
3441	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3442		cache_matcher->priority,
3443		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3444		rte_atomic32_read(&cache_matcher->refcnt));
/* NOTE(review): flow_dv_tbl_resource_get() already incremented
 * tbl->refcnt (in the DR build); this is a second increment for the
 * same registration — confirm the release path balances both. */
3445	rte_atomic32_inc(&tbl->refcnt);
3450 * Find existing tag resource or create and register a new one.
3452 * @param dev[in, out]
3453 * Pointer to rte_eth_dev structure.
3454 * @param[in, out] resource
3455 * Pointer to tag resource.
3456 * @parm[in, out] dev_flow
3457 * Pointer to the dev_flow.
3459 * pointer to error structure.
3462 * 0 on success otherwise -errno and errno is set.
3465 flow_dv_tag_resource_register
3466			(struct rte_eth_dev *dev,
3467			 struct mlx5_flow_dv_tag_resource *resource,
3468			 struct mlx5_flow *dev_flow,
3469			 struct rte_flow_error *error)
/*
 * Find a cached tag action for the same tag value or create a new one;
 * on success dev_flow->flow->tag_resource points at the (ref-counted)
 * cache entry.
 */
3471	struct mlx5_priv *priv = dev->data->dev_private;
3472	struct mlx5_ibv_shared *sh = priv->sh;
3473	struct mlx5_flow_dv_tag_resource *cache_resource;
3475	/* Lookup a matching resource from cache. */
/* The tag value alone identifies a reusable entry. */
3476	LIST_FOREACH(cache_resource, &sh->tags, next) {
3477		if (resource->tag == cache_resource->tag) {
3478			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3479				(void *)cache_resource,
3480				rte_atomic32_read(&cache_resource->refcnt));
3481			rte_atomic32_inc(&cache_resource->refcnt);
3482			dev_flow->flow->tag_resource = cache_resource;
3486	/* Register new resource. */
3487	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3488	if (!cache_resource)
3489		return rte_flow_error_set(error, ENOMEM,
3490					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3491					  "cannot allocate resource memory");
3492	*cache_resource = *resource;
/* Create the DV tag action for this tag value. */
3493	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3495	if (!cache_resource->action) {
3496		rte_free(cache_resource);
3497		return rte_flow_error_set(error, ENOMEM,
3498					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3499					  NULL, "cannot create action");
/* Start the refcount at 1 for this registration and publish. */
3501	rte_atomic32_init(&cache_resource->refcnt);
3502	rte_atomic32_inc(&cache_resource->refcnt);
3503	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3504	dev_flow->flow->tag_resource = cache_resource;
3505	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3506		(void *)cache_resource,
3507		rte_atomic32_read(&cache_resource->refcnt));
3515 * Pointer to Ethernet device.
3517 * Pointer to mlx5_flow.
3520 * 1 while a reference on it exists, 0 when freed.
3523 flow_dv_tag_release(struct rte_eth_dev *dev,
3524		    struct mlx5_flow_dv_tag_resource *tag)
/*
 * Drop one reference on a tag resource; destroy the DV action and
 * unlink the cache entry when it was the last one (doc comment:
 * returns 1 while a reference remains, 0 when freed).
 */
3527	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3528		dev->data->port_id, (void *)tag,
3529		rte_atomic32_read(&tag->refcnt));
3530	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3531		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3532		LIST_REMOVE(tag, next);
3533		DRV_LOG(DEBUG, "port %u tag %p: removed",
3534			dev->data->port_id, (void *)tag);
3542 * Translate port ID action to vport.
3545 * Pointer to rte_eth_dev structure.
3547 * Pointer to the port ID action.
3548 * @param[out] dst_port_id
3549 * The target port ID.
3551 * Pointer to the error structure.
3554 * 0 on success, a negative errno value otherwise and rte_errno is set.
3557 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3558				 const struct rte_flow_action *action,
3559				 uint32_t *dst_port_id,
3560				 struct rte_flow_error *error)
/*
 * Resolve a PORT_ID action into the target e-switch vport number,
 * stored in *dst_port_id on success.
 */
3565	const struct rte_flow_action_port_id *conf =
3566			(const struct rte_flow_action_port_id *)action->conf;
/* "original" selects this device's own port instead of conf->id. */
3568	port = conf->original ? dev->data->port_id : conf->id;
3569	ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3571		return rte_flow_error_set(error, -ret,
3572					  RTE_FLOW_ERROR_TYPE_ACTION,
3574					  "No eswitch info was found for port");
3575	*dst_port_id = port_id;
3580 * Fill the flow with DV spec.
3583 * Pointer to rte_eth_dev structure.
3584 * @param[in, out] dev_flow
3585 * Pointer to the sub flow.
3587 * Pointer to the flow attributes.
3589 * Pointer to the list of items.
3590 * @param[in] actions
3591 * Pointer to the list of actions.
3593 * Pointer to the error structure.
3596 * 0 on success, a negative errno value otherwise and rte_errno is set.
3599 flow_dv_translate(struct rte_eth_dev *dev,
3600 struct mlx5_flow *dev_flow,
3601 const struct rte_flow_attr *attr,
3602 const struct rte_flow_item items[],
3603 const struct rte_flow_action actions[],
3604 struct rte_flow_error *error)
3606 struct mlx5_priv *priv = dev->data->dev_private;
3607 struct rte_flow *flow = dev_flow->flow;
3608 uint64_t item_flags = 0;
3609 uint64_t last_item = 0;
3610 uint64_t action_flags = 0;
3611 uint64_t priority = attr->priority;
3612 struct mlx5_flow_dv_matcher matcher = {
3614 .size = sizeof(matcher.mask.buf),
3618 bool actions_end = false;
3619 struct mlx5_flow_dv_modify_hdr_resource res = {
3620 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3621 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3623 union flow_dv_attr flow_attr = { .attr = 0 };
3624 struct mlx5_flow_dv_tag_resource tag_resource;
3625 uint32_t modify_action_position = UINT32_MAX;
3628 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3629 if (priority == MLX5_FLOW_PRIO_RSVD)
3630 priority = priv->config.flow_prio - 1;
3631 for (; !actions_end ; actions++) {
3632 const struct rte_flow_action_queue *queue;
3633 const struct rte_flow_action_rss *rss;
3634 const struct rte_flow_action *action = actions;
3635 const struct rte_flow_action_count *count = action->conf;
3636 const uint8_t *rss_key;
3637 const struct rte_flow_action_jump *jump_data;
3638 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3639 struct mlx5_flow_tbl_resource *tbl;
3640 uint32_t port_id = 0;
3641 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3643 switch (actions->type) {
3644 case RTE_FLOW_ACTION_TYPE_VOID:
3646 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3647 if (flow_dv_translate_action_port_id(dev, action,
3650 port_id_resource.port_id = port_id;
3651 if (flow_dv_port_id_action_resource_register
3652 (dev, &port_id_resource, dev_flow, error))
3654 dev_flow->dv.actions[actions_n++] =
3655 dev_flow->dv.port_id_action->action;
3656 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3658 case RTE_FLOW_ACTION_TYPE_FLAG:
3660 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3661 if (!flow->tag_resource)
3662 if (flow_dv_tag_resource_register
3663 (dev, &tag_resource, dev_flow, error))
3665 dev_flow->dv.actions[actions_n++] =
3666 flow->tag_resource->action;
3667 action_flags |= MLX5_FLOW_ACTION_FLAG;
3669 case RTE_FLOW_ACTION_TYPE_MARK:
3670 tag_resource.tag = mlx5_flow_mark_set
3671 (((const struct rte_flow_action_mark *)
3672 (actions->conf))->id);
3673 if (!flow->tag_resource)
3674 if (flow_dv_tag_resource_register
3675 (dev, &tag_resource, dev_flow, error))
3677 dev_flow->dv.actions[actions_n++] =
3678 flow->tag_resource->action;
3679 action_flags |= MLX5_FLOW_ACTION_MARK;
3681 case RTE_FLOW_ACTION_TYPE_DROP:
3682 action_flags |= MLX5_FLOW_ACTION_DROP;
3684 case RTE_FLOW_ACTION_TYPE_QUEUE:
3685 queue = actions->conf;
3686 flow->rss.queue_num = 1;
3687 (*flow->queue)[0] = queue->index;
3688 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3690 case RTE_FLOW_ACTION_TYPE_RSS:
3691 rss = actions->conf;
3693 memcpy((*flow->queue), rss->queue,
3694 rss->queue_num * sizeof(uint16_t));
3695 flow->rss.queue_num = rss->queue_num;
3696 /* NULL RSS key indicates default RSS key. */
3697 rss_key = !rss->key ? rss_hash_default_key : rss->key;
3698 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3699 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3700 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3701 flow->rss.level = rss->level;
3702 action_flags |= MLX5_FLOW_ACTION_RSS;
3704 case RTE_FLOW_ACTION_TYPE_COUNT:
3705 if (!priv->config.devx) {
3706 rte_errno = ENOTSUP;
3709 flow->counter = flow_dv_counter_new(dev, count->shared,
3711 if (flow->counter == NULL)
3713 dev_flow->dv.actions[actions_n++] =
3714 flow->counter->action;
3715 action_flags |= MLX5_FLOW_ACTION_COUNT;
3718 if (rte_errno == ENOTSUP)
3719 return rte_flow_error_set
3721 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3723 "count action not supported");
3725 return rte_flow_error_set
3727 RTE_FLOW_ERROR_TYPE_ACTION,
3729 "cannot create counter"
3731 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3732 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3733 if (flow_dv_create_action_l2_encap(dev, actions,
3738 dev_flow->dv.actions[actions_n++] =
3739 dev_flow->dv.encap_decap->verbs_action;
3740 action_flags |= actions->type ==
3741 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3742 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3743 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3745 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3746 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3747 if (flow_dv_create_action_l2_decap(dev, dev_flow,
3751 dev_flow->dv.actions[actions_n++] =
3752 dev_flow->dv.encap_decap->verbs_action;
3753 action_flags |= actions->type ==
3754 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3755 MLX5_FLOW_ACTION_VXLAN_DECAP :
3756 MLX5_FLOW_ACTION_NVGRE_DECAP;
3758 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3759 /* Handle encap with preceding decap. */
3760 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3761 if (flow_dv_create_action_raw_encap
3762 (dev, actions, dev_flow, attr, error))
3764 dev_flow->dv.actions[actions_n++] =
3765 dev_flow->dv.encap_decap->verbs_action;
3767 /* Handle encap without preceding decap. */
3768 if (flow_dv_create_action_l2_encap
3769 (dev, actions, dev_flow, attr->transfer,
3772 dev_flow->dv.actions[actions_n++] =
3773 dev_flow->dv.encap_decap->verbs_action;
3775 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3777 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3778 /* Check if this decap is followed by encap. */
3779 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3780 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3783 /* Handle decap only if it isn't followed by encap. */
3784 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3785 if (flow_dv_create_action_l2_decap
3786 (dev, dev_flow, attr->transfer, error))
3788 dev_flow->dv.actions[actions_n++] =
3789 dev_flow->dv.encap_decap->verbs_action;
3791 /* If decap is followed by encap, handle it at encap. */
3792 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3794 case RTE_FLOW_ACTION_TYPE_JUMP:
3795 jump_data = action->conf;
3796 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3799 attr->transfer, error);
3801 return rte_flow_error_set
3803 RTE_FLOW_ERROR_TYPE_ACTION,
3805 "cannot create jump action.");
3806 jump_tbl_resource.tbl = tbl;
3807 if (flow_dv_jump_tbl_resource_register
3808 (dev, &jump_tbl_resource, dev_flow, error)) {
3809 flow_dv_tbl_resource_release(tbl);
3810 return rte_flow_error_set
3812 RTE_FLOW_ERROR_TYPE_ACTION,
3814 "cannot create jump action.");
3816 dev_flow->dv.actions[actions_n++] =
3817 dev_flow->dv.jump->action;
3818 action_flags |= MLX5_FLOW_ACTION_JUMP;
3820 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3821 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3822 if (flow_dv_convert_action_modify_mac(&res, actions,
3825 action_flags |= actions->type ==
3826 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3827 MLX5_FLOW_ACTION_SET_MAC_SRC :
3828 MLX5_FLOW_ACTION_SET_MAC_DST;
3830 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3831 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3832 if (flow_dv_convert_action_modify_ipv4(&res, actions,
3835 action_flags |= actions->type ==
3836 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3837 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3838 MLX5_FLOW_ACTION_SET_IPV4_DST;
3840 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3841 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3842 if (flow_dv_convert_action_modify_ipv6(&res, actions,
3845 action_flags |= actions->type ==
3846 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3847 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3848 MLX5_FLOW_ACTION_SET_IPV6_DST;
3850 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3851 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3852 if (flow_dv_convert_action_modify_tp(&res, actions,
3856 action_flags |= actions->type ==
3857 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3858 MLX5_FLOW_ACTION_SET_TP_SRC :
3859 MLX5_FLOW_ACTION_SET_TP_DST;
3861 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3862 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3866 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3868 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3869 if (flow_dv_convert_action_modify_ttl(&res, actions,
3873 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3875 case RTE_FLOW_ACTION_TYPE_END:
3877 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3878 /* create modify action if needed. */
3879 if (flow_dv_modify_hdr_resource_register
3884 dev_flow->dv.actions[modify_action_position] =
3885 dev_flow->dv.modify_hdr->verbs_action;
3891 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3892 modify_action_position == UINT32_MAX)
3893 modify_action_position = actions_n++;
3895 dev_flow->dv.actions_n = actions_n;
3896 flow->actions = action_flags;
3897 if (attr->ingress && !attr->transfer &&
3898 (priv->representor || priv->master)) {
3899 /* It was validated - we support unidirection flows only. */
3900 assert(!attr->egress);
3902 * Add matching on source vport index only
3903 * for ingress rules in E-Switch configurations.
3905 flow_dv_translate_item_source_vport(matcher.mask.buf,
3906 dev_flow->dv.value.buf,
3910 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3911 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3912 void *match_mask = matcher.mask.buf;
3913 void *match_value = dev_flow->dv.value.buf;
3915 switch (items->type) {
3916 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3917 flow_dv_translate_item_port_id(dev, match_mask,
3918 match_value, items);
3919 last_item = MLX5_FLOW_ITEM_PORT_ID;
3921 case RTE_FLOW_ITEM_TYPE_ETH:
3922 flow_dv_translate_item_eth(match_mask, match_value,
3924 matcher.priority = MLX5_PRIORITY_MAP_L2;
3925 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3926 MLX5_FLOW_LAYER_OUTER_L2;
3928 case RTE_FLOW_ITEM_TYPE_VLAN:
3929 flow_dv_translate_item_vlan(match_mask, match_value,
3931 matcher.priority = MLX5_PRIORITY_MAP_L2;
3932 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3933 MLX5_FLOW_LAYER_INNER_VLAN) :
3934 (MLX5_FLOW_LAYER_OUTER_L2 |
3935 MLX5_FLOW_LAYER_OUTER_VLAN);
3937 case RTE_FLOW_ITEM_TYPE_IPV4:
3938 flow_dv_translate_item_ipv4(match_mask, match_value,
3939 items, tunnel, attr->group);
3940 matcher.priority = MLX5_PRIORITY_MAP_L3;
3941 dev_flow->dv.hash_fields |=
3942 mlx5_flow_hashfields_adjust
3944 MLX5_IPV4_LAYER_TYPES,
3945 MLX5_IPV4_IBV_RX_HASH);
3946 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3947 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3949 case RTE_FLOW_ITEM_TYPE_IPV6:
3950 flow_dv_translate_item_ipv6(match_mask, match_value,
3951 items, tunnel, attr->group);
3952 matcher.priority = MLX5_PRIORITY_MAP_L3;
3953 dev_flow->dv.hash_fields |=
3954 mlx5_flow_hashfields_adjust
3956 MLX5_IPV6_LAYER_TYPES,
3957 MLX5_IPV6_IBV_RX_HASH);
3958 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3959 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3961 case RTE_FLOW_ITEM_TYPE_TCP:
3962 flow_dv_translate_item_tcp(match_mask, match_value,
3964 matcher.priority = MLX5_PRIORITY_MAP_L4;
3965 dev_flow->dv.hash_fields |=
3966 mlx5_flow_hashfields_adjust
3967 (dev_flow, tunnel, ETH_RSS_TCP,
3968 IBV_RX_HASH_SRC_PORT_TCP |
3969 IBV_RX_HASH_DST_PORT_TCP);
3970 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3971 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3973 case RTE_FLOW_ITEM_TYPE_UDP:
3974 flow_dv_translate_item_udp(match_mask, match_value,
3976 matcher.priority = MLX5_PRIORITY_MAP_L4;
3977 dev_flow->dv.hash_fields |=
3978 mlx5_flow_hashfields_adjust
3979 (dev_flow, tunnel, ETH_RSS_UDP,
3980 IBV_RX_HASH_SRC_PORT_UDP |
3981 IBV_RX_HASH_DST_PORT_UDP);
3982 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3983 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3985 case RTE_FLOW_ITEM_TYPE_GRE:
3986 flow_dv_translate_item_gre(match_mask, match_value,
3988 last_item = MLX5_FLOW_LAYER_GRE;
3990 case RTE_FLOW_ITEM_TYPE_NVGRE:
3991 flow_dv_translate_item_nvgre(match_mask, match_value,
3993 last_item = MLX5_FLOW_LAYER_GRE;
3995 case RTE_FLOW_ITEM_TYPE_VXLAN:
3996 flow_dv_translate_item_vxlan(match_mask, match_value,
3998 last_item = MLX5_FLOW_LAYER_VXLAN;
4000 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4001 flow_dv_translate_item_vxlan(match_mask, match_value,
4003 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4005 case RTE_FLOW_ITEM_TYPE_MPLS:
4006 flow_dv_translate_item_mpls(match_mask, match_value,
4007 items, last_item, tunnel);
4008 last_item = MLX5_FLOW_LAYER_MPLS;
4010 case RTE_FLOW_ITEM_TYPE_META:
4011 flow_dv_translate_item_meta(match_mask, match_value,
4013 last_item = MLX5_FLOW_ITEM_METADATA;
4018 item_flags |= last_item;
4020 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4021 dev_flow->dv.value.buf));
4022 dev_flow->layers = item_flags;
4023 /* Register matcher. */
4024 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4026 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4028 matcher.egress = attr->egress;
4029 matcher.group = attr->group;
4030 matcher.transfer = attr->transfer;
4031 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4037 * Apply the flow to the NIC.
4040 * Pointer to the Ethernet device structure.
4041 * @param[in, out] flow
4042 * Pointer to flow structure.
4044 * Pointer to error structure.
4047 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Apply the flow to the NIC: for each sub-flow on flow->dev_flows, append
 * the terminal fate action (drop / queue / RSS hash RX queue) to the DV
 * action array and create the HW rule via mlx5_glue->dv_create_flow().
 * On failure, the cleanup loop at the bottom releases any hash RX queues
 * already taken and restores rte_errno.
 * NOTE(review): this excerpt elides several original lines (gotos, braces,
 * local setup such as 'n' and 'dv'); comments describe only visible code.
 */
4050 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4051 struct rte_flow_error *error)
4053 struct mlx5_flow_dv *dv;
4054 struct mlx5_flow *dev_flow;
4055 struct mlx5_priv *priv = dev->data->dev_private;
/* Walk every device sub-flow belonging to this rte_flow. */
4059 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4062 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* E-Switch (transfer) drop uses the shared eswitch drop action. */
4063 if (flow->transfer) {
4064 dv->actions[n++] = priv->sh->esw_drop_action;
/* NIC-level drop: dedicated drop hash RX queue. */
4066 dv->hrxq = mlx5_hrxq_drop_new(dev);
4070 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4072 "cannot get drop hash queue");
4075 dv->actions[n++] = dv->hrxq->action;
4077 } else if (flow->actions &
4078 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4079 struct mlx5_hrxq *hrxq;
/* Reuse a cached hash RX queue matching key/fields/queues... */
4081 hrxq = mlx5_hrxq_get(dev, flow->key,
4082 MLX5_RSS_HASH_KEY_LEN,
4085 flow->rss.queue_num);
/* ...or create a new one; last arg flags tunnel offloading. */
4087 hrxq = mlx5_hrxq_new
4088 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4089 dv->hash_fields, (*flow->queue),
4090 flow->rss.queue_num,
4091 !!(dev_flow->layers &
4092 MLX5_FLOW_LAYER_TUNNEL));
4096 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4097 "cannot get hash queue");
4101 dv->actions[n++] = dv->hrxq->action;
/* Instantiate the rule in HW with the matcher + n actions. */
4104 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4105 (void *)&dv->value, n,
4108 rte_flow_error_set(error, errno,
4109 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4111 "hardware refuses to create flow");
/* Error path: undo partially acquired hrxq resources. */
4117 err = rte_errno; /* Save rte_errno before cleanup. */
4118 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4119 struct mlx5_flow_dv *dv = &dev_flow->dv;
4121 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4122 mlx5_hrxq_drop_release(dev);
4124 mlx5_hrxq_release(dev, dv->hrxq);
4128 rte_errno = err; /* Restore rte_errno. */
4133 * Release the flow matcher.
4136 * Pointer to Ethernet device.
4138 * Pointer to mlx5_flow.
4141 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's DV matcher. When the refcount hits
 * zero: destroy the verbs matcher object, unlink it from the shared
 * list and release the flow table it was attached to (TX table for
 * egress matchers, RX table otherwise). Per the header comment above,
 * returns 1 while a reference remains, 0 once freed.
 */
4144 flow_dv_matcher_release(struct rte_eth_dev *dev,
4145 struct mlx5_flow *flow)
4147 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4148 struct mlx5_priv *priv = dev->data->dev_private;
4149 struct mlx5_ibv_shared *sh = priv->sh;
4150 struct mlx5_flow_tbl_resource *tbl;
4152 assert(matcher->matcher_object);
4153 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4154 dev->data->port_id, (void *)matcher,
4155 rte_atomic32_read(&matcher->refcnt));
/* Last reference gone: tear the matcher down. */
4156 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4157 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4158 (matcher->matcher_object));
4159 LIST_REMOVE(matcher, next);
/* The owning table depends on matcher direction. */
4160 if (matcher->egress)
4161 tbl = &sh->tx_tbl[matcher->group];
4163 tbl = &sh->rx_tbl[matcher->group];
4164 flow_dv_tbl_resource_release(tbl);
4166 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4167 dev->data->port_id, (void *)matcher);
4174 * Release an encap/decap resource.
4177 * Pointer to mlx5_flow.
4180 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached encap/decap action resource;
 * on the last reference destroy the verbs action, unlink the cache
 * entry and free it. Returns 1 while referenced, 0 when freed (see
 * header comment above).
 */
4183 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4185 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4186 flow->dv.encap_decap;
4188 assert(cache_resource->verbs_action);
4189 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4190 (void *)cache_resource,
4191 rte_atomic32_read(&cache_resource->refcnt));
4192 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4193 claim_zero(mlx5_glue->destroy_flow_action
4194 (cache_resource->verbs_action));
4195 LIST_REMOVE(cache_resource, next);
4196 rte_free(cache_resource);
4197 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4198 (void *)cache_resource);
4205 * Release an jump to table action resource.
4208 * Pointer to mlx5_flow.
4211 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's jump-to-table action resource; on
 * the last reference destroy the action, unlink the cache entry,
 * release the destination table reference and free the entry.
 * Returns 1 while referenced, 0 when freed (see header comment above).
 */
4214 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4216 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4219 assert(cache_resource->action);
4220 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4221 (void *)cache_resource,
4222 rte_atomic32_read(&cache_resource->refcnt));
4223 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4224 claim_zero(mlx5_glue->destroy_flow_action
4225 (cache_resource->action));
4226 LIST_REMOVE(cache_resource, next);
/* Also drop the reference this jump held on its target table. */
4227 flow_dv_tbl_resource_release(cache_resource->tbl);
4228 rte_free(cache_resource);
4229 DRV_LOG(DEBUG, "jump table resource %p: removed",
4230 (void *)cache_resource);
4237 * Release a modify-header resource.
4240 * Pointer to mlx5_flow.
4243 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's modify-header action resource; on
 * the last reference destroy the verbs action, unlink and free the
 * cache entry. Returns 1 while referenced, 0 when freed (see header
 * comment above).
 */
4246 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4248 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4249 flow->dv.modify_hdr;
4251 assert(cache_resource->verbs_action);
4252 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4253 (void *)cache_resource,
4254 rte_atomic32_read(&cache_resource->refcnt));
4255 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4256 claim_zero(mlx5_glue->destroy_flow_action
4257 (cache_resource->verbs_action));
4258 LIST_REMOVE(cache_resource, next);
4259 rte_free(cache_resource);
4260 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4261 (void *)cache_resource);
4268 * Release port ID action resource.
4271 * Pointer to mlx5_flow.
4274 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's port-ID action resource; on the
 * last reference destroy the action, unlink and free the cache entry.
 * Returns 1 while referenced, 0 when freed (see header comment above).
 */
4277 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4279 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4280 flow->dv.port_id_action;
4282 assert(cache_resource->action);
4283 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4284 (void *)cache_resource,
4285 rte_atomic32_read(&cache_resource->refcnt));
4286 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4287 claim_zero(mlx5_glue->destroy_flow_action
4288 (cache_resource->action));
4289 LIST_REMOVE(cache_resource, next);
4290 rte_free(cache_resource);
4291 DRV_LOG(DEBUG, "port id action resource %p: removed",
4292 (void *)cache_resource);
4299 * Remove the flow from the NIC but keeps it in memory.
4302 * Pointer to Ethernet device.
4303 * @param[in, out] flow
4304 * Pointer to flow structure.
/*
 * Remove the flow's rules from HW while keeping the SW state: for each
 * sub-flow destroy the created DV flow and release its fate resource
 * (drop queue or hash RX queue). Mirrors the error path of
 * flow_dv_apply().
 */
4307 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4309 struct mlx5_flow_dv *dv;
4310 struct mlx5_flow *dev_flow;
4314 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4317 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4321 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4322 mlx5_hrxq_drop_release(dev);
4324 mlx5_hrxq_release(dev, dv->hrxq);
4331 * Remove the flow from the NIC and the memory.
4334 * Pointer to the Ethernet device structure.
4335 * @param[in, out] flow
4336 * Pointer to flow structure.
/*
 * Fully tear a flow down: remove it from the NIC, release the shared
 * counter and tag resources, then pop every sub-flow from the list and
 * drop each of its cached DV resources (matcher, encap/decap,
 * modify-header, jump, port-id action).
 */
4339 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4341 struct mlx5_flow *dev_flow;
4345 flow_dv_remove(dev, flow);
/* Counter and tag are owned by the rte_flow, not the sub-flows. */
4346 if (flow->counter) {
4347 flow_dv_counter_release(flow->counter);
4348 flow->counter = NULL;
4350 if (flow->tag_resource) {
4351 flow_dv_tag_release(dev, flow->tag_resource);
4352 flow->tag_resource = NULL;
/* Detach and release each device sub-flow in turn. */
4354 while (!LIST_EMPTY(&flow->dev_flows)) {
4355 dev_flow = LIST_FIRST(&flow->dev_flows);
4356 LIST_REMOVE(dev_flow, next);
4357 if (dev_flow->dv.matcher)
4358 flow_dv_matcher_release(dev, dev_flow);
4359 if (dev_flow->dv.encap_decap)
4360 flow_dv_encap_decap_resource_release(dev_flow);
4361 if (dev_flow->dv.modify_hdr)
4362 flow_dv_modify_hdr_resource_release(dev_flow);
4363 if (dev_flow->dv.jump)
4364 flow_dv_jump_tbl_resource_release(dev_flow);
4365 if (dev_flow->dv.port_id_action)
4366 flow_dv_port_id_action_resource_release(dev_flow);
4372 * Query a dv flow rule for its statistics via devx.
4375 * Pointer to Ethernet device.
4377 * Pointer to the sub flow.
4379 * data retrieved by the query.
4381 * Perform verbose error reporting if not NULL.
4384 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Fill a rte_flow_query_count from the flow's DevX counter: read raw
 * packet/byte totals from HW and report them as deltas against the
 * baseline stored in the counter; on qc->reset the baseline is moved
 * forward so subsequent queries restart from zero. Fails with ENOTSUP
 * when DevX is unavailable and EINVAL when the flow has no counter.
 */
4387 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4388 void *data, struct rte_flow_error *error)
4390 struct mlx5_priv *priv = dev->data->dev_private;
4391 struct rte_flow_query_count *qc = data;
4396 if (!priv->config.devx)
4397 return rte_flow_error_set(error, ENOTSUP,
4398 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4400 "counters are not supported");
4401 if (flow->counter) {
4402 err = mlx5_devx_cmd_flow_counter_query
4403 (flow->counter->dcs,
4404 qc->reset, &pkts, &bytes);
4406 return rte_flow_error_set
4408 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4410 "cannot read counters");
/* Report deltas relative to the last-reset baseline. */
4413 qc->hits = pkts - flow->counter->hits;
4414 qc->bytes = bytes - flow->counter->bytes;
/* Advance the baseline (visible lines suggest the reset path —
 * TODO confirm the elided condition guards this with qc->reset). */
4416 flow->counter->hits = pkts;
4417 flow->counter->bytes = bytes;
4421 return rte_flow_error_set(error, EINVAL,
4422 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4424 "counters are not available");
4430 * @see rte_flow_query()
/*
 * rte_flow query entry point for the DV driver: walk the action list,
 * skip VOID, dispatch COUNT to flow_dv_query_count() and reject any
 * other action type with ENOTSUP.
 */
4434 flow_dv_query(struct rte_eth_dev *dev,
4435 struct rte_flow *flow __rte_unused,
4436 const struct rte_flow_action *actions __rte_unused,
4437 void *data __rte_unused,
4438 struct rte_flow_error *error __rte_unused)
4442 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4443 switch (actions->type) {
4444 case RTE_FLOW_ACTION_TYPE_VOID:
4446 case RTE_FLOW_ACTION_TYPE_COUNT:
4447 ret = flow_dv_query_count(dev, flow, data, error);
4450 return rte_flow_error_set(error, ENOTSUP,
4451 RTE_FLOW_ERROR_TYPE_ACTION,
4453 "action not supported");
4460 * Mutex-protected thunk to flow_dv_translate().
/*
 * Mutex-protected thunk: take the shared-context lock around
 * flow_dv_translate() and forward its return value.
 */
4463 flow_d_translate(struct rte_eth_dev *dev,
4464 struct mlx5_flow *dev_flow,
4465 const struct rte_flow_attr *attr,
4466 const struct rte_flow_item items[],
4467 const struct rte_flow_action actions[],
4468 struct rte_flow_error *error)
4472 flow_d_shared_lock(dev);
4473 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4474 flow_d_shared_unlock(dev);
4479 * Mutex-protected thunk to flow_dv_apply().
/*
 * Mutex-protected thunk: take the shared-context lock around
 * flow_dv_apply() and forward its return value.
 */
4482 flow_d_apply(struct rte_eth_dev *dev,
4483 struct rte_flow *flow,
4484 struct rte_flow_error *error)
4488 flow_d_shared_lock(dev);
4489 ret = flow_dv_apply(dev, flow, error);
4490 flow_d_shared_unlock(dev);
4495 * Mutex-protected thunk to flow_dv_remove().
/* Mutex-protected thunk around flow_dv_remove() (void return). */
4498 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4500 flow_d_shared_lock(dev);
4501 flow_dv_remove(dev, flow);
4502 flow_d_shared_unlock(dev);
4506 * Mutex-protected thunk to flow_dv_destroy().
/* Mutex-protected thunk around flow_dv_destroy() (void return). */
4509 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4511 flow_d_shared_lock(dev);
4512 flow_dv_destroy(dev, flow);
4513 flow_d_shared_unlock(dev);
/*
 * Driver-ops vtable exported to the generic mlx5 flow layer.
 * translate/apply/remove/destroy go through the lock-taking flow_d_*
 * thunks; validate/prepare/query call the DV implementations directly.
 */
4516 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4517 .validate = flow_dv_validate,
4518 .prepare = flow_dv_prepare,
4519 .translate = flow_d_translate,
4520 .apply = flow_d_apply,
4521 .remove = flow_d_remove,
4522 .destroy = flow_d_destroy,
4523 .query = flow_dv_query,
4526 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */