1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
65 * Initialize flow attributes structure according to flow items' types.
68 * Pointer to item specification.
70 * Pointer to flow attributes structure.
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
75 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
77 case RTE_FLOW_ITEM_TYPE_IPV4:
80 case RTE_FLOW_ITEM_TYPE_IPV6:
83 case RTE_FLOW_ITEM_TYPE_UDP:
86 case RTE_FLOW_ITEM_TYPE_TCP:
96 struct field_modify_info {
97 uint32_t size; /* Size of field in protocol header, in bytes. */
98 uint32_t offset; /* Offset of field in protocol header, in bytes. */
99 enum mlx5_modification_field id;
102 struct field_modify_info modify_eth[] = {
103 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
104 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
105 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
106 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
110 struct field_modify_info modify_ipv4[] = {
111 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
112 {4, 12, MLX5_MODI_OUT_SIPV4},
113 {4, 16, MLX5_MODI_OUT_DIPV4},
117 struct field_modify_info modify_ipv6[] = {
118 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
119 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
120 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
121 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
122 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
123 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
124 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
125 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
126 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
130 struct field_modify_info modify_udp[] = {
131 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
132 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
136 struct field_modify_info modify_tcp[] = {
137 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
138 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
139 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
140 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
145 * Acquire the synchronizing object to protect multithreaded access
146 * to shared dv context. Lock occurs only if context is actually
147 * shared, i.e. we have multiport IB device and representors are
151 * Pointer to the rte_eth_dev structure.
154 flow_d_shared_lock(struct rte_eth_dev *dev)
156 struct mlx5_priv *priv = dev->data->dev_private;
157 struct mlx5_ibv_shared *sh = priv->sh;
159 if (sh->dv_refcnt > 1) {
162 ret = pthread_mutex_lock(&sh->dv_mutex);
169 flow_d_shared_unlock(struct rte_eth_dev *dev)
171 struct mlx5_priv *priv = dev->data->dev_private;
172 struct mlx5_ibv_shared *sh = priv->sh;
174 if (sh->dv_refcnt > 1) {
177 ret = pthread_mutex_unlock(&sh->dv_mutex);
184 * Convert modify-header action to DV specification.
187 * Pointer to item specification.
189 * Pointer to field modification information.
190 * @param[in,out] resource
191 * Pointer to the modify-header resource.
193 * Type of modification.
195 * Pointer to the error structure.
198 * 0 on success, a negative errno value otherwise and rte_errno is set.
201 flow_dv_convert_modify_action(struct rte_flow_item *item,
202 struct field_modify_info *field,
203 struct mlx5_flow_dv_modify_hdr_resource *resource,
205 struct rte_flow_error *error)
207 uint32_t i = resource->actions_num;
208 struct mlx5_modification_cmd *actions = resource->actions;
209 const uint8_t *spec = item->spec;
210 const uint8_t *mask = item->mask;
213 while (field->size) {
215 /* Generate modify command for each mask segment. */
216 memcpy(&set, &mask[field->offset], field->size);
218 if (i >= MLX5_MODIFY_NUM)
219 return rte_flow_error_set(error, EINVAL,
220 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
221 "too many items to modify");
222 actions[i].action_type = type;
223 actions[i].field = field->id;
224 actions[i].length = field->size ==
225 4 ? 0 : field->size * 8;
226 rte_memcpy(&actions[i].data[4 - field->size],
227 &spec[field->offset], field->size);
228 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
231 if (resource->actions_num != i)
232 resource->actions_num = i;
235 if (!resource->actions_num)
236 return rte_flow_error_set(error, EINVAL,
237 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
238 "invalid modification flow item");
243 * Convert modify-header set IPv4 address action to DV specification.
245 * @param[in,out] resource
246 * Pointer to the modify-header resource.
248 * Pointer to action specification.
250 * Pointer to the error structure.
253 * 0 on success, a negative errno value otherwise and rte_errno is set.
256 flow_dv_convert_action_modify_ipv4
257 (struct mlx5_flow_dv_modify_hdr_resource *resource,
258 const struct rte_flow_action *action,
259 struct rte_flow_error *error)
261 const struct rte_flow_action_set_ipv4 *conf =
262 (const struct rte_flow_action_set_ipv4 *)(action->conf);
263 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
264 struct rte_flow_item_ipv4 ipv4;
265 struct rte_flow_item_ipv4 ipv4_mask;
267 memset(&ipv4, 0, sizeof(ipv4));
268 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
269 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
270 ipv4.hdr.src_addr = conf->ipv4_addr;
271 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
273 ipv4.hdr.dst_addr = conf->ipv4_addr;
274 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
277 item.mask = &ipv4_mask;
278 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
279 MLX5_MODIFICATION_TYPE_SET, error);
283 * Convert modify-header set IPv6 address action to DV specification.
285 * @param[in,out] resource
286 * Pointer to the modify-header resource.
288 * Pointer to action specification.
290 * Pointer to the error structure.
293 * 0 on success, a negative errno value otherwise and rte_errno is set.
296 flow_dv_convert_action_modify_ipv6
297 (struct mlx5_flow_dv_modify_hdr_resource *resource,
298 const struct rte_flow_action *action,
299 struct rte_flow_error *error)
301 const struct rte_flow_action_set_ipv6 *conf =
302 (const struct rte_flow_action_set_ipv6 *)(action->conf);
303 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
304 struct rte_flow_item_ipv6 ipv6;
305 struct rte_flow_item_ipv6 ipv6_mask;
307 memset(&ipv6, 0, sizeof(ipv6));
308 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
309 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
310 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
311 sizeof(ipv6.hdr.src_addr));
312 memcpy(&ipv6_mask.hdr.src_addr,
313 &rte_flow_item_ipv6_mask.hdr.src_addr,
314 sizeof(ipv6.hdr.src_addr));
316 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
317 sizeof(ipv6.hdr.dst_addr));
318 memcpy(&ipv6_mask.hdr.dst_addr,
319 &rte_flow_item_ipv6_mask.hdr.dst_addr,
320 sizeof(ipv6.hdr.dst_addr));
323 item.mask = &ipv6_mask;
324 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
325 MLX5_MODIFICATION_TYPE_SET, error);
329 * Convert modify-header set MAC address action to DV specification.
331 * @param[in,out] resource
332 * Pointer to the modify-header resource.
334 * Pointer to action specification.
336 * Pointer to the error structure.
339 * 0 on success, a negative errno value otherwise and rte_errno is set.
342 flow_dv_convert_action_modify_mac
343 (struct mlx5_flow_dv_modify_hdr_resource *resource,
344 const struct rte_flow_action *action,
345 struct rte_flow_error *error)
347 const struct rte_flow_action_set_mac *conf =
348 (const struct rte_flow_action_set_mac *)(action->conf);
349 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
350 struct rte_flow_item_eth eth;
351 struct rte_flow_item_eth eth_mask;
353 memset(ð, 0, sizeof(eth));
354 memset(ð_mask, 0, sizeof(eth_mask));
355 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
356 memcpy(ð.src.addr_bytes, &conf->mac_addr,
357 sizeof(eth.src.addr_bytes));
358 memcpy(ð_mask.src.addr_bytes,
359 &rte_flow_item_eth_mask.src.addr_bytes,
360 sizeof(eth_mask.src.addr_bytes));
362 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
363 sizeof(eth.dst.addr_bytes));
364 memcpy(ð_mask.dst.addr_bytes,
365 &rte_flow_item_eth_mask.dst.addr_bytes,
366 sizeof(eth_mask.dst.addr_bytes));
369 item.mask = ð_mask;
370 return flow_dv_convert_modify_action(&item, modify_eth, resource,
371 MLX5_MODIFICATION_TYPE_SET, error);
375 * Convert modify-header set TP action to DV specification.
377 * @param[in,out] resource
378 * Pointer to the modify-header resource.
380 * Pointer to action specification.
382 * Pointer to rte_flow_item objects list.
384 * Pointer to flow attributes structure.
386 * Pointer to the error structure.
389 * 0 on success, a negative errno value otherwise and rte_errno is set.
392 flow_dv_convert_action_modify_tp
393 (struct mlx5_flow_dv_modify_hdr_resource *resource,
394 const struct rte_flow_action *action,
395 const struct rte_flow_item *items,
396 union flow_dv_attr *attr,
397 struct rte_flow_error *error)
399 const struct rte_flow_action_set_tp *conf =
400 (const struct rte_flow_action_set_tp *)(action->conf);
401 struct rte_flow_item item;
402 struct rte_flow_item_udp udp;
403 struct rte_flow_item_udp udp_mask;
404 struct rte_flow_item_tcp tcp;
405 struct rte_flow_item_tcp tcp_mask;
406 struct field_modify_info *field;
409 flow_dv_attr_init(items, attr);
411 memset(&udp, 0, sizeof(udp));
412 memset(&udp_mask, 0, sizeof(udp_mask));
413 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
414 udp.hdr.src_port = conf->port;
415 udp_mask.hdr.src_port =
416 rte_flow_item_udp_mask.hdr.src_port;
418 udp.hdr.dst_port = conf->port;
419 udp_mask.hdr.dst_port =
420 rte_flow_item_udp_mask.hdr.dst_port;
422 item.type = RTE_FLOW_ITEM_TYPE_UDP;
424 item.mask = &udp_mask;
428 memset(&tcp, 0, sizeof(tcp));
429 memset(&tcp_mask, 0, sizeof(tcp_mask));
430 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
431 tcp.hdr.src_port = conf->port;
432 tcp_mask.hdr.src_port =
433 rte_flow_item_tcp_mask.hdr.src_port;
435 tcp.hdr.dst_port = conf->port;
436 tcp_mask.hdr.dst_port =
437 rte_flow_item_tcp_mask.hdr.dst_port;
439 item.type = RTE_FLOW_ITEM_TYPE_TCP;
441 item.mask = &tcp_mask;
444 return flow_dv_convert_modify_action(&item, field, resource,
445 MLX5_MODIFICATION_TYPE_SET, error);
449 * Convert modify-header set TTL action to DV specification.
451 * @param[in,out] resource
452 * Pointer to the modify-header resource.
454 * Pointer to action specification.
456 * Pointer to rte_flow_item objects list.
458 * Pointer to flow attributes structure.
460 * Pointer to the error structure.
463 * 0 on success, a negative errno value otherwise and rte_errno is set.
466 flow_dv_convert_action_modify_ttl
467 (struct mlx5_flow_dv_modify_hdr_resource *resource,
468 const struct rte_flow_action *action,
469 const struct rte_flow_item *items,
470 union flow_dv_attr *attr,
471 struct rte_flow_error *error)
473 const struct rte_flow_action_set_ttl *conf =
474 (const struct rte_flow_action_set_ttl *)(action->conf);
475 struct rte_flow_item item;
476 struct rte_flow_item_ipv4 ipv4;
477 struct rte_flow_item_ipv4 ipv4_mask;
478 struct rte_flow_item_ipv6 ipv6;
479 struct rte_flow_item_ipv6 ipv6_mask;
480 struct field_modify_info *field;
483 flow_dv_attr_init(items, attr);
485 memset(&ipv4, 0, sizeof(ipv4));
486 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
487 ipv4.hdr.time_to_live = conf->ttl_value;
488 ipv4_mask.hdr.time_to_live = 0xFF;
489 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
491 item.mask = &ipv4_mask;
495 memset(&ipv6, 0, sizeof(ipv6));
496 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
497 ipv6.hdr.hop_limits = conf->ttl_value;
498 ipv6_mask.hdr.hop_limits = 0xFF;
499 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
501 item.mask = &ipv6_mask;
504 return flow_dv_convert_modify_action(&item, field, resource,
505 MLX5_MODIFICATION_TYPE_SET, error);
509 * Convert modify-header decrement TTL action to DV specification.
511 * @param[in,out] resource
512 * Pointer to the modify-header resource.
514 * Pointer to action specification.
516 * Pointer to rte_flow_item objects list.
518 * Pointer to flow attributes structure.
520 * Pointer to the error structure.
523 * 0 on success, a negative errno value otherwise and rte_errno is set.
526 flow_dv_convert_action_modify_dec_ttl
527 (struct mlx5_flow_dv_modify_hdr_resource *resource,
528 const struct rte_flow_item *items,
529 union flow_dv_attr *attr,
530 struct rte_flow_error *error)
532 struct rte_flow_item item;
533 struct rte_flow_item_ipv4 ipv4;
534 struct rte_flow_item_ipv4 ipv4_mask;
535 struct rte_flow_item_ipv6 ipv6;
536 struct rte_flow_item_ipv6 ipv6_mask;
537 struct field_modify_info *field;
540 flow_dv_attr_init(items, attr);
542 memset(&ipv4, 0, sizeof(ipv4));
543 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
544 ipv4.hdr.time_to_live = 0xFF;
545 ipv4_mask.hdr.time_to_live = 0xFF;
546 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
548 item.mask = &ipv4_mask;
552 memset(&ipv6, 0, sizeof(ipv6));
553 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
554 ipv6.hdr.hop_limits = 0xFF;
555 ipv6_mask.hdr.hop_limits = 0xFF;
556 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
558 item.mask = &ipv6_mask;
561 return flow_dv_convert_modify_action(&item, field, resource,
562 MLX5_MODIFICATION_TYPE_ADD, error);
566 * Convert modify-header increment/decrement TCP Sequence number
567 * to DV specification.
569 * @param[in,out] resource
570 * Pointer to the modify-header resource.
572 * Pointer to action specification.
574 * Pointer to the error structure.
577 * 0 on success, a negative errno value otherwise and rte_errno is set.
580 flow_dv_convert_action_modify_tcp_seq
581 (struct mlx5_flow_dv_modify_hdr_resource *resource,
582 const struct rte_flow_action *action,
583 struct rte_flow_error *error)
585 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
586 uint64_t value = rte_be_to_cpu_32(*conf);
587 struct rte_flow_item item;
588 struct rte_flow_item_tcp tcp;
589 struct rte_flow_item_tcp tcp_mask;
591 memset(&tcp, 0, sizeof(tcp));
592 memset(&tcp_mask, 0, sizeof(tcp_mask));
593 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
595 * The HW has no decrement operation, only increment operation.
596 * To simulate decrement X from Y using increment operation
597 * we need to add UINT32_MAX X times to Y.
598 * Each adding of UINT32_MAX decrements Y by 1.
601 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
602 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
603 item.type = RTE_FLOW_ITEM_TYPE_TCP;
605 item.mask = &tcp_mask;
606 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
607 MLX5_MODIFICATION_TYPE_ADD, error);
611 * Convert modify-header increment/decrement TCP Acknowledgment number
612 * to DV specification.
614 * @param[in,out] resource
615 * Pointer to the modify-header resource.
617 * Pointer to action specification.
619 * Pointer to the error structure.
622 * 0 on success, a negative errno value otherwise and rte_errno is set.
625 flow_dv_convert_action_modify_tcp_ack
626 (struct mlx5_flow_dv_modify_hdr_resource *resource,
627 const struct rte_flow_action *action,
628 struct rte_flow_error *error)
630 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
631 uint64_t value = rte_be_to_cpu_32(*conf);
632 struct rte_flow_item item;
633 struct rte_flow_item_tcp tcp;
634 struct rte_flow_item_tcp tcp_mask;
636 memset(&tcp, 0, sizeof(tcp));
637 memset(&tcp_mask, 0, sizeof(tcp_mask));
638 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
640 * The HW has no decrement operation, only increment operation.
641 * To simulate decrement X from Y using increment operation
642 * we need to add UINT32_MAX X times to Y.
643 * Each adding of UINT32_MAX decrements Y by 1.
646 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
647 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
648 item.type = RTE_FLOW_ITEM_TYPE_TCP;
650 item.mask = &tcp_mask;
651 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
652 MLX5_MODIFICATION_TYPE_ADD, error);
656 * Validate META item.
659 * Pointer to the rte_eth_dev structure.
661 * Item specification.
663 * Attributes of flow that includes this item.
665 * Pointer to error structure.
668 * 0 on success, a negative errno value otherwise and rte_errno is set.
671 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
672 const struct rte_flow_item *item,
673 const struct rte_flow_attr *attr,
674 struct rte_flow_error *error)
676 const struct rte_flow_item_meta *spec = item->spec;
677 const struct rte_flow_item_meta *mask = item->mask;
678 const struct rte_flow_item_meta nic_mask = {
679 .data = RTE_BE32(UINT32_MAX)
682 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
684 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
685 return rte_flow_error_set(error, EPERM,
686 RTE_FLOW_ERROR_TYPE_ITEM,
688 "match on metadata offload "
689 "configuration is off for this port");
691 return rte_flow_error_set(error, EINVAL,
692 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
694 "data cannot be empty");
696 return rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
699 "data cannot be zero");
701 mask = &rte_flow_item_meta_mask;
702 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
703 (const uint8_t *)&nic_mask,
704 sizeof(struct rte_flow_item_meta),
709 return rte_flow_error_set(error, ENOTSUP,
710 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
712 "pattern not supported for ingress");
717 * Validate vport item.
720 * Pointer to the rte_eth_dev structure.
722 * Item specification.
724 * Attributes of flow that includes this item.
725 * @param[in] item_flags
726 * Bit-fields that holds the items detected until now.
728 * Pointer to error structure.
731 * 0 on success, a negative errno value otherwise and rte_errno is set.
734 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
735 const struct rte_flow_item *item,
736 const struct rte_flow_attr *attr,
738 struct rte_flow_error *error)
740 const struct rte_flow_item_port_id *spec = item->spec;
741 const struct rte_flow_item_port_id *mask = item->mask;
742 const struct rte_flow_item_port_id switch_mask = {
745 uint16_t esw_domain_id;
746 uint16_t item_port_esw_domain_id;
750 return rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ITEM,
753 "match on port id is valid only"
754 " when transfer flag is enabled");
755 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
756 return rte_flow_error_set(error, ENOTSUP,
757 RTE_FLOW_ERROR_TYPE_ITEM, item,
758 "multiple source ports are not"
762 if (mask->id != 0xffffffff)
763 return rte_flow_error_set(error, ENOTSUP,
764 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
766 "no support for partial mask on"
768 ret = mlx5_flow_item_acceptable
769 (item, (const uint8_t *)mask,
770 (const uint8_t *)&rte_flow_item_port_id_mask,
771 sizeof(struct rte_flow_item_port_id),
777 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
780 return rte_flow_error_set(error, -ret,
781 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
782 "failed to obtain E-Switch info for"
784 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
785 &esw_domain_id, NULL);
787 return rte_flow_error_set(error, -ret,
788 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
790 "failed to obtain E-Switch info");
791 if (item_port_esw_domain_id != esw_domain_id)
792 return rte_flow_error_set(error, -ret,
793 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
794 "cannot match on a port from a"
795 " different E-Switch");
800 * Validate count action.
805 * Pointer to error structure.
808 * 0 on success, a negative errno value otherwise and rte_errno is set.
811 flow_dv_validate_action_count(struct rte_eth_dev *dev,
812 struct rte_flow_error *error)
814 struct mlx5_priv *priv = dev->data->dev_private;
816 if (!priv->config.devx)
818 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
822 return rte_flow_error_set
824 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
826 "count action not supported");
830 * Validate the L2 encap action.
832 * @param[in] action_flags
833 * Holds the actions detected until now.
835 * Pointer to the encap action.
837 * Pointer to flow attributes
839 * Pointer to error structure.
842 * 0 on success, a negative errno value otherwise and rte_errno is set.
845 flow_dv_validate_action_l2_encap(uint64_t action_flags,
846 const struct rte_flow_action *action,
847 const struct rte_flow_attr *attr,
848 struct rte_flow_error *error)
851 return rte_flow_error_set(error, EINVAL,
852 RTE_FLOW_ERROR_TYPE_ACTION, action,
853 "configuration cannot be null");
854 if (action_flags & MLX5_FLOW_ACTION_DROP)
855 return rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
857 "can't drop and encap in same flow");
858 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
859 return rte_flow_error_set(error, EINVAL,
860 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
861 "can only have a single encap or"
862 " decap action in a flow");
863 if (!attr->transfer && attr->ingress)
864 return rte_flow_error_set(error, ENOTSUP,
865 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
867 "encap action not supported for "
873 * Validate the L2 decap action.
875 * @param[in] action_flags
876 * Holds the actions detected until now.
878 * Pointer to flow attributes
880 * Pointer to error structure.
883 * 0 on success, a negative errno value otherwise and rte_errno is set.
886 flow_dv_validate_action_l2_decap(uint64_t action_flags,
887 const struct rte_flow_attr *attr,
888 struct rte_flow_error *error)
890 if (action_flags & MLX5_FLOW_ACTION_DROP)
891 return rte_flow_error_set(error, EINVAL,
892 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
893 "can't drop and decap in same flow");
894 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
895 return rte_flow_error_set(error, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
897 "can only have a single encap or"
898 " decap action in a flow");
899 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
900 return rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
902 "can't have decap action after"
905 return rte_flow_error_set(error, ENOTSUP,
906 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
908 "decap action not supported for "
914 * Validate the raw encap action.
916 * @param[in] action_flags
917 * Holds the actions detected until now.
919 * Pointer to the encap action.
921 * Pointer to flow attributes
923 * Pointer to error structure.
926 * 0 on success, a negative errno value otherwise and rte_errno is set.
929 flow_dv_validate_action_raw_encap(uint64_t action_flags,
930 const struct rte_flow_action *action,
931 const struct rte_flow_attr *attr,
932 struct rte_flow_error *error)
935 return rte_flow_error_set(error, EINVAL,
936 RTE_FLOW_ERROR_TYPE_ACTION, action,
937 "configuration cannot be null");
938 if (action_flags & MLX5_FLOW_ACTION_DROP)
939 return rte_flow_error_set(error, EINVAL,
940 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
941 "can't drop and encap in same flow");
942 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
943 return rte_flow_error_set(error, EINVAL,
944 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
945 "can only have a single encap"
946 " action in a flow");
947 /* encap without preceding decap is not supported for ingress */
948 if (!attr->transfer && attr->ingress &&
949 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
950 return rte_flow_error_set(error, ENOTSUP,
951 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
953 "encap action not supported for "
959 * Validate the raw decap action.
961 * @param[in] action_flags
962 * Holds the actions detected until now.
964 * Pointer to the encap action.
966 * Pointer to flow attributes
968 * Pointer to error structure.
971 * 0 on success, a negative errno value otherwise and rte_errno is set.
974 flow_dv_validate_action_raw_decap(uint64_t action_flags,
975 const struct rte_flow_action *action,
976 const struct rte_flow_attr *attr,
977 struct rte_flow_error *error)
979 if (action_flags & MLX5_FLOW_ACTION_DROP)
980 return rte_flow_error_set(error, EINVAL,
981 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
982 "can't drop and decap in same flow");
983 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
984 return rte_flow_error_set(error, EINVAL,
985 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
986 "can't have encap action before"
988 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
989 return rte_flow_error_set(error, EINVAL,
990 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
991 "can only have a single decap"
992 " action in a flow");
993 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
994 return rte_flow_error_set(error, EINVAL,
995 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
996 "can't have decap action after"
998 /* decap action is valid on egress only if it is followed by encap */
1000 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1001 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1004 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1005 return rte_flow_error_set
1007 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1008 NULL, "decap action not supported"
1015 * Find existing encap/decap resource or create and register a new one.
1017 * @param dev[in, out]
1018 * Pointer to rte_eth_dev structure.
1019 * @param[in, out] resource
1020 * Pointer to encap/decap resource.
1021 * @parm[in, out] dev_flow
1022 * Pointer to the dev_flow.
1024 * pointer to error structure.
1027 * 0 on success otherwise -errno and errno is set.
1030 flow_dv_encap_decap_resource_register
1031 (struct rte_eth_dev *dev,
1032 struct mlx5_flow_dv_encap_decap_resource *resource,
1033 struct mlx5_flow *dev_flow,
1034 struct rte_flow_error *error)
1036 struct mlx5_priv *priv = dev->data->dev_private;
1037 struct mlx5_ibv_shared *sh = priv->sh;
1038 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1039 struct rte_flow *flow = dev_flow->flow;
1040 struct mlx5dv_dr_domain *domain;
1042 resource->flags = flow->group ? 0 : 1;
1043 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1044 domain = sh->fdb_domain;
1045 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1046 domain = sh->rx_domain;
1048 domain = sh->tx_domain;
1050 /* Lookup a matching resource from cache. */
1051 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1052 if (resource->reformat_type == cache_resource->reformat_type &&
1053 resource->ft_type == cache_resource->ft_type &&
1054 resource->flags == cache_resource->flags &&
1055 resource->size == cache_resource->size &&
1056 !memcmp((const void *)resource->buf,
1057 (const void *)cache_resource->buf,
1059 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1060 (void *)cache_resource,
1061 rte_atomic32_read(&cache_resource->refcnt));
1062 rte_atomic32_inc(&cache_resource->refcnt);
1063 dev_flow->dv.encap_decap = cache_resource;
1067 /* Register new encap/decap resource. */
1068 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1069 if (!cache_resource)
1070 return rte_flow_error_set(error, ENOMEM,
1071 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1072 "cannot allocate resource memory");
1073 *cache_resource = *resource;
1074 cache_resource->verbs_action =
1075 mlx5_glue->dv_create_flow_action_packet_reformat
1076 (sh->ctx, cache_resource->reformat_type,
1077 cache_resource->ft_type, domain, cache_resource->flags,
1078 cache_resource->size,
1079 (cache_resource->size ? cache_resource->buf : NULL));
1080 if (!cache_resource->verbs_action) {
1081 rte_free(cache_resource);
1082 return rte_flow_error_set(error, ENOMEM,
1083 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1084 NULL, "cannot create action");
1086 rte_atomic32_init(&cache_resource->refcnt);
1087 rte_atomic32_inc(&cache_resource->refcnt);
1088 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1089 dev_flow->dv.encap_decap = cache_resource;
1090 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1091 (void *)cache_resource,
1092 rte_atomic32_read(&cache_resource->refcnt));
1097 * Find existing table jump resource or create and register a new one.
1099 * @param dev[in, out]
1100 * Pointer to rte_eth_dev structure.
1101 * @param[in, out] resource
1102 * Pointer to jump table resource.
1103 * @parm[in, out] dev_flow
1104 * Pointer to the dev_flow.
1106 * pointer to error structure.
1109 * 0 on success otherwise -errno and errno is set.
1112 flow_dv_jump_tbl_resource_register
1113 (struct rte_eth_dev *dev,
1114 struct mlx5_flow_dv_jump_tbl_resource *resource,
1115 struct mlx5_flow *dev_flow,
1116 struct rte_flow_error *error)
1118 struct mlx5_priv *priv = dev->data->dev_private;
1119 struct mlx5_ibv_shared *sh = priv->sh;
1120 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1122 /* Lookup a matching resource from cache. */
1123 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1124 if (resource->tbl == cache_resource->tbl) {
1125 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1126 (void *)cache_resource,
1127 rte_atomic32_read(&cache_resource->refcnt));
1128 rte_atomic32_inc(&cache_resource->refcnt);
1129 dev_flow->dv.jump = cache_resource;
1133 /* Register new jump table resource. */
1134 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1135 if (!cache_resource)
1136 return rte_flow_error_set(error, ENOMEM,
1137 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1138 "cannot allocate resource memory");
1139 *cache_resource = *resource;
1140 cache_resource->action =
1141 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1142 (resource->tbl->obj);
1143 if (!cache_resource->action) {
1144 rte_free(cache_resource);
1145 return rte_flow_error_set(error, ENOMEM,
1146 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1147 NULL, "cannot create action");
1149 rte_atomic32_init(&cache_resource->refcnt);
1150 rte_atomic32_inc(&cache_resource->refcnt);
1151 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1152 dev_flow->dv.jump = cache_resource;
1153 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1154 (void *)cache_resource,
1155 rte_atomic32_read(&cache_resource->refcnt));
1160 * Find existing table port ID resource or create and register a new one.
1162 * @param dev[in, out]
1163 * Pointer to rte_eth_dev structure.
1164 * @param[in, out] resource
1165 * Pointer to port ID action resource.
1166 * @parm[in, out] dev_flow
1167 * Pointer to the dev_flow.
1169 * pointer to error structure.
1172 * 0 on success otherwise -errno and errno is set.
/*
 * Look up a cached vport-destination action by port_id; create, register
 * and refcount a new one on cache miss.
 * NOTE(review): interior lines of this listing are elided (embedded line
 * numbers jump); comments describe only the visible statements.
 */
1175 flow_dv_port_id_action_resource_register
1176 (struct rte_eth_dev *dev,
1177 struct mlx5_flow_dv_port_id_action_resource *resource,
1178 struct mlx5_flow *dev_flow,
1179 struct rte_flow_error *error)
1181 struct mlx5_priv *priv = dev->data->dev_private;
1182 struct mlx5_ibv_shared *sh = priv->sh;
1183 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1185 /* Lookup a matching resource from cache. */
1186 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1187 if (resource->port_id == cache_resource->port_id) {
/* NOTE(review): duplicated word "resource resource" in the log text below
 * (runtime string, left untouched here) -- fix upstream. */
1188 DRV_LOG(DEBUG, "port id action resource resource %p: "
1190 (void *)cache_resource,
1191 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: take a reference and hand the cached action to the flow. */
1192 rte_atomic32_inc(&cache_resource->refcnt);
1193 dev_flow->dv.port_id_action = cache_resource;
1197 /* Register new port id action resource. */
1198 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1199 if (!cache_resource)
1200 return rte_flow_error_set(error, ENOMEM,
1201 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1202 "cannot allocate resource memory");
1203 *cache_resource = *resource;
/* Create the HW action; destination is the E-Switch vport for port_id. */
1204 cache_resource->action =
1205 mlx5_glue->dr_create_flow_action_dest_vport
1206 (priv->sh->fdb_domain, resource->port_id);
1207 if (!cache_resource->action) {
1208 rte_free(cache_resource);
1209 return rte_flow_error_set(error, ENOMEM,
1210 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1211 NULL, "cannot create action");
/* Start refcount at 1 (init + inc) and publish in the shared list. */
1213 rte_atomic32_init(&cache_resource->refcnt);
1214 rte_atomic32_inc(&cache_resource->refcnt);
1215 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1216 dev_flow->dv.port_id_action = cache_resource;
1217 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1218 (void *)cache_resource,
1219 rte_atomic32_read(&cache_resource->refcnt));
1224 * Get the size of a specific rte_flow_item_type.
1226 * @param[in] item_type
1227 * Tested rte_flow_item_type.
1230 * sizeof struct item_type, 0 if void or irrelevant.
/* Map a flow item type to the size (bytes) of its spec structure. */
1233 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1237 switch (item_type) {
1238 case RTE_FLOW_ITEM_TYPE_ETH:
1239 retval = sizeof(struct rte_flow_item_eth);
1241 case RTE_FLOW_ITEM_TYPE_VLAN:
1242 retval = sizeof(struct rte_flow_item_vlan);
1244 case RTE_FLOW_ITEM_TYPE_IPV4:
1245 retval = sizeof(struct rte_flow_item_ipv4);
1247 case RTE_FLOW_ITEM_TYPE_IPV6:
1248 retval = sizeof(struct rte_flow_item_ipv6);
1250 case RTE_FLOW_ITEM_TYPE_UDP:
1251 retval = sizeof(struct rte_flow_item_udp);
1253 case RTE_FLOW_ITEM_TYPE_TCP:
1254 retval = sizeof(struct rte_flow_item_tcp);
1256 case RTE_FLOW_ITEM_TYPE_VXLAN:
1257 retval = sizeof(struct rte_flow_item_vxlan);
1259 case RTE_FLOW_ITEM_TYPE_GRE:
1260 retval = sizeof(struct rte_flow_item_gre);
1262 case RTE_FLOW_ITEM_TYPE_NVGRE:
1263 retval = sizeof(struct rte_flow_item_nvgre);
1265 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1266 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1268 case RTE_FLOW_ITEM_TYPE_MPLS:
1269 retval = sizeof(struct rte_flow_item_mpls);
/* Per the header comment, VOID/irrelevant types yield 0. */
1271 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/* Default header-field values used when building encapsulation headers. */
1279 #define MLX5_ENCAP_IPV4_VERSION 0x40 /* Version 4 in high nibble of version_ihl. */
1280 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05 /* Minimal IHL: 5 words = 20 bytes. */
1281 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40 /* Default TTL = 64. */
1282 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 /* Version 6, zero TC / flow label. */
1283 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff /* Default hop limit = 255. */
1284 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 /* presumably the VXLAN I (VNI-valid) flag -- confirm bit layout. */
1285 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 /* presumably a VXLAN-GPE flags-byte value -- confirm against RFC draft. */
1288 * Convert the encap action data from list of rte_flow_item to raw buffer
1291 * Pointer to rte_flow_item objects list.
1293 * Pointer to the output buffer.
1295 * Pointer to the output buffer size.
1297 * Pointer to the error structure.
1300 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Serialize an item list into a raw encapsulation header buffer, filling
 * in defaulted fields (ether types, protocol numbers, versions, TTLs,
 * UDP destination ports) whenever the user left them zero.
 * NOTE(review): interior lines of this listing are elided (embedded line
 * numbers jump); comments describe only the visible statements.
 */
1303 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1304 size_t *size, struct rte_flow_error *error)
1306 struct rte_ether_hdr *eth = NULL;
1307 struct rte_vlan_hdr *vlan = NULL;
1308 struct rte_ipv4_hdr *ipv4 = NULL;
1309 struct rte_ipv6_hdr *ipv6 = NULL;
1310 struct rte_udp_hdr *udp = NULL;
1311 struct rte_vxlan_hdr *vxlan = NULL;
1312 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1313 struct rte_gre_hdr *gre = NULL;
1315 size_t temp_size = 0;
1318 return rte_flow_error_set(error, EINVAL,
1319 RTE_FLOW_ERROR_TYPE_ACTION,
1320 NULL, "invalid empty data");
1321 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1322 len = flow_dv_get_item_len(items->type);
/* Guard the fixed-size output buffer before copying this item's spec. */
1323 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1324 return rte_flow_error_set(error, EINVAL,
1325 RTE_FLOW_ERROR_TYPE_ACTION,
1326 (void *)items->type,
1327 "items total size is too big"
1328 " for encap action");
/* Copy the item spec verbatim; fixups below patch zeroed fields. */
1329 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1330 switch (items->type) {
1331 case RTE_FLOW_ITEM_TYPE_ETH:
1332 eth = (struct rte_ether_hdr *)&buf[temp_size];
1334 case RTE_FLOW_ITEM_TYPE_VLAN:
/* VLAN requires a preceding ETH item (error path visible below). */
1335 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1337 return rte_flow_error_set(error, EINVAL,
1338 RTE_FLOW_ERROR_TYPE_ACTION,
1339 (void *)items->type,
1340 "eth header not found");
1341 if (!eth->ether_type)
1342 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1344 case RTE_FLOW_ITEM_TYPE_IPV4:
1345 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1347 return rte_flow_error_set(error, EINVAL,
1348 RTE_FLOW_ERROR_TYPE_ACTION,
1349 (void *)items->type,
1350 "neither eth nor vlan"
/* Patch the innermost L2 ether type, then IPv4 version/IHL and TTL. */
1352 if (vlan && !vlan->eth_proto)
1353 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1354 else if (eth && !eth->ether_type)
1355 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1356 if (!ipv4->version_ihl)
1357 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1358 MLX5_ENCAP_IPV4_IHL_MIN;
1359 if (!ipv4->time_to_live)
1360 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1362 case RTE_FLOW_ITEM_TYPE_IPV6:
1363 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1365 return rte_flow_error_set(error, EINVAL,
1366 RTE_FLOW_ERROR_TYPE_ACTION,
1367 (void *)items->type,
1368 "neither eth nor vlan"
1370 if (vlan && !vlan->eth_proto)
1371 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1372 else if (eth && !eth->ether_type)
1373 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1374 if (!ipv6->vtc_flow)
1376 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1377 if (!ipv6->hop_limits)
1378 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1380 case RTE_FLOW_ITEM_TYPE_UDP:
/* UDP needs an IP header; set the L4 protocol number if zeroed. */
1381 udp = (struct rte_udp_hdr *)&buf[temp_size];
1383 return rte_flow_error_set(error, EINVAL,
1384 RTE_FLOW_ERROR_TYPE_ACTION,
1385 (void *)items->type,
1386 "ip header not found");
1387 if (ipv4 && !ipv4->next_proto_id)
1388 ipv4->next_proto_id = IPPROTO_UDP;
1389 else if (ipv6 && !ipv6->proto)
1390 ipv6->proto = IPPROTO_UDP;
1392 case RTE_FLOW_ITEM_TYPE_VXLAN:
1393 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1395 return rte_flow_error_set(error, EINVAL,
1396 RTE_FLOW_ERROR_TYPE_ACTION,
1397 (void *)items->type,
1398 "udp header not found");
/* Default the well-known VXLAN UDP destination port and flags. */
1400 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1401 if (!vxlan->vx_flags)
1403 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1405 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1406 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1408 return rte_flow_error_set(error, EINVAL,
1409 RTE_FLOW_ERROR_TYPE_ACTION,
1410 (void *)items->type,
1411 "udp header not found");
/* Unlike VXLAN, GPE must carry an explicit next-protocol value. */
1412 if (!vxlan_gpe->proto)
1413 return rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION,
1415 (void *)items->type,
1416 "next protocol not found");
1419 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1420 if (!vxlan_gpe->vx_flags)
1421 vxlan_gpe->vx_flags =
1422 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1424 case RTE_FLOW_ITEM_TYPE_GRE:
1425 case RTE_FLOW_ITEM_TYPE_NVGRE:
/* GRE/NVGRE: requires explicit next protocol and a preceding IP header. */
1426 gre = (struct rte_gre_hdr *)&buf[temp_size];
1428 return rte_flow_error_set(error, EINVAL,
1429 RTE_FLOW_ERROR_TYPE_ACTION,
1430 (void *)items->type,
1431 "next protocol not found");
1433 return rte_flow_error_set(error, EINVAL,
1434 RTE_FLOW_ERROR_TYPE_ACTION,
1435 (void *)items->type,
1436 "ip header not found");
1437 if (ipv4 && !ipv4->next_proto_id)
1438 ipv4->next_proto_id = IPPROTO_GRE;
1439 else if (ipv6 && !ipv6->proto)
1440 ipv6->proto = IPPROTO_GRE;
1442 case RTE_FLOW_ITEM_TYPE_VOID:
1445 return rte_flow_error_set(error, EINVAL,
1446 RTE_FLOW_ERROR_TYPE_ACTION,
1447 (void *)items->type,
1448 "unsupported item type");
/*
 * Walk an encap header buffer (ETH, optional VLAN tags, then L3) and, for
 * an IPv6/UDP outer header, clear the UDP checksum field; IPv4 is skipped
 * because HW computes its checksum (see comment at 1478).
 */
1458 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1460 struct rte_ether_hdr *eth = NULL;
1461 struct rte_vlan_hdr *vlan = NULL;
1462 struct rte_ipv6_hdr *ipv6 = NULL;
1463 struct rte_udp_hdr *udp = NULL;
1467 eth = (struct rte_ether_hdr *)data;
1468 next_hdr = (char *)(eth + 1);
/* RTE_BE16() is applied to a runtime value here, acting as a byte swap
 * to host order on little-endian CPUs -- worth a clarity pass upstream. */
1469 proto = RTE_BE16(eth->ether_type);
/* Skip stacked (QinQ) VLAN tags to reach the L3 header. */
1472 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
/* NOTE(review): next_hdr advances before the vlan cast below -- verify the
 * first VLAN header is not skipped (interior lines are elided here). */
1473 next_hdr += sizeof(struct rte_vlan_hdr);
1474 vlan = (struct rte_vlan_hdr *)next_hdr;
1475 proto = RTE_BE16(vlan->eth_proto);
1478 /* HW calculates IPv4 csum. no need to proceed */
1479 if (proto == RTE_ETHER_TYPE_IPV4)
1482 /* non IPv4/IPv6 header. not supported */
1483 if (proto != RTE_ETHER_TYPE_IPV6) {
1484 return rte_flow_error_set(error, ENOTSUP,
1485 RTE_FLOW_ERROR_TYPE_ACTION,
1486 NULL, "Cannot offload non IPv4/IPv6");
1489 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1491 /* ignore non UDP */
1492 if (ipv6->proto != IPPROTO_UDP)
/* Clear the UDP checksum field of the outer header. */
1495 udp = (struct rte_udp_hdr *)(ipv6 + 1);
1496 udp->dgram_cksum = 0;
1502 * Convert L2 encap action to DV specification.
1505 * Pointer to rte_eth_dev structure.
1507 * Pointer to action structure.
1508 * @param[in, out] dev_flow
1509 * Pointer to the mlx5_flow.
1510 * @param[in] transfer
1511 * Mark if the flow is E-Switch flow.
1513 * Pointer to the error structure.
1516 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build an L2-to-L2 tunnel (encap) reformat resource from either a
 * RAW_ENCAP buffer or a VXLAN/NVGRE item list, then register it.
 */
1519 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1520 const struct rte_flow_action *action,
1521 struct mlx5_flow *dev_flow,
1523 struct rte_flow_error *error)
1525 const struct rte_flow_item *encap_data;
1526 const struct rte_flow_action_raw_encap *raw_encap_data;
1527 struct mlx5_flow_dv_encap_decap_resource res = {
1529 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
/* E-Switch flows go to the FDB table; otherwise NIC TX (encap = egress). */
1530 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1531 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1534 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
/* RAW_ENCAP: user supplies the ready-made header bytes. */
1536 (const struct rte_flow_action_raw_encap *)action->conf;
1537 res.size = raw_encap_data->size;
1538 memcpy(res.buf, raw_encap_data->data, res.size);
1539 if (flow_dv_zero_encap_udp_csum(res.buf, error))
/* VXLAN/NVGRE: serialize the item-list definition into res.buf. */
1542 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1544 ((const struct rte_flow_action_vxlan_encap *)
1545 action->conf)->definition;
1548 ((const struct rte_flow_action_nvgre_encap *)
1549 action->conf)->definition;
1550 if (flow_dv_convert_encap_data(encap_data, res.buf,
1554 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1555 return rte_flow_error_set(error, EINVAL,
1556 RTE_FLOW_ERROR_TYPE_ACTION,
1557 NULL, "can't create L2 encap action");
1562 * Convert L2 decap action to DV specification.
1565 * Pointer to rte_eth_dev structure.
1566 * @param[in, out] dev_flow
1567 * Pointer to the mlx5_flow.
1568 * @param[in] transfer
1569 * Mark if the flow is E-Switch flow.
1571 * Pointer to the error structure.
1574 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build and register an L2-tunnel-to-L2 (decap) reformat resource; no
 * header buffer is needed for plain L2 decapsulation.
 */
1577 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1578 struct mlx5_flow *dev_flow,
1580 struct rte_flow_error *error)
1582 struct mlx5_flow_dv_encap_decap_resource res = {
1585 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
/* E-Switch flows use the FDB table; otherwise NIC RX (decap = ingress). */
1586 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1587 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1590 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1591 return rte_flow_error_set(error, EINVAL,
1592 RTE_FLOW_ERROR_TYPE_ACTION,
1593 NULL, "can't create L2 decap action");
1598 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1601 * Pointer to rte_eth_dev structure.
1603 * Pointer to action structure.
1604 * @param[in, out] dev_flow
1605 * Pointer to the mlx5_flow.
1607 * Pointer to the flow attributes.
1609 * Pointer to the error structure.
1612 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build and register an L3-tunnel reformat resource from a RAW_ENCAP
 * buffer; direction (egress/ingress) selects encap vs. decap reformat.
 */
1615 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1616 const struct rte_flow_action *action,
1617 struct mlx5_flow *dev_flow,
1618 const struct rte_flow_attr *attr,
1619 struct rte_flow_error *error)
1621 const struct rte_flow_action_raw_encap *encap_data;
/* NOTE(review): res is not zero-initialized here, unlike the designated
 * initializers in the sibling l2_encap/l2_decap helpers -- confirm every
 * field consumed by the register helper is assigned (lines are elided). */
1622 struct mlx5_flow_dv_encap_decap_resource res;
1624 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1625 res.size = encap_data->size;
1626 memcpy(res.buf, encap_data->data, res.size);
/* Egress: wrap L2 into an L3 tunnel; ingress: strip the L3 tunnel. */
1627 res.reformat_type = attr->egress ?
1628 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1629 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1631 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1633 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1634 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1635 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1636 return rte_flow_error_set(error, EINVAL,
1637 RTE_FLOW_ERROR_TYPE_ACTION,
1638 NULL, "can't create encap action");
1643 * Validate the modify-header actions.
1645 * @param[in] action_flags
1646 * Holds the actions detected until now.
1648 * Pointer to the modify action.
1650 * Pointer to error structure.
1653 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Common checks for all modify-header actions: a configuration must be
 * present (except DEC_TTL, which takes none) and no encap action may
 * precede a header modification.
 */
1656 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1657 const struct rte_flow_action *action,
1658 struct rte_flow_error *error)
1660 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1661 return rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1663 NULL, "action configuration not set");
1664 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1665 return rte_flow_error_set(error, EINVAL,
1666 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1667 "can't have encap action before"
1673 * Validate the modify-header MAC address actions.
1675 * @param[in] action_flags
1676 * Holds the actions detected until now.
1678 * Pointer to the modify action.
1679 * @param[in] item_flags
1680 * Holds the items detected.
1682 * Pointer to error structure.
1685 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_MAC_SRC/DST: common modify-header checks plus requiring an
 * L2 (ethernet) item in the pattern.
 */
1688 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1689 const struct rte_flow_action *action,
1690 const uint64_t item_flags,
1691 struct rte_flow_error *error)
1695 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1697 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1698 return rte_flow_error_set(error, EINVAL,
1699 RTE_FLOW_ERROR_TYPE_ACTION,
1701 "no L2 item in pattern");
1707 * Validate the modify-header IPv4 address actions.
1709 * @param[in] action_flags
1710 * Holds the actions detected until now.
1712 * Pointer to the modify action.
1713 * @param[in] item_flags
1714 * Holds the items detected.
1716 * Pointer to error structure.
1719 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_IPV4_SRC/DST: common modify-header checks plus requiring
 * an IPv4 item in the pattern.
 */
1722 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1723 const struct rte_flow_action *action,
1724 const uint64_t item_flags,
1725 struct rte_flow_error *error)
1729 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1731 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1732 return rte_flow_error_set(error, EINVAL,
1733 RTE_FLOW_ERROR_TYPE_ACTION,
1735 "no ipv4 item in pattern");
1741 * Validate the modify-header IPv6 address actions.
1743 * @param[in] action_flags
1744 * Holds the actions detected until now.
1746 * Pointer to the modify action.
1747 * @param[in] item_flags
1748 * Holds the items detected.
1750 * Pointer to error structure.
1753 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_IPV6_SRC/DST: common modify-header checks plus requiring
 * an IPv6 item in the pattern.
 */
1756 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1757 const struct rte_flow_action *action,
1758 const uint64_t item_flags,
1759 struct rte_flow_error *error)
1763 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1765 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1766 return rte_flow_error_set(error, EINVAL,
1767 RTE_FLOW_ERROR_TYPE_ACTION,
1769 "no ipv6 item in pattern");
1775 * Validate the modify-header TP actions.
1777 * @param[in] action_flags
1778 * Holds the actions detected until now.
1780 * Pointer to the modify action.
1781 * @param[in] item_flags
1782 * Holds the items detected.
1784 * Pointer to error structure.
1787 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_TP_SRC/DST (transport port): common modify-header checks
 * plus requiring an L4 item in the pattern.
 */
1790 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1791 const struct rte_flow_action *action,
1792 const uint64_t item_flags,
1793 struct rte_flow_error *error)
1797 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1799 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1800 return rte_flow_error_set(error, EINVAL,
1801 RTE_FLOW_ERROR_TYPE_ACTION,
1802 NULL, "no transport layer "
1809 * Validate the modify-header actions of increment/decrement
1810 * TCP Sequence-number.
1812 * @param[in] action_flags
1813 * Holds the actions detected until now.
1815 * Pointer to the modify action.
1816 * @param[in] item_flags
1817 * Holds the items detected.
1819 * Pointer to error structure.
1822 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate INC/DEC_TCP_SEQ: requires an outer TCP item, and rejects
 * combining an increment with a decrement of the sequence number.
 */
1825 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1826 const struct rte_flow_action *action,
1827 const uint64_t item_flags,
1828 struct rte_flow_error *error)
1832 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1834 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1835 return rte_flow_error_set(error, EINVAL,
1836 RTE_FLOW_ERROR_TYPE_ACTION,
1837 NULL, "no TCP item in"
/* The opposite-direction action must not already be in the flow. */
1839 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1840 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1841 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1842 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1843 return rte_flow_error_set(error, EINVAL,
1844 RTE_FLOW_ERROR_TYPE_ACTION,
1846 "cannot decrease and increase"
1847 " TCP sequence number"
1848 " at the same time");
1854 * Validate the modify-header actions of increment/decrement
1855 * TCP Acknowledgment number.
1857 * @param[in] action_flags
1858 * Holds the actions detected until now.
1860 * Pointer to the modify action.
1861 * @param[in] item_flags
1862 * Holds the items detected.
1864 * Pointer to error structure.
1867 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate INC/DEC_TCP_ACK: requires an outer TCP item, and rejects
 * combining an increment with a decrement of the acknowledgment number.
 */
1870 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1871 const struct rte_flow_action *action,
1872 const uint64_t item_flags,
1873 struct rte_flow_error *error)
1877 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1879 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1880 return rte_flow_error_set(error, EINVAL,
1881 RTE_FLOW_ERROR_TYPE_ACTION,
1882 NULL, "no TCP item in"
/* The opposite-direction action must not already be in the flow. */
1884 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1885 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1886 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1887 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1888 return rte_flow_error_set(error, EINVAL,
1889 RTE_FLOW_ERROR_TYPE_ACTION,
1891 "cannot decrease and increase"
1892 " TCP acknowledgment number"
1893 " at the same time");
1899 * Validate the modify-header TTL actions.
1901 * @param[in] action_flags
1902 * Holds the actions detected until now.
1904 * Pointer to the modify action.
1905 * @param[in] item_flags
1906 * Holds the items detected.
1908 * Pointer to error structure.
1911 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate SET_TTL/DEC_TTL: common modify-header checks plus requiring an
 * L3 item in the pattern.
 */
1914 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1915 const struct rte_flow_action *action,
1916 const uint64_t item_flags,
1917 struct rte_flow_error *error)
1921 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1923 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1924 return rte_flow_error_set(error, EINVAL,
1925 RTE_FLOW_ERROR_TYPE_ACTION,
1927 "no IP protocol in pattern");
1933 * Validate jump action.
1936 * Pointer to the modify action.
1938 * The group of the current flow.
1940 * Pointer to error structure.
1943 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the JUMP action: configuration must be set and the target
 * group must be strictly greater than the current flow's group.
 */
1946 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1948 struct rte_flow_error *error)
1950 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1951 return rte_flow_error_set(error, EINVAL,
1952 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1953 NULL, "action configuration not set");
1954 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1955 return rte_flow_error_set(error, EINVAL,
1956 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
/* NOTE(review): "higher then" should read "higher than" in the
 * user-visible message below (runtime string, left untouched here). */
1957 "target group must be higher then"
1958 " the current flow group");
1963 * Validate the port_id action.
1966 * Pointer to rte_eth_dev structure.
1967 * @param[in] action_flags
1968 * Bit-fields that holds the actions detected until now.
1970 * Port_id RTE action structure.
1972 * Attributes of flow that includes this action.
1974 * Pointer to error structure.
1977 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the PORT_ID action: only valid on transfer (E-Switch) flows,
 * needs a configuration, is a fate action (at most one per flow), and the
 * target port must belong to the same E-Switch domain as this device.
 */
1980 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1981 uint64_t action_flags,
1982 const struct rte_flow_action *action,
1983 const struct rte_flow_attr *attr,
1984 struct rte_flow_error *error)
1986 const struct rte_flow_action_port_id *port_id;
1988 uint16_t esw_domain_id;
1989 uint16_t act_port_domain_id;
1992 if (!attr->transfer)
1993 return rte_flow_error_set(error, ENOTSUP,
1994 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1996 "port id action is valid in transfer"
1998 if (!action || !action->conf)
1999 return rte_flow_error_set(error, ENOTSUP,
2000 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2002 "port id action parameters must be"
/* PORT_ID is a fate action -- reject if another fate action exists. */
2004 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2005 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2006 return rte_flow_error_set(error, EINVAL,
2007 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2008 "can have only one fate actions in"
/* Resolve this device's E-Switch domain for the comparison below. */
2010 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2011 &esw_domain_id, NULL);
2013 return rte_flow_error_set(error, -ret,
2014 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2016 "failed to obtain E-Switch info");
2017 port_id = action->conf;
/* 'original' selects this device's own port instead of port_id->id. */
2018 port = port_id->original ? dev->data->port_id : port_id->id;
2019 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2021 return rte_flow_error_set
2023 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2024 "failed to obtain E-Switch port id for port");
2025 if (act_port_domain_id != esw_domain_id)
2026 return rte_flow_error_set
2028 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2029 "port does not belong to"
2030 " E-Switch being configured");
2035 * Find existing modify-header resource or create and register a new one.
2037 * @param[in, out] dev
2038 * Pointer to rte_eth_dev structure.
2039 * @param[in, out] resource
2040 * Pointer to modify-header resource.
2041 * @param[in, out] dev_flow
2042 * Pointer to the dev_flow.
2044 * pointer to error structure.
2047 * 0 on success otherwise -errno and errno is set.
/*
 * Look up a cached modify-header action matching ft_type, action count,
 * flags and the action array; create, register and refcount a new one on
 * cache miss.
 * NOTE(review): interior lines of this listing are elided (embedded line
 * numbers jump); comments describe only the visible statements.
 */
2050 flow_dv_modify_hdr_resource_register
2051 (struct rte_eth_dev *dev,
2052 struct mlx5_flow_dv_modify_hdr_resource *resource,
2053 struct mlx5_flow *dev_flow,
2054 struct rte_flow_error *error)
2056 struct mlx5_priv *priv = dev->data->dev_private;
2057 struct mlx5_ibv_shared *sh = priv->sh;
2058 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2059 struct mlx5dv_dr_domain *ns;
/* Pick the DR domain matching the table type (FDB / TX; RX branch is
 * elided here -- confirm ns is assigned on every path). */
2061 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2062 ns = sh->fdb_domain;
2063 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
/* Group 0 is the root table; root-level actions need the ROOT flag. */
2068 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2069 /* Lookup a matching resource from cache. */
2070 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2071 if (resource->ft_type == cache_resource->ft_type &&
2072 resource->actions_num == cache_resource->actions_num &&
2073 resource->flags == cache_resource->flags &&
2074 !memcmp((const void *)resource->actions,
2075 (const void *)cache_resource->actions,
2076 (resource->actions_num *
2077 sizeof(resource->actions[0])))) {
2078 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2079 (void *)cache_resource,
2080 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: take a reference and reuse the existing verbs action. */
2081 rte_atomic32_inc(&cache_resource->refcnt);
2082 dev_flow->dv.modify_hdr = cache_resource;
2086 /* Register new modify-header resource. */
2087 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2088 if (!cache_resource)
2089 return rte_flow_error_set(error, ENOMEM,
2090 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2091 "cannot allocate resource memory");
2092 *cache_resource = *resource;
2093 cache_resource->verbs_action =
2094 mlx5_glue->dv_create_flow_action_modify_header
2095 (sh->ctx, cache_resource->ft_type,
2096 ns, cache_resource->flags,
2097 cache_resource->actions_num *
2098 sizeof(cache_resource->actions[0]),
2099 (uint64_t *)cache_resource->actions);
2100 if (!cache_resource->verbs_action) {
2101 rte_free(cache_resource);
2102 return rte_flow_error_set(error, ENOMEM,
2103 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2104 NULL, "cannot create action");
/* Start refcount at 1 (init + inc) and publish in the shared list. */
2106 rte_atomic32_init(&cache_resource->refcnt);
2107 rte_atomic32_inc(&cache_resource->refcnt);
2108 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2109 dev_flow->dv.modify_hdr = cache_resource;
2110 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2111 (void *)cache_resource,
2112 rte_atomic32_read(&cache_resource->refcnt));
2117 * Get or create a flow counter.
2120 * Pointer to the Ethernet device structure.
2122 * Indicate if this counter is shared with other flows.
2124 * Counter identifier.
2127 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
/*
 * Get (for shared counters, by id) or create a DevX flow counter.
 * Requires DevX support (checked at 2137). Interior lines of this listing
 * are elided; comments describe only the visible statements.
 */
2129 static struct mlx5_flow_counter *
2130 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
2132 struct mlx5_priv *priv = dev->data->dev_private;
2133 struct mlx5_flow_counter *cnt = NULL;
2134 struct mlx5_devx_counter_set *dcs = NULL;
2137 if (!priv->config.devx) {
/* Reuse an existing shared counter with a matching id. */
2142 LIST_FOREACH(cnt, &priv->flow_counters, next) {
2143 if (cnt->shared && cnt->id == id) {
2149 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2150 dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
/* Allocate the HW counter set through DevX, then the DR counter action. */
2155 ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
2158 struct mlx5_flow_counter tmpl = {
2164 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2170 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
2180 * Release a flow counter.
2182 * @param[in] counter
2183 * Pointer to the counter handler.
/*
 * Drop one reference on a flow counter; on the last reference free the
 * DevX counter set, unlink the counter and release its memory.
 */
2186 flow_dv_counter_release(struct mlx5_flow_counter *counter)
2192 if (--counter->ref_cnt == 0) {
/* Best-effort HW free: failures are logged, teardown continues. */
2193 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
2195 DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
2196 LIST_REMOVE(counter, next);
2197 rte_free(counter->dcs);
2203 * Verify the @p attributes will be correctly understood by the NIC and store
2204 * them in the @p flow if everything is correct.
2207 * Pointer to dev struct.
2208 * @param[in] attributes
2209 * Pointer to flow attributes
2211 * Pointer to error structure.
2214 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate flow attributes: group support, priority range, E-Switch
 * (transfer) constraints, and the ingress-xor-egress requirement.
 */
2217 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2218 const struct rte_flow_attr *attributes,
2219 struct rte_flow_error *error)
2221 struct mlx5_priv *priv = dev->data->dev_private;
2222 uint32_t priority_max = priv->config.flow_prio - 1;
2224 #ifndef HAVE_MLX5DV_DR
/* Without Direct Rules only group 0 is available. */
2225 if (attributes->group)
2226 return rte_flow_error_set(error, ENOTSUP,
2227 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2229 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a sentinel meaning "driver picks the priority". */
2231 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2232 attributes->priority >= priority_max)
2233 return rte_flow_error_set(error, ENOTSUP,
2234 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2236 "priority out of range");
2237 if (attributes->transfer) {
2238 if (!priv->config.dv_esw_en)
2239 return rte_flow_error_set
2241 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2242 "E-Switch dr is not supported");
2243 if (!(priv->representor || priv->master))
2244 return rte_flow_error_set
2245 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
/* NOTE(review): "configurationd" typo in the user-visible message below
 * (runtime string, left untouched here). */
2246 NULL, "E-Switch configurationd can only be"
2247 " done by a master or a representor device");
2248 if (attributes->egress)
2249 return rte_flow_error_set
2251 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2252 "egress is not supported");
/* NOTE(review): the limit checked is MLX5_MAX_TABLES_FDB but the message
 * stringifies MLX5_MAX_FDB_TABLES -- confirm which macro exists and make
 * the two consistent upstream. */
2253 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2254 return rte_flow_error_set
2256 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2257 NULL, "group must be smaller than "
2258 RTE_STR(MLX5_MAX_FDB_TABLES));
/* Non-transfer flows must be exactly one of ingress or egress. */
2260 if (!(attributes->egress ^ attributes->ingress))
2261 return rte_flow_error_set(error, ENOTSUP,
2262 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2263 "must specify exactly one of "
2264 "ingress or egress");
2269 * Internal validation function. For validating both actions and items.
2272 * Pointer to the rte_eth_dev structure.
2274 * Pointer to the flow attributes.
2276 * Pointer to the list of items.
2277 * @param[in] actions
2278 * Pointer to the list of actions.
2280 * Pointer to the error structure.
2283 * 0 on success, a negative errno value otherwise and rte_errno is set.
2286 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2287 const struct rte_flow_item items[],
2288 const struct rte_flow_action actions[],
2289 struct rte_flow_error *error)
2292 uint64_t action_flags = 0;
2293 uint64_t item_flags = 0;
2294 uint64_t last_item = 0;
2295 uint8_t next_protocol = 0xff;
2297 const struct rte_flow_item *gre_item = NULL;
2298 struct rte_flow_item_tcp nic_tcp_mask = {
2301 .src_port = RTE_BE16(UINT16_MAX),
2302 .dst_port = RTE_BE16(UINT16_MAX),
2308 ret = flow_dv_validate_attributes(dev, attr, error);
2311 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2312 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2313 switch (items->type) {
2314 case RTE_FLOW_ITEM_TYPE_VOID:
2316 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2317 ret = flow_dv_validate_item_port_id
2318 (dev, items, attr, item_flags, error);
2321 last_item |= MLX5_FLOW_ITEM_PORT_ID;
2323 case RTE_FLOW_ITEM_TYPE_ETH:
2324 ret = mlx5_flow_validate_item_eth(items, item_flags,
2328 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2329 MLX5_FLOW_LAYER_OUTER_L2;
2331 case RTE_FLOW_ITEM_TYPE_VLAN:
2332 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2336 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2337 MLX5_FLOW_LAYER_OUTER_VLAN;
2339 case RTE_FLOW_ITEM_TYPE_IPV4:
2340 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2344 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2345 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2346 if (items->mask != NULL &&
2347 ((const struct rte_flow_item_ipv4 *)
2348 items->mask)->hdr.next_proto_id) {
2350 ((const struct rte_flow_item_ipv4 *)
2351 (items->spec))->hdr.next_proto_id;
2353 ((const struct rte_flow_item_ipv4 *)
2354 (items->mask))->hdr.next_proto_id;
2356 /* Reset for inner layer. */
2357 next_protocol = 0xff;
2360 case RTE_FLOW_ITEM_TYPE_IPV6:
2361 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2365 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2366 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2367 if (items->mask != NULL &&
2368 ((const struct rte_flow_item_ipv6 *)
2369 items->mask)->hdr.proto) {
2371 ((const struct rte_flow_item_ipv6 *)
2372 items->spec)->hdr.proto;
2374 ((const struct rte_flow_item_ipv6 *)
2375 items->mask)->hdr.proto;
2377 /* Reset for inner layer. */
2378 next_protocol = 0xff;
2381 case RTE_FLOW_ITEM_TYPE_TCP:
2382 ret = mlx5_flow_validate_item_tcp
2389 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2390 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2392 case RTE_FLOW_ITEM_TYPE_UDP:
2393 ret = mlx5_flow_validate_item_udp(items, item_flags,
2398 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2399 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2401 case RTE_FLOW_ITEM_TYPE_GRE:
2402 case RTE_FLOW_ITEM_TYPE_NVGRE:
2403 ret = mlx5_flow_validate_item_gre(items, item_flags,
2404 next_protocol, error);
2408 last_item = MLX5_FLOW_LAYER_GRE;
2410 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2411 ret = mlx5_flow_validate_item_gre_key
2412 (items, item_flags, gre_item, error);
2415 item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
2417 case RTE_FLOW_ITEM_TYPE_VXLAN:
2418 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2422 last_item = MLX5_FLOW_LAYER_VXLAN;
2424 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2425 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2430 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2432 case RTE_FLOW_ITEM_TYPE_MPLS:
2433 ret = mlx5_flow_validate_item_mpls(dev, items,
2438 last_item = MLX5_FLOW_LAYER_MPLS;
2440 case RTE_FLOW_ITEM_TYPE_META:
2441 ret = flow_dv_validate_item_meta(dev, items, attr,
2445 last_item = MLX5_FLOW_ITEM_METADATA;
2447 case RTE_FLOW_ITEM_TYPE_ICMP:
2448 ret = mlx5_flow_validate_item_icmp(items, item_flags,
2453 item_flags |= MLX5_FLOW_LAYER_ICMP;
2455 case RTE_FLOW_ITEM_TYPE_ICMP6:
2456 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
2461 item_flags |= MLX5_FLOW_LAYER_ICMP6;
2464 return rte_flow_error_set(error, ENOTSUP,
2465 RTE_FLOW_ERROR_TYPE_ITEM,
2466 NULL, "item not supported");
2468 item_flags |= last_item;
2470 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2471 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2472 return rte_flow_error_set(error, ENOTSUP,
2473 RTE_FLOW_ERROR_TYPE_ACTION,
2474 actions, "too many actions");
2475 switch (actions->type) {
2476 case RTE_FLOW_ACTION_TYPE_VOID:
2478 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2479 ret = flow_dv_validate_action_port_id(dev,
2486 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2489 case RTE_FLOW_ACTION_TYPE_FLAG:
2490 ret = mlx5_flow_validate_action_flag(action_flags,
2494 action_flags |= MLX5_FLOW_ACTION_FLAG;
2497 case RTE_FLOW_ACTION_TYPE_MARK:
2498 ret = mlx5_flow_validate_action_mark(actions,
2503 action_flags |= MLX5_FLOW_ACTION_MARK;
2506 case RTE_FLOW_ACTION_TYPE_DROP:
2507 ret = mlx5_flow_validate_action_drop(action_flags,
2511 action_flags |= MLX5_FLOW_ACTION_DROP;
2514 case RTE_FLOW_ACTION_TYPE_QUEUE:
2515 ret = mlx5_flow_validate_action_queue(actions,
2520 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2523 case RTE_FLOW_ACTION_TYPE_RSS:
2524 ret = mlx5_flow_validate_action_rss(actions,
2530 action_flags |= MLX5_FLOW_ACTION_RSS;
2533 case RTE_FLOW_ACTION_TYPE_COUNT:
2534 ret = flow_dv_validate_action_count(dev, error);
2537 action_flags |= MLX5_FLOW_ACTION_COUNT;
2540 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2541 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2542 ret = flow_dv_validate_action_l2_encap(action_flags,
2547 action_flags |= actions->type ==
2548 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2549 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2550 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2553 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2554 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2555 ret = flow_dv_validate_action_l2_decap(action_flags,
2559 action_flags |= actions->type ==
2560 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2561 MLX5_FLOW_ACTION_VXLAN_DECAP :
2562 MLX5_FLOW_ACTION_NVGRE_DECAP;
2565 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2566 ret = flow_dv_validate_action_raw_encap(action_flags,
2571 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2574 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2575 ret = flow_dv_validate_action_raw_decap(action_flags,
2580 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2583 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2584 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2585 ret = flow_dv_validate_action_modify_mac(action_flags,
2591 /* Count all modify-header actions as one action. */
2592 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2594 action_flags |= actions->type ==
2595 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2596 MLX5_FLOW_ACTION_SET_MAC_SRC :
2597 MLX5_FLOW_ACTION_SET_MAC_DST;
2600 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2601 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2602 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2608 /* Count all modify-header actions as one action. */
2609 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2611 action_flags |= actions->type ==
2612 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2613 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2614 MLX5_FLOW_ACTION_SET_IPV4_DST;
2616 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2617 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2618 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2624 /* Count all modify-header actions as one action. */
2625 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2627 action_flags |= actions->type ==
2628 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2629 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2630 MLX5_FLOW_ACTION_SET_IPV6_DST;
2632 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2633 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2634 ret = flow_dv_validate_action_modify_tp(action_flags,
2640 /* Count all modify-header actions as one action. */
2641 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2643 action_flags |= actions->type ==
2644 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2645 MLX5_FLOW_ACTION_SET_TP_SRC :
2646 MLX5_FLOW_ACTION_SET_TP_DST;
2648 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2649 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2650 ret = flow_dv_validate_action_modify_ttl(action_flags,
2656 /* Count all modify-header actions as one action. */
2657 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2659 action_flags |= actions->type ==
2660 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2661 MLX5_FLOW_ACTION_SET_TTL :
2662 MLX5_FLOW_ACTION_DEC_TTL;
2664 case RTE_FLOW_ACTION_TYPE_JUMP:
2665 ret = flow_dv_validate_action_jump(actions,
2666 attr->group, error);
2670 action_flags |= MLX5_FLOW_ACTION_JUMP;
2672 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
2673 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
2674 ret = flow_dv_validate_action_modify_tcp_seq
2681 /* Count all modify-header actions as one action. */
2682 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2684 action_flags |= actions->type ==
2685 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
2686 MLX5_FLOW_ACTION_INC_TCP_SEQ :
2687 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
2689 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
2690 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
2691 ret = flow_dv_validate_action_modify_tcp_ack
2698 /* Count all modify-header actions as one action. */
2699 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2701 action_flags |= actions->type ==
2702 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
2703 MLX5_FLOW_ACTION_INC_TCP_ACK :
2704 MLX5_FLOW_ACTION_DEC_TCP_ACK;
2707 return rte_flow_error_set(error, ENOTSUP,
2708 RTE_FLOW_ERROR_TYPE_ACTION,
2710 "action not supported");
2713 /* Eswitch has few restrictions on using items and actions */
2714 if (attr->transfer) {
2715 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2716 return rte_flow_error_set(error, ENOTSUP,
2717 RTE_FLOW_ERROR_TYPE_ACTION,
2719 "unsupported action FLAG");
2720 if (action_flags & MLX5_FLOW_ACTION_MARK)
2721 return rte_flow_error_set(error, ENOTSUP,
2722 RTE_FLOW_ERROR_TYPE_ACTION,
2724 "unsupported action MARK");
2725 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2726 return rte_flow_error_set(error, ENOTSUP,
2727 RTE_FLOW_ERROR_TYPE_ACTION,
2729 "unsupported action QUEUE");
2730 if (action_flags & MLX5_FLOW_ACTION_RSS)
2731 return rte_flow_error_set(error, ENOTSUP,
2732 RTE_FLOW_ERROR_TYPE_ACTION,
2734 "unsupported action RSS");
2735 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2736 return rte_flow_error_set(error, EINVAL,
2737 RTE_FLOW_ERROR_TYPE_ACTION,
2739 "no fate action is found");
2741 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2742 return rte_flow_error_set(error, EINVAL,
2743 RTE_FLOW_ERROR_TYPE_ACTION,
2745 "no fate action is found");
2751 * Internal preparation function. Allocates the DV flow size,
2752 * this size is constant.
2755 * Pointer to the flow attributes.
2757 * Pointer to the list of items.
2758 * @param[in] actions
2759 * Pointer to the list of actions.
2761 * Pointer to the error structure.
2764 * Pointer to mlx5_flow object on success,
2765 * otherwise NULL and rte_errno is set.
/*
 * Allocate a zeroed mlx5_flow object of constant size for the DV path and
 * preset the match-value buffer size to the full fte_match_param layout.
 * On allocation failure, sets an ENOMEM rte_flow error.
 * NOTE(review): partial listing — the NULL check around the error-set call
 * and the final return presumably sit in the numbering gaps; confirm
 * against the full file.
 */
2767 static struct mlx5_flow *
2768 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2769 const struct rte_flow_item items[] __rte_unused,
2770 const struct rte_flow_action actions[] __rte_unused,
2771 struct rte_flow_error *error)
2773 uint32_t size = sizeof(struct mlx5_flow);
2774 struct mlx5_flow *flow;
2776 flow = rte_calloc(__func__, 1, size, 0); /* zeroed allocation */
2778 rte_flow_error_set(error, ENOMEM,
2779 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780 "not enough memory to create flow");
2783 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2789 * Sanity check for match mask and value. Similar to check_valid_spec() in
2790 * kernel driver. If unmasked bit is present in value, it returns failure.
2793 * pointer to match mask buffer.
2794 * @param match_value
2795 * pointer to match value buffer.
2798 * 0 if valid, -EINVAL otherwise.
/*
 * Byte-wise sanity check that every bit set in the match value is also set
 * in the match mask (mirrors the kernel driver's check_valid_spec()).
 * Iterates over the whole fte_match_param area; logs the offending offset.
 * NOTE(review): partial listing — the per-byte comparison and the return
 * statements fall in the numbering gaps.
 */
2801 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2803 uint8_t *m = match_mask;
2804 uint8_t *v = match_value;
2807 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2810 "match_value differs from match_criteria"
2811 " %p[%u] != %p[%u]",
2812 match_value, i, match_mask, i);
2821 * Add Ethernet item to matcher and to the value.
2823 * @param[in, out] matcher
2825 * @param[in, out] key
2826 * Flow matcher value.
2828 * Flow pattern to translate.
2830 * Item is inner pattern.
/*
 * Translate an ETH flow item into the matcher mask and value buffers.
 * Selects inner/outer headers based on @inner; value bytes are masked so
 * they never exceed the mask (required by the HW/kernel contract).
 * NOTE(review): "ð_m" below is a mis-encoded "&eth_m" (HTML-entity
 * mojibake) — repair the file encoding.
 */
2833 flow_dv_translate_item_eth(void *matcher, void *key,
2834 const struct rte_flow_item *item, int inner)
2836 const struct rte_flow_item_eth *eth_m = item->mask;
2837 const struct rte_flow_item_eth *eth_v = item->spec;
2838 const struct rte_flow_item_eth nic_mask = { /* default: match all fields */
2839 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2840 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2841 .type = RTE_BE16(0xffff),
2853 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2855 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2857 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2859 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2861 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2862 ð_m->dst, sizeof(eth_m->dst));
2863 /* The value must be in the range of the mask. */
2864 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2865 for (i = 0; i < sizeof(eth_m->dst); ++i)
2866 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2867 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2868 ð_m->src, sizeof(eth_m->src));
2869 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2870 /* The value must be in the range of the mask. */
/* NOTE(review): loop bound reuses sizeof(eth_m->dst) for the SOURCE MAC;
 * both are 6 bytes so behavior is correct, but sizeof(eth_m->src) would
 * be the self-documenting choice. */
2871 for (i = 0; i < sizeof(eth_m->dst); ++i)
2872 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2873 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2874 rte_be_to_cpu_16(eth_m->type));
2875 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2876 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type; /* stays big-endian */
2880 * Add VLAN item to matcher and to the value.
2882 * @param[in, out] matcher
2884 * @param[in, out] key
2885 * Flow matcher value.
2887 * Flow pattern to translate.
2889 * Item is inner pattern.
/*
 * Translate a VLAN flow item: always asserts cvlan_tag presence, then
 * splits the 16-bit TCI into VID (bits 0-11), CFI (bit 12) and PCP
 * (bits 13-15) matcher fields.
 * NOTE(review): partial listing — local declarations and the
 * inner/outer branch structure fall in the numbering gaps.
 */
2892 flow_dv_translate_item_vlan(void *matcher, void *key,
2893 const struct rte_flow_item *item,
2896 const struct rte_flow_item_vlan *vlan_m = item->mask;
2897 const struct rte_flow_item_vlan *vlan_v = item->spec;
2898 const struct rte_flow_item_vlan nic_mask = { /* default mask */
2899 .tci = RTE_BE16(0x0fff), /* VID bits only */
2900 .inner_type = RTE_BE16(0xffff),
2912 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2914 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2916 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2918 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2920 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2921 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); /* value &= mask */
2922 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2923 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2924 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m)/* bits 0-11 */;
2925 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2926 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2927 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2928 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2929 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2933 * Add IPV4 item to matcher and to the value.
2935 * @param[in, out] matcher
2937 * @param[in, out] key
2938 * Flow matcher value.
2940 * Flow pattern to translate.
2942 * Item is inner pattern.
2944 * The group to insert the rule.
/*
 * Translate an IPv4 flow item: sets ip_version matching (exact 4 for
 * non-root groups, per the visible group check), then src/dst addresses,
 * ToS split into ECN (low 2 bits) and DSCP (high 6 bits), and protocol.
 * Value fields are always ANDed with the mask.
 */
2947 flow_dv_translate_item_ipv4(void *matcher, void *key,
2948 const struct rte_flow_item *item,
2949 int inner, uint32_t group)
2951 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2952 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2953 const struct rte_flow_item_ipv4 nic_mask = { /* default mask */
2955 .src_addr = RTE_BE32(0xffffffff),
2956 .dst_addr = RTE_BE32(0xffffffff),
2957 .type_of_service = 0xff,
2958 .next_proto_id = 0xff,
2968 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2970 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2972 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2974 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Root table (group 0) uses a partial version mask (0xf vs exact 0x4). */
2977 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2979 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2980 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2985 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2986 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2987 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2988 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2989 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; /* kept big-endian */
2990 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2991 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2992 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2993 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2994 src_ipv4_src_ipv6.ipv4_layout.ipv4);
2995 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2996 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2997 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2998 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2999 ipv4_m->hdr.type_of_service)/* ECN = ToS bits 0-1 */;
3000 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
3001 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
3002 ipv4_m->hdr.type_of_service >> 2)/* DSCP = ToS bits 2-7 */;
3003 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
3004 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3005 ipv4_m->hdr.next_proto_id);
3006 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3007 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
3011 * Add IPV6 item to matcher and to the value.
3013 * @param[in, out] matcher
3015 * @param[in, out] key
3016 * Flow matcher value.
3018 * Flow pattern to translate.
3020 * Item is inner pattern.
3022 * The group to insert the rule.
/*
 * Translate an IPv6 flow item: version, 128-bit src/dst addresses
 * (value ANDed with mask byte-by-byte), traffic class split from
 * vtc_flow (ECN at bits 20-21, DSCP at bits 22-27), flow label into the
 * misc parameters (inner vs outer), and next-header protocol.
 * NOTE(review): partial listing — the flow-label shift/mask expressions
 * fall in the numbering gaps.
 */
3025 flow_dv_translate_item_ipv6(void *matcher, void *key,
3026 const struct rte_flow_item *item,
3027 int inner, uint32_t group)
3029 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
3030 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
3031 const struct rte_flow_item_ipv6 nic_mask = { /* default mask */
3034 "\xff\xff\xff\xff\xff\xff\xff\xff"
3035 "\xff\xff\xff\xff\xff\xff\xff\xff",
3037 "\xff\xff\xff\xff\xff\xff\xff\xff"
3038 "\xff\xff\xff\xff\xff\xff\xff\xff",
3039 .vtc_flow = RTE_BE32(0xffffffff),
3046 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3047 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3056 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3058 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3060 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3062 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Root table (group 0) uses a partial version mask (0xf vs exact 0x6). */
3065 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3067 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
3068 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
3073 size = sizeof(ipv6_m->hdr.dst_addr); /* 16 bytes, reused for src too */
3074 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3075 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3076 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3077 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3078 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
3079 for (i = 0; i < size; ++i)
3080 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; /* value &= mask */
3081 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3082 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3083 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3084 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3085 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
3086 for (i = 0; i < size; ++i)
3087 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
3089 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
3090 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
3091 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
3092 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
3093 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
3094 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label lives in misc parameters; inner/outer variants differ. */
3097 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
3099 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
3102 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
3104 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
3108 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3110 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3111 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
3115 * Add TCP item to matcher and to the value.
3117 * @param[in, out] matcher
3119 * @param[in, out] key
3120 * Flow matcher value.
3122 * Flow pattern to translate.
3124 * Item is inner pattern.
/*
 * Translate a TCP flow item: pins ip_protocol to IPPROTO_TCP, then
 * matches source/destination ports and the TCP flags byte.
 * A NULL mask falls back to rte_flow_item_tcp_mask.
 */
3127 flow_dv_translate_item_tcp(void *matcher, void *key,
3128 const struct rte_flow_item *item,
3131 const struct rte_flow_item_tcp *tcp_m = item->mask;
3132 const struct rte_flow_item_tcp *tcp_v = item->spec;
3137 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3139 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3141 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3143 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3145 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3146 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3150 tcp_m = &rte_flow_item_tcp_mask; /* default mask when none given */
3151 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3152 rte_be_to_cpu_16(tcp_m->hdr.src_port));
3153 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3154 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3155 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3156 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3157 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3158 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3159 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3160 tcp_m->hdr.tcp_flags);
3161 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3162 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3166 * Add UDP item to matcher and to the value.
3168 * @param[in, out] matcher
3170 * @param[in, out] key
3171 * Flow matcher value.
3173 * Flow pattern to translate.
3175 * Item is inner pattern.
/*
 * Translate a UDP flow item: pins ip_protocol to IPPROTO_UDP and matches
 * source/destination ports. A NULL mask falls back to
 * rte_flow_item_udp_mask.
 */
3178 flow_dv_translate_item_udp(void *matcher, void *key,
3179 const struct rte_flow_item *item,
3182 const struct rte_flow_item_udp *udp_m = item->mask;
3183 const struct rte_flow_item_udp *udp_v = item->spec;
3188 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3190 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3192 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3194 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3196 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3197 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3201 udp_m = &rte_flow_item_udp_mask; /* default mask when none given */
3202 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3203 rte_be_to_cpu_16(udp_m->hdr.src_port));
3204 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3205 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3206 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3207 rte_be_to_cpu_16(udp_m->hdr.dst_port));
3208 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3209 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3213 * Add GRE optional Key item to matcher and to the value.
3215 * @param[in, out] matcher
3217 * @param[in, out] key
3218 * Flow matcher value.
3220 * Flow pattern to translate.
3222 * Item is inner pattern.
/*
 * Translate a GRE_KEY item: forces the GRE K-present bit on (validated
 * earlier) and splits the 32-bit key into the HW's gre_key_h (upper 24
 * bits) and gre_key_l (lower 8 bits) fields, value ANDed with mask.
 */
3225 flow_dv_translate_item_gre_key(void *matcher, void *key,
3226 const struct rte_flow_item *item)
3228 const rte_be32_t *key_m = item->mask;
3229 const rte_be32_t *key_v = item->spec;
3230 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3231 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3232 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3237 key_m = &gre_key_default_mask; /* default: match full key */
3238 /* GRE K bit must be on and should already be validated */
3239 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3240 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3241 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3242 rte_be_to_cpu_32(*key_m) >> 8)/* upper 24 bits */;
3243 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3244 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3245 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3246 rte_be_to_cpu_32(*key_m) & 0xFF)/* lower 8 bits */;
3247 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3248 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3252 * Add GRE item to matcher and to the value.
3254 * @param[in, out] matcher
3256 * @param[in, out] key
3257 * Flow matcher value.
3259 * Flow pattern to translate.
3261 * Item is inner pattern.
/*
 * Translate a GRE flow item: pins ip_protocol to IPPROTO_GRE, matches the
 * GRE protocol field, and decodes the c_rsvd0_ver word through a local
 * bit-field union to match the C/K/S present flags individually.
 * NOTE(review): partial listing — the union's "value" member and the
 * remaining bit-field declarations fall in the numbering gaps; the
 * bit-field layout here is host-order dependent by design (input is
 * byte-swapped first).
 */
3264 flow_dv_translate_item_gre(void *matcher, void *key,
3265 const struct rte_flow_item *item,
3268 const struct rte_flow_item_gre *gre_m = item->mask;
3269 const struct rte_flow_item_gre *gre_v = item->spec;
3272 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3273 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3280 uint16_t s_present:1;
3281 uint16_t k_present:1;
3282 uint16_t rsvd_bit1:1;
3283 uint16_t c_present:1;
3287 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
3290 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3292 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3294 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3296 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3298 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3299 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
3303 gre_m = &rte_flow_item_gre_mask; /* default mask when none given */
3304 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3305 rte_be_to_cpu_16(gre_m->protocol));
3306 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3307 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3308 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
3309 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
3310 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
3311 gre_crks_rsvd0_ver_m.c_present);
3312 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
3313 gre_crks_rsvd0_ver_v.c_present &
3314 gre_crks_rsvd0_ver_m.c_present)/* value &= mask */;
3315 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
3316 gre_crks_rsvd0_ver_m.k_present);
3317 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
3318 gre_crks_rsvd0_ver_v.k_present &
3319 gre_crks_rsvd0_ver_m.k_present);
3320 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
3321 gre_crks_rsvd0_ver_m.s_present);
3322 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
3323 gre_crks_rsvd0_ver_v.s_present &
3324 gre_crks_rsvd0_ver_m.s_present);
3328 * Add NVGRE item to matcher and to the value.
3330 * @param[in, out] matcher
3332 * @param[in, out] key
3333 * Flow matcher value.
3335 * Flow pattern to translate.
3337 * Item is inner pattern.
/*
 * Translate an NVGRE flow item: reuses the GRE translation for the common
 * header fields, then matches TNI + flow_id (4 contiguous bytes) through
 * the GRE key area starting at gre_key_h, value bytes ANDed with mask.
 */
3340 flow_dv_translate_item_nvgre(void *matcher, void *key,
3341 const struct rte_flow_item *item,
3344 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3345 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3346 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3347 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3348 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3349 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
3355 flow_dv_translate_item_gre(matcher, key, item, inner); /* common GRE part */
3359 nvgre_m = &rte_flow_item_nvgre_mask; /* default mask when none given */
3360 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); /* TNI+flow_id */
3361 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3362 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3363 memcpy(gre_key_m, tni_flow_id_m, size);
3364 for (i = 0; i < size; ++i)
3365 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; /* value &= mask */
3369 * Add VXLAN item to matcher and to the value.
3371 * @param[in, out] matcher
3373 * @param[in, out] key
3374 * Flow matcher value.
3376 * Flow pattern to translate.
3378 * Item is inner pattern.
/*
 * Translate a VXLAN / VXLAN-GPE flow item: if no UDP destination port was
 * matched yet, pins it to the well-known VXLAN or VXLAN-GPE port, then
 * matches the 3-byte VNI with value ANDed against mask.
 */
3381 flow_dv_translate_item_vxlan(void *matcher, void *key,
3382 const struct rte_flow_item *item,
3385 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3386 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3389 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3390 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3398 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3400 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3402 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3404 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3406 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3407 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the UDP dport when the flow did not already match one. */
3408 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3409 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3410 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3415 vxlan_m = &rte_flow_item_vxlan_mask; /* default mask when none given */
3416 size = sizeof(vxlan_m->vni); /* 3 bytes */
3417 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3418 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3419 memcpy(vni_m, vxlan_m->vni, size);
3420 for (i = 0; i < size; ++i)
3421 vni_v[i] = vni_m[i] & vxlan_v->vni[i]; /* value &= mask */
3425 * Add MPLS item to matcher and to the value.
3427 * @param[in, out] matcher
3429 * @param[in, out] key
3430 * Flow matcher value.
3432 * Flow pattern to translate.
3433 * @param[in] prev_layer
3434 * The protocol layer indicated in previous item.
3436 * Item is inner pattern.
/*
 * Translate an MPLS flow item. The previous layer decides both how the
 * MPLS presence is signalled (UDP dport 6635, GRE protocol 0x8847, or
 * plain ip_protocol) and which misc2 field holds the label
 * (outer_first_mpls_over_udp / _over_gre / default outer slot).
 * The 32-bit label word is copied with value ANDed against mask.
 * NOTE(review): partial listing — the break statements, the default-mask
 * fallback condition, and the default-case misc2 field names fall in the
 * numbering gaps.
 */
3439 flow_dv_translate_item_mpls(void *matcher, void *key,
3440 const struct rte_flow_item *item,
3441 uint64_t prev_layer,
3444 const uint32_t *in_mpls_m = item->mask;
3445 const uint32_t *in_mpls_v = item->spec;
3446 uint32_t *out_mpls_m = 0; /* stays NULL if no misc2 slot applies */
3447 uint32_t *out_mpls_v = 0;
3448 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3449 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3450 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3452 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3453 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3454 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First pass: signal MPLS presence according to the carrying layer. */
3456 switch (prev_layer) {
3457 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3458 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3459 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3460 MLX5_UDP_PORT_MPLS);
3462 case MLX5_FLOW_LAYER_GRE:
3463 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3464 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3465 RTE_ETHER_TYPE_MPLS);
3468 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3469 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3476 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second pass: pick the misc2 field that holds the first MPLS label. */
3477 switch (prev_layer) {
3478 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3480 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3481 outer_first_mpls_over_udp);
3483 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3484 outer_first_mpls_over_udp);
3486 case MLX5_FLOW_LAYER_GRE:
3488 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3489 outer_first_mpls_over_gre);
3491 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3492 outer_first_mpls_over_gre);
3495 /* Inner MPLS not over GRE is not supported. */
3498 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3502 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3508 if (out_mpls_m && out_mpls_v) {
3509 *out_mpls_m = *in_mpls_m;
3510 *out_mpls_v = *in_mpls_v & *in_mpls_m; /* value &= mask */
3515 * Add META item to matcher
3517 * @param[in, out] matcher
3519 * @param[in, out] key
3520 * Flow matcher value.
3522 * Flow pattern to translate.
3524 * Item is inner pattern.
/*
 * Translate a META flow item into the metadata register A field of
 * misc parameters 2; value is ANDed with the mask. A NULL mask falls
 * back to rte_flow_item_meta_mask.
 * NOTE(review): partial listing — the NULL-spec guard and misc2
 * pointer declarations fall in the numbering gaps.
 */
3527 flow_dv_translate_item_meta(void *matcher, void *key,
3528 const struct rte_flow_item *item)
3530 const struct rte_flow_item_meta *meta_m;
3531 const struct rte_flow_item_meta *meta_v;
3533 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3535 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3537 meta_m = (const void *)item->mask;
3539 meta_m = &rte_flow_item_meta_mask; /* default mask when none given */
3540 meta_v = (const void *)item->spec;
3542 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3543 rte_be_to_cpu_32(meta_m->data));
3544 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3545 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3550 * Add source vport match to the specified matcher.
3552 * @param[in, out] matcher
3554 * @param[in, out] key
3555 * Flow matcher value.
3557 * Source vport value to match
/*
 * Add a source-vport match: writes @mask into the matcher's source_port
 * field and @port into the value's. Used by the port-id translation.
 */
3562 flow_dv_translate_item_source_vport(void *matcher, void *key,
3563 int16_t port, uint16_t mask)
3565 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3566 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3568 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3569 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3573 * Translate port-id item to eswitch match on port-id.
3576 * The device to configure through.
3577 * @param[in, out] matcher
3579 * @param[in, out] key
3580 * Flow matcher value.
3582 * Flow pattern to translate.
3585 * 0 on success, a negative errno value otherwise.
/*
 * Translate a PORT_ID item into an e-switch source-vport match. When the
 * item (or its spec/mask) is absent, defaults to full mask and the
 * device's own port id; the DPDK port id is converted to the e-switch
 * vport number before matching.
 * NOTE(review): partial listing — the error-return path after
 * mlx5_port_to_eswitch_info and the final return fall in the gaps.
 */
3588 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3589 void *key, const struct rte_flow_item *item)
3591 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3592 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3593 uint16_t mask, val, id;
3596 mask = pid_m ? pid_m->id : 0xffff; /* default: exact match */
3597 id = pid_v ? pid_v->id : dev->data->port_id; /* default: own port */
3598 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3601 flow_dv_translate_item_source_vport(matcher, key, val, mask);
3606 * Add ICMP6 item to matcher and to the value.
3608 * @param[in, out] matcher
3610 * @param[in, out] key
3611 * Flow matcher value.
3613 * Flow pattern to translate.
3615 * Item is inner pattern.
/*
 * Translate an ICMPv6 flow item: pins ip_protocol to IPPROTO_ICMPV6 and
 * matches type/code in misc parameters 3, value ANDed with mask. A NULL
 * mask falls back to rte_flow_item_icmp6_mask.
 */
3618 flow_dv_translate_item_icmp6(void *matcher, void *key,
3619 const struct rte_flow_item *item,
3622 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
3623 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
3626 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3628 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3630 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3632 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3634 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3636 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3638 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3639 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
3643 icmp6_m = &rte_flow_item_icmp6_mask; /* default mask when none given */
3644 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
3645 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
3646 icmp6_v->type & icmp6_m->type);
3647 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
3648 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
3649 icmp6_v->code & icmp6_m->code);
3653 * Add ICMP item to matcher and to the value.
3655 * @param[in, out] matcher
3657 * @param[in, out] key
3658 * Flow matcher value.
3660 * Flow pattern to translate.
3662 * Item is inner pattern.
/*
 * Translate an ICMPv4 flow item: pins ip_protocol to IPPROTO_ICMP and
 * matches type/code in misc parameters 3, value ANDed with mask. A NULL
 * mask falls back to rte_flow_item_icmp_mask.
 */
3665 flow_dv_translate_item_icmp(void *matcher, void *key,
3666 const struct rte_flow_item *item,
3669 const struct rte_flow_item_icmp *icmp_m = item->mask;
3670 const struct rte_flow_item_icmp *icmp_v = item->spec;
3673 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3675 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3677 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3679 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3681 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3683 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3685 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3686 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
3690 icmp_m = &rte_flow_item_icmp_mask; /* default mask when none given */
3691 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
3692 icmp_m->hdr.icmp_type);
3693 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
3694 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
3695 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
3696 icmp_m->hdr.icmp_code);
3697 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
3698 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
/* All-zero reference buffer used to test whether a match-criteria header
 * section is entirely unset. */
3701 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Evaluates to non-zero when the named header section of @match_criteria
 * is all zeroes (i.e. nothing in that section is being matched). */
3703 #define HEADER_IS_ZERO(match_criteria, headers) \
3704 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3705 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3708 * Calculate flow matcher enable bitmap.
3710 * @param match_criteria
3711 * Pointer to flow matcher criteria.
3714 * Bitmap of enabled fields.
/*
 * Compute the match_criteria_enable bitmap for a matcher mask: one bit
 * per non-empty header block (outer, misc, inner, misc2, and — only when
 * DR is available — misc3).
 */
3717 flow_dv_matcher_enable(uint32_t *match_criteria)
3719 uint8_t match_criteria_enable;
3721 match_criteria_enable =
3722 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3723 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3724 match_criteria_enable |=
3725 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3726 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3727 match_criteria_enable |=
3728 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3729 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3730 match_criteria_enable |=
3731 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3732 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
/* misc_parameters_3 (e.g. ICMP fields) only exists with DR support. */
3733 #ifdef HAVE_MLX5DV_DR
3734 match_criteria_enable |=
3735 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3736 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3738 return match_criteria_enable;
3745 * @param dev[in, out]
3746 * Pointer to rte_eth_dev structure.
3747 * @param[in] table_id
3750 * Direction of the table.
3751 * @param[in] transfer
3752 * E-Switch or NIC flow.
3754 * pointer to error structure.
3757 * Returns the table resource based on the index, NULL in case of failure.
3759 static struct mlx5_flow_tbl_resource *
3760 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3761 uint32_t table_id, uint8_t egress,
3763 struct rte_flow_error *error)
3765 struct mlx5_priv *priv = dev->data->dev_private;
3766 struct mlx5_ibv_shared *sh = priv->sh;
3767 struct mlx5_flow_tbl_resource *tbl;
/*
 * With DR support: pick the per-domain table slot (FDB for transfer,
 * TX for egress, RX otherwise), lazily create the DR table object and
 * take a reference on it.
 */
3769 #ifdef HAVE_MLX5DV_DR
3771 tbl = &sh->fdb_tbl[table_id];
3773 tbl->obj = mlx5_glue->dr_create_flow_tbl
3774 (sh->fdb_domain, table_id);
3775 } else if (egress) {
3776 tbl = &sh->tx_tbl[table_id];
3778 tbl->obj = mlx5_glue->dr_create_flow_tbl
3779 (sh->tx_domain, table_id);
3781 tbl = &sh->rx_tbl[table_id];
3783 tbl->obj = mlx5_glue->dr_create_flow_tbl
3784 (sh->rx_domain, table_id);
/* Table object creation failed: report and bail out. */
3787 rte_flow_error_set(error, ENOMEM,
3788 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3789 NULL, "cannot create table");
3792 rte_atomic32_inc(&tbl->refcnt);
/* Without DR: just return the pre-existing table slot directly. */
3798 return &sh->fdb_tbl[table_id];
3800 return &sh->tx_tbl[table_id];
3802 return &sh->rx_tbl[table_id];
3807 * Release a flow table.
3810 * Table resource to be released.
3813 * Returns 0 if the table was released, 1 otherwise.
/*
 * Drop one reference on a flow table; destroy the underlying DR table
 * object when the last reference goes away.
 */
3816 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3820 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3821 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3829 * Register the flow matcher.
3831 * @param dev[in, out]
3832 * Pointer to rte_eth_dev structure.
3833 * @param[in, out] matcher
3834 * Pointer to flow matcher.
3835 * @param[in, out] dev_flow
3836 * Pointer to the dev_flow.
3838 * pointer to error structure.
3841 * 0 on success otherwise -errno and errno is set.
/*
 * Find an existing matcher in the shared-context cache (keyed by CRC,
 * priority, direction, group, transfer flag and the raw mask bytes) or
 * create and register a new one; the result is stored in
 * dev_flow->dv.matcher with its refcount incremented.
 */
3844 flow_dv_matcher_register(struct rte_eth_dev *dev,
3845 struct mlx5_flow_dv_matcher *matcher,
3846 struct mlx5_flow *dev_flow,
3847 struct rte_flow_error *error)
3849 struct mlx5_priv *priv = dev->data->dev_private;
3850 struct mlx5_ibv_shared *sh = priv->sh;
3851 struct mlx5_flow_dv_matcher *cache_matcher;
3852 struct mlx5dv_flow_matcher_attr dv_attr = {
3853 .type = IBV_FLOW_ATTR_NORMAL,
3854 .match_mask = (void *)&matcher->mask,
3856 struct mlx5_flow_tbl_resource *tbl = NULL;
3858 /* Lookup from cache. */
3859 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3860 if (matcher->crc == cache_matcher->crc &&
3861 matcher->priority == cache_matcher->priority &&
3862 matcher->egress == cache_matcher->egress &&
3863 matcher->group == cache_matcher->group &&
3864 matcher->transfer == cache_matcher->transfer &&
3865 !memcmp((const void *)matcher->mask.buf,
3866 (const void *)cache_matcher->mask.buf,
3867 cache_matcher->mask.size)) {
/* Cache hit: reuse the matcher, bump its reference count. */
3869 "priority %hd use %s matcher %p: refcnt %d++",
3870 cache_matcher->priority,
3871 cache_matcher->egress ? "tx" : "rx",
3872 (void *)cache_matcher,
3873 rte_atomic32_read(&cache_matcher->refcnt));
3874 rte_atomic32_inc(&cache_matcher->refcnt);
3875 dev_flow->dv.matcher = cache_matcher;
3879 /* Register new matcher. */
3880 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3882 return rte_flow_error_set(error, ENOMEM,
3883 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3884 "cannot allocate matcher memory");
/* The matcher lives in the table of its (scaled) group. */
3885 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3886 matcher->egress, matcher->transfer,
/* Table lookup/creation failed: undo the allocation. */
3889 rte_free(cache_matcher);
3890 return rte_flow_error_set(error, ENOMEM,
3891 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3892 NULL, "cannot create table");
3894 *cache_matcher = *matcher;
3895 dv_attr.match_criteria_enable =
3896 flow_dv_matcher_enable(cache_matcher->mask.buf);
3897 dv_attr.priority = matcher->priority;
3898 if (matcher->egress)
3899 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3900 cache_matcher->matcher_object =
3901 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3902 if (!cache_matcher->matcher_object) {
3903 rte_free(cache_matcher);
/* The table reference was only taken on the DR path. */
3904 #ifdef HAVE_MLX5DV_DR
3905 flow_dv_tbl_resource_release(tbl);
3907 return rte_flow_error_set(error, ENOMEM,
3908 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3909 NULL, "cannot create matcher");
/* Publish the new matcher in the shared cache list. */
3911 rte_atomic32_inc(&cache_matcher->refcnt);
3912 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3913 dev_flow->dv.matcher = cache_matcher;
3914 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3915 cache_matcher->priority,
3916 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3917 rte_atomic32_read(&cache_matcher->refcnt));
3918 rte_atomic32_inc(&tbl->refcnt);
3923 * Find existing tag resource or create and register a new one.
3925 * @param dev[in, out]
3926 * Pointer to rte_eth_dev structure.
3927 * @param[in, out] resource
3928 * Pointer to tag resource.
3929 * @param[in, out] dev_flow
3930 * Pointer to the dev_flow.
3932 * pointer to error structure.
3935 * 0 on success otherwise -errno and errno is set.
/*
 * Find an existing tag action resource (matched solely by tag value) in
 * the shared-context cache or create a new flow-tag DV action; the result
 * is stored in dev_flow->flow->tag_resource with its refcount incremented.
 */
3938 flow_dv_tag_resource_register
3939 (struct rte_eth_dev *dev,
3940 struct mlx5_flow_dv_tag_resource *resource,
3941 struct mlx5_flow *dev_flow,
3942 struct rte_flow_error *error)
3944 struct mlx5_priv *priv = dev->data->dev_private;
3945 struct mlx5_ibv_shared *sh = priv->sh;
3946 struct mlx5_flow_dv_tag_resource *cache_resource;
3948 /* Lookup a matching resource from cache. */
3949 LIST_FOREACH(cache_resource, &sh->tags, next) {
3950 if (resource->tag == cache_resource->tag) {
3951 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3952 (void *)cache_resource,
3953 rte_atomic32_read(&cache_resource->refcnt));
3954 rte_atomic32_inc(&cache_resource->refcnt);
3955 dev_flow->flow->tag_resource = cache_resource;
3959 /* Register new resource. */
3960 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3961 if (!cache_resource)
3962 return rte_flow_error_set(error, ENOMEM,
3963 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3964 "cannot allocate resource memory");
3965 *cache_resource = *resource;
/* Create the DV tag action carrying the requested tag value. */
3966 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3968 if (!cache_resource->action) {
3969 rte_free(cache_resource);
3970 return rte_flow_error_set(error, ENOMEM,
3971 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3972 NULL, "cannot create action");
/* Initial reference belongs to the calling flow. */
3974 rte_atomic32_init(&cache_resource->refcnt);
3975 rte_atomic32_inc(&cache_resource->refcnt);
3976 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3977 dev_flow->flow->tag_resource = cache_resource;
3978 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3979 (void *)cache_resource,
3980 rte_atomic32_read(&cache_resource->refcnt));
3988 * Pointer to Ethernet device.
3990 * Pointer to mlx5_flow.
3993 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on a tag action resource; on the last reference the
 * DV action is destroyed and the resource removed from the cache list.
 */
3996 flow_dv_tag_release(struct rte_eth_dev *dev,
3997 struct mlx5_flow_dv_tag_resource *tag)
4000 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4001 dev->data->port_id, (void *)tag,
4002 rte_atomic32_read(&tag->refcnt));
4003 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4004 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4005 LIST_REMOVE(tag, next);
4006 DRV_LOG(DEBUG, "port %u tag %p: removed",
4007 dev->data->port_id, (void *)tag);
4015 * Translate port ID action to vport.
4018 * Pointer to rte_eth_dev structure.
4020 * Pointer to the port ID action.
4021 * @param[out] dst_port_id
4022 * The target port ID.
4024 * Pointer to the error structure.
4027 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Resolve a PORT_ID action into the E-Switch vport identifier stored in
 * *dst_port_id. With conf->original set, the current device port is used
 * instead of conf->id.
 */
4030 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4031 const struct rte_flow_action *action,
4032 uint32_t *dst_port_id,
4033 struct rte_flow_error *error)
4038 const struct rte_flow_action_port_id *conf =
4039 (const struct rte_flow_action_port_id *)action->conf;
4041 port = conf->original ? dev->data->port_id : conf->id;
/* Map the DPDK port to its eswitch vport id. */
4042 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4044 return rte_flow_error_set(error, -ret,
4045 RTE_FLOW_ERROR_TYPE_ACTION,
4047 "No eswitch info was found for port");
4048 *dst_port_id = port_id;
4053 * Fill the flow with DV spec.
4056 * Pointer to rte_eth_dev structure.
4057 * @param[in, out] dev_flow
4058 * Pointer to the sub flow.
4060 * Pointer to the flow attributes.
4062 * Pointer to the list of items.
4063 * @param[in] actions
4064 * Pointer to the list of actions.
4066 * Pointer to the error structure.
4069 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Translate an rte_flow rule (attributes + item list + action list) into
 * the DV representation stored in @dev_flow: a DV action array and a
 * matcher mask/value pair, then register the matcher in the shared cache.
 * Two passes: first over actions (building dev_flow->dv.actions and the
 * accumulated modify-header resource), then over items (filling the
 * matcher mask and value buffers).
 */
4072 flow_dv_translate(struct rte_eth_dev *dev,
4073 struct mlx5_flow *dev_flow,
4074 const struct rte_flow_attr *attr,
4075 const struct rte_flow_item items[],
4076 const struct rte_flow_action actions[],
4077 struct rte_flow_error *error)
4079 struct mlx5_priv *priv = dev->data->dev_private;
4080 struct rte_flow *flow = dev_flow->flow;
4081 uint64_t item_flags = 0;
4082 uint64_t last_item = 0;
4083 uint64_t action_flags = 0;
4084 uint64_t priority = attr->priority;
4085 struct mlx5_flow_dv_matcher matcher = {
4087 .size = sizeof(matcher.mask.buf),
4091 bool actions_end = false;
/* Accumulator for all modify-header style actions of this flow. */
4092 struct mlx5_flow_dv_modify_hdr_resource res = {
4093 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4094 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4096 union flow_dv_attr flow_attr = { .attr = 0 };
4097 struct mlx5_flow_dv_tag_resource tag_resource;
4098 uint32_t modify_action_position = UINT32_MAX;
4099 void *match_mask = matcher.mask.buf;
4100 void *match_value = dev_flow->dv.value.buf;
4102 flow->group = attr->group;
4104 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
/* Reserved priority resolves to the device's lowest flow priority. */
4105 if (priority == MLX5_FLOW_PRIO_RSVD)
4106 priority = priv->config.flow_prio - 1;
/* Pass 1: walk the action list until RTE_FLOW_ACTION_TYPE_END. */
4107 for (; !actions_end ; actions++) {
4108 const struct rte_flow_action_queue *queue;
4109 const struct rte_flow_action_rss *rss;
4110 const struct rte_flow_action *action = actions;
4111 const struct rte_flow_action_count *count = action->conf;
4112 const uint8_t *rss_key;
4113 const struct rte_flow_action_jump *jump_data;
4114 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4115 struct mlx5_flow_tbl_resource *tbl;
4116 uint32_t port_id = 0;
4117 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4119 switch (actions->type) {
4120 case RTE_FLOW_ACTION_TYPE_VOID:
4122 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4123 if (flow_dv_translate_action_port_id(dev, action,
4126 port_id_resource.port_id = port_id;
4127 if (flow_dv_port_id_action_resource_register
4128 (dev, &port_id_resource, dev_flow, error))
4130 dev_flow->dv.actions[actions_n++] =
4131 dev_flow->dv.port_id_action->action;
4132 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
/* FLAG/MARK both map onto a shared tag action resource. */
4134 case RTE_FLOW_ACTION_TYPE_FLAG:
4136 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4137 if (!flow->tag_resource)
4138 if (flow_dv_tag_resource_register
4139 (dev, &tag_resource, dev_flow, error))
4141 dev_flow->dv.actions[actions_n++] =
4142 flow->tag_resource->action;
4143 action_flags |= MLX5_FLOW_ACTION_FLAG;
4145 case RTE_FLOW_ACTION_TYPE_MARK:
4146 tag_resource.tag = mlx5_flow_mark_set
4147 (((const struct rte_flow_action_mark *)
4148 (actions->conf))->id);
4149 if (!flow->tag_resource)
4150 if (flow_dv_tag_resource_register
4151 (dev, &tag_resource, dev_flow, error))
4153 dev_flow->dv.actions[actions_n++] =
4154 flow->tag_resource->action;
4155 action_flags |= MLX5_FLOW_ACTION_MARK;
4157 case RTE_FLOW_ACTION_TYPE_DROP:
4158 action_flags |= MLX5_FLOW_ACTION_DROP;
4160 case RTE_FLOW_ACTION_TYPE_QUEUE:
4161 queue = actions->conf;
4162 flow->rss.queue_num = 1;
4163 (*flow->queue)[0] = queue->index;
4164 action_flags |= MLX5_FLOW_ACTION_QUEUE;
4166 case RTE_FLOW_ACTION_TYPE_RSS:
4167 rss = actions->conf;
4169 memcpy((*flow->queue), rss->queue,
4170 rss->queue_num * sizeof(uint16_t));
4171 flow->rss.queue_num = rss->queue_num;
4172 /* NULL RSS key indicates default RSS key. */
4173 rss_key = !rss->key ? rss_hash_default_key : rss->key;
4174 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4175 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4176 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4177 flow->rss.level = rss->level;
4178 action_flags |= MLX5_FLOW_ACTION_RSS;
/* COUNT requires DevX counter support. */
4180 case RTE_FLOW_ACTION_TYPE_COUNT:
4181 if (!priv->config.devx) {
4182 rte_errno = ENOTSUP;
4185 flow->counter = flow_dv_counter_new(dev, count->shared,
4187 if (flow->counter == NULL)
4189 dev_flow->dv.actions[actions_n++] =
4190 flow->counter->action;
4191 action_flags |= MLX5_FLOW_ACTION_COUNT;
4194 if (rte_errno == ENOTSUP)
4195 return rte_flow_error_set
4197 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4199 "count action not supported");
4201 return rte_flow_error_set
4203 RTE_FLOW_ERROR_TYPE_ACTION,
4205 "cannot create counter"
4207 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4208 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4209 if (flow_dv_create_action_l2_encap(dev, actions,
4214 dev_flow->dv.actions[actions_n++] =
4215 dev_flow->dv.encap_decap->verbs_action;
4216 action_flags |= actions->type ==
4217 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4218 MLX5_FLOW_ACTION_VXLAN_ENCAP :
4219 MLX5_FLOW_ACTION_NVGRE_ENCAP;
4221 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4222 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4223 if (flow_dv_create_action_l2_decap(dev, dev_flow,
4227 dev_flow->dv.actions[actions_n++] =
4228 dev_flow->dv.encap_decap->verbs_action;
4229 action_flags |= actions->type ==
4230 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4231 MLX5_FLOW_ACTION_VXLAN_DECAP :
4232 MLX5_FLOW_ACTION_NVGRE_DECAP;
4234 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4235 /* Handle encap with preceding decap. */
4236 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4237 if (flow_dv_create_action_raw_encap
4238 (dev, actions, dev_flow, attr, error))
4240 dev_flow->dv.actions[actions_n++] =
4241 dev_flow->dv.encap_decap->verbs_action;
4243 /* Handle encap without preceding decap. */
4244 if (flow_dv_create_action_l2_encap
4245 (dev, actions, dev_flow, attr->transfer,
4248 dev_flow->dv.actions[actions_n++] =
4249 dev_flow->dv.encap_decap->verbs_action;
4251 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4253 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4254 /* Check if this decap is followed by encap. */
4255 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4256 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4259 /* Handle decap only if it isn't followed by encap. */
4260 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4261 if (flow_dv_create_action_l2_decap
4262 (dev, dev_flow, attr->transfer, error))
4264 dev_flow->dv.actions[actions_n++] =
4265 dev_flow->dv.encap_decap->verbs_action;
4267 /* If decap is followed by encap, handle it at encap. */
4268 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4270 case RTE_FLOW_ACTION_TYPE_JUMP:
4271 jump_data = action->conf;
4272 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4275 attr->transfer, error);
4277 return rte_flow_error_set
4279 RTE_FLOW_ERROR_TYPE_ACTION,
4281 "cannot create jump action.");
4282 jump_tbl_resource.tbl = tbl;
4283 if (flow_dv_jump_tbl_resource_register
4284 (dev, &jump_tbl_resource, dev_flow, error)) {
/* Undo the table reference taken above. */
4285 flow_dv_tbl_resource_release(tbl);
4286 return rte_flow_error_set
4288 RTE_FLOW_ERROR_TYPE_ACTION,
4290 "cannot create jump action.");
4292 dev_flow->dv.actions[actions_n++] =
4293 dev_flow->dv.jump->action;
4294 action_flags |= MLX5_FLOW_ACTION_JUMP;
/* The modify-header actions below only accumulate into 'res'. */
4296 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4297 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4298 if (flow_dv_convert_action_modify_mac(&res, actions,
4301 action_flags |= actions->type ==
4302 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4303 MLX5_FLOW_ACTION_SET_MAC_SRC :
4304 MLX5_FLOW_ACTION_SET_MAC_DST;
4306 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4307 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4308 if (flow_dv_convert_action_modify_ipv4(&res, actions,
4311 action_flags |= actions->type ==
4312 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4313 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4314 MLX5_FLOW_ACTION_SET_IPV4_DST;
4316 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4317 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4318 if (flow_dv_convert_action_modify_ipv6(&res, actions,
4321 action_flags |= actions->type ==
4322 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4323 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4324 MLX5_FLOW_ACTION_SET_IPV6_DST;
4326 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4327 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4328 if (flow_dv_convert_action_modify_tp(&res, actions,
4332 action_flags |= actions->type ==
4333 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4334 MLX5_FLOW_ACTION_SET_TP_SRC :
4335 MLX5_FLOW_ACTION_SET_TP_DST;
4337 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4338 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4342 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4344 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4345 if (flow_dv_convert_action_modify_ttl(&res, actions,
4349 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4351 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4352 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4353 if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4356 action_flags |= actions->type ==
4357 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4358 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4359 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4362 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4363 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4364 if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4367 action_flags |= actions->type ==
4368 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4369 MLX5_FLOW_ACTION_INC_TCP_ACK :
4370 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4372 case RTE_FLOW_ACTION_TYPE_END:
4374 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4375 /* create modify action if needed. */
4376 if (flow_dv_modify_hdr_resource_register
/* The single modify-header action goes into its reserved slot. */
4381 dev_flow->dv.actions[modify_action_position] =
4382 dev_flow->dv.modify_hdr->verbs_action;
/* Reserve one action slot for the first modify-header action. */
4388 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4389 modify_action_position == UINT32_MAX)
4390 modify_action_position = actions_n++;
4392 dev_flow->dv.actions_n = actions_n;
4393 flow->actions = action_flags;
/* Pass 2: walk the item list, filling matcher mask and value. */
4394 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4395 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4397 switch (items->type) {
4398 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4399 flow_dv_translate_item_port_id(dev, match_mask,
4400 match_value, items);
4401 last_item = MLX5_FLOW_ITEM_PORT_ID;
4403 case RTE_FLOW_ITEM_TYPE_ETH:
4404 flow_dv_translate_item_eth(match_mask, match_value,
4406 matcher.priority = MLX5_PRIORITY_MAP_L2;
4407 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4408 MLX5_FLOW_LAYER_OUTER_L2;
4410 case RTE_FLOW_ITEM_TYPE_VLAN:
4411 flow_dv_translate_item_vlan(match_mask, match_value,
4413 matcher.priority = MLX5_PRIORITY_MAP_L2;
4414 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4415 MLX5_FLOW_LAYER_INNER_VLAN) :
4416 (MLX5_FLOW_LAYER_OUTER_L2 |
4417 MLX5_FLOW_LAYER_OUTER_VLAN);
4419 case RTE_FLOW_ITEM_TYPE_IPV4:
4420 flow_dv_translate_item_ipv4(match_mask, match_value,
4421 items, tunnel, attr->group);
4422 matcher.priority = MLX5_PRIORITY_MAP_L3;
4423 dev_flow->dv.hash_fields |=
4424 mlx5_flow_hashfields_adjust
4426 MLX5_IPV4_LAYER_TYPES,
4427 MLX5_IPV4_IBV_RX_HASH);
4428 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4429 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4431 case RTE_FLOW_ITEM_TYPE_IPV6:
4432 flow_dv_translate_item_ipv6(match_mask, match_value,
4433 items, tunnel, attr->group);
4434 matcher.priority = MLX5_PRIORITY_MAP_L3;
4435 dev_flow->dv.hash_fields |=
4436 mlx5_flow_hashfields_adjust
4438 MLX5_IPV6_LAYER_TYPES,
4439 MLX5_IPV6_IBV_RX_HASH);
4440 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4441 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4443 case RTE_FLOW_ITEM_TYPE_TCP:
4444 flow_dv_translate_item_tcp(match_mask, match_value,
4446 matcher.priority = MLX5_PRIORITY_MAP_L4;
4447 dev_flow->dv.hash_fields |=
4448 mlx5_flow_hashfields_adjust
4449 (dev_flow, tunnel, ETH_RSS_TCP,
4450 IBV_RX_HASH_SRC_PORT_TCP |
4451 IBV_RX_HASH_DST_PORT_TCP);
4452 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4453 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4455 case RTE_FLOW_ITEM_TYPE_UDP:
4456 flow_dv_translate_item_udp(match_mask, match_value,
4458 matcher.priority = MLX5_PRIORITY_MAP_L4;
4459 dev_flow->dv.hash_fields |=
4460 mlx5_flow_hashfields_adjust
4461 (dev_flow, tunnel, ETH_RSS_UDP,
4462 IBV_RX_HASH_SRC_PORT_UDP |
4463 IBV_RX_HASH_DST_PORT_UDP);
4464 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4465 MLX5_FLOW_LAYER_OUTER_L4_UDP;
4467 case RTE_FLOW_ITEM_TYPE_GRE:
4468 flow_dv_translate_item_gre(match_mask, match_value,
4470 last_item = MLX5_FLOW_LAYER_GRE;
4472 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4473 flow_dv_translate_item_gre_key(match_mask,
4474 match_value, items);
4475 item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
4477 case RTE_FLOW_ITEM_TYPE_NVGRE:
4478 flow_dv_translate_item_nvgre(match_mask, match_value,
4480 last_item = MLX5_FLOW_LAYER_GRE;
4482 case RTE_FLOW_ITEM_TYPE_VXLAN:
4483 flow_dv_translate_item_vxlan(match_mask, match_value,
4485 last_item = MLX5_FLOW_LAYER_VXLAN;
4487 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4488 flow_dv_translate_item_vxlan(match_mask, match_value,
4490 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4492 case RTE_FLOW_ITEM_TYPE_MPLS:
4493 flow_dv_translate_item_mpls(match_mask, match_value,
4494 items, last_item, tunnel);
4495 last_item = MLX5_FLOW_LAYER_MPLS;
4497 case RTE_FLOW_ITEM_TYPE_META:
4498 flow_dv_translate_item_meta(match_mask, match_value,
4500 last_item = MLX5_FLOW_ITEM_METADATA;
4502 case RTE_FLOW_ITEM_TYPE_ICMP:
4503 flow_dv_translate_item_icmp(match_mask, match_value,
4505 item_flags |= MLX5_FLOW_LAYER_ICMP;
4507 case RTE_FLOW_ITEM_TYPE_ICMP6:
4508 flow_dv_translate_item_icmp6(match_mask, match_value,
4510 item_flags |= MLX5_FLOW_LAYER_ICMP6;
4515 item_flags |= last_item;
4518 * In case of ingress traffic when E-Switch mode is enabled,
4519 * we have two cases where we need to set the source port manually.
4520 * The first one, is in case of Nic steering rule, and the second is
4521 * E-Switch rule where no port_id item was found. In both cases
4522 * the source port is set according the current port in use.
4524 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4525 (priv->representor || priv->master)) {
4526 if (flow_dv_translate_item_port_id(dev, match_mask,
/* Every value bit must be covered by its mask bit. */
4530 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4531 dev_flow->dv.value.buf));
4532 dev_flow->layers = item_flags;
4533 /* Register matcher. */
4534 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4536 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4538 matcher.egress = attr->egress;
4539 matcher.group = attr->group;
4540 matcher.transfer = attr->transfer;
4541 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4547 * Apply the flow to the NIC.
4550 * Pointer to the Ethernet device structure.
4551 * @param[in, out] flow
4552 * Pointer to flow structure.
4554 * Pointer to error structure.
4557 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Install every translated sub-flow of @flow into the NIC: append the
 * fate action (drop queue or RSS hash Rx queue) to each sub-flow's DV
 * action array and create the HW flow via the glue layer. On any failure
 * the already-acquired hash Rx queues are rolled back.
 */
4560 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4561 struct rte_flow_error *error)
4563 struct mlx5_flow_dv *dv;
4564 struct mlx5_flow *dev_flow;
4565 struct mlx5_priv *priv = dev->data->dev_private;
4569 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4572 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Transfer flows drop via the E-Switch drop action. */
4573 if (flow->transfer) {
4574 dv->actions[n++] = priv->sh->esw_drop_action;
4576 dv->hrxq = mlx5_hrxq_drop_new(dev);
4580 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4582 "cannot get drop hash queue");
4585 dv->actions[n++] = dv->hrxq->action;
4587 } else if (flow->actions &
4588 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4589 struct mlx5_hrxq *hrxq;
/* Reuse an existing hash Rx queue or create a new one. */
4591 hrxq = mlx5_hrxq_get(dev, flow->key,
4592 MLX5_RSS_HASH_KEY_LEN,
4595 flow->rss.queue_num);
4597 hrxq = mlx5_hrxq_new
4598 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4599 dv->hash_fields, (*flow->queue),
4600 flow->rss.queue_num,
4601 !!(dev_flow->layers &
4602 MLX5_FLOW_LAYER_TUNNEL));
4606 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4607 "cannot get hash queue");
4611 dv->actions[n++] = dv->hrxq->action;
/* Create the HW flow from matcher, value and action list. */
4614 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4615 (void *)&dv->value, n,
4618 rte_flow_error_set(error, errno,
4619 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4621 "hardware refuses to create flow");
/* Error path: release queues acquired for the sub-flows. */
4627 err = rte_errno; /* Save rte_errno before cleanup. */
4628 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4629 struct mlx5_flow_dv *dv = &dev_flow->dv;
4631 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4632 mlx5_hrxq_drop_release(dev);
4634 mlx5_hrxq_release(dev, dv->hrxq);
4638 rte_errno = err; /* Restore rte_errno. */
4643 * Release the flow matcher.
4646 * Pointer to Ethernet device.
4648 * Pointer to mlx5_flow.
4651 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the matcher of @flow; on the last reference the
 * DV matcher object is destroyed, the matcher removed from the shared
 * cache, and the reference on its flow table dropped as well.
 */
4654 flow_dv_matcher_release(struct rte_eth_dev *dev,
4655 struct mlx5_flow *flow)
4657 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4658 struct mlx5_priv *priv = dev->data->dev_private;
4659 struct mlx5_ibv_shared *sh = priv->sh;
4660 struct mlx5_flow_tbl_resource *tbl;
4662 assert(matcher->matcher_object);
4663 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4664 dev->data->port_id, (void *)matcher,
4665 rte_atomic32_read(&matcher->refcnt));
4666 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4667 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4668 (matcher->matcher_object));
4669 LIST_REMOVE(matcher, next);
/* Release the table reference taken at registration time. */
4670 if (matcher->egress)
4671 tbl = &sh->tx_tbl[matcher->group];
4673 tbl = &sh->rx_tbl[matcher->group];
4674 flow_dv_tbl_resource_release(tbl);
4676 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4677 dev->data->port_id, (void *)matcher);
4684 * Release an encap/decap resource.
4687 * Pointer to mlx5_flow.
4690 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the encap/decap resource of @flow; on the last
 * reference the verbs action is destroyed and the cached resource freed.
 */
4693 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4695 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4696 flow->dv.encap_decap;
4698 assert(cache_resource->verbs_action);
4699 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4700 (void *)cache_resource,
4701 rte_atomic32_read(&cache_resource->refcnt));
4702 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4703 claim_zero(mlx5_glue->destroy_flow_action
4704 (cache_resource->verbs_action));
4705 LIST_REMOVE(cache_resource, next);
4706 rte_free(cache_resource);
4707 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4708 (void *)cache_resource);
4715 * Release a jump to table action resource.
4718 * Pointer to mlx5_flow.
4721 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the jump-to-table resource of @flow; on the last
 * reference the action is destroyed, the target table reference dropped
 * and the cached resource freed.
 */
4724 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4726 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4729 assert(cache_resource->action);
4730 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4731 (void *)cache_resource,
4732 rte_atomic32_read(&cache_resource->refcnt));
4733 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4734 claim_zero(mlx5_glue->destroy_flow_action
4735 (cache_resource->action));
4736 LIST_REMOVE(cache_resource, next);
/* Also drop the reference on the jump target table. */
4737 flow_dv_tbl_resource_release(cache_resource->tbl);
4738 rte_free(cache_resource);
4739 DRV_LOG(DEBUG, "jump table resource %p: removed",
4740 (void *)cache_resource);
4747 * Release a modify-header resource.
4750 * Pointer to mlx5_flow.
4753 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the modify-header resource of @flow; on the last
 * reference the verbs action is destroyed and the cached resource freed.
 */
4756 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4758 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4759 flow->dv.modify_hdr;
4761 assert(cache_resource->verbs_action);
4762 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4763 (void *)cache_resource,
4764 rte_atomic32_read(&cache_resource->refcnt));
4765 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4766 claim_zero(mlx5_glue->destroy_flow_action
4767 (cache_resource->verbs_action));
4768 LIST_REMOVE(cache_resource, next);
4769 rte_free(cache_resource);
4770 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4771 (void *)cache_resource);
4778 * Release port ID action resource.
4781 * Pointer to mlx5_flow.
4784 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the port-ID action resource of @flow; on the last
 * reference the action is destroyed and the cached resource freed.
 */
4787 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4789 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4790 flow->dv.port_id_action;
4792 assert(cache_resource->action);
4793 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4794 (void *)cache_resource,
4795 rte_atomic32_read(&cache_resource->refcnt));
4796 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4797 claim_zero(mlx5_glue->destroy_flow_action
4798 (cache_resource->action));
4799 LIST_REMOVE(cache_resource, next);
4800 rte_free(cache_resource);
4801 DRV_LOG(DEBUG, "port id action resource %p: removed",
4802 (void *)cache_resource);
4809 * Remove the flow from the NIC but keeps it in memory.
4812 * Pointer to Ethernet device.
4813 * @param[in, out] flow
4814 * Pointer to flow structure.
/*
 * Remove every sub-flow of @flow from the NIC (destroy the HW flow and
 * release the fate queue) while keeping the flow structures in memory so
 * the rule can be re-applied later.
 */
4817 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4819 struct mlx5_flow_dv *dv;
4820 struct mlx5_flow *dev_flow;
4824 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4827 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
/* Release the drop or RSS hash Rx queue of the sub-flow. */
4831 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4832 mlx5_hrxq_drop_release(dev);
4834 mlx5_hrxq_release(dev, dv->hrxq);
4841 * Remove the flow from the NIC and the memory.
4844 * Pointer to the Ethernet device structure.
4845 * @param[in, out] flow
4846 * Pointer to flow structure.
/*
 * Fully tear @flow down: remove it from the NIC, release its counter and
 * tag resource, then release every per-sub-flow DV resource (matcher,
 * encap/decap, modify-header, jump, port-ID action).
 */
4849 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4851 struct mlx5_flow *dev_flow;
4855 flow_dv_remove(dev, flow);
4856 if (flow->counter) {
4857 flow_dv_counter_release(flow->counter);
4858 flow->counter = NULL;
4860 if (flow->tag_resource) {
4861 flow_dv_tag_release(dev, flow->tag_resource);
4862 flow->tag_resource = NULL;
/* Drain the sub-flow list, releasing each cached DV resource. */
4864 while (!LIST_EMPTY(&flow->dev_flows)) {
4865 dev_flow = LIST_FIRST(&flow->dev_flows);
4866 LIST_REMOVE(dev_flow, next);
4867 if (dev_flow->dv.matcher)
4868 flow_dv_matcher_release(dev, dev_flow);
4869 if (dev_flow->dv.encap_decap)
4870 flow_dv_encap_decap_resource_release(dev_flow);
4871 if (dev_flow->dv.modify_hdr)
4872 flow_dv_modify_hdr_resource_release(dev_flow);
4873 if (dev_flow->dv.jump)
4874 flow_dv_jump_tbl_resource_release(dev_flow);
4875 if (dev_flow->dv.port_id_action)
4876 flow_dv_port_id_action_resource_release(dev_flow);
4882 * Query a dv flow rule for its statistics via devx.
4885 * Pointer to Ethernet device.
4887 * Pointer to the sub flow.
4889 * data retrieved by the query.
4891 * Perform verbose error reporting if not NULL.
4894 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query the DevX counter attached to @flow and report hit/byte deltas
 * since the last reset into the rte_flow_query_count result @data.
 * Fails with ENOTSUP when DevX is unavailable, EINVAL when the flow has
 * no counter.
 */
4897 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4898 void *data, struct rte_flow_error *error)
4900 struct mlx5_priv *priv = dev->data->dev_private;
4901 struct rte_flow_query_count *qc = data;
4906 if (!priv->config.devx)
4907 return rte_flow_error_set(error, ENOTSUP,
4908 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4910 "counters are not supported");
4911 if (flow->counter) {
4912 err = mlx5_devx_cmd_flow_counter_query
4913 (flow->counter->dcs,
4914 qc->reset, &pkts, &bytes);
4916 return rte_flow_error_set
4918 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4920 "cannot read counters");
/* Report deltas relative to the last stored baseline. */
4923 qc->hits = pkts - flow->counter->hits;
4924 qc->bytes = bytes - flow->counter->bytes;
/* On reset, the current HW values become the new baseline. */
4926 flow->counter->hits = pkts;
4927 flow->counter->bytes = bytes;
4931 return rte_flow_error_set(error, EINVAL,
4932 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4934 "counters are not available");
4940 * @see rte_flow_query()
/*
 * rte_flow query entry point for the DV driver: walks the action list
 * and dispatches COUNT queries to flow_dv_query_count(); any other
 * non-VOID action is rejected as unsupported.
 */
4944 flow_dv_query(struct rte_eth_dev *dev,
4945 struct rte_flow *flow __rte_unused,
4946 const struct rte_flow_action *actions __rte_unused,
4947 void *data __rte_unused,
4948 struct rte_flow_error *error __rte_unused)
4952 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4953 switch (actions->type) {
4954 case RTE_FLOW_ACTION_TYPE_VOID:
4956 case RTE_FLOW_ACTION_TYPE_COUNT:
4957 ret = flow_dv_query_count(dev, flow, data, error);
4960 return rte_flow_error_set(error, ENOTSUP,
4961 RTE_FLOW_ERROR_TYPE_ACTION,
4963 "action not supported");
4970 * Mutex-protected thunk to flow_dv_translate().
/* Serialize flow_dv_translate() under the shared-context lock. */
4973 flow_d_translate(struct rte_eth_dev *dev,
4974 struct mlx5_flow *dev_flow,
4975 const struct rte_flow_attr *attr,
4976 const struct rte_flow_item items[],
4977 const struct rte_flow_action actions[],
4978 struct rte_flow_error *error)
4982 flow_d_shared_lock(dev);
4983 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4984 flow_d_shared_unlock(dev);
4989 * Mutex-protected thunk to flow_dv_apply().
/* Serialize flow_dv_apply() under the shared-context lock. */
4992 flow_d_apply(struct rte_eth_dev *dev,
4993 struct rte_flow *flow,
4994 struct rte_flow_error *error)
4998 flow_d_shared_lock(dev);
4999 ret = flow_dv_apply(dev, flow, error);
5000 flow_d_shared_unlock(dev);
5005 * Mutex-protected thunk to flow_dv_remove().
/* Serialize flow_dv_remove() under the shared-context lock. */
5008 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5010 flow_d_shared_lock(dev);
5011 flow_dv_remove(dev, flow);
5012 flow_d_shared_unlock(dev);
5016 * Mutex-protected thunk to flow_dv_destroy().
/* Serialize flow_dv_destroy() under the shared-context lock. */
5019 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5021 flow_d_shared_lock(dev);
5022 flow_dv_destroy(dev, flow);
5023 flow_d_shared_unlock(dev);
/*
 * DV flow engine ops table registered with the generic mlx5 flow layer.
 * translate/apply/remove/destroy go through the mutex-protected
 * flow_d_* thunks; validate/prepare/query need no locking.
 */
5026 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
5027 .validate = flow_dv_validate,
5028 .prepare = flow_dv_prepare,
5029 .translate = flow_d_translate,
5030 .apply = flow_d_apply,
5031 .remove = flow_d_remove,
5032 .destroy = flow_d_destroy,
5033 .query = flow_dv_query,
5036 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */