1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
34 #include "mlx5_rxtx.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
65 * Initialize flow attributes structure according to flow items' types.
68 * Pointer to item specification.
70 * Pointer to flow attributes structure.
/*
 * Walk the pattern item list up to END and note which L3/L4 layers it
 * contains; presumably sets presence bits in *attr for ipv4/ipv6/udp/tcp
 * — confirm against the union flow_dv_attr definition (per-case bodies
 * are elided in this excerpt).
 */
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
75 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
77 case RTE_FLOW_ITEM_TYPE_IPV4:
80 case RTE_FLOW_ITEM_TYPE_IPV6:
83 case RTE_FLOW_ITEM_TYPE_UDP:
86 case RTE_FLOW_ITEM_TYPE_TCP:
/*
 * Descriptor of one protocol-header field for building modify-header
 * commands: where the field lives in the packet header and which device
 * modification-field id addresses it.
 */
96 struct field_modify_info {
97 uint32_t size; /* Size of field in protocol header, in bytes. */
98 uint32_t offset; /* Offset of field in protocol header, in bytes. */
99 enum mlx5_modification_field id; /* Device modification-field id. */
/*
 * Ethernet header field map: destination MAC at bytes 0..5 and source
 * MAC at bytes 6..11, each split into a 4-byte and a 2-byte segment to
 * match the 47_16 / 15_0 device field ids.
 */
102 struct field_modify_info modify_eth[] = {
103 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
104 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
105 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
106 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/*
 * IPv4 header field map: TTL (1 byte at offset 8), source address
 * (offset 12) and destination address (offset 16).
 */
110 struct field_modify_info modify_ipv4[] = {
111 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
112 {4, 12, MLX5_MODI_OUT_SIPV4},
113 {4, 16, MLX5_MODI_OUT_DIPV4},
/*
 * IPv6 header field map: hop limit (1 byte at offset 7), then the
 * 128-bit source and destination addresses, each split into four
 * 32-bit device fields (127_96 .. 31_0).
 */
117 struct field_modify_info modify_ipv6[] = {
118 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
119 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
120 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
121 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
122 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
123 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
124 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
125 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
126 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP header field map: 2-byte source and destination ports. */
130 struct field_modify_info modify_udp[] = {
131 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
132 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/*
 * TCP header field map: source/destination ports plus the 32-bit
 * sequence and acknowledgment numbers.
 */
136 struct field_modify_info modify_tcp[] = {
137 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
138 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
139 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
140 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
/*
 * Read the next-protocol field from an IPv4/IPv6 item and, when it
 * indicates IP-in-IP (IPPROTO_IPIP) or IPv6-in-IP (IPPROTO_IPV6)
 * encapsulation, set the matching tunnel-layer bit in *flags.
 * NOTE(review): item->spec is dereferenced whenever item->mask is
 * non-NULL — assumes callers never supply a mask without a spec;
 * confirm against the callers.
 */
145 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
147 uint8_t next_protocol = 0xFF;
149 if (item->mask != NULL) {
150 switch (item->type) {
151 case RTE_FLOW_ITEM_TYPE_IPV4:
153 ((const struct rte_flow_item_ipv4 *)
154 (item->spec))->hdr.next_proto_id;
156 ((const struct rte_flow_item_ipv4 *)
157 (item->mask))->hdr.next_proto_id;
159 case RTE_FLOW_ITEM_TYPE_IPV6:
161 ((const struct rte_flow_item_ipv6 *)
162 (item->spec))->hdr.proto;
164 ((const struct rte_flow_item_ipv6 *)
165 (item->mask))->hdr.proto;
/* Default next_protocol (0xFF) matches neither tunnel protocol. */
171 if (next_protocol == IPPROTO_IPIP)
172 *flags |= MLX5_FLOW_LAYER_IPIP;
173 if (next_protocol == IPPROTO_IPV6)
174 *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
178 * Acquire the synchronizing object to protect multithreaded access
179 * to shared dv context. Lock occurs only if context is actually
180 * shared, i.e. we have multiport IB device and representors are
184 * Pointer to the rte_eth_dev structure.
/*
 * Take the shared-context mutex, but only when the IB context is
 * actually shared between ports (dv_refcnt > 1), avoiding lock
 * overhead on single-port devices.
 */
187 flow_d_shared_lock(struct rte_eth_dev *dev)
189 struct mlx5_priv *priv = dev->data->dev_private;
190 struct mlx5_ibv_shared *sh = priv->sh;
192 if (sh->dv_refcnt > 1) {
195 ret = pthread_mutex_lock(&sh->dv_mutex);
/*
 * Counterpart of flow_d_shared_lock(): release the shared-context
 * mutex only when the context is actually shared (dv_refcnt > 1).
 */
202 flow_d_shared_unlock(struct rte_eth_dev *dev)
204 struct mlx5_priv *priv = dev->data->dev_private;
205 struct mlx5_ibv_shared *sh = priv->sh;
207 if (sh->dv_refcnt > 1) {
210 ret = pthread_mutex_unlock(&sh->dv_mutex);
217 * Convert modify-header action to DV specification.
220 * Pointer to item specification.
222 * Pointer to field modification information.
223 * @param[in,out] resource
224 * Pointer to the modify-header resource.
226 * Type of modification.
228 * Pointer to the error structure.
231 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Translate one rte_flow item (spec + mask) into device modification
 * commands appended to resource->actions, using the field table to map
 * header byte ranges to mlx5 modification-field ids. Fails with EINVAL
 * when the command array (MLX5_MODIFY_NUM) overflows or when the mask
 * selects no field at all.
 */
234 flow_dv_convert_modify_action(struct rte_flow_item *item,
235 struct field_modify_info *field,
236 struct mlx5_flow_dv_modify_hdr_resource *resource,
238 struct rte_flow_error *error)
240 uint32_t i = resource->actions_num;
241 struct mlx5_modification_cmd *actions = resource->actions;
242 const uint8_t *spec = item->spec;
243 const uint8_t *mask = item->mask;
246 while (field->size) {
248 /* Generate modify command for each mask segment. */
249 memcpy(&set, &mask[field->offset], field->size);
251 if (i >= MLX5_MODIFY_NUM)
252 return rte_flow_error_set(error, EINVAL,
253 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
254 "too many items to modify");
255 actions[i].action_type = type;
256 actions[i].field = field->id;
/* Length 0 presumably encodes a full 32-bit field in the device
 * command format — confirm against the mlx5 PRM. */
257 actions[i].length = field->size ==
258 4 ? 0 : field->size * 8;
259 rte_memcpy(&actions[i].data[4 - field->size],
260 &spec[field->offset], field->size);
/* Device expects big-endian command words. */
261 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
264 if (resource->actions_num != i)
265 resource->actions_num = i;
268 if (!resource->actions_num)
269 return rte_flow_error_set(error, EINVAL,
270 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
271 "invalid modification flow item");
276 * Convert modify-header set IPv4 address action to DV specification.
278 * @param[in,out] resource
279 * Pointer to the modify-header resource.
281 * Pointer to action specification.
283 * Pointer to the error structure.
286 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header SET command rewriting the IPv4 source address
 * (SET_IPV4_SRC) or destination address (otherwise), then hand the
 * synthesized item to flow_dv_convert_modify_action().
 */
289 flow_dv_convert_action_modify_ipv4
290 (struct mlx5_flow_dv_modify_hdr_resource *resource,
291 const struct rte_flow_action *action,
292 struct rte_flow_error *error)
294 const struct rte_flow_action_set_ipv4 *conf =
295 (const struct rte_flow_action_set_ipv4 *)(action->conf);
296 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
297 struct rte_flow_item_ipv4 ipv4;
298 struct rte_flow_item_ipv4 ipv4_mask;
300 memset(&ipv4, 0, sizeof(ipv4));
301 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
302 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
303 ipv4.hdr.src_addr = conf->ipv4_addr;
304 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
306 ipv4.hdr.dst_addr = conf->ipv4_addr;
307 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
310 item.mask = &ipv4_mask;
311 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
312 MLX5_MODIFICATION_TYPE_SET, error);
316 * Convert modify-header set IPv6 address action to DV specification.
318 * @param[in,out] resource
319 * Pointer to the modify-header resource.
321 * Pointer to action specification.
323 * Pointer to the error structure.
326 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header SET command rewriting the IPv6 source address
 * (SET_IPV6_SRC) or destination address (otherwise), then hand the
 * synthesized item to flow_dv_convert_modify_action().
 */
329 flow_dv_convert_action_modify_ipv6
330 (struct mlx5_flow_dv_modify_hdr_resource *resource,
331 const struct rte_flow_action *action,
332 struct rte_flow_error *error)
334 const struct rte_flow_action_set_ipv6 *conf =
335 (const struct rte_flow_action_set_ipv6 *)(action->conf);
336 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
337 struct rte_flow_item_ipv6 ipv6;
338 struct rte_flow_item_ipv6 ipv6_mask;
340 memset(&ipv6, 0, sizeof(ipv6));
341 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
342 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
343 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
344 sizeof(ipv6.hdr.src_addr));
345 memcpy(&ipv6_mask.hdr.src_addr,
346 &rte_flow_item_ipv6_mask.hdr.src_addr,
347 sizeof(ipv6.hdr.src_addr));
349 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
350 sizeof(ipv6.hdr.dst_addr));
351 memcpy(&ipv6_mask.hdr.dst_addr,
352 &rte_flow_item_ipv6_mask.hdr.dst_addr,
353 sizeof(ipv6.hdr.dst_addr));
356 item.mask = &ipv6_mask;
357 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
358 MLX5_MODIFICATION_TYPE_SET, error);
362 * Convert modify-header set MAC address action to DV specification.
364 * @param[in,out] resource
365 * Pointer to the modify-header resource.
367 * Pointer to action specification.
369 * Pointer to the error structure.
372 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header SET command rewriting the source MAC
 * (SET_MAC_SRC) or destination MAC (otherwise), then hand the
 * synthesized ETH item to flow_dv_convert_modify_action().
 */
375 flow_dv_convert_action_modify_mac
376 (struct mlx5_flow_dv_modify_hdr_resource *resource,
377 const struct rte_flow_action *action,
378 struct rte_flow_error *error)
380 const struct rte_flow_action_set_mac *conf =
381 (const struct rte_flow_action_set_mac *)(action->conf);
382 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
383 struct rte_flow_item_eth eth;
384 struct rte_flow_item_eth eth_mask;
386 memset(&eth, 0, sizeof(eth));
387 memset(&eth_mask, 0, sizeof(eth_mask));
388 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
389 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
390 sizeof(eth.src.addr_bytes));
391 memcpy(&eth_mask.src.addr_bytes,
392 &rte_flow_item_eth_mask.src.addr_bytes,
393 sizeof(eth_mask.src.addr_bytes));
395 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
396 sizeof(eth.dst.addr_bytes));
397 memcpy(&eth_mask.dst.addr_bytes,
398 &rte_flow_item_eth_mask.dst.addr_bytes,
399 sizeof(eth_mask.dst.addr_bytes));
402 item.mask = &eth_mask;
403 return flow_dv_convert_modify_action(&item, modify_eth, resource,
404 MLX5_MODIFICATION_TYPE_SET, error);
408 * Convert modify-header set TP action to DV specification.
410 * @param[in,out] resource
411 * Pointer to the modify-header resource.
413 * Pointer to action specification.
415 * Pointer to rte_flow_item objects list.
417 * Pointer to flow attributes structure.
419 * Pointer to the error structure.
422 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header SET command rewriting the L4 source/destination
 * port. The flow attributes (derived from the pattern via
 * flow_dv_attr_init) decide whether the UDP or the TCP field table is
 * used; the branch selecting between them is elided in this excerpt.
 */
425 flow_dv_convert_action_modify_tp
426 (struct mlx5_flow_dv_modify_hdr_resource *resource,
427 const struct rte_flow_action *action,
428 const struct rte_flow_item *items,
429 union flow_dv_attr *attr,
430 struct rte_flow_error *error)
432 const struct rte_flow_action_set_tp *conf =
433 (const struct rte_flow_action_set_tp *)(action->conf);
434 struct rte_flow_item item;
435 struct rte_flow_item_udp udp;
436 struct rte_flow_item_udp udp_mask;
437 struct rte_flow_item_tcp tcp;
438 struct rte_flow_item_tcp tcp_mask;
439 struct field_modify_info *field;
/* Derive L3/L4 layer presence from the pattern items. */
442 flow_dv_attr_init(items, attr);
444 memset(&udp, 0, sizeof(udp));
445 memset(&udp_mask, 0, sizeof(udp_mask));
446 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
447 udp.hdr.src_port = conf->port;
448 udp_mask.hdr.src_port =
449 rte_flow_item_udp_mask.hdr.src_port;
451 udp.hdr.dst_port = conf->port;
452 udp_mask.hdr.dst_port =
453 rte_flow_item_udp_mask.hdr.dst_port;
455 item.type = RTE_FLOW_ITEM_TYPE_UDP;
457 item.mask = &udp_mask;
/* TCP variant of the same src/dst port rewrite. */
461 memset(&tcp, 0, sizeof(tcp));
462 memset(&tcp_mask, 0, sizeof(tcp_mask));
463 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
464 tcp.hdr.src_port = conf->port;
465 tcp_mask.hdr.src_port =
466 rte_flow_item_tcp_mask.hdr.src_port;
468 tcp.hdr.dst_port = conf->port;
469 tcp_mask.hdr.dst_port =
470 rte_flow_item_tcp_mask.hdr.dst_port;
472 item.type = RTE_FLOW_ITEM_TYPE_TCP;
474 item.mask = &tcp_mask;
477 return flow_dv_convert_modify_action(&item, field, resource,
478 MLX5_MODIFICATION_TYPE_SET, error);
482 * Convert modify-header set TTL action to DV specification.
484 * @param[in,out] resource
485 * Pointer to the modify-header resource.
487 * Pointer to action specification.
489 * Pointer to rte_flow_item objects list.
491 * Pointer to flow attributes structure.
493 * Pointer to the error structure.
496 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header SET command writing an absolute TTL (IPv4) or
 * hop-limit (IPv6) value. Which of the two variants is emitted depends
 * on the flow attributes; the selecting branch is elided in this
 * excerpt.
 */
499 flow_dv_convert_action_modify_ttl
500 (struct mlx5_flow_dv_modify_hdr_resource *resource,
501 const struct rte_flow_action *action,
502 const struct rte_flow_item *items,
503 union flow_dv_attr *attr,
504 struct rte_flow_error *error)
506 const struct rte_flow_action_set_ttl *conf =
507 (const struct rte_flow_action_set_ttl *)(action->conf);
508 struct rte_flow_item item;
509 struct rte_flow_item_ipv4 ipv4;
510 struct rte_flow_item_ipv4 ipv4_mask;
511 struct rte_flow_item_ipv6 ipv6;
512 struct rte_flow_item_ipv6 ipv6_mask;
513 struct field_modify_info *field;
/* Derive L3/L4 layer presence from the pattern items. */
516 flow_dv_attr_init(items, attr);
518 memset(&ipv4, 0, sizeof(ipv4));
519 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
520 ipv4.hdr.time_to_live = conf->ttl_value;
521 ipv4_mask.hdr.time_to_live = 0xFF;
522 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
524 item.mask = &ipv4_mask;
/* IPv6 variant: hop_limits takes the same role as IPv4 TTL. */
528 memset(&ipv6, 0, sizeof(ipv6));
529 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
530 ipv6.hdr.hop_limits = conf->ttl_value;
531 ipv6_mask.hdr.hop_limits = 0xFF;
532 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
534 item.mask = &ipv6_mask;
537 return flow_dv_convert_modify_action(&item, field, resource,
538 MLX5_MODIFICATION_TYPE_SET, error);
542 * Convert modify-header decrement TTL action to DV specification.
544 * @param[in,out] resource
545 * Pointer to the modify-header resource.
547 * Pointer to action specification.
549 * Pointer to rte_flow_item objects list.
551 * Pointer to flow attributes structure.
553 * Pointer to the error structure.
556 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header ADD command decrementing TTL (IPv4) or
 * hop-limit (IPv6): adding 0xFF to the 8-bit field is equivalent to
 * subtracting 1 modulo 256. The branch choosing the IPv4 vs IPv6
 * variant is elided in this excerpt.
 */
559 flow_dv_convert_action_modify_dec_ttl
560 (struct mlx5_flow_dv_modify_hdr_resource *resource,
561 const struct rte_flow_item *items,
562 union flow_dv_attr *attr,
563 struct rte_flow_error *error)
565 struct rte_flow_item item;
566 struct rte_flow_item_ipv4 ipv4;
567 struct rte_flow_item_ipv4 ipv4_mask;
568 struct rte_flow_item_ipv6 ipv6;
569 struct rte_flow_item_ipv6 ipv6_mask;
570 struct field_modify_info *field;
/* Derive L3/L4 layer presence from the pattern items. */
573 flow_dv_attr_init(items, attr);
575 memset(&ipv4, 0, sizeof(ipv4));
576 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
577 ipv4.hdr.time_to_live = 0xFF;
578 ipv4_mask.hdr.time_to_live = 0xFF;
579 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
581 item.mask = &ipv4_mask;
/* IPv6 variant: same add-0xFF trick on hop_limits. */
585 memset(&ipv6, 0, sizeof(ipv6));
586 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
587 ipv6.hdr.hop_limits = 0xFF;
588 ipv6_mask.hdr.hop_limits = 0xFF;
589 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
591 item.mask = &ipv6_mask;
594 return flow_dv_convert_modify_action(&item, field, resource,
595 MLX5_MODIFICATION_TYPE_ADD, error);
599 * Convert modify-header increment/decrement TCP Sequence number
600 * to DV specification.
602 * @param[in,out] resource
603 * Pointer to the modify-header resource.
605 * Pointer to action specification.
607 * Pointer to the error structure.
610 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header ADD command adjusting the TCP sequence number
 * up or down by *conf. The line turning 'value' negative for the DEC
 * case is elided in this excerpt; the comment below explains the
 * add-UINT32_MAX decrement trick.
 */
613 flow_dv_convert_action_modify_tcp_seq
614 (struct mlx5_flow_dv_modify_hdr_resource *resource,
615 const struct rte_flow_action *action,
616 struct rte_flow_error *error)
618 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
619 uint64_t value = rte_be_to_cpu_32(*conf);
620 struct rte_flow_item item;
621 struct rte_flow_item_tcp tcp;
622 struct rte_flow_item_tcp tcp_mask;
624 memset(&tcp, 0, sizeof(tcp));
625 memset(&tcp_mask, 0, sizeof(tcp_mask));
626 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
628 * The HW has no decrement operation, only increment operation.
629 * To simulate decrement X from Y using increment operation
630 * we need to add UINT32_MAX X times to Y.
631 * Each adding of UINT32_MAX decrements Y by 1.
634 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
635 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
636 item.type = RTE_FLOW_ITEM_TYPE_TCP;
638 item.mask = &tcp_mask;
639 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
640 MLX5_MODIFICATION_TYPE_ADD, error);
644 * Convert modify-header increment/decrement TCP Acknowledgment number
645 * to DV specification.
647 * @param[in,out] resource
648 * Pointer to the modify-header resource.
650 * Pointer to action specification.
652 * Pointer to the error structure.
655 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a modify-header ADD command adjusting the TCP acknowledgment
 * number up or down by *conf; mirrors the TCP sequence variant above.
 * The DEC-case negation of 'value' is elided in this excerpt.
 */
658 flow_dv_convert_action_modify_tcp_ack
659 (struct mlx5_flow_dv_modify_hdr_resource *resource,
660 const struct rte_flow_action *action,
661 struct rte_flow_error *error)
663 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
664 uint64_t value = rte_be_to_cpu_32(*conf);
665 struct rte_flow_item item;
666 struct rte_flow_item_tcp tcp;
667 struct rte_flow_item_tcp tcp_mask;
669 memset(&tcp, 0, sizeof(tcp));
670 memset(&tcp_mask, 0, sizeof(tcp_mask));
671 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
673 * The HW has no decrement operation, only increment operation.
674 * To simulate decrement X from Y using increment operation
675 * we need to add UINT32_MAX X times to Y.
676 * Each adding of UINT32_MAX decrements Y by 1.
679 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
680 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
681 item.type = RTE_FLOW_ITEM_TYPE_TCP;
683 item.mask = &tcp_mask;
684 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
685 MLX5_MODIFICATION_TYPE_ADD, error);
689 * Validate META item.
692 * Pointer to the rte_eth_dev structure.
694 * Item specification.
696 * Attributes of flow that includes this item.
698 * Pointer to error structure.
701 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a META pattern item: the port must have the
 * DEV_TX_OFFLOAD_MATCH_METADATA offload enabled, the spec must be
 * present and non-zero (guard conditions elided in this excerpt), the
 * mask must be acceptable against the full 32-bit nic_mask, and the
 * flow must not be ingress ("pattern not supported for ingress").
 */
704 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
705 const struct rte_flow_item *item,
706 const struct rte_flow_attr *attr,
707 struct rte_flow_error *error)
709 const struct rte_flow_item_meta *spec = item->spec;
710 const struct rte_flow_item_meta *mask = item->mask;
711 const struct rte_flow_item_meta nic_mask = {
712 .data = RTE_BE32(UINT32_MAX)
715 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
717 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
718 return rte_flow_error_set(error, EPERM,
719 RTE_FLOW_ERROR_TYPE_ITEM,
721 "match on metadata offload "
722 "configuration is off for this port");
724 return rte_flow_error_set(error, EINVAL,
725 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
727 "data cannot be empty");
729 return rte_flow_error_set(error, EINVAL,
730 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
732 "data cannot be zero");
/* Fall back to the generic META mask when none was supplied. */
734 mask = &rte_flow_item_meta_mask;
735 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
736 (const uint8_t *)&nic_mask,
737 sizeof(struct rte_flow_item_meta),
742 return rte_flow_error_set(error, ENOTSUP,
743 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
745 "pattern not supported for ingress");
750 * Validate vport item.
753 * Pointer to the rte_eth_dev structure.
755 * Item specification.
757 * Attributes of flow that includes this item.
758 * @param[in] item_flags
759 * Bit-fields that holds the items detected until now.
761 * Pointer to error structure.
764 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a PORT_ID pattern item: allowed only on transfer flows, at
 * most one per flow, only a full (0xffffffff) id mask is supported, and
 * the referenced port must belong to the same E-Switch domain as the
 * device the rule is created on.
 */
767 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
768 const struct rte_flow_item *item,
769 const struct rte_flow_attr *attr,
771 struct rte_flow_error *error)
773 const struct rte_flow_item_port_id *spec = item->spec;
774 const struct rte_flow_item_port_id *mask = item->mask;
775 const struct rte_flow_item_port_id switch_mask = {
778 uint16_t esw_domain_id;
779 uint16_t item_port_esw_domain_id;
783 return rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ITEM,
786 "match on port id is valid only"
787 " when transfer flag is enabled");
788 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
789 return rte_flow_error_set(error, ENOTSUP,
790 RTE_FLOW_ERROR_TYPE_ITEM, item,
791 "multiple source ports are not"
/* Partial masks cannot be offloaded; require all bits set. */
795 if (mask->id != 0xffffffff)
796 return rte_flow_error_set(error, ENOTSUP,
797 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
799 "no support for partial mask on"
801 ret = mlx5_flow_item_acceptable
802 (item, (const uint8_t *)mask,
803 (const uint8_t *)&rte_flow_item_port_id_mask,
804 sizeof(struct rte_flow_item_port_id),
/* Resolve E-Switch domains for both the matched port and this
 * device, then require they match. */
810 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
813 return rte_flow_error_set(error, -ret,
814 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
815 "failed to obtain E-Switch info for"
817 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
818 &esw_domain_id, NULL);
820 return rte_flow_error_set(error, -ret,
821 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
823 "failed to obtain E-Switch info");
824 if (item_port_esw_domain_id != esw_domain_id)
825 return rte_flow_error_set(error, -ret,
826 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
827 "cannot match on a port from a"
828 " different E-Switch");
833 * Validate count action.
838 * Pointer to error structure.
841 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a COUNT action: requires DevX (priv->config.devx) and the
 * HAVE_IBV_FLOW_DEVX_COUNTERS build-time capability; otherwise the
 * action is reported as unsupported.
 */
844 flow_dv_validate_action_count(struct rte_eth_dev *dev,
845 struct rte_flow_error *error)
847 struct mlx5_priv *priv = dev->data->dev_private;
849 if (!priv->config.devx)
851 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
855 return rte_flow_error_set
857 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
859 "count action not supported");
863 * Validate the L2 encap action.
865 * @param[in] action_flags
866 * Holds the actions detected until now.
868 * Pointer to the encap action.
870 * Pointer to flow attributes
872 * Pointer to error structure.
875 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an L2 encap action: configuration must be non-NULL, it may
 * not be combined with DROP, only one encap or decap is allowed per
 * flow, and (unless transfer) it is rejected on ingress.
 */
878 flow_dv_validate_action_l2_encap(uint64_t action_flags,
879 const struct rte_flow_action *action,
880 const struct rte_flow_attr *attr,
881 struct rte_flow_error *error)
884 return rte_flow_error_set(error, EINVAL,
885 RTE_FLOW_ERROR_TYPE_ACTION, action,
886 "configuration cannot be null");
887 if (action_flags & MLX5_FLOW_ACTION_DROP)
888 return rte_flow_error_set(error, EINVAL,
889 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
890 "can't drop and encap in same flow");
891 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
892 return rte_flow_error_set(error, EINVAL,
893 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
894 "can only have a single encap or"
895 " decap action in a flow");
896 if (!attr->transfer && attr->ingress)
897 return rte_flow_error_set(error, ENOTSUP,
898 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
900 "encap action not supported for "
906 * Validate the L2 decap action.
908 * @param[in] action_flags
909 * Holds the actions detected until now.
911 * Pointer to flow attributes
913 * Pointer to error structure.
916 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an L2 decap action: may not be combined with DROP, only one
 * encap or decap per flow, may not follow a modify-header action, and
 * is rejected on egress (see the ATTR_EGRESS error below).
 */
919 flow_dv_validate_action_l2_decap(uint64_t action_flags,
920 const struct rte_flow_attr *attr,
921 struct rte_flow_error *error)
923 if (action_flags & MLX5_FLOW_ACTION_DROP)
924 return rte_flow_error_set(error, EINVAL,
925 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
926 "can't drop and decap in same flow");
927 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
928 return rte_flow_error_set(error, EINVAL,
929 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
930 "can only have a single encap or"
931 " decap action in a flow");
932 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
933 return rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
935 "can't have decap action after"
938 return rte_flow_error_set(error, ENOTSUP,
939 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
941 "decap action not supported for "
947 * Validate the raw encap action.
949 * @param[in] action_flags
950 * Holds the actions detected until now.
952 * Pointer to the encap action.
954 * Pointer to flow attributes
956 * Pointer to error structure.
959 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a RAW_ENCAP action: configuration must be non-NULL, it may
 * not be combined with DROP, only one encap per flow, and on ingress
 * (non-transfer) flows it is only allowed after a RAW_DECAP.
 */
962 flow_dv_validate_action_raw_encap(uint64_t action_flags,
963 const struct rte_flow_action *action,
964 const struct rte_flow_attr *attr,
965 struct rte_flow_error *error)
968 return rte_flow_error_set(error, EINVAL,
969 RTE_FLOW_ERROR_TYPE_ACTION, action,
970 "configuration cannot be null");
971 if (action_flags & MLX5_FLOW_ACTION_DROP)
972 return rte_flow_error_set(error, EINVAL,
973 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
974 "can't drop and encap in same flow");
975 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
976 return rte_flow_error_set(error, EINVAL,
977 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
978 "can only have a single encap"
979 " action in a flow");
980 /* encap without preceding decap is not supported for ingress */
981 if (!attr->transfer && attr->ingress &&
982 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
983 return rte_flow_error_set(error, ENOTSUP,
984 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
986 "encap action not supported for "
992 * Validate the raw decap action.
994 * @param[in] action_flags
995 * Holds the actions detected until now.
997 * Pointer to the encap action.
999 * Pointer to flow attributes
1001 * Pointer to error structure.
1004 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a RAW_DECAP action: may not be combined with DROP, may not
 * come after an encap or a modify-header action, only one decap per
 * flow, and on egress it must be followed by a RAW_ENCAP (the loop
 * below scans the remaining actions for one).
 */
1007 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1008 const struct rte_flow_action *action,
1009 const struct rte_flow_attr *attr,
1010 struct rte_flow_error *error)
1012 if (action_flags & MLX5_FLOW_ACTION_DROP)
1013 return rte_flow_error_set(error, EINVAL,
1014 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1015 "can't drop and decap in same flow");
1016 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1017 return rte_flow_error_set(error, EINVAL,
1018 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1019 "can't have encap action before"
1021 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1022 return rte_flow_error_set(error, EINVAL,
1023 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1024 "can only have a single decap"
1025 " action in a flow");
1026 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1027 return rte_flow_error_set(error, EINVAL,
1028 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1029 "can't have decap action after"
1031 /* decap action is valid on egress only if it is followed by encap */
1033 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1034 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1037 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1038 return rte_flow_error_set
1040 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1041 NULL, "decap action not supported"
1048 * Find existing encap/decap resource or create and register a new one.
1050 * @param dev[in, out]
1051 * Pointer to rte_eth_dev structure.
1052 * @param[in, out] resource
1053 * Pointer to encap/decap resource.
1054 * @parm[in, out] dev_flow
1055 * Pointer to the dev_flow.
1057 * pointer to error structure.
1060 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached encap/decap DV action matching the requested
 * (reformat_type, ft_type, flags, size, buf) tuple; on a cache miss,
 * allocate a new resource, create the packet-reformat action through
 * the glue layer for the proper DR domain (FDB/RX/TX) and insert it at
 * the head of sh->encaps_decaps. Sharing across flows is tracked with
 * an atomic refcount; the matched/created resource is stored in
 * dev_flow->dv.encap_decap.
 */
1063 flow_dv_encap_decap_resource_register
1064 (struct rte_eth_dev *dev,
1065 struct mlx5_flow_dv_encap_decap_resource *resource,
1066 struct mlx5_flow *dev_flow,
1067 struct rte_flow_error *error)
1069 struct mlx5_priv *priv = dev->data->dev_private;
1070 struct mlx5_ibv_shared *sh = priv->sh;
1071 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1072 struct rte_flow *flow = dev_flow->flow;
1073 struct mlx5dv_dr_domain *domain;
/* Group 0 flows get flags=1 — presumably the DR root-level flag
 * (cf. MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL above); confirm. */
1075 resource->flags = flow->group ? 0 : 1;
1076 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1077 domain = sh->fdb_domain;
1078 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1079 domain = sh->rx_domain;
1081 domain = sh->tx_domain;
1083 /* Lookup a matching resource from cache. */
1084 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1085 if (resource->reformat_type == cache_resource->reformat_type &&
1086 resource->ft_type == cache_resource->ft_type &&
1087 resource->flags == cache_resource->flags &&
1088 resource->size == cache_resource->size &&
1089 !memcmp((const void *)resource->buf,
1090 (const void *)cache_resource->buf,
1092 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1093 (void *)cache_resource,
1094 rte_atomic32_read(&cache_resource->refcnt));
1095 rte_atomic32_inc(&cache_resource->refcnt);
1096 dev_flow->dv.encap_decap = cache_resource;
1100 /* Register new encap/decap resource. */
1101 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1102 if (!cache_resource)
1103 return rte_flow_error_set(error, ENOMEM,
1104 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1105 "cannot allocate resource memory");
1106 *cache_resource = *resource;
1107 cache_resource->verbs_action =
1108 mlx5_glue->dv_create_flow_action_packet_reformat
1109 (sh->ctx, cache_resource->reformat_type,
1110 cache_resource->ft_type, domain, cache_resource->flags,
1111 cache_resource->size,
1112 (cache_resource->size ? cache_resource->buf : NULL));
1113 if (!cache_resource->verbs_action) {
/* Creation failed: free the allocation before reporting. */
1114 rte_free(cache_resource);
1115 return rte_flow_error_set(error, ENOMEM,
1116 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1117 NULL, "cannot create action");
1119 rte_atomic32_init(&cache_resource->refcnt);
1120 rte_atomic32_inc(&cache_resource->refcnt);
1121 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1122 dev_flow->dv.encap_decap = cache_resource;
1123 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1124 (void *)cache_resource,
1125 rte_atomic32_read(&cache_resource->refcnt));
1130 * Find existing table jump resource or create and register a new one.
1132 * @param dev[in, out]
1133 * Pointer to rte_eth_dev structure.
1134 * @param[in, out] resource
1135 * Pointer to jump table resource.
1136 * @parm[in, out] dev_flow
1137 * Pointer to the dev_flow.
1139 * pointer to error structure.
1142 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached jump-to-table DV action for the requested table; on a
 * cache miss, allocate a new resource, create the dest-flow-table
 * action through the glue layer and insert it at the head of
 * sh->jump_tbl. Refcounted like the encap/decap cache; the result is
 * stored in dev_flow->dv.jump.
 */
1145 flow_dv_jump_tbl_resource_register
1146 (struct rte_eth_dev *dev,
1147 struct mlx5_flow_dv_jump_tbl_resource *resource,
1148 struct mlx5_flow *dev_flow,
1149 struct rte_flow_error *error)
1151 struct mlx5_priv *priv = dev->data->dev_private;
1152 struct mlx5_ibv_shared *sh = priv->sh;
1153 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1155 /* Lookup a matching resource from cache. */
1156 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1157 if (resource->tbl == cache_resource->tbl) {
1158 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1159 (void *)cache_resource,
1160 rte_atomic32_read(&cache_resource->refcnt));
1161 rte_atomic32_inc(&cache_resource->refcnt);
1162 dev_flow->dv.jump = cache_resource;
1166 /* Register new jump table resource. */
1167 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1168 if (!cache_resource)
1169 return rte_flow_error_set(error, ENOMEM,
1170 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1171 "cannot allocate resource memory");
1172 *cache_resource = *resource;
1173 cache_resource->action =
1174 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1175 (resource->tbl->obj);
1176 if (!cache_resource->action) {
/* Creation failed: free the allocation before reporting. */
1177 rte_free(cache_resource);
1178 return rte_flow_error_set(error, ENOMEM,
1179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1180 NULL, "cannot create action");
1182 rte_atomic32_init(&cache_resource->refcnt);
1183 rte_atomic32_inc(&cache_resource->refcnt);
1184 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1185 dev_flow->dv.jump = cache_resource;
1186 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1187 (void *)cache_resource,
1188 rte_atomic32_read(&cache_resource->refcnt));
1193 * Find existing table port ID resource or create and register a new one.
1195 * @param dev[in, out]
1196 * Pointer to rte_eth_dev structure.
1197 * @param[in, out] resource
1198 * Pointer to port ID action resource.
1199 * @parm[in, out] dev_flow
1200 * Pointer to the dev_flow.
1202 * pointer to error structure.
1205 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached dest-vport DV action for the requested port id; on a
 * cache miss, allocate a new resource, create the action on the FDB
 * domain through the glue layer and insert it at the head of
 * sh->port_id_action_list. Refcounted like the other resource caches;
 * the result is stored in dev_flow->dv.port_id_action.
 */
1208 flow_dv_port_id_action_resource_register
1209 (struct rte_eth_dev *dev,
1210 struct mlx5_flow_dv_port_id_action_resource *resource,
1211 struct mlx5_flow *dev_flow,
1212 struct rte_flow_error *error)
1214 struct mlx5_priv *priv = dev->data->dev_private;
1215 struct mlx5_ibv_shared *sh = priv->sh;
1216 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1218 /* Lookup a matching resource from cache. */
1219 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1220 if (resource->port_id == cache_resource->port_id) {
1221 DRV_LOG(DEBUG, "port id action resource resource %p: "
1223 (void *)cache_resource,
1224 rte_atomic32_read(&cache_resource->refcnt));
1225 rte_atomic32_inc(&cache_resource->refcnt);
1226 dev_flow->dv.port_id_action = cache_resource;
1230 /* Register new port id action resource. */
1231 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1232 if (!cache_resource)
1233 return rte_flow_error_set(error, ENOMEM,
1234 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1235 "cannot allocate resource memory");
1236 *cache_resource = *resource;
1237 cache_resource->action =
1238 mlx5_glue->dr_create_flow_action_dest_vport
1239 (priv->sh->fdb_domain, resource->port_id);
1240 if (!cache_resource->action) {
/* Creation failed: free the allocation before reporting. */
1241 rte_free(cache_resource);
1242 return rte_flow_error_set(error, ENOMEM,
1243 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1244 NULL, "cannot create action");
1246 rte_atomic32_init(&cache_resource->refcnt);
1247 rte_atomic32_inc(&cache_resource->refcnt);
1248 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1249 dev_flow->dv.port_id_action = cache_resource;
1250 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1251 (void *)cache_resource,
1252 rte_atomic32_read(&cache_resource->refcnt));
 * Get the size of specific rte_flow_item_type
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *   sizeof struct item_type, 0 if void or irrelevant.
/* Size of each supported item's spec struct, as copied into the encap buf. */
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1312 #define MLX5_ENCAP_IPV4_VERSION 0x40
1313 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1314 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1315 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1316 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1317 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1318 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *   Pointer to rte_flow_item objects list.
 *   Pointer to the output buffer.
 *   Pointer to the output buffer size.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t temp_size = 0;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	/* Copy each item spec into buf, then fill protocol defaults in place. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			/* VLAN is only valid after an ETH item. */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "neither eth nor vlan"
			/* Back-patch the L2 ethertype if the user left it 0. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "neither eth nor vlan"
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			if (!ipv6->vtc_flow)
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "ip header not found");
			/* Default the IP next-protocol field to UDP. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "udp header not found");
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "udp header not found");
			/* GPE carries the next protocol; it cannot be guessed. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "next protocol not found");
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "next protocol not found");
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
		case RTE_FLOW_ITEM_TYPE_VOID:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
1491 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1493 struct rte_ether_hdr *eth = NULL;
1494 struct rte_vlan_hdr *vlan = NULL;
1495 struct rte_ipv6_hdr *ipv6 = NULL;
1496 struct rte_udp_hdr *udp = NULL;
1500 eth = (struct rte_ether_hdr *)data;
1501 next_hdr = (char *)(eth + 1);
1502 proto = RTE_BE16(eth->ether_type);
1505 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1506 next_hdr += sizeof(struct rte_vlan_hdr);
1507 vlan = (struct rte_vlan_hdr *)next_hdr;
1508 proto = RTE_BE16(vlan->eth_proto);
1511 /* HW calculates IPv4 csum. no need to proceed */
1512 if (proto == RTE_ETHER_TYPE_IPV4)
1515 /* non IPv4/IPv6 header. not supported */
1516 if (proto != RTE_ETHER_TYPE_IPV6) {
1517 return rte_flow_error_set(error, ENOTSUP,
1518 RTE_FLOW_ERROR_TYPE_ACTION,
1519 NULL, "Cannot offload non IPv4/IPv6");
1522 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1524 /* ignore non UDP */
1525 if (ipv6->proto != IPPROTO_UDP)
1528 udp = (struct rte_udp_hdr *)(ipv6 + 1);
1529 udp->dgram_cksum = 0;
 * Convert L2 encap action to DV specification.
 *   Pointer to rte_eth_dev structure.
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		/* FDB table for E-Switch flows, NIC TX otherwise. */
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
			(const struct rte_flow_action_raw_encap *)action->conf;
		/* Raw encap supplies a ready-made header buffer. */
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
		if (flow_dv_zero_encap_udp_csum(res.buf, error))
	if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
			((const struct rte_flow_action_vxlan_encap *)
			 action->conf)->definition;
			((const struct rte_flow_action_nvgre_encap *)
			 action->conf)->definition;
	/* Item-list variants are serialized into the raw buffer here. */
	if (flow_dv_convert_encap_data(encap_data, res.buf,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
 * Convert L2 decap action to DV specification.
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		/* Decap applies on RX (or FDB for E-Switch flows). */
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *   Pointer to rte_eth_dev structure.
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 *   Pointer to the flow attributes.
 *   Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;
	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	/* Egress rules encapsulate (L2->L3), ingress rules decapsulate. */
	res.reformat_type = attr->egress ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
 * Validate the modify-header actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
	/* DEC_TTL is the only modify action that carries no configuration. */
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	/* Modify-header must be applied before any encapsulation. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
 * Validate the modify-header MAC address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	/* Generic modify-header checks first, then require an L2 match. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no L2 item in pattern");
 * Validate the modify-header IPv4 address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	/* Generic modify-header checks first, then require an IPv4 match. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv4 item in pattern");
 * Validate the modify-header IPv6 address actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
	/* Generic modify-header checks first, then require an IPv6 match. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv6 item in pattern");
 * Validate the modify-header TP actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
	/* Transport-port rewrite needs an L4 (TCP/UDP) item in the pattern. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
 * Validate the modify-header actions of increment/decrement
 * TCP Sequence-number.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no TCP item in"
		/* INC and DEC of the sequence number are mutually exclusive. */
		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
		    (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
		    (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "cannot decrease and increase"
						  " TCP sequence number"
						  " at the same time");
 * Validate the modify-header actions of increment/decrement
 * TCP Acknowledgment number.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no TCP item in"
		/* INC and DEC of the ack number are mutually exclusive. */
		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
		    (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
		    (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "cannot decrease and increase"
						  " TCP acknowledgment number"
						  " at the same time");
 * Validate the modify-header TTL actions.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
	/* TTL/hop-limit rewrite requires an L3 item to anchor to. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no IP protocol in pattern");
1966 * Validate jump action.
1969 * Pointer to the modify action.
1971 * The group of the current flow.
1973 * Pointer to error structure.
1976 * 0 on success, a negative errno value otherwise and rte_errno is set.
1979 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1981 struct rte_flow_error *error)
1983 if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1984 return rte_flow_error_set(error, EINVAL,
1985 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1986 NULL, "action configuration not set");
1987 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1988 return rte_flow_error_set(error, EINVAL,
1989 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1990 "target group must be higher then"
1991 " the current flow group");
 * Validate the port_id action.
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 *   Port_id RTE action structure.
 *   Attributes of flow that includes this action.
 *   Pointer to error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_port_id *port_id;
	uint16_t esw_domain_id;
	uint16_t act_port_domain_id;
	/* Redirect-to-port only makes sense on E-Switch (transfer) rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "port id action is valid in transfer"
	if (!action || !action->conf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "port id action parameters must be"
	/* Only a single fate action is allowed per flow. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate actions in"
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "failed to obtain E-Switch info");
	port_id = action->conf;
	port = port_id->original ? dev->data->port_id : port_id->id;
	ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
		return rte_flow_error_set
			 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
			 "failed to obtain E-Switch port id for port");
	/* Source and destination ports must share the E-Switch domain. */
	if (act_port_domain_id != esw_domain_id)
		return rte_flow_error_set
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "port does not belong to"
			 " E-Switch being configured");
 * Find existing modify-header resource or create and register a new one.
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 *   pointer to error structure.
 *   0 on success otherwise -errno and errno is set.
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_domain *ns;
	/* Select the DR domain matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		/* Root-level flag applies only to group 0 flows. */
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, cache_resource->flags,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	/* First reference belongs to the flow registering the resource. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
 * Get or create a flow counter.
 *   Pointer to the Ethernet device structure.
 *   Indicate if this counter is shared with other flows.
 *   Counter identifier.
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
static struct mlx5_flow_counter *
flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_counter_set *dcs = NULL;
	/* DevX is required for flow counters. */
	if (!priv->config.devx) {
	/* Shared counters are looked up by id and reused. */
	LIST_FOREACH(cnt, &priv->flow_counters, next) {
		if (cnt->shared && cnt->id == id) {
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
	/* Allocate the hardware counter set through DevX. */
	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
	struct mlx5_flow_counter tmpl = {
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
 * Release a flow counter.
 * @param[in] counter
 *   Pointer to the counter handler.
flow_dv_counter_release(struct mlx5_flow_counter *counter)
	/* Free the DevX counter set only when the last reference is gone. */
	if (--counter->ref_cnt == 0) {
		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
		LIST_REMOVE(counter, next);
		rte_free(counter->dcs);
2236 * Verify the @p attributes will be correctly understood by the NIC and store
2237 * them in the @p flow if everything is correct.
2240 * Pointer to dev struct.
2241 * @param[in] attributes
2242 * Pointer to flow attributes
2244 * Pointer to error structure.
2247 * 0 on success, a negative errno value otherwise and rte_errno is set.
2250 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2251 const struct rte_flow_attr *attributes,
2252 struct rte_flow_error *error)
2254 struct mlx5_priv *priv = dev->data->dev_private;
2255 uint32_t priority_max = priv->config.flow_prio - 1;
2257 #ifndef HAVE_MLX5DV_DR
2258 if (attributes->group)
2259 return rte_flow_error_set(error, ENOTSUP,
2260 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2262 "groups is not supported");
2264 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2265 attributes->priority >= priority_max)
2266 return rte_flow_error_set(error, ENOTSUP,
2267 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2269 "priority out of range");
2270 if (attributes->transfer) {
2271 if (!priv->config.dv_esw_en)
2272 return rte_flow_error_set
2274 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2275 "E-Switch dr is not supported");
2276 if (!(priv->representor || priv->master))
2277 return rte_flow_error_set
2278 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2279 NULL, "E-Switch configurationd can only be"
2280 " done by a master or a representor device");
2281 if (attributes->egress)
2282 return rte_flow_error_set
2284 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2285 "egress is not supported");
2286 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2287 return rte_flow_error_set
2289 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2290 NULL, "group must be smaller than "
2291 RTE_STR(MLX5_MAX_FDB_TABLES));
2293 if (!(attributes->egress ^ attributes->ingress))
2294 return rte_flow_error_set(error, ENOTSUP,
2295 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2296 "must specify exactly one of "
2297 "ingress or egress");
2302 * Internal validation function. For validating both actions and items.
2305 * Pointer to the rte_eth_dev structure.
2307 * Pointer to the flow attributes.
2309 * Pointer to the list of items.
2310 * @param[in] actions
2311 * Pointer to the list of actions.
2313 * Pointer to the error structure.
2316 * 0 on success, a negative errno value otherwise and rte_errno is set.
2319 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2320 const struct rte_flow_item items[],
2321 const struct rte_flow_action actions[],
2322 struct rte_flow_error *error)
2325 uint64_t action_flags = 0;
2326 uint64_t item_flags = 0;
2327 uint64_t last_item = 0;
2328 uint8_t next_protocol = 0xff;
2330 const struct rte_flow_item *gre_item = NULL;
2331 struct rte_flow_item_tcp nic_tcp_mask = {
2334 .src_port = RTE_BE16(UINT16_MAX),
2335 .dst_port = RTE_BE16(UINT16_MAX),
2341 ret = flow_dv_validate_attributes(dev, attr, error);
2344 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2345 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2346 switch (items->type) {
2347 case RTE_FLOW_ITEM_TYPE_VOID:
2349 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2350 ret = flow_dv_validate_item_port_id
2351 (dev, items, attr, item_flags, error);
2354 last_item |= MLX5_FLOW_ITEM_PORT_ID;
2356 case RTE_FLOW_ITEM_TYPE_ETH:
2357 ret = mlx5_flow_validate_item_eth(items, item_flags,
2361 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2362 MLX5_FLOW_LAYER_OUTER_L2;
2364 case RTE_FLOW_ITEM_TYPE_VLAN:
2365 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2369 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2370 MLX5_FLOW_LAYER_OUTER_VLAN;
2372 case RTE_FLOW_ITEM_TYPE_IPV4:
2373 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2377 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2378 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2379 if (items->mask != NULL &&
2380 ((const struct rte_flow_item_ipv4 *)
2381 items->mask)->hdr.next_proto_id) {
2383 ((const struct rte_flow_item_ipv4 *)
2384 (items->spec))->hdr.next_proto_id;
2386 ((const struct rte_flow_item_ipv4 *)
2387 (items->mask))->hdr.next_proto_id;
2389 /* Reset for inner layer. */
2390 next_protocol = 0xff;
2392 mlx5_flow_tunnel_ip_check(items, &last_item);
2394 case RTE_FLOW_ITEM_TYPE_IPV6:
2395 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2399 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2400 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2401 if (items->mask != NULL &&
2402 ((const struct rte_flow_item_ipv6 *)
2403 items->mask)->hdr.proto) {
2405 ((const struct rte_flow_item_ipv6 *)
2406 items->spec)->hdr.proto;
2408 ((const struct rte_flow_item_ipv6 *)
2409 items->mask)->hdr.proto;
2411 /* Reset for inner layer. */
2412 next_protocol = 0xff;
2414 mlx5_flow_tunnel_ip_check(items, &last_item);
2416 case RTE_FLOW_ITEM_TYPE_TCP:
2417 ret = mlx5_flow_validate_item_tcp
2424 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2425 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2427 case RTE_FLOW_ITEM_TYPE_UDP:
2428 ret = mlx5_flow_validate_item_udp(items, item_flags,
2433 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2434 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2436 case RTE_FLOW_ITEM_TYPE_GRE:
2437 case RTE_FLOW_ITEM_TYPE_NVGRE:
2438 ret = mlx5_flow_validate_item_gre(items, item_flags,
2439 next_protocol, error);
2443 last_item = MLX5_FLOW_LAYER_GRE;
2445 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2446 ret = mlx5_flow_validate_item_gre_key
2447 (items, item_flags, gre_item, error);
2450 item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
2452 case RTE_FLOW_ITEM_TYPE_VXLAN:
2453 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2457 last_item = MLX5_FLOW_LAYER_VXLAN;
2459 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2460 ret = mlx5_flow_validate_item_vxlan_gpe(items,
2465 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2467 case RTE_FLOW_ITEM_TYPE_MPLS:
2468 ret = mlx5_flow_validate_item_mpls(dev, items,
2473 last_item = MLX5_FLOW_LAYER_MPLS;
2475 case RTE_FLOW_ITEM_TYPE_META:
2476 ret = flow_dv_validate_item_meta(dev, items, attr,
2480 last_item = MLX5_FLOW_ITEM_METADATA;
2482 case RTE_FLOW_ITEM_TYPE_ICMP:
2483 ret = mlx5_flow_validate_item_icmp(items, item_flags,
2488 item_flags |= MLX5_FLOW_LAYER_ICMP;
2490 case RTE_FLOW_ITEM_TYPE_ICMP6:
2491 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
2496 item_flags |= MLX5_FLOW_LAYER_ICMP6;
2499 return rte_flow_error_set(error, ENOTSUP,
2500 RTE_FLOW_ERROR_TYPE_ITEM,
2501 NULL, "item not supported");
2503 item_flags |= last_item;
2505 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2506 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2507 return rte_flow_error_set(error, ENOTSUP,
2508 RTE_FLOW_ERROR_TYPE_ACTION,
2509 actions, "too many actions");
2510 switch (actions->type) {
2511 case RTE_FLOW_ACTION_TYPE_VOID:
2513 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2514 ret = flow_dv_validate_action_port_id(dev,
2521 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2524 case RTE_FLOW_ACTION_TYPE_FLAG:
2525 ret = mlx5_flow_validate_action_flag(action_flags,
2529 action_flags |= MLX5_FLOW_ACTION_FLAG;
2532 case RTE_FLOW_ACTION_TYPE_MARK:
2533 ret = mlx5_flow_validate_action_mark(actions,
2538 action_flags |= MLX5_FLOW_ACTION_MARK;
2541 case RTE_FLOW_ACTION_TYPE_DROP:
2542 ret = mlx5_flow_validate_action_drop(action_flags,
2546 action_flags |= MLX5_FLOW_ACTION_DROP;
2549 case RTE_FLOW_ACTION_TYPE_QUEUE:
2550 ret = mlx5_flow_validate_action_queue(actions,
2555 action_flags |= MLX5_FLOW_ACTION_QUEUE;
2558 case RTE_FLOW_ACTION_TYPE_RSS:
2559 ret = mlx5_flow_validate_action_rss(actions,
2565 action_flags |= MLX5_FLOW_ACTION_RSS;
2568 case RTE_FLOW_ACTION_TYPE_COUNT:
2569 ret = flow_dv_validate_action_count(dev, error);
2572 action_flags |= MLX5_FLOW_ACTION_COUNT;
2575 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2576 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2577 ret = flow_dv_validate_action_l2_encap(action_flags,
2582 action_flags |= actions->type ==
2583 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2584 MLX5_FLOW_ACTION_VXLAN_ENCAP :
2585 MLX5_FLOW_ACTION_NVGRE_ENCAP;
2588 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2589 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2590 ret = flow_dv_validate_action_l2_decap(action_flags,
2594 action_flags |= actions->type ==
2595 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2596 MLX5_FLOW_ACTION_VXLAN_DECAP :
2597 MLX5_FLOW_ACTION_NVGRE_DECAP;
2600 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2601 ret = flow_dv_validate_action_raw_encap(action_flags,
2606 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2609 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2610 ret = flow_dv_validate_action_raw_decap(action_flags,
2615 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2618 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2619 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2620 ret = flow_dv_validate_action_modify_mac(action_flags,
2626 /* Count all modify-header actions as one action. */
2627 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2629 action_flags |= actions->type ==
2630 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2631 MLX5_FLOW_ACTION_SET_MAC_SRC :
2632 MLX5_FLOW_ACTION_SET_MAC_DST;
2635 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2636 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2637 ret = flow_dv_validate_action_modify_ipv4(action_flags,
2643 /* Count all modify-header actions as one action. */
2644 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2646 action_flags |= actions->type ==
2647 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2648 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2649 MLX5_FLOW_ACTION_SET_IPV4_DST;
2651 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2652 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2653 ret = flow_dv_validate_action_modify_ipv6(action_flags,
2659 /* Count all modify-header actions as one action. */
2660 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2662 action_flags |= actions->type ==
2663 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2664 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2665 MLX5_FLOW_ACTION_SET_IPV6_DST;
2667 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2668 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2669 ret = flow_dv_validate_action_modify_tp(action_flags,
2675 /* Count all modify-header actions as one action. */
2676 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2678 action_flags |= actions->type ==
2679 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2680 MLX5_FLOW_ACTION_SET_TP_SRC :
2681 MLX5_FLOW_ACTION_SET_TP_DST;
2683 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2684 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2685 ret = flow_dv_validate_action_modify_ttl(action_flags,
2691 /* Count all modify-header actions as one action. */
2692 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2694 action_flags |= actions->type ==
2695 RTE_FLOW_ACTION_TYPE_SET_TTL ?
2696 MLX5_FLOW_ACTION_SET_TTL :
2697 MLX5_FLOW_ACTION_DEC_TTL;
2699 case RTE_FLOW_ACTION_TYPE_JUMP:
2700 ret = flow_dv_validate_action_jump(actions,
2701 attr->group, error);
2705 action_flags |= MLX5_FLOW_ACTION_JUMP;
2707 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
2708 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
2709 ret = flow_dv_validate_action_modify_tcp_seq
2716 /* Count all modify-header actions as one action. */
2717 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2719 action_flags |= actions->type ==
2720 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
2721 MLX5_FLOW_ACTION_INC_TCP_SEQ :
2722 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
2724 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
2725 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
2726 ret = flow_dv_validate_action_modify_tcp_ack
2733 /* Count all modify-header actions as one action. */
2734 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2736 action_flags |= actions->type ==
2737 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
2738 MLX5_FLOW_ACTION_INC_TCP_ACK :
2739 MLX5_FLOW_ACTION_DEC_TCP_ACK;
2742 return rte_flow_error_set(error, ENOTSUP,
2743 RTE_FLOW_ERROR_TYPE_ACTION,
2745 "action not supported");
2748 /* Eswitch has few restrictions on using items and actions */
2749 if (attr->transfer) {
2750 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2751 return rte_flow_error_set(error, ENOTSUP,
2752 RTE_FLOW_ERROR_TYPE_ACTION,
2754 "unsupported action FLAG");
2755 if (action_flags & MLX5_FLOW_ACTION_MARK)
2756 return rte_flow_error_set(error, ENOTSUP,
2757 RTE_FLOW_ERROR_TYPE_ACTION,
2759 "unsupported action MARK");
2760 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2761 return rte_flow_error_set(error, ENOTSUP,
2762 RTE_FLOW_ERROR_TYPE_ACTION,
2764 "unsupported action QUEUE");
2765 if (action_flags & MLX5_FLOW_ACTION_RSS)
2766 return rte_flow_error_set(error, ENOTSUP,
2767 RTE_FLOW_ERROR_TYPE_ACTION,
2769 "unsupported action RSS");
2770 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2771 return rte_flow_error_set(error, EINVAL,
2772 RTE_FLOW_ERROR_TYPE_ACTION,
2774 "no fate action is found");
2776 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2777 return rte_flow_error_set(error, EINVAL,
2778 RTE_FLOW_ERROR_TYPE_ACTION,
2780 "no fate action is found");
2786 * Internal preparation function. Allocates the DV flow size,
2787 * this size is constant.
2790 * Pointer to the flow attributes.
2792 * Pointer to the list of items.
2793 * @param[in] actions
2794 * Pointer to the list of actions.
2796 * Pointer to the error structure.
2799 * Pointer to mlx5_flow object on success,
2800 * otherwise NULL and rte_errno is set.
/*
 * Allocate and minimally initialize an mlx5_flow object for the DV path.
 * NOTE(review): the embedded source numbering jumps (2806->2808,
 * 2809->2811, ...), so the braces, the rte_calloc() failure check and the
 * final "return flow;" are elided from this extract.
 */
2802 static struct mlx5_flow *
2803 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2804 const struct rte_flow_item items[] __rte_unused,
2805 const struct rte_flow_action actions[] __rte_unused,
2806 struct rte_flow_error *error)
2808 uint32_t size = sizeof(struct mlx5_flow);
2809 struct mlx5_flow *flow;
/* Zeroed allocation; the (elided) failure branch reports ENOMEM below. */
2811 flow = rte_calloc(__func__, 1, size, 0);
2813 rte_flow_error_set(error, ENOMEM,
2814 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2815 "not enough memory to create flow");
/* Match-value buffer is sized for a full fte_match_param structure. */
2818 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2824 * Sanity check for match mask and value. Similar to check_valid_spec() in
2825 * kernel driver. If unmasked bit is present in value, it returns failure.
2828 * pointer to match mask buffer.
2829 * @param match_value
2830 * pointer to match value buffer.
2833 * 0 if valid, -EINVAL otherwise.
/*
 * Sanity check that no bit is set in the value buffer that is not also set
 * in the mask buffer (mirrors check_valid_spec() in the kernel driver).
 * NOTE(review): lines are elided here (2836->2838, 2842->2845) -- the
 * return type, the per-byte comparison and the log/return statements are
 * only partly visible.
 */
2836 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2838 uint8_t *m = match_mask;
2839 uint8_t *v = match_value;
/* Walk every byte of the fte_match_param layout. */
2842 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2845 "match_value differs from match_criteria"
2846 " %p[%u] != %p[%u]",
2847 match_value, i, match_mask, i);
2856 * Add Ethernet item to matcher and to the value.
2858 * @param[in, out] matcher
2860 * @param[in, out] key
2861 * Flow matcher value.
2863 * Flow pattern to translate.
2865 * Item is inner pattern.
/*
 * Translate an ETH pattern item into the matcher (mask) and key (value)
 * buffers: DMAC, SMAC and ethertype of fte_match_set_lyr_2_4.
 *
 * Fix: "ð_m->dst" / "ð_m->src" were mojibake -- an "&" fused with the
 * following "eth" during a bad encoding pass; restored to "&eth_m->...".
 *
 * NOTE(review): declarations of headers_m/headers_v/l24_v/i, the
 * spec/mask NULL handling, and the inner/outer if-else braces are elided
 * from this extract (embedded numbering jumps 2876->2888, 2890->2892, ...).
 */
2868 flow_dv_translate_item_eth(void *matcher, void *key,
2869 const struct rte_flow_item *item, int inner)
2871 const struct rte_flow_item_eth *eth_m = item->mask;
2872 const struct rte_flow_item_eth *eth_v = item->spec;
/* Widest supported mask: full 48-bit MACs and full 16-bit ethertype. */
2873 const struct rte_flow_item_eth nic_mask = {
2874 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2875 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2876 .type = RTE_BE16(0xffff),
/* Select inner vs. outer header block of the match parameter. */
2888 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2890 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2892 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2894 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2896 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2897 &eth_m->dst, sizeof(eth_m->dst));
2898 /* The value must be in the range of the mask. */
2899 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2900 for (i = 0; i < sizeof(eth_m->dst); ++i)
2901 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2902 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2903 &eth_m->src, sizeof(eth_m->src));
2904 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2905 /* The value must be in the range of the mask. */
/* sizeof(eth_m->dst) equals sizeof(eth_m->src): both 6-byte MACs. */
2906 for (i = 0; i < sizeof(eth_m->dst); ++i)
2907 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2908 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2909 rte_be_to_cpu_16(eth_m->type));
2910 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
/* Ethertype value written masked, still in big-endian byte order. */
2911 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2915 * Add VLAN item to matcher and to the value.
2917 * @param[in, out] matcher
2919 * @param[in, out] key
2920 * Flow matcher value.
2922 * Flow pattern to translate.
2924 * Item is inner pattern.
/*
 * Translate a VLAN pattern item: matches cvlan_tag plus the TCI fields --
 * VID (bits 0-11), CFI/DEI (bit 12), PCP priority (bits 13-15).
 * NOTE(review): local declarations (headers_m/headers_v/tci_m/tci_v), the
 * spec/mask defaulting and the inner/outer if-else braces are elided from
 * this extract (embedded numbering jumps 2935->2947, 2949->2951, ...).
 */
2927 flow_dv_translate_item_vlan(void *matcher, void *key,
2928 const struct rte_flow_item *item,
2931 const struct rte_flow_item_vlan *vlan_m = item->mask;
2932 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* Widest supported mask: 12-bit VID and full inner ethertype. */
2933 const struct rte_flow_item_vlan nic_mask = {
2934 .tci = RTE_BE16(0x0fff),
2935 .inner_type = RTE_BE16(0xffff),
2947 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2949 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2951 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2953 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Value ANDed with mask so it never exceeds the matched bits. */
2955 tci_m = rte_be_to_cpu_16(vlan_m->tci);
2956 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of a customer VLAN tag is matched unconditionally. */
2957 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2958 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2959 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2960 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2961 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2962 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2963 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2964 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2968 * Add IPV4 item to matcher and to the value.
2970 * @param[in, out] matcher
2972 * @param[in, out] key
2973 * Flow matcher value.
2975 * Flow pattern to translate.
2977 * Item is inner pattern.
2979 * The group to insert the rule.
/*
 * Translate an IPV4 pattern item: IP version, src/dst addresses,
 * TOS (split into ECN and DSCP fields) and next protocol.
 * NOTE(review): local declarations, NULL spec/mask handling, the
 * inner/outer branch and the group-dependent version-mask conditional
 * around lines 3010-3015 are elided (embedded numbering gaps).
 */
2982 flow_dv_translate_item_ipv4(void *matcher, void *key,
2983 const struct rte_flow_item *item,
2984 int inner, uint32_t group)
2986 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2987 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Widest supported mask for the NIC on IPv4 fields. */
2988 const struct rte_flow_item_ipv4 nic_mask = {
2990 .src_addr = RTE_BE32(0xffffffff),
2991 .dst_addr = RTE_BE32(0xffffffff),
2992 .type_of_service = 0xff,
2993 .next_proto_id = 0xff,
3003 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3005 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3007 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3009 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Full 4-bit version mask vs. 0x4; selection condition is elided. */
3012 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3014 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
3015 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Addresses are kept big-endian; value is ANDed with its mask. */
3020 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3021 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3022 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3023 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3024 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
3025 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
3026 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3027 src_ipv4_src_ipv6.ipv4_layout.ipv4);
3028 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3029 src_ipv4_src_ipv6.ipv4_layout.ipv4);
3030 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
3031 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte split: low 2 bits -> ip_ecn, upper 6 bits -> ip_dscp. */
3032 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
3033 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
3034 ipv4_m->hdr.type_of_service);
3035 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
3036 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
3037 ipv4_m->hdr.type_of_service >> 2);
3038 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
3039 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3040 ipv4_m->hdr.next_proto_id);
3041 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3042 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
3046 * Add IPV6 item to matcher and to the value.
3048 * @param[in, out] matcher
3050 * @param[in, out] key
3051 * Flow matcher value.
3053 * Flow pattern to translate.
3055 * Item is inner pattern.
3057 * The group to insert the rule.
/*
 * Translate an IPV6 pattern item: IP version, 128-bit src/dst addresses,
 * traffic class (ECN + DSCP from vtc_flow), flow label (into misc
 * parameters) and next header protocol.
 * NOTE(review): many interior lines are elided (numbering gaps), including
 * local declarations, NULL spec/mask handling, the size/shift constants
 * for the flow-label extraction and the inner/outer branch braces.
 */
3060 flow_dv_translate_item_ipv6(void *matcher, void *key,
3061 const struct rte_flow_item *item,
3062 int inner, uint32_t group)
3064 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
3065 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest supported mask: full 128-bit addresses and full vtc_flow. */
3066 const struct rte_flow_item_ipv6 nic_mask = {
3069 "\xff\xff\xff\xff\xff\xff\xff\xff"
3070 "\xff\xff\xff\xff\xff\xff\xff\xff",
3072 "\xff\xff\xff\xff\xff\xff\xff\xff"
3073 "\xff\xff\xff\xff\xff\xff\xff\xff",
3074 .vtc_flow = RTE_BE32(0xffffffff),
3081 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3082 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3091 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3093 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3095 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3097 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Full 4-bit version mask vs. 0x6; selection condition is elided. */
3100 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3102 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
3103 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Addresses copied byte-wise; value bytes ANDed with mask bytes. */
3108 size = sizeof(ipv6_m->hdr.dst_addr);
3109 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3110 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3111 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3112 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3113 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
3114 for (i = 0; i < size; ++i)
3115 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
3116 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3117 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3118 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3119 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3120 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
3121 for (i = 0; i < size; ++i)
3122 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow: bits 20-21 -> ip_ecn, bits 22-27 -> ip_dscp. */
3124 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
3125 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
3126 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
3127 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
3128 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
3129 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to inner/outer misc fields; operands are elided. */
3132 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
3134 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
3137 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
3139 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
3143 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3145 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3146 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
3150 * Add TCP item to matcher and to the value.
3152 * @param[in, out] matcher
3154 * @param[in, out] key
3155 * Flow matcher value.
3157 * Flow pattern to translate.
3159 * Item is inner pattern.
/*
 * Translate a TCP pattern item: pins ip_protocol to TCP, then matches
 * source port, destination port and TCP flags.
 * NOTE(review): local declarations, the inner/outer if-else braces and
 * the NULL spec early-return are elided (numbering gaps 3167->3172,
 * 3181->3185).
 */
3162 flow_dv_translate_item_tcp(void *matcher, void *key,
3163 const struct rte_flow_item *item,
3166 const struct rte_flow_item_tcp *tcp_m = item->mask;
3167 const struct rte_flow_item_tcp *tcp_v = item->spec;
3172 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3174 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3176 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3178 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Fully match IP protocol == TCP regardless of the item mask. */
3180 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3181 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the rte_flow default mask when none was supplied. */
3185 tcp_m = &rte_flow_item_tcp_mask;
3186 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3187 rte_be_to_cpu_16(tcp_m->hdr.src_port));
3188 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3189 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3190 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3191 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3192 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3193 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3194 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3195 tcp_m->hdr.tcp_flags);
3196 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3197 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3201 * Add UDP item to matcher and to the value.
3203 * @param[in, out] matcher
3205 * @param[in, out] key
3206 * Flow matcher value.
3208 * Flow pattern to translate.
3210 * Item is inner pattern.
/*
 * Translate a UDP pattern item: pins ip_protocol to UDP, then matches
 * source and destination ports.
 * NOTE(review): local declarations, the inner/outer if-else braces and
 * the NULL spec early-return are elided (numbering gaps 3218->3223,
 * 3232->3236).
 */
3213 flow_dv_translate_item_udp(void *matcher, void *key,
3214 const struct rte_flow_item *item,
3217 const struct rte_flow_item_udp *udp_m = item->mask;
3218 const struct rte_flow_item_udp *udp_v = item->spec;
3223 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3225 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3227 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3229 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Fully match IP protocol == UDP regardless of the item mask. */
3231 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3232 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Fall back to the rte_flow default mask when none was supplied. */
3236 udp_m = &rte_flow_item_udp_mask;
3237 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3238 rte_be_to_cpu_16(udp_m->hdr.src_port));
3239 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3240 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3241 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3242 rte_be_to_cpu_16(udp_m->hdr.dst_port));
3243 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3244 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3248 * Add GRE optional Key item to matcher and to the value.
3250 * @param[in, out] matcher
3252 * @param[in, out] key
3253 * Flow matcher value.
3255 * Flow pattern to translate.
3257 * Item is inner pattern.
/*
 * Translate a GRE_KEY pattern item: forces the GRE K bit on and splits
 * the 32-bit key into the device's high-24-bit / low-8-bit fields.
 * NOTE(review): the NULL spec early-return and the "if (!key_m)" guard
 * preceding the default-mask assignment are elided (gap 3267->3272).
 */
3260 flow_dv_translate_item_gre_key(void *matcher, void *key,
3261 const struct rte_flow_item *item)
3263 const rte_be32_t *key_m = item->mask;
3264 const rte_be32_t *key_v = item->spec;
3265 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3266 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Default: match the whole 32-bit key. */
3267 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3272 key_m = &gre_key_default_mask;
3273 /* GRE K bit must be on and should already be validated */
3274 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3275 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
/* High 24 bits of the key. */
3276 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3277 rte_be_to_cpu_32(*key_m) >> 8);
3278 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3279 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
/* Low 8 bits of the key. */
3280 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3281 rte_be_to_cpu_32(*key_m) & 0xFF);
3282 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3283 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3287 * Add GRE item to matcher and to the value.
3289 * @param[in, out] matcher
3291 * @param[in, out] key
3292 * Flow matcher value.
3294 * Flow pattern to translate.
3296 * Item is inner pattern.
/*
 * Translate a GRE pattern item: pins ip_protocol to GRE, matches the GRE
 * protocol field, and decodes the c_rsvd0_ver word through a local
 * bitfield union to match the C/K/S present bits individually.
 * NOTE(review): local declarations, parts of the bitfield union (the
 * "value" member and remaining fields) and the inner/outer and NULL-spec
 * branches are elided (numbering gaps 3308->3315, 3318->3322, 3334->3338).
 */
3299 flow_dv_translate_item_gre(void *matcher, void *key,
3300 const struct rte_flow_item *item,
3303 const struct rte_flow_item_gre *gre_m = item->mask;
3304 const struct rte_flow_item_gre *gre_v = item->spec;
3307 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3308 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Host-order view of the GRE c_rsvd0_ver flag word. */
3315 uint16_t s_present:1;
3316 uint16_t k_present:1;
3317 uint16_t rsvd_bit1:1;
3318 uint16_t c_present:1;
3322 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
3325 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3327 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3329 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3331 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Fully match IP protocol == GRE regardless of the item mask. */
3333 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3334 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Fall back to the rte_flow default mask when none was supplied. */
3338 gre_m = &rte_flow_item_gre_mask;
3339 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3340 rte_be_to_cpu_16(gre_m->protocol));
3341 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3342 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3343 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
3344 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
/* Each present bit: value ANDed with its mask bit. */
3345 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
3346 gre_crks_rsvd0_ver_m.c_present);
3347 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
3348 gre_crks_rsvd0_ver_v.c_present &
3349 gre_crks_rsvd0_ver_m.c_present);
3350 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
3351 gre_crks_rsvd0_ver_m.k_present);
3352 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
3353 gre_crks_rsvd0_ver_v.k_present &
3354 gre_crks_rsvd0_ver_m.k_present);
3355 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
3356 gre_crks_rsvd0_ver_m.s_present);
3357 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
3358 gre_crks_rsvd0_ver_v.s_present &
3359 gre_crks_rsvd0_ver_m.s_present);
3363 * Add NVGRE item to matcher and to the value.
3365 * @param[in, out] matcher
3367 * @param[in, out] key
3368 * Flow matcher value.
3370 * Flow pattern to translate.
3372 * Item is inner pattern.
/*
 * Translate an NVGRE pattern item: reuses the GRE translation for the
 * common header, then matches TNI + flow_id through the GRE key area
 * (gre_key_h onward) byte-by-byte.
 * NOTE(review): declarations of gre_key_m/gre_key_v/i/size and the
 * NULL spec/mask guards are elided (numbering gaps 3384->3390, 3390->3394).
 */
3375 flow_dv_translate_item_nvgre(void *matcher, void *key,
3376 const struct rte_flow_item *item,
3379 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3380 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3381 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3382 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* TNI and flow_id are contiguous, viewed as one raw byte run. */
3383 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3384 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* Common GRE fields (protocol, C/K/S bits) first. */
3390 flow_dv_translate_item_gre(matcher, key, item, inner);
/* Fall back to the rte_flow default mask when none was supplied. */
3394 nvgre_m = &rte_flow_item_nvgre_mask;
3395 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3396 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3397 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3398 memcpy(gre_key_m, tni_flow_id_m, size);
3399 for (i = 0; i < size; ++i)
3400 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3404 * Add VXLAN item to matcher and to the value.
3406 * @param[in, out] matcher
3408 * @param[in, out] key
3409 * Flow matcher value.
3411 * Flow pattern to translate.
3413 * Item is inner pattern.
/*
 * Translate a VXLAN / VXLAN-GPE pattern item: defaults the UDP dport when
 * the user did not match one, then matches the 24-bit VNI byte-by-byte.
 * NOTE(review): declarations of headers_m/headers_v/vni_m/vni_v/i/size/
 * dport and the NULL spec/mask guards are elided (numbering gaps
 * 3425->3433, 3445->3450).
 */
3416 flow_dv_translate_item_vxlan(void *matcher, void *key,
3417 const struct rte_flow_item *item,
3420 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3421 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3424 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3425 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3433 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3435 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3437 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3439 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port for the tunnel flavor in use. */
3441 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3442 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the dport match if the UDP item left it unmatched. */
3443 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3444 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3445 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Fall back to the rte_flow default mask when none was supplied. */
3450 vxlan_m = &rte_flow_item_vxlan_mask;
3451 size = sizeof(vxlan_m->vni);
3452 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3453 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3454 memcpy(vni_m, vxlan_m->vni, size);
3455 for (i = 0; i < size; ++i)
3456 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3460 * Add MPLS item to matcher and to the value.
3462 * @param[in, out] matcher
3464 * @param[in, out] key
3465 * Flow matcher value.
3467 * Flow pattern to translate.
3468 * @param[in] prev_layer
3469 * The protocol layer indicated in previous item.
3471 * Item is inner pattern.
/*
 * Translate an MPLS pattern item. The previous layer decides both how the
 * MPLS presence is signalled (UDP dport 6635, GRE protocol, or raw
 * ip_protocol) and which misc2 field receives the label
 * (outer_first_mpls_over_udp / _over_gre / plain first MPLS).
 * NOTE(review): break statements, the default ip_protocol value (line
 * 3505), the NULL spec early-return and the default misc2 field names
 * (lines 3534/3538) are elided (numbering gaps).
 */
3474 flow_dv_translate_item_mpls(void *matcher, void *key,
3475 const struct rte_flow_item *item,
3476 uint64_t prev_layer,
3479 const uint32_t *in_mpls_m = item->mask;
3480 const uint32_t *in_mpls_v = item->spec;
3481 uint32_t *out_mpls_m = 0;
3482 uint32_t *out_mpls_v = 0;
3483 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3484 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3485 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3487 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3488 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3489 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First switch: constrain the encapsulating protocol. */
3491 switch (prev_layer) {
3492 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3493 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3494 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3495 MLX5_UDP_PORT_MPLS);
3497 case MLX5_FLOW_LAYER_GRE:
3498 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3499 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3500 RTE_ETHER_TYPE_MPLS);
3503 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3504 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Fall back to the rte_flow default mask when none was supplied. */
3511 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second switch: select the misc2 destination for the label. */
3512 switch (prev_layer) {
3513 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3515 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3516 outer_first_mpls_over_udp);
3518 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3519 outer_first_mpls_over_udp);
3521 case MLX5_FLOW_LAYER_GRE:
3523 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3524 outer_first_mpls_over_gre);
3526 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3527 outer_first_mpls_over_gre);
3530 /* Inner MPLS not over GRE is not supported. */
3533 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3537 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Whole 32-bit MPLS word written at once; value ANDed with mask. */
3543 if (out_mpls_m && out_mpls_v) {
3544 *out_mpls_m = *in_mpls_m;
3545 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3550 * Add META item to matcher
3552 * @param[in, out] matcher
3554 * @param[in, out] key
3555 * Flow matcher value.
3557 * Flow pattern to translate.
3559 * Item is inner pattern.
/*
 * Translate a META pattern item into metadata_reg_a of misc parameters 2.
 * NOTE(review): the misc2_m/misc2_v declarations, the "if (!meta_m)"
 * guard and the "if (meta_v)" condition wrapping the MLX5_SETs are elided
 * (numbering gaps 3566->3568, 3572->3574, 3575->3577).
 */
3562 flow_dv_translate_item_meta(void *matcher, void *key,
3563 const struct rte_flow_item *item)
3565 const struct rte_flow_item_meta *meta_m;
3566 const struct rte_flow_item_meta *meta_v;
3568 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3570 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3572 meta_m = (const void *)item->mask;
/* Fall back to the rte_flow default META mask when none was supplied. */
3574 meta_m = &rte_flow_item_meta_mask;
3575 meta_v = (const void *)item->spec;
3577 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3578 rte_be_to_cpu_32(meta_m->data));
3579 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3580 rte_be_to_cpu_32(meta_v->data & meta_m->data));
3585 * Add source vport match to the specified matcher.
3587 * @param[in, out] matcher
3589 * @param[in, out] key
3590 * Flow matcher value.
3592 * Source vport value to match
/*
 * Match on the source vport: write the mask and the port value into the
 * source_port field of misc parameters. The caller supplies a mask of 0
 * to wildcard the port entirely.
 */
3597 flow_dv_translate_item_source_vport(void *matcher, void *key,
3598 int16_t port, uint16_t mask)
3600 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3601 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3603 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3604 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3608 * Translate port-id item to eswitch match on port-id.
3611 * The device to configure through.
3612 * @param[in, out] matcher
3614 * @param[in, out] key
3615 * Flow matcher value.
3617 * Flow pattern to translate.
3620 * 0 on success, a negative errno value otherwise.
/*
 * Translate a PORT_ID item (or the device's own port when item is NULL)
 * into a source-vport match: resolve the DPDK port id to its e-switch
 * vport via mlx5_port_to_eswitch_info(), then delegate to
 * flow_dv_translate_item_source_vport().
 * NOTE(review): the "int ret;" declaration, the error check on ret and
 * the final "return 0;" are elided (numbering gaps 3628->3631, 3633->3636).
 */
3623 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3624 void *key, const struct rte_flow_item *item)
3626 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3627 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3628 uint16_t mask, val, id;
/* No item: match this device's own port with a full mask. */
3631 mask = pid_m ? pid_m->id : 0xffff;
3632 id = pid_v ? pid_v->id : dev->data->port_id;
3633 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3636 flow_dv_translate_item_source_vport(matcher, key, val, mask);
3641 * Add ICMP6 item to matcher and to the value.
3643 * @param[in, out] matcher
3645 * @param[in, out] key
3646 * Flow matcher value.
3648 * Flow pattern to translate.
3650 * Item is inner pattern.
/*
 * Translate an ICMP6 pattern item: pins ip_protocol to ICMPv6, then
 * matches the type and code through misc parameters 3.
 * NOTE(review): local header pointers, the inner/outer if-else braces and
 * the NULL spec early-return are elided (numbering gaps 3658->3661,
 * 3674->3678).
 */
3653 flow_dv_translate_item_icmp6(void *matcher, void *key,
3654 const struct rte_flow_item *item,
3657 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
3658 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
3661 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3663 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3665 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3667 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3669 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3671 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Fully match IP protocol == ICMPv6 regardless of the item mask. */
3673 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3674 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
/* Fall back to the rte_flow default mask when none was supplied. */
3678 icmp6_m = &rte_flow_item_icmp6_mask;
3679 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
3680 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
3681 icmp6_v->type & icmp6_m->type);
3682 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
3683 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
3684 icmp6_v->code & icmp6_m->code);
3688 * Add ICMP item to matcher and to the value.
3690 * @param[in, out] matcher
3692 * @param[in, out] key
3693 * Flow matcher value.
3695 * Flow pattern to translate.
3697 * Item is inner pattern.
/*
 * Translate an ICMP (v4) pattern item: pins ip_protocol to ICMP, then
 * matches icmp_type and icmp_code through misc parameters 3.
 * NOTE(review): local header pointers, the inner/outer if-else braces and
 * the NULL spec early-return are elided (numbering gaps 3705->3708,
 * 3721->3725).
 */
3700 flow_dv_translate_item_icmp(void *matcher, void *key,
3701 const struct rte_flow_item *item,
3704 const struct rte_flow_item_icmp *icmp_m = item->mask;
3705 const struct rte_flow_item_icmp *icmp_v = item->spec;
3708 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3710 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3712 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3714 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3716 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3718 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Fully match IP protocol == ICMP regardless of the item mask. */
3720 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3721 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
/* Fall back to the rte_flow default mask when none was supplied. */
3725 icmp_m = &rte_flow_item_icmp_mask;
3726 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
3727 icmp_m->hdr.icmp_type);
3728 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
3729 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
3730 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
3731 icmp_m->hdr.icmp_code);
3732 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
3733 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
/* All-zero reference buffer, compared against each criteria sub-struct. */
3736 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* True when the given headers sub-struct of match_criteria is all zeros. */
3738 #define HEADER_IS_ZERO(match_criteria, headers) \
3739 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
3740 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3743 * Calculate flow matcher enable bitmap.
3745 * @param match_criteria
3746 * Pointer to flow matcher criteria.
3749 * Bitmap of enabled fields.
3752 flow_dv_matcher_enable(uint32_t *match_criteria)
3754 uint8_t match_criteria_enable;
/* Set one enable bit per non-empty criteria section. */
3756 match_criteria_enable =
3757 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3758 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3759 match_criteria_enable |=
3760 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3761 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3762 match_criteria_enable |=
3763 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3764 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3765 match_criteria_enable |=
3766 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3767 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
/* misc_parameters_3 (e.g. ICMP fields) only exists with DR support. */
3768 #ifdef HAVE_MLX5DV_DR
3769 match_criteria_enable |=
3770 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3771 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3773 return match_criteria_enable;
3780 * @param dev[in, out]
3781 * Pointer to rte_eth_dev structure.
3782 * @param[in] table_id
3785 * Direction of the table.
3786 * @param[in] transfer
3787 * E-Switch or NIC flow.
3789 * pointer to error structure.
3792 * Returns tables resource based on the index, NULL in case of failed.
/*
 * Get-or-create a flow table from the shared-context table arrays,
 * choosing the FDB (transfer), TX (egress) or RX domain. Reference counted.
 */
3794 static struct mlx5_flow_tbl_resource *
3795 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3796 uint32_t table_id, uint8_t egress,
3798 struct rte_flow_error *error)
3800 struct mlx5_priv *priv = dev->data->dev_private;
3801 struct mlx5_ibv_shared *sh = priv->sh;
3802 struct mlx5_flow_tbl_resource *tbl;
/* With DR support the table object is lazily created in its domain. */
3804 #ifdef HAVE_MLX5DV_DR
3806 tbl = &sh->fdb_tbl[table_id];
3808 tbl->obj = mlx5_glue->dr_create_flow_tbl
3809 (sh->fdb_domain, table_id);
3810 } else if (egress) {
3811 tbl = &sh->tx_tbl[table_id];
3813 tbl->obj = mlx5_glue->dr_create_flow_tbl
3814 (sh->tx_domain, table_id);
3816 tbl = &sh->rx_tbl[table_id];
3818 tbl->obj = mlx5_glue->dr_create_flow_tbl
3819 (sh->rx_domain, table_id);
/* Creation failure path: report ENOMEM to the caller via rte_flow_error. */
3822 rte_flow_error_set(error, ENOMEM,
3823 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3824 NULL, "cannot create table");
/* Take a reference on the (existing or newly created) table. */
3827 rte_atomic32_inc(&tbl->refcnt);
/* Non-DR build: tables are plain array slots, returned directly. */
3833 return &sh->fdb_tbl[table_id];
3835 return &sh->tx_tbl[table_id];
3837 return &sh->rx_tbl[table_id];
3842 * Release a flow table.
3845 * Table resource to be released.
3848 * Returns 0 if table was released, else return 1;
/* Drop one reference; destroy the DR table object on the last release. */
3851 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3855 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3856 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3864 * Register the flow matcher.
3866 * @param dev[in, out]
3867 * Pointer to rte_eth_dev structure.
3868 * @param[in, out] matcher
3869 * Pointer to flow matcher.
3870 * @parm[in, out] dev_flow
3871 * Pointer to the dev_flow.
3873 * pointer to error structure.
3876 * 0 on success otherwise -errno and errno is set.
/*
 * Find a cached matcher identical to *matcher or create and cache a new one,
 * storing the result in dev_flow->dv.matcher. Reference counted.
 */
3879 flow_dv_matcher_register(struct rte_eth_dev *dev,
3880 struct mlx5_flow_dv_matcher *matcher,
3881 struct mlx5_flow *dev_flow,
3882 struct rte_flow_error *error)
3884 struct mlx5_priv *priv = dev->data->dev_private;
3885 struct mlx5_ibv_shared *sh = priv->sh;
3886 struct mlx5_flow_dv_matcher *cache_matcher;
3887 struct mlx5dv_flow_matcher_attr dv_attr = {
3888 .type = IBV_FLOW_ATTR_NORMAL,
3889 .match_mask = (void *)&matcher->mask,
3891 struct mlx5_flow_tbl_resource *tbl = NULL;
3893 /* Lookup from cache. */
/* Equality: crc, priority, direction, group, transfer flag and mask bytes. */
3894 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3895 if (matcher->crc == cache_matcher->crc &&
3896 matcher->priority == cache_matcher->priority &&
3897 matcher->egress == cache_matcher->egress &&
3898 matcher->group == cache_matcher->group &&
3899 matcher->transfer == cache_matcher->transfer &&
3900 !memcmp((const void *)matcher->mask.buf,
3901 (const void *)cache_matcher->mask.buf,
3902 cache_matcher->mask.size)) {
3904 "priority %hd use %s matcher %p: refcnt %d++",
3905 cache_matcher->priority,
3906 cache_matcher->egress ? "tx" : "rx",
3907 (void *)cache_matcher,
3908 rte_atomic32_read(&cache_matcher->refcnt));
/* Cache hit: just take a reference and reuse. */
3909 rte_atomic32_inc(&cache_matcher->refcnt);
3910 dev_flow->dv.matcher = cache_matcher;
3914 /* Register new matcher. */
3915 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3917 return rte_flow_error_set(error, ENOMEM,
3918 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3919 "cannot allocate matcher memory");
/* The matcher lives in a table derived from the flow group. */
3920 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3921 matcher->egress, matcher->transfer,
3924 rte_free(cache_matcher);
3925 return rte_flow_error_set(error, ENOMEM,
3926 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3927 NULL, "cannot create table");
3929 *cache_matcher = *matcher;
3930 dv_attr.match_criteria_enable =
3931 flow_dv_matcher_enable(cache_matcher->mask.buf);
3932 dv_attr.priority = matcher->priority;
3933 if (matcher->egress)
3934 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3935 cache_matcher->matcher_object =
3936 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3937 if (!cache_matcher->matcher_object) {
3938 rte_free(cache_matcher);
/* With DR the table reference taken above must be returned on failure. */
3939 #ifdef HAVE_MLX5DV_DR
3940 flow_dv_tbl_resource_release(tbl);
3942 return rte_flow_error_set(error, ENOMEM,
3943 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3944 NULL, "cannot create matcher");
/* Insert into the shared cache and hand it to the dev_flow. */
3946 rte_atomic32_inc(&cache_matcher->refcnt);
3947 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3948 dev_flow->dv.matcher = cache_matcher;
3949 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3950 cache_matcher->priority,
3951 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3952 rte_atomic32_read(&cache_matcher->refcnt));
3953 rte_atomic32_inc(&tbl->refcnt);
3958 * Find existing tag resource or create and register a new one.
3960 * @param dev[in, out]
3961 * Pointer to rte_eth_dev structure.
3962 * @param[in, out] resource
3963 * Pointer to tag resource.
3964 * @parm[in, out] dev_flow
3965 * Pointer to the dev_flow.
3967 * pointer to error structure.
3970 * 0 on success otherwise -errno and errno is set.
3973 flow_dv_tag_resource_register
3974 (struct rte_eth_dev *dev,
3975 struct mlx5_flow_dv_tag_resource *resource,
3976 struct mlx5_flow *dev_flow,
3977 struct rte_flow_error *error)
3979 struct mlx5_priv *priv = dev->data->dev_private;
3980 struct mlx5_ibv_shared *sh = priv->sh;
3981 struct mlx5_flow_dv_tag_resource *cache_resource;
3983 /* Lookup a matching resource from cache. */
/* Tag resources are keyed by the tag value alone. */
3984 LIST_FOREACH(cache_resource, &sh->tags, next) {
3985 if (resource->tag == cache_resource->tag) {
3986 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3987 (void *)cache_resource,
3988 rte_atomic32_read(&cache_resource->refcnt));
3989 rte_atomic32_inc(&cache_resource->refcnt);
3990 dev_flow->flow->tag_resource = cache_resource;
3994 /* Register new resource. */
3995 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3996 if (!cache_resource)
3997 return rte_flow_error_set(error, ENOMEM,
3998 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3999 "cannot allocate resource memory");
4000 *cache_resource = *resource;
/* Create the DV tag action backing this resource. */
4001 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
4003 if (!cache_resource->action) {
4004 rte_free(cache_resource);
4005 return rte_flow_error_set(error, ENOMEM,
4006 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4007 NULL, "cannot create action");
/* First reference belongs to the caller's flow. */
4009 rte_atomic32_init(&cache_resource->refcnt);
4010 rte_atomic32_inc(&cache_resource->refcnt);
4011 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
4012 dev_flow->flow->tag_resource = cache_resource;
4013 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
4014 (void *)cache_resource,
4015 rte_atomic32_read(&cache_resource->refcnt));
4023 * Pointer to Ethernet device.
4025 * Pointer to mlx5_flow.
4028 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference on a cached tag resource; destroy it on last release. */
4031 flow_dv_tag_release(struct rte_eth_dev *dev,
4032 struct mlx5_flow_dv_tag_resource *tag)
4035 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4036 dev->data->port_id, (void *)tag,
4037 rte_atomic32_read(&tag->refcnt));
4038 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4039 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4040 LIST_REMOVE(tag, next);
4041 DRV_LOG(DEBUG, "port %u tag %p: removed",
4042 dev->data->port_id, (void *)tag);
4050 * Translate port ID action to vport.
4053 * Pointer to rte_eth_dev structure.
4055 * Pointer to the port ID action.
4056 * @param[out] dst_port_id
4057 * The target port ID.
4059 * Pointer to the error structure.
4062 * 0 on success, a negative errno value otherwise and rte_errno is set.
4065 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4066 const struct rte_flow_action *action,
4067 uint32_t *dst_port_id,
4068 struct rte_flow_error *error)
4073 const struct rte_flow_action_port_id *conf =
4074 (const struct rte_flow_action_port_id *)action->conf;
/* "original" selects the current device's own port instead of conf->id. */
4076 port = conf->original ? dev->data->port_id : conf->id;
/* Map the DPDK port to its E-Switch vport id. */
4077 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4079 return rte_flow_error_set(error, -ret,
4080 RTE_FLOW_ERROR_TYPE_ACTION,
4082 "No eswitch info was found for port");
4083 *dst_port_id = port_id;
4088 * Fill the flow with DV spec.
4091 * Pointer to rte_eth_dev structure.
4092 * @param[in, out] dev_flow
4093 * Pointer to the sub flow.
4095 * Pointer to the flow attributes.
4097 * Pointer to the list of items.
4098 * @param[in] actions
4099 * Pointer to the list of actions.
4101 * Pointer to the error structure.
4104 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Core translation routine: walks the rte_flow action list, then the item
 * (pattern) list, filling dev_flow->dv with DV actions and matcher
 * mask/value, and finally registers the matcher. NOTE(review): this chunk
 * is truncated — many break statements, braces and error paths of the
 * original are not visible here.
 */
4107 flow_dv_translate(struct rte_eth_dev *dev,
4108 struct mlx5_flow *dev_flow,
4109 const struct rte_flow_attr *attr,
4110 const struct rte_flow_item items[],
4111 const struct rte_flow_action actions[],
4112 struct rte_flow_error *error)
4114 struct mlx5_priv *priv = dev->data->dev_private;
4115 struct rte_flow *flow = dev_flow->flow;
4116 uint64_t item_flags = 0;
4117 uint64_t last_item = 0;
4118 uint64_t action_flags = 0;
4119 uint64_t priority = attr->priority;
4120 struct mlx5_flow_dv_matcher matcher = {
4122 .size = sizeof(matcher.mask.buf),
4126 bool actions_end = false;
/* Modify-header resource: table type follows the flow direction. */
4127 struct mlx5_flow_dv_modify_hdr_resource res = {
4128 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4129 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4131 union flow_dv_attr flow_attr = { .attr = 0 };
4132 struct mlx5_flow_dv_tag_resource tag_resource;
/* Position where the single modify-header action will be patched in. */
4133 uint32_t modify_action_position = UINT32_MAX;
4134 void *match_mask = matcher.mask.buf;
4135 void *match_value = dev_flow->dv.value.buf;
4137 flow->group = attr->group;
/* Transfer (E-Switch) flows use the FDB table type instead. */
4139 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4140 if (priority == MLX5_FLOW_PRIO_RSVD)
4141 priority = priv->config.flow_prio - 1;
/* Pass 1: translate every action until the END action is reached. */
4142 for (; !actions_end ; actions++) {
4143 const struct rte_flow_action_queue *queue;
4144 const struct rte_flow_action_rss *rss;
4145 const struct rte_flow_action *action = actions;
4146 const struct rte_flow_action_count *count = action->conf;
4147 const uint8_t *rss_key;
4148 const struct rte_flow_action_jump *jump_data;
4149 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4150 struct mlx5_flow_tbl_resource *tbl;
4151 uint32_t port_id = 0;
4152 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4154 switch (actions->type) {
4155 case RTE_FLOW_ACTION_TYPE_VOID:
4157 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4158 if (flow_dv_translate_action_port_id(dev, action,
4161 port_id_resource.port_id = port_id;
4162 if (flow_dv_port_id_action_resource_register
4163 (dev, &port_id_resource, dev_flow, error))
4165 dev_flow->dv.actions[actions_n++] =
4166 dev_flow->dv.port_id_action->action;
4167 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4169 case RTE_FLOW_ACTION_TYPE_FLAG:
/* FLAG is a MARK with the default mark value. */
4171 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4172 if (!flow->tag_resource)
4173 if (flow_dv_tag_resource_register
4174 (dev, &tag_resource, dev_flow, error))
4176 dev_flow->dv.actions[actions_n++] =
4177 flow->tag_resource->action;
4178 action_flags |= MLX5_FLOW_ACTION_FLAG;
4180 case RTE_FLOW_ACTION_TYPE_MARK:
4181 tag_resource.tag = mlx5_flow_mark_set
4182 (((const struct rte_flow_action_mark *)
4183 (actions->conf))->id);
4184 if (!flow->tag_resource)
4185 if (flow_dv_tag_resource_register
4186 (dev, &tag_resource, dev_flow, error))
4188 dev_flow->dv.actions[actions_n++] =
4189 flow->tag_resource->action;
4190 action_flags |= MLX5_FLOW_ACTION_MARK;
4192 case RTE_FLOW_ACTION_TYPE_DROP:
4193 action_flags |= MLX5_FLOW_ACTION_DROP;
4195 case RTE_FLOW_ACTION_TYPE_QUEUE:
4196 queue = actions->conf;
4197 flow->rss.queue_num = 1;
4198 (*flow->queue)[0] = queue->index;
4199 action_flags |= MLX5_FLOW_ACTION_QUEUE;
4201 case RTE_FLOW_ACTION_TYPE_RSS:
4202 rss = actions->conf;
4204 memcpy((*flow->queue), rss->queue,
4205 rss->queue_num * sizeof(uint16_t));
4206 flow->rss.queue_num = rss->queue_num;
4207 /* NULL RSS key indicates default RSS key. */
4208 rss_key = !rss->key ? rss_hash_default_key : rss->key;
4209 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4210 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4211 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4212 flow->rss.level = rss->level;
4213 action_flags |= MLX5_FLOW_ACTION_RSS;
4215 case RTE_FLOW_ACTION_TYPE_COUNT:
/* DevX is required for flow counters. */
4216 if (!priv->config.devx) {
4217 rte_errno = ENOTSUP;
4220 flow->counter = flow_dv_counter_new(dev, count->shared,
4222 if (flow->counter == NULL)
4224 dev_flow->dv.actions[actions_n++] =
4225 flow->counter->action;
4226 action_flags |= MLX5_FLOW_ACTION_COUNT;
4229 if (rte_errno == ENOTSUP)
4230 return rte_flow_error_set
4232 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4234 "count action not supported");
4236 return rte_flow_error_set
4238 RTE_FLOW_ERROR_TYPE_ACTION,
4240 "cannot create counter"
4242 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4243 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4244 if (flow_dv_create_action_l2_encap(dev, actions,
4249 dev_flow->dv.actions[actions_n++] =
4250 dev_flow->dv.encap_decap->verbs_action;
4251 action_flags |= actions->type ==
4252 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4253 MLX5_FLOW_ACTION_VXLAN_ENCAP :
4254 MLX5_FLOW_ACTION_NVGRE_ENCAP;
4256 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4257 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4258 if (flow_dv_create_action_l2_decap(dev, dev_flow,
4262 dev_flow->dv.actions[actions_n++] =
4263 dev_flow->dv.encap_decap->verbs_action;
4264 action_flags |= actions->type ==
4265 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4266 MLX5_FLOW_ACTION_VXLAN_DECAP :
4267 MLX5_FLOW_ACTION_NVGRE_DECAP;
4269 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4270 /* Handle encap with preceding decap. */
4271 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4272 if (flow_dv_create_action_raw_encap
4273 (dev, actions, dev_flow, attr, error))
4275 dev_flow->dv.actions[actions_n++] =
4276 dev_flow->dv.encap_decap->verbs_action;
4278 /* Handle encap without preceding decap. */
4279 if (flow_dv_create_action_l2_encap
4280 (dev, actions, dev_flow, attr->transfer,
4283 dev_flow->dv.actions[actions_n++] =
4284 dev_flow->dv.encap_decap->verbs_action;
4286 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4288 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4289 /* Check if this decap is followed by encap. */
4290 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4291 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4294 /* Handle decap only if it isn't followed by encap. */
4295 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4296 if (flow_dv_create_action_l2_decap
4297 (dev, dev_flow, attr->transfer, error))
4299 dev_flow->dv.actions[actions_n++] =
4300 dev_flow->dv.encap_decap->verbs_action;
4302 /* If decap is followed by encap, handle it at encap. */
4303 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4305 case RTE_FLOW_ACTION_TYPE_JUMP:
4306 jump_data = action->conf;
/* Target group is scaled to a table id via MLX5_GROUP_FACTOR (not visible). */
4307 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4310 attr->transfer, error);
4312 return rte_flow_error_set
4314 RTE_FLOW_ERROR_TYPE_ACTION,
4316 "cannot create jump action.");
4317 jump_tbl_resource.tbl = tbl;
4318 if (flow_dv_jump_tbl_resource_register
4319 (dev, &jump_tbl_resource, dev_flow, error)) {
4320 flow_dv_tbl_resource_release(tbl);
4321 return rte_flow_error_set
4323 RTE_FLOW_ERROR_TYPE_ACTION,
4325 "cannot create jump action.");
4327 dev_flow->dv.actions[actions_n++] =
4328 dev_flow->dv.jump->action;
4329 action_flags |= MLX5_FLOW_ACTION_JUMP;
/* The modify-header cases below only accumulate into 'res'; the single
 * HW modify action is created once at the END action. */
4331 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4332 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4333 if (flow_dv_convert_action_modify_mac(&res, actions,
4336 action_flags |= actions->type ==
4337 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4338 MLX5_FLOW_ACTION_SET_MAC_SRC :
4339 MLX5_FLOW_ACTION_SET_MAC_DST;
4341 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4342 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4343 if (flow_dv_convert_action_modify_ipv4(&res, actions,
4346 action_flags |= actions->type ==
4347 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4348 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4349 MLX5_FLOW_ACTION_SET_IPV4_DST;
4351 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4352 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4353 if (flow_dv_convert_action_modify_ipv6(&res, actions,
4356 action_flags |= actions->type ==
4357 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4358 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4359 MLX5_FLOW_ACTION_SET_IPV6_DST;
4361 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4362 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4363 if (flow_dv_convert_action_modify_tp(&res, actions,
4367 action_flags |= actions->type ==
4368 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4369 MLX5_FLOW_ACTION_SET_TP_SRC :
4370 MLX5_FLOW_ACTION_SET_TP_DST;
4372 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4373 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4377 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4379 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4380 if (flow_dv_convert_action_modify_ttl(&res, actions,
4384 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4386 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4387 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4388 if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4391 action_flags |= actions->type ==
4392 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4393 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4394 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4397 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4398 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4399 if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4402 action_flags |= actions->type ==
4403 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4404 MLX5_FLOW_ACTION_INC_TCP_ACK :
4405 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4407 case RTE_FLOW_ACTION_TYPE_END:
4409 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4410 /* create modify action if needed. */
4411 if (flow_dv_modify_hdr_resource_register
4416 dev_flow->dv.actions[modify_action_position] =
4417 dev_flow->dv.modify_hdr->verbs_action;
/* Reserve one slot at the first modify-header action encountered. */
4423 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4424 modify_action_position == UINT32_MAX)
4425 modify_action_position = actions_n++;
4427 dev_flow->dv.actions_n = actions_n;
4428 flow->actions = action_flags;
/* Pass 2: translate every pattern item into matcher mask/value. */
4429 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4430 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4432 switch (items->type) {
4433 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4434 flow_dv_translate_item_port_id(dev, match_mask,
4435 match_value, items);
4436 last_item = MLX5_FLOW_ITEM_PORT_ID;
4438 case RTE_FLOW_ITEM_TYPE_ETH:
4439 flow_dv_translate_item_eth(match_mask, match_value,
4441 matcher.priority = MLX5_PRIORITY_MAP_L2;
4442 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4443 MLX5_FLOW_LAYER_OUTER_L2;
4445 case RTE_FLOW_ITEM_TYPE_VLAN:
4446 flow_dv_translate_item_vlan(match_mask, match_value,
4448 matcher.priority = MLX5_PRIORITY_MAP_L2;
4449 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4450 MLX5_FLOW_LAYER_INNER_VLAN) :
4451 (MLX5_FLOW_LAYER_OUTER_L2 |
4452 MLX5_FLOW_LAYER_OUTER_VLAN);
4454 case RTE_FLOW_ITEM_TYPE_IPV4:
4455 flow_dv_translate_item_ipv4(match_mask, match_value,
4456 items, tunnel, attr->group);
4457 matcher.priority = MLX5_PRIORITY_MAP_L3;
/* Accumulate RX hash fields for the eventual RSS queue selection. */
4458 dev_flow->dv.hash_fields |=
4459 mlx5_flow_hashfields_adjust
4461 MLX5_IPV4_LAYER_TYPES,
4462 MLX5_IPV4_IBV_RX_HASH);
4463 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4464 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4465 mlx5_flow_tunnel_ip_check(items, &last_item);
4467 case RTE_FLOW_ITEM_TYPE_IPV6:
4468 flow_dv_translate_item_ipv6(match_mask, match_value,
4469 items, tunnel, attr->group);
4470 matcher.priority = MLX5_PRIORITY_MAP_L3;
4471 dev_flow->dv.hash_fields |=
4472 mlx5_flow_hashfields_adjust
4474 MLX5_IPV6_LAYER_TYPES,
4475 MLX5_IPV6_IBV_RX_HASH);
4476 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4477 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4478 mlx5_flow_tunnel_ip_check(items, &last_item);
4480 case RTE_FLOW_ITEM_TYPE_TCP:
4481 flow_dv_translate_item_tcp(match_mask, match_value,
4483 matcher.priority = MLX5_PRIORITY_MAP_L4;
4484 dev_flow->dv.hash_fields |=
4485 mlx5_flow_hashfields_adjust
4486 (dev_flow, tunnel, ETH_RSS_TCP,
4487 IBV_RX_HASH_SRC_PORT_TCP |
4488 IBV_RX_HASH_DST_PORT_TCP);
4489 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4490 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4492 case RTE_FLOW_ITEM_TYPE_UDP:
4493 flow_dv_translate_item_udp(match_mask, match_value,
4495 matcher.priority = MLX5_PRIORITY_MAP_L4;
4496 dev_flow->dv.hash_fields |=
4497 mlx5_flow_hashfields_adjust
4498 (dev_flow, tunnel, ETH_RSS_UDP,
4499 IBV_RX_HASH_SRC_PORT_UDP |
4500 IBV_RX_HASH_DST_PORT_UDP);
4501 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4502 MLX5_FLOW_LAYER_OUTER_L4_UDP;
4504 case RTE_FLOW_ITEM_TYPE_GRE:
4505 flow_dv_translate_item_gre(match_mask, match_value,
4507 last_item = MLX5_FLOW_LAYER_GRE;
4509 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4510 flow_dv_translate_item_gre_key(match_mask,
4511 match_value, items);
4512 item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
4514 case RTE_FLOW_ITEM_TYPE_NVGRE:
4515 flow_dv_translate_item_nvgre(match_mask, match_value,
4517 last_item = MLX5_FLOW_LAYER_GRE;
4519 case RTE_FLOW_ITEM_TYPE_VXLAN:
4520 flow_dv_translate_item_vxlan(match_mask, match_value,
4522 last_item = MLX5_FLOW_LAYER_VXLAN;
4524 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4525 flow_dv_translate_item_vxlan(match_mask, match_value,
4527 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4529 case RTE_FLOW_ITEM_TYPE_MPLS:
4530 flow_dv_translate_item_mpls(match_mask, match_value,
4531 items, last_item, tunnel);
4532 last_item = MLX5_FLOW_LAYER_MPLS;
4534 case RTE_FLOW_ITEM_TYPE_META:
4535 flow_dv_translate_item_meta(match_mask, match_value,
4537 last_item = MLX5_FLOW_ITEM_METADATA;
4539 case RTE_FLOW_ITEM_TYPE_ICMP:
4540 flow_dv_translate_item_icmp(match_mask, match_value,
4542 item_flags |= MLX5_FLOW_LAYER_ICMP;
4544 case RTE_FLOW_ITEM_TYPE_ICMP6:
4545 flow_dv_translate_item_icmp6(match_mask, match_value,
4547 item_flags |= MLX5_FLOW_LAYER_ICMP6;
4552 item_flags |= last_item;
4555 * In case of ingress traffic when E-Switch mode is enabled,
4556 * we have two cases where we need to set the source port manually.
4557 * The first one, is in case of Nic steering rule, and the second is
4558 * E-Switch rule where no port_id item was found. In both cases
4559 * the source port is set according the current port in use.
4561 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4562 (priv->representor || priv->master)) {
4563 if (flow_dv_translate_item_port_id(dev, match_mask,
/* Sanity: value must not set bits outside the mask (debug builds only). */
4567 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4568 dev_flow->dv.value.buf));
4569 dev_flow->layers = item_flags;
4570 /* Register matcher. */
4571 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4573 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4575 matcher.egress = attr->egress;
4576 matcher.group = attr->group;
4577 matcher.transfer = attr->transfer;
4578 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4584 * Apply the flow to the NIC.
4587 * Pointer to the Ethernet device structure.
4588 * @param[in, out] flow
4589 * Pointer to flow structure.
4591 * Pointer to error structure.
4594 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Create the HW flow rules for every translated sub-flow: resolve the fate
 * action (drop / queue / RSS hash RX queue), then call dv_create_flow().
 * On failure all acquired hash queues are released (cleanup path at end).
 */
4597 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4598 struct rte_flow_error *error)
4600 struct mlx5_flow_dv *dv;
4601 struct mlx5_flow *dev_flow;
4602 struct mlx5_priv *priv = dev->data->dev_private;
4606 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4609 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Transfer flows drop via the E-Switch drop action, others via a drop hrxq. */
4610 if (flow->transfer) {
4611 dv->actions[n++] = priv->sh->esw_drop_action;
4613 dv->hrxq = mlx5_hrxq_drop_new(dev);
4617 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4619 "cannot get drop hash queue");
4622 dv->actions[n++] = dv->hrxq->action;
4624 } else if (flow->actions &
4625 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4626 struct mlx5_hrxq *hrxq;
/* Reuse an existing hash RX queue if one matches, else create it. */
4628 hrxq = mlx5_hrxq_get(dev, flow->key,
4629 MLX5_RSS_HASH_KEY_LEN,
4632 flow->rss.queue_num);
4634 hrxq = mlx5_hrxq_new
4635 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4636 dv->hash_fields, (*flow->queue),
4637 flow->rss.queue_num,
4638 !!(dev_flow->layers &
4639 MLX5_FLOW_LAYER_TUNNEL));
4643 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4644 "cannot get hash queue");
4648 dv->actions[n++] = dv->hrxq->action;
4651 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4652 (void *)&dv->value, n,
4655 rte_flow_error_set(error, errno,
4656 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4658 "hardware refuses to create flow");
/* Error path: undo hash queue acquisitions, preserving rte_errno. */
4664 err = rte_errno; /* Save rte_errno before cleanup. */
4665 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4666 struct mlx5_flow_dv *dv = &dev_flow->dv;
4668 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4669 mlx5_hrxq_drop_release(dev);
4671 mlx5_hrxq_release(dev, dv->hrxq);
4675 rte_errno = err; /* Restore rte_errno. */
4680 * Release the flow matcher.
4683 * Pointer to Ethernet device.
4685 * Pointer to mlx5_flow.
4688 * 1 while a reference on it exists, 0 when freed.
/* Drop one matcher reference; on last release destroy the DV matcher and
 * return the reference held on its table. */
4691 flow_dv_matcher_release(struct rte_eth_dev *dev,
4692 struct mlx5_flow *flow)
4694 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4695 struct mlx5_priv *priv = dev->data->dev_private;
4696 struct mlx5_ibv_shared *sh = priv->sh;
4697 struct mlx5_flow_tbl_resource *tbl;
4699 assert(matcher->matcher_object);
4700 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4701 dev->data->port_id, (void *)matcher,
4702 rte_atomic32_read(&matcher->refcnt));
4703 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4704 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4705 (matcher->matcher_object));
4706 LIST_REMOVE(matcher, next);
/* Release the per-direction table the matcher was created in.
 * NOTE(review): only tx/rx tables are handled here; fdb handling,
 * if any, is not visible in this chunk. */
4707 if (matcher->egress)
4708 tbl = &sh->tx_tbl[matcher->group];
4710 tbl = &sh->rx_tbl[matcher->group];
4711 flow_dv_tbl_resource_release(tbl);
4713 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4714 dev->data->port_id, (void *)matcher);
4721 * Release an encap/decap resource.
4724 * Pointer to mlx5_flow.
4727 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference; destroy the verbs action and free on last release. */
4730 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4732 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4733 flow->dv.encap_decap;
4735 assert(cache_resource->verbs_action);
4736 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4737 (void *)cache_resource,
4738 rte_atomic32_read(&cache_resource->refcnt));
4739 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4740 claim_zero(mlx5_glue->destroy_flow_action
4741 (cache_resource->verbs_action));
4742 LIST_REMOVE(cache_resource, next);
4743 rte_free(cache_resource);
4744 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4745 (void *)cache_resource);
4752 * Release an jump to table action resource.
4755 * Pointer to mlx5_flow.
4758 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference; on last release destroy the jump action and also
 * release the destination table reference it held. */
4761 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4763 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4766 assert(cache_resource->action);
4767 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4768 (void *)cache_resource,
4769 rte_atomic32_read(&cache_resource->refcnt));
4770 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4771 claim_zero(mlx5_glue->destroy_flow_action
4772 (cache_resource->action));
4773 LIST_REMOVE(cache_resource, next);
4774 flow_dv_tbl_resource_release(cache_resource->tbl);
4775 rte_free(cache_resource);
4776 DRV_LOG(DEBUG, "jump table resource %p: removed",
4777 (void *)cache_resource);
4784 * Release a modify-header resource.
4787 * Pointer to mlx5_flow.
4790 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference; destroy the verbs action and free on last release. */
4793 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4795 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4796 flow->dv.modify_hdr;
4798 assert(cache_resource->verbs_action);
4799 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4800 (void *)cache_resource,
4801 rte_atomic32_read(&cache_resource->refcnt));
4802 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4803 claim_zero(mlx5_glue->destroy_flow_action
4804 (cache_resource->verbs_action));
4805 LIST_REMOVE(cache_resource, next);
4806 rte_free(cache_resource);
4807 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4808 (void *)cache_resource);
4815 * Release port ID action resource.
4818 * Pointer to mlx5_flow.
4821 * 1 while a reference on it exists, 0 when freed.
/* Drop one reference; destroy the port-id action and free on last release. */
4824 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4826 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4827 flow->dv.port_id_action;
4829 assert(cache_resource->action);
4830 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4831 (void *)cache_resource,
4832 rte_atomic32_read(&cache_resource->refcnt));
4833 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4834 claim_zero(mlx5_glue->destroy_flow_action
4835 (cache_resource->action));
4836 LIST_REMOVE(cache_resource, next);
4837 rte_free(cache_resource);
4838 DRV_LOG(DEBUG, "port id action resource %p: removed",
4839 (void *)cache_resource);
4846 * Remove the flow from the NIC but keeps it in memory.
4849 * Pointer to Ethernet device.
4850 * @param[in, out] flow
4851 * Pointer to flow structure.
/* Destroy the HW rules and release hash RX queues of every sub-flow,
 * without freeing the software flow structures. */
4854 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4856 struct mlx5_flow_dv *dv;
4857 struct mlx5_flow *dev_flow;
4861 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4864 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4868 if (flow->actions & MLX5_FLOW_ACTION_DROP)
4869 mlx5_hrxq_drop_release(dev);
4871 mlx5_hrxq_release(dev, dv->hrxq);
4878 * Remove the flow from the NIC and the memory.
4881 * Pointer to the Ethernet device structure.
4882 * @param[in, out] flow
4883 * Pointer to flow structure.
/* Full teardown: remove from HW, release counter and tag, then release all
 * per-sub-flow cached resources (matcher, encap/decap, modify-hdr, jump,
 * port-id) while draining the dev_flows list. */
4886 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4888 struct mlx5_flow *dev_flow;
4892 flow_dv_remove(dev, flow);
4893 if (flow->counter) {
4894 flow_dv_counter_release(flow->counter);
4895 flow->counter = NULL;
4897 if (flow->tag_resource) {
4898 flow_dv_tag_release(dev, flow->tag_resource);
4899 flow->tag_resource = NULL;
4901 while (!LIST_EMPTY(&flow->dev_flows)) {
4902 dev_flow = LIST_FIRST(&flow->dev_flows);
4903 LIST_REMOVE(dev_flow, next);
4904 if (dev_flow->dv.matcher)
4905 flow_dv_matcher_release(dev, dev_flow);
4906 if (dev_flow->dv.encap_decap)
4907 flow_dv_encap_decap_resource_release(dev_flow);
4908 if (dev_flow->dv.modify_hdr)
4909 flow_dv_modify_hdr_resource_release(dev_flow);
4910 if (dev_flow->dv.jump)
4911 flow_dv_jump_tbl_resource_release(dev_flow);
4912 if (dev_flow->dv.port_id_action)
4913 flow_dv_port_id_action_resource_release(dev_flow);
4919 * Query a dv flow rule for its statistics via devx.
4922 * Pointer to Ethernet device.
4924 * Pointer to the sub flow.
4926 * data retrieved by the query.
4928 * Perform verbose error reporting if not NULL.
4931 * 0 on success, a negative errno value otherwise and rte_errno is set.
4934 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4935 void *data, struct rte_flow_error *error)
4937 struct mlx5_priv *priv = dev->data->dev_private;
4938 struct rte_flow_query_count *qc = data;
/* Counters require DevX support. */
4943 if (!priv->config.devx)
4944 return rte_flow_error_set(error, ENOTSUP,
4945 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4947 "counters are not supported");
4948 if (flow->counter) {
4949 err = mlx5_devx_cmd_flow_counter_query
4950 (flow->counter->dcs,
4951 qc->reset, &pkts, &bytes);
4953 return rte_flow_error_set
4955 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4957 "cannot read counters");
/* Report deltas since the last reset baseline stored in the counter. */
4960 qc->hits = pkts - flow->counter->hits;
4961 qc->bytes = bytes - flow->counter->bytes;
/* On reset, the current HW values become the new baseline. */
4963 flow->counter->hits = pkts;
4964 flow->counter->bytes = bytes;
4968 return rte_flow_error_set(error, EINVAL,
4969 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4971 "counters are not available");
/*
 * rte_flow query entry point for the DV driver: walk the action list
 * and dispatch each queryable action.  Only COUNT is supported (via
 * flow_dv_query_count()); VOID is skipped; anything else is rejected
 * with ENOTSUP.
 *
 * NOTE(review): excerpt is elided — `int ret = -EINVAL;`, the `break;`
 * statements, `default:` label and `return ret;` are missing below.
 */
4977 * @see rte_flow_query()
4981 flow_dv_query(struct rte_eth_dev *dev,
4982 struct rte_flow *flow __rte_unused,
4983 const struct rte_flow_action *actions __rte_unused,
4984 void *data __rte_unused,
4985 struct rte_flow_error *error __rte_unused)
/* Iterate the caller-supplied action array up to the END sentinel. */
4989 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4990 switch (actions->type) {
/* VOID actions carry no data — skip. */
4991 case RTE_FLOW_ACTION_TYPE_VOID:
4993 case RTE_FLOW_ACTION_TYPE_COUNT:
4994 ret = flow_dv_query_count(dev, flow, data, error);
/* Any other action type is not queryable (default: label elided). */
4997 return rte_flow_error_set(error, ENOTSUP,
4998 RTE_FLOW_ERROR_TYPE_ACTION,
5000 "action not supported");
/*
 * Serialized wrapper: takes the shared flow lock for the device,
 * delegates to flow_dv_translate(), releases the lock, and returns
 * the translation result (the `ret` declaration and `return ret;`
 * are elided in this excerpt).
 */
5007 * Mutex-protected thunk to flow_dv_translate().
5010 flow_d_translate(struct rte_eth_dev *dev,
5011 struct mlx5_flow *dev_flow,
5012 const struct rte_flow_attr *attr,
5013 const struct rte_flow_item items[],
5014 const struct rte_flow_action actions[],
5015 struct rte_flow_error *error)
/* Serialize against other control-path flow operations. */
5019 flow_d_shared_lock(dev);
5020 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
5021 flow_d_shared_unlock(dev);
/*
 * Serialized wrapper: takes the shared flow lock, delegates to
 * flow_dv_apply(), releases the lock, and returns the result
 * (the `ret` declaration and `return ret;` are elided here).
 */
5026 * Mutex-protected thunk to flow_dv_apply().
5029 flow_d_apply(struct rte_eth_dev *dev,
5030 struct rte_flow *flow,
5031 struct rte_flow_error *error)
/* Serialize against other control-path flow operations. */
5035 flow_d_shared_lock(dev);
5036 ret = flow_dv_apply(dev, flow, error);
5037 flow_d_shared_unlock(dev);
/*
 * Serialized wrapper around flow_dv_remove(): hold the shared flow
 * lock for the duration of the removal.  Returns nothing.
 */
5042 * Mutex-protected thunk to flow_dv_remove().
5045 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5047 flow_d_shared_lock(dev);
5048 flow_dv_remove(dev, flow);
5049 flow_d_shared_unlock(dev);
/*
 * Serialized wrapper around flow_dv_destroy(): hold the shared flow
 * lock for the duration of the full flow teardown.  Returns nothing.
 */
5053 * Mutex-protected thunk to flow_dv_destroy().
5056 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5058 flow_d_shared_lock(dev);
5059 flow_dv_destroy(dev, flow);
5060 flow_d_shared_unlock(dev);
/*
 * Driver-ops vtable for the DV (Direct Verbs) flow engine.
 * validate/prepare/query are called directly; translate, apply,
 * remove and destroy go through the mutex-protected flow_d_* thunks
 * so that control-path flow operations are serialized per device.
 * (The closing `};` is elided in this excerpt.)
 */
5063 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
5064 .validate = flow_dv_validate,
5065 .prepare = flow_dv_prepare,
5066 .translate = flow_d_translate,
5067 .apply = flow_d_apply,
5068 .remove = flow_d_remove,
5069 .destroy = flow_d_destroy,
5070 .query = flow_dv_query,
5073 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */