1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #include "mlx5_rxtx.h"
37 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
40 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
43 #ifndef HAVE_MLX5DV_DR_ESWITCH
44 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
45 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
49 #ifndef HAVE_MLX5DV_DR
50 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
66 * Initialize flow attributes structure according to flow items' types.
69 * Pointer to item specification.
71 * Pointer to flow attributes structure.
74 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
76 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
78 case RTE_FLOW_ITEM_TYPE_IPV4:
81 case RTE_FLOW_ITEM_TYPE_IPV6:
84 case RTE_FLOW_ITEM_TYPE_UDP:
87 case RTE_FLOW_ITEM_TYPE_TCP:
97 struct field_modify_info {
98 uint32_t size; /* Size of field in protocol header, in bytes. */
99 uint32_t offset; /* Offset of field in protocol header, in bytes. */
100 enum mlx5_modification_field id;
103 struct field_modify_info modify_eth[] = {
104 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
105 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
106 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
107 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
111 struct field_modify_info modify_ipv4[] = {
112 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
113 {4, 12, MLX5_MODI_OUT_SIPV4},
114 {4, 16, MLX5_MODI_OUT_DIPV4},
118 struct field_modify_info modify_ipv6[] = {
119 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
120 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
121 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
122 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
123 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
124 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
125 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
126 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
127 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
131 struct field_modify_info modify_udp[] = {
132 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
133 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
137 struct field_modify_info modify_tcp[] = {
138 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
139 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
140 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
141 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
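/*
 * Illustrative note (not part of the original code): each table above
 * splits a protocol header into device-writable segments described as
 * {size in bytes, offset in the header, hardware field id}.
 * flow_dv_convert_modify_action() walks such a table and emits one
 * modification command per masked segment.  For example, rewriting a
 * destination MAC with modify_eth takes two commands: 4 bytes at
 * offset 0 (MLX5_MODI_OUT_DMAC_47_16) and 2 bytes at offset 4
 * (MLX5_MODI_OUT_DMAC_15_0).
 */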
146 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
147 uint8_t next_protocol, uint64_t *item_flags,
150 assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
151 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
152 if (next_protocol == IPPROTO_IPIP) {
153 *item_flags |= MLX5_FLOW_LAYER_IPIP;
156 if (next_protocol == IPPROTO_IPV6) {
157 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
163 * Acquire the synchronizing object to protect multithreaded access
164 * to shared dv context. Lock occurs only if context is actually
165 * shared, i.e. we have a multiport IB device and representors are
169 * Pointer to the rte_eth_dev structure.
172 flow_d_shared_lock(struct rte_eth_dev *dev)
174 struct mlx5_priv *priv = dev->data->dev_private;
175 struct mlx5_ibv_shared *sh = priv->sh;
177 if (sh->dv_refcnt > 1) {
180 ret = pthread_mutex_lock(&sh->dv_mutex);
187 flow_d_shared_unlock(struct rte_eth_dev *dev)
189 struct mlx5_priv *priv = dev->data->dev_private;
190 struct mlx5_ibv_shared *sh = priv->sh;
192 if (sh->dv_refcnt > 1) {
195 ret = pthread_mutex_unlock(&sh->dv_mutex);
202 * Convert modify-header action to DV specification.
205 * Pointer to item specification.
207 * Pointer to field modification information.
208 * @param[in,out] resource
209 * Pointer to the modify-header resource.
211 * Type of modification.
213 * Pointer to the error structure.
216 * 0 on success, a negative errno value otherwise and rte_errno is set.
219 flow_dv_convert_modify_action(struct rte_flow_item *item,
220 struct field_modify_info *field,
221 struct mlx5_flow_dv_modify_hdr_resource *resource,
223 struct rte_flow_error *error)
225 uint32_t i = resource->actions_num;
226 struct mlx5_modification_cmd *actions = resource->actions;
227 const uint8_t *spec = item->spec;
228 const uint8_t *mask = item->mask;
231 while (field->size) {
233 /* Generate modify command for each mask segment. */
234 memcpy(&set, &mask[field->offset], field->size);
236 if (i >= MLX5_MODIFY_NUM)
237 return rte_flow_error_set(error, EINVAL,
238 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
239 "too many items to modify");
240 actions[i].action_type = type;
241 actions[i].field = field->id;
242 actions[i].length = field->size ==
243 4 ? 0 : field->size * 8;
244 rte_memcpy(&actions[i].data[4 - field->size],
245 &spec[field->offset], field->size);
246 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
249 if (resource->actions_num != i)
250 resource->actions_num = i;
253 if (!resource->actions_num)
254 return rte_flow_error_set(error, EINVAL,
255 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
256 "invalid modification flow item");
261 * Convert modify-header set IPv4 address action to DV specification.
263 * @param[in,out] resource
264 * Pointer to the modify-header resource.
266 * Pointer to action specification.
268 * Pointer to the error structure.
271 * 0 on success, a negative errno value otherwise and rte_errno is set.
274 flow_dv_convert_action_modify_ipv4
275 (struct mlx5_flow_dv_modify_hdr_resource *resource,
276 const struct rte_flow_action *action,
277 struct rte_flow_error *error)
279 const struct rte_flow_action_set_ipv4 *conf =
280 (const struct rte_flow_action_set_ipv4 *)(action->conf);
281 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
282 struct rte_flow_item_ipv4 ipv4;
283 struct rte_flow_item_ipv4 ipv4_mask;
285 memset(&ipv4, 0, sizeof(ipv4));
286 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
287 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
288 ipv4.hdr.src_addr = conf->ipv4_addr;
289 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
291 ipv4.hdr.dst_addr = conf->ipv4_addr;
292 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
295 item.mask = &ipv4_mask;
296 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
297 MLX5_MODIFICATION_TYPE_SET, error);
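/*
 * Usage sketch (illustrative only): an application action such as
 *
 *   struct rte_flow_action_set_ipv4 conf = {
 *       .ipv4_addr = RTE_BE32(0xc0a80001),  (192.168.0.1)
 *   };
 *   struct rte_flow_action act = {
 *       .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *       .conf = &conf,
 *   };
 *
 * is translated here into a one-field item/mask pair (spec carries the
 * new address, mask the full src_addr mask) and handed to
 * flow_dv_convert_modify_action() together with the modify_ipv4 table,
 * producing a single MLX5_MODIFICATION_TYPE_SET command.
 */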
301 * Convert modify-header set IPv6 address action to DV specification.
303 * @param[in,out] resource
304 * Pointer to the modify-header resource.
306 * Pointer to action specification.
308 * Pointer to the error structure.
311 * 0 on success, a negative errno value otherwise and rte_errno is set.
314 flow_dv_convert_action_modify_ipv6
315 (struct mlx5_flow_dv_modify_hdr_resource *resource,
316 const struct rte_flow_action *action,
317 struct rte_flow_error *error)
319 const struct rte_flow_action_set_ipv6 *conf =
320 (const struct rte_flow_action_set_ipv6 *)(action->conf);
321 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
322 struct rte_flow_item_ipv6 ipv6;
323 struct rte_flow_item_ipv6 ipv6_mask;
325 memset(&ipv6, 0, sizeof(ipv6));
326 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
327 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
328 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
329 sizeof(ipv6.hdr.src_addr));
330 memcpy(&ipv6_mask.hdr.src_addr,
331 &rte_flow_item_ipv6_mask.hdr.src_addr,
332 sizeof(ipv6.hdr.src_addr));
334 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
335 sizeof(ipv6.hdr.dst_addr));
336 memcpy(&ipv6_mask.hdr.dst_addr,
337 &rte_flow_item_ipv6_mask.hdr.dst_addr,
338 sizeof(ipv6.hdr.dst_addr));
341 item.mask = &ipv6_mask;
342 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
343 MLX5_MODIFICATION_TYPE_SET, error);
347 * Convert modify-header set MAC address action to DV specification.
349 * @param[in,out] resource
350 * Pointer to the modify-header resource.
352 * Pointer to action specification.
354 * Pointer to the error structure.
357 * 0 on success, a negative errno value otherwise and rte_errno is set.
360 flow_dv_convert_action_modify_mac
361 (struct mlx5_flow_dv_modify_hdr_resource *resource,
362 const struct rte_flow_action *action,
363 struct rte_flow_error *error)
365 const struct rte_flow_action_set_mac *conf =
366 (const struct rte_flow_action_set_mac *)(action->conf);
367 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
368 struct rte_flow_item_eth eth;
369 struct rte_flow_item_eth eth_mask;
371 memset(&eth, 0, sizeof(eth));
372 memset(&eth_mask, 0, sizeof(eth_mask));
373 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
374 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
375 sizeof(eth.src.addr_bytes));
376 memcpy(&eth_mask.src.addr_bytes,
377 &rte_flow_item_eth_mask.src.addr_bytes,
378 sizeof(eth_mask.src.addr_bytes));
380 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
381 sizeof(eth.dst.addr_bytes));
382 memcpy(&eth_mask.dst.addr_bytes,
383 &rte_flow_item_eth_mask.dst.addr_bytes,
384 sizeof(eth_mask.dst.addr_bytes));
387 item.mask = &eth_mask;
388 return flow_dv_convert_modify_action(&item, modify_eth, resource,
389 MLX5_MODIFICATION_TYPE_SET, error);
393 * Convert modify-header set TP action to DV specification.
395 * @param[in,out] resource
396 * Pointer to the modify-header resource.
398 * Pointer to action specification.
400 * Pointer to rte_flow_item objects list.
402 * Pointer to flow attributes structure.
404 * Pointer to the error structure.
407 * 0 on success, a negative errno value otherwise and rte_errno is set.
410 flow_dv_convert_action_modify_tp
411 (struct mlx5_flow_dv_modify_hdr_resource *resource,
412 const struct rte_flow_action *action,
413 const struct rte_flow_item *items,
414 union flow_dv_attr *attr,
415 struct rte_flow_error *error)
417 const struct rte_flow_action_set_tp *conf =
418 (const struct rte_flow_action_set_tp *)(action->conf);
419 struct rte_flow_item item;
420 struct rte_flow_item_udp udp;
421 struct rte_flow_item_udp udp_mask;
422 struct rte_flow_item_tcp tcp;
423 struct rte_flow_item_tcp tcp_mask;
424 struct field_modify_info *field;
427 flow_dv_attr_init(items, attr);
429 memset(&udp, 0, sizeof(udp));
430 memset(&udp_mask, 0, sizeof(udp_mask));
431 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
432 udp.hdr.src_port = conf->port;
433 udp_mask.hdr.src_port =
434 rte_flow_item_udp_mask.hdr.src_port;
436 udp.hdr.dst_port = conf->port;
437 udp_mask.hdr.dst_port =
438 rte_flow_item_udp_mask.hdr.dst_port;
440 item.type = RTE_FLOW_ITEM_TYPE_UDP;
442 item.mask = &udp_mask;
446 memset(&tcp, 0, sizeof(tcp));
447 memset(&tcp_mask, 0, sizeof(tcp_mask));
448 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
449 tcp.hdr.src_port = conf->port;
450 tcp_mask.hdr.src_port =
451 rte_flow_item_tcp_mask.hdr.src_port;
453 tcp.hdr.dst_port = conf->port;
454 tcp_mask.hdr.dst_port =
455 rte_flow_item_tcp_mask.hdr.dst_port;
457 item.type = RTE_FLOW_ITEM_TYPE_TCP;
459 item.mask = &tcp_mask;
462 return flow_dv_convert_modify_action(&item, field, resource,
463 MLX5_MODIFICATION_TYPE_SET, error);
467 * Convert modify-header set TTL action to DV specification.
469 * @param[in,out] resource
470 * Pointer to the modify-header resource.
472 * Pointer to action specification.
474 * Pointer to rte_flow_item objects list.
476 * Pointer to flow attributes structure.
478 * Pointer to the error structure.
481 * 0 on success, a negative errno value otherwise and rte_errno is set.
484 flow_dv_convert_action_modify_ttl
485 (struct mlx5_flow_dv_modify_hdr_resource *resource,
486 const struct rte_flow_action *action,
487 const struct rte_flow_item *items,
488 union flow_dv_attr *attr,
489 struct rte_flow_error *error)
491 const struct rte_flow_action_set_ttl *conf =
492 (const struct rte_flow_action_set_ttl *)(action->conf);
493 struct rte_flow_item item;
494 struct rte_flow_item_ipv4 ipv4;
495 struct rte_flow_item_ipv4 ipv4_mask;
496 struct rte_flow_item_ipv6 ipv6;
497 struct rte_flow_item_ipv6 ipv6_mask;
498 struct field_modify_info *field;
501 flow_dv_attr_init(items, attr);
503 memset(&ipv4, 0, sizeof(ipv4));
504 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
505 ipv4.hdr.time_to_live = conf->ttl_value;
506 ipv4_mask.hdr.time_to_live = 0xFF;
507 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
509 item.mask = &ipv4_mask;
513 memset(&ipv6, 0, sizeof(ipv6));
514 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
515 ipv6.hdr.hop_limits = conf->ttl_value;
516 ipv6_mask.hdr.hop_limits = 0xFF;
517 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
519 item.mask = &ipv6_mask;
522 return flow_dv_convert_modify_action(&item, field, resource,
523 MLX5_MODIFICATION_TYPE_SET, error);
527 * Convert modify-header decrement TTL action to DV specification.
529 * @param[in,out] resource
530 * Pointer to the modify-header resource.
532 * Pointer to action specification.
534 * Pointer to rte_flow_item objects list.
536 * Pointer to flow attributes structure.
538 * Pointer to the error structure.
541 * 0 on success, a negative errno value otherwise and rte_errno is set.
544 flow_dv_convert_action_modify_dec_ttl
545 (struct mlx5_flow_dv_modify_hdr_resource *resource,
546 const struct rte_flow_item *items,
547 union flow_dv_attr *attr,
548 struct rte_flow_error *error)
550 struct rte_flow_item item;
551 struct rte_flow_item_ipv4 ipv4;
552 struct rte_flow_item_ipv4 ipv4_mask;
553 struct rte_flow_item_ipv6 ipv6;
554 struct rte_flow_item_ipv6 ipv6_mask;
555 struct field_modify_info *field;
558 flow_dv_attr_init(items, attr);
560 memset(&ipv4, 0, sizeof(ipv4));
561 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
562 ipv4.hdr.time_to_live = 0xFF;
563 ipv4_mask.hdr.time_to_live = 0xFF;
564 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
566 item.mask = &ipv4_mask;
570 memset(&ipv6, 0, sizeof(ipv6));
571 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
572 ipv6.hdr.hop_limits = 0xFF;
573 ipv6_mask.hdr.hop_limits = 0xFF;
574 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
576 item.mask = &ipv6_mask;
579 return flow_dv_convert_modify_action(&item, field, resource,
580 MLX5_MODIFICATION_TYPE_ADD, error);
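/*
 * Worked example of the decrement-by-ADD trick used above: the TTL /
 * hop-limit field is 8 bits wide, so an MLX5_MODIFICATION_TYPE_ADD of
 * 0xFF wraps modulo 256 and is equivalent to subtracting 1, e.g.
 * 64 + 0xFF = 0x13F -> 0x3F = 63.
 */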
584 * Convert modify-header increment/decrement TCP Sequence number
585 * to DV specification.
587 * @param[in,out] resource
588 * Pointer to the modify-header resource.
590 * Pointer to action specification.
592 * Pointer to the error structure.
595 * 0 on success, a negative errno value otherwise and rte_errno is set.
598 flow_dv_convert_action_modify_tcp_seq
599 (struct mlx5_flow_dv_modify_hdr_resource *resource,
600 const struct rte_flow_action *action,
601 struct rte_flow_error *error)
603 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
604 uint64_t value = rte_be_to_cpu_32(*conf);
605 struct rte_flow_item item;
606 struct rte_flow_item_tcp tcp;
607 struct rte_flow_item_tcp tcp_mask;
609 memset(&tcp, 0, sizeof(tcp));
610 memset(&tcp_mask, 0, sizeof(tcp_mask));
611 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
613 * The HW has no decrement operation, only increment.
614 * To simulate decrementing Y by X with the increment operation,
615 * UINT32_MAX must be added to Y X times.
616 * Each addition of UINT32_MAX decrements Y by 1.
619 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
620 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
621 item.type = RTE_FLOW_ITEM_TYPE_TCP;
623 item.mask = &tcp_mask;
624 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
625 MLX5_MODIFICATION_TYPE_ADD, error);
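/*
 * Arithmetic behind the comment above: adding UINT32_MAX X times to a
 * 32-bit value Y gives Y + X * (2^32 - 1) == Y - X (mod 2^32), so a
 * decrement by X can be expressed as a single ADD of X * UINT32_MAX
 * truncated to 32 bits.  E.g. for X = 1 the added constant is
 * 0xFFFFFFFF and 10 + 0xFFFFFFFF wraps to 9.
 */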
629 * Convert modify-header increment/decrement TCP Acknowledgment number
630 * to DV specification.
632 * @param[in,out] resource
633 * Pointer to the modify-header resource.
635 * Pointer to action specification.
637 * Pointer to the error structure.
640 * 0 on success, a negative errno value otherwise and rte_errno is set.
643 flow_dv_convert_action_modify_tcp_ack
644 (struct mlx5_flow_dv_modify_hdr_resource *resource,
645 const struct rte_flow_action *action,
646 struct rte_flow_error *error)
648 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
649 uint64_t value = rte_be_to_cpu_32(*conf);
650 struct rte_flow_item item;
651 struct rte_flow_item_tcp tcp;
652 struct rte_flow_item_tcp tcp_mask;
654 memset(&tcp, 0, sizeof(tcp));
655 memset(&tcp_mask, 0, sizeof(tcp_mask));
656 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
658 * The HW has no decrement operation, only increment.
659 * To simulate decrementing Y by X with the increment operation,
660 * UINT32_MAX must be added to Y X times.
661 * Each addition of UINT32_MAX decrements Y by 1.
664 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
665 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
666 item.type = RTE_FLOW_ITEM_TYPE_TCP;
668 item.mask = &tcp_mask;
669 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
670 MLX5_MODIFICATION_TYPE_ADD, error);
674 * Validate META item.
677 * Pointer to the rte_eth_dev structure.
679 * Item specification.
681 * Attributes of flow that includes this item.
683 * Pointer to error structure.
686 * 0 on success, a negative errno value otherwise and rte_errno is set.
689 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
690 const struct rte_flow_item *item,
691 const struct rte_flow_attr *attr,
692 struct rte_flow_error *error)
694 const struct rte_flow_item_meta *spec = item->spec;
695 const struct rte_flow_item_meta *mask = item->mask;
696 const struct rte_flow_item_meta nic_mask = {
697 .data = RTE_BE32(UINT32_MAX)
700 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
702 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
703 return rte_flow_error_set(error, EPERM,
704 RTE_FLOW_ERROR_TYPE_ITEM,
706 "match on metadata offload "
707 "configuration is off for this port");
709 return rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
712 "data cannot be empty");
714 return rte_flow_error_set(error, EINVAL,
715 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
717 "data cannot be zero");
719 mask = &rte_flow_item_meta_mask;
720 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
721 (const uint8_t *)&nic_mask,
722 sizeof(struct rte_flow_item_meta),
727 return rte_flow_error_set(error, ENOTSUP,
728 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
730 "pattern not supported for ingress");
735 * Validate vport item.
738 * Pointer to the rte_eth_dev structure.
740 * Item specification.
742 * Attributes of flow that includes this item.
743 * @param[in] item_flags
744 * Bit-fields that holds the items detected until now.
746 * Pointer to error structure.
749 * 0 on success, a negative errno value otherwise and rte_errno is set.
752 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
753 const struct rte_flow_item *item,
754 const struct rte_flow_attr *attr,
756 struct rte_flow_error *error)
758 const struct rte_flow_item_port_id *spec = item->spec;
759 const struct rte_flow_item_port_id *mask = item->mask;
760 const struct rte_flow_item_port_id switch_mask = {
763 uint16_t esw_domain_id;
764 uint16_t item_port_esw_domain_id;
768 return rte_flow_error_set(error, EINVAL,
769 RTE_FLOW_ERROR_TYPE_ITEM,
771 "match on port id is valid only"
772 " when transfer flag is enabled");
773 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
774 return rte_flow_error_set(error, ENOTSUP,
775 RTE_FLOW_ERROR_TYPE_ITEM, item,
776 "multiple source ports are not"
780 if (mask->id != 0xffffffff)
781 return rte_flow_error_set(error, ENOTSUP,
782 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
784 "no support for partial mask on"
786 ret = mlx5_flow_item_acceptable
787 (item, (const uint8_t *)mask,
788 (const uint8_t *)&rte_flow_item_port_id_mask,
789 sizeof(struct rte_flow_item_port_id),
795 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
798 return rte_flow_error_set(error, -ret,
799 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
800 "failed to obtain E-Switch info for"
802 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
803 &esw_domain_id, NULL);
805 return rte_flow_error_set(error, -ret,
806 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
808 "failed to obtain E-Switch info");
809 if (item_port_esw_domain_id != esw_domain_id)
810 return rte_flow_error_set(error, -ret,
811 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
812 "cannot match on a port from a"
813 " different E-Switch");
818 * Validate count action.
823 * Pointer to error structure.
826 * 0 on success, a negative errno value otherwise and rte_errno is set.
829 flow_dv_validate_action_count(struct rte_eth_dev *dev,
830 struct rte_flow_error *error)
832 struct mlx5_priv *priv = dev->data->dev_private;
834 if (!priv->config.devx)
836 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
840 return rte_flow_error_set
842 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
844 "count action not supported");
848 * Validate the L2 encap action.
850 * @param[in] action_flags
851 * Holds the actions detected until now.
853 * Pointer to the encap action.
855 * Pointer to flow attributes
857 * Pointer to error structure.
860 * 0 on success, a negative errno value otherwise and rte_errno is set.
863 flow_dv_validate_action_l2_encap(uint64_t action_flags,
864 const struct rte_flow_action *action,
865 const struct rte_flow_attr *attr,
866 struct rte_flow_error *error)
869 return rte_flow_error_set(error, EINVAL,
870 RTE_FLOW_ERROR_TYPE_ACTION, action,
871 "configuration cannot be null");
872 if (action_flags & MLX5_FLOW_ACTION_DROP)
873 return rte_flow_error_set(error, EINVAL,
874 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
875 "can't drop and encap in same flow");
876 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
877 return rte_flow_error_set(error, EINVAL,
878 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
879 "can only have a single encap or"
880 " decap action in a flow");
881 if (!attr->transfer && attr->ingress)
882 return rte_flow_error_set(error, ENOTSUP,
883 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
885 "encap action not supported for "
891 * Validate the L2 decap action.
893 * @param[in] action_flags
894 * Holds the actions detected until now.
896 * Pointer to flow attributes
898 * Pointer to error structure.
901 * 0 on success, a negative errno value otherwise and rte_errno is set.
904 flow_dv_validate_action_l2_decap(uint64_t action_flags,
905 const struct rte_flow_attr *attr,
906 struct rte_flow_error *error)
908 if (action_flags & MLX5_FLOW_ACTION_DROP)
909 return rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
911 "can't drop and decap in same flow");
912 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
913 return rte_flow_error_set(error, EINVAL,
914 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
915 "can only have a single encap or"
916 " decap action in a flow");
917 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
918 return rte_flow_error_set(error, EINVAL,
919 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
920 "can't have decap action after"
923 return rte_flow_error_set(error, ENOTSUP,
924 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
926 "decap action not supported for "
932 * Validate the raw encap action.
934 * @param[in] action_flags
935 * Holds the actions detected until now.
937 * Pointer to the encap action.
939 * Pointer to flow attributes
941 * Pointer to error structure.
944 * 0 on success, a negative errno value otherwise and rte_errno is set.
947 flow_dv_validate_action_raw_encap(uint64_t action_flags,
948 const struct rte_flow_action *action,
949 const struct rte_flow_attr *attr,
950 struct rte_flow_error *error)
952 const struct rte_flow_action_raw_encap *raw_encap =
953 (const struct rte_flow_action_raw_encap *)action->conf;
955 return rte_flow_error_set(error, EINVAL,
956 RTE_FLOW_ERROR_TYPE_ACTION, action,
957 "configuration cannot be null");
958 if (action_flags & MLX5_FLOW_ACTION_DROP)
959 return rte_flow_error_set(error, EINVAL,
960 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
961 "can't drop and encap in same flow");
962 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
963 return rte_flow_error_set(error, EINVAL,
964 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
965 "can only have a single encap"
966 " action in a flow");
967 /* encap without preceding decap is not supported for ingress */
968 if (!attr->transfer && attr->ingress &&
969 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
970 return rte_flow_error_set(error, ENOTSUP,
971 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
973 "encap action not supported for "
975 if (!raw_encap->size || !raw_encap->data)
976 return rte_flow_error_set(error, EINVAL,
977 RTE_FLOW_ERROR_TYPE_ACTION, action,
978 "raw encap data cannot be empty");
983 * Validate the raw decap action.
985 * @param[in] action_flags
986 * Holds the actions detected until now.
988 * Pointer to the decap action.
990 * Pointer to flow attributes
992 * Pointer to error structure.
995 * 0 on success, a negative errno value otherwise and rte_errno is set.
998 flow_dv_validate_action_raw_decap(uint64_t action_flags,
999 const struct rte_flow_action *action,
1000 const struct rte_flow_attr *attr,
1001 struct rte_flow_error *error)
1003 if (action_flags & MLX5_FLOW_ACTION_DROP)
1004 return rte_flow_error_set(error, EINVAL,
1005 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1006 "can't drop and decap in same flow");
1007 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1008 return rte_flow_error_set(error, EINVAL,
1009 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1010 "can't have encap action before"
1012 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1013 return rte_flow_error_set(error, EINVAL,
1014 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1015 "can only have a single decap"
1016 " action in a flow");
1017 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1018 return rte_flow_error_set(error, EINVAL,
1019 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1020 "can't have decap action after"
1022 /* decap action is valid on egress only if it is followed by encap */
1024 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1025 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1028 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1029 return rte_flow_error_set
1031 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1032 NULL, "decap action not supported"
1039 * Find existing encap/decap resource or create and register a new one.
1041 * @param[in, out] dev
1042 * Pointer to rte_eth_dev structure.
1043 * @param[in, out] resource
1044 * Pointer to encap/decap resource.
1045 * @param[in, out] dev_flow
1046 * Pointer to the dev_flow.
1048 * pointer to error structure.
1051 * 0 on success otherwise -errno and errno is set.
1054 flow_dv_encap_decap_resource_register
1055 (struct rte_eth_dev *dev,
1056 struct mlx5_flow_dv_encap_decap_resource *resource,
1057 struct mlx5_flow *dev_flow,
1058 struct rte_flow_error *error)
1060 struct mlx5_priv *priv = dev->data->dev_private;
1061 struct mlx5_ibv_shared *sh = priv->sh;
1062 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1063 struct rte_flow *flow = dev_flow->flow;
1064 struct mlx5dv_dr_domain *domain;
1066 resource->flags = flow->group ? 0 : 1;
1067 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1068 domain = sh->fdb_domain;
1069 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1070 domain = sh->rx_domain;
1072 domain = sh->tx_domain;
1074 /* Lookup a matching resource from cache. */
1075 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1076 if (resource->reformat_type == cache_resource->reformat_type &&
1077 resource->ft_type == cache_resource->ft_type &&
1078 resource->flags == cache_resource->flags &&
1079 resource->size == cache_resource->size &&
1080 !memcmp((const void *)resource->buf,
1081 (const void *)cache_resource->buf,
1083 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1084 (void *)cache_resource,
1085 rte_atomic32_read(&cache_resource->refcnt));
1086 rte_atomic32_inc(&cache_resource->refcnt);
1087 dev_flow->dv.encap_decap = cache_resource;
1091 /* Register new encap/decap resource. */
1092 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1093 if (!cache_resource)
1094 return rte_flow_error_set(error, ENOMEM,
1095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1096 "cannot allocate resource memory");
1097 *cache_resource = *resource;
1098 cache_resource->verbs_action =
1099 mlx5_glue->dv_create_flow_action_packet_reformat
1100 (sh->ctx, cache_resource->reformat_type,
1101 cache_resource->ft_type, domain, cache_resource->flags,
1102 cache_resource->size,
1103 (cache_resource->size ? cache_resource->buf : NULL));
1104 if (!cache_resource->verbs_action) {
1105 rte_free(cache_resource);
1106 return rte_flow_error_set(error, ENOMEM,
1107 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1108 NULL, "cannot create action");
1110 rte_atomic32_init(&cache_resource->refcnt);
1111 rte_atomic32_inc(&cache_resource->refcnt);
1112 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1113 dev_flow->dv.encap_decap = cache_resource;
1114 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1115 (void *)cache_resource,
1116 rte_atomic32_read(&cache_resource->refcnt));
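/*
 * Note (added for clarity): this function follows the same
 * register-or-reuse pattern as the other *_resource_register() helpers
 * below: look the resource up in the per-device shared list, take a
 * reference if an identical entry already exists, otherwise allocate a
 * cache entry, create the corresponding DV/DR action and insert it at
 * the list head.  The matching release path (not shown in this
 * excerpt) is assumed to drop the reference and destroy the action
 * once the count reaches zero.
 */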
1121 * Find existing table jump resource or create and register a new one.
1123 * @param[in, out] dev
1124 * Pointer to rte_eth_dev structure.
1125 * @param[in, out] resource
1126 * Pointer to jump table resource.
1127 * @param[in, out] dev_flow
1128 * Pointer to the dev_flow.
1130 * pointer to error structure.
1133 * 0 on success otherwise -errno and errno is set.
1136 flow_dv_jump_tbl_resource_register
1137 (struct rte_eth_dev *dev,
1138 struct mlx5_flow_dv_jump_tbl_resource *resource,
1139 struct mlx5_flow *dev_flow,
1140 struct rte_flow_error *error)
1142 struct mlx5_priv *priv = dev->data->dev_private;
1143 struct mlx5_ibv_shared *sh = priv->sh;
1144 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1146 /* Lookup a matching resource from cache. */
1147 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1148 if (resource->tbl == cache_resource->tbl) {
1149 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1150 (void *)cache_resource,
1151 rte_atomic32_read(&cache_resource->refcnt));
1152 rte_atomic32_inc(&cache_resource->refcnt);
1153 dev_flow->dv.jump = cache_resource;
1157 /* Register new jump table resource. */
1158 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1159 if (!cache_resource)
1160 return rte_flow_error_set(error, ENOMEM,
1161 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1162 "cannot allocate resource memory");
1163 *cache_resource = *resource;
1164 cache_resource->action =
1165 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1166 (resource->tbl->obj);
1167 if (!cache_resource->action) {
1168 rte_free(cache_resource);
1169 return rte_flow_error_set(error, ENOMEM,
1170 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1171 NULL, "cannot create action");
1173 rte_atomic32_init(&cache_resource->refcnt);
1174 rte_atomic32_inc(&cache_resource->refcnt);
1175 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1176 dev_flow->dv.jump = cache_resource;
1177 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1178 (void *)cache_resource,
1179 rte_atomic32_read(&cache_resource->refcnt));
1184 * Find existing table port ID resource or create and register a new one.
1186 * @param[in, out] dev
1187 * Pointer to rte_eth_dev structure.
1188 * @param[in, out] resource
1189 * Pointer to port ID action resource.
1190 * @param[in, out] dev_flow
1191 * Pointer to the dev_flow.
1193 * pointer to error structure.
1196 * 0 on success otherwise -errno and errno is set.
1199 flow_dv_port_id_action_resource_register
1200 (struct rte_eth_dev *dev,
1201 struct mlx5_flow_dv_port_id_action_resource *resource,
1202 struct mlx5_flow *dev_flow,
1203 struct rte_flow_error *error)
1205 struct mlx5_priv *priv = dev->data->dev_private;
1206 struct mlx5_ibv_shared *sh = priv->sh;
1207 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1209 /* Lookup a matching resource from cache. */
1210 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1211 if (resource->port_id == cache_resource->port_id) {
1212 DRV_LOG(DEBUG, "port id action resource %p: "
1214 (void *)cache_resource,
1215 rte_atomic32_read(&cache_resource->refcnt));
1216 rte_atomic32_inc(&cache_resource->refcnt);
1217 dev_flow->dv.port_id_action = cache_resource;
1221 /* Register new port id action resource. */
1222 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1223 if (!cache_resource)
1224 return rte_flow_error_set(error, ENOMEM,
1225 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1226 "cannot allocate resource memory");
1227 *cache_resource = *resource;
1228 cache_resource->action =
1229 mlx5_glue->dr_create_flow_action_dest_vport
1230 (priv->sh->fdb_domain, resource->port_id);
1231 if (!cache_resource->action) {
1232 rte_free(cache_resource);
1233 return rte_flow_error_set(error, ENOMEM,
1234 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1235 NULL, "cannot create action");
1237 rte_atomic32_init(&cache_resource->refcnt);
1238 rte_atomic32_inc(&cache_resource->refcnt);
1239 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1240 dev_flow->dv.port_id_action = cache_resource;
1241 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1242 (void *)cache_resource,
1243 rte_atomic32_read(&cache_resource->refcnt));
1248 * Get the size of a specific rte_flow_item_type.
1250 * @param[in] item_type
1251 * Tested rte_flow_item_type.
1254 * sizeof struct item_type, 0 if void or irrelevant.
1257 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1261 switch (item_type) {
1262 case RTE_FLOW_ITEM_TYPE_ETH:
1263 retval = sizeof(struct rte_flow_item_eth);
1265 case RTE_FLOW_ITEM_TYPE_VLAN:
1266 retval = sizeof(struct rte_flow_item_vlan);
1268 case RTE_FLOW_ITEM_TYPE_IPV4:
1269 retval = sizeof(struct rte_flow_item_ipv4);
1271 case RTE_FLOW_ITEM_TYPE_IPV6:
1272 retval = sizeof(struct rte_flow_item_ipv6);
1274 case RTE_FLOW_ITEM_TYPE_UDP:
1275 retval = sizeof(struct rte_flow_item_udp);
1277 case RTE_FLOW_ITEM_TYPE_TCP:
1278 retval = sizeof(struct rte_flow_item_tcp);
1280 case RTE_FLOW_ITEM_TYPE_VXLAN:
1281 retval = sizeof(struct rte_flow_item_vxlan);
1283 case RTE_FLOW_ITEM_TYPE_GRE:
1284 retval = sizeof(struct rte_flow_item_gre);
1286 case RTE_FLOW_ITEM_TYPE_NVGRE:
1287 retval = sizeof(struct rte_flow_item_nvgre);
1289 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1290 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1292 case RTE_FLOW_ITEM_TYPE_MPLS:
1293 retval = sizeof(struct rte_flow_item_mpls);
1295 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1303 #define MLX5_ENCAP_IPV4_VERSION 0x40
1304 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1305 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1306 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1307 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1308 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1309 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
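/*
 * These are the default header field values filled in below when the
 * encap pattern leaves them at zero: 0x40 is IPv4 version 4 in the
 * high nibble, 0x05 the minimum IHL (20-byte header), 0x40 a TTL of
 * 64, 0x60000000 sets version 6 in the IPv6 vtc_flow word, 0xff is the
 * default hop limit, 0x08000000 carries the VXLAN "I" (valid VNI) flag
 * and 0x04 is assumed to be the VXLAN-GPE "P" (next-protocol present)
 * flag.
 */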
1312 * Convert the encap action data from list of rte_flow_item to raw buffer
1315 * Pointer to rte_flow_item objects list.
1317 * Pointer to the output buffer.
1319 * Pointer to the output buffer size.
1321 * Pointer to the error structure.
1324 * 0 on success, a negative errno value otherwise and rte_errno is set.
1327 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1328 size_t *size, struct rte_flow_error *error)
1330 struct rte_ether_hdr *eth = NULL;
1331 struct rte_vlan_hdr *vlan = NULL;
1332 struct rte_ipv4_hdr *ipv4 = NULL;
1333 struct rte_ipv6_hdr *ipv6 = NULL;
1334 struct rte_udp_hdr *udp = NULL;
1335 struct rte_vxlan_hdr *vxlan = NULL;
1336 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1337 struct rte_gre_hdr *gre = NULL;
1339 size_t temp_size = 0;
1342 return rte_flow_error_set(error, EINVAL,
1343 RTE_FLOW_ERROR_TYPE_ACTION,
1344 NULL, "invalid empty data");
1345 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1346 len = flow_dv_get_item_len(items->type);
1347 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1348 return rte_flow_error_set(error, EINVAL,
1349 RTE_FLOW_ERROR_TYPE_ACTION,
1350 (void *)items->type,
1351 "items total size is too big"
1352 " for encap action");
1353 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1354 switch (items->type) {
1355 case RTE_FLOW_ITEM_TYPE_ETH:
1356 eth = (struct rte_ether_hdr *)&buf[temp_size];
1358 case RTE_FLOW_ITEM_TYPE_VLAN:
1359 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1361 return rte_flow_error_set(error, EINVAL,
1362 RTE_FLOW_ERROR_TYPE_ACTION,
1363 (void *)items->type,
1364 "eth header not found");
1365 if (!eth->ether_type)
1366 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1368 case RTE_FLOW_ITEM_TYPE_IPV4:
1369 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1371 return rte_flow_error_set(error, EINVAL,
1372 RTE_FLOW_ERROR_TYPE_ACTION,
1373 (void *)items->type,
1374 "neither eth nor vlan"
1376 if (vlan && !vlan->eth_proto)
1377 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1378 else if (eth && !eth->ether_type)
1379 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1380 if (!ipv4->version_ihl)
1381 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1382 MLX5_ENCAP_IPV4_IHL_MIN;
1383 if (!ipv4->time_to_live)
1384 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1386 case RTE_FLOW_ITEM_TYPE_IPV6:
1387 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1389 return rte_flow_error_set(error, EINVAL,
1390 RTE_FLOW_ERROR_TYPE_ACTION,
1391 (void *)items->type,
1392 "neither eth nor vlan"
1394 if (vlan && !vlan->eth_proto)
1395 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1396 else if (eth && !eth->ether_type)
1397 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1398 if (!ipv6->vtc_flow)
1400 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1401 if (!ipv6->hop_limits)
1402 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1404 case RTE_FLOW_ITEM_TYPE_UDP:
1405 udp = (struct rte_udp_hdr *)&buf[temp_size];
1407 return rte_flow_error_set(error, EINVAL,
1408 RTE_FLOW_ERROR_TYPE_ACTION,
1409 (void *)items->type,
1410 "ip header not found");
1411 if (ipv4 && !ipv4->next_proto_id)
1412 ipv4->next_proto_id = IPPROTO_UDP;
1413 else if (ipv6 && !ipv6->proto)
1414 ipv6->proto = IPPROTO_UDP;
1416 case RTE_FLOW_ITEM_TYPE_VXLAN:
1417 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1419 return rte_flow_error_set(error, EINVAL,
1420 RTE_FLOW_ERROR_TYPE_ACTION,
1421 (void *)items->type,
1422 "udp header not found");
1424 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1425 if (!vxlan->vx_flags)
1427 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1429 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1430 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1432 return rte_flow_error_set(error, EINVAL,
1433 RTE_FLOW_ERROR_TYPE_ACTION,
1434 (void *)items->type,
1435 "udp header not found");
1436 if (!vxlan_gpe->proto)
1437 return rte_flow_error_set(error, EINVAL,
1438 RTE_FLOW_ERROR_TYPE_ACTION,
1439 (void *)items->type,
1440 "next protocol not found");
1443 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1444 if (!vxlan_gpe->vx_flags)
1445 vxlan_gpe->vx_flags =
1446 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1448 case RTE_FLOW_ITEM_TYPE_GRE:
1449 case RTE_FLOW_ITEM_TYPE_NVGRE:
1450 gre = (struct rte_gre_hdr *)&buf[temp_size];
1452 return rte_flow_error_set(error, EINVAL,
1453 RTE_FLOW_ERROR_TYPE_ACTION,
1454 (void *)items->type,
1455 "next protocol not found");
1457 return rte_flow_error_set(error, EINVAL,
1458 RTE_FLOW_ERROR_TYPE_ACTION,
1459 (void *)items->type,
1460 "ip header not found");
1461 if (ipv4 && !ipv4->next_proto_id)
1462 ipv4->next_proto_id = IPPROTO_GRE;
1463 else if (ipv6 && !ipv6->proto)
1464 ipv6->proto = IPPROTO_GRE;
1466 case RTE_FLOW_ITEM_TYPE_VOID:
1469 return rte_flow_error_set(error, EINVAL,
1470 RTE_FLOW_ERROR_TYPE_ACTION,
1471 (void *)items->type,
1472 "unsupported item type");
1482 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1484 struct rte_ether_hdr *eth = NULL;
1485 struct rte_vlan_hdr *vlan = NULL;
1486 struct rte_ipv6_hdr *ipv6 = NULL;
1487 struct rte_udp_hdr *udp = NULL;
1491 eth = (struct rte_ether_hdr *)data;
1492 next_hdr = (char *)(eth + 1);
1493 proto = RTE_BE16(eth->ether_type);
1496 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1497 vlan = (struct rte_vlan_hdr *)next_hdr;
1498 proto = RTE_BE16(vlan->eth_proto);
1499 next_hdr += sizeof(struct rte_vlan_hdr);
1502 /* HW calculates the IPv4 checksum, no need to proceed. */
1503 if (proto == RTE_ETHER_TYPE_IPV4)
1506 /* Non IPv4/IPv6 header, not supported. */
1507 if (proto != RTE_ETHER_TYPE_IPV6) {
1508 return rte_flow_error_set(error, ENOTSUP,
1509 RTE_FLOW_ERROR_TYPE_ACTION,
1510 NULL, "Cannot offload non IPv4/IPv6");
1513 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1515 /* ignore non UDP */
1516 if (ipv6->proto != IPPROTO_UDP)
1519 udp = (struct rte_udp_hdr *)(ipv6 + 1);
1520 udp->dgram_cksum = 0;
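/*
 * Note (added for clarity): only an IPv6 + UDP outer header reaches
 * this point (IPv4 returns early above and non-UDP IPv6 is ignored),
 * so what is cleared here is the UDP checksum of the header being
 * pushed; a zero UDP checksum over IPv6 is presumably acceptable for
 * tunnel encapsulations as permitted by RFC 6935/6936.
 */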
1526 * Convert L2 encap action to DV specification.
1529 * Pointer to rte_eth_dev structure.
1531 * Pointer to action structure.
1532 * @param[in, out] dev_flow
1533 * Pointer to the mlx5_flow.
1534 * @param[in] transfer
1535 * Mark if the flow is E-Switch flow.
1537 * Pointer to the error structure.
1540 * 0 on success, a negative errno value otherwise and rte_errno is set.
1543 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1544 const struct rte_flow_action *action,
1545 struct mlx5_flow *dev_flow,
1547 struct rte_flow_error *error)
1549 const struct rte_flow_item *encap_data;
1550 const struct rte_flow_action_raw_encap *raw_encap_data;
1551 struct mlx5_flow_dv_encap_decap_resource res = {
1553 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1554 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1555 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1558 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1560 (const struct rte_flow_action_raw_encap *)action->conf;
1561 res.size = raw_encap_data->size;
1562 memcpy(res.buf, raw_encap_data->data, res.size);
1563 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1566 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1568 ((const struct rte_flow_action_vxlan_encap *)
1569 action->conf)->definition;
1572 ((const struct rte_flow_action_nvgre_encap *)
1573 action->conf)->definition;
1574 if (flow_dv_convert_encap_data(encap_data, res.buf,
1578 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1579 return rte_flow_error_set(error, EINVAL,
1580 RTE_FLOW_ERROR_TYPE_ACTION,
1581 NULL, "can't create L2 encap action");
1586 * Convert L2 decap action to DV specification.
1589 * Pointer to rte_eth_dev structure.
1590 * @param[in, out] dev_flow
1591 * Pointer to the mlx5_flow.
1592 * @param[in] transfer
1593 * Mark if the flow is E-Switch flow.
1595 * Pointer to the error structure.
1598 * 0 on success, a negative errno value otherwise and rte_errno is set.
1601 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1602 struct mlx5_flow *dev_flow,
1604 struct rte_flow_error *error)
1606 struct mlx5_flow_dv_encap_decap_resource res = {
1609 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1610 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1611 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1614 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1615 return rte_flow_error_set(error, EINVAL,
1616 RTE_FLOW_ERROR_TYPE_ACTION,
1617 NULL, "can't create L2 decap action");
1622 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1625 * Pointer to rte_eth_dev structure.
1627 * Pointer to action structure.
1628 * @param[in, out] dev_flow
1629 * Pointer to the mlx5_flow.
1631 * Pointer to the flow attributes.
1633 * Pointer to the error structure.
1636 * 0 on success, a negative errno value otherwise and rte_errno is set.
1639 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1640 const struct rte_flow_action *action,
1641 struct mlx5_flow *dev_flow,
1642 const struct rte_flow_attr *attr,
1643 struct rte_flow_error *error)
1645 const struct rte_flow_action_raw_encap *encap_data;
1646 struct mlx5_flow_dv_encap_decap_resource res;
1648 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1649 res.size = encap_data->size;
1650 memcpy(res.buf, encap_data->data, res.size);
1651 res.reformat_type = attr->egress ?
1652 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1653 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1655 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1657 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1658 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1659 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1660 return rte_flow_error_set(error, EINVAL,
1661 RTE_FLOW_ERROR_TYPE_ACTION,
1662 NULL, "can't create encap action");
1667 * Validate the modify-header actions.
1669 * @param[in] action_flags
1670 * Holds the actions detected until now.
1672 * Pointer to the modify action.
1674 * Pointer to error structure.
1677 * 0 on success, a negative errno value otherwise and rte_errno is set.
1680 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1681 const struct rte_flow_action *action,
1682 struct rte_flow_error *error)
1684 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1685 return rte_flow_error_set(error, EINVAL,
1686 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1687 NULL, "action configuration not set");
1688 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1689 return rte_flow_error_set(error, EINVAL,
1690 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1691 "can't have encap action before"
1697 * Validate the modify-header MAC address actions.
1699 * @param[in] action_flags
1700 * Holds the actions detected until now.
1702 * Pointer to the modify action.
1703 * @param[in] item_flags
1704 * Holds the items detected.
1706 * Pointer to error structure.
1709 * 0 on success, a negative errno value otherwise and rte_errno is set.
1712 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1713 const struct rte_flow_action *action,
1714 const uint64_t item_flags,
1715 struct rte_flow_error *error)
1719 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1721 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1722 return rte_flow_error_set(error, EINVAL,
1723 RTE_FLOW_ERROR_TYPE_ACTION,
1725 "no L2 item in pattern");
1731 * Validate the modify-header IPv4 address actions.
1733 * @param[in] action_flags
1734 * Holds the actions detected until now.
1736 * Pointer to the modify action.
1737 * @param[in] item_flags
1738 * Holds the items detected.
1740 * Pointer to error structure.
1743 * 0 on success, a negative errno value otherwise and rte_errno is set.
1746 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1747 const struct rte_flow_action *action,
1748 const uint64_t item_flags,
1749 struct rte_flow_error *error)
1753 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1755 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1756 return rte_flow_error_set(error, EINVAL,
1757 RTE_FLOW_ERROR_TYPE_ACTION,
1759 "no ipv4 item in pattern");
1765 * Validate the modify-header IPv6 address actions.
1767 * @param[in] action_flags
1768 * Holds the actions detected until now.
1770 * Pointer to the modify action.
1771 * @param[in] item_flags
1772 * Holds the items detected.
1774 * Pointer to error structure.
1777 * 0 on success, a negative errno value otherwise and rte_errno is set.
1780 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1781 const struct rte_flow_action *action,
1782 const uint64_t item_flags,
1783 struct rte_flow_error *error)
1787 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1789 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1790 return rte_flow_error_set(error, EINVAL,
1791 RTE_FLOW_ERROR_TYPE_ACTION,
1793 "no ipv6 item in pattern");
1799 * Validate the modify-header TP actions.
1801 * @param[in] action_flags
1802 * Holds the actions detected until now.
1804 * Pointer to the modify action.
1805 * @param[in] item_flags
1806 * Holds the items detected.
1808 * Pointer to error structure.
1811 * 0 on success, a negative errno value otherwise and rte_errno is set.
1814 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1815 const struct rte_flow_action *action,
1816 const uint64_t item_flags,
1817 struct rte_flow_error *error)
1821 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1823 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1824 return rte_flow_error_set(error, EINVAL,
1825 RTE_FLOW_ERROR_TYPE_ACTION,
1826 NULL, "no transport layer "
1833 * Validate the modify-header actions of increment/decrement
1834 * TCP Sequence-number.
1836 * @param[in] action_flags
1837 * Holds the actions detected until now.
1839 * Pointer to the modify action.
1840 * @param[in] item_flags
1841 * Holds the items detected.
1843 * Pointer to error structure.
1846 * 0 on success, a negative errno value otherwise and rte_errno is set.
1849 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1850 const struct rte_flow_action *action,
1851 const uint64_t item_flags,
1852 struct rte_flow_error *error)
1856 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1858 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1859 return rte_flow_error_set(error, EINVAL,
1860 RTE_FLOW_ERROR_TYPE_ACTION,
1861 NULL, "no TCP item in"
1863 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1864 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1865 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1866 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1867 return rte_flow_error_set(error, EINVAL,
1868 RTE_FLOW_ERROR_TYPE_ACTION,
1870 "cannot decrease and increase"
1871 " TCP sequence number"
1872 " at the same time");
1878 * Validate the modify-header actions of increment/decrement
1879 * TCP Acknowledgment number.
1881 * @param[in] action_flags
1882 * Holds the actions detected until now.
1884 * Pointer to the modify action.
1885 * @param[in] item_flags
1886 * Holds the items detected.
1888 * Pointer to error structure.
1891 * 0 on success, a negative errno value otherwise and rte_errno is set.
1894 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1895 const struct rte_flow_action *action,
1896 const uint64_t item_flags,
1897 struct rte_flow_error *error)
1901 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1903 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1904 return rte_flow_error_set(error, EINVAL,
1905 RTE_FLOW_ERROR_TYPE_ACTION,
1906 NULL, "no TCP item in"
1908 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1909 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1910 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1911 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1912 return rte_flow_error_set(error, EINVAL,
1913 RTE_FLOW_ERROR_TYPE_ACTION,
1915 "cannot decrease and increase"
1916 " TCP acknowledgment number"
1917 " at the same time");
1923 * Validate the modify-header TTL actions.
1925 * @param[in] action_flags
1926 * Holds the actions detected until now.
1928 * Pointer to the modify action.
1929 * @param[in] item_flags
1930 * Holds the items detected.
1932 * Pointer to error structure.
1935 * 0 on success, a negative errno value otherwise and rte_errno is set.
1938 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1939 const struct rte_flow_action *action,
1940 const uint64_t item_flags,
1941 struct rte_flow_error *error)
1945 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1947 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1948 return rte_flow_error_set(error, EINVAL,
1949 RTE_FLOW_ERROR_TYPE_ACTION,
1951 "no IP protocol in pattern");
1957 * Validate jump action.
1960 * Pointer to the jump action.
1961 * @param[in] action_flags
1962 * Holds the actions detected until now.
1964 * The group of the current flow.
1966 * Pointer to error structure.
1969 * 0 on success, a negative errno value otherwise and rte_errno is set.
1972 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1973 uint64_t action_flags,
1975 struct rte_flow_error *error)
1977 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1978 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1979 return rte_flow_error_set(error, EINVAL,
1980 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1981 "can't have 2 fate actions in"
1984 return rte_flow_error_set(error, EINVAL,
1985 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1986 NULL, "action configuration not set");
1987 if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1988 return rte_flow_error_set(error, EINVAL,
1989 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1990 "target group must be higher then"
1991 " the current flow group");
1996 * Validate the port_id action.
1999 * Pointer to rte_eth_dev structure.
2000 * @param[in] action_flags
2001 * Bit-fields that holds the actions detected until now.
2003 * Port_id RTE action structure.
2005 * Attributes of flow that includes this action.
2007 * Pointer to error structure.
2010 * 0 on success, a negative errno value otherwise and rte_errno is set.
2013 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2014 uint64_t action_flags,
2015 const struct rte_flow_action *action,
2016 const struct rte_flow_attr *attr,
2017 struct rte_flow_error *error)
2019 const struct rte_flow_action_port_id *port_id;
2021 uint16_t esw_domain_id;
2022 uint16_t act_port_domain_id;
2025 if (!attr->transfer)
2026 return rte_flow_error_set(error, ENOTSUP,
2027 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2029 "port id action is valid in transfer"
2031 if (!action || !action->conf)
2032 return rte_flow_error_set(error, ENOTSUP,
2033 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2035 "port id action parameters must be"
2037 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2038 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2039 return rte_flow_error_set(error, EINVAL,
2040 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2041 "can have only one fate actions in"
2043 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2044 &esw_domain_id, NULL);
2046 return rte_flow_error_set(error, -ret,
2047 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2049 "failed to obtain E-Switch info");
2050 port_id = action->conf;
2051 port = port_id->original ? dev->data->port_id : port_id->id;
2052 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2054 return rte_flow_error_set
2056 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2057 "failed to obtain E-Switch port id for port");
2058 if (act_port_domain_id != esw_domain_id)
2059 return rte_flow_error_set
2061 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2062 "port does not belong to"
2063 " E-Switch being configured");
2068 * Find existing modify-header resource or create and register a new one.
2070 * @param[in, out] dev
2071 * Pointer to rte_eth_dev structure.
2072 * @param[in, out] resource
2073 * Pointer to modify-header resource.
2074 * @param[in, out] dev_flow
2075 * Pointer to the dev_flow.
2077 * pointer to error structure.
2080 * 0 on success otherwise -errno and errno is set.
2083 flow_dv_modify_hdr_resource_register
2084 (struct rte_eth_dev *dev,
2085 struct mlx5_flow_dv_modify_hdr_resource *resource,
2086 struct mlx5_flow *dev_flow,
2087 struct rte_flow_error *error)
2089 struct mlx5_priv *priv = dev->data->dev_private;
2090 struct mlx5_ibv_shared *sh = priv->sh;
2091 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2092 struct mlx5dv_dr_domain *ns;
2094 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2095 ns = sh->fdb_domain;
2096 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2101 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2102 /* Lookup a matching resource from cache. */
2103 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2104 if (resource->ft_type == cache_resource->ft_type &&
2105 resource->actions_num == cache_resource->actions_num &&
2106 resource->flags == cache_resource->flags &&
2107 !memcmp((const void *)resource->actions,
2108 (const void *)cache_resource->actions,
2109 (resource->actions_num *
2110 sizeof(resource->actions[0])))) {
2111 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2112 (void *)cache_resource,
2113 rte_atomic32_read(&cache_resource->refcnt));
2114 rte_atomic32_inc(&cache_resource->refcnt);
2115 dev_flow->dv.modify_hdr = cache_resource;
2119 /* Register new modify-header resource. */
2120 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2121 if (!cache_resource)
2122 return rte_flow_error_set(error, ENOMEM,
2123 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2124 "cannot allocate resource memory");
2125 *cache_resource = *resource;
2126 cache_resource->verbs_action =
2127 mlx5_glue->dv_create_flow_action_modify_header
2128 (sh->ctx, cache_resource->ft_type,
2129 ns, cache_resource->flags,
2130 cache_resource->actions_num *
2131 sizeof(cache_resource->actions[0]),
2132 (uint64_t *)cache_resource->actions);
2133 if (!cache_resource->verbs_action) {
2134 rte_free(cache_resource);
2135 return rte_flow_error_set(error, ENOMEM,
2136 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2137 NULL, "cannot create action");
2139 rte_atomic32_init(&cache_resource->refcnt);
2140 rte_atomic32_inc(&cache_resource->refcnt);
2141 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2142 dev_flow->dv.modify_hdr = cache_resource;
2143 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2144 (void *)cache_resource,
2145 rte_atomic32_read(&cache_resource->refcnt));
2149 #define MLX5_CNT_CONTAINER_RESIZE 64
2152 * Get or create a flow counter.
2155 * Pointer to the Ethernet device structure.
2157 * Indicate if this counter is shared with other flows.
2159 * Counter identifier.
2162 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
2164 static struct mlx5_flow_counter *
2165 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2168 struct mlx5_priv *priv = dev->data->dev_private;
2169 struct mlx5_flow_counter *cnt = NULL;
2170 struct mlx5_devx_obj *dcs = NULL;
2172 if (!priv->config.devx) {
2173 rte_errno = ENOTSUP;
2177 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2178 if (cnt->shared && cnt->id == id) {
2184 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2187 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2189 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
2193 struct mlx5_flow_counter tmpl = {
2199 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2201 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
2207 TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2212 * Release a flow counter.
2215 * Pointer to the Ethernet device structure.
2216 * @param[in] counter
2217 * Pointer to the counter handler.
2220 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2221 struct mlx5_flow_counter *counter)
2223 struct mlx5_priv *priv = dev->data->dev_private;
2227 if (--counter->ref_cnt == 0) {
2228 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2229 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2235 * Query a devx flow counter.
2238 * Pointer to the Ethernet device structure.
2240 * Pointer to the flow counter.
2242 * The statistics value of packets.
2244 * The statistics value of bytes.
2247 * 0 on success, otherwise a negative errno value and rte_errno is set.
2250 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2251 struct mlx5_flow_counter *cnt, uint64_t *pkts,
2254 return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2259 * Get a pool by a counter.
2262 * Pointer to the counter.
2267 static struct mlx5_flow_counter_pool *
2268 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2271 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2272 return (struct mlx5_flow_counter_pool *)cnt - 1;
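/*
 * Illustrative sketch (editor's addition, not driver code): the pointer
 * arithmetic above relies on the pool layout built by flow_dv_pool_create() -
 * the pool structure is allocated in one block immediately followed by
 * MLX5_COUNTERS_PER_POOL counters, and counters of one pool carry consecutive
 * devx IDs. The hypothetical types below show the same "container-of"
 * computation, assuming <stdint.h>/<stddef.h> are available through the
 * existing includes.
 */
struct example_counter {
	uint32_t id; /* Devx counter ID, consecutive inside one pool. */
};
struct example_pool {
	uint32_t header; /* Stands in for the real pool header fields. */
	struct example_counter counters_raw[]; /* Right after the header. */
};

static struct example_pool *
example_pool_of(struct example_counter *cnt)
{
	/* The remainder of the ID is the index of @cnt inside its pool. */
	struct example_counter *first = cnt - cnt->id % MLX5_COUNTERS_PER_POOL;

	/* counters_raw[] starts right after the header, so step back once. */
	return (struct example_pool *)((uint8_t *)first -
				offsetof(struct example_pool, counters_raw));
}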
2278 * Get a pool by devx counter ID.
2281 * Pointer to the counter container.
2283 * The counter devx ID.
2286 * The counter pool pointer if it exists, NULL otherwise,
2288 static struct mlx5_flow_counter_pool *
2289 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2291 struct mlx5_flow_counter_pool *pool;
2293 TAILQ_FOREACH(pool, &cont->pool_list, next) {
2294 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2295 MLX5_COUNTERS_PER_POOL;
2297 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2304 * Allocate new memory for the counter values wrapped by all the needed
2308 * Pointer to the Ethernet device structure.
2310 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
2313 * The new memory management pointer on success, otherwise NULL and rte_errno
2316 static struct mlx5_counter_stats_mem_mng *
2317 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2319 struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2320 (dev->data->dev_private))->sh;
2321 struct mlx5_devx_mkey_attr mkey_attr;
2322 struct mlx5_counter_stats_mem_mng *mem_mng;
2323 volatile struct flow_counter_stats *raw_data;
2324 int size = (sizeof(struct flow_counter_stats) *
2325 MLX5_COUNTERS_PER_POOL +
2326 sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2327 sizeof(struct mlx5_counter_stats_mem_mng);
2328 uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2335 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2336 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2337 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2338 IBV_ACCESS_LOCAL_WRITE);
2339 if (!mem_mng->umem) {
2344 mkey_attr.addr = (uintptr_t)mem;
2345 mkey_attr.size = size;
2346 mkey_attr.umem_id = mem_mng->umem->umem_id;
2347 mkey_attr.pd = sh->pdn;
2348 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2350 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2355 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2356 raw_data = (volatile struct flow_counter_stats *)mem;
2357 for (i = 0; i < raws_n; ++i) {
2358 mem_mng->raws[i].mem_mng = mem_mng;
2359 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2361 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
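/*
 * Illustrative sketch (editor's addition, not driver code): the allocation
 * above carves one page-aligned block into three consecutive regions - the
 * raw counter statistics registered as a devx UMEM, then raws_n descriptor
 * structures pointing into them, then the management structure itself at the
 * very tail. The hypothetical helper below only recomputes those offsets to
 * make the layout explicit.
 */
static void
example_stat_mem_layout(uint8_t *mem, int raws_n, uint8_t **raw_stats,
			uint8_t **raw_descs, uint8_t **mng)
{
	size_t stats_size = sizeof(struct flow_counter_stats) *
			    MLX5_COUNTERS_PER_POOL * raws_n;
	size_t descs_size = sizeof(struct mlx5_counter_stats_raw) * raws_n;

	*raw_stats = mem;                     /* Registered for devx access. */
	*raw_descs = mem + stats_size;        /* mem_mng->raws points here. */
	*mng = mem + stats_size + descs_size; /* mem_mng lives at the tail. */
}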
2366 * Resize a counter container.
2369 * Pointer to the Ethernet device structure.
2371 * Whether the pool is for counter that was allocated by batch command.
2374 * The new container pointer on success, otherwise NULL and rte_errno is set.
2376 static struct mlx5_pools_container *
2377 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2379 struct mlx5_priv *priv = dev->data->dev_private;
2380 struct mlx5_pools_container *cont =
2381 MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2382 struct mlx5_pools_container *new_cont =
2383 MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2384 struct mlx5_counter_stats_mem_mng *mem_mng;
2385 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2386 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2389 if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2390 /* The last resize still hasn't been detected by the host thread. */
2394 new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2395 if (!new_cont->pools) {
2400 memcpy(new_cont->pools, cont->pools, cont->n *
2401 sizeof(struct mlx5_flow_counter_pool *));
2402 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2403 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2405 rte_free(new_cont->pools);
2408 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2409 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2410 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2412 new_cont->n = resize;
2413 rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2414 TAILQ_INIT(&new_cont->pool_list);
2415 TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2416 new_cont->init_mem_mng = mem_mng;
2418 /* Flip the master container. */
2419 priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
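/*
 * Illustrative sketch (editor's addition, not driver code): resizing uses a
 * double-buffer scheme - two containers exist per batch type and the mhi[]
 * bit selects which one is the current master. The resize fully prepares the
 * unused container and only then publishes it with a single bit flip, so the
 * asynchronous query thread never observes a half-built container. The
 * example_* names below are hypothetical.
 */
struct example_container {
	uint32_t n; /* Pool array capacity. */
};
struct example_cmng {
	uint8_t mhi; /* Index of the current master container. */
	struct example_container cont[2];
};

static struct example_container *
example_container_resize(struct example_cmng *cmng, uint32_t extra)
{
	struct example_container *master = &cmng->cont[cmng->mhi];
	struct example_container *spare = &cmng->cont[cmng->mhi ^ 1];

	spare->n = master->n + extra; /* Fully prepare the spare container. */
	cmng->mhi ^= (uint8_t)1;      /* Then publish it with one flip. */
	return spare;
}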
2424 * Query a devx flow counter.
2427 * Pointer to the Ethernet device structure.
2429 * Pointer to the flow counter.
2431 * The statistics value of packets.
2433 * The statistics value of bytes.
2436 * 0 on success, otherwise a negative errno value and rte_errno is set.
2439 _flow_dv_query_count(struct rte_eth_dev *dev,
2440 struct mlx5_flow_counter *cnt, uint64_t *pkts,
2443 struct mlx5_priv *priv = dev->data->dev_private;
2444 struct mlx5_flow_counter_pool *pool =
2445 flow_dv_counter_pool_get(cnt);
2446 int offset = cnt - &pool->counters_raw[0];
2448 if (priv->counter_fallback)
2449 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2451 rte_spinlock_lock(&pool->sl);
2453 * The single counter allocation may allocate an ID smaller than the
2454 * minimum one currently covered by the raw data read by the host thread.
2455 * In this case the new counter values must be reported as 0.
2457 if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2461 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2462 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2464 rte_spinlock_unlock(&pool->sl);
2469 * Create and initialize a new counter pool.
2472 * Pointer to the Ethernet device structure.
2474 * The devX counter handle.
2476 * Whether the pool is for counter that was allocated by batch command.
2479 * A new pool pointer on success, NULL otherwise and rte_errno is set.
2481 static struct mlx5_flow_counter_pool *
2482 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2485 struct mlx5_priv *priv = dev->data->dev_private;
2486 struct mlx5_flow_counter_pool *pool;
2487 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2489 int16_t n_valid = rte_atomic16_read(&cont->n_valid);
2492 if (cont->n == n_valid) {
2493 cont = flow_dv_container_resize(dev, batch);
2497 size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
2498 sizeof(struct mlx5_flow_counter);
2499 pool = rte_calloc(__func__, 1, size, 0);
2504 pool->min_dcs = dcs;
2505 pool->raw = cont->init_mem_mng->raws + n_valid %
2506 MLX5_CNT_CONTAINER_RESIZE;
2507 pool->raw_hw = NULL;
2508 rte_spinlock_init(&pool->sl);
2510 * The generation of the newly allocated counters in this pool is 0; setting
2511 * the pool generation to 2 makes all the counters valid for allocation.
2513 rte_atomic64_set(&pool->query_gen, 0x2);
2514 TAILQ_INIT(&pool->counters);
2515 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2516 cont->pools[n_valid] = pool;
2517 /* Pool initialization must be completed before host thread access. */
2519 rte_atomic16_add(&cont->n_valid, 1);
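/*
 * Illustrative sketch (editor's addition, not driver code): the pool starts
 * with query_gen == 2 while freshly created counters carry query_gen == 0, so
 * the allocation-time test used by flow_dv_counter_alloc() below,
 * "cnt->query_gen + 1 < pool->query_gen", is immediately true for them. A
 * released counter records the pool generation at release time, so it becomes
 * allocatable again only once the pool generation has advanced by more than
 * one past the recorded value, i.e. after its reset values were refreshed.
 */
static int
example_counter_allocatable(uint64_t cnt_query_gen, uint64_t pool_query_gen)
{
	/* At least one full query cycle must pass between release and reuse. */
	return cnt_query_gen + 1 < pool_query_gen;
}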
2524 * Prepare a new counter and/or a new counter pool.
2527 * Pointer to the Ethernet device structure.
2528 * @param[out] cnt_free
2529 * Where to put the pointer of a new counter.
2531 * Whether the pool is for counter that was allocated by batch command.
2534 * The free counter pool pointer and @p cnt_free is set on success,
2535 * NULL otherwise and rte_errno is set.
2537 static struct mlx5_flow_counter_pool *
2538 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
2539 struct mlx5_flow_counter **cnt_free,
2542 struct mlx5_priv *priv = dev->data->dev_private;
2543 struct mlx5_flow_counter_pool *pool;
2544 struct mlx5_devx_obj *dcs = NULL;
2545 struct mlx5_flow_counter *cnt;
2549 /* bulk_bitmap must be 0 for single counter allocation. */
2550 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2553 pool = flow_dv_find_pool_by_id
2554 (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
2556 pool = flow_dv_pool_create(dev, dcs, batch);
2558 mlx5_devx_cmd_destroy(dcs);
2561 } else if (dcs->id < pool->min_dcs->id) {
2562 rte_atomic64_set(&pool->a64_dcs,
2563 (int64_t)(uintptr_t)dcs);
2565 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
2566 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2571 /* bulk_bitmap is in units of 128 counters. */
2572 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
2573 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
2575 rte_errno = ENODATA;
2578 pool = flow_dv_pool_create(dev, dcs, batch);
2580 mlx5_devx_cmd_destroy(dcs);
2583 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2584 cnt = &pool->counters_raw[i];
2586 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2588 *cnt_free = &pool->counters_raw[0];
2593 * Search for an existing shared counter.
2596 * Pointer to the relevant counter pool container.
2598 * The shared counter ID to search.
2601 * NULL if it does not exist, otherwise pointer to the shared counter.
2603 static struct mlx5_flow_counter *
2604 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
2607 struct mlx5_flow_counter *cnt;
2608 struct mlx5_flow_counter_pool *pool;
2611 TAILQ_FOREACH(pool, &cont->pool_list, next) {
2612 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2613 cnt = &pool->counters_raw[i];
2614 if (cnt->ref_cnt && cnt->shared && cnt->id == id)
2622 * Allocate a flow counter.
2625 * Pointer to the Ethernet device structure.
2627 * Indicate if this counter is shared with other flows.
2629 * Counter identifier.
2631 * Counter flow group.
2634 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
2636 static struct mlx5_flow_counter *
2637 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
2640 struct mlx5_priv *priv = dev->data->dev_private;
2641 struct mlx5_flow_counter_pool *pool = NULL;
2642 struct mlx5_flow_counter *cnt_free = NULL;
2644 * Currently a group 0 flow counter cannot be assigned to a flow if it is
2645 * not the first one in the batch counter allocation, so it is better
2646 * to allocate counters one by one for these flows in a separate container.
2648 * A counter can be shared between different groups, so we need to take
2649 * shared counters from the single container.
2651 uint32_t batch = (group && !shared) ? 1 : 0;
2652 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2655 if (priv->counter_fallback)
2656 return flow_dv_counter_alloc_fallback(dev, shared, id);
2657 if (!priv->config.devx) {
2658 rte_errno = ENOTSUP;
2662 cnt_free = flow_dv_counter_shared_search(cont, id);
2664 if (cnt_free->ref_cnt + 1 == 0) {
2668 cnt_free->ref_cnt++;
2672 /* Pools which have free counters are at the start. */
2673 TAILQ_FOREACH(pool, &cont->pool_list, next) {
2675 * The free counter reset values must be updated between the
2676 * counter release and the counter allocation, so at least one
2677 * query must be done in this time. Ensure it by saving the
2678 * query generation at release time.
2679 * The free list is sorted according to the generation, so if
2680 * the first one is not updated, all the others are not updated either.
2683 cnt_free = TAILQ_FIRST(&pool->counters);
2684 if (cnt_free && cnt_free->query_gen + 1 <
2685 rte_atomic64_read(&pool->query_gen))
2690 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
2694 cnt_free->batch = batch;
2695 /* Create a DV counter action only on first use. */
2696 if (!cnt_free->action) {
2698 struct mlx5_devx_obj *dcs;
2701 offset = cnt_free - &pool->counters_raw[0];
2702 dcs = pool->min_dcs;
2705 dcs = cnt_free->dcs;
2707 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
2709 if (!cnt_free->action) {
2714 /* Update the counter reset values. */
2715 if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
2718 cnt_free->shared = shared;
2719 cnt_free->ref_cnt = 1;
2721 if (!priv->sh->cmng.query_thread_on)
2722 /* Start the asynchronous batch query by the host thread. */
2723 mlx5_set_query_alarm(priv->sh);
2724 TAILQ_REMOVE(&pool->counters, cnt_free, next);
2725 if (TAILQ_EMPTY(&pool->counters)) {
2726 /* Move the pool to the end of the container pool list. */
2727 TAILQ_REMOVE(&cont->pool_list, pool, next);
2728 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2734 * Release a flow counter.
2737 * Pointer to the Ethernet device structure.
2738 * @param[in] counter
2739 * Pointer to the counter handler.
2742 flow_dv_counter_release(struct rte_eth_dev *dev,
2743 struct mlx5_flow_counter *counter)
2745 struct mlx5_priv *priv = dev->data->dev_private;
2749 if (priv->counter_fallback) {
2750 flow_dv_counter_release_fallback(dev, counter);
2753 if (--counter->ref_cnt == 0) {
2754 struct mlx5_flow_counter_pool *pool =
2755 flow_dv_counter_pool_get(counter);
2757 /* Put the counter at the end - it is the most recently updated one. */
2758 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
2759 counter->query_gen = rte_atomic64_read(&pool->query_gen);
2764 * Verify the @p attributes will be correctly understood by the NIC and store
2765 * them in the @p flow if everything is correct.
2768 * Pointer to dev struct.
2769 * @param[in] attributes
2770 * Pointer to flow attributes
2772 * Pointer to error structure.
2775 * 0 on success, a negative errno value otherwise and rte_errno is set.
2778 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2779 const struct rte_flow_attr *attributes,
2780 struct rte_flow_error *error)
2782 struct mlx5_priv *priv = dev->data->dev_private;
2783 uint32_t priority_max = priv->config.flow_prio - 1;
2785 #ifndef HAVE_MLX5DV_DR
2786 if (attributes->group)
2787 return rte_flow_error_set(error, ENOTSUP,
2788 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2790 "groups are not supported");
2792 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2793 attributes->priority >= priority_max)
2794 return rte_flow_error_set(error, ENOTSUP,
2795 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2797 "priority out of range");
2798 if (attributes->transfer) {
2799 if (!priv->config.dv_esw_en)
2800 return rte_flow_error_set
2802 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2803 "E-Switch dr is not supported");
2804 if (!(priv->representor || priv->master))
2805 return rte_flow_error_set
2806 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2807 NULL, "E-Switch configuration can only be"
2808 " done by a master or a representor device");
2809 if (attributes->egress)
2810 return rte_flow_error_set
2812 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2813 "egress is not supported");
2814 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2815 return rte_flow_error_set
2817 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2818 NULL, "group must be smaller than "
2819 RTE_STR(MLX5_MAX_TABLES_FDB));
2821 if (!(attributes->egress ^ attributes->ingress))
2822 return rte_flow_error_set(error, ENOTSUP,
2823 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2824 "must specify exactly one of "
2825 "ingress or egress");
2830 * Internal validation function for validating both actions and items.
2833 * Pointer to the rte_eth_dev structure.
2835 * Pointer to the flow attributes.
2837 * Pointer to the list of items.
2838 * @param[in] actions
2839 * Pointer to the list of actions.
2841 * Pointer to the error structure.
2844 * 0 on success, a negative errno value otherwise and rte_errno is set.
2847 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2848 const struct rte_flow_item items[],
2849 const struct rte_flow_action actions[],
2850 struct rte_flow_error *error)
2853 uint64_t action_flags = 0;
2854 uint64_t item_flags = 0;
2855 uint64_t last_item = 0;
2856 uint8_t next_protocol = 0xff;
2858 const struct rte_flow_item *gre_item = NULL;
2859 struct rte_flow_item_tcp nic_tcp_mask = {
2862 .src_port = RTE_BE16(UINT16_MAX),
2863 .dst_port = RTE_BE16(UINT16_MAX),
2869 ret = flow_dv_validate_attributes(dev, attr, error);
2872 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2873 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2874 switch (items->type) {
2875 case RTE_FLOW_ITEM_TYPE_VOID:
2877 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2878 ret = flow_dv_validate_item_port_id
2879 (dev, items, attr, item_flags, error);
2882 last_item = MLX5_FLOW_ITEM_PORT_ID;
2884 case RTE_FLOW_ITEM_TYPE_ETH:
2885 ret = mlx5_flow_validate_item_eth(items, item_flags,
2889 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2890 MLX5_FLOW_LAYER_OUTER_L2;
2892 case RTE_FLOW_ITEM_TYPE_VLAN:
2893 ret = mlx5_flow_validate_item_vlan(items, item_flags,
2897 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2898 MLX5_FLOW_LAYER_OUTER_VLAN;
2900 case RTE_FLOW_ITEM_TYPE_IPV4:
2901 mlx5_flow_tunnel_ip_check(items, next_protocol,
2902 &item_flags, &tunnel);
2903 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2907 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2908 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2909 if (items->mask != NULL &&
2910 ((const struct rte_flow_item_ipv4 *)
2911 items->mask)->hdr.next_proto_id) {
2913 ((const struct rte_flow_item_ipv4 *)
2914 (items->spec))->hdr.next_proto_id;
2916 ((const struct rte_flow_item_ipv4 *)
2917 (items->mask))->hdr.next_proto_id;
2919 /* Reset for inner layer. */
2920 next_protocol = 0xff;
2923 case RTE_FLOW_ITEM_TYPE_IPV6:
2924 mlx5_flow_tunnel_ip_check(items, next_protocol,
2925 &item_flags, &tunnel);
2926 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2930 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2931 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2932 if (items->mask != NULL &&
2933 ((const struct rte_flow_item_ipv6 *)
2934 items->mask)->hdr.proto) {
2936 ((const struct rte_flow_item_ipv6 *)
2937 items->spec)->hdr.proto;
2939 ((const struct rte_flow_item_ipv6 *)
2940 items->mask)->hdr.proto;
2942 /* Reset for inner layer. */
2943 next_protocol = 0xff;
2946 case RTE_FLOW_ITEM_TYPE_TCP:
2947 ret = mlx5_flow_validate_item_tcp
2954 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2955 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2957 case RTE_FLOW_ITEM_TYPE_UDP:
2958 ret = mlx5_flow_validate_item_udp(items, item_flags,
2963 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2964 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2966 case RTE_FLOW_ITEM_TYPE_GRE:
2967 ret = mlx5_flow_validate_item_gre(items, item_flags,
2968 next_protocol, error);
2972 last_item = MLX5_FLOW_LAYER_GRE;
2974 case RTE_FLOW_ITEM_TYPE_NVGRE:
2975 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
2980 last_item = MLX5_FLOW_LAYER_NVGRE;
2982 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2983 ret = mlx5_flow_validate_item_gre_key
2984 (items, item_flags, gre_item, error);
2987 last_item = MLX5_FLOW_LAYER_GRE_KEY;
2989 case RTE_FLOW_ITEM_TYPE_VXLAN:
2990 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2994 last_item = MLX5_FLOW_LAYER_VXLAN;
2996 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2997 ret = mlx5_flow_validate_item_vxlan_gpe(items,
3002 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3004 case RTE_FLOW_ITEM_TYPE_MPLS:
3005 ret = mlx5_flow_validate_item_mpls(dev, items,
3010 last_item = MLX5_FLOW_LAYER_MPLS;
3012 case RTE_FLOW_ITEM_TYPE_META:
3013 ret = flow_dv_validate_item_meta(dev, items, attr,
3017 last_item = MLX5_FLOW_ITEM_METADATA;
3019 case RTE_FLOW_ITEM_TYPE_ICMP:
3020 ret = mlx5_flow_validate_item_icmp(items, item_flags,
3025 last_item = MLX5_FLOW_LAYER_ICMP;
3027 case RTE_FLOW_ITEM_TYPE_ICMP6:
3028 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3033 last_item = MLX5_FLOW_LAYER_ICMP6;
3036 return rte_flow_error_set(error, ENOTSUP,
3037 RTE_FLOW_ERROR_TYPE_ITEM,
3038 NULL, "item not supported");
3040 item_flags |= last_item;
3042 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3043 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3044 return rte_flow_error_set(error, ENOTSUP,
3045 RTE_FLOW_ERROR_TYPE_ACTION,
3046 actions, "too many actions");
3047 switch (actions->type) {
3048 case RTE_FLOW_ACTION_TYPE_VOID:
3050 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3051 ret = flow_dv_validate_action_port_id(dev,
3058 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3061 case RTE_FLOW_ACTION_TYPE_FLAG:
3062 ret = mlx5_flow_validate_action_flag(action_flags,
3066 action_flags |= MLX5_FLOW_ACTION_FLAG;
3069 case RTE_FLOW_ACTION_TYPE_MARK:
3070 ret = mlx5_flow_validate_action_mark(actions,
3075 action_flags |= MLX5_FLOW_ACTION_MARK;
3078 case RTE_FLOW_ACTION_TYPE_DROP:
3079 ret = mlx5_flow_validate_action_drop(action_flags,
3083 action_flags |= MLX5_FLOW_ACTION_DROP;
3086 case RTE_FLOW_ACTION_TYPE_QUEUE:
3087 ret = mlx5_flow_validate_action_queue(actions,
3092 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3095 case RTE_FLOW_ACTION_TYPE_RSS:
3096 ret = mlx5_flow_validate_action_rss(actions,
3102 action_flags |= MLX5_FLOW_ACTION_RSS;
3105 case RTE_FLOW_ACTION_TYPE_COUNT:
3106 ret = flow_dv_validate_action_count(dev, error);
3109 action_flags |= MLX5_FLOW_ACTION_COUNT;
3112 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3113 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3114 ret = flow_dv_validate_action_l2_encap(action_flags,
3119 action_flags |= actions->type ==
3120 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3121 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3122 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3125 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3126 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3127 ret = flow_dv_validate_action_l2_decap(action_flags,
3131 action_flags |= actions->type ==
3132 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3133 MLX5_FLOW_ACTION_VXLAN_DECAP :
3134 MLX5_FLOW_ACTION_NVGRE_DECAP;
3137 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3138 ret = flow_dv_validate_action_raw_encap(action_flags,
3143 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3146 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3147 ret = flow_dv_validate_action_raw_decap(action_flags,
3152 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3155 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3156 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3157 ret = flow_dv_validate_action_modify_mac(action_flags,
3163 /* Count all modify-header actions as one action. */
3164 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3166 action_flags |= actions->type ==
3167 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3168 MLX5_FLOW_ACTION_SET_MAC_SRC :
3169 MLX5_FLOW_ACTION_SET_MAC_DST;
3172 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3173 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3174 ret = flow_dv_validate_action_modify_ipv4(action_flags,
3180 /* Count all modify-header actions as one action. */
3181 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3183 action_flags |= actions->type ==
3184 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3185 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3186 MLX5_FLOW_ACTION_SET_IPV4_DST;
3188 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3189 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3190 ret = flow_dv_validate_action_modify_ipv6(action_flags,
3196 /* Count all modify-header actions as one action. */
3197 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3199 action_flags |= actions->type ==
3200 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3201 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3202 MLX5_FLOW_ACTION_SET_IPV6_DST;
3204 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3205 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3206 ret = flow_dv_validate_action_modify_tp(action_flags,
3212 /* Count all modify-header actions as one action. */
3213 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3215 action_flags |= actions->type ==
3216 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3217 MLX5_FLOW_ACTION_SET_TP_SRC :
3218 MLX5_FLOW_ACTION_SET_TP_DST;
3220 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3221 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3222 ret = flow_dv_validate_action_modify_ttl(action_flags,
3228 /* Count all modify-header actions as one action. */
3229 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3231 action_flags |= actions->type ==
3232 RTE_FLOW_ACTION_TYPE_SET_TTL ?
3233 MLX5_FLOW_ACTION_SET_TTL :
3234 MLX5_FLOW_ACTION_DEC_TTL;
3236 case RTE_FLOW_ACTION_TYPE_JUMP:
3237 ret = flow_dv_validate_action_jump(actions,
3239 attr->group, error);
3243 action_flags |= MLX5_FLOW_ACTION_JUMP;
3245 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3246 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3247 ret = flow_dv_validate_action_modify_tcp_seq
3254 /* Count all modify-header actions as one action. */
3255 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3257 action_flags |= actions->type ==
3258 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3259 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3260 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3262 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3263 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3264 ret = flow_dv_validate_action_modify_tcp_ack
3271 /* Count all modify-header actions as one action. */
3272 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3274 action_flags |= actions->type ==
3275 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3276 MLX5_FLOW_ACTION_INC_TCP_ACK :
3277 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3280 return rte_flow_error_set(error, ENOTSUP,
3281 RTE_FLOW_ERROR_TYPE_ACTION,
3283 "action not supported");
3286 /* E-Switch has a few restrictions on using items and actions. */
3287 if (attr->transfer) {
3288 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3289 return rte_flow_error_set(error, ENOTSUP,
3290 RTE_FLOW_ERROR_TYPE_ACTION,
3292 "unsupported action FLAG");
3293 if (action_flags & MLX5_FLOW_ACTION_MARK)
3294 return rte_flow_error_set(error, ENOTSUP,
3295 RTE_FLOW_ERROR_TYPE_ACTION,
3297 "unsupported action MARK");
3298 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3299 return rte_flow_error_set(error, ENOTSUP,
3300 RTE_FLOW_ERROR_TYPE_ACTION,
3302 "unsupported action QUEUE");
3303 if (action_flags & MLX5_FLOW_ACTION_RSS)
3304 return rte_flow_error_set(error, ENOTSUP,
3305 RTE_FLOW_ERROR_TYPE_ACTION,
3307 "unsupported action RSS");
3308 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3309 return rte_flow_error_set(error, EINVAL,
3310 RTE_FLOW_ERROR_TYPE_ACTION,
3312 "no fate action is found");
3314 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3315 return rte_flow_error_set(error, EINVAL,
3316 RTE_FLOW_ERROR_TYPE_ACTION,
3318 "no fate action is found");
3324 * Internal preparation function. Allocates the DV flow size;
3325 * this size is constant.
3328 * Pointer to the flow attributes.
3330 * Pointer to the list of items.
3331 * @param[in] actions
3332 * Pointer to the list of actions.
3334 * Pointer to the error structure.
3337 * Pointer to mlx5_flow object on success,
3338 * otherwise NULL and rte_errno is set.
3340 static struct mlx5_flow *
3341 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3342 const struct rte_flow_item items[] __rte_unused,
3343 const struct rte_flow_action actions[] __rte_unused,
3344 struct rte_flow_error *error)
3346 uint32_t size = sizeof(struct mlx5_flow);
3347 struct mlx5_flow *flow;
3349 flow = rte_calloc(__func__, 1, size, 0);
3351 rte_flow_error_set(error, ENOMEM,
3352 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3353 "not enough memory to create flow");
3356 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3362 * Sanity check for match mask and value. Similar to check_valid_spec() in
3363 * the kernel driver. If an unmasked bit is present in the value, it returns failure.
3366 * pointer to match mask buffer.
3367 * @param match_value
3368 * pointer to match value buffer.
3371 * 0 if valid, -EINVAL otherwise.
3374 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3376 uint8_t *m = match_mask;
3377 uint8_t *v = match_value;
3380 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3383 "match_value differs from match_criteria"
3384 " %p[%u] != %p[%u]",
3385 match_value, i, match_mask, i);
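/*
 * Illustrative sketch (editor's addition, not driver code): the sanity check
 * above walks the match parameter byte by byte. Assuming the intent stated in
 * the function comment (a value bit that has no corresponding mask bit is
 * invalid), the per-byte condition could look like the hypothetical helper
 * below; the check in this function is what the driver actually uses.
 */
static int
example_value_within_mask(const uint8_t *mask, const uint8_t *value, size_t len)
{
	size_t i;

	for (i = 0; i < len; ++i)
		if (value[i] & ~mask[i]) /* Value bit set without a mask bit. */
			return -1;
	return 0;
}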
3394 * Add Ethernet item to matcher and to the value.
3396 * @param[in, out] matcher
3398 * @param[in, out] key
3399 * Flow matcher value.
3401 * Flow pattern to translate.
3403 * Item is inner pattern.
3406 flow_dv_translate_item_eth(void *matcher, void *key,
3407 const struct rte_flow_item *item, int inner)
3409 const struct rte_flow_item_eth *eth_m = item->mask;
3410 const struct rte_flow_item_eth *eth_v = item->spec;
3411 const struct rte_flow_item_eth nic_mask = {
3412 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3413 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3414 .type = RTE_BE16(0xffff),
3426 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3428 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3430 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3432 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3434 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
3435 ð_m->dst, sizeof(eth_m->dst));
3436 /* The value must be in the range of the mask. */
3437 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
3438 for (i = 0; i < sizeof(eth_m->dst); ++i)
3439 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
3440 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
3441 ð_m->src, sizeof(eth_m->src));
3442 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
3443 /* The value must be in the range of the mask. */
3444 for (i = 0; i < sizeof(eth_m->dst); ++i)
3445 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
3446 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3447 rte_be_to_cpu_16(eth_m->type));
3448 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
3449 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
3453 * Add VLAN item to matcher and to the value.
3455 * @param[in, out] dev_flow
3457 * @param[in, out] matcher
3459 * @param[in, out] key
3460 * Flow matcher value.
3462 * Flow pattern to translate.
3464 * Item is inner pattern.
3467 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
3468 void *matcher, void *key,
3469 const struct rte_flow_item *item,
3472 const struct rte_flow_item_vlan *vlan_m = item->mask;
3473 const struct rte_flow_item_vlan *vlan_v = item->spec;
3482 vlan_m = &rte_flow_item_vlan_mask;
3484 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3486 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3488 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3490 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3492 * This is a workaround; masks are not supported
3493 * and are pre-validated.
3495 dev_flow->dv.vf_vlan.tag =
3496 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
3498 tci_m = rte_be_to_cpu_16(vlan_m->tci);
3499 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
3500 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
3501 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
3502 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
3503 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
3504 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
3505 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
3506 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
3507 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
3508 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3509 rte_be_to_cpu_16(vlan_m->inner_type));
3510 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
3511 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
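/*
 * Illustrative sketch (editor's addition, not driver code): the shifts above
 * follow the 802.1Q TCI layout - PCP in bits 15:13, CFI/DEI in bit 12 and the
 * VLAN ID in bits 11:0 - which is why first_vid takes the TCI value, first_cfi
 * takes tci >> 12 and first_prio takes tci >> 13 (MLX5_SET() masks each value
 * to its field width). The hypothetical helper below performs the same split
 * explicitly.
 */
static void
example_split_tci(uint16_t tci, uint16_t *vid, uint16_t *cfi, uint16_t *prio)
{
	*vid = tci & 0x0fff;      /* Bits 11:0 - VLAN identifier. */
	*cfi = (tci >> 12) & 0x1; /* Bit 12 - CFI/DEI. */
	*prio = tci >> 13;        /* Bits 15:13 - priority code point. */
}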
3515 * Add IPV4 item to matcher and to the value.
3517 * @param[in, out] matcher
3519 * @param[in, out] key
3520 * Flow matcher value.
3522 * Flow pattern to translate.
3524 * Item is inner pattern.
3526 * The group to insert the rule.
3529 flow_dv_translate_item_ipv4(void *matcher, void *key,
3530 const struct rte_flow_item *item,
3531 int inner, uint32_t group)
3533 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
3534 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
3535 const struct rte_flow_item_ipv4 nic_mask = {
3537 .src_addr = RTE_BE32(0xffffffff),
3538 .dst_addr = RTE_BE32(0xffffffff),
3539 .type_of_service = 0xff,
3540 .next_proto_id = 0xff,
3550 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3552 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3554 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3556 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3559 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3561 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
3562 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
3567 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3568 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3569 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3570 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3571 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
3572 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
3573 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3574 src_ipv4_src_ipv6.ipv4_layout.ipv4);
3575 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3576 src_ipv4_src_ipv6.ipv4_layout.ipv4);
3577 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
3578 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
3579 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
3580 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
3581 ipv4_m->hdr.type_of_service);
3582 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
3583 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
3584 ipv4_m->hdr.type_of_service >> 2);
3585 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
3586 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3587 ipv4_m->hdr.next_proto_id);
3588 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3589 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
3593 * Add IPV6 item to matcher and to the value.
3595 * @param[in, out] matcher
3597 * @param[in, out] key
3598 * Flow matcher value.
3600 * Flow pattern to translate.
3602 * Item is inner pattern.
3604 * The group to insert the rule.
3607 flow_dv_translate_item_ipv6(void *matcher, void *key,
3608 const struct rte_flow_item *item,
3609 int inner, uint32_t group)
3611 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
3612 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
3613 const struct rte_flow_item_ipv6 nic_mask = {
3616 "\xff\xff\xff\xff\xff\xff\xff\xff"
3617 "\xff\xff\xff\xff\xff\xff\xff\xff",
3619 "\xff\xff\xff\xff\xff\xff\xff\xff"
3620 "\xff\xff\xff\xff\xff\xff\xff\xff",
3621 .vtc_flow = RTE_BE32(0xffffffff),
3628 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3629 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3638 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3640 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3642 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3644 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3647 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3649 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
3650 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
3655 size = sizeof(ipv6_m->hdr.dst_addr);
3656 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3657 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3658 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3659 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3660 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
3661 for (i = 0; i < size; ++i)
3662 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
3663 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3664 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3665 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3666 src_ipv4_src_ipv6.ipv6_layout.ipv6);
3667 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
3668 for (i = 0; i < size; ++i)
3669 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
3671 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
3672 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
3673 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
3674 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
3675 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
3676 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
3679 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
3681 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
3684 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
3686 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
3690 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3692 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3693 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
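/*
 * Illustrative sketch (editor's addition, not driver code): the IPv6 vtc_flow
 * word packs version (bits 31:28), traffic class (bits 27:20) and flow label
 * (bits 19:0), and the traffic class itself is DSCP (upper 6 bits) plus ECN
 * (lower 2 bits). That is why the code above uses ">> 20" for ip_ecn and
 * ">> 22" for ip_dscp, relying on the field widths to drop the upper bits.
 * The hypothetical helper below performs the same split explicitly.
 */
static void
example_split_vtc_flow(uint32_t vtc_flow, uint8_t *dscp, uint8_t *ecn,
		       uint32_t *flow_label)
{
	uint8_t tclass = (vtc_flow >> 20) & 0xff;

	*dscp = tclass >> 2;              /* Upper 6 bits of traffic class. */
	*ecn = tclass & 0x3;              /* Lower 2 bits of traffic class. */
	*flow_label = vtc_flow & 0xfffff; /* Bits 19:0. */
}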
3697 * Add TCP item to matcher and to the value.
3699 * @param[in, out] matcher
3701 * @param[in, out] key
3702 * Flow matcher value.
3704 * Flow pattern to translate.
3706 * Item is inner pattern.
3709 flow_dv_translate_item_tcp(void *matcher, void *key,
3710 const struct rte_flow_item *item,
3713 const struct rte_flow_item_tcp *tcp_m = item->mask;
3714 const struct rte_flow_item_tcp *tcp_v = item->spec;
3719 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3721 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3723 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3725 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3727 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3728 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3732 tcp_m = &rte_flow_item_tcp_mask;
3733 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3734 rte_be_to_cpu_16(tcp_m->hdr.src_port));
3735 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3736 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3737 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3738 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3739 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3740 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3741 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3742 tcp_m->hdr.tcp_flags);
3743 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3744 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3748 * Add UDP item to matcher and to the value.
3750 * @param[in, out] matcher
3752 * @param[in, out] key
3753 * Flow matcher value.
3755 * Flow pattern to translate.
3757 * Item is inner pattern.
3760 flow_dv_translate_item_udp(void *matcher, void *key,
3761 const struct rte_flow_item *item,
3764 const struct rte_flow_item_udp *udp_m = item->mask;
3765 const struct rte_flow_item_udp *udp_v = item->spec;
3770 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3772 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3774 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3776 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3778 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3779 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3783 udp_m = &rte_flow_item_udp_mask;
3784 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3785 rte_be_to_cpu_16(udp_m->hdr.src_port));
3786 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3787 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3788 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3789 rte_be_to_cpu_16(udp_m->hdr.dst_port));
3790 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3791 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3795 * Add GRE optional Key item to matcher and to the value.
3797 * @param[in, out] matcher
3799 * @param[in, out] key
3800 * Flow matcher value.
3802 * Flow pattern to translate.
3804 * Item is inner pattern.
3807 flow_dv_translate_item_gre_key(void *matcher, void *key,
3808 const struct rte_flow_item *item)
3810 const rte_be32_t *key_m = item->mask;
3811 const rte_be32_t *key_v = item->spec;
3812 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3813 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3814 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3819 key_m = &gre_key_default_mask;
3820 /* GRE K bit must be on and should already be validated */
3821 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3822 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3823 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3824 rte_be_to_cpu_32(*key_m) >> 8);
3825 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3826 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3827 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3828 rte_be_to_cpu_32(*key_m) & 0xFF);
3829 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3830 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
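/*
 * Illustrative sketch (editor's addition, not driver code): the matcher stores
 * the 32-bit GRE key split into two fields - gre_key_h holds the upper 24 bits
 * and gre_key_l the lower 8 bits - hence the ">> 8" and "& 0xFF" above after
 * converting from network byte order. The hypothetical helper below performs
 * the same split.
 */
static void
example_split_gre_key(uint32_t key, uint32_t *key_h, uint32_t *key_l)
{
	*key_h = key >> 8;   /* Upper 24 bits of the GRE key. */
	*key_l = key & 0xFF; /* Lower 8 bits of the GRE key. */
}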
3834 * Add GRE item to matcher and to the value.
3836 * @param[in, out] matcher
3838 * @param[in, out] key
3839 * Flow matcher value.
3841 * Flow pattern to translate.
3843 * Item is inner pattern.
3846 flow_dv_translate_item_gre(void *matcher, void *key,
3847 const struct rte_flow_item *item,
3850 const struct rte_flow_item_gre *gre_m = item->mask;
3851 const struct rte_flow_item_gre *gre_v = item->spec;
3854 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3855 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3862 uint16_t s_present:1;
3863 uint16_t k_present:1;
3864 uint16_t rsvd_bit1:1;
3865 uint16_t c_present:1;
3869 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
3872 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3874 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3876 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3878 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3880 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3881 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
3885 gre_m = &rte_flow_item_gre_mask;
3886 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3887 rte_be_to_cpu_16(gre_m->protocol));
3888 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3889 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3890 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
3891 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
3892 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
3893 gre_crks_rsvd0_ver_m.c_present);
3894 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
3895 gre_crks_rsvd0_ver_v.c_present &
3896 gre_crks_rsvd0_ver_m.c_present);
3897 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
3898 gre_crks_rsvd0_ver_m.k_present);
3899 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
3900 gre_crks_rsvd0_ver_v.k_present &
3901 gre_crks_rsvd0_ver_m.k_present);
3902 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
3903 gre_crks_rsvd0_ver_m.s_present);
3904 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
3905 gre_crks_rsvd0_ver_v.s_present &
3906 gre_crks_rsvd0_ver_m.s_present);
3910 * Add NVGRE item to matcher and to the value.
3912 * @param[in, out] matcher
3914 * @param[in, out] key
3915 * Flow matcher value.
3917 * Flow pattern to translate.
3919 * Item is inner pattern.
3922 flow_dv_translate_item_nvgre(void *matcher, void *key,
3923 const struct rte_flow_item *item,
3926 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3927 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3928 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3929 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3930 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3931 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
3937 /* For NVGRE, GRE header fields must be set with defined values. */
3938 const struct rte_flow_item_gre gre_spec = {
3939 .c_rsvd0_ver = RTE_BE16(0x2000),
3940 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
3942 const struct rte_flow_item_gre gre_mask = {
3943 .c_rsvd0_ver = RTE_BE16(0xB000),
3944 .protocol = RTE_BE16(UINT16_MAX),
3946 const struct rte_flow_item gre_item = {
3951 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
3955 nvgre_m = &rte_flow_item_nvgre_mask;
3956 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3957 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3958 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3959 memcpy(gre_key_m, tni_flow_id_m, size);
3960 for (i = 0; i < size; ++i)
3961 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
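/*
 * Illustrative sketch (editor's addition, not driver code): in the fixed GRE
 * header used above for NVGRE, c_rsvd0_ver carries the C/K/S flag bits in
 * bits 15, 13 and 12 of the CPU-order value, so the spec 0x2000 means "key
 * present" and the mask 0xB000 also pins the checksum and sequence bits to
 * zero. The hypothetical helper below checks the same condition on a
 * CPU-order c_rsvd0_ver value.
 */
static int
example_nvgre_gre_flags_ok(uint16_t c_rsvd0_ver)
{
	/* NVGRE: key bit set, checksum and sequence bits clear. */
	return (c_rsvd0_ver & 0xB000) == 0x2000;
}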
3965 * Add VXLAN item to matcher and to the value.
3967 * @param[in, out] matcher
3969 * @param[in, out] key
3970 * Flow matcher value.
3972 * Flow pattern to translate.
3974 * Item is inner pattern.
3977 flow_dv_translate_item_vxlan(void *matcher, void *key,
3978 const struct rte_flow_item *item,
3981 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3982 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3985 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3986 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3994 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3996 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3998 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4000 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4002 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4003 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4004 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4005 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4006 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4011 vxlan_m = &rte_flow_item_vxlan_mask;
4012 size = sizeof(vxlan_m->vni);
4013 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4014 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4015 memcpy(vni_m, vxlan_m->vni, size);
4016 for (i = 0; i < size; ++i)
4017 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4021 * Add MPLS item to matcher and to the value.
4023 * @param[in, out] matcher
4025 * @param[in, out] key
4026 * Flow matcher value.
4028 * Flow pattern to translate.
4029 * @param[in] prev_layer
4030 * The protocol layer indicated in the previous item.
4032 * Item is inner pattern.
4035 flow_dv_translate_item_mpls(void *matcher, void *key,
4036 const struct rte_flow_item *item,
4037 uint64_t prev_layer,
4040 const uint32_t *in_mpls_m = item->mask;
4041 const uint32_t *in_mpls_v = item->spec;
4042 uint32_t *out_mpls_m = NULL;
4043 uint32_t *out_mpls_v = NULL;
4044 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4045 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4046 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4048 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4049 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4050 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4052 switch (prev_layer) {
4053 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4054 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4055 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4056 MLX5_UDP_PORT_MPLS);
4058 case MLX5_FLOW_LAYER_GRE:
4059 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4060 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4061 RTE_ETHER_TYPE_MPLS);
4064 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4065 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4072 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4073 switch (prev_layer) {
4074 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4076 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4077 outer_first_mpls_over_udp);
4079 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4080 outer_first_mpls_over_udp);
4082 case MLX5_FLOW_LAYER_GRE:
4084 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4085 outer_first_mpls_over_gre);
4087 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4088 outer_first_mpls_over_gre);
4091 /* Inner MPLS not over GRE is not supported. */
4094 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4098 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4104 if (out_mpls_m && out_mpls_v) {
4105 *out_mpls_m = *in_mpls_m;
4106 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4111 * Add META item to matcher
4113 * @param[in, out] matcher
4115 * @param[in, out] key
4116 * Flow matcher value.
4118 * Flow pattern to translate.
4120 * Item is inner pattern.
4123 flow_dv_translate_item_meta(void *matcher, void *key,
4124 const struct rte_flow_item *item)
4126 const struct rte_flow_item_meta *meta_m;
4127 const struct rte_flow_item_meta *meta_v;
4129 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4131 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4133 meta_m = (const void *)item->mask;
4135 meta_m = &rte_flow_item_meta_mask;
4136 meta_v = (const void *)item->spec;
4138 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4139 rte_be_to_cpu_32(meta_m->data));
4140 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4141 rte_be_to_cpu_32(meta_v->data & meta_m->data));
4146 * Add source vport match to the specified matcher.
4148 * @param[in, out] matcher
4150 * @param[in, out] key
4151 * Flow matcher value.
4153 * Source vport value to match
4158 flow_dv_translate_item_source_vport(void *matcher, void *key,
4159 int16_t port, uint16_t mask)
4161 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4162 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4164 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4165 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4169 * Translate port-id item to eswitch match on port-id.
4172 * The device to configure through.
4173 * @param[in, out] matcher
4175 * @param[in, out] key
4176 * Flow matcher value.
4178 * Flow pattern to translate.
4181 * 0 on success, a negative errno value otherwise.
4184 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4185 void *key, const struct rte_flow_item *item)
4187 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4188 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4189 uint16_t mask, val, id;
4192 mask = pid_m ? pid_m->id : 0xffff;
4193 id = pid_v ? pid_v->id : dev->data->port_id;
4194 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
4197 flow_dv_translate_item_source_vport(matcher, key, val, mask);
4202 * Add ICMP6 item to matcher and to the value.
4204 * @param[in, out] matcher
4206 * @param[in, out] key
4207 * Flow matcher value.
4209 * Flow pattern to translate.
4211 * Item is inner pattern.
4214 flow_dv_translate_item_icmp6(void *matcher, void *key,
4215 const struct rte_flow_item *item,
4218 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4219 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4222 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4224 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4226 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4228 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4230 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4232 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4234 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4235 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
4239 icmp6_m = &rte_flow_item_icmp6_mask;
4240 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
4241 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
4242 icmp6_v->type & icmp6_m->type);
4243 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
4244 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
4245 icmp6_v->code & icmp6_m->code);
4249 * Add ICMP item to matcher and to the value.
4251 * @param[in, out] matcher
4253 * @param[in, out] key
4254 * Flow matcher value.
4256 * Flow pattern to translate.
4258 * Item is inner pattern.
4261 flow_dv_translate_item_icmp(void *matcher, void *key,
4262 const struct rte_flow_item *item,
4265 const struct rte_flow_item_icmp *icmp_m = item->mask;
4266 const struct rte_flow_item_icmp *icmp_v = item->spec;
4269 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4271 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4273 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4275 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4277 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4279 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4281 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4282 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
4286 icmp_m = &rte_flow_item_icmp_mask;
4287 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
4288 icmp_m->hdr.icmp_type);
4289 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
4290 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
4291 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
4292 icmp_m->hdr.icmp_code);
4293 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
4294 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
4297 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
4299 #define HEADER_IS_ZERO(match_criteria, headers) \
4300 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
4301 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
4304 * Calculate flow matcher enable bitmap.
4306 * @param match_criteria
4307 * Pointer to flow matcher criteria.
4310 * Bitmap of enabled fields.
4313 flow_dv_matcher_enable(uint32_t *match_criteria)
4315 uint8_t match_criteria_enable;
4317 match_criteria_enable =
4318 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
4319 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
4320 match_criteria_enable |=
4321 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
4322 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
4323 match_criteria_enable |=
4324 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
4325 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
4326 match_criteria_enable |=
4327 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
4328 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
4329 match_criteria_enable |=
4330 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
4331 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
4332 return match_criteria_enable;
4339 * @param dev[in, out]
4340 * Pointer to rte_eth_dev structure.
4341 * @param[in] table_id
4344 * Direction of the table.
4345 * @param[in] transfer
4346 * E-Switch or NIC flow.
4348 * pointer to error structure.
4351 * Returns the table resource based on the index, NULL in case of failure.
4353 static struct mlx5_flow_tbl_resource *
4354 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
4355 uint32_t table_id, uint8_t egress,
4357 struct rte_flow_error *error)
4359 struct mlx5_priv *priv = dev->data->dev_private;
4360 struct mlx5_ibv_shared *sh = priv->sh;
4361 struct mlx5_flow_tbl_resource *tbl;
4363 #ifdef HAVE_MLX5DV_DR
4365 tbl = &sh->fdb_tbl[table_id];
4367 tbl->obj = mlx5_glue->dr_create_flow_tbl
4368 (sh->fdb_domain, table_id);
4369 } else if (egress) {
4370 tbl = &sh->tx_tbl[table_id];
4372 tbl->obj = mlx5_glue->dr_create_flow_tbl
4373 (sh->tx_domain, table_id);
4375 tbl = &sh->rx_tbl[table_id];
4377 tbl->obj = mlx5_glue->dr_create_flow_tbl
4378 (sh->rx_domain, table_id);
4381 rte_flow_error_set(error, ENOMEM,
4382 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4383 NULL, "cannot create table");
4386 rte_atomic32_inc(&tbl->refcnt);
4392 return &sh->fdb_tbl[table_id];
4394 return &sh->tx_tbl[table_id];
4396 return &sh->rx_tbl[table_id];
4401 * Release a flow table.
4404 * Table resource to be released.
4407 * Returns 0 if the table was released, 1 otherwise.
4410 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
4414 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
4415 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
4423 * Register the flow matcher.
4425 * @param[in, out] dev
4426 * Pointer to rte_eth_dev structure.
4427 * @param[in, out] matcher
4428 * Pointer to flow matcher.
4429 * @param[in, out] dev_flow
4430 * Pointer to the dev_flow.
4432 * Pointer to the error structure.
4435 * 0 on success, a negative errno value otherwise and rte_errno is set.
4438 flow_dv_matcher_register(struct rte_eth_dev *dev,
4439 struct mlx5_flow_dv_matcher *matcher,
4440 struct mlx5_flow *dev_flow,
4441 struct rte_flow_error *error)
4443 struct mlx5_priv *priv = dev->data->dev_private;
4444 struct mlx5_ibv_shared *sh = priv->sh;
4445 struct mlx5_flow_dv_matcher *cache_matcher;
4446 struct mlx5dv_flow_matcher_attr dv_attr = {
4447 .type = IBV_FLOW_ATTR_NORMAL,
4448 .match_mask = (void *)&matcher->mask,
4450 struct mlx5_flow_tbl_resource *tbl = NULL;
4452 /* Lookup from cache. */
4453 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
4454 if (matcher->crc == cache_matcher->crc &&
4455 matcher->priority == cache_matcher->priority &&
4456 matcher->egress == cache_matcher->egress &&
4457 matcher->group == cache_matcher->group &&
4458 matcher->transfer == cache_matcher->transfer &&
4459 !memcmp((const void *)matcher->mask.buf,
4460 (const void *)cache_matcher->mask.buf,
4461 cache_matcher->mask.size)) {
4463 "priority %hd use %s matcher %p: refcnt %d++",
4464 cache_matcher->priority,
4465 cache_matcher->egress ? "tx" : "rx",
4466 (void *)cache_matcher,
4467 rte_atomic32_read(&cache_matcher->refcnt));
4468 rte_atomic32_inc(&cache_matcher->refcnt);
4469 dev_flow->dv.matcher = cache_matcher;
4473 /* Register new matcher. */
4474 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
4476 return rte_flow_error_set(error, ENOMEM,
4477 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4478 "cannot allocate matcher memory");
4479 tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
4480 matcher->egress, matcher->transfer,
4483 rte_free(cache_matcher);
4484 return rte_flow_error_set(error, ENOMEM,
4485 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4486 NULL, "cannot create table");
4488 *cache_matcher = *matcher;
4489 dv_attr.match_criteria_enable =
4490 flow_dv_matcher_enable(cache_matcher->mask.buf);
4491 dv_attr.priority = matcher->priority;
4492 if (matcher->egress)
4493 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
4494 cache_matcher->matcher_object =
4495 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
4496 if (!cache_matcher->matcher_object) {
4497 rte_free(cache_matcher);
4498 #ifdef HAVE_MLX5DV_DR
4499 flow_dv_tbl_resource_release(tbl);
4501 return rte_flow_error_set(error, ENOMEM,
4502 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4503 NULL, "cannot create matcher");
4505 rte_atomic32_inc(&cache_matcher->refcnt);
4506 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
4507 dev_flow->dv.matcher = cache_matcher;
4508 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
4509 cache_matcher->priority,
4510 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
4511 rte_atomic32_read(&cache_matcher->refcnt));
4512 rte_atomic32_inc(&tbl->refcnt);
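/*
 * Matchers are cached per shared context: a lookup that hits an existing
 * entry only bumps the reference counter, otherwise a new matcher object is
 * created on the table returned by flow_dv_tbl_resource_get(). A minimal
 * sketch of the caller side (as used from flow_dv_translate() below):
 *
 *   matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
 *   if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 *           return -rte_errno;
 *   // dev_flow->dv.matcher now points at the (shared) cache entry
 */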
4517 * Find existing tag resource or create and register a new one.
4519 * @param[in, out] dev
4520 * Pointer to rte_eth_dev structure.
4521 * @param[in, out] resource
4522 * Pointer to tag resource.
4523 * @param[in, out] dev_flow
4524 * Pointer to the dev_flow.
4526 * Pointer to the error structure.
4529 * 0 on success, a negative errno value otherwise and rte_errno is set.
4532 flow_dv_tag_resource_register
4533 (struct rte_eth_dev *dev,
4534 struct mlx5_flow_dv_tag_resource *resource,
4535 struct mlx5_flow *dev_flow,
4536 struct rte_flow_error *error)
4538 struct mlx5_priv *priv = dev->data->dev_private;
4539 struct mlx5_ibv_shared *sh = priv->sh;
4540 struct mlx5_flow_dv_tag_resource *cache_resource;
4542 /* Lookup a matching resource from cache. */
4543 LIST_FOREACH(cache_resource, &sh->tags, next) {
4544 if (resource->tag == cache_resource->tag) {
4545 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
4546 (void *)cache_resource,
4547 rte_atomic32_read(&cache_resource->refcnt));
4548 rte_atomic32_inc(&cache_resource->refcnt);
4549 dev_flow->flow->tag_resource = cache_resource;
4553 /* Register new resource. */
4554 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
4555 if (!cache_resource)
4556 return rte_flow_error_set(error, ENOMEM,
4557 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4558 "cannot allocate resource memory");
4559 *cache_resource = *resource;
4560 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
4562 if (!cache_resource->action) {
4563 rte_free(cache_resource);
4564 return rte_flow_error_set(error, ENOMEM,
4565 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4566 NULL, "cannot create action");
4568 rte_atomic32_init(&cache_resource->refcnt);
4569 rte_atomic32_inc(&cache_resource->refcnt);
4570 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
4571 dev_flow->flow->tag_resource = cache_resource;
4572 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
4573 (void *)cache_resource,
4574 rte_atomic32_read(&cache_resource->refcnt));
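/*
 * Tag resources are shared by value: both the FLAG and MARK actions funnel
 * through this cache, so two flows marking with the same id reuse a single
 * mlx5dv tag action. Sketch of the caller side (illustrative; see the
 * FLAG/MARK cases in flow_dv_translate() below, "mark_id" is assumed):
 *
 *   tag_resource.tag = mlx5_flow_mark_set(mark_id);
 *   if (!flow->tag_resource &&
 *       flow_dv_tag_resource_register(dev, &tag_resource, dev_flow, error))
 *           return -rte_errno;
 *   dev_flow->dv.actions[actions_n++] = flow->tag_resource->action;
 */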
4582 * Pointer to Ethernet device.
4584 * Pointer to the tag resource.
4587 * 1 while a reference on it exists, 0 when freed.
4590 flow_dv_tag_release(struct rte_eth_dev *dev,
4591 struct mlx5_flow_dv_tag_resource *tag)
4594 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4595 dev->data->port_id, (void *)tag,
4596 rte_atomic32_read(&tag->refcnt));
4597 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4598 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4599 LIST_REMOVE(tag, next);
4600 DRV_LOG(DEBUG, "port %u tag %p: removed",
4601 dev->data->port_id, (void *)tag);
4609 * Translate port ID action to vport.
4612 * Pointer to rte_eth_dev structure.
4614 * Pointer to the port ID action.
4615 * @param[out] dst_port_id
4616 * The target port ID.
4618 * Pointer to the error structure.
4621 * 0 on success, a negative errno value otherwise and rte_errno is set.
4624 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4625 const struct rte_flow_action *action,
4626 uint32_t *dst_port_id,
4627 struct rte_flow_error *error)
4632 const struct rte_flow_action_port_id *conf =
4633 (const struct rte_flow_action_port_id *)action->conf;
4635 port = conf->original ? dev->data->port_id : conf->id;
4636 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4638 return rte_flow_error_set(error, -ret,
4639 RTE_FLOW_ERROR_TYPE_ACTION,
4641 "No eswitch info was found for port");
4642 *dst_port_id = port_id;
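/*
 * Sketch of the resulting translation for a PORT_ID action (illustrative
 * only): the rte_flow port id is first mapped to its e-switch vport and
 * then wrapped into a cached port-id action resource:
 *
 *   uint32_t vport;
 *   if (flow_dv_translate_action_port_id(dev, action, &vport, error))
 *           return -rte_errno;
 *   port_id_resource.port_id = vport;
 *   flow_dv_port_id_action_resource_register(dev, &port_id_resource,
 *                                            dev_flow, error);
 */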
4647 * Fill the flow with DV spec.
4650 * Pointer to rte_eth_dev structure.
4651 * @param[in, out] dev_flow
4652 * Pointer to the sub flow.
4654 * Pointer to the flow attributes.
4656 * Pointer to the list of items.
4657 * @param[in] actions
4658 * Pointer to the list of actions.
4660 * Pointer to the error structure.
4663 * 0 on success, a negative errno value otherwise and rte_errno is set.
4666 flow_dv_translate(struct rte_eth_dev *dev,
4667 struct mlx5_flow *dev_flow,
4668 const struct rte_flow_attr *attr,
4669 const struct rte_flow_item items[],
4670 const struct rte_flow_action actions[],
4671 struct rte_flow_error *error)
4673 struct mlx5_priv *priv = dev->data->dev_private;
4674 struct rte_flow *flow = dev_flow->flow;
4675 uint64_t item_flags = 0;
4676 uint64_t last_item = 0;
4677 uint64_t action_flags = 0;
4678 uint64_t priority = attr->priority;
4679 struct mlx5_flow_dv_matcher matcher = {
4681 .size = sizeof(matcher.mask.buf),
4685 bool actions_end = false;
4686 struct mlx5_flow_dv_modify_hdr_resource res = {
4687 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4688 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4690 union flow_dv_attr flow_attr = { .attr = 0 };
4691 struct mlx5_flow_dv_tag_resource tag_resource;
4692 uint32_t modify_action_position = UINT32_MAX;
4693 void *match_mask = matcher.mask.buf;
4694 void *match_value = dev_flow->dv.value.buf;
4695 uint8_t next_protocol = 0xff;
4697 flow->group = attr->group;
4699 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4700 if (priority == MLX5_FLOW_PRIO_RSVD)
4701 priority = priv->config.flow_prio - 1;
4702 for (; !actions_end ; actions++) {
4703 const struct rte_flow_action_queue *queue;
4704 const struct rte_flow_action_rss *rss;
4705 const struct rte_flow_action *action = actions;
4706 const struct rte_flow_action_count *count = action->conf;
4707 const uint8_t *rss_key;
4708 const struct rte_flow_action_jump *jump_data;
4709 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4710 struct mlx5_flow_tbl_resource *tbl;
4711 uint32_t port_id = 0;
4712 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4714 switch (actions->type) {
4715 case RTE_FLOW_ACTION_TYPE_VOID:
4717 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4718 if (flow_dv_translate_action_port_id(dev, action,
4721 port_id_resource.port_id = port_id;
4722 if (flow_dv_port_id_action_resource_register
4723 (dev, &port_id_resource, dev_flow, error))
4725 dev_flow->dv.actions[actions_n++] =
4726 dev_flow->dv.port_id_action->action;
4727 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4729 case RTE_FLOW_ACTION_TYPE_FLAG:
4731 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4732 if (!flow->tag_resource)
4733 if (flow_dv_tag_resource_register
4734 (dev, &tag_resource, dev_flow, error))
4736 dev_flow->dv.actions[actions_n++] =
4737 flow->tag_resource->action;
4738 action_flags |= MLX5_FLOW_ACTION_FLAG;
4740 case RTE_FLOW_ACTION_TYPE_MARK:
4741 tag_resource.tag = mlx5_flow_mark_set
4742 (((const struct rte_flow_action_mark *)
4743 (actions->conf))->id);
4744 if (!flow->tag_resource)
4745 if (flow_dv_tag_resource_register
4746 (dev, &tag_resource, dev_flow, error))
4748 dev_flow->dv.actions[actions_n++] =
4749 flow->tag_resource->action;
4750 action_flags |= MLX5_FLOW_ACTION_MARK;
4752 case RTE_FLOW_ACTION_TYPE_DROP:
4753 action_flags |= MLX5_FLOW_ACTION_DROP;
4755 case RTE_FLOW_ACTION_TYPE_QUEUE:
4756 queue = actions->conf;
4757 flow->rss.queue_num = 1;
4758 (*flow->queue)[0] = queue->index;
4759 action_flags |= MLX5_FLOW_ACTION_QUEUE;
4761 case RTE_FLOW_ACTION_TYPE_RSS:
4762 rss = actions->conf;
4764 memcpy((*flow->queue), rss->queue,
4765 rss->queue_num * sizeof(uint16_t));
4766 flow->rss.queue_num = rss->queue_num;
4767 /* NULL RSS key indicates default RSS key. */
4768 rss_key = !rss->key ? rss_hash_default_key : rss->key;
4769 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4770 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4771 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4772 flow->rss.level = rss->level;
4773 action_flags |= MLX5_FLOW_ACTION_RSS;
4775 case RTE_FLOW_ACTION_TYPE_COUNT:
4776 if (!priv->config.devx) {
4777 rte_errno = ENOTSUP;
4780 flow->counter = flow_dv_counter_alloc(dev,
4784 if (flow->counter == NULL)
4786 dev_flow->dv.actions[actions_n++] =
4787 flow->counter->action;
4788 action_flags |= MLX5_FLOW_ACTION_COUNT;
4791 if (rte_errno == ENOTSUP)
4792 return rte_flow_error_set
4794 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4796 "count action not supported");
4798 return rte_flow_error_set
4800 RTE_FLOW_ERROR_TYPE_ACTION,
4802 "cannot create counter"
4804 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4805 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4806 if (flow_dv_create_action_l2_encap(dev, actions,
4811 dev_flow->dv.actions[actions_n++] =
4812 dev_flow->dv.encap_decap->verbs_action;
4813 action_flags |= actions->type ==
4814 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4815 MLX5_FLOW_ACTION_VXLAN_ENCAP :
4816 MLX5_FLOW_ACTION_NVGRE_ENCAP;
4818 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4819 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4820 if (flow_dv_create_action_l2_decap(dev, dev_flow,
4824 dev_flow->dv.actions[actions_n++] =
4825 dev_flow->dv.encap_decap->verbs_action;
4826 action_flags |= actions->type ==
4827 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4828 MLX5_FLOW_ACTION_VXLAN_DECAP :
4829 MLX5_FLOW_ACTION_NVGRE_DECAP;
4831 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4832 /* Handle encap with preceding decap. */
4833 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4834 if (flow_dv_create_action_raw_encap
4835 (dev, actions, dev_flow, attr, error))
4837 dev_flow->dv.actions[actions_n++] =
4838 dev_flow->dv.encap_decap->verbs_action;
4840 /* Handle encap without preceding decap. */
4841 if (flow_dv_create_action_l2_encap
4842 (dev, actions, dev_flow, attr->transfer,
4845 dev_flow->dv.actions[actions_n++] =
4846 dev_flow->dv.encap_decap->verbs_action;
4848 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4850 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4851 /* Check if this decap is followed by encap. */
4852 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4853 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4856 /* Handle decap only if it isn't followed by encap. */
4857 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4858 if (flow_dv_create_action_l2_decap
4859 (dev, dev_flow, attr->transfer, error))
4861 dev_flow->dv.actions[actions_n++] =
4862 dev_flow->dv.encap_decap->verbs_action;
4864 /* If decap is followed by encap, handle it at encap. */
4865 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4867 case RTE_FLOW_ACTION_TYPE_JUMP:
4868 jump_data = action->conf;
4869 tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4872 attr->transfer, error);
4874 return rte_flow_error_set
4876 RTE_FLOW_ERROR_TYPE_ACTION,
4878 "cannot create jump action.");
4879 jump_tbl_resource.tbl = tbl;
4880 if (flow_dv_jump_tbl_resource_register
4881 (dev, &jump_tbl_resource, dev_flow, error)) {
4882 flow_dv_tbl_resource_release(tbl);
4883 return rte_flow_error_set
4885 RTE_FLOW_ERROR_TYPE_ACTION,
4887 "cannot create jump action.");
4889 dev_flow->dv.actions[actions_n++] =
4890 dev_flow->dv.jump->action;
4891 action_flags |= MLX5_FLOW_ACTION_JUMP;
4893 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4894 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4895 if (flow_dv_convert_action_modify_mac(&res, actions,
4898 action_flags |= actions->type ==
4899 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4900 MLX5_FLOW_ACTION_SET_MAC_SRC :
4901 MLX5_FLOW_ACTION_SET_MAC_DST;
4903 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4904 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4905 if (flow_dv_convert_action_modify_ipv4(&res, actions,
4908 action_flags |= actions->type ==
4909 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4910 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4911 MLX5_FLOW_ACTION_SET_IPV4_DST;
4913 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4914 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4915 if (flow_dv_convert_action_modify_ipv6(&res, actions,
4918 action_flags |= actions->type ==
4919 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4920 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4921 MLX5_FLOW_ACTION_SET_IPV6_DST;
4923 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4924 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4925 if (flow_dv_convert_action_modify_tp(&res, actions,
4929 action_flags |= actions->type ==
4930 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4931 MLX5_FLOW_ACTION_SET_TP_SRC :
4932 MLX5_FLOW_ACTION_SET_TP_DST;
4934 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4935 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4939 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4941 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4942 if (flow_dv_convert_action_modify_ttl(&res, actions,
4946 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4948 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4949 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4950 if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4953 action_flags |= actions->type ==
4954 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4955 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4956 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4959 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4960 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4961 if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4964 action_flags |= actions->type ==
4965 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4966 MLX5_FLOW_ACTION_INC_TCP_ACK :
4967 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4969 case RTE_FLOW_ACTION_TYPE_END:
4971 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4972 /* create modify action if needed. */
4973 if (flow_dv_modify_hdr_resource_register
4978 dev_flow->dv.actions[modify_action_position] =
4979 dev_flow->dv.modify_hdr->verbs_action;
4985 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4986 modify_action_position == UINT32_MAX)
4987 modify_action_position = actions_n++;
4989 dev_flow->dv.actions_n = actions_n;
4990 flow->actions = action_flags;
4991 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4992 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4994 switch (items->type) {
4995 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4996 flow_dv_translate_item_port_id(dev, match_mask,
4997 match_value, items);
4998 last_item = MLX5_FLOW_ITEM_PORT_ID;
5000 case RTE_FLOW_ITEM_TYPE_ETH:
5001 flow_dv_translate_item_eth(match_mask, match_value,
5003 matcher.priority = MLX5_PRIORITY_MAP_L2;
5004 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5005 MLX5_FLOW_LAYER_OUTER_L2;
5007 case RTE_FLOW_ITEM_TYPE_VLAN:
5008 flow_dv_translate_item_vlan(dev_flow,
5009 match_mask, match_value,
5011 matcher.priority = MLX5_PRIORITY_MAP_L2;
5012 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5013 MLX5_FLOW_LAYER_INNER_VLAN) :
5014 (MLX5_FLOW_LAYER_OUTER_L2 |
5015 MLX5_FLOW_LAYER_OUTER_VLAN);
5017 case RTE_FLOW_ITEM_TYPE_IPV4:
5018 mlx5_flow_tunnel_ip_check(items, next_protocol,
5019 &item_flags, &tunnel);
5020 flow_dv_translate_item_ipv4(match_mask, match_value,
5021 items, tunnel, attr->group);
5022 matcher.priority = MLX5_PRIORITY_MAP_L3;
5023 dev_flow->dv.hash_fields |=
5024 mlx5_flow_hashfields_adjust
5026 MLX5_IPV4_LAYER_TYPES,
5027 MLX5_IPV4_IBV_RX_HASH);
5028 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5029 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5030 if (items->mask != NULL &&
5031 ((const struct rte_flow_item_ipv4 *)
5032 items->mask)->hdr.next_proto_id) {
5034 ((const struct rte_flow_item_ipv4 *)
5035 (items->spec))->hdr.next_proto_id;
5037 ((const struct rte_flow_item_ipv4 *)
5038 (items->mask))->hdr.next_proto_id;
5040 /* Reset for inner layer. */
5041 next_protocol = 0xff;
5044 case RTE_FLOW_ITEM_TYPE_IPV6:
5045 mlx5_flow_tunnel_ip_check(items, next_protocol,
5046 &item_flags, &tunnel);
5047 flow_dv_translate_item_ipv6(match_mask, match_value,
5048 items, tunnel, attr->group);
5049 matcher.priority = MLX5_PRIORITY_MAP_L3;
5050 dev_flow->dv.hash_fields |=
5051 mlx5_flow_hashfields_adjust
5053 MLX5_IPV6_LAYER_TYPES,
5054 MLX5_IPV6_IBV_RX_HASH);
5055 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5056 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5057 if (items->mask != NULL &&
5058 ((const struct rte_flow_item_ipv6 *)
5059 items->mask)->hdr.proto) {
5061 ((const struct rte_flow_item_ipv6 *)
5062 items->spec)->hdr.proto;
5064 ((const struct rte_flow_item_ipv6 *)
5065 items->mask)->hdr.proto;
5067 /* Reset for inner layer. */
5068 next_protocol = 0xff;
5071 case RTE_FLOW_ITEM_TYPE_TCP:
5072 flow_dv_translate_item_tcp(match_mask, match_value,
5074 matcher.priority = MLX5_PRIORITY_MAP_L4;
5075 dev_flow->dv.hash_fields |=
5076 mlx5_flow_hashfields_adjust
5077 (dev_flow, tunnel, ETH_RSS_TCP,
5078 IBV_RX_HASH_SRC_PORT_TCP |
5079 IBV_RX_HASH_DST_PORT_TCP);
5080 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5081 MLX5_FLOW_LAYER_OUTER_L4_TCP;
5083 case RTE_FLOW_ITEM_TYPE_UDP:
5084 flow_dv_translate_item_udp(match_mask, match_value,
5086 matcher.priority = MLX5_PRIORITY_MAP_L4;
5087 dev_flow->dv.hash_fields |=
5088 mlx5_flow_hashfields_adjust
5089 (dev_flow, tunnel, ETH_RSS_UDP,
5090 IBV_RX_HASH_SRC_PORT_UDP |
5091 IBV_RX_HASH_DST_PORT_UDP);
5092 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5093 MLX5_FLOW_LAYER_OUTER_L4_UDP;
5095 case RTE_FLOW_ITEM_TYPE_GRE:
5096 flow_dv_translate_item_gre(match_mask, match_value,
5098 last_item = MLX5_FLOW_LAYER_GRE;
5100 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5101 flow_dv_translate_item_gre_key(match_mask,
5102 match_value, items);
5103 last_item = MLX5_FLOW_LAYER_GRE_KEY;
5105 case RTE_FLOW_ITEM_TYPE_NVGRE:
5106 flow_dv_translate_item_nvgre(match_mask, match_value,
5108 last_item = MLX5_FLOW_LAYER_GRE;
5110 case RTE_FLOW_ITEM_TYPE_VXLAN:
5111 flow_dv_translate_item_vxlan(match_mask, match_value,
5113 last_item = MLX5_FLOW_LAYER_VXLAN;
5115 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5116 flow_dv_translate_item_vxlan(match_mask, match_value,
5118 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5120 case RTE_FLOW_ITEM_TYPE_MPLS:
5121 flow_dv_translate_item_mpls(match_mask, match_value,
5122 items, last_item, tunnel);
5123 last_item = MLX5_FLOW_LAYER_MPLS;
5125 case RTE_FLOW_ITEM_TYPE_META:
5126 flow_dv_translate_item_meta(match_mask, match_value,
5128 last_item = MLX5_FLOW_ITEM_METADATA;
5130 case RTE_FLOW_ITEM_TYPE_ICMP:
5131 flow_dv_translate_item_icmp(match_mask, match_value,
5133 last_item = MLX5_FLOW_LAYER_ICMP;
5135 case RTE_FLOW_ITEM_TYPE_ICMP6:
5136 flow_dv_translate_item_icmp6(match_mask, match_value,
5138 last_item = MLX5_FLOW_LAYER_ICMP6;
5143 item_flags |= last_item;
5146 * In case of ingress traffic when E-Switch mode is enabled,
5147 * there are two cases where the source port must be set manually:
5148 * the first is a NIC steering rule, and the second is an
5149 * E-Switch rule where no port_id item was found. In both cases
5150 * the source port is set according to the current port in use.
5152 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
5153 (priv->representor || priv->master)) {
5154 if (flow_dv_translate_item_port_id(dev, match_mask,
5158 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
5159 dev_flow->dv.value.buf));
5160 dev_flow->layers = item_flags;
5161 /* Register matcher. */
5162 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
5164 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
5166 matcher.egress = attr->egress;
5167 matcher.group = attr->group;
5168 matcher.transfer = attr->transfer;
5169 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
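/*
 * For reference, a minimal flow that exercises this translation path
 * (illustrative application-side sketch, not part of the PMD):
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .group = 0 };
 *   struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * Each item updates matcher.mask.buf / dev_flow->dv.value.buf and the RSS
 * hash fields; each action appends an entry to dev_flow->dv.actions[].
 */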
5175 * Apply the flow to the NIC.
5178 * Pointer to the Ethernet device structure.
5179 * @param[in, out] flow
5180 * Pointer to flow structure.
5182 * Pointer to error structure.
5185 * 0 on success, a negative errno value otherwise and rte_errno is set.
5188 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5189 struct rte_flow_error *error)
5191 struct mlx5_flow_dv *dv;
5192 struct mlx5_flow *dev_flow;
5193 struct mlx5_priv *priv = dev->data->dev_private;
5197 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5200 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
5201 if (flow->transfer) {
5202 dv->actions[n++] = priv->sh->esw_drop_action;
5204 dv->hrxq = mlx5_hrxq_drop_new(dev);
5208 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5210 "cannot get drop hash queue");
5213 dv->actions[n++] = dv->hrxq->action;
5215 } else if (flow->actions &
5216 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
5217 struct mlx5_hrxq *hrxq;
5219 hrxq = mlx5_hrxq_get(dev, flow->key,
5220 MLX5_RSS_HASH_KEY_LEN,
5223 flow->rss.queue_num);
5225 hrxq = mlx5_hrxq_new
5226 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
5227 dv->hash_fields, (*flow->queue),
5228 flow->rss.queue_num,
5229 !!(dev_flow->layers &
5230 MLX5_FLOW_LAYER_TUNNEL));
5235 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5236 "cannot get hash queue");
5240 dv->actions[n++] = dv->hrxq->action;
5243 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
5244 (void *)&dv->value, n,
5247 rte_flow_error_set(error, errno,
5248 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5250 "hardware refuses to create flow");
5253 if (priv->vmwa_context &&
5254 dev_flow->dv.vf_vlan.tag &&
5255 !dev_flow->dv.vf_vlan.created) {
5257 * The rule contains the VLAN pattern.
5258 * For a VF we create a VLAN interface
5259 * so that the hypervisor sets the correct
5260 * e-Switch vport context.
5262 mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
5267 err = rte_errno; /* Save rte_errno before cleanup. */
5268 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5269 struct mlx5_flow_dv *dv = &dev_flow->dv;
5271 if (flow->actions & MLX5_FLOW_ACTION_DROP)
5272 mlx5_hrxq_drop_release(dev);
5274 mlx5_hrxq_release(dev, dv->hrxq);
5277 if (dev_flow->dv.vf_vlan.tag &&
5278 dev_flow->dv.vf_vlan.created)
5279 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5281 rte_errno = err; /* Restore rte_errno. */
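/*
 * The unwind above follows the usual DPDK error convention, roughly:
 *
 *   err = rte_errno;     // keep the original failure cause
 *   ...release the hrxq / drop queue / VF VLAN references taken so far...
 *   rte_errno = err;     // restore before returning -rte_errno
 *
 * so partially applied device flows never leak RX queue references.
 */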
5286 * Release the flow matcher.
5289 * Pointer to Ethernet device.
5291 * Pointer to mlx5_flow.
5294 * 1 while a reference on it exists, 0 when freed.
5297 flow_dv_matcher_release(struct rte_eth_dev *dev,
5298 struct mlx5_flow *flow)
5300 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
5301 struct mlx5_priv *priv = dev->data->dev_private;
5302 struct mlx5_ibv_shared *sh = priv->sh;
5303 struct mlx5_flow_tbl_resource *tbl;
5305 assert(matcher->matcher_object);
5306 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
5307 dev->data->port_id, (void *)matcher,
5308 rte_atomic32_read(&matcher->refcnt));
5309 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
5310 claim_zero(mlx5_glue->dv_destroy_flow_matcher
5311 (matcher->matcher_object));
5312 LIST_REMOVE(matcher, next);
5313 if (matcher->egress)
5314 tbl = &sh->tx_tbl[matcher->group];
5316 tbl = &sh->rx_tbl[matcher->group];
5317 flow_dv_tbl_resource_release(tbl);
5319 DRV_LOG(DEBUG, "port %u matcher %p: removed",
5320 dev->data->port_id, (void *)matcher);
5327 * Release an encap/decap resource.
5330 * Pointer to mlx5_flow.
5333 * 1 while a reference on it exists, 0 when freed.
5336 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
5338 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
5339 flow->dv.encap_decap;
5341 assert(cache_resource->verbs_action);
5342 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
5343 (void *)cache_resource,
5344 rte_atomic32_read(&cache_resource->refcnt));
5345 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5346 claim_zero(mlx5_glue->destroy_flow_action
5347 (cache_resource->verbs_action));
5348 LIST_REMOVE(cache_resource, next);
5349 rte_free(cache_resource);
5350 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
5351 (void *)cache_resource);
5358 * Release a jump to table action resource.
5361 * Pointer to mlx5_flow.
5364 * 1 while a reference on it exists, 0 when freed.
5367 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
5369 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
5372 assert(cache_resource->action);
5373 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
5374 (void *)cache_resource,
5375 rte_atomic32_read(&cache_resource->refcnt));
5376 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5377 claim_zero(mlx5_glue->destroy_flow_action
5378 (cache_resource->action));
5379 LIST_REMOVE(cache_resource, next);
5380 flow_dv_tbl_resource_release(cache_resource->tbl);
5381 rte_free(cache_resource);
5382 DRV_LOG(DEBUG, "jump table resource %p: removed",
5383 (void *)cache_resource);
5390 * Release a modify-header resource.
5393 * Pointer to mlx5_flow.
5396 * 1 while a reference on it exists, 0 when freed.
5399 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
5401 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
5402 flow->dv.modify_hdr;
5404 assert(cache_resource->verbs_action);
5405 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
5406 (void *)cache_resource,
5407 rte_atomic32_read(&cache_resource->refcnt));
5408 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5409 claim_zero(mlx5_glue->destroy_flow_action
5410 (cache_resource->verbs_action));
5411 LIST_REMOVE(cache_resource, next);
5412 rte_free(cache_resource);
5413 DRV_LOG(DEBUG, "modify-header resource %p: removed",
5414 (void *)cache_resource);
5421 * Release port ID action resource.
5424 * Pointer to mlx5_flow.
5427 * 1 while a reference on it exists, 0 when freed.
5430 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
5432 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
5433 flow->dv.port_id_action;
5435 assert(cache_resource->action);
5436 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
5437 (void *)cache_resource,
5438 rte_atomic32_read(&cache_resource->refcnt));
5439 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5440 claim_zero(mlx5_glue->destroy_flow_action
5441 (cache_resource->action));
5442 LIST_REMOVE(cache_resource, next);
5443 rte_free(cache_resource);
5444 DRV_LOG(DEBUG, "port id action resource %p: removed",
5445 (void *)cache_resource);
5452 * Remove the flow from the NIC but keep it in memory.
5455 * Pointer to Ethernet device.
5456 * @param[in, out] flow
5457 * Pointer to flow structure.
5460 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5462 struct mlx5_flow_dv *dv;
5463 struct mlx5_flow *dev_flow;
5467 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5470 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
5474 if (flow->actions & MLX5_FLOW_ACTION_DROP)
5475 mlx5_hrxq_drop_release(dev);
5477 mlx5_hrxq_release(dev, dv->hrxq);
5480 if (dev_flow->dv.vf_vlan.tag &&
5481 dev_flow->dv.vf_vlan.created)
5482 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5487 * Remove the flow from the NIC and the memory.
5490 * Pointer to the Ethernet device structure.
5491 * @param[in, out] flow
5492 * Pointer to flow structure.
5495 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5497 struct mlx5_flow *dev_flow;
5501 flow_dv_remove(dev, flow);
5502 if (flow->counter) {
5503 flow_dv_counter_release(dev, flow->counter);
5504 flow->counter = NULL;
5506 if (flow->tag_resource) {
5507 flow_dv_tag_release(dev, flow->tag_resource);
5508 flow->tag_resource = NULL;
5510 while (!LIST_EMPTY(&flow->dev_flows)) {
5511 dev_flow = LIST_FIRST(&flow->dev_flows);
5512 LIST_REMOVE(dev_flow, next);
5513 if (dev_flow->dv.matcher)
5514 flow_dv_matcher_release(dev, dev_flow);
5515 if (dev_flow->dv.encap_decap)
5516 flow_dv_encap_decap_resource_release(dev_flow);
5517 if (dev_flow->dv.modify_hdr)
5518 flow_dv_modify_hdr_resource_release(dev_flow);
5519 if (dev_flow->dv.jump)
5520 flow_dv_jump_tbl_resource_release(dev_flow);
5521 if (dev_flow->dv.port_id_action)
5522 flow_dv_port_id_action_resource_release(dev_flow);
5528 * Query a dv flow rule for its statistics via devx.
5531 * Pointer to Ethernet device.
5533 * Pointer to the sub flow.
5535 * Data retrieved by the query.
5537 * Perform verbose error reporting if not NULL.
5540 * 0 on success, a negative errno value otherwise and rte_errno is set.
5543 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
5544 void *data, struct rte_flow_error *error)
5546 struct mlx5_priv *priv = dev->data->dev_private;
5547 struct rte_flow_query_count *qc = data;
5549 if (!priv->config.devx)
5550 return rte_flow_error_set(error, ENOTSUP,
5551 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5553 "counters are not supported");
5554 if (flow->counter) {
5555 uint64_t pkts, bytes;
5556 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
5560 return rte_flow_error_set(error, -err,
5561 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5562 NULL, "cannot read counters");
5565 qc->hits = pkts - flow->counter->hits;
5566 qc->bytes = bytes - flow->counter->bytes;
5568 flow->counter->hits = pkts;
5569 flow->counter->bytes = bytes;
5573 return rte_flow_error_set(error, EINVAL,
5574 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5576 "counters are not available");
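/*
 * Application-side sketch of reading these counters through the generic
 * API (illustrative; "flow" is a previously created rte_flow handle):
 *
 *   struct rte_flow_query_count qc = { .reset = 0 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0) {
 *           // qc.hits and qc.bytes now hold the accumulated statistics
 *   }
 */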
5582 * @see rte_flow_query()
5586 flow_dv_query(struct rte_eth_dev *dev,
5587 struct rte_flow *flow __rte_unused,
5588 const struct rte_flow_action *actions __rte_unused,
5589 void *data __rte_unused,
5590 struct rte_flow_error *error __rte_unused)
5594 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5595 switch (actions->type) {
5596 case RTE_FLOW_ACTION_TYPE_VOID:
5598 case RTE_FLOW_ACTION_TYPE_COUNT:
5599 ret = flow_dv_query_count(dev, flow, data, error);
5602 return rte_flow_error_set(error, ENOTSUP,
5603 RTE_FLOW_ERROR_TYPE_ACTION,
5605 "action not supported");
5612 * Mutex-protected thunk to flow_dv_translate().
5615 flow_d_translate(struct rte_eth_dev *dev,
5616 struct mlx5_flow *dev_flow,
5617 const struct rte_flow_attr *attr,
5618 const struct rte_flow_item items[],
5619 const struct rte_flow_action actions[],
5620 struct rte_flow_error *error)
5624 flow_d_shared_lock(dev);
5625 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
5626 flow_d_shared_unlock(dev);
5631 * Mutex-protected thunk to flow_dv_apply().
5634 flow_d_apply(struct rte_eth_dev *dev,
5635 struct rte_flow *flow,
5636 struct rte_flow_error *error)
5640 flow_d_shared_lock(dev);
5641 ret = flow_dv_apply(dev, flow, error);
5642 flow_d_shared_unlock(dev);
5647 * Mutex-protected thunk to flow_dv_remove().
5650 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5652 flow_d_shared_lock(dev);
5653 flow_dv_remove(dev, flow);
5654 flow_d_shared_unlock(dev);
5658 * Mutex-protected thunk to flow_dv_destroy().
5661 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5663 flow_d_shared_lock(dev);
5664 flow_dv_destroy(dev, flow);
5665 flow_d_shared_unlock(dev);
5668 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
5669 .validate = flow_dv_validate,
5670 .prepare = flow_dv_prepare,
5671 .translate = flow_d_translate,
5672 .apply = flow_d_apply,
5673 .remove = flow_d_remove,
5674 .destroy = flow_d_destroy,
5675 .query = flow_dv_query,
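/*
 * Rough sketch of how one of these driver entry points is invoked by the
 * generic mlx5 flow layer (illustrative):
 *
 *   const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *   ret = fops->translate(dev, dev_flow, attr, items, actions, error);
 *
 * The translate/apply/remove/destroy members point at the flow_d_* thunks
 * above so that each operation runs between flow_d_shared_lock() and
 * flow_d_shared_unlock().
 */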
5678 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */