1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #include "mlx5_rxtx.h"
37 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
40 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
43 #ifndef HAVE_MLX5DV_DR_ESWITCH
44 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
45 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
49 #ifndef HAVE_MLX5DV_DR
50 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
53 /* VLAN header definitions */
54 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
55 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
56 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
57 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
58 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
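/*
 * Illustrative sketch (not upstream code): how the masks above split the
 * 16-bit VLAN TCI. A host-order TCI of 0xa005 carries PCP 5 and VID 5.
 * The helper names below are hypothetical and only serve as an example.
 */
static inline uint8_t
mlx5_flow_example_tci_pcp(uint16_t tci)
{
	return (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
}

static inline uint16_t
mlx5_flow_example_tci_vid(uint16_t tci)
{
	return tci & MLX5DV_FLOW_VLAN_VID_MASK;
}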
73 * Initialize flow attributes structure according to flow items' types.
76 * Pointer to item specification.
78 * Pointer to flow attributes structure.
81 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
83 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
85 case RTE_FLOW_ITEM_TYPE_IPV4:
88 case RTE_FLOW_ITEM_TYPE_IPV6:
91 case RTE_FLOW_ITEM_TYPE_UDP:
94 case RTE_FLOW_ITEM_TYPE_TCP:
104 struct field_modify_info {
105 uint32_t size; /* Size of field in protocol header, in bytes. */
106 uint32_t offset; /* Offset of field in protocol header, in bytes. */
107 enum mlx5_modification_field id;
110 struct field_modify_info modify_eth[] = {
111 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
112 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
113 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
114 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
118 struct field_modify_info modify_vlan_out_first_vid[] = {
119 /* Field size in this table is given in bits, not bytes. */
120 {12, 0, MLX5_MODI_OUT_FIRST_VID},
124 struct field_modify_info modify_ipv4[] = {
125 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
126 {4, 12, MLX5_MODI_OUT_SIPV4},
127 {4, 16, MLX5_MODI_OUT_DIPV4},
131 struct field_modify_info modify_ipv6[] = {
132 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
133 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
134 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
135 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
136 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
137 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
138 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
139 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
140 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
144 struct field_modify_info modify_udp[] = {
145 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
146 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
150 struct field_modify_info modify_tcp[] = {
151 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
152 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
153 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
154 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
159 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
160 uint8_t next_protocol, uint64_t *item_flags,
163 assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
164 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
165 if (next_protocol == IPPROTO_IPIP) {
166 *item_flags |= MLX5_FLOW_LAYER_IPIP;
169 if (next_protocol == IPPROTO_IPV6) {
170 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
176 * Acquire the synchronizing object to protect multithreaded access
177 * to shared dv context. Lock occurs only if context is actually
178 * shared, i.e. we have multiport IB device and representors are created.
182 * Pointer to the rte_eth_dev structure.
185 flow_d_shared_lock(struct rte_eth_dev *dev)
187 struct mlx5_priv *priv = dev->data->dev_private;
188 struct mlx5_ibv_shared *sh = priv->sh;
190 if (sh->dv_refcnt > 1) {
193 ret = pthread_mutex_lock(&sh->dv_mutex);
200 flow_d_shared_unlock(struct rte_eth_dev *dev)
202 struct mlx5_priv *priv = dev->data->dev_private;
203 struct mlx5_ibv_shared *sh = priv->sh;
205 if (sh->dv_refcnt > 1) {
208 ret = pthread_mutex_unlock(&sh->dv_mutex);
215 * Convert modify-header action to DV specification.
218 * Pointer to item specification.
220 * Pointer to field modification information.
221 * @param[in,out] resource
222 * Pointer to the modify-header resource.
224 * Type of modification.
226 * Pointer to the error structure.
229 * 0 on success, a negative errno value otherwise and rte_errno is set.
232 flow_dv_convert_modify_action(struct rte_flow_item *item,
233 struct field_modify_info *field,
234 struct mlx5_flow_dv_modify_hdr_resource *resource,
236 struct rte_flow_error *error)
238 uint32_t i = resource->actions_num;
239 struct mlx5_modification_cmd *actions = resource->actions;
240 const uint8_t *spec = item->spec;
241 const uint8_t *mask = item->mask;
244 while (field->size) {
246 /* Generate modify command for each mask segment. */
247 memcpy(&set, &mask[field->offset], field->size);
249 if (i >= MLX5_MODIFY_NUM)
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
252 "too many items to modify");
253 actions[i].action_type = type;
254 actions[i].field = field->id;
255 actions[i].length = field->size ==
256 4 ? 0 : field->size * 8;
257 rte_memcpy(&actions[i].data[4 - field->size],
258 &spec[field->offset], field->size);
259 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
262 if (resource->actions_num != i)
263 resource->actions_num = i;
266 if (!resource->actions_num)
267 return rte_flow_error_set(error, EINVAL,
268 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
269 "invalid modification flow item");
274 * Convert modify-header set IPv4 address action to DV specification.
276 * @param[in,out] resource
277 * Pointer to the modify-header resource.
279 * Pointer to action specification.
281 * Pointer to the error structure.
284 * 0 on success, a negative errno value otherwise and rte_errno is set.
287 flow_dv_convert_action_modify_ipv4
288 (struct mlx5_flow_dv_modify_hdr_resource *resource,
289 const struct rte_flow_action *action,
290 struct rte_flow_error *error)
292 const struct rte_flow_action_set_ipv4 *conf =
293 (const struct rte_flow_action_set_ipv4 *)(action->conf);
294 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
295 struct rte_flow_item_ipv4 ipv4;
296 struct rte_flow_item_ipv4 ipv4_mask;
298 memset(&ipv4, 0, sizeof(ipv4));
299 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
300 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
301 ipv4.hdr.src_addr = conf->ipv4_addr;
302 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
304 ipv4.hdr.dst_addr = conf->ipv4_addr;
305 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
308 item.mask = &ipv4_mask;
309 return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
310 MLX5_MODIFICATION_TYPE_SET, error);
314 * Convert modify-header set IPv6 address action to DV specification.
316 * @param[in,out] resource
317 * Pointer to the modify-header resource.
319 * Pointer to action specification.
321 * Pointer to the error structure.
324 * 0 on success, a negative errno value otherwise and rte_errno is set.
327 flow_dv_convert_action_modify_ipv6
328 (struct mlx5_flow_dv_modify_hdr_resource *resource,
329 const struct rte_flow_action *action,
330 struct rte_flow_error *error)
332 const struct rte_flow_action_set_ipv6 *conf =
333 (const struct rte_flow_action_set_ipv6 *)(action->conf);
334 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
335 struct rte_flow_item_ipv6 ipv6;
336 struct rte_flow_item_ipv6 ipv6_mask;
338 memset(&ipv6, 0, sizeof(ipv6));
339 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
340 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
341 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
342 sizeof(ipv6.hdr.src_addr));
343 memcpy(&ipv6_mask.hdr.src_addr,
344 &rte_flow_item_ipv6_mask.hdr.src_addr,
345 sizeof(ipv6.hdr.src_addr));
347 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
348 sizeof(ipv6.hdr.dst_addr));
349 memcpy(&ipv6_mask.hdr.dst_addr,
350 &rte_flow_item_ipv6_mask.hdr.dst_addr,
351 sizeof(ipv6.hdr.dst_addr));
354 item.mask = &ipv6_mask;
355 return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
356 MLX5_MODIFICATION_TYPE_SET, error);
360 * Convert modify-header set MAC address action to DV specification.
362 * @param[in,out] resource
363 * Pointer to the modify-header resource.
365 * Pointer to action specification.
367 * Pointer to the error structure.
370 * 0 on success, a negative errno value otherwise and rte_errno is set.
373 flow_dv_convert_action_modify_mac
374 (struct mlx5_flow_dv_modify_hdr_resource *resource,
375 const struct rte_flow_action *action,
376 struct rte_flow_error *error)
378 const struct rte_flow_action_set_mac *conf =
379 (const struct rte_flow_action_set_mac *)(action->conf);
380 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
381 struct rte_flow_item_eth eth;
382 struct rte_flow_item_eth eth_mask;
384 memset(&eth, 0, sizeof(eth));
385 memset(&eth_mask, 0, sizeof(eth_mask));
386 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
387 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
388 sizeof(eth.src.addr_bytes));
389 memcpy(&eth_mask.src.addr_bytes,
390 &rte_flow_item_eth_mask.src.addr_bytes,
391 sizeof(eth_mask.src.addr_bytes));
393 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
394 sizeof(eth.dst.addr_bytes));
395 memcpy(&eth_mask.dst.addr_bytes,
396 &rte_flow_item_eth_mask.dst.addr_bytes,
397 sizeof(eth_mask.dst.addr_bytes));
400 item.mask = &eth_mask;
401 return flow_dv_convert_modify_action(&item, modify_eth, resource,
402 MLX5_MODIFICATION_TYPE_SET, error);
406 * Convert modify-header set VLAN VID action to DV specification.
408 * @param[in,out] resource
409 * Pointer to the modify-header resource.
411 * Pointer to action specification.
413 * Pointer to the error structure.
416 * 0 on success, a negative errno value otherwise and rte_errno is set.
419 flow_dv_convert_action_modify_vlan_vid
420 (struct mlx5_flow_dv_modify_hdr_resource *resource,
421 const struct rte_flow_action *action,
422 struct rte_flow_error *error)
424 const struct rte_flow_action_of_set_vlan_vid *conf =
425 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
426 int i = resource->actions_num;
427 struct mlx5_modification_cmd *actions = resource->actions;
428 struct field_modify_info *field = modify_vlan_out_first_vid;
430 if (i >= MLX5_MODIFY_NUM)
431 return rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
433 "too many items to modify");
434 actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
435 actions[i].field = field->id;
436 actions[i].length = field->size;
437 actions[i].offset = field->offset;
438 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
439 actions[i].data1 = conf->vlan_vid;
440 actions[i].data1 = actions[i].data1 << 16;
441 resource->actions_num = ++i;
446 * Convert modify-header set TP action to DV specification.
448 * @param[in,out] resource
449 * Pointer to the modify-header resource.
451 * Pointer to action specification.
453 * Pointer to rte_flow_item objects list.
455 * Pointer to flow attributes structure.
457 * Pointer to the error structure.
460 * 0 on success, a negative errno value otherwise and rte_errno is set.
463 flow_dv_convert_action_modify_tp
464 (struct mlx5_flow_dv_modify_hdr_resource *resource,
465 const struct rte_flow_action *action,
466 const struct rte_flow_item *items,
467 union flow_dv_attr *attr,
468 struct rte_flow_error *error)
470 const struct rte_flow_action_set_tp *conf =
471 (const struct rte_flow_action_set_tp *)(action->conf);
472 struct rte_flow_item item;
473 struct rte_flow_item_udp udp;
474 struct rte_flow_item_udp udp_mask;
475 struct rte_flow_item_tcp tcp;
476 struct rte_flow_item_tcp tcp_mask;
477 struct field_modify_info *field;
480 flow_dv_attr_init(items, attr);
482 memset(&udp, 0, sizeof(udp));
483 memset(&udp_mask, 0, sizeof(udp_mask));
484 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
485 udp.hdr.src_port = conf->port;
486 udp_mask.hdr.src_port =
487 rte_flow_item_udp_mask.hdr.src_port;
489 udp.hdr.dst_port = conf->port;
490 udp_mask.hdr.dst_port =
491 rte_flow_item_udp_mask.hdr.dst_port;
493 item.type = RTE_FLOW_ITEM_TYPE_UDP;
495 item.mask = &udp_mask;
499 memset(&tcp, 0, sizeof(tcp));
500 memset(&tcp_mask, 0, sizeof(tcp_mask));
501 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
502 tcp.hdr.src_port = conf->port;
503 tcp_mask.hdr.src_port =
504 rte_flow_item_tcp_mask.hdr.src_port;
506 tcp.hdr.dst_port = conf->port;
507 tcp_mask.hdr.dst_port =
508 rte_flow_item_tcp_mask.hdr.dst_port;
510 item.type = RTE_FLOW_ITEM_TYPE_TCP;
512 item.mask = &tcp_mask;
515 return flow_dv_convert_modify_action(&item, field, resource,
516 MLX5_MODIFICATION_TYPE_SET, error);
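/*
 * Illustrative sketch (not upstream code): the converter above picks the
 * UDP or TCP modify table from the pattern items via flow_dv_attr_init().
 * A hypothetical wrapper setting the transport destination port could look
 * like this; the names and the value are examples only.
 */
static int __rte_unused
flow_dv_example_set_tp_dst(struct mlx5_flow_dv_modify_hdr_resource *resource,
			   const struct rte_flow_item *pattern,
			   rte_be16_t new_port,
			   struct rte_flow_error *error)
{
	union flow_dv_attr attr;
	struct rte_flow_action_set_tp conf = { .port = new_port };
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_SET_TP_DST,
		.conf = &conf,
	};

	memset(&attr, 0, sizeof(attr));
	return flow_dv_convert_action_modify_tp(resource, &action, pattern,
						&attr, error);
}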
520 * Convert modify-header set TTL action to DV specification.
522 * @param[in,out] resource
523 * Pointer to the modify-header resource.
525 * Pointer to action specification.
527 * Pointer to rte_flow_item objects list.
529 * Pointer to flow attributes structure.
531 * Pointer to the error structure.
534 * 0 on success, a negative errno value otherwise and rte_errno is set.
537 flow_dv_convert_action_modify_ttl
538 (struct mlx5_flow_dv_modify_hdr_resource *resource,
539 const struct rte_flow_action *action,
540 const struct rte_flow_item *items,
541 union flow_dv_attr *attr,
542 struct rte_flow_error *error)
544 const struct rte_flow_action_set_ttl *conf =
545 (const struct rte_flow_action_set_ttl *)(action->conf);
546 struct rte_flow_item item;
547 struct rte_flow_item_ipv4 ipv4;
548 struct rte_flow_item_ipv4 ipv4_mask;
549 struct rte_flow_item_ipv6 ipv6;
550 struct rte_flow_item_ipv6 ipv6_mask;
551 struct field_modify_info *field;
554 flow_dv_attr_init(items, attr);
556 memset(&ipv4, 0, sizeof(ipv4));
557 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
558 ipv4.hdr.time_to_live = conf->ttl_value;
559 ipv4_mask.hdr.time_to_live = 0xFF;
560 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
562 item.mask = &ipv4_mask;
566 memset(&ipv6, 0, sizeof(ipv6));
567 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
568 ipv6.hdr.hop_limits = conf->ttl_value;
569 ipv6_mask.hdr.hop_limits = 0xFF;
570 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
572 item.mask = &ipv6_mask;
575 return flow_dv_convert_modify_action(&item, field, resource,
576 MLX5_MODIFICATION_TYPE_SET, error);
580 * Convert modify-header decrement TTL action to DV specification.
582 * @param[in,out] resource
583 * Pointer to the modify-header resource.
585 * Pointer to action specification.
587 * Pointer to rte_flow_item objects list.
589 * Pointer to flow attributes structure.
591 * Pointer to the error structure.
594 * 0 on success, a negative errno value otherwise and rte_errno is set.
597 flow_dv_convert_action_modify_dec_ttl
598 (struct mlx5_flow_dv_modify_hdr_resource *resource,
599 const struct rte_flow_item *items,
600 union flow_dv_attr *attr,
601 struct rte_flow_error *error)
603 struct rte_flow_item item;
604 struct rte_flow_item_ipv4 ipv4;
605 struct rte_flow_item_ipv4 ipv4_mask;
606 struct rte_flow_item_ipv6 ipv6;
607 struct rte_flow_item_ipv6 ipv6_mask;
608 struct field_modify_info *field;
611 flow_dv_attr_init(items, attr);
613 memset(&ipv4, 0, sizeof(ipv4));
614 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
615 ipv4.hdr.time_to_live = 0xFF;
616 ipv4_mask.hdr.time_to_live = 0xFF;
617 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
619 item.mask = &ipv4_mask;
623 memset(&ipv6, 0, sizeof(ipv6));
624 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
625 ipv6.hdr.hop_limits = 0xFF;
626 ipv6_mask.hdr.hop_limits = 0xFF;
627 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
629 item.mask = &ipv6_mask;
632 return flow_dv_convert_modify_action(&item, field, resource,
633 MLX5_MODIFICATION_TYPE_ADD, error);
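/*
 * Illustrative note (not upstream code): the decrement above is emulated
 * with an ADD of 0xFF because the TTL/hop-limit field is 8 bits wide and
 * wraps modulo 256, e.g. 64 + 255 = 319 = 63 (mod 256), i.e. TTL - 1.
 */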
637 * Convert modify-header increment/decrement TCP Sequence number
638 * to DV specification.
640 * @param[in,out] resource
641 * Pointer to the modify-header resource.
643 * Pointer to action specification.
645 * Pointer to the error structure.
648 * 0 on success, a negative errno value otherwise and rte_errno is set.
651 flow_dv_convert_action_modify_tcp_seq
652 (struct mlx5_flow_dv_modify_hdr_resource *resource,
653 const struct rte_flow_action *action,
654 struct rte_flow_error *error)
656 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
657 uint64_t value = rte_be_to_cpu_32(*conf);
658 struct rte_flow_item item;
659 struct rte_flow_item_tcp tcp;
660 struct rte_flow_item_tcp tcp_mask;
662 memset(&tcp, 0, sizeof(tcp));
663 memset(&tcp_mask, 0, sizeof(tcp_mask));
664 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
666 * The HW has no decrement operation, only increment operation.
667 * To simulate decrement X from Y using increment operation
668 * we need to add UINT32_MAX X times to Y.
669 * Each addition of UINT32_MAX decrements Y by 1.
672 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
673 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
674 item.type = RTE_FLOW_ITEM_TYPE_TCP;
676 item.mask = &tcp_mask;
677 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
678 MLX5_MODIFICATION_TYPE_ADD, error);
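/*
 * Illustrative note (not upstream code): for the 32-bit sequence number an
 * addition of X * UINT32_MAX is equivalent to subtracting X modulo 2^32,
 * e.g. (5 * 0xFFFFFFFFULL) & 0xFFFFFFFF == 0xFFFFFFFB == (uint32_t)-5,
 * which is how the DEC_TCP_SEQ case above turns into an ADD command.
 */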
682 * Convert modify-header increment/decrement TCP Acknowledgment number
683 * to DV specification.
685 * @param[in,out] resource
686 * Pointer to the modify-header resource.
688 * Pointer to action specification.
690 * Pointer to the error structure.
693 * 0 on success, a negative errno value otherwise and rte_errno is set.
696 flow_dv_convert_action_modify_tcp_ack
697 (struct mlx5_flow_dv_modify_hdr_resource *resource,
698 const struct rte_flow_action *action,
699 struct rte_flow_error *error)
701 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
702 uint64_t value = rte_be_to_cpu_32(*conf);
703 struct rte_flow_item item;
704 struct rte_flow_item_tcp tcp;
705 struct rte_flow_item_tcp tcp_mask;
707 memset(&tcp, 0, sizeof(tcp));
708 memset(&tcp_mask, 0, sizeof(tcp_mask));
709 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
711 * The HW has no decrement operation, only increment operation.
712 * To simulate decrement X from Y using increment operation
713 * we need to add UINT32_MAX X times to Y.
714 * Each addition of UINT32_MAX decrements Y by 1.
717 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
718 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
719 item.type = RTE_FLOW_ITEM_TYPE_TCP;
721 item.mask = &tcp_mask;
722 return flow_dv_convert_modify_action(&item, modify_tcp, resource,
723 MLX5_MODIFICATION_TYPE_ADD, error);
727 * Validate META item.
730 * Pointer to the rte_eth_dev structure.
732 * Item specification.
734 * Attributes of flow that includes this item.
736 * Pointer to error structure.
739 * 0 on success, a negative errno value otherwise and rte_errno is set.
742 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
743 const struct rte_flow_item *item,
744 const struct rte_flow_attr *attr,
745 struct rte_flow_error *error)
747 const struct rte_flow_item_meta *spec = item->spec;
748 const struct rte_flow_item_meta *mask = item->mask;
749 const struct rte_flow_item_meta nic_mask = {
750 .data = RTE_BE32(UINT32_MAX)
753 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
755 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
756 return rte_flow_error_set(error, EPERM,
757 RTE_FLOW_ERROR_TYPE_ITEM,
759 "match on metadata offload "
760 "configuration is off for this port");
762 return rte_flow_error_set(error, EINVAL,
763 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
765 "data cannot be empty");
767 return rte_flow_error_set(error, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
770 "data cannot be zero");
772 mask = &rte_flow_item_meta_mask;
773 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
774 (const uint8_t *)&nic_mask,
775 sizeof(struct rte_flow_item_meta),
780 return rte_flow_error_set(error, ENOTSUP,
781 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
783 "pattern not supported for ingress");
788 * Validate vport item.
791 * Pointer to the rte_eth_dev structure.
793 * Item specification.
795 * Attributes of flow that includes this item.
796 * @param[in] item_flags
797 * Bit-fields that holds the items detected until now.
799 * Pointer to error structure.
802 * 0 on success, a negative errno value otherwise and rte_errno is set.
805 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
806 const struct rte_flow_item *item,
807 const struct rte_flow_attr *attr,
809 struct rte_flow_error *error)
811 const struct rte_flow_item_port_id *spec = item->spec;
812 const struct rte_flow_item_port_id *mask = item->mask;
813 const struct rte_flow_item_port_id switch_mask = {
816 uint16_t esw_domain_id;
817 uint16_t item_port_esw_domain_id;
821 return rte_flow_error_set(error, EINVAL,
822 RTE_FLOW_ERROR_TYPE_ITEM,
824 "match on port id is valid only"
825 " when transfer flag is enabled");
826 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
827 return rte_flow_error_set(error, ENOTSUP,
828 RTE_FLOW_ERROR_TYPE_ITEM, item,
829 "multiple source ports are not"
833 if (mask->id != 0xffffffff)
834 return rte_flow_error_set(error, ENOTSUP,
835 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
837 "no support for partial mask on"
839 ret = mlx5_flow_item_acceptable
840 (item, (const uint8_t *)mask,
841 (const uint8_t *)&rte_flow_item_port_id_mask,
842 sizeof(struct rte_flow_item_port_id),
848 ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
851 return rte_flow_error_set(error, -ret,
852 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
853 "failed to obtain E-Switch info for"
855 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
856 &esw_domain_id, NULL);
858 return rte_flow_error_set(error, -ret,
859 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
861 "failed to obtain E-Switch info");
862 if (item_port_esw_domain_id != esw_domain_id)
863 return rte_flow_error_set(error, -ret,
864 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
865 "cannot match on a port from a"
866 " different E-Switch");
871 * Validate the pop VLAN action.
874 * Pointer to the rte_eth_dev structure.
875 * @param[in] action_flags
876 * Holds the actions detected until now.
878 * Pointer to the pop vlan action.
879 * @param[in] item_flags
880 * The items found in this flow rule.
882 * Pointer to flow attributes.
884 * Pointer to error structure.
887 * 0 on success, a negative errno value otherwise and rte_errno is set.
890 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
891 uint64_t action_flags,
892 const struct rte_flow_action *action,
894 const struct rte_flow_attr *attr,
895 struct rte_flow_error *error)
897 struct mlx5_priv *priv = dev->data->dev_private;
901 if (!priv->sh->pop_vlan_action)
902 return rte_flow_error_set(error, ENOTSUP,
903 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
905 "pop vlan action is not supported");
907 * Check for inconsistencies:
908 * fail strip_vlan in a flow that matches packets without VLAN tags.
909 * fail strip_vlan in a flow that does not explicitly match on the
910 * VLAN tag.
912 if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
913 return rte_flow_error_set(error, ENOTSUP,
914 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
916 "no support for multiple vlan pop "
918 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
919 return rte_flow_error_set(error, ENOTSUP,
920 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
922 "cannot pop vlan without a "
923 "match on (outer) vlan in the flow");
928 * Get VLAN default info from the VLAN match info.
933 * Pointer to the list of item specifications.
935 * Pointer to the VLAN info to fill.
937 * Pointer to error structure.
940 * 0 on success, a negative errno value otherwise and rte_errno is set.
943 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
944 struct rte_vlan_hdr *vlan)
946 const struct rte_flow_item_vlan nic_mask = {
947 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
948 MLX5DV_FLOW_VLAN_VID_MASK),
949 .inner_type = RTE_BE16(0xffff),
954 for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
955 items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
957 if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
958 const struct rte_flow_item_vlan *vlan_m = items->mask;
959 const struct rte_flow_item_vlan *vlan_v = items->spec;
963 /* Only full match values are accepted */
964 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
965 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
966 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
968 rte_be_to_cpu_16(vlan_v->tci &
969 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
971 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
972 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
973 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
975 rte_be_to_cpu_16(vlan_v->tci &
976 MLX5DV_FLOW_VLAN_VID_MASK_BE);
978 if (vlan_m->inner_type == nic_mask.inner_type)
979 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
985 * Validate the push VLAN action.
987 * @param[in] action_flags
988 * Holds the actions detected until now.
990 * Pointer to the encap action.
992 * Pointer to flow attributes
994 * Pointer to error structure.
997 * 0 on success, a negative errno value otherwise and rte_errno is set.
1000 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1001 const struct rte_flow_action *action,
1002 const struct rte_flow_attr *attr,
1003 struct rte_flow_error *error)
1005 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1007 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1008 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1009 return rte_flow_error_set(error, EINVAL,
1010 RTE_FLOW_ERROR_TYPE_ACTION, action,
1011 "invalid vlan ethertype");
1013 (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1014 return rte_flow_error_set(error, ENOTSUP,
1015 RTE_FLOW_ERROR_TYPE_ACTION, action,
1016 "no support for multiple VLAN "
1023 * Validate the set VLAN PCP.
1025 * @param[in] action_flags
1026 * Holds the actions detected until now.
1027 * @param[in] actions
1028 * Pointer to the list of actions remaining in the flow rule.
1030 * Pointer to flow attributes
1032 * Pointer to error structure.
1035 * 0 on success, a negative errno value otherwise and rte_errno is set.
1038 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1039 const struct rte_flow_action actions[],
1040 struct rte_flow_error *error)
1042 const struct rte_flow_action *action = actions;
1043 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1045 if (conf->vlan_pcp > 7)
1046 return rte_flow_error_set(error, EINVAL,
1047 RTE_FLOW_ERROR_TYPE_ACTION, action,
1048 "VLAN PCP value is too big");
1049 if (mlx5_flow_find_action(actions,
1050 RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) == NULL)
1051 return rte_flow_error_set(error, ENOTSUP,
1052 RTE_FLOW_ERROR_TYPE_ACTION, action,
1053 "set VLAN PCP can only be used "
1054 "with push VLAN action");
1055 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
1056 return rte_flow_error_set(error, ENOTSUP,
1057 RTE_FLOW_ERROR_TYPE_ACTION, action,
1058 "set VLAN PCP action must precede "
1059 "the push VLAN action");
1064 * Validate the set VLAN VID.
1066 * @param[in] item_flags
1067 * Holds the items detected in this rule.
1068 * @param[in] actions
1069 * Pointer to the list of actions remaining in the flow rule.
1071 * Pointer to flow attributes
1073 * Pointer to error structure.
1076 * 0 on success, a negative errno value otherwise and rte_errno is set.
1079 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1080 const struct rte_flow_action actions[],
1081 struct rte_flow_error *error)
1083 const struct rte_flow_action *action = actions;
1084 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1086 if (conf->vlan_vid > RTE_BE16(0xFFE))
1087 return rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ACTION, action,
1089 "VLAN VID value is too big");
1090 /* If a push VLAN action follows then it will handle this action */
1091 if (mlx5_flow_find_action(actions,
1092 RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
1096 * Action is on an existing VLAN header:
1097 * Need to verify this is a single modify VID action.
1098 * The rule must include a match on the outer VLAN.
1100 if (mlx5_flow_find_action(++action,
1101 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1102 return rte_flow_error_set(error, ENOTSUP,
1103 RTE_FLOW_ERROR_TYPE_ACTION, action,
1104 "Multiple VLAN VID modifications are "
1106 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1107 return rte_flow_error_set(error, EINVAL,
1108 RTE_FLOW_ERROR_TYPE_ACTION, action,
1109 "match on VLAN is required in order "
1115 * Validate count action.
1120 * Pointer to error structure.
1123 * 0 on success, a negative errno value otherwise and rte_errno is set.
1126 flow_dv_validate_action_count(struct rte_eth_dev *dev,
1127 struct rte_flow_error *error)
1129 struct mlx5_priv *priv = dev->data->dev_private;
1131 if (!priv->config.devx)
1133 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
1137 return rte_flow_error_set
1139 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1141 "count action not supported");
1145 * Validate the L2 encap action.
1147 * @param[in] action_flags
1148 * Holds the actions detected until now.
1150 * Pointer to the encap action.
1152 * Pointer to flow attributes
1154 * Pointer to error structure.
1157 * 0 on success, a negative errno value otherwise and rte_errno is set.
1160 flow_dv_validate_action_l2_encap(uint64_t action_flags,
1161 const struct rte_flow_action *action,
1162 const struct rte_flow_attr *attr,
1163 struct rte_flow_error *error)
1165 if (!(action->conf))
1166 return rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ACTION, action,
1168 "configuration cannot be null");
1169 if (action_flags & MLX5_FLOW_ACTION_DROP)
1170 return rte_flow_error_set(error, EINVAL,
1171 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1172 "can't drop and encap in same flow");
1173 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1174 return rte_flow_error_set(error, EINVAL,
1175 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1176 "can only have a single encap or"
1177 " decap action in a flow");
1178 if (!attr->transfer && attr->ingress)
1179 return rte_flow_error_set(error, ENOTSUP,
1180 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1182 "encap action not supported for "
1188 * Validate the L2 decap action.
1190 * @param[in] action_flags
1191 * Holds the actions detected until now.
1193 * Pointer to flow attributes
1195 * Pointer to error structure.
1198 * 0 on success, a negative errno value otherwise and rte_errno is set.
1201 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1202 const struct rte_flow_attr *attr,
1203 struct rte_flow_error *error)
1205 if (action_flags & MLX5_FLOW_ACTION_DROP)
1206 return rte_flow_error_set(error, EINVAL,
1207 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1208 "can't drop and decap in same flow");
1209 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1210 return rte_flow_error_set(error, EINVAL,
1211 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1212 "can only have a single encap or"
1213 " decap action in a flow");
1214 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1215 return rte_flow_error_set(error, EINVAL,
1216 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1217 "can't have decap action after"
1220 return rte_flow_error_set(error, ENOTSUP,
1221 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1223 "decap action not supported for "
1229 * Validate the raw encap action.
1231 * @param[in] action_flags
1232 * Holds the actions detected until now.
1234 * Pointer to the encap action.
1236 * Pointer to flow attributes
1238 * Pointer to error structure.
1241 * 0 on success, a negative errno value otherwise and rte_errno is set.
1244 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1245 const struct rte_flow_action *action,
1246 const struct rte_flow_attr *attr,
1247 struct rte_flow_error *error)
1249 const struct rte_flow_action_raw_encap *raw_encap =
1250 (const struct rte_flow_action_raw_encap *)action->conf;
1251 if (!(action->conf))
1252 return rte_flow_error_set(error, EINVAL,
1253 RTE_FLOW_ERROR_TYPE_ACTION, action,
1254 "configuration cannot be null");
1255 if (action_flags & MLX5_FLOW_ACTION_DROP)
1256 return rte_flow_error_set(error, EINVAL,
1257 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1258 "can't drop and encap in same flow");
1259 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1260 return rte_flow_error_set(error, EINVAL,
1261 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1262 "can only have a single encap"
1263 " action in a flow");
1264 /* encap without preceding decap is not supported for ingress */
1265 if (!attr->transfer && attr->ingress &&
1266 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1267 return rte_flow_error_set(error, ENOTSUP,
1268 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1270 "encap action not supported for "
1272 if (!raw_encap->size || !raw_encap->data)
1273 return rte_flow_error_set(error, EINVAL,
1274 RTE_FLOW_ERROR_TYPE_ACTION, action,
1275 "raw encap data cannot be empty");
1280 * Validate the raw decap action.
1282 * @param[in] action_flags
1283 * Holds the actions detected until now.
1285 * Pointer to the encap action.
1287 * Pointer to flow attributes
1289 * Pointer to error structure.
1292 * 0 on success, a negative errno value otherwise and rte_errno is set.
1295 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1296 const struct rte_flow_action *action,
1297 const struct rte_flow_attr *attr,
1298 struct rte_flow_error *error)
1300 if (action_flags & MLX5_FLOW_ACTION_DROP)
1301 return rte_flow_error_set(error, EINVAL,
1302 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1303 "can't drop and decap in same flow");
1304 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1305 return rte_flow_error_set(error, EINVAL,
1306 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1307 "can't have encap action before"
1309 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1310 return rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1312 "can only have a single decap"
1313 " action in a flow");
1314 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1315 return rte_flow_error_set(error, EINVAL,
1316 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1317 "can't have decap action after"
1319 /* decap action is valid on egress only if it is followed by encap */
1321 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1322 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1325 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1326 return rte_flow_error_set
1328 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1329 NULL, "decap action not supported"
1336 * Find existing encap/decap resource or create and register a new one.
1338 * @param dev[in, out]
1339 * Pointer to rte_eth_dev structure.
1340 * @param[in, out] resource
1341 * Pointer to encap/decap resource.
1342 * @parm[in, out] dev_flow
1343 * Pointer to the dev_flow.
1345 * pointer to error structure.
1348 * 0 on success otherwise -errno and errno is set.
1351 flow_dv_encap_decap_resource_register
1352 (struct rte_eth_dev *dev,
1353 struct mlx5_flow_dv_encap_decap_resource *resource,
1354 struct mlx5_flow *dev_flow,
1355 struct rte_flow_error *error)
1357 struct mlx5_priv *priv = dev->data->dev_private;
1358 struct mlx5_ibv_shared *sh = priv->sh;
1359 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1360 struct rte_flow *flow = dev_flow->flow;
1361 struct mlx5dv_dr_domain *domain;
1363 resource->flags = flow->group ? 0 : 1;
1364 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1365 domain = sh->fdb_domain;
1366 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1367 domain = sh->rx_domain;
1369 domain = sh->tx_domain;
1371 /* Lookup a matching resource from cache. */
1372 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1373 if (resource->reformat_type == cache_resource->reformat_type &&
1374 resource->ft_type == cache_resource->ft_type &&
1375 resource->flags == cache_resource->flags &&
1376 resource->size == cache_resource->size &&
1377 !memcmp((const void *)resource->buf,
1378 (const void *)cache_resource->buf,
1380 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1381 (void *)cache_resource,
1382 rte_atomic32_read(&cache_resource->refcnt));
1383 rte_atomic32_inc(&cache_resource->refcnt);
1384 dev_flow->dv.encap_decap = cache_resource;
1388 /* Register new encap/decap resource. */
1389 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1390 if (!cache_resource)
1391 return rte_flow_error_set(error, ENOMEM,
1392 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1393 "cannot allocate resource memory");
1394 *cache_resource = *resource;
1395 cache_resource->verbs_action =
1396 mlx5_glue->dv_create_flow_action_packet_reformat
1397 (sh->ctx, cache_resource->reformat_type,
1398 cache_resource->ft_type, domain, cache_resource->flags,
1399 cache_resource->size,
1400 (cache_resource->size ? cache_resource->buf : NULL));
1401 if (!cache_resource->verbs_action) {
1402 rte_free(cache_resource);
1403 return rte_flow_error_set(error, ENOMEM,
1404 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1405 NULL, "cannot create action");
1407 rte_atomic32_init(&cache_resource->refcnt);
1408 rte_atomic32_inc(&cache_resource->refcnt);
1409 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1410 dev_flow->dv.encap_decap = cache_resource;
1411 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1412 (void *)cache_resource,
1413 rte_atomic32_read(&cache_resource->refcnt));
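/*
 * Illustrative note (not upstream code): the registration helpers in this
 * file (encap/decap above, jump table, port ID and push VLAN below) all
 * follow the same pattern: walk the per-device cache list for an equal
 * resource, take a reference if found, otherwise allocate a new entry,
 * create the verbs/DR action once and insert it with a refcount of one.
 */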
1418 * Find existing table jump resource or create and register a new one.
1420 * @param dev[in, out]
1421 * Pointer to rte_eth_dev structure.
1422 * @param[in, out] resource
1423 * Pointer to jump table resource.
1424 * @parm[in, out] dev_flow
1425 * Pointer to the dev_flow.
1427 * pointer to error structure.
1430 * 0 on success otherwise -errno and errno is set.
1433 flow_dv_jump_tbl_resource_register
1434 (struct rte_eth_dev *dev,
1435 struct mlx5_flow_dv_jump_tbl_resource *resource,
1436 struct mlx5_flow *dev_flow,
1437 struct rte_flow_error *error)
1439 struct mlx5_priv *priv = dev->data->dev_private;
1440 struct mlx5_ibv_shared *sh = priv->sh;
1441 struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1443 /* Lookup a matching resource from cache. */
1444 LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1445 if (resource->tbl == cache_resource->tbl) {
1446 DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1447 (void *)cache_resource,
1448 rte_atomic32_read(&cache_resource->refcnt));
1449 rte_atomic32_inc(&cache_resource->refcnt);
1450 dev_flow->dv.jump = cache_resource;
1454 /* Register new jump table resource. */
1455 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1456 if (!cache_resource)
1457 return rte_flow_error_set(error, ENOMEM,
1458 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1459 "cannot allocate resource memory");
1460 *cache_resource = *resource;
1461 cache_resource->action =
1462 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1463 (resource->tbl->obj);
1464 if (!cache_resource->action) {
1465 rte_free(cache_resource);
1466 return rte_flow_error_set(error, ENOMEM,
1467 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1468 NULL, "cannot create action");
1470 rte_atomic32_init(&cache_resource->refcnt);
1471 rte_atomic32_inc(&cache_resource->refcnt);
1472 LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1473 dev_flow->dv.jump = cache_resource;
1474 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1475 (void *)cache_resource,
1476 rte_atomic32_read(&cache_resource->refcnt));
1481 * Find existing table port ID resource or create and register a new one.
1483 * @param dev[in, out]
1484 * Pointer to rte_eth_dev structure.
1485 * @param[in, out] resource
1486 * Pointer to port ID action resource.
1487 * @parm[in, out] dev_flow
1488 * Pointer to the dev_flow.
1490 * pointer to error structure.
1493 * 0 on success otherwise -errno and errno is set.
1496 flow_dv_port_id_action_resource_register
1497 (struct rte_eth_dev *dev,
1498 struct mlx5_flow_dv_port_id_action_resource *resource,
1499 struct mlx5_flow *dev_flow,
1500 struct rte_flow_error *error)
1502 struct mlx5_priv *priv = dev->data->dev_private;
1503 struct mlx5_ibv_shared *sh = priv->sh;
1504 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1506 /* Lookup a matching resource from cache. */
1507 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1508 if (resource->port_id == cache_resource->port_id) {
1509 DRV_LOG(DEBUG, "port id action resource resource %p: "
1511 (void *)cache_resource,
1512 rte_atomic32_read(&cache_resource->refcnt));
1513 rte_atomic32_inc(&cache_resource->refcnt);
1514 dev_flow->dv.port_id_action = cache_resource;
1518 /* Register new port id action resource. */
1519 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1520 if (!cache_resource)
1521 return rte_flow_error_set(error, ENOMEM,
1522 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1523 "cannot allocate resource memory");
1524 *cache_resource = *resource;
1525 cache_resource->action =
1526 mlx5_glue->dr_create_flow_action_dest_vport
1527 (priv->sh->fdb_domain, resource->port_id);
1528 if (!cache_resource->action) {
1529 rte_free(cache_resource);
1530 return rte_flow_error_set(error, ENOMEM,
1531 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1532 NULL, "cannot create action");
1534 rte_atomic32_init(&cache_resource->refcnt);
1535 rte_atomic32_inc(&cache_resource->refcnt);
1536 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1537 dev_flow->dv.port_id_action = cache_resource;
1538 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1539 (void *)cache_resource,
1540 rte_atomic32_read(&cache_resource->refcnt));
1545 * Find existing push vlan resource or create and register a new one.
1547 * @param dev[in, out]
1548 * Pointer to rte_eth_dev structure.
1549 * @param[in, out] resource
1550 * Pointer to port ID action resource.
1551 * @parm[in, out] dev_flow
1552 * Pointer to the dev_flow.
1554 * pointer to error structure.
1557 * 0 on success otherwise -errno and errno is set.
1560 flow_dv_push_vlan_action_resource_register
1561 (struct rte_eth_dev *dev,
1562 struct mlx5_flow_dv_push_vlan_action_resource *resource,
1563 struct mlx5_flow *dev_flow,
1564 struct rte_flow_error *error)
1566 struct mlx5_priv *priv = dev->data->dev_private;
1567 struct mlx5_ibv_shared *sh = priv->sh;
1568 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1569 struct mlx5dv_dr_domain *domain;
1571 /* Lookup a matching resource from cache. */
1572 LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1573 if (resource->vlan_tag == cache_resource->vlan_tag &&
1574 resource->ft_type == cache_resource->ft_type) {
1575 DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
1577 (void *)cache_resource,
1578 rte_atomic32_read(&cache_resource->refcnt));
1579 rte_atomic32_inc(&cache_resource->refcnt);
1580 dev_flow->dv.push_vlan_res = cache_resource;
1584 /* Register new push_vlan action resource. */
1585 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1586 if (!cache_resource)
1587 return rte_flow_error_set(error, ENOMEM,
1588 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1589 "cannot allocate resource memory");
1590 *cache_resource = *resource;
1591 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1592 domain = sh->fdb_domain;
1593 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1594 domain = sh->rx_domain;
1596 domain = sh->tx_domain;
1597 cache_resource->action =
1598 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1599 resource->vlan_tag);
1600 if (!cache_resource->action) {
1601 rte_free(cache_resource);
1602 return rte_flow_error_set(error, ENOMEM,
1603 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1604 NULL, "cannot create action");
1606 rte_atomic32_init(&cache_resource->refcnt);
1607 rte_atomic32_inc(&cache_resource->refcnt);
1608 LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1609 dev_flow->dv.push_vlan_res = cache_resource;
1610 DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1611 (void *)cache_resource,
1612 rte_atomic32_read(&cache_resource->refcnt));
1616 * Get the size of a specific rte_flow_item_type.
1618 * @param[in] item_type
1619 * Tested rte_flow_item_type.
1622 * Size of the corresponding rte_flow_item_* struct, 0 if void or irrelevant.
1625 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1629 switch (item_type) {
1630 case RTE_FLOW_ITEM_TYPE_ETH:
1631 retval = sizeof(struct rte_flow_item_eth);
1633 case RTE_FLOW_ITEM_TYPE_VLAN:
1634 retval = sizeof(struct rte_flow_item_vlan);
1636 case RTE_FLOW_ITEM_TYPE_IPV4:
1637 retval = sizeof(struct rte_flow_item_ipv4);
1639 case RTE_FLOW_ITEM_TYPE_IPV6:
1640 retval = sizeof(struct rte_flow_item_ipv6);
1642 case RTE_FLOW_ITEM_TYPE_UDP:
1643 retval = sizeof(struct rte_flow_item_udp);
1645 case RTE_FLOW_ITEM_TYPE_TCP:
1646 retval = sizeof(struct rte_flow_item_tcp);
1648 case RTE_FLOW_ITEM_TYPE_VXLAN:
1649 retval = sizeof(struct rte_flow_item_vxlan);
1651 case RTE_FLOW_ITEM_TYPE_GRE:
1652 retval = sizeof(struct rte_flow_item_gre);
1654 case RTE_FLOW_ITEM_TYPE_NVGRE:
1655 retval = sizeof(struct rte_flow_item_nvgre);
1657 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1658 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1660 case RTE_FLOW_ITEM_TYPE_MPLS:
1661 retval = sizeof(struct rte_flow_item_mpls);
1663 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1671 #define MLX5_ENCAP_IPV4_VERSION 0x40
1672 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
1673 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
1674 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
1675 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
1676 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
1677 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
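/*
 * Illustrative note (not upstream code): these are the defaults written
 * into encapsulation headers when the application leaves the fields at
 * zero, e.g. an outer IPv4 header gets version/IHL 0x45 and TTL 0x40 (64),
 * and a VXLAN header gets the valid-VNI flag via MLX5_ENCAP_VXLAN_FLAGS.
 */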
1680 * Convert the encap action data from list of rte_flow_item to raw buffer
1683 * Pointer to rte_flow_item objects list.
1685 * Pointer to the output buffer.
1687 * Pointer to the output buffer size.
1689 * Pointer to the error structure.
1692 * 0 on success, a negative errno value otherwise and rte_errno is set.
1695 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1696 size_t *size, struct rte_flow_error *error)
1698 struct rte_ether_hdr *eth = NULL;
1699 struct rte_vlan_hdr *vlan = NULL;
1700 struct rte_ipv4_hdr *ipv4 = NULL;
1701 struct rte_ipv6_hdr *ipv6 = NULL;
1702 struct rte_udp_hdr *udp = NULL;
1703 struct rte_vxlan_hdr *vxlan = NULL;
1704 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1705 struct rte_gre_hdr *gre = NULL;
1707 size_t temp_size = 0;
1710 return rte_flow_error_set(error, EINVAL,
1711 RTE_FLOW_ERROR_TYPE_ACTION,
1712 NULL, "invalid empty data");
1713 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1714 len = flow_dv_get_item_len(items->type);
1715 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1716 return rte_flow_error_set(error, EINVAL,
1717 RTE_FLOW_ERROR_TYPE_ACTION,
1718 (void *)items->type,
1719 "items total size is too big"
1720 " for encap action");
1721 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1722 switch (items->type) {
1723 case RTE_FLOW_ITEM_TYPE_ETH:
1724 eth = (struct rte_ether_hdr *)&buf[temp_size];
1726 case RTE_FLOW_ITEM_TYPE_VLAN:
1727 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1729 return rte_flow_error_set(error, EINVAL,
1730 RTE_FLOW_ERROR_TYPE_ACTION,
1731 (void *)items->type,
1732 "eth header not found");
1733 if (!eth->ether_type)
1734 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1736 case RTE_FLOW_ITEM_TYPE_IPV4:
1737 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1739 return rte_flow_error_set(error, EINVAL,
1740 RTE_FLOW_ERROR_TYPE_ACTION,
1741 (void *)items->type,
1742 "neither eth nor vlan"
1744 if (vlan && !vlan->eth_proto)
1745 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1746 else if (eth && !eth->ether_type)
1747 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1748 if (!ipv4->version_ihl)
1749 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1750 MLX5_ENCAP_IPV4_IHL_MIN;
1751 if (!ipv4->time_to_live)
1752 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1754 case RTE_FLOW_ITEM_TYPE_IPV6:
1755 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1757 return rte_flow_error_set(error, EINVAL,
1758 RTE_FLOW_ERROR_TYPE_ACTION,
1759 (void *)items->type,
1760 "neither eth nor vlan"
1762 if (vlan && !vlan->eth_proto)
1763 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1764 else if (eth && !eth->ether_type)
1765 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1766 if (!ipv6->vtc_flow)
1768 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1769 if (!ipv6->hop_limits)
1770 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1772 case RTE_FLOW_ITEM_TYPE_UDP:
1773 udp = (struct rte_udp_hdr *)&buf[temp_size];
1775 return rte_flow_error_set(error, EINVAL,
1776 RTE_FLOW_ERROR_TYPE_ACTION,
1777 (void *)items->type,
1778 "ip header not found");
1779 if (ipv4 && !ipv4->next_proto_id)
1780 ipv4->next_proto_id = IPPROTO_UDP;
1781 else if (ipv6 && !ipv6->proto)
1782 ipv6->proto = IPPROTO_UDP;
1784 case RTE_FLOW_ITEM_TYPE_VXLAN:
1785 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1787 return rte_flow_error_set(error, EINVAL,
1788 RTE_FLOW_ERROR_TYPE_ACTION,
1789 (void *)items->type,
1790 "udp header not found");
1792 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1793 if (!vxlan->vx_flags)
1795 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1797 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1798 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1800 return rte_flow_error_set(error, EINVAL,
1801 RTE_FLOW_ERROR_TYPE_ACTION,
1802 (void *)items->type,
1803 "udp header not found");
1804 if (!vxlan_gpe->proto)
1805 return rte_flow_error_set(error, EINVAL,
1806 RTE_FLOW_ERROR_TYPE_ACTION,
1807 (void *)items->type,
1808 "next protocol not found");
1811 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1812 if (!vxlan_gpe->vx_flags)
1813 vxlan_gpe->vx_flags =
1814 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1816 case RTE_FLOW_ITEM_TYPE_GRE:
1817 case RTE_FLOW_ITEM_TYPE_NVGRE:
1818 gre = (struct rte_gre_hdr *)&buf[temp_size];
1820 return rte_flow_error_set(error, EINVAL,
1821 RTE_FLOW_ERROR_TYPE_ACTION,
1822 (void *)items->type,
1823 "next protocol not found");
1825 return rte_flow_error_set(error, EINVAL,
1826 RTE_FLOW_ERROR_TYPE_ACTION,
1827 (void *)items->type,
1828 "ip header not found");
1829 if (ipv4 && !ipv4->next_proto_id)
1830 ipv4->next_proto_id = IPPROTO_GRE;
1831 else if (ipv6 && !ipv6->proto)
1832 ipv6->proto = IPPROTO_GRE;
1834 case RTE_FLOW_ITEM_TYPE_VOID:
1837 return rte_flow_error_set(error, EINVAL,
1838 RTE_FLOW_ERROR_TYPE_ACTION,
1839 (void *)items->type,
1840 "unsupported item type");
1850 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1852 struct rte_ether_hdr *eth = NULL;
1853 struct rte_vlan_hdr *vlan = NULL;
1854 struct rte_ipv6_hdr *ipv6 = NULL;
1855 struct rte_udp_hdr *udp = NULL;
1859 eth = (struct rte_ether_hdr *)data;
1860 next_hdr = (char *)(eth + 1);
1861 proto = RTE_BE16(eth->ether_type);
1864 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1865 vlan = (struct rte_vlan_hdr *)next_hdr;
1866 proto = RTE_BE16(vlan->eth_proto);
1867 next_hdr += sizeof(struct rte_vlan_hdr);
1870 /* HW calculates the IPv4 checksum; no need to proceed. */
1871 if (proto == RTE_ETHER_TYPE_IPV4)
1874 /* Non-IPv4/IPv6 headers are not supported. */
1875 if (proto != RTE_ETHER_TYPE_IPV6) {
1876 return rte_flow_error_set(error, ENOTSUP,
1877 RTE_FLOW_ERROR_TYPE_ACTION,
1878 NULL, "Cannot offload non IPv4/IPv6");
1881 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1883 /* Ignore non-UDP packets. */
1884 if (ipv6->proto != IPPROTO_UDP)
1887 udp = (struct rte_udp_hdr *)(ipv6 + 1);
1888 udp->dgram_cksum = 0;
1894 * Convert L2 encap action to DV specification.
1897 * Pointer to rte_eth_dev structure.
1899 * Pointer to action structure.
1900 * @param[in, out] dev_flow
1901 * Pointer to the mlx5_flow.
1902 * @param[in] transfer
1903 * Mark if the flow is E-Switch flow.
1905 * Pointer to the error structure.
1908 * 0 on success, a negative errno value otherwise and rte_errno is set.
1911 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1912 const struct rte_flow_action *action,
1913 struct mlx5_flow *dev_flow,
1915 struct rte_flow_error *error)
1917 const struct rte_flow_item *encap_data;
1918 const struct rte_flow_action_raw_encap *raw_encap_data;
1919 struct mlx5_flow_dv_encap_decap_resource res = {
1921 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1922 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1923 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1926 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1928 (const struct rte_flow_action_raw_encap *)action->conf;
1929 res.size = raw_encap_data->size;
1930 memcpy(res.buf, raw_encap_data->data, res.size);
1931 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1934 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1936 ((const struct rte_flow_action_vxlan_encap *)
1937 action->conf)->definition;
1940 ((const struct rte_flow_action_nvgre_encap *)
1941 action->conf)->definition;
1942 if (flow_dv_convert_encap_data(encap_data, res.buf,
1946 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1947 return rte_flow_error_set(error, EINVAL,
1948 RTE_FLOW_ERROR_TYPE_ACTION,
1949 NULL, "can't create L2 encap action");
1954 * Convert L2 decap action to DV specification.
1957 * Pointer to rte_eth_dev structure.
1958 * @param[in, out] dev_flow
1959 * Pointer to the mlx5_flow.
1960 * @param[in] transfer
1961 * Mark if the flow is E-Switch flow.
1963 * Pointer to the error structure.
1966 * 0 on success, a negative errno value otherwise and rte_errno is set.
1969 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1970 struct mlx5_flow *dev_flow,
1972 struct rte_flow_error *error)
1974 struct mlx5_flow_dv_encap_decap_resource res = {
1977 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1978 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1979 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1982 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1983 return rte_flow_error_set(error, EINVAL,
1984 RTE_FLOW_ERROR_TYPE_ACTION,
1985 NULL, "can't create L2 decap action");
1990 * Convert raw decap/encap (L3 tunnel) action to DV specification.
1993 * Pointer to rte_eth_dev structure.
1995 * Pointer to action structure.
1996 * @param[in, out] dev_flow
1997 * Pointer to the mlx5_flow.
1999 * Pointer to the flow attributes.
2001 * Pointer to the error structure.
2004 * 0 on success, a negative errno value otherwise and rte_errno is set.
2007 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2008 const struct rte_flow_action *action,
2009 struct mlx5_flow *dev_flow,
2010 const struct rte_flow_attr *attr,
2011 struct rte_flow_error *error)
2013 const struct rte_flow_action_raw_encap *encap_data;
2014 struct mlx5_flow_dv_encap_decap_resource res;
2016 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2017 res.size = encap_data->size;
2018 memcpy(res.buf, encap_data->data, res.size);
2019 res.reformat_type = attr->egress ?
2020 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2021 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2023 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2025 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2026 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2027 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2028 return rte_flow_error_set(error, EINVAL,
2029 RTE_FLOW_ERROR_TYPE_ACTION,
2030 NULL, "can't create encap action");
2035 * Create action push VLAN.
2038 * Pointer to rte_eth_dev structure.
2039 * @param[in] vlan_tag
2040 * the vlan tag to push to the Ethernet header.
2041 * @param[in, out] dev_flow
2042 * Pointer to the mlx5_flow.
2044 * Pointer to the flow attributes.
2046 * Pointer to the error structure.
2049 * 0 on success, a negative errno value otherwise and rte_errno is set.
2052 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2053 const struct rte_flow_attr *attr,
2054 const struct rte_vlan_hdr *vlan,
2055 struct mlx5_flow *dev_flow,
2056 struct rte_flow_error *error)
2058 struct mlx5_flow_dv_push_vlan_action_resource res;
2061 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2064 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2066 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2067 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2068 return flow_dv_push_vlan_action_resource_register
2069 (dev, &res, dev_flow, error);
2073 * Validate the modify-header actions.
2075 * @param[in] action_flags
2076 * Holds the actions detected until now.
2078 * Pointer to the modify action.
2080 * Pointer to error structure.
2083 * 0 on success, a negative errno value otherwise and rte_errno is set.
2086 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2087 const struct rte_flow_action *action,
2088 struct rte_flow_error *error)
2090 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2091 return rte_flow_error_set(error, EINVAL,
2092 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2093 NULL, "action configuration not set");
2094 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2095 return rte_flow_error_set(error, EINVAL,
2096 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2097 "can't have encap action before"
2103 * Validate the modify-header MAC address actions.
2105 * @param[in] action_flags
2106 * Holds the actions detected until now.
2108 * Pointer to the modify action.
2109 * @param[in] item_flags
2110 * Holds the items detected.
2112 * Pointer to error structure.
2115 * 0 on success, a negative errno value otherwise and rte_errno is set.
2118 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2119 const struct rte_flow_action *action,
2120 const uint64_t item_flags,
2121 struct rte_flow_error *error)
2125 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2127 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2128 return rte_flow_error_set(error, EINVAL,
2129 RTE_FLOW_ERROR_TYPE_ACTION,
2131 "no L2 item in pattern");
2137 * Validate the modify-header IPv4 address actions.
2139 * @param[in] action_flags
2140 * Holds the actions detected until now.
2142 * Pointer to the modify action.
2143 * @param[in] item_flags
2144 * Holds the items detected.
2146 * Pointer to error structure.
2149 * 0 on success, a negative errno value otherwise and rte_errno is set.
2152 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2153 const struct rte_flow_action *action,
2154 const uint64_t item_flags,
2155 struct rte_flow_error *error)
2159 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2161 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2162 return rte_flow_error_set(error, EINVAL,
2163 RTE_FLOW_ERROR_TYPE_ACTION,
2165 "no ipv4 item in pattern");
2171 * Validate the modify-header IPv6 address actions.
2173 * @param[in] action_flags
2174 * Holds the actions detected until now.
2176 * Pointer to the modify action.
2177 * @param[in] item_flags
2178 * Holds the items detected.
2180 * Pointer to error structure.
2183 * 0 on success, a negative errno value otherwise and rte_errno is set.
2186 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2187 const struct rte_flow_action *action,
2188 const uint64_t item_flags,
2189 struct rte_flow_error *error)
2193 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2195 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2196 return rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ACTION,
2199 "no ipv6 item in pattern");
2205 * Validate the modify-header TP actions.
2207 * @param[in] action_flags
2208 * Holds the actions detected until now.
2210 * Pointer to the modify action.
2211 * @param[in] item_flags
2212 * Holds the items detected.
2214 * Pointer to error structure.
2217 * 0 on success, a negative errno value otherwise and rte_errno is set.
2220 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2221 const struct rte_flow_action *action,
2222 const uint64_t item_flags,
2223 struct rte_flow_error *error)
2227 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2229 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2230 return rte_flow_error_set(error, EINVAL,
2231 RTE_FLOW_ERROR_TYPE_ACTION,
2232 NULL, "no transport layer "
2239 * Validate the modify-header actions of increment/decrement
2240 * TCP Sequence-number.
2242 * @param[in] action_flags
2243 * Holds the actions detected until now.
2245 * Pointer to the modify action.
2246 * @param[in] item_flags
2247 * Holds the items detected.
2249 * Pointer to error structure.
2252 * 0 on success, a negative errno value otherwise and rte_errno is set.
2255 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2256 const struct rte_flow_action *action,
2257 const uint64_t item_flags,
2258 struct rte_flow_error *error)
2262 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2264 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2265 return rte_flow_error_set(error, EINVAL,
2266 RTE_FLOW_ERROR_TYPE_ACTION,
2267 NULL, "no TCP item in"
2269 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2270 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2271 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2272 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2273 return rte_flow_error_set(error, EINVAL,
2274 RTE_FLOW_ERROR_TYPE_ACTION,
2276 "cannot decrease and increase"
2277 " TCP sequence number"
2278 " at the same time");
2284 * Validate the modify-header actions of increment/decrement
2285 * TCP Acknowledgment number.
2287 * @param[in] action_flags
2288 * Holds the actions detected until now.
2290 * Pointer to the modify action.
2291 * @param[in] item_flags
2292 * Holds the items detected.
2294 * Pointer to error structure.
2297 * 0 on success, a negative errno value otherwise and rte_errno is set.
2300 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2301 const struct rte_flow_action *action,
2302 const uint64_t item_flags,
2303 struct rte_flow_error *error)
2307 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2309 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2310 return rte_flow_error_set(error, EINVAL,
2311 RTE_FLOW_ERROR_TYPE_ACTION,
2312 NULL, "no TCP item in"
2314 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2315 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2316 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2317 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2318 return rte_flow_error_set(error, EINVAL,
2319 RTE_FLOW_ERROR_TYPE_ACTION,
2321 "cannot decrease and increase"
2322 " TCP acknowledgment number"
2323 " at the same time");
2329 * Validate the modify-header TTL actions.
2331 * @param[in] action_flags
2332 * Holds the actions detected until now.
2334 * Pointer to the modify action.
2335 * @param[in] item_flags
2336 * Holds the items detected.
2338 * Pointer to error structure.
2341 * 0 on success, a negative errno value otherwise and rte_errno is set.
2344 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2345 const struct rte_flow_action *action,
2346 const uint64_t item_flags,
2347 struct rte_flow_error *error)
2351 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2353 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2354 return rte_flow_error_set(error, EINVAL,
2355 RTE_FLOW_ERROR_TYPE_ACTION,
2357 "no IP protocol in pattern");
2363 * Validate jump action.
2366 * Pointer to the jump action.
2367 * @param[in] action_flags
2368 * Holds the actions detected until now.
2369 * @param[in] attributes
2370 * Pointer to flow attributes
2371 * @param[in] external
2372 * Action belongs to flow rule created by request external to PMD.
2374 * Pointer to error structure.
2377 * 0 on success, a negative errno value otherwise and rte_errno is set.
2380 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2381 uint64_t action_flags,
2382 const struct rte_flow_attr *attributes,
2383 bool external, struct rte_flow_error *error)
2385 uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2387 uint32_t target_group, table;
2390 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2391 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2392 return rte_flow_error_set(error, EINVAL,
2393 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2394 "can't have 2 fate actions in"
2397 return rte_flow_error_set(error, EINVAL,
2398 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2399 NULL, "action configuration not set");
2401 ((const struct rte_flow_action_jump *)action->conf)->group;
2402 ret = mlx5_flow_group_to_table(attributes, external, target_group,
2406 if (table >= max_group)
2407 return rte_flow_error_set(error, EINVAL,
2408 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2409 "target group index out of range");
2410 if (attributes->group >= target_group)
2411 return rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2413 "target group must be higher than"
2414 " the current flow group");
2419 * Validate the port_id action.
2422 * Pointer to rte_eth_dev structure.
2423 * @param[in] action_flags
2424 * Bit-fields that holds the actions detected until now.
2426 * Port_id RTE action structure.
2428 * Attributes of flow that includes this action.
2430 * Pointer to error structure.
2433 * 0 on success, a negative errno value otherwise and rte_errno is set.
2436 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2437 uint64_t action_flags,
2438 const struct rte_flow_action *action,
2439 const struct rte_flow_attr *attr,
2440 struct rte_flow_error *error)
2442 const struct rte_flow_action_port_id *port_id;
2444 uint16_t esw_domain_id;
2445 uint16_t act_port_domain_id;
2448 if (!attr->transfer)
2449 return rte_flow_error_set(error, ENOTSUP,
2450 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452 "port id action is valid in transfer"
2454 if (!action || !action->conf)
2455 return rte_flow_error_set(error, ENOTSUP,
2456 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2458 "port id action parameters must be"
2460 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2461 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2462 return rte_flow_error_set(error, EINVAL,
2463 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2464 "can have only one fate actions in"
2466 ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2467 &esw_domain_id, NULL);
2469 return rte_flow_error_set(error, -ret,
2470 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2472 "failed to obtain E-Switch info");
2473 port_id = action->conf;
2474 port = port_id->original ? dev->data->port_id : port_id->id;
2475 ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2477 return rte_flow_error_set
2479 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2480 "failed to obtain E-Switch port id for port");
2481 if (act_port_domain_id != esw_domain_id)
2482 return rte_flow_error_set
2484 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2485 "port does not belong to"
2486 " E-Switch being configured");
2491 * Find existing modify-header resource or create and register a new one.
2493 * @param[in, out] dev
2494 * Pointer to rte_eth_dev structure.
2495 * @param[in, out] resource
2496 * Pointer to modify-header resource.
2497 * @param[in, out] dev_flow
2498 * Pointer to the dev_flow.
2500 * pointer to error structure.
2503 * 0 on success, otherwise -errno and errno is set.
2506 flow_dv_modify_hdr_resource_register
2507 (struct rte_eth_dev *dev,
2508 struct mlx5_flow_dv_modify_hdr_resource *resource,
2509 struct mlx5_flow *dev_flow,
2510 struct rte_flow_error *error)
2512 struct mlx5_priv *priv = dev->data->dev_private;
2513 struct mlx5_ibv_shared *sh = priv->sh;
2514 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2515 struct mlx5dv_dr_domain *ns;
2517 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2518 ns = sh->fdb_domain;
2519 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2524 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2525 /* Lookup a matching resource from cache. */
2526 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2527 if (resource->ft_type == cache_resource->ft_type &&
2528 resource->actions_num == cache_resource->actions_num &&
2529 resource->flags == cache_resource->flags &&
2530 !memcmp((const void *)resource->actions,
2531 (const void *)cache_resource->actions,
2532 (resource->actions_num *
2533 sizeof(resource->actions[0])))) {
2534 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2535 (void *)cache_resource,
2536 rte_atomic32_read(&cache_resource->refcnt));
2537 rte_atomic32_inc(&cache_resource->refcnt);
2538 dev_flow->dv.modify_hdr = cache_resource;
2542 /* Register new modify-header resource. */
2543 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2544 if (!cache_resource)
2545 return rte_flow_error_set(error, ENOMEM,
2546 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2547 "cannot allocate resource memory");
2548 *cache_resource = *resource;
2549 cache_resource->verbs_action =
2550 mlx5_glue->dv_create_flow_action_modify_header
2551 (sh->ctx, cache_resource->ft_type,
2552 ns, cache_resource->flags,
2553 cache_resource->actions_num *
2554 sizeof(cache_resource->actions[0]),
2555 (uint64_t *)cache_resource->actions);
2556 if (!cache_resource->verbs_action) {
2557 rte_free(cache_resource);
2558 return rte_flow_error_set(error, ENOMEM,
2559 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2560 NULL, "cannot create action");
2562 rte_atomic32_init(&cache_resource->refcnt);
2563 rte_atomic32_inc(&cache_resource->refcnt);
2564 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2565 dev_flow->dv.modify_hdr = cache_resource;
2566 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2567 (void *)cache_resource,
2568 rte_atomic32_read(&cache_resource->refcnt));
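/*
 * Illustrative note: the cache key compared above is the tuple
 * (ft_type, actions_num, flags, actions[0..actions_num - 1]). Two flows
 * requesting an identical modify-header command sequence, e.g. both setting
 * the same IPv4 source address on NIC RX, therefore share a single verbs
 * action object and only increment its reference count.
 */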
2572 #define MLX5_CNT_CONTAINER_RESIZE 64
2575 * Get or create a flow counter.
2578 * Pointer to the Ethernet device structure.
2580 * Indicate if this counter is shared with other flows.
2582 * Counter identifier.
2585 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
2587 static struct mlx5_flow_counter *
2588 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2591 struct mlx5_priv *priv = dev->data->dev_private;
2592 struct mlx5_flow_counter *cnt = NULL;
2593 struct mlx5_devx_obj *dcs = NULL;
2595 if (!priv->config.devx) {
2596 rte_errno = ENOTSUP;
2600 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2601 if (cnt->shared && cnt->id == id) {
2607 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2610 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2612 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
2616 struct mlx5_flow_counter tmpl = {
2622 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2624 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
2630 TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2635 * Release a flow counter.
2638 * Pointer to the Ethernet device structure.
2639 * @param[in] counter
2640 * Pointer to the counter handler.
2643 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2644 struct mlx5_flow_counter *counter)
2646 struct mlx5_priv *priv = dev->data->dev_private;
2650 if (--counter->ref_cnt == 0) {
2651 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2652 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2658 * Query a devx flow counter.
2661 * Pointer to the Ethernet device structure.
2663 * Pointer to the flow counter.
2665 * The statistics value of packets.
2667 * The statistics value of bytes.
2670 * 0 on success, otherwise a negative errno value and rte_errno is set.
2673 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2674 struct mlx5_flow_counter *cnt, uint64_t *pkts,
2677 return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2682 * Get a pool by a counter.
2685 * Pointer to the counter.
2690 static struct mlx5_flow_counter_pool *
2691 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2694 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2695 return (struct mlx5_flow_counter_pool *)cnt - 1;
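/*
 * Illustrative sketch of the pointer arithmetic above for single (non-batch)
 * counters. The pool's counters_raw[] array is laid out immediately after
 * struct mlx5_flow_counter_pool (see the sizing in flow_dv_pool_create()),
 * and the counter DevX IDs inside one pool are contiguous, so:
 *
 *   index = cnt->dcs->id % MLX5_COUNTERS_PER_POOL;   // position in the pool
 *   base  = cnt - index;                             // &pool->counters_raw[0]
 *   pool  = (struct mlx5_flow_counter_pool *)base - 1;
 *
 * Example with hypothetical numbers and MLX5_COUNTERS_PER_POOL == 512:
 * dcs->id == 0x2003 gives index 3, so stepping back 3 counters and then one
 * pool struct yields the owning pool.
 */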
2701 * Get a pool by devx counter ID.
2704 * Pointer to the counter container.
2706 * The counter devx ID.
2709 * The counter pool pointer if it exists, NULL otherwise.
2711 static struct mlx5_flow_counter_pool *
2712 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2714 struct mlx5_flow_counter_pool *pool;
2716 TAILQ_FOREACH(pool, &cont->pool_list, next) {
2717 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2718 MLX5_COUNTERS_PER_POOL;
2720 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2727 * Allocate new memory for the counter values, wrapped by all the needed management structures.
2731 * Pointer to the Ethernet device structure.
2733 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
2736 * The new memory management pointer on success, otherwise NULL and rte_errno
2739 static struct mlx5_counter_stats_mem_mng *
2740 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2742 struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2743 (dev->data->dev_private))->sh;
2744 struct mlx5_devx_mkey_attr mkey_attr;
2745 struct mlx5_counter_stats_mem_mng *mem_mng;
2746 volatile struct flow_counter_stats *raw_data;
2747 int size = (sizeof(struct flow_counter_stats) *
2748 MLX5_COUNTERS_PER_POOL +
2749 sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2750 sizeof(struct mlx5_counter_stats_mem_mng);
2751 uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2758 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2759 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2760 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2761 IBV_ACCESS_LOCAL_WRITE);
2762 if (!mem_mng->umem) {
2767 mkey_attr.addr = (uintptr_t)mem;
2768 mkey_attr.size = size;
2769 mkey_attr.umem_id = mem_mng->umem->umem_id;
2770 mkey_attr.pd = sh->pdn;
2771 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2773 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2778 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2779 raw_data = (volatile struct flow_counter_stats *)mem;
2780 for (i = 0; i < raws_n; ++i) {
2781 mem_mng->raws[i].mem_mng = mem_mng;
2782 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2784 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
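/*
 * Illustrative layout of the single allocation made above (not to scale):
 *
 *   mem
 *   +------------------------------------+--------------------------+------+
 *   | raws_n * MLX5_COUNTERS_PER_POOL    | raws_n *                 | mem_ |
 *   | struct flow_counter_stats          | struct mlx5_counter_     | mng  |
 *   | (raw counter values, registered    | stats_raw descriptors    |      |
 *   |  as umem + mkey for DevX queries)  |                          |      |
 *   +------------------------------------+--------------------------+------+
 *
 * Only the first region is registered with the device; the descriptors and
 * the management header share the allocation purely for convenience.
 */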
2789 * Resize a counter container.
2792 * Pointer to the Ethernet device structure.
2794 * Whether the pool is for counter that was allocated by batch command.
2797 * The new container pointer on success, otherwise NULL and rte_errno is set.
2799 static struct mlx5_pools_container *
2800 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2802 struct mlx5_priv *priv = dev->data->dev_private;
2803 struct mlx5_pools_container *cont =
2804 MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2805 struct mlx5_pools_container *new_cont =
2806 MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2807 struct mlx5_counter_stats_mem_mng *mem_mng;
2808 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2809 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2812 if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2813 /* The last resize still hasn't been detected by the host thread. */
2817 new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2818 if (!new_cont->pools) {
2823 memcpy(new_cont->pools, cont->pools, cont->n *
2824 sizeof(struct mlx5_flow_counter_pool *));
2825 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2826 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2828 rte_free(new_cont->pools);
2831 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2832 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2833 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2835 new_cont->n = resize;
2836 rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2837 TAILQ_INIT(&new_cont->pool_list);
2838 TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2839 new_cont->init_mem_mng = mem_mng;
2841 /* Flip the master container. */
2842 priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
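/*
 * Illustrative note: each (batch) container is double buffered. Assuming
 * MLX5_CNT_CONTAINER() selects the currently active copy and
 * MLX5_CNT_CONTAINER_UNUSED() the spare one (see the macro definitions in
 * mlx5.h), the resize builds the enlarged pools[] array in the spare copy
 * and then publishes it by flipping cmng.mhi[batch], so the asynchronous
 * query thread never sees a partially resized array. The early return above
 * bails out if the query thread has not yet picked up the previous flip.
 */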
2847 * Query a devx flow counter.
2850 * Pointer to the Ethernet device structure.
2852 * Pointer to the flow counter.
2854 * The statistics value of packets.
2856 * The statistics value of bytes.
2859 * 0 on success, otherwise a negative errno value and rte_errno is set.
2862 _flow_dv_query_count(struct rte_eth_dev *dev,
2863 struct mlx5_flow_counter *cnt, uint64_t *pkts,
2866 struct mlx5_priv *priv = dev->data->dev_private;
2867 struct mlx5_flow_counter_pool *pool =
2868 flow_dv_counter_pool_get(cnt);
2869 int offset = cnt - &pool->counters_raw[0];
2871 if (priv->counter_fallback)
2872 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2874 rte_spinlock_lock(&pool->sl);
2876 * Single-counter allocation may produce an ID smaller than the one
2877 * currently being read by the host thread in parallel.
2878 * In this case the new counter values must be reported as 0.
2880 if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2884 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2885 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2887 rte_spinlock_unlock(&pool->sl);
2892 * Create and initialize a new counter pool.
2895 * Pointer to the Ethernet device structure.
2897 * The devX counter handle.
2899 * Whether the pool is for counter that was allocated by batch command.
2902 * A new pool pointer on success, NULL otherwise and rte_errno is set.
2904 static struct mlx5_flow_counter_pool *
2905 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2908 struct mlx5_priv *priv = dev->data->dev_private;
2909 struct mlx5_flow_counter_pool *pool;
2910 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2912 int16_t n_valid = rte_atomic16_read(&cont->n_valid);
2915 if (cont->n == n_valid) {
2916 cont = flow_dv_container_resize(dev, batch);
2920 size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
2921 sizeof(struct mlx5_flow_counter);
2922 pool = rte_calloc(__func__, 1, size, 0);
2927 pool->min_dcs = dcs;
2928 pool->raw = cont->init_mem_mng->raws + n_valid %
2929 MLX5_CNT_CONTAINER_RESIZE;
2930 pool->raw_hw = NULL;
2931 rte_spinlock_init(&pool->sl);
2933 * The generation of the newly allocated counters in this pool is 0;
2934 * setting the pool query generation to 2 makes all of them valid for allocation.
2936 rte_atomic64_set(&pool->query_gen, 0x2);
2937 TAILQ_INIT(&pool->counters);
2938 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2939 cont->pools[n_valid] = pool;
2940 /* Pool initialization must be visible before the host thread accesses it. */
2942 rte_atomic16_add(&cont->n_valid, 1);
2947 * Prepare a new counter and/or a new counter pool.
2950 * Pointer to the Ethernet device structure.
2951 * @param[out] cnt_free
2952 * Where to put the pointer of a new counter.
2954 * Whether the pool is for counter that was allocated by batch command.
2957 * The free counter pool pointer and @p cnt_free is set on success,
2958 * NULL otherwise and rte_errno is set.
2960 static struct mlx5_flow_counter_pool *
2961 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
2962 struct mlx5_flow_counter **cnt_free,
2965 struct mlx5_priv *priv = dev->data->dev_private;
2966 struct mlx5_flow_counter_pool *pool;
2967 struct mlx5_devx_obj *dcs = NULL;
2968 struct mlx5_flow_counter *cnt;
2972 /* bulk_bitmap must be 0 for single counter allocation. */
2973 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2976 pool = flow_dv_find_pool_by_id
2977 (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
2979 pool = flow_dv_pool_create(dev, dcs, batch);
2981 mlx5_devx_cmd_destroy(dcs);
2984 } else if (dcs->id < pool->min_dcs->id) {
2985 rte_atomic64_set(&pool->a64_dcs,
2986 (int64_t)(uintptr_t)dcs);
2988 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
2989 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2994 /* bulk_bitmap is expressed in units of 128 counters. */
2995 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
2996 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
2998 rte_errno = ENODATA;
3001 pool = flow_dv_pool_create(dev, dcs, batch);
3003 mlx5_devx_cmd_destroy(dcs);
3006 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3007 cnt = &pool->counters_raw[i];
3009 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3011 *cnt_free = &pool->counters_raw[0];
3016 * Search for an existing shared counter.
3019 * Pointer to the relevant counter pool container.
3021 * The shared counter ID to search.
3024 * NULL if it does not exist, otherwise a pointer to the shared counter.
3026 static struct mlx5_flow_counter *
3027 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3030 struct mlx5_flow_counter *cnt;
3031 struct mlx5_flow_counter_pool *pool;
3034 TAILQ_FOREACH(pool, &cont->pool_list, next) {
3035 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3036 cnt = &pool->counters_raw[i];
3037 if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3045 * Allocate a flow counter.
3048 * Pointer to the Ethernet device structure.
3050 * Indicate if this counter is shared with other flows.
3052 * Counter identifier.
3054 * Counter flow group.
3057 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
3059 static struct mlx5_flow_counter *
3060 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3063 struct mlx5_priv *priv = dev->data->dev_private;
3064 struct mlx5_flow_counter_pool *pool = NULL;
3065 struct mlx5_flow_counter *cnt_free = NULL;
3067 * Currently a group 0 flow counter cannot be assigned to a flow if it is
3068 * not the first one in the batch counter allocation, so it is better
3069 * to allocate counters one by one for these flows in a separate container.
3071 * A counter can be shared between different groups, so shared counters
3072 * must be taken from the single-counter container.
3074 uint32_t batch = (group && !shared) ? 1 : 0;
3075 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3078 if (priv->counter_fallback)
3079 return flow_dv_counter_alloc_fallback(dev, shared, id);
3080 if (!priv->config.devx) {
3081 rte_errno = ENOTSUP;
3085 cnt_free = flow_dv_counter_shared_search(cont, id);
3087 if (cnt_free->ref_cnt + 1 == 0) {
3091 cnt_free->ref_cnt++;
3095 /* Pools with free counters are at the start of the list. */
3096 TAILQ_FOREACH(pool, &cont->pool_list, next) {
3098 * The free counter's reset values must be updated between the
3099 * counter release and the counter allocation, so at least one
3100 * query must be done in that window; ensure it by saving the
3101 * query generation at release time.
3102 * The free list is sorted by generation, so if the first entry
3103 * is not updated, none of the others are updated either.
3106 cnt_free = TAILQ_FIRST(&pool->counters);
3107 if (cnt_free && cnt_free->query_gen + 1 <
3108 rte_atomic64_read(&pool->query_gen))
3113 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3117 cnt_free->batch = batch;
3118 /* Create a DV counter action only in the first time usage. */
3119 if (!cnt_free->action) {
3121 struct mlx5_devx_obj *dcs;
3124 offset = cnt_free - &pool->counters_raw[0];
3125 dcs = pool->min_dcs;
3128 dcs = cnt_free->dcs;
3130 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3132 if (!cnt_free->action) {
3137 /* Update the counter reset values. */
3138 if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3141 cnt_free->shared = shared;
3142 cnt_free->ref_cnt = 1;
3144 if (!priv->sh->cmng.query_thread_on)
3145 /* Start the asynchronous batch query by the host thread. */
3146 mlx5_set_query_alarm(priv->sh);
3147 TAILQ_REMOVE(&pool->counters, cnt_free, next);
3148 if (TAILQ_EMPTY(&pool->counters)) {
3149 /* Move the pool to the end of the container pool list. */
3150 TAILQ_REMOVE(&cont->pool_list, pool, next);
3151 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
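/*
 * Illustrative decision table for the batch selection above, derived from
 * "batch = (group && !shared) ? 1 : 0":
 *
 *   group == 0, shared == 0  ->  batch 0 (single-counter container)
 *   group == 0, shared == 1  ->  batch 0 (single-counter container)
 *   group  > 0, shared == 1  ->  batch 0 (shared counters are only searched
 *                                         in the single-counter container)
 *   group  > 0, shared == 0  ->  batch 1 (bulk-allocated container)
 */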
3157 * Release a flow counter.
3160 * Pointer to the Ethernet device structure.
3161 * @param[in] counter
3162 * Pointer to the counter handler.
3165 flow_dv_counter_release(struct rte_eth_dev *dev,
3166 struct mlx5_flow_counter *counter)
3168 struct mlx5_priv *priv = dev->data->dev_private;
3172 if (priv->counter_fallback) {
3173 flow_dv_counter_release_fallback(dev, counter);
3176 if (--counter->ref_cnt == 0) {
3177 struct mlx5_flow_counter_pool *pool =
3178 flow_dv_counter_pool_get(counter);
3180 /* Put the counter at the end; it is the most recently updated one. */
3181 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3182 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3187 * Verify the @p attributes will be correctly understood by the NIC and store
3188 * them in the @p flow if everything is correct.
3191 * Pointer to dev struct.
3192 * @param[in] attributes
3193 * Pointer to flow attributes
3194 * @param[in] external
3195 * This flow rule is created by request external to PMD.
3197 * Pointer to error structure.
3200 * 0 on success, a negative errno value otherwise and rte_errno is set.
3203 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3204 const struct rte_flow_attr *attributes,
3205 bool external __rte_unused,
3206 struct rte_flow_error *error)
3208 struct mlx5_priv *priv = dev->data->dev_private;
3209 uint32_t priority_max = priv->config.flow_prio - 1;
3211 #ifndef HAVE_MLX5DV_DR
3212 if (attributes->group)
3213 return rte_flow_error_set(error, ENOTSUP,
3214 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3216 "groups are not supported");
3218 uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3223 ret = mlx5_flow_group_to_table(attributes, external,
3228 if (table >= max_group)
3229 return rte_flow_error_set(error, EINVAL,
3230 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3231 "group index out of range");
3233 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3234 attributes->priority >= priority_max)
3235 return rte_flow_error_set(error, ENOTSUP,
3236 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3238 "priority out of range");
3239 if (attributes->transfer) {
3240 if (!priv->config.dv_esw_en)
3241 return rte_flow_error_set
3243 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3244 "E-Switch dr is not supported");
3245 if (!(priv->representor || priv->master))
3246 return rte_flow_error_set
3247 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3248 NULL, "E-Switch configuration can only be"
3249 " done by a master or a representor device");
3250 if (attributes->egress)
3251 return rte_flow_error_set
3253 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3254 "egress is not supported");
3256 if (!(attributes->egress ^ attributes->ingress))
3257 return rte_flow_error_set(error, ENOTSUP,
3258 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3259 "must specify exactly one of "
3260 "ingress or egress");
3265 * Internal validation function for validating both actions and items.
3268 * Pointer to the rte_eth_dev structure.
3270 * Pointer to the flow attributes.
3272 * Pointer to the list of items.
3273 * @param[in] actions
3274 * Pointer to the list of actions.
3275 * @param[in] external
3276 * This flow rule is created by request external to PMD.
3278 * Pointer to the error structure.
3281 * 0 on success, a negative errno value otherwise and rte_errno is set.
3284 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3285 const struct rte_flow_item items[],
3286 const struct rte_flow_action actions[],
3287 bool external, struct rte_flow_error *error)
3290 uint64_t action_flags = 0;
3291 uint64_t item_flags = 0;
3292 uint64_t last_item = 0;
3293 uint8_t next_protocol = 0xff;
3295 const struct rte_flow_item *gre_item = NULL;
3296 struct rte_flow_item_tcp nic_tcp_mask = {
3299 .src_port = RTE_BE16(UINT16_MAX),
3300 .dst_port = RTE_BE16(UINT16_MAX),
3306 ret = flow_dv_validate_attributes(dev, attr, external, error);
3309 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3310 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3311 switch (items->type) {
3312 case RTE_FLOW_ITEM_TYPE_VOID:
3314 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3315 ret = flow_dv_validate_item_port_id
3316 (dev, items, attr, item_flags, error);
3319 last_item = MLX5_FLOW_ITEM_PORT_ID;
3321 case RTE_FLOW_ITEM_TYPE_ETH:
3322 ret = mlx5_flow_validate_item_eth(items, item_flags,
3326 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3327 MLX5_FLOW_LAYER_OUTER_L2;
3329 case RTE_FLOW_ITEM_TYPE_VLAN:
3330 ret = mlx5_flow_validate_item_vlan(items, item_flags,
3334 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3335 MLX5_FLOW_LAYER_OUTER_VLAN;
3337 case RTE_FLOW_ITEM_TYPE_IPV4:
3338 mlx5_flow_tunnel_ip_check(items, next_protocol,
3339 &item_flags, &tunnel);
3340 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3344 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3345 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3346 if (items->mask != NULL &&
3347 ((const struct rte_flow_item_ipv4 *)
3348 items->mask)->hdr.next_proto_id) {
3350 ((const struct rte_flow_item_ipv4 *)
3351 (items->spec))->hdr.next_proto_id;
3353 ((const struct rte_flow_item_ipv4 *)
3354 (items->mask))->hdr.next_proto_id;
3356 /* Reset for inner layer. */
3357 next_protocol = 0xff;
3360 case RTE_FLOW_ITEM_TYPE_IPV6:
3361 mlx5_flow_tunnel_ip_check(items, next_protocol,
3362 &item_flags, &tunnel);
3363 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3367 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3368 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3369 if (items->mask != NULL &&
3370 ((const struct rte_flow_item_ipv6 *)
3371 items->mask)->hdr.proto) {
3373 ((const struct rte_flow_item_ipv6 *)
3374 items->spec)->hdr.proto;
3376 ((const struct rte_flow_item_ipv6 *)
3377 items->mask)->hdr.proto;
3379 /* Reset for inner layer. */
3380 next_protocol = 0xff;
3383 case RTE_FLOW_ITEM_TYPE_TCP:
3384 ret = mlx5_flow_validate_item_tcp
3391 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3392 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3394 case RTE_FLOW_ITEM_TYPE_UDP:
3395 ret = mlx5_flow_validate_item_udp(items, item_flags,
3400 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3401 MLX5_FLOW_LAYER_OUTER_L4_UDP;
3403 case RTE_FLOW_ITEM_TYPE_GRE:
3404 ret = mlx5_flow_validate_item_gre(items, item_flags,
3405 next_protocol, error);
3409 last_item = MLX5_FLOW_LAYER_GRE;
3411 case RTE_FLOW_ITEM_TYPE_NVGRE:
3412 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3417 last_item = MLX5_FLOW_LAYER_NVGRE;
3419 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3420 ret = mlx5_flow_validate_item_gre_key
3421 (items, item_flags, gre_item, error);
3424 last_item = MLX5_FLOW_LAYER_GRE_KEY;
3426 case RTE_FLOW_ITEM_TYPE_VXLAN:
3427 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3431 last_item = MLX5_FLOW_LAYER_VXLAN;
3433 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3434 ret = mlx5_flow_validate_item_vxlan_gpe(items,
3439 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3441 case RTE_FLOW_ITEM_TYPE_MPLS:
3442 ret = mlx5_flow_validate_item_mpls(dev, items,
3447 last_item = MLX5_FLOW_LAYER_MPLS;
3449 case RTE_FLOW_ITEM_TYPE_META:
3450 ret = flow_dv_validate_item_meta(dev, items, attr,
3454 last_item = MLX5_FLOW_ITEM_METADATA;
3456 case RTE_FLOW_ITEM_TYPE_ICMP:
3457 ret = mlx5_flow_validate_item_icmp(items, item_flags,
3462 last_item = MLX5_FLOW_LAYER_ICMP;
3464 case RTE_FLOW_ITEM_TYPE_ICMP6:
3465 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3470 last_item = MLX5_FLOW_LAYER_ICMP6;
3473 return rte_flow_error_set(error, ENOTSUP,
3474 RTE_FLOW_ERROR_TYPE_ITEM,
3475 NULL, "item not supported");
3477 item_flags |= last_item;
3479 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3480 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3481 return rte_flow_error_set(error, ENOTSUP,
3482 RTE_FLOW_ERROR_TYPE_ACTION,
3483 actions, "too many actions");
3484 switch (actions->type) {
3485 case RTE_FLOW_ACTION_TYPE_VOID:
3487 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3488 ret = flow_dv_validate_action_port_id(dev,
3495 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3498 case RTE_FLOW_ACTION_TYPE_FLAG:
3499 ret = mlx5_flow_validate_action_flag(action_flags,
3503 action_flags |= MLX5_FLOW_ACTION_FLAG;
3506 case RTE_FLOW_ACTION_TYPE_MARK:
3507 ret = mlx5_flow_validate_action_mark(actions,
3512 action_flags |= MLX5_FLOW_ACTION_MARK;
3515 case RTE_FLOW_ACTION_TYPE_DROP:
3516 ret = mlx5_flow_validate_action_drop(action_flags,
3520 action_flags |= MLX5_FLOW_ACTION_DROP;
3523 case RTE_FLOW_ACTION_TYPE_QUEUE:
3524 ret = mlx5_flow_validate_action_queue(actions,
3529 action_flags |= MLX5_FLOW_ACTION_QUEUE;
3532 case RTE_FLOW_ACTION_TYPE_RSS:
3533 ret = mlx5_flow_validate_action_rss(actions,
3539 action_flags |= MLX5_FLOW_ACTION_RSS;
3542 case RTE_FLOW_ACTION_TYPE_COUNT:
3543 ret = flow_dv_validate_action_count(dev, error);
3546 action_flags |= MLX5_FLOW_ACTION_COUNT;
3549 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3550 if (flow_dv_validate_action_pop_vlan(dev,
3556 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3559 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3560 ret = flow_dv_validate_action_push_vlan(action_flags,
3565 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3568 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3569 ret = flow_dv_validate_action_set_vlan_pcp
3570 (action_flags, actions, error);
3573 /* Count PCP with push_vlan command. */
3575 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3576 ret = flow_dv_validate_action_set_vlan_vid
3577 (item_flags, actions, error);
3580 /* Count VID with push_vlan command. */
3582 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3583 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3584 ret = flow_dv_validate_action_l2_encap(action_flags,
3589 action_flags |= actions->type ==
3590 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3591 MLX5_FLOW_ACTION_VXLAN_ENCAP :
3592 MLX5_FLOW_ACTION_NVGRE_ENCAP;
3595 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3596 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3597 ret = flow_dv_validate_action_l2_decap(action_flags,
3601 action_flags |= actions->type ==
3602 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3603 MLX5_FLOW_ACTION_VXLAN_DECAP :
3604 MLX5_FLOW_ACTION_NVGRE_DECAP;
3607 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3608 ret = flow_dv_validate_action_raw_encap(action_flags,
3613 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3616 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3617 ret = flow_dv_validate_action_raw_decap(action_flags,
3622 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3625 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3626 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3627 ret = flow_dv_validate_action_modify_mac(action_flags,
3633 /* Count all modify-header actions as one action. */
3634 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3636 action_flags |= actions->type ==
3637 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3638 MLX5_FLOW_ACTION_SET_MAC_SRC :
3639 MLX5_FLOW_ACTION_SET_MAC_DST;
3642 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3643 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3644 ret = flow_dv_validate_action_modify_ipv4(action_flags,
3650 /* Count all modify-header actions as one action. */
3651 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3653 action_flags |= actions->type ==
3654 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3655 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3656 MLX5_FLOW_ACTION_SET_IPV4_DST;
3658 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3659 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3660 ret = flow_dv_validate_action_modify_ipv6(action_flags,
3666 /* Count all modify-header actions as one action. */
3667 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3669 action_flags |= actions->type ==
3670 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3671 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3672 MLX5_FLOW_ACTION_SET_IPV6_DST;
3674 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3675 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3676 ret = flow_dv_validate_action_modify_tp(action_flags,
3682 /* Count all modify-header actions as one action. */
3683 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3685 action_flags |= actions->type ==
3686 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3687 MLX5_FLOW_ACTION_SET_TP_SRC :
3688 MLX5_FLOW_ACTION_SET_TP_DST;
3690 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3691 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3692 ret = flow_dv_validate_action_modify_ttl(action_flags,
3698 /* Count all modify-header actions as one action. */
3699 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3701 action_flags |= actions->type ==
3702 RTE_FLOW_ACTION_TYPE_SET_TTL ?
3703 MLX5_FLOW_ACTION_SET_TTL :
3704 MLX5_FLOW_ACTION_DEC_TTL;
3706 case RTE_FLOW_ACTION_TYPE_JUMP:
3707 ret = flow_dv_validate_action_jump(actions,
3714 action_flags |= MLX5_FLOW_ACTION_JUMP;
3716 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3717 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3718 ret = flow_dv_validate_action_modify_tcp_seq
3725 /* Count all modify-header actions as one action. */
3726 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3728 action_flags |= actions->type ==
3729 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3730 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3731 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3733 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3734 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3735 ret = flow_dv_validate_action_modify_tcp_ack
3742 /* Count all modify-header actions as one action. */
3743 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3745 action_flags |= actions->type ==
3746 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3747 MLX5_FLOW_ACTION_INC_TCP_ACK :
3748 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3751 return rte_flow_error_set(error, ENOTSUP,
3752 RTE_FLOW_ERROR_TYPE_ACTION,
3754 "action not supported");
3757 if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3758 (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3759 return rte_flow_error_set(error, ENOTSUP,
3760 RTE_FLOW_ERROR_TYPE_ACTION,
3762 "can't have vxlan and vlan"
3763 " actions in the same rule");
3764 /* E-Switch has a few restrictions on items and actions. */
3765 if (attr->transfer) {
3766 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3767 return rte_flow_error_set(error, ENOTSUP,
3768 RTE_FLOW_ERROR_TYPE_ACTION,
3770 "unsupported action FLAG");
3771 if (action_flags & MLX5_FLOW_ACTION_MARK)
3772 return rte_flow_error_set(error, ENOTSUP,
3773 RTE_FLOW_ERROR_TYPE_ACTION,
3775 "unsupported action MARK");
3776 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3777 return rte_flow_error_set(error, ENOTSUP,
3778 RTE_FLOW_ERROR_TYPE_ACTION,
3780 "unsupported action QUEUE");
3781 if (action_flags & MLX5_FLOW_ACTION_RSS)
3782 return rte_flow_error_set(error, ENOTSUP,
3783 RTE_FLOW_ERROR_TYPE_ACTION,
3785 "unsupported action RSS");
3786 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3787 return rte_flow_error_set(error, EINVAL,
3788 RTE_FLOW_ERROR_TYPE_ACTION,
3790 "no fate action is found");
3792 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3793 return rte_flow_error_set(error, EINVAL,
3794 RTE_FLOW_ERROR_TYPE_ACTION,
3796 "no fate action is found");
3802 * Internal preparation function. Allocates the DV flow size;
3803 * this size is constant.
3806 * Pointer to the flow attributes.
3808 * Pointer to the list of items.
3809 * @param[in] actions
3810 * Pointer to the list of actions.
3812 * Pointer to the error structure.
3815 * Pointer to mlx5_flow object on success,
3816 * otherwise NULL and rte_errno is set.
3818 static struct mlx5_flow *
3819 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3820 const struct rte_flow_item items[] __rte_unused,
3821 const struct rte_flow_action actions[] __rte_unused,
3822 struct rte_flow_error *error)
3824 uint32_t size = sizeof(struct mlx5_flow);
3825 struct mlx5_flow *flow;
3827 flow = rte_calloc(__func__, 1, size, 0);
3829 rte_flow_error_set(error, ENOMEM,
3830 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3831 "not enough memory to create flow");
3834 flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3840 * Sanity check for match mask and value. Similar to check_valid_spec() in
3841 * the kernel driver. If an unmasked bit is present in the value, return failure.
3844 * pointer to match mask buffer.
3845 * @param match_value
3846 * pointer to match value buffer.
3849 * 0 if valid, -EINVAL otherwise.
3852 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3854 uint8_t *m = match_mask;
3855 uint8_t *v = match_value;
3858 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3861 "match_value differs from match_criteria"
3862 " %p[%u] != %p[%u]",
3863 match_value, i, match_mask, i);
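/*
 * Illustrative example of what this sanity check rejects: any byte where the
 * value carries a bit that its mask does not cover.
 *
 *   mask byte 0x0f, value byte 0x1f  ->  0x1f & ~0x0f == 0x10  ->  failure
 *   mask byte 0x0f, value byte 0x0a  ->  0x0a & ~0x0f == 0x00  ->  OK
 */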
3872 * Add Ethernet item to matcher and to the value.
3874 * @param[in, out] matcher
3876 * @param[in, out] key
3877 * Flow matcher value.
3879 * Flow pattern to translate.
3881 * Item is inner pattern.
3884 flow_dv_translate_item_eth(void *matcher, void *key,
3885 const struct rte_flow_item *item, int inner)
3887 const struct rte_flow_item_eth *eth_m = item->mask;
3888 const struct rte_flow_item_eth *eth_v = item->spec;
3889 const struct rte_flow_item_eth nic_mask = {
3890 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3891 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3892 .type = RTE_BE16(0xffff),
3904 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3906 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3908 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3910 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3912 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
3913 ð_m->dst, sizeof(eth_m->dst));
3914 /* The value must be in the range of the mask. */
3915 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
3916 for (i = 0; i < sizeof(eth_m->dst); ++i)
3917 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
3918 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
3919 ð_m->src, sizeof(eth_m->src));
3920 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
3921 /* The value must be in the range of the mask. */
3922 for (i = 0; i < sizeof(eth_m->src); ++i)
3923 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
3924 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3925 rte_be_to_cpu_16(eth_m->type));
3926 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
3927 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
3931 * Add VLAN item to matcher and to the value.
3933 * @param[in, out] dev_flow
3935 * @param[in, out] matcher
3937 * @param[in, out] key
3938 * Flow matcher value.
3940 * Flow pattern to translate.
3942 * Item is inner pattern.
3945 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
3946 void *matcher, void *key,
3947 const struct rte_flow_item *item,
3950 const struct rte_flow_item_vlan *vlan_m = item->mask;
3951 const struct rte_flow_item_vlan *vlan_v = item->spec;
3960 vlan_m = &rte_flow_item_vlan_mask;
3962 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3964 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3966 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3968 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3970 * This is a workaround; masks are not supported here,
3971 * and the item has been pre-validated.
3973 dev_flow->dv.vf_vlan.tag =
3974 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
3976 tci_m = rte_be_to_cpu_16(vlan_m->tci);
3977 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
3978 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
3979 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
3980 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
3981 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
3982 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
3983 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
3984 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
3985 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
3986 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3987 rte_be_to_cpu_16(vlan_m->inner_type));
3988 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
3989 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
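/*
 * Illustrative breakdown of the TCI handling above (host byte order after
 * rte_be_to_cpu_16); the value is hypothetical:
 *
 *   tci = 0xB064  ->  first_prio = tci >> 13          = 5   (PCP)
 *                     first_cfi  = (tci >> 12) & 0x1  = 1   (DEI/CFI)
 *                     first_vid  = tci & 0x0fff       = 100 (VID)
 *
 * MLX5_SET() truncates each written value to its field width, which is why
 * the code above can pass the full shifted TCI for the 1- and 3-bit fields.
 */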
3993 * Add IPV4 item to matcher and to the value.
3995 * @param[in, out] matcher
3997 * @param[in, out] key
3998 * Flow matcher value.
4000 * Flow pattern to translate.
4002 * Item is inner pattern.
4004 * The group to insert the rule.
4007 flow_dv_translate_item_ipv4(void *matcher, void *key,
4008 const struct rte_flow_item *item,
4009 int inner, uint32_t group)
4011 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4012 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4013 const struct rte_flow_item_ipv4 nic_mask = {
4015 .src_addr = RTE_BE32(0xffffffff),
4016 .dst_addr = RTE_BE32(0xffffffff),
4017 .type_of_service = 0xff,
4018 .next_proto_id = 0xff,
4028 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4030 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4032 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4034 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4037 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4039 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4040 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4045 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4046 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4047 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4048 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4049 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4050 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4051 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4052 src_ipv4_src_ipv6.ipv4_layout.ipv4);
4053 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4054 src_ipv4_src_ipv6.ipv4_layout.ipv4);
4055 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4056 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4057 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4058 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4059 ipv4_m->hdr.type_of_service);
4060 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4061 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4062 ipv4_m->hdr.type_of_service >> 2);
4063 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4064 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4065 ipv4_m->hdr.next_proto_id);
4066 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4067 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4071 * Add IPV6 item to matcher and to the value.
4073 * @param[in, out] matcher
4075 * @param[in, out] key
4076 * Flow matcher value.
4078 * Flow pattern to translate.
4080 * Item is inner pattern.
4082 * The group to insert the rule.
4085 flow_dv_translate_item_ipv6(void *matcher, void *key,
4086 const struct rte_flow_item *item,
4087 int inner, uint32_t group)
4089 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4090 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4091 const struct rte_flow_item_ipv6 nic_mask = {
4094 "\xff\xff\xff\xff\xff\xff\xff\xff"
4095 "\xff\xff\xff\xff\xff\xff\xff\xff",
4097 "\xff\xff\xff\xff\xff\xff\xff\xff"
4098 "\xff\xff\xff\xff\xff\xff\xff\xff",
4099 .vtc_flow = RTE_BE32(0xffffffff),
4106 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4107 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4116 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4118 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4120 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4122 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4125 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4127 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4128 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4133 size = sizeof(ipv6_m->hdr.dst_addr);
4134 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4135 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4136 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4137 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4138 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4139 for (i = 0; i < size; ++i)
4140 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4141 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4142 src_ipv4_src_ipv6.ipv6_layout.ipv6);
4143 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4144 src_ipv4_src_ipv6.ipv6_layout.ipv6);
4145 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4146 for (i = 0; i < size; ++i)
4147 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4149 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4150 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4151 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4152 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4153 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4154 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4157 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4159 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4162 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4164 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4168 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4170 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4171 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4175 * Add TCP item to matcher and to the value.
4177 * @param[in, out] matcher
4179 * @param[in, out] key
4180 * Flow matcher value.
4182 * Flow pattern to translate.
4184 * Item is inner pattern.
4187 flow_dv_translate_item_tcp(void *matcher, void *key,
4188 const struct rte_flow_item *item,
4191 const struct rte_flow_item_tcp *tcp_m = item->mask;
4192 const struct rte_flow_item_tcp *tcp_v = item->spec;
4197 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4199 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4201 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4203 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4205 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4206 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4210 tcp_m = &rte_flow_item_tcp_mask;
4211 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4212 rte_be_to_cpu_16(tcp_m->hdr.src_port));
4213 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4214 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4215 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4216 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4217 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4218 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4219 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4220 tcp_m->hdr.tcp_flags);
4221 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4222 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4226 * Add UDP item to matcher and to the value.
4228 * @param[in, out] matcher
4230 * @param[in, out] key
4231 * Flow matcher value.
4233 * Flow pattern to translate.
4235 * Item is inner pattern.
4238 flow_dv_translate_item_udp(void *matcher, void *key,
4239 const struct rte_flow_item *item,
4242 const struct rte_flow_item_udp *udp_m = item->mask;
4243 const struct rte_flow_item_udp *udp_v = item->spec;
4248 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4250 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4252 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4254 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4256 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4257 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4261 udp_m = &rte_flow_item_udp_mask;
4262 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4263 rte_be_to_cpu_16(udp_m->hdr.src_port));
4264 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4265 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4266 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4267 rte_be_to_cpu_16(udp_m->hdr.dst_port));
4268 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4269 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4273 * Add GRE optional Key item to matcher and to the value.
4275 * @param[in, out] matcher
4277 * @param[in, out] key
4278 * Flow matcher value.
4280 * Flow pattern to translate.
4282 * Item is inner pattern.
4285 flow_dv_translate_item_gre_key(void *matcher, void *key,
4286 const struct rte_flow_item *item)
4288 const rte_be32_t *key_m = item->mask;
4289 const rte_be32_t *key_v = item->spec;
4290 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4291 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4292 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4297 key_m = &gre_key_default_mask;
4298 /* GRE K bit must be on and should already be validated */
4299 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4300 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
4301 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4302 rte_be_to_cpu_32(*key_m) >> 8);
4303 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4304 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4305 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4306 rte_be_to_cpu_32(*key_m) & 0xFF);
4307 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4308 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
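/*
 * Illustrative split of the 32-bit GRE key matched above: the device exposes
 * it as gre_key_h (upper 24 bits) and gre_key_l (lower 8 bits). For a
 * hypothetical key of 0x12345678:
 *
 *   gre_key_h = 0x12345678 >> 8   = 0x123456
 *   gre_key_l = 0x12345678 & 0xff = 0x78
 */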
4312 * Add GRE item to matcher and to the value.
4314 * @param[in, out] matcher
4316 * @param[in, out] key
4317 * Flow matcher value.
4319 * Flow pattern to translate.
4321 * Item is inner pattern.
4324 flow_dv_translate_item_gre(void *matcher, void *key,
4325 const struct rte_flow_item *item,
4328 const struct rte_flow_item_gre *gre_m = item->mask;
4329 const struct rte_flow_item_gre *gre_v = item->spec;
4332 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4333 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4340 uint16_t s_present:1;
4341 uint16_t k_present:1;
4342 uint16_t rsvd_bit1:1;
4343 uint16_t c_present:1;
4347 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4350 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4352 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4354 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4356 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4358 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4359 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4363 gre_m = &rte_flow_item_gre_mask;
4364 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4365 rte_be_to_cpu_16(gre_m->protocol));
4366 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4367 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4368 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4369 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4370 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4371 gre_crks_rsvd0_ver_m.c_present);
4372 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4373 gre_crks_rsvd0_ver_v.c_present &
4374 gre_crks_rsvd0_ver_m.c_present);
4375 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4376 gre_crks_rsvd0_ver_m.k_present);
4377 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4378 gre_crks_rsvd0_ver_v.k_present &
4379 gre_crks_rsvd0_ver_m.k_present);
4380 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4381 gre_crks_rsvd0_ver_m.s_present);
4382 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4383 gre_crks_rsvd0_ver_v.s_present &
4384 gre_crks_rsvd0_ver_m.s_present);
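/*
 * Illustrative sketch (helper name is an example only): decoding the
 * GRE c_rsvd0_ver word in host order, where the C/K/S flags occupy
 * bits 15/13/12. A value of RTE_BE16(0x2000) therefore has only the
 * K (key present) bit set, which is the spec the NVGRE translation
 * below feeds into this function.
 */
static inline int
flow_dv_gre_k_bit_example(rte_be16_t c_rsvd0_ver)
{
	uint16_t v = rte_be_to_cpu_16(c_rsvd0_ver);

	return !!(v & 0x2000);	/* K (key present) flag. */
}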
4388 * Add NVGRE item to matcher and to the value.
4390 * @param[in, out] matcher
4391 * Flow matcher.
4392 * @param[in, out] key
4393 * Flow matcher value.
4394 * @param[in] item
4395 * Flow pattern to translate.
4396 * @param[in] inner
4397 * Item is inner pattern.
4400 flow_dv_translate_item_nvgre(void *matcher, void *key,
4401 const struct rte_flow_item *item,
4404 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4405 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4406 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4407 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4408 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
4409 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
4415 /* For NVGRE, GRE header fields must be set with defined values. */
4416 const struct rte_flow_item_gre gre_spec = {
4417 .c_rsvd0_ver = RTE_BE16(0x2000),
4418 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4420 const struct rte_flow_item_gre gre_mask = {
4421 .c_rsvd0_ver = RTE_BE16(0xB000),
4422 .protocol = RTE_BE16(UINT16_MAX),
4424 const struct rte_flow_item gre_item = {
4429 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4433 nvgre_m = &rte_flow_item_nvgre_mask;
4434 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4435 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4436 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4437 memcpy(gre_key_m, tni_flow_id_m, size);
4438 for (i = 0; i < size; ++i)
4439 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
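/*
 * Illustrative sketch (helper name is an example only): in NVGRE the
 * 24-bit TNI and the 8-bit flow_id together fill the 32-bit GRE key
 * field, which is why the copy above spans
 * sizeof(tni) + sizeof(flow_id) bytes starting at gre_key_h.
 */
static inline uint32_t
flow_dv_nvgre_key_example(const uint8_t tni[3], uint8_t flow_id)
{
	/* TNI occupies the 24 MSBs, flow_id the 8 LSBs of the GRE key. */
	return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
	       ((uint32_t)tni[2] << 8) | flow_id;
}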
4443 * Add VXLAN item to matcher and to the value.
4445 * @param[in, out] matcher
4446 * Flow matcher.
4447 * @param[in, out] key
4448 * Flow matcher value.
4449 * @param[in] item
4450 * Flow pattern to translate.
4451 * @param[in] inner
4452 * Item is inner pattern.
4455 flow_dv_translate_item_vxlan(void *matcher, void *key,
4456 const struct rte_flow_item *item,
4459 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4460 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4463 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4464 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4472 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4474 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4476 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4478 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4480 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4481 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4482 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4483 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4484 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4489 vxlan_m = &rte_flow_item_vxlan_mask;
4490 size = sizeof(vxlan_m->vni);
4491 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4492 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4493 memcpy(vni_m, vxlan_m->vni, size);
4494 for (i = 0; i < size; ++i)
4495 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
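/*
 * Illustrative usage sketch (helper name and VNI value are examples
 * only): a VXLAN item matching VNI 42. When the preceding UDP item
 * leaves the destination port unspecified, the translation above
 * defaults it to MLX5_UDP_PORT_VXLAN (4789).
 */
static inline struct rte_flow_item
flow_dv_vxlan_item_example(void)
{
	static const struct rte_flow_item_vxlan spec = {
		.vni = { 0x00, 0x00, 0x2a },	/* VNI 42, 24-bit big-endian. */
	};
	static const struct rte_flow_item_vxlan mask = {
		.vni = { 0xff, 0xff, 0xff },	/* Match all VNI bits. */
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = &spec,
		.mask = &mask,
	};

	return item;
}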
4499 * Add MPLS item to matcher and to the value.
4501 * @param[in, out] matcher
4502 * Flow matcher.
4503 * @param[in, out] key
4504 * Flow matcher value.
4505 * @param[in] item
4506 * Flow pattern to translate.
4507 * @param[in] prev_layer
4508 * The protocol layer indicated in the previous item.
4509 * @param[in] inner
4510 * Item is inner pattern.
4513 flow_dv_translate_item_mpls(void *matcher, void *key,
4514 const struct rte_flow_item *item,
4515 uint64_t prev_layer,
4518 const uint32_t *in_mpls_m = item->mask;
4519 const uint32_t *in_mpls_v = item->spec;
4520 uint32_t *out_mpls_m = 0;
4521 uint32_t *out_mpls_v = 0;
4522 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4523 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4524 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4526 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4527 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4528 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4530 switch (prev_layer) {
4531 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4532 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4533 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4534 MLX5_UDP_PORT_MPLS);
4536 case MLX5_FLOW_LAYER_GRE:
4537 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4538 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4539 RTE_ETHER_TYPE_MPLS);
4542 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4543 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4550 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4551 switch (prev_layer) {
4552 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4554 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4555 outer_first_mpls_over_udp);
4557 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4558 outer_first_mpls_over_udp);
4560 case MLX5_FLOW_LAYER_GRE:
4562 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4563 outer_first_mpls_over_gre);
4565 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4566 outer_first_mpls_over_gre);
4569 /* Inner MPLS not over GRE is not supported. */
4572 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4576 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4582 if (out_mpls_m && out_mpls_v) {
4583 *out_mpls_m = *in_mpls_m;
4584 *out_mpls_v = *in_mpls_v & *in_mpls_m;
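/*
 * Illustrative sketch (helper name and label value are examples
 * only): the MPLS item carries label/TC/S in the three label_tc_s
 * bytes; label 16 with the bottom-of-stack bit set encodes as
 * (16 << 4) | 1 = 0x000101.
 */
static inline struct rte_flow_item_mpls
flow_dv_mpls_label16_example(void)
{
	struct rte_flow_item_mpls mpls = {
		/* label = 16 (20 bits), TC = 0, S = 1. */
		.label_tc_s = { 0x00, 0x01, 0x01 },
	};

	return mpls;
}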
4589 * Add META item to matcher and to the value.
4591 * @param[in, out] matcher
4592 * Flow matcher.
4593 * @param[in, out] key
4594 * Flow matcher value.
4595 * @param[in] item
4596 * Flow pattern to translate.
4597 * @param[in] inner
4598 * Item is inner pattern.
4601 flow_dv_translate_item_meta(void *matcher, void *key,
4602 const struct rte_flow_item *item)
4604 const struct rte_flow_item_meta *meta_m;
4605 const struct rte_flow_item_meta *meta_v;
4607 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4609 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4611 meta_m = (const void *)item->mask;
4613 meta_m = &rte_flow_item_meta_mask;
4614 meta_v = (const void *)item->spec;
4616 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4617 rte_be_to_cpu_32(meta_m->data));
4618 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4619 rte_be_to_cpu_32(meta_v->data & meta_m->data));
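/*
 * Illustrative usage sketch (helper name and value are examples
 * only): a META item matching the 32-bit metadata value carried with
 * the packet; the value lands in metadata_reg_a exactly as in the
 * translation above.
 */
static inline struct rte_flow_item_meta
flow_dv_meta_item_example(void)
{
	struct rte_flow_item_meta meta = {
		.data = RTE_BE32(0x1234),	/* Arbitrary example value. */
	};

	return meta;
}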
4624 * Add source vport match to the specified matcher.
4626 * @param[in, out] matcher
4627 * Flow matcher.
4628 * @param[in, out] key
4629 * Flow matcher value.
4630 * @param[in] port
4631 * Source vport value to match.
4632 * @param[in] mask
4633 * Mask of the source vport.
4636 flow_dv_translate_item_source_vport(void *matcher, void *key,
4637 int16_t port, uint16_t mask)
4639 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4640 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4642 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4643 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4647 * Translate port-id item to eswitch match on port-id.
4649 * @param[in] dev
4650 * The device to configure through.
4651 * @param[in, out] matcher
4652 * Flow matcher.
4653 * @param[in, out] key
4654 * Flow matcher value.
4655 * @param[in] item
4656 * Flow pattern to translate.
4658 * @return
4659 * 0 on success, a negative errno value otherwise.
4662 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4663 void *key, const struct rte_flow_item *item)
4665 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4666 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4667 uint16_t mask, val, id;
4670 mask = pid_m ? pid_m->id : 0xffff;
4671 id = pid_v ? pid_v->id : dev->data->port_id;
4672 ret = mlx5_port_to_eswitch_info(id, NULL, &val);
4675 flow_dv_translate_item_source_vport(matcher, key, val, mask);
4680 * Add ICMP6 item to matcher and to the value.
4682 * @param[in, out] matcher
4683 * Flow matcher.
4684 * @param[in, out] key
4685 * Flow matcher value.
4686 * @param[in] item
4687 * Flow pattern to translate.
4688 * @param[in] inner
4689 * Item is inner pattern.
4692 flow_dv_translate_item_icmp6(void *matcher, void *key,
4693 const struct rte_flow_item *item,
4696 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4697 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4700 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4702 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4704 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4706 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4708 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4710 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4712 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4713 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
4717 icmp6_m = &rte_flow_item_icmp6_mask;
4718 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
4719 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
4720 icmp6_v->type & icmp6_m->type);
4721 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
4722 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
4723 icmp6_v->code & icmp6_m->code);
4727 * Add ICMP item to matcher and to the value.
4729 * @param[in, out] matcher
4730 * Flow matcher.
4731 * @param[in, out] key
4732 * Flow matcher value.
4733 * @param[in] item
4734 * Flow pattern to translate.
4735 * @param[in] inner
4736 * Item is inner pattern.
4739 flow_dv_translate_item_icmp(void *matcher, void *key,
4740 const struct rte_flow_item *item,
4743 const struct rte_flow_item_icmp *icmp_m = item->mask;
4744 const struct rte_flow_item_icmp *icmp_v = item->spec;
4747 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4749 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4751 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4753 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4755 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4757 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4759 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4760 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
4764 icmp_m = &rte_flow_item_icmp_mask;
4765 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
4766 icmp_m->hdr.icmp_type);
4767 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
4768 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
4769 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
4770 icmp_m->hdr.icmp_code);
4771 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
4772 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
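/*
 * Illustrative usage sketch (helper name is an example only): an ICMP
 * item matching echo requests (type 8, code 0), translated above into
 * the misc3 icmp_type/icmp_code fields.
 */
static inline struct rte_flow_item_icmp
flow_dv_icmp_echo_request_example(void)
{
	struct rte_flow_item_icmp icmp = {
		.hdr = {
			.icmp_type = 8,	/* Echo request. */
			.icmp_code = 0,
		},
	};

	return icmp;
}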
4775 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
4777 #define HEADER_IS_ZERO(match_criteria, headers) \
4778 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
4779 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
4782 * Calculate flow matcher enable bitmap.
4784 * @param match_criteria
4785 * Pointer to flow matcher criteria.
4788 * Bitmap of enabled fields.
4791 flow_dv_matcher_enable(uint32_t *match_criteria)
4793 uint8_t match_criteria_enable;
4795 match_criteria_enable =
4796 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
4797 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
4798 match_criteria_enable |=
4799 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
4800 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
4801 match_criteria_enable |=
4802 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
4803 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
4804 match_criteria_enable |=
4805 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
4806 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
4807 match_criteria_enable |=
4808 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
4809 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
4810 return match_criteria_enable;
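/*
 * Illustrative sketch (helper name is an example only): the
 * criteria-enable byte is a plain bitmap, so a matcher which
 * populated only outer_headers and misc_parameters reports exactly
 * these two bits.
 */
static inline uint8_t
flow_dv_matcher_enable_example(void)
{
	return (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
	       (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT);
}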
4817 * @param[in, out] dev
4818 * Pointer to rte_eth_dev structure.
4819 * @param[in] table_id
4820 * Table id to use.
4821 * @param[in] egress
4822 * Direction of the table.
4823 * @param[in] transfer
4824 * E-Switch or NIC flow.
4825 * @param[out] error
4826 * Pointer to error structure.
4828 * @return
4829 * Returns the table resource based on the index, NULL in case of failure.
4831 static struct mlx5_flow_tbl_resource *
4832 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
4833 uint32_t table_id, uint8_t egress,
4835 struct rte_flow_error *error)
4837 struct mlx5_priv *priv = dev->data->dev_private;
4838 struct mlx5_ibv_shared *sh = priv->sh;
4839 struct mlx5_flow_tbl_resource *tbl;
4841 #ifdef HAVE_MLX5DV_DR
4843 tbl = &sh->fdb_tbl[table_id];
4845 tbl->obj = mlx5_glue->dr_create_flow_tbl
4846 (sh->fdb_domain, table_id);
4847 } else if (egress) {
4848 tbl = &sh->tx_tbl[table_id];
4850 tbl->obj = mlx5_glue->dr_create_flow_tbl
4851 (sh->tx_domain, table_id);
4853 tbl = &sh->rx_tbl[table_id];
4855 tbl->obj = mlx5_glue->dr_create_flow_tbl
4856 (sh->rx_domain, table_id);
4859 rte_flow_error_set(error, ENOMEM,
4860 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4861 NULL, "cannot create table");
4864 rte_atomic32_inc(&tbl->refcnt);
4870 return &sh->fdb_tbl[table_id];
4872 return &sh->tx_tbl[table_id];
4874 return &sh->rx_tbl[table_id];
4879 * Release a flow table.
4881 * @param[in] tbl
4882 * Table resource to be released.
4884 * @return
4885 * Returns 0 if the table was released, 1 otherwise.
4888 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
4892 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
4893 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
4901 * Register the flow matcher.
4903 * @param[in, out] dev
4904 * Pointer to rte_eth_dev structure.
4905 * @param[in, out] matcher
4906 * Pointer to flow matcher.
4907 * @param[in, out] dev_flow
4908 * Pointer to the dev_flow.
4909 * @param[out] error
4910 * Pointer to error structure.
4912 * @return
4913 * 0 on success, otherwise -errno and errno is set.
4916 flow_dv_matcher_register(struct rte_eth_dev *dev,
4917 struct mlx5_flow_dv_matcher *matcher,
4918 struct mlx5_flow *dev_flow,
4919 struct rte_flow_error *error)
4921 struct mlx5_priv *priv = dev->data->dev_private;
4922 struct mlx5_ibv_shared *sh = priv->sh;
4923 struct mlx5_flow_dv_matcher *cache_matcher;
4924 struct mlx5dv_flow_matcher_attr dv_attr = {
4925 .type = IBV_FLOW_ATTR_NORMAL,
4926 .match_mask = (void *)&matcher->mask,
4928 struct mlx5_flow_tbl_resource *tbl = NULL;
4930 /* Lookup from cache. */
4931 LIST_FOREACH(cache_matcher, &sh->matchers, next) {
4932 if (matcher->crc == cache_matcher->crc &&
4933 matcher->priority == cache_matcher->priority &&
4934 matcher->egress == cache_matcher->egress &&
4935 matcher->group == cache_matcher->group &&
4936 matcher->transfer == cache_matcher->transfer &&
4937 !memcmp((const void *)matcher->mask.buf,
4938 (const void *)cache_matcher->mask.buf,
4939 cache_matcher->mask.size)) {
4941 "priority %hd use %s matcher %p: refcnt %d++",
4942 cache_matcher->priority,
4943 cache_matcher->egress ? "tx" : "rx",
4944 (void *)cache_matcher,
4945 rte_atomic32_read(&cache_matcher->refcnt));
4946 rte_atomic32_inc(&cache_matcher->refcnt);
4947 dev_flow->dv.matcher = cache_matcher;
4951 /* Register new matcher. */
4952 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
4954 return rte_flow_error_set(error, ENOMEM,
4955 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4956 "cannot allocate matcher memory");
4957 tbl = flow_dv_tbl_resource_get(dev, matcher->group,
4958 matcher->egress, matcher->transfer,
4961 rte_free(cache_matcher);
4962 return rte_flow_error_set(error, ENOMEM,
4963 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4964 NULL, "cannot create table");
4966 *cache_matcher = *matcher;
4967 dv_attr.match_criteria_enable =
4968 flow_dv_matcher_enable(cache_matcher->mask.buf);
4969 dv_attr.priority = matcher->priority;
4970 if (matcher->egress)
4971 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
4972 cache_matcher->matcher_object =
4973 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
4974 if (!cache_matcher->matcher_object) {
4975 rte_free(cache_matcher);
4976 #ifdef HAVE_MLX5DV_DR
4977 flow_dv_tbl_resource_release(tbl);
4979 return rte_flow_error_set(error, ENOMEM,
4980 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4981 NULL, "cannot create matcher");
4983 rte_atomic32_inc(&cache_matcher->refcnt);
4984 LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
4985 dev_flow->dv.matcher = cache_matcher;
4986 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
4987 cache_matcher->priority,
4988 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
4989 rte_atomic32_read(&cache_matcher->refcnt));
4990 rte_atomic32_inc(&tbl->refcnt);
4995 * Find existing tag resource or create and register a new one.
4997 * @param[in, out] dev
4998 * Pointer to rte_eth_dev structure.
4999 * @param[in, out] resource
5000 * Pointer to tag resource.
5001 * @param[in, out] dev_flow
5002 * Pointer to the dev_flow.
5003 * @param[out] error
5004 * Pointer to error structure.
5006 * @return
5007 * 0 on success, otherwise -errno and errno is set.
5010 flow_dv_tag_resource_register
5011 (struct rte_eth_dev *dev,
5012 struct mlx5_flow_dv_tag_resource *resource,
5013 struct mlx5_flow *dev_flow,
5014 struct rte_flow_error *error)
5016 struct mlx5_priv *priv = dev->data->dev_private;
5017 struct mlx5_ibv_shared *sh = priv->sh;
5018 struct mlx5_flow_dv_tag_resource *cache_resource;
5020 /* Lookup a matching resource from cache. */
5021 LIST_FOREACH(cache_resource, &sh->tags, next) {
5022 if (resource->tag == cache_resource->tag) {
5023 DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5024 (void *)cache_resource,
5025 rte_atomic32_read(&cache_resource->refcnt));
5026 rte_atomic32_inc(&cache_resource->refcnt);
5027 dev_flow->flow->tag_resource = cache_resource;
5031 /* Register new resource. */
5032 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5033 if (!cache_resource)
5034 return rte_flow_error_set(error, ENOMEM,
5035 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5036 "cannot allocate resource memory");
5037 *cache_resource = *resource;
5038 cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5040 if (!cache_resource->action) {
5041 rte_free(cache_resource);
5042 return rte_flow_error_set(error, ENOMEM,
5043 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5044 NULL, "cannot create action");
5046 rte_atomic32_init(&cache_resource->refcnt);
5047 rte_atomic32_inc(&cache_resource->refcnt);
5048 LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5049 dev_flow->flow->tag_resource = cache_resource;
5050 DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5051 (void *)cache_resource,
5052 rte_atomic32_read(&cache_resource->refcnt));
5059 * @param dev
5060 * Pointer to Ethernet device.
5061 * @param tag
5062 * Pointer to the tag resource.
5064 * @return
5065 * 1 while a reference on it exists, 0 when freed.
5068 flow_dv_tag_release(struct rte_eth_dev *dev,
5069 struct mlx5_flow_dv_tag_resource *tag)
5072 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5073 dev->data->port_id, (void *)tag,
5074 rte_atomic32_read(&tag->refcnt));
5075 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5076 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5077 LIST_REMOVE(tag, next);
5078 DRV_LOG(DEBUG, "port %u tag %p: removed",
5079 dev->data->port_id, (void *)tag);
5087 * Translate port ID action to vport.
5089 * @param[in] dev
5090 * Pointer to rte_eth_dev structure.
5091 * @param[in] action
5092 * Pointer to the port ID action.
5093 * @param[out] dst_port_id
5094 * The target port ID.
5095 * @param[out] error
5096 * Pointer to the error structure.
5098 * @return
5099 * 0 on success, a negative errno value otherwise and rte_errno is set.
5102 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5103 const struct rte_flow_action *action,
5104 uint32_t *dst_port_id,
5105 struct rte_flow_error *error)
5110 const struct rte_flow_action_port_id *conf =
5111 (const struct rte_flow_action_port_id *)action->conf;
5113 port = conf->original ? dev->data->port_id : conf->id;
5114 ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
5116 return rte_flow_error_set(error, -ret,
5117 RTE_FLOW_ERROR_TYPE_ACTION,
5119 "No eswitch info was found for port");
5120 *dst_port_id = port_id;
5125 * Fill the flow with DV spec.
5127 * @param[in] dev
5128 * Pointer to rte_eth_dev structure.
5129 * @param[in, out] dev_flow
5130 * Pointer to the sub flow.
5131 * @param[in] attr
5132 * Pointer to the flow attributes.
5133 * @param[in] items
5134 * Pointer to the list of items.
5135 * @param[in] actions
5136 * Pointer to the list of actions.
5137 * @param[out] error
5138 * Pointer to the error structure.
5141 * 0 on success, a negative errno value otherwise and rte_errno is set.
5144 flow_dv_translate(struct rte_eth_dev *dev,
5145 struct mlx5_flow *dev_flow,
5146 const struct rte_flow_attr *attr,
5147 const struct rte_flow_item items[],
5148 const struct rte_flow_action actions[],
5149 struct rte_flow_error *error)
5151 struct mlx5_priv *priv = dev->data->dev_private;
5152 struct rte_flow *flow = dev_flow->flow;
5153 uint64_t item_flags = 0;
5154 uint64_t last_item = 0;
5155 uint64_t action_flags = 0;
5156 uint64_t priority = attr->priority;
5157 struct mlx5_flow_dv_matcher matcher = {
5159 .size = sizeof(matcher.mask.buf),
5163 bool actions_end = false;
5164 struct mlx5_flow_dv_modify_hdr_resource res = {
5165 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5166 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5168 union flow_dv_attr flow_attr = { .attr = 0 };
5169 struct mlx5_flow_dv_tag_resource tag_resource;
5170 uint32_t modify_action_position = UINT32_MAX;
5171 void *match_mask = matcher.mask.buf;
5172 void *match_value = dev_flow->dv.value.buf;
5173 uint8_t next_protocol = 0xff;
5174 struct rte_vlan_hdr vlan = { 0 };
5175 bool vlan_inherited = false;
5180 ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5184 flow->group = table;
5186 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5187 if (priority == MLX5_FLOW_PRIO_RSVD)
5188 priority = priv->config.flow_prio - 1;
5189 for (; !actions_end ; actions++) {
5190 const struct rte_flow_action_queue *queue;
5191 const struct rte_flow_action_rss *rss;
5192 const struct rte_flow_action *action = actions;
5193 const struct rte_flow_action_count *count = action->conf;
5194 const uint8_t *rss_key;
5195 const struct rte_flow_action_jump *jump_data;
5196 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5197 struct mlx5_flow_tbl_resource *tbl;
5198 uint32_t port_id = 0;
5199 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5201 switch (actions->type) {
5202 case RTE_FLOW_ACTION_TYPE_VOID:
5204 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5205 if (flow_dv_translate_action_port_id(dev, action,
5208 port_id_resource.port_id = port_id;
5209 if (flow_dv_port_id_action_resource_register
5210 (dev, &port_id_resource, dev_flow, error))
5212 dev_flow->dv.actions[actions_n++] =
5213 dev_flow->dv.port_id_action->action;
5214 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5216 case RTE_FLOW_ACTION_TYPE_FLAG:
5218 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5219 if (!flow->tag_resource)
5220 if (flow_dv_tag_resource_register
5221 (dev, &tag_resource, dev_flow, error))
5223 dev_flow->dv.actions[actions_n++] =
5224 flow->tag_resource->action;
5225 action_flags |= MLX5_FLOW_ACTION_FLAG;
5227 case RTE_FLOW_ACTION_TYPE_MARK:
5228 tag_resource.tag = mlx5_flow_mark_set
5229 (((const struct rte_flow_action_mark *)
5230 (actions->conf))->id);
5231 if (!flow->tag_resource)
5232 if (flow_dv_tag_resource_register
5233 (dev, &tag_resource, dev_flow, error))
5235 dev_flow->dv.actions[actions_n++] =
5236 flow->tag_resource->action;
5237 action_flags |= MLX5_FLOW_ACTION_MARK;
5239 case RTE_FLOW_ACTION_TYPE_DROP:
5240 action_flags |= MLX5_FLOW_ACTION_DROP;
5242 case RTE_FLOW_ACTION_TYPE_QUEUE:
5243 queue = actions->conf;
5244 flow->rss.queue_num = 1;
5245 (*flow->queue)[0] = queue->index;
5246 action_flags |= MLX5_FLOW_ACTION_QUEUE;
5248 case RTE_FLOW_ACTION_TYPE_RSS:
5249 rss = actions->conf;
5251 memcpy((*flow->queue), rss->queue,
5252 rss->queue_num * sizeof(uint16_t));
5253 flow->rss.queue_num = rss->queue_num;
5254 /* NULL RSS key indicates default RSS key. */
5255 rss_key = !rss->key ? rss_hash_default_key : rss->key;
5256 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5257 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5258 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5259 flow->rss.level = rss->level;
5260 action_flags |= MLX5_FLOW_ACTION_RSS;
5262 case RTE_FLOW_ACTION_TYPE_COUNT:
5263 if (!priv->config.devx) {
5264 rte_errno = ENOTSUP;
5267 flow->counter = flow_dv_counter_alloc(dev,
5271 if (flow->counter == NULL)
5273 dev_flow->dv.actions[actions_n++] =
5274 flow->counter->action;
5275 action_flags |= MLX5_FLOW_ACTION_COUNT;
5278 if (rte_errno == ENOTSUP)
5279 return rte_flow_error_set
5281 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5283 "count action not supported");
5285 return rte_flow_error_set
5287 RTE_FLOW_ERROR_TYPE_ACTION,
5289 "cannot create counter"
5292 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5293 dev_flow->dv.actions[actions_n++] =
5294 priv->sh->pop_vlan_action;
5295 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5297 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5298 if (!vlan_inherited) {
5299 flow_dev_get_vlan_info_from_items(items, &vlan);
5300 vlan_inherited = true;
5302 vlan.eth_proto = rte_be_to_cpu_16
5303 ((((const struct rte_flow_action_of_push_vlan *)
5304 actions->conf)->ethertype));
5305 if (flow_dv_create_action_push_vlan
5306 (dev, attr, &vlan, dev_flow, error))
5308 dev_flow->dv.actions[actions_n++] =
5309 dev_flow->dv.push_vlan_res->action;
5310 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5311 /* The push VLAN action also handles this VLAN_VID. */
5312 action_flags &= ~MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5314 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5315 if (!vlan_inherited) {
5316 flow_dev_get_vlan_info_from_items(items, &vlan);
5317 vlan_inherited = true;
5320 ((const struct rte_flow_action_of_set_vlan_pcp *)
5321 actions->conf)->vlan_pcp;
5322 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
5323 vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
5324 vlan.vlan_tci |= vlan_tci;
5325 /* Push VLAN command will use this value */
5327 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5328 if (!vlan_inherited) {
5329 flow_dev_get_vlan_info_from_items(items, &vlan);
5330 vlan_inherited = true;
5332 vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
5333 vlan.vlan_tci |= rte_be_to_cpu_16
5334 (((const struct rte_flow_action_of_set_vlan_vid *)
5335 actions->conf)->vlan_vid);
5336 /* Push VLAN command will use this value */
5337 if (mlx5_flow_find_action
5339 RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
5341 /* If no VLAN push - this is a modify header action */
5342 if (flow_dv_convert_action_modify_vlan_vid
5343 (&res, actions, error))
5345 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5347 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5348 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5349 if (flow_dv_create_action_l2_encap(dev, actions,
5354 dev_flow->dv.actions[actions_n++] =
5355 dev_flow->dv.encap_decap->verbs_action;
5356 action_flags |= actions->type ==
5357 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5358 MLX5_FLOW_ACTION_VXLAN_ENCAP :
5359 MLX5_FLOW_ACTION_NVGRE_ENCAP;
5361 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5362 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5363 if (flow_dv_create_action_l2_decap(dev, dev_flow,
5367 dev_flow->dv.actions[actions_n++] =
5368 dev_flow->dv.encap_decap->verbs_action;
5369 action_flags |= actions->type ==
5370 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5371 MLX5_FLOW_ACTION_VXLAN_DECAP :
5372 MLX5_FLOW_ACTION_NVGRE_DECAP;
5374 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5375 /* Handle encap with preceding decap. */
5376 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5377 if (flow_dv_create_action_raw_encap
5378 (dev, actions, dev_flow, attr, error))
5380 dev_flow->dv.actions[actions_n++] =
5381 dev_flow->dv.encap_decap->verbs_action;
5383 /* Handle encap without preceding decap. */
5384 if (flow_dv_create_action_l2_encap
5385 (dev, actions, dev_flow, attr->transfer,
5388 dev_flow->dv.actions[actions_n++] =
5389 dev_flow->dv.encap_decap->verbs_action;
5391 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5393 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5394 /* Check if this decap is followed by encap. */
5395 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5396 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5399 /* Handle decap only if it isn't followed by encap. */
5400 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5401 if (flow_dv_create_action_l2_decap
5402 (dev, dev_flow, attr->transfer, error))
5404 dev_flow->dv.actions[actions_n++] =
5405 dev_flow->dv.encap_decap->verbs_action;
5407 /* If decap is followed by encap, handle it at encap. */
5408 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5410 case RTE_FLOW_ACTION_TYPE_JUMP:
5411 jump_data = action->conf;
5412 ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5413 jump_data->group, &table,
5417 tbl = flow_dv_tbl_resource_get(dev, table,
5419 attr->transfer, error);
5421 return rte_flow_error_set
5423 RTE_FLOW_ERROR_TYPE_ACTION,
5425 "cannot create jump action.");
5426 jump_tbl_resource.tbl = tbl;
5427 if (flow_dv_jump_tbl_resource_register
5428 (dev, &jump_tbl_resource, dev_flow, error)) {
5429 flow_dv_tbl_resource_release(tbl);
5430 return rte_flow_error_set
5432 RTE_FLOW_ERROR_TYPE_ACTION,
5434 "cannot create jump action.");
5436 dev_flow->dv.actions[actions_n++] =
5437 dev_flow->dv.jump->action;
5438 action_flags |= MLX5_FLOW_ACTION_JUMP;
5440 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5441 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5442 if (flow_dv_convert_action_modify_mac(&res, actions,
5445 action_flags |= actions->type ==
5446 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5447 MLX5_FLOW_ACTION_SET_MAC_SRC :
5448 MLX5_FLOW_ACTION_SET_MAC_DST;
5450 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5451 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5452 if (flow_dv_convert_action_modify_ipv4(&res, actions,
5455 action_flags |= actions->type ==
5456 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5457 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5458 MLX5_FLOW_ACTION_SET_IPV4_DST;
5460 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5461 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5462 if (flow_dv_convert_action_modify_ipv6(&res, actions,
5465 action_flags |= actions->type ==
5466 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5467 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5468 MLX5_FLOW_ACTION_SET_IPV6_DST;
5470 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5471 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5472 if (flow_dv_convert_action_modify_tp(&res, actions,
5476 action_flags |= actions->type ==
5477 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5478 MLX5_FLOW_ACTION_SET_TP_SRC :
5479 MLX5_FLOW_ACTION_SET_TP_DST;
5481 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5482 if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5486 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5488 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5489 if (flow_dv_convert_action_modify_ttl(&res, actions,
5493 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5495 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5496 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5497 if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5500 action_flags |= actions->type ==
5501 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5502 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5503 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5506 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5507 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5508 if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5511 action_flags |= actions->type ==
5512 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5513 MLX5_FLOW_ACTION_INC_TCP_ACK :
5514 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5516 case RTE_FLOW_ACTION_TYPE_END:
5518 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5519 /* create modify action if needed. */
5520 if (flow_dv_modify_hdr_resource_register
5525 dev_flow->dv.actions[modify_action_position] =
5526 dev_flow->dv.modify_hdr->verbs_action;
5532 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5533 modify_action_position == UINT32_MAX)
5534 modify_action_position = actions_n++;
5536 dev_flow->dv.actions_n = actions_n;
5537 flow->actions = action_flags;
5538 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5539 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5541 switch (items->type) {
5542 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5543 flow_dv_translate_item_port_id(dev, match_mask,
5544 match_value, items);
5545 last_item = MLX5_FLOW_ITEM_PORT_ID;
5547 case RTE_FLOW_ITEM_TYPE_ETH:
5548 flow_dv_translate_item_eth(match_mask, match_value,
5550 matcher.priority = MLX5_PRIORITY_MAP_L2;
5551 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5552 MLX5_FLOW_LAYER_OUTER_L2;
5554 case RTE_FLOW_ITEM_TYPE_VLAN:
5555 flow_dv_translate_item_vlan(dev_flow,
5556 match_mask, match_value,
5558 matcher.priority = MLX5_PRIORITY_MAP_L2;
5559 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5560 MLX5_FLOW_LAYER_INNER_VLAN) :
5561 (MLX5_FLOW_LAYER_OUTER_L2 |
5562 MLX5_FLOW_LAYER_OUTER_VLAN);
5564 case RTE_FLOW_ITEM_TYPE_IPV4:
5565 mlx5_flow_tunnel_ip_check(items, next_protocol,
5566 &item_flags, &tunnel);
5567 flow_dv_translate_item_ipv4(match_mask, match_value,
5568 items, tunnel, flow->group);
5569 matcher.priority = MLX5_PRIORITY_MAP_L3;
5570 dev_flow->dv.hash_fields |=
5571 mlx5_flow_hashfields_adjust
5573 MLX5_IPV4_LAYER_TYPES,
5574 MLX5_IPV4_IBV_RX_HASH);
5575 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5576 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5577 if (items->mask != NULL &&
5578 ((const struct rte_flow_item_ipv4 *)
5579 items->mask)->hdr.next_proto_id) {
5581 ((const struct rte_flow_item_ipv4 *)
5582 (items->spec))->hdr.next_proto_id;
5584 ((const struct rte_flow_item_ipv4 *)
5585 (items->mask))->hdr.next_proto_id;
5587 /* Reset for inner layer. */
5588 next_protocol = 0xff;
5591 case RTE_FLOW_ITEM_TYPE_IPV6:
5592 mlx5_flow_tunnel_ip_check(items, next_protocol,
5593 &item_flags, &tunnel);
5594 flow_dv_translate_item_ipv6(match_mask, match_value,
5595 items, tunnel, flow->group);
5596 matcher.priority = MLX5_PRIORITY_MAP_L3;
5597 dev_flow->dv.hash_fields |=
5598 mlx5_flow_hashfields_adjust
5600 MLX5_IPV6_LAYER_TYPES,
5601 MLX5_IPV6_IBV_RX_HASH);
5602 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5603 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5604 if (items->mask != NULL &&
5605 ((const struct rte_flow_item_ipv6 *)
5606 items->mask)->hdr.proto) {
5608 ((const struct rte_flow_item_ipv6 *)
5609 items->spec)->hdr.proto;
5611 ((const struct rte_flow_item_ipv6 *)
5612 items->mask)->hdr.proto;
5614 /* Reset for inner layer. */
5615 next_protocol = 0xff;
5618 case RTE_FLOW_ITEM_TYPE_TCP:
5619 flow_dv_translate_item_tcp(match_mask, match_value,
5621 matcher.priority = MLX5_PRIORITY_MAP_L4;
5622 dev_flow->dv.hash_fields |=
5623 mlx5_flow_hashfields_adjust
5624 (dev_flow, tunnel, ETH_RSS_TCP,
5625 IBV_RX_HASH_SRC_PORT_TCP |
5626 IBV_RX_HASH_DST_PORT_TCP);
5627 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5628 MLX5_FLOW_LAYER_OUTER_L4_TCP;
5630 case RTE_FLOW_ITEM_TYPE_UDP:
5631 flow_dv_translate_item_udp(match_mask, match_value,
5633 matcher.priority = MLX5_PRIORITY_MAP_L4;
5634 dev_flow->dv.hash_fields |=
5635 mlx5_flow_hashfields_adjust
5636 (dev_flow, tunnel, ETH_RSS_UDP,
5637 IBV_RX_HASH_SRC_PORT_UDP |
5638 IBV_RX_HASH_DST_PORT_UDP);
5639 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5640 MLX5_FLOW_LAYER_OUTER_L4_UDP;
5642 case RTE_FLOW_ITEM_TYPE_GRE:
5643 flow_dv_translate_item_gre(match_mask, match_value,
5645 last_item = MLX5_FLOW_LAYER_GRE;
5647 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5648 flow_dv_translate_item_gre_key(match_mask,
5649 match_value, items);
5650 last_item = MLX5_FLOW_LAYER_GRE_KEY;
5652 case RTE_FLOW_ITEM_TYPE_NVGRE:
5653 flow_dv_translate_item_nvgre(match_mask, match_value,
5655 last_item = MLX5_FLOW_LAYER_GRE;
5657 case RTE_FLOW_ITEM_TYPE_VXLAN:
5658 flow_dv_translate_item_vxlan(match_mask, match_value,
5660 last_item = MLX5_FLOW_LAYER_VXLAN;
5662 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5663 flow_dv_translate_item_vxlan(match_mask, match_value,
5665 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5667 case RTE_FLOW_ITEM_TYPE_MPLS:
5668 flow_dv_translate_item_mpls(match_mask, match_value,
5669 items, last_item, tunnel);
5670 last_item = MLX5_FLOW_LAYER_MPLS;
5672 case RTE_FLOW_ITEM_TYPE_META:
5673 flow_dv_translate_item_meta(match_mask, match_value,
5675 last_item = MLX5_FLOW_ITEM_METADATA;
5677 case RTE_FLOW_ITEM_TYPE_ICMP:
5678 flow_dv_translate_item_icmp(match_mask, match_value,
5680 last_item = MLX5_FLOW_LAYER_ICMP;
5682 case RTE_FLOW_ITEM_TYPE_ICMP6:
5683 flow_dv_translate_item_icmp6(match_mask, match_value,
5685 last_item = MLX5_FLOW_LAYER_ICMP6;
5690 item_flags |= last_item;
5693 * In case of ingress traffic when E-Switch mode is enabled,
5694 * we have two cases where we need to set the source port manually.
5695 * The first one is the case of the NIC steering rule, and the second is
5696 * the E-Switch rule where no port_id item was found. In both cases
5697 * the source port is set according to the current port in use.
5699 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
5700 (priv->representor || priv->master)) {
5701 if (flow_dv_translate_item_port_id(dev, match_mask,
5705 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
5706 dev_flow->dv.value.buf));
5707 dev_flow->layers = item_flags;
5708 /* Register matcher. */
5709 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
5711 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
5713 matcher.egress = attr->egress;
5714 matcher.group = flow->group;
5715 matcher.transfer = attr->transfer;
5716 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
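/*
 * Illustrative usage sketch (helper name and the attribute, pattern
 * and action values are examples only): a minimal rule this function
 * is expected to translate, matching UDP over IPv4 on ingress and
 * steering to queue 0. In a real application it would be submitted
 * through rte_flow_create().
 */
static inline void
flow_dv_translate_usage_example(struct rte_eth_dev *dev,
				struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)rte_flow_create(dev->data->port_id, &attr, pattern, actions,
			      error);
}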
5722 * Apply the flow to the NIC.
5724 * @param[in] dev
5725 * Pointer to the Ethernet device structure.
5726 * @param[in, out] flow
5727 * Pointer to flow structure.
5728 * @param[out] error
5729 * Pointer to error structure.
5731 * @return
5732 * 0 on success, a negative errno value otherwise and rte_errno is set.
5735 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5736 struct rte_flow_error *error)
5738 struct mlx5_flow_dv *dv;
5739 struct mlx5_flow *dev_flow;
5740 struct mlx5_priv *priv = dev->data->dev_private;
5744 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5747 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
5748 if (flow->transfer) {
5749 dv->actions[n++] = priv->sh->esw_drop_action;
5751 dv->hrxq = mlx5_hrxq_drop_new(dev);
5755 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5757 "cannot get drop hash queue");
5760 dv->actions[n++] = dv->hrxq->action;
5762 } else if (flow->actions &
5763 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
5764 struct mlx5_hrxq *hrxq;
5766 hrxq = mlx5_hrxq_get(dev, flow->key,
5767 MLX5_RSS_HASH_KEY_LEN,
5770 flow->rss.queue_num);
5772 hrxq = mlx5_hrxq_new
5773 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
5774 dv->hash_fields, (*flow->queue),
5775 flow->rss.queue_num,
5776 !!(dev_flow->layers &
5777 MLX5_FLOW_LAYER_TUNNEL));
5782 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5783 "cannot get hash queue");
5787 dv->actions[n++] = dv->hrxq->action;
5790 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
5791 (void *)&dv->value, n,
5794 rte_flow_error_set(error, errno,
5795 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5797 "hardware refuses to create flow");
5800 if (priv->vmwa_context &&
5801 dev_flow->dv.vf_vlan.tag &&
5802 !dev_flow->dv.vf_vlan.created) {
5804 * The rule contains the VLAN pattern.
5805 * For VF we are going to create a VLAN
5806 * interface to make the hypervisor set the
5807 * correct e-Switch vport context.
5809 mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
5814 err = rte_errno; /* Save rte_errno before cleanup. */
5815 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5816 struct mlx5_flow_dv *dv = &dev_flow->dv;
5818 if (flow->actions & MLX5_FLOW_ACTION_DROP)
5819 mlx5_hrxq_drop_release(dev);
5821 mlx5_hrxq_release(dev, dv->hrxq);
5824 if (dev_flow->dv.vf_vlan.tag &&
5825 dev_flow->dv.vf_vlan.created)
5826 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5828 rte_errno = err; /* Restore rte_errno. */
5833 * Release the flow matcher.
5836 * Pointer to Ethernet device.
5838 * Pointer to mlx5_flow.
5841 * 1 while a reference on it exists, 0 when freed.
5844 flow_dv_matcher_release(struct rte_eth_dev *dev,
5845 struct mlx5_flow *flow)
5847 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
5848 struct mlx5_priv *priv = dev->data->dev_private;
5849 struct mlx5_ibv_shared *sh = priv->sh;
5850 struct mlx5_flow_tbl_resource *tbl;
5852 assert(matcher->matcher_object);
5853 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
5854 dev->data->port_id, (void *)matcher,
5855 rte_atomic32_read(&matcher->refcnt));
5856 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
5857 claim_zero(mlx5_glue->dv_destroy_flow_matcher
5858 (matcher->matcher_object));
5859 LIST_REMOVE(matcher, next);
5860 if (matcher->egress)
5861 tbl = &sh->tx_tbl[matcher->group];
5863 tbl = &sh->rx_tbl[matcher->group];
5864 flow_dv_tbl_resource_release(tbl);
5866 DRV_LOG(DEBUG, "port %u matcher %p: removed",
5867 dev->data->port_id, (void *)matcher);
5874 * Release an encap/decap resource.
5877 * Pointer to mlx5_flow.
5880 * 1 while a reference on it exists, 0 when freed.
5883 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
5885 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
5886 flow->dv.encap_decap;
5888 assert(cache_resource->verbs_action);
5889 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
5890 (void *)cache_resource,
5891 rte_atomic32_read(&cache_resource->refcnt));
5892 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5893 claim_zero(mlx5_glue->destroy_flow_action
5894 (cache_resource->verbs_action));
5895 LIST_REMOVE(cache_resource, next);
5896 rte_free(cache_resource);
5897 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
5898 (void *)cache_resource);
5905 * Release a jump to table action resource.
5908 * Pointer to mlx5_flow.
5911 * 1 while a reference on it exists, 0 when freed.
5914 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
5916 struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
5919 assert(cache_resource->action);
5920 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
5921 (void *)cache_resource,
5922 rte_atomic32_read(&cache_resource->refcnt));
5923 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5924 claim_zero(mlx5_glue->destroy_flow_action
5925 (cache_resource->action));
5926 LIST_REMOVE(cache_resource, next);
5927 flow_dv_tbl_resource_release(cache_resource->tbl);
5928 rte_free(cache_resource);
5929 DRV_LOG(DEBUG, "jump table resource %p: removed",
5930 (void *)cache_resource);
5937 * Release a modify-header resource.
5940 * Pointer to mlx5_flow.
5943 * 1 while a reference on it exists, 0 when freed.
5946 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
5948 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
5949 flow->dv.modify_hdr;
5951 assert(cache_resource->verbs_action);
5952 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
5953 (void *)cache_resource,
5954 rte_atomic32_read(&cache_resource->refcnt));
5955 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5956 claim_zero(mlx5_glue->destroy_flow_action
5957 (cache_resource->verbs_action));
5958 LIST_REMOVE(cache_resource, next);
5959 rte_free(cache_resource);
5960 DRV_LOG(DEBUG, "modify-header resource %p: removed",
5961 (void *)cache_resource);
5968 * Release port ID action resource.
5971 * Pointer to mlx5_flow.
5974 * 1 while a reference on it exists, 0 when freed.
5977 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
5979 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
5980 flow->dv.port_id_action;
5982 assert(cache_resource->action);
5983 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
5984 (void *)cache_resource,
5985 rte_atomic32_read(&cache_resource->refcnt));
5986 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5987 claim_zero(mlx5_glue->destroy_flow_action
5988 (cache_resource->action));
5989 LIST_REMOVE(cache_resource, next);
5990 rte_free(cache_resource);
5991 DRV_LOG(DEBUG, "port id action resource %p: removed",
5992 (void *)cache_resource);
5999 * Release push vlan action resource.
6002 * Pointer to mlx5_flow.
6005 * 1 while a reference on it exists, 0 when freed.
6008 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6010 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6011 flow->dv.push_vlan_res;
6013 assert(cache_resource->action);
6014 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6015 (void *)cache_resource,
6016 rte_atomic32_read(&cache_resource->refcnt));
6017 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6018 claim_zero(mlx5_glue->destroy_flow_action
6019 (cache_resource->action));
6020 LIST_REMOVE(cache_resource, next);
6021 rte_free(cache_resource);
6022 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6023 (void *)cache_resource);
6030 * Remove the flow from the NIC but keep it in memory.
6033 * Pointer to Ethernet device.
6034 * @param[in, out] flow
6035 * Pointer to flow structure.
6038 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6040 struct mlx5_flow_dv *dv;
6041 struct mlx5_flow *dev_flow;
6045 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6048 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6052 if (flow->actions & MLX5_FLOW_ACTION_DROP)
6053 mlx5_hrxq_drop_release(dev);
6055 mlx5_hrxq_release(dev, dv->hrxq);
6058 if (dev_flow->dv.vf_vlan.tag &&
6059 dev_flow->dv.vf_vlan.created)
6060 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6065 * Remove the flow from the NIC and the memory.
6068 * Pointer to the Ethernet device structure.
6069 * @param[in, out] flow
6070 * Pointer to flow structure.
6073 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6075 struct mlx5_flow *dev_flow;
6079 flow_dv_remove(dev, flow);
6080 if (flow->counter) {
6081 flow_dv_counter_release(dev, flow->counter);
6082 flow->counter = NULL;
6084 if (flow->tag_resource) {
6085 flow_dv_tag_release(dev, flow->tag_resource);
6086 flow->tag_resource = NULL;
6088 while (!LIST_EMPTY(&flow->dev_flows)) {
6089 dev_flow = LIST_FIRST(&flow->dev_flows);
6090 LIST_REMOVE(dev_flow, next);
6091 if (dev_flow->dv.matcher)
6092 flow_dv_matcher_release(dev, dev_flow);
6093 if (dev_flow->dv.encap_decap)
6094 flow_dv_encap_decap_resource_release(dev_flow);
6095 if (dev_flow->dv.modify_hdr)
6096 flow_dv_modify_hdr_resource_release(dev_flow);
6097 if (dev_flow->dv.jump)
6098 flow_dv_jump_tbl_resource_release(dev_flow);
6099 if (dev_flow->dv.port_id_action)
6100 flow_dv_port_id_action_resource_release(dev_flow);
6101 if (dev_flow->dv.push_vlan_res)
6102 flow_dv_push_vlan_action_resource_release(dev_flow);
6108 * Query a dv flow rule for its statistics via devx.
6110 * @param[in] dev
6111 * Pointer to Ethernet device.
6112 * @param[in] flow
6113 * Pointer to the sub flow.
6114 * @param[out] data
6115 * Data retrieved by the query.
6116 * @param[out] error
6117 * Perform verbose error reporting if not NULL.
6120 * 0 on success, a negative errno value otherwise and rte_errno is set.
6123 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6124 void *data, struct rte_flow_error *error)
6126 struct mlx5_priv *priv = dev->data->dev_private;
6127 struct rte_flow_query_count *qc = data;
6129 if (!priv->config.devx)
6130 return rte_flow_error_set(error, ENOTSUP,
6131 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6133 "counters are not supported");
6134 if (flow->counter) {
6135 uint64_t pkts, bytes;
6136 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6140 return rte_flow_error_set(error, -err,
6141 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6142 NULL, "cannot read counters");
6145 qc->hits = pkts - flow->counter->hits;
6146 qc->bytes = bytes - flow->counter->bytes;
6148 flow->counter->hits = pkts;
6149 flow->counter->bytes = bytes;
6153 return rte_flow_error_set(error, EINVAL,
6154 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6156 "counters are not available");
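/*
 * Illustrative usage sketch (helper name is an example only): querying
 * the COUNT action serviced by the function above through the generic
 * rte_flow_query() entry point.
 */
static inline int
flow_dv_query_count_usage_example(uint16_t port_id, struct rte_flow *flow,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count qc = { .reset = 1 };

	return rte_flow_query(port_id, flow, &count_action, &qc, error);
}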
6162 * @see rte_flow_query()
6166 flow_dv_query(struct rte_eth_dev *dev,
6167 struct rte_flow *flow __rte_unused,
6168 const struct rte_flow_action *actions __rte_unused,
6169 void *data __rte_unused,
6170 struct rte_flow_error *error __rte_unused)
6174 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6175 switch (actions->type) {
6176 case RTE_FLOW_ACTION_TYPE_VOID:
6178 case RTE_FLOW_ACTION_TYPE_COUNT:
6179 ret = flow_dv_query_count(dev, flow, data, error);
6182 return rte_flow_error_set(error, ENOTSUP,
6183 RTE_FLOW_ERROR_TYPE_ACTION,
6185 "action not supported");
6192 * Mutex-protected thunk to flow_dv_translate().
6195 flow_d_translate(struct rte_eth_dev *dev,
6196 struct mlx5_flow *dev_flow,
6197 const struct rte_flow_attr *attr,
6198 const struct rte_flow_item items[],
6199 const struct rte_flow_action actions[],
6200 struct rte_flow_error *error)
6204 flow_d_shared_lock(dev);
6205 ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6206 flow_d_shared_unlock(dev);
6211 * Mutex-protected thunk to flow_dv_apply().
6214 flow_d_apply(struct rte_eth_dev *dev,
6215 struct rte_flow *flow,
6216 struct rte_flow_error *error)
6220 flow_d_shared_lock(dev);
6221 ret = flow_dv_apply(dev, flow, error);
6222 flow_d_shared_unlock(dev);
6227 * Mutex-protected thunk to flow_dv_remove().
6230 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6232 flow_d_shared_lock(dev);
6233 flow_dv_remove(dev, flow);
6234 flow_d_shared_unlock(dev);
6238 * Mutex-protected thunk to flow_dv_destroy().
6241 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6243 flow_d_shared_lock(dev);
6244 flow_dv_destroy(dev, flow);
6245 flow_d_shared_unlock(dev);
6248 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6249 .validate = flow_dv_validate,
6250 .prepare = flow_dv_prepare,
6251 .translate = flow_d_translate,
6252 .apply = flow_d_apply,
6253 .remove = flow_d_remove,
6254 .destroy = flow_d_destroy,
6255 .query = flow_dv_query,
6258 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */