1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
29 #include <rte_vxlan.h>
33 #include "mlx5_defs.h"
34 #include "mlx5_glue.h"
35 #include "mlx5_flow.h"
37 #include "mlx5_rxtx.h"
39 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
41 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
42 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #ifndef HAVE_MLX5DV_DR_ESWITCH
46 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
47 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
51 #ifndef HAVE_MLX5DV_DR
52 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
56 sizeof(struct rte_flow_item_ipv4))
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
77 * Initialize flow attributes structure according to flow items' types.
79 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
80 * mode. For tunnel mode, the items to be modified are the outermost ones.
83 * Pointer to item specification.
85 * Pointer to flow attributes structure.
/*
 * Walk the pattern items up to the END item and record which L3/L4
 * headers (IPv4/IPv6/UDP/TCP) are present in @attr for later
 * modify-header conversion.
 * NOTE(review): switch body / attr assignments are elided in this
 * view — confirm against the full file.
 */
88 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
90 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
92 case RTE_FLOW_ITEM_TYPE_IPV4:
96 case RTE_FLOW_ITEM_TYPE_IPV6:
100 case RTE_FLOW_ITEM_TYPE_UDP:
104 case RTE_FLOW_ITEM_TYPE_TCP:
116 * Convert rte_mtr_color to mlx5 color.
/* Map an rte_mtr meter color to the corresponding mlx5 color encoding. */
125 rte_col_2_mlx5_col(enum rte_color rcol)
128 case RTE_COLOR_GREEN:
129 return MLX5_FLOW_COLOR_GREEN;
130 case RTE_COLOR_YELLOW:
131 return MLX5_FLOW_COLOR_YELLOW;
/* Returned for the RED color (case label elided in this view). */
133 return MLX5_FLOW_COLOR_RED;
/* Fallback for any unrecognized color value. */
137 return MLX5_FLOW_COLOR_UNDEFINED;
/*
 * Descriptor of one protocol-header field that HW modify-header
 * commands can rewrite. Tables below are terminated by a zero-size
 * entry (terminators elided in this view).
 */
140 struct field_modify_info {
141 uint32_t size; /* Size of field in protocol header, in bytes. */
142 uint32_t offset; /* Offset of field in protocol header, in bytes. */
143 enum mlx5_modification_field id; /* HW modification field identifier. */
/* Ethernet: each 48-bit MAC is split into a 32-bit and a 16-bit chunk. */
146 struct field_modify_info modify_eth[] = {
147 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
148 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
149 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
150 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* Outermost VLAN VID — unlike the other tables, size here is 12 bits. */
154 struct field_modify_info modify_vlan_out_first_vid[] = {
155 /* Size in bits !!! */
156 {12, 0, MLX5_MODI_OUT_FIRST_VID},
/* IPv4 fields reachable by modify-header: DSCP, TTL, source/dest address. */
160 struct field_modify_info modify_ipv4[] = {
161 {1, 1, MLX5_MODI_OUT_IP_DSCP},
162 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
163 {4, 12, MLX5_MODI_OUT_SIPV4},
164 {4, 16, MLX5_MODI_OUT_DIPV4},
/* IPv6 fields: 128-bit addresses are split into four 32-bit words each. */
168 struct field_modify_info modify_ipv6[] = {
169 {1, 0, MLX5_MODI_OUT_IP_DSCP},
170 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
171 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
172 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
173 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
174 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
175 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
176 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
177 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
178 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP source/destination ports. */
182 struct field_modify_info modify_udp[] = {
183 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
184 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* TCP ports plus sequence/acknowledgment numbers (used by inc/dec actions). */
188 struct field_modify_info modify_tcp[] = {
189 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
190 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
191 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
192 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
/*
 * Detect IP-in-IP tunneling from the L3 item's next-protocol value and
 * set the matching tunnel layer flag in *item_flags. Item must be an
 * IPv4 or IPv6 item (enforced by the assert below).
 */
197 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
198 uint8_t next_protocol, uint64_t *item_flags,
201 assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
202 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
/* IPv4-in-IP encapsulation. */
203 if (next_protocol == IPPROTO_IPIP) {
204 *item_flags |= MLX5_FLOW_LAYER_IPIP;
/* IPv6-in-IP encapsulation. */
207 if (next_protocol == IPPROTO_IPV6) {
208 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
214 * Acquire the synchronizing object to protect multithreaded access
215 * to shared dv context. Lock occurs only if context is actually
216 * shared, i.e. we have multiport IB device and representors are
220 * Pointer to the rte_eth_dev structure.
/*
 * Take the shared DV context mutex, but only when the IB context is
 * actually shared between ports (dv_refcnt > 1); single-user contexts
 * skip the lock entirely.
 */
223 flow_dv_shared_lock(struct rte_eth_dev *dev)
225 struct mlx5_priv *priv = dev->data->dev_private;
226 struct mlx5_ibv_shared *sh = priv->sh;
228 if (sh->dv_refcnt > 1) {
231 ret = pthread_mutex_lock(&sh->dv_mutex);
/* Release the shared DV context mutex; counterpart of flow_dv_shared_lock(). */
238 flow_dv_shared_unlock(struct rte_eth_dev *dev)
240 struct mlx5_priv *priv = dev->data->dev_private;
241 struct mlx5_ibv_shared *sh = priv->sh;
243 if (sh->dv_refcnt > 1) {
246 ret = pthread_mutex_unlock(&sh->dv_mutex);
252 /* Update VLAN's VID/PCP based on input rte_flow_action.
255 * Pointer to struct rte_flow_action.
257 * Pointer to struct rte_vlan_hdr.
/*
 * Fold an OF_SET_VLAN_PCP or OF_SET_VLAN_VID action into @vlan->vlan_tci:
 * PCP occupies bits 15:13, VID the low 12 bits. Other action types are
 * ignored.
 */
260 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
261 struct rte_vlan_hdr *vlan)
264 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
266 ((const struct rte_flow_action_of_set_vlan_pcp *)
267 action->conf)->vlan_pcp;
/* Shift PCP into bits 15:13 and replace the old priority field. */
268 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
269 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
270 vlan->vlan_tci |= vlan_tci;
271 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
/* Replace only the 12-bit VID; conf->vlan_vid arrives big-endian. */
272 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
273 vlan->vlan_tci |= rte_be_to_cpu_16
274 (((const struct rte_flow_action_of_set_vlan_vid *)
275 action->conf)->vlan_vid);
280 * Fetch 1, 2, 3 or 4 byte field from the byte array
281 * and return as unsigned integer in host-endian format.
284 * Pointer to data array.
286 * Size of field to extract.
289 * converted field in host endian format.
/*
 * Load a 1/2/3/4-byte big-endian field from @data and return it as a
 * host-endian uint32_t. Unaligned pointers are handled via the
 * unaligned_uint16_t/unaligned_uint32_t helper types.
 */
291 static inline uint32_t
292 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
/* 2-byte case. */
301 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
/* 3-byte case: 16-bit load plus the trailing byte. */
304 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
305 ret = (ret << 8) | *(data + sizeof(uint16_t));
/* 4-byte case. */
308 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
319 * Convert modify-header action to DV specification.
321 * Data length of each action is determined by provided field description
322 * and the item mask. Data bit offset and width of each action is determined
323 * by provided item mask.
326 * Pointer to item specification.
328 * Pointer to field modification information.
329 * For MLX5_MODIFICATION_TYPE_SET specifies destination field.
330 * For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
331 * For MLX5_MODIFICATION_TYPE_COPY specifies source field.
333 * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
334 * Negative offset value sets the same offset as source offset.
335 * size field is ignored, value is taken from source field.
336 * @param[in,out] resource
337 * Pointer to the modify-header resource.
339 * Type of modification.
341 * Pointer to the error structure.
344 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Core converter: translate a flow item (spec + mandatory mask) into one
 * HW modification command per @field table entry, appending to
 * resource->actions. Bit offset/width of each command is deduced from
 * the mask; fails with EINVAL when the command array is full or no
 * command was produced.
 */
347 flow_dv_convert_modify_action(struct rte_flow_item *item,
348 struct field_modify_info *field,
349 struct field_modify_info *dcopy,
350 struct mlx5_flow_dv_modify_hdr_resource *resource,
351 uint32_t type, struct rte_flow_error *error)
353 uint32_t i = resource->actions_num;
354 struct mlx5_modification_cmd *actions = resource->actions;
357 * The item and mask are provided in big-endian format.
358 * The fields should be presented as in big-endian format either.
359 * Mask must be always present, it defines the actual field width.
369 if (i >= MLX5_MODIFY_NUM)
370 return rte_flow_error_set(error, EINVAL,
371 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
372 "too many items to modify");
373 /* Fetch variable byte size mask from the array. */
374 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
375 field->offset, field->size);
380 /* Deduce actual data width in bits from mask value. */
381 off_b = rte_bsf32(mask);
382 size_b = sizeof(uint32_t) * CHAR_BIT -
383 off_b - __builtin_clz(mask);
/* HW encodes a full 32-bit width as length 0. */
385 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
386 actions[i].action_type = type;
387 actions[i].field = field->id;
388 actions[i].offset = off_b;
389 actions[i].length = size_b;
390 /* Convert entire record to expected big-endian format. */
391 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
392 if (type == MLX5_MODIFICATION_TYPE_COPY) {
/* COPY: destination comes from @dcopy; negative offset mirrors source. */
394 actions[i].dst_field = dcopy->id;
395 actions[i].dst_offset =
396 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
397 /* Convert entire record to big-endian format. */
398 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
/* SET/ADD: immediate data is taken from the item spec. */
401 data = flow_dv_fetch_field((const uint8_t *)item->spec +
402 field->offset, field->size);
403 /* Shift out the trailing masked bits from data. */
404 data = (data & mask) >> off_b;
405 actions[i].data1 = rte_cpu_to_be_32(data);
409 } while (field->size);
410 resource->actions_num = i;
411 if (!resource->actions_num)
412 return rte_flow_error_set(error, EINVAL,
413 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
414 "invalid modification flow item");
419 * Convert modify-header set IPv4 address action to DV specification.
421 * @param[in,out] resource
422 * Pointer to the modify-header resource.
424 * Pointer to action specification.
426 * Pointer to the error structure.
429 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv4 item (src or dst address per the action type)
 * and hand it to the generic modify-action converter as a SET command.
 */
432 flow_dv_convert_action_modify_ipv4
433 (struct mlx5_flow_dv_modify_hdr_resource *resource,
434 const struct rte_flow_action *action,
435 struct rte_flow_error *error)
437 const struct rte_flow_action_set_ipv4 *conf =
438 (const struct rte_flow_action_set_ipv4 *)(action->conf);
439 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
440 struct rte_flow_item_ipv4 ipv4;
441 struct rte_flow_item_ipv4 ipv4_mask;
443 memset(&ipv4, 0, sizeof(ipv4));
444 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
/* SET_IPV4_SRC targets the source address, otherwise the destination. */
445 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
446 ipv4.hdr.src_addr = conf->ipv4_addr;
447 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
449 ipv4.hdr.dst_addr = conf->ipv4_addr;
450 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
453 item.mask = &ipv4_mask;
454 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
455 MLX5_MODIFICATION_TYPE_SET, error);
459 * Convert modify-header set IPv6 address action to DV specification.
461 * @param[in,out] resource
462 * Pointer to the modify-header resource.
464 * Pointer to action specification.
466 * Pointer to the error structure.
469 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv6 item (src or dst address per the action type)
 * and hand it to the generic modify-action converter as a SET command.
 */
472 flow_dv_convert_action_modify_ipv6
473 (struct mlx5_flow_dv_modify_hdr_resource *resource,
474 const struct rte_flow_action *action,
475 struct rte_flow_error *error)
477 const struct rte_flow_action_set_ipv6 *conf =
478 (const struct rte_flow_action_set_ipv6 *)(action->conf);
479 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
480 struct rte_flow_item_ipv6 ipv6;
481 struct rte_flow_item_ipv6 ipv6_mask;
483 memset(&ipv6, 0, sizeof(ipv6));
484 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
/* SET_IPV6_SRC targets the source address, otherwise the destination. */
485 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
486 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
487 sizeof(ipv6.hdr.src_addr));
488 memcpy(&ipv6_mask.hdr.src_addr,
489 &rte_flow_item_ipv6_mask.hdr.src_addr,
490 sizeof(ipv6.hdr.src_addr));
492 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
493 sizeof(ipv6.hdr.dst_addr));
494 memcpy(&ipv6_mask.hdr.dst_addr,
495 &rte_flow_item_ipv6_mask.hdr.dst_addr,
496 sizeof(ipv6.hdr.dst_addr));
499 item.mask = &ipv6_mask;
500 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
501 MLX5_MODIFICATION_TYPE_SET, error);
505 * Convert modify-header set MAC address action to DV specification.
507 * @param[in,out] resource
508 * Pointer to the modify-header resource.
510 * Pointer to action specification.
512 * Pointer to the error structure.
515 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic ETH item (src or dst MAC per the action type) and
 * hand it to the generic modify-action converter as a SET command.
 */
518 flow_dv_convert_action_modify_mac
519 (struct mlx5_flow_dv_modify_hdr_resource *resource,
520 const struct rte_flow_action *action,
521 struct rte_flow_error *error)
523 const struct rte_flow_action_set_mac *conf =
524 (const struct rte_flow_action_set_mac *)(action->conf);
525 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
526 struct rte_flow_item_eth eth;
527 struct rte_flow_item_eth eth_mask;
529 memset(&eth, 0, sizeof(eth));
530 memset(&eth_mask, 0, sizeof(eth_mask));
/* SET_MAC_SRC targets the source MAC, otherwise the destination MAC. */
531 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
532 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
533 sizeof(eth.src.addr_bytes));
534 memcpy(&eth_mask.src.addr_bytes,
535 &rte_flow_item_eth_mask.src.addr_bytes,
536 sizeof(eth_mask.src.addr_bytes));
538 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
539 sizeof(eth.dst.addr_bytes));
540 memcpy(&eth_mask.dst.addr_bytes,
541 &rte_flow_item_eth_mask.dst.addr_bytes,
542 sizeof(eth_mask.dst.addr_bytes));
545 item.mask = &eth_mask;
546 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
547 MLX5_MODIFICATION_TYPE_SET, error);
551 * Convert modify-header set VLAN VID action to DV specification.
553 * @param[in,out] resource
554 * Pointer to the modify-header resource.
556 * Pointer to action specification.
558 * Pointer to the error structure.
561 * 0 on success, a negative errno value otherwise and rte_errno is set.
564 flow_dv_convert_action_modify_vlan_vid
565 (struct mlx5_flow_dv_modify_hdr_resource *resource,
566 const struct rte_flow_action *action,
567 struct rte_flow_error *error)
569 const struct rte_flow_action_of_set_vlan_vid *conf =
570 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
571 int i = resource->actions_num;
572 struct mlx5_modification_cmd *actions = &resource->actions[i];
573 struct field_modify_info *field = modify_vlan_out_first_vid;
575 if (i >= MLX5_MODIFY_NUM)
576 return rte_flow_error_set(error, EINVAL,
577 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
578 "too many items to modify");
579 actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
580 actions[i].field = field->id;
581 actions[i].length = field->size;
582 actions[i].offset = field->offset;
583 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
584 actions[i].data1 = conf->vlan_vid;
585 actions[i].data1 = actions[i].data1 << 16;
586 resource->actions_num = ++i;
591 * Convert modify-header set TP action to DV specification.
593 * @param[in,out] resource
594 * Pointer to the modify-header resource.
596 * Pointer to action specification.
598 * Pointer to rte_flow_item objects list.
600 * Pointer to flow attributes structure.
602 * Pointer to the error structure.
605 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_TP_SRC/SET_TP_DST action into a SET modify-header
 * command. The transport protocol (UDP vs TCP) is decided from the
 * pattern attributes gathered by flow_dv_attr_init(); the branch
 * selecting UDP vs TCP is elided in this view.
 */
608 flow_dv_convert_action_modify_tp
609 (struct mlx5_flow_dv_modify_hdr_resource *resource,
610 const struct rte_flow_action *action,
611 const struct rte_flow_item *items,
612 union flow_dv_attr *attr,
613 struct rte_flow_error *error)
615 const struct rte_flow_action_set_tp *conf =
616 (const struct rte_flow_action_set_tp *)(action->conf);
617 struct rte_flow_item item;
618 struct rte_flow_item_udp udp;
619 struct rte_flow_item_udp udp_mask;
620 struct rte_flow_item_tcp tcp;
621 struct rte_flow_item_tcp tcp_mask;
622 struct field_modify_info *field;
/* Populate attr with the L3/L4 layers present in the pattern. */
625 flow_dv_attr_init(items, attr);
627 memset(&udp, 0, sizeof(udp));
628 memset(&udp_mask, 0, sizeof(udp_mask));
629 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
630 udp.hdr.src_port = conf->port;
631 udp_mask.hdr.src_port =
632 rte_flow_item_udp_mask.hdr.src_port;
634 udp.hdr.dst_port = conf->port;
635 udp_mask.hdr.dst_port =
636 rte_flow_item_udp_mask.hdr.dst_port;
638 item.type = RTE_FLOW_ITEM_TYPE_UDP;
640 item.mask = &udp_mask;
/* TCP variant of the same src/dst port selection. */
644 memset(&tcp, 0, sizeof(tcp));
645 memset(&tcp_mask, 0, sizeof(tcp_mask));
646 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
647 tcp.hdr.src_port = conf->port;
648 tcp_mask.hdr.src_port =
649 rte_flow_item_tcp_mask.hdr.src_port;
651 tcp.hdr.dst_port = conf->port;
652 tcp_mask.hdr.dst_port =
653 rte_flow_item_tcp_mask.hdr.dst_port;
655 item.type = RTE_FLOW_ITEM_TYPE_TCP;
657 item.mask = &tcp_mask;
660 return flow_dv_convert_modify_action(&item, field, NULL, resource,
661 MLX5_MODIFICATION_TYPE_SET, error);
665 * Convert modify-header set TTL action to DV specification.
667 * @param[in,out] resource
668 * Pointer to the modify-header resource.
670 * Pointer to action specification.
672 * Pointer to rte_flow_item objects list.
674 * Pointer to flow attributes structure.
676 * Pointer to the error structure.
679 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_TTL action into a SET modify-header command on the
 * IPv4 TTL or IPv6 hop-limit field; the IPv4/IPv6 choice depends on
 * the pattern attributes (selection branch elided in this view).
 */
682 flow_dv_convert_action_modify_ttl
683 (struct mlx5_flow_dv_modify_hdr_resource *resource,
684 const struct rte_flow_action *action,
685 const struct rte_flow_item *items,
686 union flow_dv_attr *attr,
687 struct rte_flow_error *error)
689 const struct rte_flow_action_set_ttl *conf =
690 (const struct rte_flow_action_set_ttl *)(action->conf);
691 struct rte_flow_item item;
692 struct rte_flow_item_ipv4 ipv4;
693 struct rte_flow_item_ipv4 ipv4_mask;
694 struct rte_flow_item_ipv6 ipv6;
695 struct rte_flow_item_ipv6 ipv6_mask;
696 struct field_modify_info *field;
/* Populate attr with the L3/L4 layers present in the pattern. */
699 flow_dv_attr_init(items, attr);
/* IPv4 path: set time_to_live, full-byte mask. */
701 memset(&ipv4, 0, sizeof(ipv4));
702 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
703 ipv4.hdr.time_to_live = conf->ttl_value;
704 ipv4_mask.hdr.time_to_live = 0xFF;
705 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
707 item.mask = &ipv4_mask;
/* IPv6 path: set hop_limits, full-byte mask. */
711 memset(&ipv6, 0, sizeof(ipv6));
712 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
713 ipv6.hdr.hop_limits = conf->ttl_value;
714 ipv6_mask.hdr.hop_limits = 0xFF;
715 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
717 item.mask = &ipv6_mask;
720 return flow_dv_convert_modify_action(&item, field, NULL, resource,
721 MLX5_MODIFICATION_TYPE_SET, error);
725 * Convert modify-header decrement TTL action to DV specification.
727 * @param[in,out] resource
728 * Pointer to the modify-header resource.
730 * Pointer to action specification.
732 * Pointer to rte_flow_item objects list.
734 * Pointer to flow attributes structure.
736 * Pointer to the error structure.
739 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a DEC_TTL action: emit an ADD command with value 0xFF on the
 * TTL / hop-limit field, which decrements the 8-bit field by one via
 * wrap-around (HW has no subtract operation).
 */
742 flow_dv_convert_action_modify_dec_ttl
743 (struct mlx5_flow_dv_modify_hdr_resource *resource,
744 const struct rte_flow_item *items,
745 union flow_dv_attr *attr,
746 struct rte_flow_error *error)
748 struct rte_flow_item item;
749 struct rte_flow_item_ipv4 ipv4;
750 struct rte_flow_item_ipv4 ipv4_mask;
751 struct rte_flow_item_ipv6 ipv6;
752 struct rte_flow_item_ipv6 ipv6_mask;
753 struct field_modify_info *field;
/* Populate attr with the L3/L4 layers present in the pattern. */
756 flow_dv_attr_init(items, attr);
/* IPv4 path: ADD 0xFF == decrement by 1 modulo 256. */
758 memset(&ipv4, 0, sizeof(ipv4));
759 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
760 ipv4.hdr.time_to_live = 0xFF;
761 ipv4_mask.hdr.time_to_live = 0xFF;
762 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
764 item.mask = &ipv4_mask;
/* IPv6 path: same trick on hop_limits. */
768 memset(&ipv6, 0, sizeof(ipv6));
769 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
770 ipv6.hdr.hop_limits = 0xFF;
771 ipv6_mask.hdr.hop_limits = 0xFF;
772 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
774 item.mask = &ipv6_mask;
777 return flow_dv_convert_modify_action(&item, field, NULL, resource,
778 MLX5_MODIFICATION_TYPE_ADD, error);
782 * Convert modify-header increment/decrement TCP Sequence number
783 * to DV specification.
785 * @param[in,out] resource
786 * Pointer to the modify-header resource.
788 * Pointer to action specification.
790 * Pointer to the error structure.
793 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert INC/DEC_TCP_SEQ into an ADD modify-header command on the TCP
 * sequence number; decrement is emulated by adding the two's-complement
 * value (negation of @value is elided in this view).
 */
796 flow_dv_convert_action_modify_tcp_seq
797 (struct mlx5_flow_dv_modify_hdr_resource *resource,
798 const struct rte_flow_action *action,
799 struct rte_flow_error *error)
801 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
802 uint64_t value = rte_be_to_cpu_32(*conf);
803 struct rte_flow_item item;
804 struct rte_flow_item_tcp tcp;
805 struct rte_flow_item_tcp tcp_mask;
807 memset(&tcp, 0, sizeof(tcp));
808 memset(&tcp_mask, 0, sizeof(tcp_mask));
809 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
811 * The HW has no decrement operation, only increment operation.
812 * To simulate decrement X from Y using increment operation
813 * we need to add UINT32_MAX X times to Y.
814 * Each adding of UINT32_MAX decrements Y by 1.
817 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
/* Full 32-bit mask: the whole sequence number field is modified. */
818 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
819 item.type = RTE_FLOW_ITEM_TYPE_TCP;
821 item.mask = &tcp_mask;
822 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
823 MLX5_MODIFICATION_TYPE_ADD, error);
827 * Convert modify-header increment/decrement TCP Acknowledgment number
828 * to DV specification.
830 * @param[in,out] resource
831 * Pointer to the modify-header resource.
833 * Pointer to action specification.
835 * Pointer to the error structure.
838 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert INC/DEC_TCP_ACK into an ADD modify-header command on the TCP
 * acknowledgment number; mirrors flow_dv_convert_action_modify_tcp_seq.
 */
841 flow_dv_convert_action_modify_tcp_ack
842 (struct mlx5_flow_dv_modify_hdr_resource *resource,
843 const struct rte_flow_action *action,
844 struct rte_flow_error *error)
846 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
847 uint64_t value = rte_be_to_cpu_32(*conf);
848 struct rte_flow_item item;
849 struct rte_flow_item_tcp tcp;
850 struct rte_flow_item_tcp tcp_mask;
852 memset(&tcp, 0, sizeof(tcp));
853 memset(&tcp_mask, 0, sizeof(tcp_mask));
854 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
856 * The HW has no decrement operation, only increment operation.
857 * To simulate decrement X from Y using increment operation
858 * we need to add UINT32_MAX X times to Y.
859 * Each adding of UINT32_MAX decrements Y by 1.
862 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
/* Full 32-bit mask: the whole ack number field is modified. */
863 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
864 item.type = RTE_FLOW_ITEM_TYPE_TCP;
866 item.mask = &tcp_mask;
867 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
868 MLX5_MODIFICATION_TYPE_ADD, error);
/* Lookup table: software metadata register ID -> HW modification field. */
871 static enum mlx5_modification_field reg_to_field[] = {
872 [REG_NONE] = MLX5_MODI_OUT_NONE,
873 [REG_A] = MLX5_MODI_META_DATA_REG_A,
874 [REG_B] = MLX5_MODI_META_DATA_REG_B,
875 [REG_C_0] = MLX5_MODI_META_REG_C_0,
876 [REG_C_1] = MLX5_MODI_META_REG_C_1,
877 [REG_C_2] = MLX5_MODI_META_REG_C_2,
878 [REG_C_3] = MLX5_MODI_META_REG_C_3,
879 [REG_C_4] = MLX5_MODI_META_REG_C_4,
880 [REG_C_5] = MLX5_MODI_META_REG_C_5,
881 [REG_C_6] = MLX5_MODI_META_REG_C_6,
882 [REG_C_7] = MLX5_MODI_META_REG_C_7,
886 * Convert register set to DV specification.
888 * @param[in,out] resource
889 * Pointer to the modify-header resource.
891 * Pointer to action specification.
893 * Pointer to the error structure.
896 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Append a SET command writing conf->data into the metadata register
 * selected by conf->id (translated via reg_to_field). Builds the
 * modification command directly rather than going through
 * flow_dv_convert_modify_action().
 */
899 flow_dv_convert_action_set_reg
900 (struct mlx5_flow_dv_modify_hdr_resource *resource,
901 const struct rte_flow_action *action,
902 struct rte_flow_error *error)
904 const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
905 struct mlx5_modification_cmd *actions = resource->actions;
906 uint32_t i = resource->actions_num;
908 if (i >= MLX5_MODIFY_NUM)
909 return rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
911 "too many items to modify");
912 assert(conf->id != REG_NONE);
913 assert(conf->id < RTE_DIM(reg_to_field));
914 actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
915 actions[i].field = reg_to_field[conf->id];
/* Convert control word and data to big-endian for the HW. */
916 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
917 actions[i].data1 = rte_cpu_to_be_32(conf->data);
/* NOTE(review): increment of i (original line 918) is elided here. */
919 resource->actions_num = i;
920 if (!resource->actions_num)
921 return rte_flow_error_set(error, EINVAL,
922 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
923 "invalid modification flow item");
928 * Convert SET_TAG action to DV specification.
931 * Pointer to the rte_eth_dev structure.
932 * @param[in,out] resource
933 * Pointer to the modify-header resource.
935 * Pointer to action specification.
937 * Pointer to the error structure.
940 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_TAG action: resolve the application tag index to a
 * metadata register via mlx5_flow_get_reg_id(), then emit a SET command
 * through the generic converter with a one-entry field table.
 */
943 flow_dv_convert_action_set_tag
944 (struct rte_eth_dev *dev,
945 struct mlx5_flow_dv_modify_hdr_resource *resource,
946 const struct rte_flow_action_set_tag *conf,
947 struct rte_flow_error *error)
/* Generic converter expects data/mask in big-endian memory layout. */
949 rte_be32_t data = rte_cpu_to_be_32(conf->data);
950 rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
951 struct rte_flow_item item = {
955 struct field_modify_info reg_c_x[] = {
958 enum mlx5_modification_field reg_type;
961 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
964 assert(ret != REG_NONE);
965 assert((unsigned int)ret < RTE_DIM(reg_to_field));
966 reg_type = reg_to_field[ret];
967 assert(reg_type > 0);
968 reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
969 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
970 MLX5_MODIFICATION_TYPE_SET, error);
974 * Convert internal COPY_REG action to DV specification.
977 * Pointer to the rte_eth_dev structure.
979 * Pointer to the modify-header resource.
981 * Pointer to action specification.
983 * Pointer to the error structure.
986 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert the internal COPY_REG action into a COPY modify-header
 * command (register -> register). When reg_c[0] is involved, the copy
 * is restricted to the bits the kernel reports as available
 * (sh->dv_regc0_mask) and offsets/masks are adjusted per byte order.
 */
989 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
990 struct mlx5_flow_dv_modify_hdr_resource *res,
991 const struct rte_flow_action *action,
992 struct rte_flow_error *error)
994 const struct mlx5_flow_action_copy_mreg *conf = action->conf;
995 rte_be32_t mask = RTE_BE32(UINT32_MAX);
996 struct rte_flow_item item = {
1000 struct field_modify_info reg_src[] = {
1001 {4, 0, reg_to_field[conf->src]},
1004 struct field_modify_info reg_dst = {
1006 .id = reg_to_field[conf->dst],
1008 /* Adjust reg_c[0] usage according to reported mask. */
1009 if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1010 struct mlx5_priv *priv = dev->data->dev_private;
1011 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
/* reg_c[0] sharing is only meaningful with extended metadata enabled. */
1014 assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1015 if (conf->dst == REG_C_0) {
1016 /* Copy to reg_c[0], within mask only. */
1017 reg_dst.offset = rte_bsf32(reg_c0);
1019 * Mask is ignoring the enianness, because
1020 * there is no conversion in datapath.
1022 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1023 /* Copy from destination lower bits to reg_c[0]. */
1024 mask = reg_c0 >> reg_dst.offset;
1026 /* Copy from destination upper bits to reg_c[0]. */
1027 mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1028 rte_fls_u32(reg_c0));
/* Source is reg_c[0]: mask covers the available reg_c[0] bits. */
1031 mask = rte_cpu_to_be_32(reg_c0);
1032 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1033 /* Copy from reg_c[0] to destination lower bits. */
1036 /* Copy from reg_c[0] to destination upper bits. */
1037 reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1038 (rte_fls_u32(reg_c0) -
1043 return flow_dv_convert_modify_action(&item,
1044 reg_src, &reg_dst, res,
1045 MLX5_MODIFICATION_TYPE_COPY,
1050 * Convert MARK action to DV specification. This routine is used
1051 * in extensive metadata only and requires metadata register to be
1052 * handled. In legacy mode hardware tag resource is engaged.
1055 * Pointer to the rte_eth_dev structure.
1057 * Pointer to MARK action specification.
1058 * @param[in,out] resource
1059 * Pointer to the modify-header resource.
1061 * Pointer to the error structure.
1064 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a MARK action (extended-metadata mode) into a SET command on
 * the register chosen by mlx5_flow_get_reg_id(MLX5_FLOW_MARK). For
 * reg_c[0] the value/mask are shifted into the available bit range.
 */
1067 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1068 const struct rte_flow_action_mark *conf,
1069 struct mlx5_flow_dv_modify_hdr_resource *resource,
1070 struct rte_flow_error *error)
1072 struct mlx5_priv *priv = dev->data->dev_private;
/* Limit the mark to both the API mask and the HW-reported mark mask. */
1073 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1074 priv->sh->dv_mark_mask);
1075 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1076 struct rte_flow_item item = {
1080 struct field_modify_info reg_c_x[] = {
1081 {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
1084 enum modify_reg reg;
/* A zero mark mask means the action cannot take effect. */
1087 return rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1089 NULL, "zero mark action mask");
1090 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1094 if (reg == REG_C_0) {
1095 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1096 uint32_t shl_c0 = rte_bsf32(msk_c0);
/* Shift value/mask up to the first available reg_c[0] bit. */
1098 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1099 mask = rte_cpu_to_be_32(mask) & msk_c0;
1100 mask = rte_cpu_to_be_32(mask << shl_c0);
1102 reg_c_x[0].id = reg_to_field[reg];
1103 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1104 MLX5_MODIFICATION_TYPE_SET, error);
1108 * Get metadata register index for specified steering domain.
1111 * Pointer to the rte_eth_dev structure.
1113 * Attributes of flow to determine steering domain.
1115 * Pointer to the error structure.
1118 * positive index on success, a negative errno value otherwise
1119 * and rte_errno is set.
/*
 * Resolve which metadata register serves the flow's steering domain:
 * FDB for transfer flows, otherwise the Rx/Tx metadata register
 * (domain selection arguments partially elided in this view).
 */
1121 static enum modify_reg
1122 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1123 const struct rte_flow_attr *attr,
1124 struct rte_flow_error *error)
1126 enum modify_reg reg =
1127 mlx5_flow_get_reg_id(dev, attr->transfer ?
1131 MLX5_METADATA_RX, 0, error);
1133 return rte_flow_error_set(error,
1134 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1135 NULL, "unavailable "
1136 "metadata register");
1141 * Convert SET_META action to DV specification.
1144 * Pointer to the rte_eth_dev structure.
1145 * @param[in,out] resource
1146 * Pointer to the modify-header resource.
1148 * Attributes of flow that includes this item.
1150 * Pointer to action specification.
1152 * Pointer to the error structure.
1155 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_META action into a SET command on the domain's
 * metadata register; for reg_c[0] the data/mask are shifted into the
 * available bit range, with byte-order-dependent shift direction.
 */
1158 flow_dv_convert_action_set_meta
1159 (struct rte_eth_dev *dev,
1160 struct mlx5_flow_dv_modify_hdr_resource *resource,
1161 const struct rte_flow_attr *attr,
1162 const struct rte_flow_action_set_meta *conf,
1163 struct rte_flow_error *error)
1165 uint32_t data = conf->data;
1166 uint32_t mask = conf->mask;
1167 struct rte_flow_item item = {
1171 struct field_modify_info reg_c_x[] = {
1174 enum modify_reg reg = flow_dv_get_metadata_reg(dev, attr, error);
1179 * In datapath code there is no endianness
1180 * coversions for perfromance reasons, all
1181 * pattern conversions are done in rte_flow.
1183 if (reg == REG_C_0) {
1184 struct mlx5_priv *priv = dev->data->dev_private;
1185 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
/* Shift amount depends on host byte order. */
1189 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1190 shl_c0 = rte_bsf32(msk_c0);
1192 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
/* Sanity: mask must fit within the available reg_c[0] bits. */
1196 assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1198 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1199 /* The routine expects parameters in memory as big-endian ones. */
1200 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1201 MLX5_MODIFICATION_TYPE_SET, error);
1205 * Convert modify-header set IPv4 DSCP action to DV specification.
1207 * @param[in,out] resource
1208 * Pointer to the modify-header resource.
1210 * Pointer to action specification.
1212 * Pointer to the error structure.
1215 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_IPV4_DSCP action into a SET command on the IPv4 ToS
 * byte; the mask is the DSCP mask shifted down to bits 5:0 because the
 * modify_ipv4 table addresses the field at that alignment.
 */
1218 flow_dv_convert_action_modify_ipv4_dscp
1219 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1220 const struct rte_flow_action *action,
1221 struct rte_flow_error *error)
1223 const struct rte_flow_action_set_dscp *conf =
1224 (const struct rte_flow_action_set_dscp *)(action->conf);
1225 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1226 struct rte_flow_item_ipv4 ipv4;
1227 struct rte_flow_item_ipv4 ipv4_mask;
1229 memset(&ipv4, 0, sizeof(ipv4));
1230 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1231 ipv4.hdr.type_of_service = conf->dscp;
1232 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1234 item.mask = &ipv4_mask;
1235 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1236 MLX5_MODIFICATION_TYPE_SET, error);
1240 * Convert modify-header set IPv6 DSCP action to DV specification.
1242 * @param[in,out] resource
1243 * Pointer to the modify-header resource.
1245 * Pointer to action specification.
1247 * Pointer to the error structure.
1250 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type, braces and some statements
 * are not visible in this excerpt. Mirrors the IPv4 DSCP variant above. */
1253 flow_dv_convert_action_modify_ipv6_dscp
1254 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1255 const struct rte_flow_action *action,
1256 struct rte_flow_error *error)
1258 const struct rte_flow_action_set_dscp *conf =
1259 (const struct rte_flow_action_set_dscp *)(action->conf);
/* Synthetic IPv6 item masking only the DSCP bits of vtc_flow. */
1260 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1261 struct rte_flow_item_ipv6 ipv6;
1262 struct rte_flow_item_ipv6 ipv6_mask;
1264 memset(&ipv6, 0, sizeof(ipv6));
1265 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1267 * Even though the DSCP bits offset of IPv6 is not byte aligned,
1268 * rdma-core only accept the DSCP bits byte aligned start from
1269 * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1270 * bits in IPv6 case as rdma-core requires byte aligned value.
1272 ipv6.hdr.vtc_flow = conf->dscp;
/* ">> 22" brings the 6 DSCP bits of the 32-bit vtc_flow down to bit 0,
 * consistent with the comment above. */
1273 ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1275 item.mask = &ipv6_mask;
1276 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1277 MLX5_MODIFICATION_TYPE_SET, error);
1281 * Validate MARK item.
1284 * Pointer to the rte_eth_dev structure.
1286 * Item specification.
1288 * Attributes of flow that includes this item.
1290 * Pointer to error structure.
1293 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — some lines (return type, braces, a few
 * checks between the visible ones) are missing from this excerpt. */
1296 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1297 const struct rte_flow_item *item,
1298 const struct rte_flow_attr *attr __rte_unused,
1299 struct rte_flow_error *error)
1301 struct mlx5_priv *priv = dev->data->dev_private;
1302 struct mlx5_dev_config *config = &priv->config;
1303 const struct rte_flow_item_mark *spec = item->spec;
1304 const struct rte_flow_item_mark *mask = item->mask;
/* HW limits which mark bits are matchable; dv_mark_mask narrows the
 * generic item mask accordingly. */
1305 const struct rte_flow_item_mark nic_mask = {
1306 .id = priv->sh->dv_mark_mask,
/* Matching on MARK requires extended-metadata mode: refuse in legacy
 * mode, without extended-register support, or when no register is
 * actually available. */
1310 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1311 return rte_flow_error_set(error, ENOTSUP,
1312 RTE_FLOW_ERROR_TYPE_ITEM, item,
1313 "extended metadata feature"
1315 if (!mlx5_flow_ext_mreg_supported(dev))
1316 return rte_flow_error_set(error, ENOTSUP,
1317 RTE_FLOW_ERROR_TYPE_ITEM, item,
1318 "extended metadata register"
1319 " isn't supported");
1321 return rte_flow_error_set(error, ENOTSUP,
1322 RTE_FLOW_ERROR_TYPE_ITEM, item,
1323 "extended metadata register"
1324 " isn't available");
1325 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1329 return rte_flow_error_set(error, EINVAL,
1330 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1332 "data cannot be empty");
/* The mark id must fit both the API limit and the HW-matchable bits. */
1333 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1334 return rte_flow_error_set(error, EINVAL,
1335 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1337 "mark id exceeds the limit");
1340 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1341 (const uint8_t *)&nic_mask,
1342 sizeof(struct rte_flow_item_mark),
1350 * Validate META item.
1353 * Pointer to the rte_eth_dev structure.
1355 * Item specification.
1357 * Attributes of flow that includes this item.
1359 * Pointer to error structure.
1362 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the conditions guarding lines 1381/1386
 * (empty spec / zero data, by the error strings) are not visible here. */
1365 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1366 const struct rte_flow_item *item,
1367 const struct rte_flow_attr *attr,
1368 struct rte_flow_error *error)
1370 struct mlx5_priv *priv = dev->data->dev_private;
1371 struct mlx5_dev_config *config = &priv->config;
1372 const struct rte_flow_item_meta *spec = item->spec;
1373 const struct rte_flow_item_meta *mask = item->mask;
1374 struct rte_flow_item_meta nic_mask = {
1377 enum modify_reg reg;
1381 return rte_flow_error_set(error, EINVAL,
1382 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1384 "data cannot be empty");
1386 return rte_flow_error_set(error, EINVAL,
1387 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1388 "data cannot be zero");
/* In extended-metadata mode META maps to a register chosen per flow
 * attributes; the matchable bits are then limited by dv_meta_mask. */
1389 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1390 if (!mlx5_flow_ext_mreg_supported(dev))
1391 return rte_flow_error_set(error, ENOTSUP,
1392 RTE_FLOW_ERROR_TYPE_ITEM, item,
1393 "extended metadata register"
1394 " isn't supported");
1395 reg = flow_dv_get_metadata_reg(dev, attr, error);
1399 return rte_flow_error_set(error, ENOTSUP,
1400 RTE_FLOW_ERROR_TYPE_ITEM, item,
1404 nic_mask.data = priv->sh->dv_meta_mask;
/* Default to the generic full mask when the user gave none. */
1407 mask = &rte_flow_item_meta_mask;
1408 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1409 (const uint8_t *)&nic_mask,
1410 sizeof(struct rte_flow_item_meta),
1416 * Validate TAG item.
1419 * Pointer to the rte_eth_dev structure.
1421 * Item specification.
1423 * Attributes of flow that includes this item.
1425 * Pointer to error structure.
1428 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type, braces and the condition
 * before line 1450 ("data cannot be empty") are not visible here. */
1431 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1432 const struct rte_flow_item *item,
1433 const struct rte_flow_attr *attr __rte_unused,
1434 struct rte_flow_error *error)
1436 const struct rte_flow_item_tag *spec = item->spec;
1437 const struct rte_flow_item_tag *mask = item->mask;
1438 const struct rte_flow_item_tag nic_mask = {
1439 .data = RTE_BE32(UINT32_MAX),
1444 if (!mlx5_flow_ext_mreg_supported(dev))
1445 return rte_flow_error_set(error, ENOTSUP,
1446 RTE_FLOW_ERROR_TYPE_ITEM, item,
1447 "extensive metadata register"
1448 " isn't supported");
1450 return rte_flow_error_set(error, EINVAL,
1451 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1453 "data cannot be empty");
1455 mask = &rte_flow_item_tag_mask;
1456 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1457 (const uint8_t *)&nic_mask,
1458 sizeof(struct rte_flow_item_tag),
/* The tag index selects a concrete register, so it must be matched in
 * full — a partial index mask is meaningless. */
1462 if (mask->index != 0xff)
1463 return rte_flow_error_set(error, EINVAL,
1464 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1465 "partial mask for tag index"
1466 " is not supported");
1467 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1470 assert(ret != REG_NONE);
1475 * Validate vport item.
1478 * Pointer to the rte_eth_dev structure.
1480 * Item specification.
1482 * Attributes of flow that includes this item.
1483 * @param[in] item_flags
1484 * Bit-fields that holds the items detected until now.
1486 * Pointer to error structure.
1489 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — some lines (e.g. the "if (!mask)" guard
 * implied before line 1519, early "if (!spec)" return) are not visible. */
1492 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1493 const struct rte_flow_item *item,
1494 const struct rte_flow_attr *attr,
1495 uint64_t item_flags,
1496 struct rte_flow_error *error)
1498 const struct rte_flow_item_port_id *spec = item->spec;
1499 const struct rte_flow_item_port_id *mask = item->mask;
1500 const struct rte_flow_item_port_id switch_mask = {
1503 struct mlx5_priv *esw_priv;
1504 struct mlx5_priv *dev_priv;
/* PORT_ID matching is an E-Switch (transfer) feature only. */
1507 if (!attr->transfer)
1508 return rte_flow_error_set(error, EINVAL,
1509 RTE_FLOW_ERROR_TYPE_ITEM,
1511 "match on port id is valid only"
1512 " when transfer flag is enabled");
/* A rule can name at most one source port. */
1513 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1514 return rte_flow_error_set(error, ENOTSUP,
1515 RTE_FLOW_ERROR_TYPE_ITEM, item,
1516 "multiple source ports are not"
1519 mask = &switch_mask;
1520 if (mask->id != 0xffffffff)
1521 return rte_flow_error_set(error, ENOTSUP,
1522 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1524 "no support for partial mask on"
1526 ret = mlx5_flow_item_acceptable
1527 (item, (const uint8_t *)mask,
1528 (const uint8_t *)&rte_flow_item_port_id_mask,
1529 sizeof(struct rte_flow_item_port_id),
/* The matched port must live in the same E-Switch domain as the device
 * the rule is created on. */
1535 esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1537 return rte_flow_error_set(error, rte_errno,
1538 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1539 "failed to obtain E-Switch info for"
1541 dev_priv = mlx5_dev_to_eswitch_info(dev);
1543 return rte_flow_error_set(error, rte_errno,
1544 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1546 "failed to obtain E-Switch info");
1547 if (esw_priv->domain_id != dev_priv->domain_id)
1548 return rte_flow_error_set(error, EINVAL,
1549 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1550 "cannot match on a port from a"
1551 " different E-Switch");
1556 * Validate GTP item.
1559 * Pointer to the rte_eth_dev structure.
1561 * Item specification.
1562 * @param[in] item_flags
1563 * Bit-fields that holds the items detected until now.
1565 * Pointer to error structure.
1568 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces not visible. */
1571 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1572 const struct rte_flow_item *item,
1573 uint64_t item_flags,
1574 struct rte_flow_error *error)
1576 struct mlx5_priv *priv = dev->data->dev_private;
1577 const struct rte_flow_item_gtp *mask = item->mask;
1578 const struct rte_flow_item_gtp nic_mask = {
1579 .teid = RTE_BE32(0xffffffff),
/* GTP matching requires the stateless-GTP HCA capability, must be the
 * only tunnel in the rule, and must sit on top of an outer UDP layer. */
1583 if (!priv->config.hca_attr.tunnel_stateless_gtp)
1584 return rte_flow_error_set(error, ENOTSUP,
1585 RTE_FLOW_ERROR_TYPE_ITEM, item,
1586 "GTP support is not enabled");
1587 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1588 return rte_flow_error_set(error, ENOTSUP,
1589 RTE_FLOW_ERROR_TYPE_ITEM, item,
1590 "multiple tunnel layers not"
1592 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1593 return rte_flow_error_set(error, EINVAL,
1594 RTE_FLOW_ERROR_TYPE_ITEM, item,
1595 "no outer UDP layer found");
1597 mask = &rte_flow_item_gtp_mask;
1598 return mlx5_flow_item_acceptable
1599 (item, (const uint8_t *)mask,
1600 (const uint8_t *)&nic_mask,
1601 sizeof(struct rte_flow_item_gtp),
1606 * Validate the pop VLAN action.
1609 * Pointer to the rte_eth_dev structure.
1610 * @param[in] action_flags
1611 * Holds the actions detected until now.
1613 * Pointer to the pop vlan action.
1614 * @param[in] item_flags
1615 * The items found in this flow rule.
1617 * Pointer to flow attributes.
1619 * Pointer to error structure.
1622 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and the trailing
 * "return 0;" are not visible in this excerpt. */
1625 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1626 uint64_t action_flags,
1627 const struct rte_flow_action *action,
1628 uint64_t item_flags,
1629 const struct rte_flow_attr *attr,
1630 struct rte_flow_error *error)
1632 struct mlx5_priv *priv = dev->data->dev_private;
/* Requires driver-level pop-VLAN support created at device start. */
1636 if (!priv->sh->pop_vlan_action)
1637 return rte_flow_error_set(error, ENOTSUP,
1638 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1640 "pop vlan action is not supported");
1642 * Check for inconsistencies:
1643 * fail strip_vlan in a flow that matches packets without VLAN tags.
1644 * fail strip_vlan in a flow that matches packets without explicitly a
1645 * matching on VLAN tag ?
1647 if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
1648 return rte_flow_error_set(error, ENOTSUP,
1649 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1651 "no support for multiple vlan pop "
1653 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1654 return rte_flow_error_set(error, ENOTSUP,
1655 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1657 "cannot pop vlan without a "
1658 "match on (outer) vlan in the flow");
/* Ordering constraint: pop VLAN must appear before port_id. */
1659 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1660 return rte_flow_error_set(error, EINVAL,
1661 RTE_FLOW_ERROR_TYPE_ACTION, action,
1662 "wrong action order, port_id should "
1663 "be after pop VLAN action");
1668 * Get VLAN default info from vlan match info.
1671 * Pointer to the rte_eth_dev structure.
1673 * the list of item specifications.
1675 * pointer VLAN info to fill to.
1677 * Pointer to error structure.
1680 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — braces/return and possible guards
 * (e.g. null vlan_m/vlan_v checks) are not visible in this excerpt. */
1683 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1684 struct rte_vlan_hdr *vlan)
1686 const struct rte_flow_item_vlan nic_mask = {
1687 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1688 MLX5DV_FLOW_VLAN_VID_MASK),
1689 .inner_type = RTE_BE16(0xffff),
/* Scan forward to the first VLAN item, if any. */
1694 for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
1695 items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
1697 if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1698 const struct rte_flow_item_vlan *vlan_m = items->mask;
1699 const struct rte_flow_item_vlan *vlan_v = items->spec;
1703 /* Only full match values are accepted */
/* Copy PCP into vlan->vlan_tci only when the PCP bits are fully
 * masked; same pattern for VID and inner ethertype below. */
1704 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1705 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1706 vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
1708 rte_be_to_cpu_16(vlan_v->tci &
1709 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1711 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1712 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1713 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1715 rte_be_to_cpu_16(vlan_v->tci &
1716 MLX5DV_FLOW_VLAN_VID_MASK_BE);
1718 if (vlan_m->inner_type == nic_mask.inner_type)
1719 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1720 vlan_m->inner_type);
1725 * Validate the push VLAN action.
1727 * @param[in] action_flags
1728 * Holds the actions detected until now.
1730 * Pointer to the encap action.
1732 * Pointer to flow attributes
1734 * Pointer to error structure.
1737 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and the left-hand
 * side of the condition completed on line 1754 are not visible. */
1740 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1741 uint64_t item_flags,
1742 const struct rte_flow_action *action,
1743 const struct rte_flow_attr *attr,
1744 struct rte_flow_error *error)
1746 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
/* Only 802.1Q and 802.1ad (QinQ) tag protocol IDs are accepted. */
1748 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1749 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1750 return rte_flow_error_set(error, EINVAL,
1751 RTE_FLOW_ERROR_TYPE_ACTION, action,
1752 "invalid vlan ethertype");
1754 (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1755 return rte_flow_error_set(error, ENOTSUP,
1756 RTE_FLOW_ERROR_TYPE_ACTION, action,
1757 "no support for multiple VLAN "
/* The new tag's VID must come from somewhere: either a following
 * set_vlan_vid action or a matched (outer) VLAN item. */
1759 if (!mlx5_flow_find_action
1760 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
1761 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1762 return rte_flow_error_set(error, ENOTSUP,
1763 RTE_FLOW_ERROR_TYPE_ACTION, action,
1764 "push VLAN needs to match on VLAN in order to "
1765 "get VLAN VID information because there is "
1766 "no followed set VLAN VID action");
1767 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1768 return rte_flow_error_set(error, EINVAL,
1769 RTE_FLOW_ERROR_TYPE_ACTION, action,
1770 "wrong action order, port_id should "
1771 "be after push VLAN");
1777 * Validate the set VLAN PCP.
1779 * @param[in] action_flags
1780 * Holds the actions detected until now.
1781 * @param[in] actions
1782 * Pointer to the list of actions remaining in the flow rule.
1784 * Pointer to flow attributes
1786 * Pointer to error structure.
1789 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces not visible. */
1792 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1793 const struct rte_flow_action actions[],
1794 struct rte_flow_error *error)
1796 const struct rte_flow_action *action = actions;
1797 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
/* PCP is a 3-bit field, so 7 is the maximum legal value. */
1799 if (conf->vlan_pcp > 7)
1800 return rte_flow_error_set(error, EINVAL,
1801 RTE_FLOW_ERROR_TYPE_ACTION, action,
1802 "VLAN PCP value is too big");
/* Only valid on a freshly pushed tag, and only once per rule. */
1803 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1804 return rte_flow_error_set(error, ENOTSUP,
1805 RTE_FLOW_ERROR_TYPE_ACTION, action,
1806 "set VLAN PCP action must follow "
1807 "the push VLAN action");
1808 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1809 return rte_flow_error_set(error, ENOTSUP,
1810 RTE_FLOW_ERROR_TYPE_ACTION, action,
1811 "Multiple VLAN PCP modification are "
1813 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1814 return rte_flow_error_set(error, EINVAL,
1815 RTE_FLOW_ERROR_TYPE_ACTION, action,
1816 "wrong action order, port_id should "
1817 "be after set VLAN PCP");
1822 * Validate the set VLAN VID.
1824 * @param[in] item_flags
1825 * Holds the items detected in this rule.
1826 * @param[in] actions
1827 * Pointer to the list of actions remaining in the flow rule.
1829 * Pointer to flow attributes
1831 * Pointer to error structure.
1834 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and the "} else {"
 * bridging lines 1850-1866 are not visible in this excerpt. */
1837 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1838 uint64_t action_flags,
1839 const struct rte_flow_action actions[],
1840 struct rte_flow_error *error)
1842 const struct rte_flow_action *action = actions;
1843 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
/* 0xFFF is reserved in 802.1Q, so 0xFFE is the largest usable VID. */
1845 if (conf->vlan_vid > RTE_BE16(0xFFE))
1846 return rte_flow_error_set(error, EINVAL,
1847 RTE_FLOW_ERROR_TYPE_ACTION, action,
1848 "VLAN VID value is too big");
1849 /* there is an of_push_vlan action before us */
1850 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
1851 if (mlx5_flow_find_action(actions + 1,
1852 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1853 return rte_flow_error_set(error, ENOTSUP,
1854 RTE_FLOW_ERROR_TYPE_ACTION, action,
1855 "Multiple VLAN VID modifications are "
1862 * Action is on an existing VLAN header:
1863 * Need to verify this is a single modify CID action.
1864 * Rule mast include a match on outer VLAN.
1866 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1867 return rte_flow_error_set(error, ENOTSUP,
1868 RTE_FLOW_ERROR_TYPE_ACTION, action,
1869 "Multiple VLAN VID modifications are "
1871 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1872 return rte_flow_error_set(error, EINVAL,
1873 RTE_FLOW_ERROR_TYPE_ACTION, action,
1874 "match on VLAN is required in order "
1876 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1877 return rte_flow_error_set(error, EINVAL,
1878 RTE_FLOW_ERROR_TYPE_ACTION, action,
1879 "wrong action order, port_id should "
1880 "be after set VLAN VID");
1885 * Validate the FLAG action.
1888 * Pointer to the rte_eth_dev structure.
1889 * @param[in] action_flags
1890 * Holds the actions detected until now.
1892 * Pointer to flow attributes
1894 * Pointer to error structure.
1897 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the check on the return value of
 * mlx5_flow_get_reg_id (between lines 1924 and 1928) is not visible. */
1900 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
1901 uint64_t action_flags,
1902 const struct rte_flow_attr *attr,
1903 struct rte_flow_error *error)
1905 struct mlx5_priv *priv = dev->data->dev_private;
1906 struct mlx5_dev_config *config = &priv->config;
1909 /* Fall back if no extended metadata register support. */
1910 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1911 return mlx5_flow_validate_action_flag(action_flags, attr,
1913 /* Extensive metadata mode requires registers. */
1914 if (!mlx5_flow_ext_mreg_supported(dev))
1915 return rte_flow_error_set(error, ENOTSUP,
1916 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1917 "no metadata registers "
1918 "to support flag action");
/* The FLAG action needs the default mark bit to be matchable in HW. */
1919 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
1920 return rte_flow_error_set(error, ENOTSUP,
1921 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1922 "extended metadata register"
1923 " isn't available");
1924 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
/* FLAG is mutually exclusive with DROP, MARK and a second FLAG. */
1928 if (action_flags & MLX5_FLOW_ACTION_DROP)
1929 return rte_flow_error_set(error, EINVAL,
1930 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1931 "can't drop and flag in same flow");
1932 if (action_flags & MLX5_FLOW_ACTION_MARK)
1933 return rte_flow_error_set(error, EINVAL,
1934 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1935 "can't mark and flag in same flow");
1936 if (action_flags & MLX5_FLOW_ACTION_FLAG)
1937 return rte_flow_error_set(error, EINVAL,
1938 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1940 " actions in same flow");
1945 * Validate MARK action.
1948 * Pointer to the rte_eth_dev structure.
1950 * Pointer to action.
1951 * @param[in] action_flags
1952 * Holds the actions detected until now.
1954 * Pointer to flow attributes
1956 * Pointer to error structure.
1959 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the null-conf guard implied before line
 * 1993 ("configuration cannot be null") is not visible in this excerpt. */
1962 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
1963 const struct rte_flow_action *action,
1964 uint64_t action_flags,
1965 const struct rte_flow_attr *attr,
1966 struct rte_flow_error *error)
1968 struct mlx5_priv *priv = dev->data->dev_private;
1969 struct mlx5_dev_config *config = &priv->config;
1970 const struct rte_flow_action_mark *mark = action->conf;
1973 /* Fall back if no extended metadata register support. */
1974 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1975 return mlx5_flow_validate_action_mark(action, action_flags,
1977 /* Extensive metadata mode requires registers. */
1978 if (!mlx5_flow_ext_mreg_supported(dev))
1979 return rte_flow_error_set(error, ENOTSUP,
1980 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1981 "no metadata registers "
1982 "to support mark action");
1983 if (!priv->sh->dv_mark_mask)
1984 return rte_flow_error_set(error, ENOTSUP,
1985 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1986 "extended metadata register"
1987 " isn't available");
1988 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1993 return rte_flow_error_set(error, EINVAL,
1994 RTE_FLOW_ERROR_TYPE_ACTION, action,
1995 "configuration cannot be null");
/* The mark id must fit both the API limit and the HW-matchable bits. */
1996 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
1997 return rte_flow_error_set(error, EINVAL,
1998 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2000 "mark id exceeds the limit");
/* MARK is mutually exclusive with DROP, FLAG and a second MARK. */
2001 if (action_flags & MLX5_FLOW_ACTION_DROP)
2002 return rte_flow_error_set(error, EINVAL,
2003 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2004 "can't drop and mark in same flow");
2005 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2006 return rte_flow_error_set(error, EINVAL,
2007 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2008 "can't flag and mark in same flow");
2009 if (action_flags & MLX5_FLOW_ACTION_MARK)
2010 return rte_flow_error_set(error, EINVAL,
2011 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2012 "can't have 2 mark actions in same"
2018 * Validate SET_META action.
2021 * Pointer to the rte_eth_dev structure.
2023 * Pointer to the encap action.
2024 * @param[in] action_flags
2025 * Holds the actions detected until now.
2027 * Pointer to flow attributes
2029 * Pointer to error structure.
2032 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the reg<0 check after line 2050 and the
 * "if (!conf->mask)" guard implied before line 2064 are not visible. */
2035 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2036 const struct rte_flow_action *action,
2037 uint64_t action_flags __rte_unused,
2038 const struct rte_flow_attr *attr,
2039 struct rte_flow_error *error)
2041 const struct rte_flow_action_set_meta *conf;
2042 uint32_t nic_mask = UINT32_MAX;
2043 enum modify_reg reg;
2045 if (!mlx5_flow_ext_mreg_supported(dev))
2046 return rte_flow_error_set(error, ENOTSUP,
2047 RTE_FLOW_ERROR_TYPE_ACTION, action,
2048 "extended metadata register"
2049 " isn't supported");
2050 reg = flow_dv_get_metadata_reg(dev, attr, error);
/* Registers other than A/B are backed by reg C0, whose usable bits are
 * limited by dv_meta_mask — narrow the accepted mask accordingly. */
2053 if (reg != REG_A && reg != REG_B) {
2054 struct mlx5_priv *priv = dev->data->dev_private;
2056 nic_mask = priv->sh->dv_meta_mask;
2058 if (!(action->conf))
2059 return rte_flow_error_set(error, EINVAL,
2060 RTE_FLOW_ERROR_TYPE_ACTION, action,
2061 "configuration cannot be null");
2062 conf = (const struct rte_flow_action_set_meta *)action->conf;
2064 return rte_flow_error_set(error, EINVAL,
2065 RTE_FLOW_ERROR_TYPE_ACTION, action,
2066 "zero mask doesn't have any effect");
2067 if (conf->mask & ~nic_mask)
2068 return rte_flow_error_set(error, EINVAL,
2069 RTE_FLOW_ERROR_TYPE_ACTION, action,
2070 "meta data must be within reg C0");
2071 if (!(conf->data & conf->mask))
2072 return rte_flow_error_set(error, EINVAL,
2073 RTE_FLOW_ERROR_TYPE_ACTION, action,
2074 "zero value has no effect");
2079 * Validate SET_TAG action.
2082 * Pointer to the rte_eth_dev structure.
2084 * Pointer to the encap action.
2085 * @param[in] action_flags
2086 * Holds the actions detected until now.
2088 * Pointer to flow attributes
2090 * Pointer to error structure.
2093 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the "if (!conf->mask)" guard implied
 * before line 2119 and the ret<0 check after 2122 are not visible. */
2096 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2097 const struct rte_flow_action *action,
2098 uint64_t action_flags,
2099 const struct rte_flow_attr *attr,
2100 struct rte_flow_error *error)
2102 const struct rte_flow_action_set_tag *conf;
/* Actions that end packet processing; setting a tag afterwards could
 * never be observed by later tables on NIC ingress. */
2103 const uint64_t terminal_action_flags =
2104 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2105 MLX5_FLOW_ACTION_RSS;
2108 if (!mlx5_flow_ext_mreg_supported(dev))
2109 return rte_flow_error_set(error, ENOTSUP,
2110 RTE_FLOW_ERROR_TYPE_ACTION, action,
2111 "extensive metadata register"
2112 " isn't supported");
2113 if (!(action->conf))
2114 return rte_flow_error_set(error, EINVAL,
2115 RTE_FLOW_ERROR_TYPE_ACTION, action,
2116 "configuration cannot be null");
2117 conf = (const struct rte_flow_action_set_tag *)action->conf;
2119 return rte_flow_error_set(error, EINVAL,
2120 RTE_FLOW_ERROR_TYPE_ACTION, action,
2121 "zero mask doesn't have any effect");
2122 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2125 if (!attr->transfer && attr->ingress &&
2126 (action_flags & terminal_action_flags))
2127 return rte_flow_error_set(error, EINVAL,
2128 RTE_FLOW_ERROR_TYPE_ACTION, action,
2129 "set_tag has no effect"
2130 " with terminal actions");
2135 * Validate count action.
2140 * Pointer to error structure.
2143 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the "goto notsup_err;" / "return 0;"
 * lines between 2151 and 2157 are not visible in this excerpt. */
2146 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2147 struct rte_flow_error *error)
2149 struct mlx5_priv *priv = dev->data->dev_private;
/* COUNT needs DevX support at runtime... */
2151 if (!priv->config.devx)
/* ...and DevX flow counters compiled in. */
2153 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2157 return rte_flow_error_set
2159 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2161 "count action not supported");
2165 * Validate the L2 encap action.
2167 * @param[in] action_flags
2168 * Holds the actions detected until now.
2170 * Pointer to the encap action.
2172 * Pointer to flow attributes
2174 * Pointer to error structure.
2177 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and trailing
 * "return 0;" not visible here. */
2180 flow_dv_validate_action_l2_encap(uint64_t action_flags,
2181 const struct rte_flow_action *action,
2182 const struct rte_flow_attr *attr,
2183 struct rte_flow_error *error)
2185 if (!(action->conf))
2186 return rte_flow_error_set(error, EINVAL,
2187 RTE_FLOW_ERROR_TYPE_ACTION, action,
2188 "configuration cannot be null");
2189 if (action_flags & MLX5_FLOW_ACTION_DROP)
2190 return rte_flow_error_set(error, EINVAL,
2191 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2192 "can't drop and encap in same flow");
/* Only one encap/decap per rule is supported. */
2193 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
2194 return rte_flow_error_set(error, EINVAL,
2195 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2196 "can only have a single encap or"
2197 " decap action in a flow");
/* Plain (non-transfer) ingress cannot encapsulate. */
2198 if (!attr->transfer && attr->ingress)
2199 return rte_flow_error_set(error, ENOTSUP,
2200 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2202 "encap action not supported for "
2208 * Validate the L2 decap action.
2210 * @param[in] action_flags
2211 * Holds the actions detected until now.
2213 * Pointer to flow attributes
2215 * Pointer to error structure.
2218 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — the egress condition implied before
 * line 2240 (error type is ATTR_EGRESS) is not visible here. */
2221 flow_dv_validate_action_l2_decap(uint64_t action_flags,
2222 const struct rte_flow_attr *attr,
2223 struct rte_flow_error *error)
2225 if (action_flags & MLX5_FLOW_ACTION_DROP)
2226 return rte_flow_error_set(error, EINVAL,
2227 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2228 "can't drop and decap in same flow");
2229 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
2230 return rte_flow_error_set(error, EINVAL,
2231 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2232 "can only have a single encap or"
2233 " decap action in a flow");
/* Header-modify actions seen so far would operate on headers the decap
 * removes, so decap may not follow them. */
2234 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2235 return rte_flow_error_set(error, EINVAL,
2236 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2237 "can't have decap action after"
2240 return rte_flow_error_set(error, ENOTSUP,
2241 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2243 "decap action not supported for "
2249 * Validate the raw encap action.
2251 * @param[in] action_flags
2252 * Holds the actions detected until now.
2254 * Pointer to the encap action.
2256 * Pointer to flow attributes
2258 * Pointer to error structure.
2261 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and trailing
 * "return 0;" not visible here. */
2264 flow_dv_validate_action_raw_encap(uint64_t action_flags,
2265 const struct rte_flow_action *action,
2266 const struct rte_flow_attr *attr,
2267 struct rte_flow_error *error)
2269 const struct rte_flow_action_raw_encap *raw_encap =
2270 (const struct rte_flow_action_raw_encap *)action->conf;
2271 if (!(action->conf))
2272 return rte_flow_error_set(error, EINVAL,
2273 RTE_FLOW_ERROR_TYPE_ACTION, action,
2274 "configuration cannot be null");
2275 if (action_flags & MLX5_FLOW_ACTION_DROP)
2276 return rte_flow_error_set(error, EINVAL,
2277 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2278 "can't drop and encap in same flow");
2279 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2280 return rte_flow_error_set(error, EINVAL,
2281 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2282 "can only have a single encap"
2283 " action in a flow");
2284 /* encap without preceding decap is not supported for ingress */
2285 if (!attr->transfer && attr->ingress &&
2286 !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
2287 return rte_flow_error_set(error, ENOTSUP,
2288 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2290 "encap action not supported for "
/* The caller must supply the raw bytes to prepend. */
2292 if (!raw_encap->size || !raw_encap->data)
2293 return rte_flow_error_set(error, EINVAL,
2294 RTE_FLOW_ERROR_TYPE_ACTION, action,
2295 "raw encap data cannot be empty");
2300 * Validate the raw decap action.
2302 * @param[in] action_flags
2303 * Holds the actions detected until now.
2305 * Pointer to the encap action.
2307 * Pointer to flow attributes
2309 * Pointer to error structure.
2312 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* NOTE(review): sampled listing — return type/braces and trailing
 * "return 0;" not visible here. */
2315 flow_dv_validate_action_raw_decap(uint64_t action_flags,
2316 const struct rte_flow_action *action,
2317 const struct rte_flow_attr *attr,
2318 struct rte_flow_error *error)
2320 const struct rte_flow_action_raw_decap *decap = action->conf;
2322 if (action_flags & MLX5_FLOW_ACTION_DROP)
2323 return rte_flow_error_set(error, EINVAL,
2324 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2325 "can't drop and decap in same flow");
2326 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2327 return rte_flow_error_set(error, EINVAL,
2328 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2329 "can't have encap action before"
2331 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
2332 return rte_flow_error_set(error, EINVAL,
2333 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2334 "can only have a single decap"
2335 " action in a flow");
2336 /* decap action is valid on egress only if it is followed by encap */
/* Decap sizes above MLX5_ENCAPSULATION_DECISION_SIZE (eth+ipv4, per the
 * macro at the top of the file) are treated as full tunnel decap, which
 * is rejected on plain egress and after header-modify actions. */
2337 if (attr->egress && decap &&
2338 decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
2339 return rte_flow_error_set(error, ENOTSUP,
2340 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2341 NULL, "decap action not supported"
2343 } else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
2344 (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
2345 return rte_flow_error_set(error, EINVAL,
2346 RTE_FLOW_ERROR_TYPE_ACTION,
2348 "can't have decap action "
2349 "after modify action");
2355 * Find existing encap/decap resource or create and register a new one.
2357 * @param[in, out] dev
2358 * Pointer to rte_eth_dev structure.
2359 * @param[in, out] resource
2360 * Pointer to encap/decap resource.
2361 * @parm[in, out] dev_flow
2362 * Pointer to the dev_flow.
2364 * pointer to error structure.
2367 * 0 on success otherwise -errno and errno is set.
/* NOTE(review): sampled listing — "return 0;" statements after the cache
 * hit (post-2402) and at the end (post-2431) are not visible here. */
2370 flow_dv_encap_decap_resource_register
2371 (struct rte_eth_dev *dev,
2372 struct mlx5_flow_dv_encap_decap_resource *resource,
2373 struct mlx5_flow *dev_flow,
2374 struct rte_flow_error *error)
2376 struct mlx5_priv *priv = dev->data->dev_private;
2377 struct mlx5_ibv_shared *sh = priv->sh;
2378 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2379 struct mlx5dv_dr_domain *domain;
/* Group 0 is the root table: flags=1 matches
 * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL defined at the top of the file. */
2381 resource->flags = dev_flow->group ? 0 : 1;
/* Pick the DR domain matching the flow table type (FDB/RX/TX). */
2382 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2383 domain = sh->fdb_domain;
2384 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2385 domain = sh->rx_domain;
2387 domain = sh->tx_domain;
2389 /* Lookup a matching resource from cache. */
/* Cache key: reformat type + table type + flags + raw buffer contents. */
2390 LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
2391 if (resource->reformat_type == cache_resource->reformat_type &&
2392 resource->ft_type == cache_resource->ft_type &&
2393 resource->flags == cache_resource->flags &&
2394 resource->size == cache_resource->size &&
2395 !memcmp((const void *)resource->buf,
2396 (const void *)cache_resource->buf,
2398 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2399 (void *)cache_resource,
2400 rte_atomic32_read(&cache_resource->refcnt));
2401 rte_atomic32_inc(&cache_resource->refcnt);
2402 dev_flow->dv.encap_decap = cache_resource;
2406 /* Register new encap/decap resource. */
2407 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2408 if (!cache_resource)
2409 return rte_flow_error_set(error, ENOMEM,
2410 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2411 "cannot allocate resource memory");
2412 *cache_resource = *resource;
2413 cache_resource->verbs_action =
2414 mlx5_glue->dv_create_flow_action_packet_reformat
2415 (sh->ctx, cache_resource->reformat_type,
2416 cache_resource->ft_type, domain, cache_resource->flags,
2417 cache_resource->size,
2418 (cache_resource->size ? cache_resource->buf : NULL));
/* On glue failure, free the cache entry before reporting the error so
 * no orphan stays in memory (it was not yet linked into the list). */
2419 if (!cache_resource->verbs_action) {
2420 rte_free(cache_resource);
2421 return rte_flow_error_set(error, ENOMEM,
2422 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2423 NULL, "cannot create action");
2425 rte_atomic32_init(&cache_resource->refcnt);
2426 rte_atomic32_inc(&cache_resource->refcnt);
2427 LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
2428 dev_flow->dv.encap_decap = cache_resource;
2429 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2430 (void *)cache_resource,
2431 rte_atomic32_read(&cache_resource->refcnt));
2436 * Find existing table jump resource or create and register a new one.
2438 * @param[in, out] dev
2439 * Pointer to rte_eth_dev structure.
2440 * @param[in, out] tbl
2441 * Pointer to flow table resource.
 * @param[in, out] dev_flow
2443 * Pointer to the dev_flow.
2445 * pointer to error structure.
2448 * 0 on success otherwise -errno and errno is set.
2451 flow_dv_jump_tbl_resource_register
2452 (struct rte_eth_dev *dev __rte_unused,
2453 struct mlx5_flow_tbl_resource *tbl,
2454 struct mlx5_flow *dev_flow,
2455 struct rte_flow_error *error)
2457 struct mlx5_flow_tbl_data_entry *tbl_data =
2458 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2462 cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2464 tbl_data->jump.action =
2465 mlx5_glue->dr_create_flow_action_dest_flow_tbl
2467 if (!tbl_data->jump.action)
2468 return rte_flow_error_set(error, ENOMEM,
2469 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2470 NULL, "cannot create jump action");
2471 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2472 (void *)&tbl_data->jump, cnt);
2474 assert(tbl_data->jump.action);
2475 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
2476 (void *)&tbl_data->jump, cnt);
2478 rte_atomic32_inc(&tbl_data->jump.refcnt);
2479 dev_flow->dv.jump = &tbl_data->jump;
2484 * Find existing table port ID resource or create and register a new one.
2486 * @param[in, out] dev
2487 * Pointer to rte_eth_dev structure.
2488 * @param[in, out] resource
2489 * Pointer to port ID action resource.
 * @param[in, out] dev_flow
2491 * Pointer to the dev_flow.
2493 * pointer to error structure.
2496 * 0 on success otherwise -errno and errno is set.
2499 flow_dv_port_id_action_resource_register
2500 (struct rte_eth_dev *dev,
2501 struct mlx5_flow_dv_port_id_action_resource *resource,
2502 struct mlx5_flow *dev_flow,
2503 struct rte_flow_error *error)
2505 struct mlx5_priv *priv = dev->data->dev_private;
2506 struct mlx5_ibv_shared *sh = priv->sh;
2507 struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2509 /* Lookup a matching resource from cache. */
2510 LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
2511 if (resource->port_id == cache_resource->port_id) {
2512 DRV_LOG(DEBUG, "port id action resource resource %p: "
2514 (void *)cache_resource,
2515 rte_atomic32_read(&cache_resource->refcnt));
2516 rte_atomic32_inc(&cache_resource->refcnt);
2517 dev_flow->dv.port_id_action = cache_resource;
2521 /* Register new port id action resource. */
2522 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2523 if (!cache_resource)
2524 return rte_flow_error_set(error, ENOMEM,
2525 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2526 "cannot allocate resource memory");
2527 *cache_resource = *resource;
2529 * Depending on rdma_core version the glue routine calls
2530 * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
2531 * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
2533 cache_resource->action =
2534 mlx5_glue->dr_create_flow_action_dest_port
2535 (priv->sh->fdb_domain, resource->port_id);
2536 if (!cache_resource->action) {
2537 rte_free(cache_resource);
2538 return rte_flow_error_set(error, ENOMEM,
2539 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2540 NULL, "cannot create action");
2542 rte_atomic32_init(&cache_resource->refcnt);
2543 rte_atomic32_inc(&cache_resource->refcnt);
2544 LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
2545 dev_flow->dv.port_id_action = cache_resource;
2546 DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2547 (void *)cache_resource,
2548 rte_atomic32_read(&cache_resource->refcnt));
2553 * Find existing push vlan resource or create and register a new one.
 * @param[in, out] dev
2556 * Pointer to rte_eth_dev structure.
2557 * @param[in, out] resource
2558 * Pointer to port ID action resource.
 * @param[in, out] dev_flow
2560 * Pointer to the dev_flow.
2562 * pointer to error structure.
2565 * 0 on success otherwise -errno and errno is set.
2568 flow_dv_push_vlan_action_resource_register
2569 (struct rte_eth_dev *dev,
2570 struct mlx5_flow_dv_push_vlan_action_resource *resource,
2571 struct mlx5_flow *dev_flow,
2572 struct rte_flow_error *error)
2574 struct mlx5_priv *priv = dev->data->dev_private;
2575 struct mlx5_ibv_shared *sh = priv->sh;
2576 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2577 struct mlx5dv_dr_domain *domain;
2579 /* Lookup a matching resource from cache. */
2580 LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
2581 if (resource->vlan_tag == cache_resource->vlan_tag &&
2582 resource->ft_type == cache_resource->ft_type) {
2583 DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
2585 (void *)cache_resource,
2586 rte_atomic32_read(&cache_resource->refcnt));
2587 rte_atomic32_inc(&cache_resource->refcnt);
2588 dev_flow->dv.push_vlan_res = cache_resource;
2592 /* Register new push_vlan action resource. */
2593 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2594 if (!cache_resource)
2595 return rte_flow_error_set(error, ENOMEM,
2596 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2597 "cannot allocate resource memory");
2598 *cache_resource = *resource;
2599 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2600 domain = sh->fdb_domain;
2601 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2602 domain = sh->rx_domain;
2604 domain = sh->tx_domain;
2605 cache_resource->action =
2606 mlx5_glue->dr_create_flow_action_push_vlan(domain,
2607 resource->vlan_tag);
2608 if (!cache_resource->action) {
2609 rte_free(cache_resource);
2610 return rte_flow_error_set(error, ENOMEM,
2611 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2612 NULL, "cannot create action");
2614 rte_atomic32_init(&cache_resource->refcnt);
2615 rte_atomic32_inc(&cache_resource->refcnt);
2616 LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
2617 dev_flow->dv.push_vlan_res = cache_resource;
2618 DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2619 (void *)cache_resource,
2620 rte_atomic32_read(&cache_resource->refcnt));
2624 * Get the size of specific rte_flow_item_type
2626 * @param[in] item_type
2627 * Tested rte_flow_item_type.
2630 * sizeof struct item_type, 0 if void or irrelevant.
2633 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2637 switch (item_type) {
2638 case RTE_FLOW_ITEM_TYPE_ETH:
2639 retval = sizeof(struct rte_flow_item_eth);
2641 case RTE_FLOW_ITEM_TYPE_VLAN:
2642 retval = sizeof(struct rte_flow_item_vlan);
2644 case RTE_FLOW_ITEM_TYPE_IPV4:
2645 retval = sizeof(struct rte_flow_item_ipv4);
2647 case RTE_FLOW_ITEM_TYPE_IPV6:
2648 retval = sizeof(struct rte_flow_item_ipv6);
2650 case RTE_FLOW_ITEM_TYPE_UDP:
2651 retval = sizeof(struct rte_flow_item_udp);
2653 case RTE_FLOW_ITEM_TYPE_TCP:
2654 retval = sizeof(struct rte_flow_item_tcp);
2656 case RTE_FLOW_ITEM_TYPE_VXLAN:
2657 retval = sizeof(struct rte_flow_item_vxlan);
2659 case RTE_FLOW_ITEM_TYPE_GRE:
2660 retval = sizeof(struct rte_flow_item_gre);
2662 case RTE_FLOW_ITEM_TYPE_NVGRE:
2663 retval = sizeof(struct rte_flow_item_nvgre);
2665 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2666 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2668 case RTE_FLOW_ITEM_TYPE_MPLS:
2669 retval = sizeof(struct rte_flow_item_mpls);
2671 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2679 #define MLX5_ENCAP_IPV4_VERSION 0x40
2680 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
2681 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
2682 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
2683 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
2684 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
2685 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
2688 * Convert the encap action data from list of rte_flow_item to raw buffer
2691 * Pointer to rte_flow_item objects list.
2693 * Pointer to the output buffer.
2695 * Pointer to the output buffer size.
2697 * Pointer to the error structure.
2700 * 0 on success, a negative errno value otherwise and rte_errno is set.
2703 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2704 size_t *size, struct rte_flow_error *error)
2706 struct rte_ether_hdr *eth = NULL;
2707 struct rte_vlan_hdr *vlan = NULL;
2708 struct rte_ipv4_hdr *ipv4 = NULL;
2709 struct rte_ipv6_hdr *ipv6 = NULL;
2710 struct rte_udp_hdr *udp = NULL;
2711 struct rte_vxlan_hdr *vxlan = NULL;
2712 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2713 struct rte_gre_hdr *gre = NULL;
2715 size_t temp_size = 0;
2718 return rte_flow_error_set(error, EINVAL,
2719 RTE_FLOW_ERROR_TYPE_ACTION,
2720 NULL, "invalid empty data");
2721 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2722 len = flow_dv_get_item_len(items->type);
2723 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2724 return rte_flow_error_set(error, EINVAL,
2725 RTE_FLOW_ERROR_TYPE_ACTION,
2726 (void *)items->type,
2727 "items total size is too big"
2728 " for encap action");
2729 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2730 switch (items->type) {
2731 case RTE_FLOW_ITEM_TYPE_ETH:
2732 eth = (struct rte_ether_hdr *)&buf[temp_size];
2734 case RTE_FLOW_ITEM_TYPE_VLAN:
2735 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2737 return rte_flow_error_set(error, EINVAL,
2738 RTE_FLOW_ERROR_TYPE_ACTION,
2739 (void *)items->type,
2740 "eth header not found");
2741 if (!eth->ether_type)
2742 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2744 case RTE_FLOW_ITEM_TYPE_IPV4:
2745 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2747 return rte_flow_error_set(error, EINVAL,
2748 RTE_FLOW_ERROR_TYPE_ACTION,
2749 (void *)items->type,
2750 "neither eth nor vlan"
2752 if (vlan && !vlan->eth_proto)
2753 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2754 else if (eth && !eth->ether_type)
2755 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2756 if (!ipv4->version_ihl)
2757 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
2758 MLX5_ENCAP_IPV4_IHL_MIN;
2759 if (!ipv4->time_to_live)
2760 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
2762 case RTE_FLOW_ITEM_TYPE_IPV6:
2763 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
2765 return rte_flow_error_set(error, EINVAL,
2766 RTE_FLOW_ERROR_TYPE_ACTION,
2767 (void *)items->type,
2768 "neither eth nor vlan"
2770 if (vlan && !vlan->eth_proto)
2771 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2772 else if (eth && !eth->ether_type)
2773 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2774 if (!ipv6->vtc_flow)
2776 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
2777 if (!ipv6->hop_limits)
2778 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
2780 case RTE_FLOW_ITEM_TYPE_UDP:
2781 udp = (struct rte_udp_hdr *)&buf[temp_size];
2783 return rte_flow_error_set(error, EINVAL,
2784 RTE_FLOW_ERROR_TYPE_ACTION,
2785 (void *)items->type,
2786 "ip header not found");
2787 if (ipv4 && !ipv4->next_proto_id)
2788 ipv4->next_proto_id = IPPROTO_UDP;
2789 else if (ipv6 && !ipv6->proto)
2790 ipv6->proto = IPPROTO_UDP;
2792 case RTE_FLOW_ITEM_TYPE_VXLAN:
2793 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2795 return rte_flow_error_set(error, EINVAL,
2796 RTE_FLOW_ERROR_TYPE_ACTION,
2797 (void *)items->type,
2798 "udp header not found");
2800 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2801 if (!vxlan->vx_flags)
2803 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2805 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2806 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2808 return rte_flow_error_set(error, EINVAL,
2809 RTE_FLOW_ERROR_TYPE_ACTION,
2810 (void *)items->type,
2811 "udp header not found");
2812 if (!vxlan_gpe->proto)
2813 return rte_flow_error_set(error, EINVAL,
2814 RTE_FLOW_ERROR_TYPE_ACTION,
2815 (void *)items->type,
2816 "next protocol not found");
2819 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2820 if (!vxlan_gpe->vx_flags)
2821 vxlan_gpe->vx_flags =
2822 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2824 case RTE_FLOW_ITEM_TYPE_GRE:
2825 case RTE_FLOW_ITEM_TYPE_NVGRE:
2826 gre = (struct rte_gre_hdr *)&buf[temp_size];
2828 return rte_flow_error_set(error, EINVAL,
2829 RTE_FLOW_ERROR_TYPE_ACTION,
2830 (void *)items->type,
2831 "next protocol not found");
2833 return rte_flow_error_set(error, EINVAL,
2834 RTE_FLOW_ERROR_TYPE_ACTION,
2835 (void *)items->type,
2836 "ip header not found");
2837 if (ipv4 && !ipv4->next_proto_id)
2838 ipv4->next_proto_id = IPPROTO_GRE;
2839 else if (ipv6 && !ipv6->proto)
2840 ipv6->proto = IPPROTO_GRE;
2842 case RTE_FLOW_ITEM_TYPE_VOID:
2845 return rte_flow_error_set(error, EINVAL,
2846 RTE_FLOW_ERROR_TYPE_ACTION,
2847 (void *)items->type,
2848 "unsupported item type");
2858 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2860 struct rte_ether_hdr *eth = NULL;
2861 struct rte_vlan_hdr *vlan = NULL;
2862 struct rte_ipv6_hdr *ipv6 = NULL;
2863 struct rte_udp_hdr *udp = NULL;
2867 eth = (struct rte_ether_hdr *)data;
2868 next_hdr = (char *)(eth + 1);
2869 proto = RTE_BE16(eth->ether_type);
2872 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2873 vlan = (struct rte_vlan_hdr *)next_hdr;
2874 proto = RTE_BE16(vlan->eth_proto);
2875 next_hdr += sizeof(struct rte_vlan_hdr);
2878 /* HW calculates IPv4 csum. no need to proceed */
2879 if (proto == RTE_ETHER_TYPE_IPV4)
2882 /* non IPv4/IPv6 header. not supported */
2883 if (proto != RTE_ETHER_TYPE_IPV6) {
2884 return rte_flow_error_set(error, ENOTSUP,
2885 RTE_FLOW_ERROR_TYPE_ACTION,
2886 NULL, "Cannot offload non IPv4/IPv6");
2889 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2891 /* ignore non UDP */
2892 if (ipv6->proto != IPPROTO_UDP)
2895 udp = (struct rte_udp_hdr *)(ipv6 + 1);
2896 udp->dgram_cksum = 0;
2902 * Convert L2 encap action to DV specification.
2905 * Pointer to rte_eth_dev structure.
2907 * Pointer to action structure.
2908 * @param[in, out] dev_flow
2909 * Pointer to the mlx5_flow.
2910 * @param[in] transfer
2911 * Mark if the flow is E-Switch flow.
2913 * Pointer to the error structure.
2916 * 0 on success, a negative errno value otherwise and rte_errno is set.
2919 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2920 const struct rte_flow_action *action,
2921 struct mlx5_flow *dev_flow,
2923 struct rte_flow_error *error)
2925 const struct rte_flow_item *encap_data;
2926 const struct rte_flow_action_raw_encap *raw_encap_data;
2927 struct mlx5_flow_dv_encap_decap_resource res = {
2929 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2930 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2931 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2934 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2936 (const struct rte_flow_action_raw_encap *)action->conf;
2937 res.size = raw_encap_data->size;
2938 memcpy(res.buf, raw_encap_data->data, res.size);
2939 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2942 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2944 ((const struct rte_flow_action_vxlan_encap *)
2945 action->conf)->definition;
2948 ((const struct rte_flow_action_nvgre_encap *)
2949 action->conf)->definition;
2950 if (flow_dv_convert_encap_data(encap_data, res.buf,
2954 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2955 return rte_flow_error_set(error, EINVAL,
2956 RTE_FLOW_ERROR_TYPE_ACTION,
2957 NULL, "can't create L2 encap action");
2962 * Convert L2 decap action to DV specification.
2965 * Pointer to rte_eth_dev structure.
2966 * @param[in, out] dev_flow
2967 * Pointer to the mlx5_flow.
2968 * @param[in] transfer
2969 * Mark if the flow is E-Switch flow.
2971 * Pointer to the error structure.
2974 * 0 on success, a negative errno value otherwise and rte_errno is set.
2977 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2978 struct mlx5_flow *dev_flow,
2980 struct rte_flow_error *error)
2982 struct mlx5_flow_dv_encap_decap_resource res = {
2985 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2986 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2987 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2990 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2991 return rte_flow_error_set(error, EINVAL,
2992 RTE_FLOW_ERROR_TYPE_ACTION,
2993 NULL, "can't create L2 decap action");
2998 * Convert raw decap/encap (L3 tunnel) action to DV specification.
3001 * Pointer to rte_eth_dev structure.
3003 * Pointer to action structure.
3004 * @param[in, out] dev_flow
3005 * Pointer to the mlx5_flow.
3007 * Pointer to the flow attributes.
3009 * Pointer to the error structure.
3012 * 0 on success, a negative errno value otherwise and rte_errno is set.
3015 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3016 const struct rte_flow_action *action,
3017 struct mlx5_flow *dev_flow,
3018 const struct rte_flow_attr *attr,
3019 struct rte_flow_error *error)
3021 const struct rte_flow_action_raw_encap *encap_data;
3022 struct mlx5_flow_dv_encap_decap_resource res;
3024 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3025 res.size = encap_data->size;
3026 memcpy(res.buf, encap_data->data, res.size);
3027 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3028 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3029 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3031 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3033 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3034 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3035 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3036 return rte_flow_error_set(error, EINVAL,
3037 RTE_FLOW_ERROR_TYPE_ACTION,
3038 NULL, "can't create encap action");
3043 * Create action push VLAN.
3046 * Pointer to rte_eth_dev structure.
 * @param[in] vlan
 *   Pointer to the VLAN header (ethertype and TCI) to push onto the packet.
3049 * @param[in, out] dev_flow
3050 * Pointer to the mlx5_flow.
3052 * Pointer to the flow attributes.
3054 * Pointer to the error structure.
3057 * 0 on success, a negative errno value otherwise and rte_errno is set.
3060 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3061 const struct rte_flow_attr *attr,
3062 const struct rte_vlan_hdr *vlan,
3063 struct mlx5_flow *dev_flow,
3064 struct rte_flow_error *error)
3066 struct mlx5_flow_dv_push_vlan_action_resource res;
3069 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3072 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3074 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3075 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3076 return flow_dv_push_vlan_action_resource_register
3077 (dev, &res, dev_flow, error);
3081 * Validate the modify-header actions.
3083 * @param[in] action_flags
3084 * Holds the actions detected until now.
3086 * Pointer to the modify action.
3088 * Pointer to error structure.
3091 * 0 on success, a negative errno value otherwise and rte_errno is set.
3094 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3095 const struct rte_flow_action *action,
3096 struct rte_flow_error *error)
3098 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3099 return rte_flow_error_set(error, EINVAL,
3100 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3101 NULL, "action configuration not set");
3102 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
3103 return rte_flow_error_set(error, EINVAL,
3104 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3105 "can't have encap action before"
3111 * Validate the modify-header MAC address actions.
3113 * @param[in] action_flags
3114 * Holds the actions detected until now.
3116 * Pointer to the modify action.
3117 * @param[in] item_flags
3118 * Holds the items detected.
3120 * Pointer to error structure.
3123 * 0 on success, a negative errno value otherwise and rte_errno is set.
3126 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3127 const struct rte_flow_action *action,
3128 const uint64_t item_flags,
3129 struct rte_flow_error *error)
3133 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3135 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3136 return rte_flow_error_set(error, EINVAL,
3137 RTE_FLOW_ERROR_TYPE_ACTION,
3139 "no L2 item in pattern");
3145 * Validate the modify-header IPv4 address actions.
3147 * @param[in] action_flags
3148 * Holds the actions detected until now.
3150 * Pointer to the modify action.
3151 * @param[in] item_flags
3152 * Holds the items detected.
3154 * Pointer to error structure.
3157 * 0 on success, a negative errno value otherwise and rte_errno is set.
3160 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3161 const struct rte_flow_action *action,
3162 const uint64_t item_flags,
3163 struct rte_flow_error *error)
3167 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3169 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3170 return rte_flow_error_set(error, EINVAL,
3171 RTE_FLOW_ERROR_TYPE_ACTION,
3173 "no ipv4 item in pattern");
3179 * Validate the modify-header IPv6 address actions.
3181 * @param[in] action_flags
3182 * Holds the actions detected until now.
3184 * Pointer to the modify action.
3185 * @param[in] item_flags
3186 * Holds the items detected.
3188 * Pointer to error structure.
3191 * 0 on success, a negative errno value otherwise and rte_errno is set.
3194 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3195 const struct rte_flow_action *action,
3196 const uint64_t item_flags,
3197 struct rte_flow_error *error)
3201 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3203 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3204 return rte_flow_error_set(error, EINVAL,
3205 RTE_FLOW_ERROR_TYPE_ACTION,
3207 "no ipv6 item in pattern");
3213 * Validate the modify-header TP actions.
3215 * @param[in] action_flags
3216 * Holds the actions detected until now.
3218 * Pointer to the modify action.
3219 * @param[in] item_flags
3220 * Holds the items detected.
3222 * Pointer to error structure.
3225 * 0 on success, a negative errno value otherwise and rte_errno is set.
3228 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3229 const struct rte_flow_action *action,
3230 const uint64_t item_flags,
3231 struct rte_flow_error *error)
3235 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3237 if (!(item_flags & MLX5_FLOW_LAYER_L4))
3238 return rte_flow_error_set(error, EINVAL,
3239 RTE_FLOW_ERROR_TYPE_ACTION,
3240 NULL, "no transport layer "
3247 * Validate the modify-header actions of increment/decrement
3248 * TCP Sequence-number.
3250 * @param[in] action_flags
3251 * Holds the actions detected until now.
3253 * Pointer to the modify action.
3254 * @param[in] item_flags
3255 * Holds the items detected.
3257 * Pointer to error structure.
3260 * 0 on success, a negative errno value otherwise and rte_errno is set.
3263 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3264 const struct rte_flow_action *action,
3265 const uint64_t item_flags,
3266 struct rte_flow_error *error)
3270 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3272 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3273 return rte_flow_error_set(error, EINVAL,
3274 RTE_FLOW_ERROR_TYPE_ACTION,
3275 NULL, "no TCP item in"
3277 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3278 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3279 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3280 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3281 return rte_flow_error_set(error, EINVAL,
3282 RTE_FLOW_ERROR_TYPE_ACTION,
3284 "cannot decrease and increase"
3285 " TCP sequence number"
3286 " at the same time");
3292 * Validate the modify-header actions of increment/decrement
3293 * TCP Acknowledgment number.
3295 * @param[in] action_flags
3296 * Holds the actions detected until now.
3298 * Pointer to the modify action.
3299 * @param[in] item_flags
3300 * Holds the items detected.
3302 * Pointer to error structure.
3305 * 0 on success, a negative errno value otherwise and rte_errno is set.
3308 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3309 const struct rte_flow_action *action,
3310 const uint64_t item_flags,
3311 struct rte_flow_error *error)
3315 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3317 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3318 return rte_flow_error_set(error, EINVAL,
3319 RTE_FLOW_ERROR_TYPE_ACTION,
3320 NULL, "no TCP item in"
3322 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3323 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3324 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3325 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3326 return rte_flow_error_set(error, EINVAL,
3327 RTE_FLOW_ERROR_TYPE_ACTION,
3329 "cannot decrease and increase"
3330 " TCP acknowledgment number"
3331 " at the same time");
3337 * Validate the modify-header TTL actions.
3339 * @param[in] action_flags
3340 * Holds the actions detected until now.
3342 * Pointer to the modify action.
3343 * @param[in] item_flags
3344 * Holds the items detected.
3346 * Pointer to error structure.
3349 * 0 on success, a negative errno value otherwise and rte_errno is set.
3352 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3353 const struct rte_flow_action *action,
3354 const uint64_t item_flags,
3355 struct rte_flow_error *error)
3359 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3361 if (!(item_flags & MLX5_FLOW_LAYER_L3))
3362 return rte_flow_error_set(error, EINVAL,
3363 RTE_FLOW_ERROR_TYPE_ACTION,
3365 "no IP protocol in pattern");
3371 * Validate jump action.
3374 * Pointer to the jump action.
3375 * @param[in] action_flags
3376 * Holds the actions detected until now.
3377 * @param[in] attributes
3378 * Pointer to flow attributes
3379 * @param[in] external
3380 * Action belongs to flow rule created by request external to PMD.
3382 * Pointer to error structure.
3385 * 0 on success, a negative errno value otherwise and rte_errno is set.
3388 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3389 uint64_t action_flags,
3390 const struct rte_flow_attr *attributes,
3391 bool external, struct rte_flow_error *error)
3393 uint32_t target_group, table;
3396 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3397 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3398 return rte_flow_error_set(error, EINVAL,
3399 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3400 "can't have 2 fate actions in"
3402 if (action_flags & MLX5_FLOW_ACTION_METER)
3403 return rte_flow_error_set(error, ENOTSUP,
3404 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3405 "jump with meter not support");
3407 return rte_flow_error_set(error, EINVAL,
3408 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3409 NULL, "action configuration not set");
3411 ((const struct rte_flow_action_jump *)action->conf)->group;
3412 ret = mlx5_flow_group_to_table(attributes, external, target_group,
3416 if (attributes->group == target_group)
3417 return rte_flow_error_set(error, EINVAL,
3418 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3419 "target group must be other than"
3420 " the current flow group");
3425 * Validate the port_id action.
3428 * Pointer to rte_eth_dev structure.
3429 * @param[in] action_flags
3430 * Bit-fields that holds the actions detected until now.
3432 * Port_id RTE action structure.
3434 * Attributes of flow that includes this action.
3436 * Pointer to error structure.
3439 * 0 on success, a negative errno value otherwise and rte_errno is set.
3442 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3443 uint64_t action_flags,
3444 const struct rte_flow_action *action,
3445 const struct rte_flow_attr *attr,
3446 struct rte_flow_error *error)
3448 const struct rte_flow_action_port_id *port_id;
3449 struct mlx5_priv *act_priv;
3450 struct mlx5_priv *dev_priv;
3453 if (!attr->transfer)
3454 return rte_flow_error_set(error, ENOTSUP,
3455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3457 "port id action is valid in transfer"
3459 if (!action || !action->conf)
3460 return rte_flow_error_set(error, ENOTSUP,
3461 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3463 "port id action parameters must be"
3465 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3466 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3467 return rte_flow_error_set(error, EINVAL,
3468 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3469 "can have only one fate actions in"
3471 dev_priv = mlx5_dev_to_eswitch_info(dev);
3473 return rte_flow_error_set(error, rte_errno,
3474 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3476 "failed to obtain E-Switch info");
3477 port_id = action->conf;
3478 port = port_id->original ? dev->data->port_id : port_id->id;
3479 act_priv = mlx5_port_to_eswitch_info(port, false);
3481 return rte_flow_error_set
3483 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3484 "failed to obtain E-Switch port id for port");
3485 if (act_priv->domain_id != dev_priv->domain_id)
3486 return rte_flow_error_set
3488 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3489 "port does not belong to"
3490 " E-Switch being configured");
3495 * Get the maximum number of modify header actions.
3498 * Pointer to rte_eth_dev structure.
3501 * Max number of modify header actions device can support.
3504 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev)
3507 * There's no way to directly query the max cap. Although it has to be
3508 * acquried by iterative trial, it is a safe assumption that more
3509 * actions are supported by FW if extensive metadata register is
3512 return mlx5_flow_ext_mreg_supported(dev) ? MLX5_MODIFY_NUM :
3513 MLX5_MODIFY_NUM_NO_MREG;
3517 * Validate the meter action.
3520 * Pointer to rte_eth_dev structure.
3521 * @param[in] action_flags
3522 * Bit-fields that holds the actions detected until now.
3524 * Pointer to the meter action.
3526 * Attributes of flow that includes this action.
3528 * Pointer to error structure.
3531 * 0 on success, a negative errno value otherwise and rte_ernno is set.
/*
 * Validate a meter action against the actions collected so far and the
 * flow attributes; every failure path reports a message via
 * rte_flow_error_set().
 * NOTE(review): several guard lines are elided in this listing (e.g. the
 * NULL/support checks preceding the bare error returns), so conditions
 * shown here are incomplete — verify against the full source.
 */
3534 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3535 uint64_t action_flags,
3536 const struct rte_flow_action *action,
3537 const struct rte_flow_attr *attr,
3538 struct rte_flow_error *error)
3540 struct mlx5_priv *priv = dev->data->dev_private;
3541 const struct rte_flow_action_meter *am = action->conf;
3542 struct mlx5_flow_meter *fm;
/* Reject a missing action configuration. */
3545 return rte_flow_error_set(error, EINVAL,
3546 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3547 "meter action conf is NULL");
/* Only one meter per flow: a second meter means chaining. */
3549 if (action_flags & MLX5_FLOW_ACTION_METER)
3550 return rte_flow_error_set(error, ENOTSUP,
3551 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3552 "meter chaining not support");
/* Meter combined with an explicit jump action is rejected. */
3553 if (action_flags & MLX5_FLOW_ACTION_JUMP)
3554 return rte_flow_error_set(error, ENOTSUP,
3555 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3556 "meter with jump not support");
3558 return rte_flow_error_set(error, ENOTSUP,
3559 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3561 "meter action not supported");
/* Look up the meter object referenced by the action conf. */
3562 fm = mlx5_flow_meter_find(priv, am->mtr_id);
3564 return rte_flow_error_set(error, EINVAL,
3565 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
/*
 * A meter already referenced by other flows may only be attached when
 * the new flow's transfer/ingress/egress attributes are compatible
 * with the meter's own attributes.
 */
3567 if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
3568 (!fm->attr.ingress && !attr->ingress && attr->egress) ||
3569 (!fm->attr.egress && !attr->egress && attr->ingress))))
3570 return rte_flow_error_set(error, EINVAL,
3571 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3572 "Flow attributes are either invalid "
3573 "or have a conflict with current "
3574 "meter attributes");
3579 * Validate the modify-header IPv4 DSCP actions.
3581 * @param[in] action_flags
3582 * Holds the actions detected until now.
3584 * Pointer to the modify action.
3585 * @param[in] item_flags
3586 * Holds the items detected.
3588 * Pointer to error structure.
3591 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv4-DSCP action: run the generic modify-header checks,
 * then require an IPv4 item in the pattern.
 * NOTE(review): the ret-check line after the modify_hdr call is elided
 * in this listing.
 */
3594 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3595 const struct rte_flow_action *action,
3596 const uint64_t item_flags,
3597 struct rte_flow_error *error)
/* Common modify-header validation first. */
3601 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* DSCP rewrite only makes sense when the pattern matches IPv4. */
3603 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3604 return rte_flow_error_set(error, EINVAL,
3605 RTE_FLOW_ERROR_TYPE_ACTION,
3607 "no ipv4 item in pattern");
3613 * Validate the modify-header IPv6 DSCP actions.
3615 * @param[in] action_flags
3616 * Holds the actions detected until now.
3618 * Pointer to the modify action.
3619 * @param[in] item_flags
3620 * Holds the items detected.
3622 * Pointer to error structure.
3625 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv6-DSCP action: run the generic modify-header checks,
 * then require an IPv6 item in the pattern (mirror of the IPv4 variant).
 * NOTE(review): the ret-check line after the modify_hdr call is elided
 * in this listing.
 */
3628 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3629 const struct rte_flow_action *action,
3630 const uint64_t item_flags,
3631 struct rte_flow_error *error)
/* Common modify-header validation first. */
3635 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* DSCP rewrite only makes sense when the pattern matches IPv6. */
3637 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3638 return rte_flow_error_set(error, EINVAL,
3639 RTE_FLOW_ERROR_TYPE_ACTION,
3641 "no ipv6 item in pattern");
3647 * Find existing modify-header resource or create and register a new one.
3649 * @param dev[in, out]
3650 * Pointer to rte_eth_dev structure.
3651 * @param[in, out] resource
3652 * Pointer to modify-header resource.
3653 * @parm[in, out] dev_flow
3654 * Pointer to the dev_flow.
3656 * pointer to error structure.
3659 * 0 on success otherwise -errno and errno is set.
/*
 * Find an existing modify-header resource in the shared-context cache or
 * create and register a new one; on success the resource is stored in
 * dev_flow->dv.modify_hdr with its refcount incremented.
 * NOTE(review): lines are elided here (the NIC-TX/RX domain assignments
 * and the flags computation around the root-level decision); comments
 * describe only what is visible.
 */
3662 flow_dv_modify_hdr_resource_register
3663 (struct rte_eth_dev *dev,
3664 struct mlx5_flow_dv_modify_hdr_resource *resource,
3665 struct mlx5_flow *dev_flow,
3666 struct rte_flow_error *error)
3668 struct mlx5_priv *priv = dev->data->dev_private;
3669 struct mlx5_ibv_shared *sh = priv->sh;
3670 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3671 struct mlx5dv_dr_domain *ns;
/* Refuse requests exceeding the device's modify-header capacity. */
3673 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev))
3674 return rte_flow_error_set(error, EOVERFLOW,
3675 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3676 "too many modify header items");
/* Pick the DR domain matching the flow table type. */
3677 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3678 ns = sh->fdb_domain;
3679 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
/* Group 0 (root table) actions need the root-level flag. */
3684 dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3685 /* Lookup a matching resource from cache. */
3686 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
3687 if (resource->ft_type == cache_resource->ft_type &&
3688 resource->actions_num == cache_resource->actions_num &&
3689 resource->flags == cache_resource->flags &&
3690 !memcmp((const void *)resource->actions,
3691 (const void *)cache_resource->actions,
3692 (resource->actions_num *
3693 sizeof(resource->actions[0])))) {
3694 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
3695 (void *)cache_resource,
3696 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: reuse the existing action, bump its refcount. */
3697 rte_atomic32_inc(&cache_resource->refcnt);
3698 dev_flow->dv.modify_hdr = cache_resource;
3702 /* Register new modify-header resource. */
3703 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3704 if (!cache_resource)
3705 return rte_flow_error_set(error, ENOMEM,
3706 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3707 "cannot allocate resource memory");
3708 *cache_resource = *resource;
3709 cache_resource->verbs_action =
3710 mlx5_glue->dv_create_flow_action_modify_header
3711 (sh->ctx, cache_resource->ft_type,
3712 ns, cache_resource->flags,
3713 cache_resource->actions_num *
3714 sizeof(cache_resource->actions[0]),
3715 (uint64_t *)cache_resource->actions);
/* Creation failed: release the cache entry before reporting. */
3716 if (!cache_resource->verbs_action) {
3717 rte_free(cache_resource);
3718 return rte_flow_error_set(error, ENOMEM,
3719 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3720 NULL, "cannot create action");
/* Publish the new entry with an initial reference. */
3722 rte_atomic32_init(&cache_resource->refcnt);
3723 rte_atomic32_inc(&cache_resource->refcnt);
3724 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
3725 dev_flow->dv.modify_hdr = cache_resource;
3726 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
3727 (void *)cache_resource,
3728 rte_atomic32_read(&cache_resource->refcnt));
3732 #define MLX5_CNT_CONTAINER_RESIZE 64
3735 * Get or create a flow counter.
3738 * Pointer to the Ethernet device structure.
3740 * Indicate if this counter is shared with other flows.
3742 * Counter identifier.
3745 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
/*
 * DevX fallback counter allocation: reuse a matching shared counter from
 * the flat flow_counters list, otherwise allocate a single DevX counter
 * object and a DV counter action for it.
 * NOTE(review): interior lines are elided (NULL checks, the tmpl
 * initializer body, the *cnt = tmpl assignment).
 */
3747 static struct mlx5_flow_counter *
3748 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
/* Fallback path still requires DevX support. */
3751 struct mlx5_priv *priv = dev->data->dev_private;
3752 struct mlx5_flow_counter *cnt = NULL;
3753 struct mlx5_devx_obj *dcs = NULL;
3755 if (!priv->config.devx) {
3756 rte_errno = ENOTSUP;
/* Shared counters are looked up by ID before allocating a new one. */
3760 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
3761 if (cnt->shared && cnt->id == id) {
/* bulk_bitmap 0: allocate one standalone DevX counter. */
3767 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3770 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
/*
 * NOTE(review): this cleanup dereferences cnt->dcs right after the
 * calloc above — if it runs on the calloc-failure branch, cnt is NULL
 * and this is a crash; destroying 'dcs' directly looks intended.
 * Verify against the full source (cleanup branches are elided here).
 */
3772 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
3776 struct mlx5_flow_counter tmpl = {
/* Create the DV action wrapping the DevX counter object. */
3782 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
3784 claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
/* Track the counter in the shared-context list. */
3790 TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
3795 * Release a flow counter.
3798 * Pointer to the Ethernet device structure.
3799 * @param[in] counter
3800 * Pointer to the counter handler.
/*
 * DevX fallback counter release: drop one reference and, when it was the
 * last, unlink the counter and destroy its DevX object.
 */
3803 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
3804 struct mlx5_flow_counter *counter)
3806 struct mlx5_priv *priv = dev->data->dev_private;
/* Last reference gone: remove from the shared list and free HW object. */
3810 if (--counter->ref_cnt == 0) {
3811 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
3812 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
3818 * Query a devx flow counter.
3821 * Pointer to the Ethernet device structure.
3823 * Pointer to the flow counter.
3825 * The statistics value of packets.
3827 * The statistics value of bytes.
3830 * 0 on success, otherwise a negative errno value and rte_errno is set.
/*
 * Fallback counter query: read hits/bytes synchronously through a DevX
 * counter-query command on the counter's own DevX object.
 */
3833 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
3834 struct mlx5_flow_counter *cnt, uint64_t *pkts,
3837 return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
3842 * Get a pool by a counter.
3845 * Pointer to the counter.
/*
 * Recover the owning pool from a counter pointer.  Counters of a pool are
 * laid out contiguously right after the pool header, so stepping back by
 * the counter's index inside the pool, then by one pool header, yields
 * the pool base.
 */
3850 static struct mlx5_flow_counter_pool *
3851 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
/* dcs->id % MLX5_COUNTERS_PER_POOL is this counter's index in the pool. */
3854 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
3855 return (struct mlx5_flow_counter_pool *)cnt - 1;
3861 * Get a pool by devx counter ID.
3864 * Pointer to the counter container.
3866 * The counter devx ID.
3869 * The counter pool pointer if exists, NULL otherwise,
/*
 * Find the pool whose devx-ID range contains the given counter ID.
 * Each pool covers one aligned window of MLX5_COUNTERS_PER_POOL IDs
 * derived from its min_dcs object.
 */
3871 static struct mlx5_flow_counter_pool *
3872 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
3874 struct mlx5_flow_counter_pool *pool;
3876 TAILQ_FOREACH(pool, &cont->pool_list, next) {
/* Base of the pool's aligned ID window. */
3877 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
3878 MLX5_COUNTERS_PER_POOL;
3880 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
3887 * Allocate a new memory for the counter values wrapped by all the needed
3891 * Pointer to the Ethernet device structure.
3893 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
3896 * The new memory management pointer on success, otherwise NULL and rte_errno
/*
 * Allocate one page-aligned memory area holding raws_n raw-statistics
 * buffers plus their descriptors and the management header, register it
 * as a DevX umem and create an mkey over it so HW can DMA counter values
 * into it.  Layout: [raw data][raw descriptors][mem_mng header].
 * NOTE(review): error-return lines after the umem/mkey failures are
 * elided in this listing.
 */
3899 static struct mlx5_counter_stats_mem_mng *
3900 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
3902 struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
3903 (dev->data->dev_private))->sh;
3904 struct mlx5_devx_mkey_attr mkey_attr;
3905 struct mlx5_counter_stats_mem_mng *mem_mng;
3906 volatile struct flow_counter_stats *raw_data;
/* Total size: raw stats + raw descriptors per buffer + the header. */
3907 int size = (sizeof(struct flow_counter_stats) *
3908 MLX5_COUNTERS_PER_POOL +
3909 sizeof(struct mlx5_counter_stats_raw)) * raws_n +
3910 sizeof(struct mlx5_counter_stats_mem_mng);
/* Page alignment is required for the umem registration below. */
3911 uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
/* The management header lives at the very end of the area. */
3918 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
/* Only the raw data portion is registered for HW write access. */
3919 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
3920 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
3921 IBV_ACCESS_LOCAL_WRITE);
3922 if (!mem_mng->umem) {
3927 mkey_attr.addr = (uintptr_t)mem;
3928 mkey_attr.size = size;
3929 mkey_attr.umem_id = mem_mng->umem->umem_id;
3930 mkey_attr.pd = sh->pdn;
3931 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
/* mkey creation failed: roll back the umem registration. */
3933 mlx5_glue->devx_umem_dereg(mem_mng->umem);
/* Wire each raw descriptor to its slice of the data area. */
3938 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3939 raw_data = (volatile struct flow_counter_stats *)mem;
3940 for (i = 0; i < raws_n; ++i) {
3941 mem_mng->raws[i].mem_mng = mem_mng;
3942 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3944 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3949 * Resize a counter container.
3952 * Pointer to the Ethernet device structure.
3954 * Whether the pool is for counter that was allocated by batch command.
3957 * The new container pointer on success, otherwise NULL and rte_errno is set.
/*
 * Grow a counter container by MLX5_CNT_CONTAINER_RESIZE pools using a
 * double-buffer scheme: populate the currently-unused container copy,
 * then flip the master index (cmng.mhi) so the host query thread picks
 * it up.  Returns the new container, NULL on failure.
 * NOTE(review): some error-return and index lines are elided in this
 * listing.
 */
3959 static struct mlx5_pools_container *
3960 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3962 struct mlx5_priv *priv = dev->data->dev_private;
3963 struct mlx5_pools_container *cont =
3964 MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3965 struct mlx5_pools_container *new_cont =
3966 MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3967 struct mlx5_counter_stats_mem_mng *mem_mng;
3968 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3969 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
/* Refuse to resize while the host thread hasn't seen the last flip. */
3972 if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3973 /* The last resize still hasn't detected by the host thread. */
3977 new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3978 if (!new_cont->pools) {
/* Carry the existing pool pointers into the enlarged array. */
3983 memcpy(new_cont->pools, cont->pools, cont->n *
3984 sizeof(struct mlx5_flow_counter_pool *));
/* Extra raws beyond the resize quota serve the pending async queries. */
3985 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3986 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3988 rte_free(new_cont->pools);
3991 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3992 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3993 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3995 new_cont->n = resize;
3996 rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3997 TAILQ_INIT(&new_cont->pool_list);
3998 TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3999 new_cont->init_mem_mng = mem_mng;
4001 /* Flip the master container. */
4002 priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
4007 * Query a devx flow counter.
4010 * Pointer to the Ethernet device structure.
4012 * Pointer to the flow counter.
4014 * The statistics value of packets.
4016 * The statistics value of bytes.
4019 * 0 on success, otherwise a negative errno value and rte_errno is set.
/*
 * Read a counter's hits/bytes from the pool's last raw snapshot (filled
 * asynchronously by the host thread), under the pool spinlock.  Falls
 * back to a synchronous DevX query when counter_fallback is set.
 * NOTE(review): the branch body zeroing *pkts/*bytes for a too-new
 * single counter is elided in this listing.
 */
4022 _flow_dv_query_count(struct rte_eth_dev *dev,
4023 struct mlx5_flow_counter *cnt, uint64_t *pkts,
4026 struct mlx5_priv *priv = dev->data->dev_private;
4027 struct mlx5_flow_counter_pool *pool =
4028 flow_dv_counter_pool_get(cnt);
/* Index of this counter inside the pool's raw data array. */
4029 int offset = cnt - &pool->counters_raw[0];
4031 if (priv->counter_fallback)
4032 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
4034 rte_spinlock_lock(&pool->sl);
4036 * The single counters allocation may allocate smaller ID than the
4037 * current allocated in parallel to the host reading.
4038 * In this case the new counter values must be reported as 0.
4040 if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
/* Raw values are big-endian as written by HW. */
4044 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4045 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4047 rte_spinlock_unlock(&pool->sl);
4052 * Create and initialize a new counter pool.
4055 * Pointer to the Ethernet device structure.
4057 * The devX counter handle.
4059 * Whether the pool is for counter that was allocated by batch command.
4062 * A new pool pointer on success, NULL otherwise and rte_errno is set.
/*
 * Create and initialize a counter pool (header plus an inline array of
 * MLX5_COUNTERS_PER_POOL counters), resizing the container first when it
 * is full, and publish it as the n_valid-th pool of the container.
 * NOTE(review): some error-return lines and the memory barrier before
 * the n_valid increment are elided in this listing.
 */
4064 static struct mlx5_flow_counter_pool *
4065 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4068 struct mlx5_priv *priv = dev->data->dev_private;
4069 struct mlx5_flow_counter_pool *pool;
4070 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4072 int16_t n_valid = rte_atomic16_read(&cont->n_valid);
/* Container full: grow it before adding the new pool. */
4075 if (cont->n == n_valid) {
4076 cont = flow_dv_container_resize(dev, batch);
/* Pool header and its counters live in one allocation. */
4080 size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
4081 sizeof(struct mlx5_flow_counter);
4082 pool = rte_calloc(__func__, 1, size, 0);
4087 pool->min_dcs = dcs;
/* Attach the pool to its raw-statistics slot in the mem manager. */
4088 pool->raw = cont->init_mem_mng->raws + n_valid %
4089 MLX5_CNT_CONTAINER_RESIZE;
4090 pool->raw_hw = NULL;
4091 rte_spinlock_init(&pool->sl);
4093 * The generation of the new allocated counters in this pool is 0, 2 in
4094 * the pool generation makes all the counters valid for allocation.
4096 rte_atomic64_set(&pool->query_gen, 0x2);
4097 TAILQ_INIT(&pool->counters);
4098 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4099 cont->pools[n_valid] = pool;
4100 /* Pool initialization must be updated before host thread access. */
4102 rte_atomic16_add(&cont->n_valid, 1);
4107 * Prepare a new counter and/or a new counter pool.
4110 * Pointer to the Ethernet device structure.
4111 * @param[out] cnt_free
4112 * Where to put the pointer of a new counter.
4114 * Whether the pool is for counter that was allocated by batch command.
4117 * The free counter pool pointer and @p cnt_free is set on success,
4118 * NULL otherwise and rte_errno is set.
/*
 * Prepare a counter for allocation: for the non-batch path allocate one
 * DevX counter and slot it into (or create) its pool; for the batch path
 * allocate a 4*128 bulk of counters, create a fresh pool and put all its
 * counters on the free list.  *cnt_free receives the counter to use.
 * NOTE(review): NULL checks, the batch branch split and return lines are
 * elided in this listing.
 */
4120 static struct mlx5_flow_counter_pool *
4121 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4122 struct mlx5_flow_counter **cnt_free,
4125 struct mlx5_priv *priv = dev->data->dev_private;
4126 struct mlx5_flow_counter_pool *pool;
4127 struct mlx5_devx_obj *dcs = NULL;
4128 struct mlx5_flow_counter *cnt;
4132 /* bulk_bitmap must be 0 for single counter allocation. */
4133 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
/* The ID may land in an existing pool's window. */
4136 pool = flow_dv_find_pool_by_id
4137 (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
4139 pool = flow_dv_pool_create(dev, dcs, batch);
4141 mlx5_devx_cmd_destroy(dcs);
/* Keep min_dcs pointing at the smallest-ID object of the pool. */
4144 } else if (dcs->id < pool->min_dcs->id) {
4145 rte_atomic64_set(&pool->a64_dcs,
4146 (int64_t)(uintptr_t)dcs);
4148 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
4149 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4154 /* bulk_bitmap is in 128 counters units. */
4155 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4156 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4158 rte_errno = ENODATA;
4161 pool = flow_dv_pool_create(dev, dcs, batch);
4163 mlx5_devx_cmd_destroy(dcs);
/* A fresh batch pool: every counter starts on the free list. */
4166 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4167 cnt = &pool->counters_raw[i];
4169 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4171 *cnt_free = &pool->counters_raw[0];
4176 * Search for existed shared counter.
4179 * Pointer to the relevant counter pool container.
4181 * The shared counter ID to search.
4184 * NULL if not existed, otherwise pointer to the shared counter.
4186 static struct mlx5_flow_counter *
4187 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
4190 static struct mlx5_flow_counter *cnt;
4191 struct mlx5_flow_counter_pool *pool;
4194 TAILQ_FOREACH(pool, &cont->pool_list, next) {
4195 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4196 cnt = &pool->counters_raw[i];
4197 if (cnt->ref_cnt && cnt->shared && cnt->id == id)
4205 * Allocate a flow counter.
4208 * Pointer to the Ethernet device structure.
4210 * Indicate if this counter is shared with other flows.
4212 * Counter identifier.
4214 * Counter flow group.
4217 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
/*
 * Allocate a flow counter: fall back to the DevX-query path when needed,
 * reuse a shared counter by ID when possible, otherwise take the first
 * free counter of a pool (preparing a new pool when none qualifies),
 * create its DV action on first use, snapshot its reset values and arm
 * the asynchronous query alarm.
 * NOTE(review): NULL checks, gotos/labels and several error branches are
 * elided in this listing; comments describe only the visible flow.
 */
4219 static struct mlx5_flow_counter *
4220 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4223 struct mlx5_priv *priv = dev->data->dev_private;
4224 struct mlx5_flow_counter_pool *pool = NULL;
4225 struct mlx5_flow_counter *cnt_free = NULL;
4227 * Currently group 0 flow counter cannot be assigned to a flow if it is
4228 * not the first one in the batch counter allocation, so it is better
4229 * to allocate counters one by one for these flows in a separate
4231 * A counter can be shared between different groups so need to take
4232 * shared counters from the single container.
4234 uint32_t batch = (group && !shared) ? 1 : 0;
4235 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4238 if (priv->counter_fallback)
4239 return flow_dv_counter_alloc_fallback(dev, shared, id);
4240 if (!priv->config.devx) {
4241 rte_errno = ENOTSUP;
/* Shared counters: reuse an existing one if the ID matches. */
4245 cnt_free = flow_dv_counter_shared_search(cont, id);
/* Guard against reference-count wrap-around. */
4247 if (cnt_free->ref_cnt + 1 == 0) {
4251 cnt_free->ref_cnt++;
4255 /* Pools which has a free counters are in the start. */
4256 TAILQ_FOREACH(pool, &cont->pool_list, next) {
4258 * The free counter reset values must be updated between the
4259 * counter release to the counter allocation, so, at least one
4260 * query must be done in this time. ensure it by saving the
4261 * query generation in the release time.
4262 * The free list is sorted according to the generation - so if
4263 * the first one is not updated, all the others are not
4266 cnt_free = TAILQ_FIRST(&pool->counters);
4267 if (cnt_free && cnt_free->query_gen + 1 <
4268 rte_atomic64_read(&pool->query_gen))
/* No eligible free counter found: prepare a new one / a new pool. */
4273 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
4277 cnt_free->batch = batch;
4278 /* Create a DV counter action only in the first time usage. */
4279 if (!cnt_free->action) {
4281 struct mlx5_devx_obj *dcs;
/* Batch counters share the pool's bulk object at an offset. */
4284 offset = cnt_free - &pool->counters_raw[0];
4285 dcs = pool->min_dcs;
4288 dcs = cnt_free->dcs;
4290 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
4292 if (!cnt_free->action) {
4297 /* Update the counter reset values. */
4298 if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
4301 cnt_free->shared = shared;
4302 cnt_free->ref_cnt = 1;
4304 if (!priv->sh->cmng.query_thread_on)
4305 /* Start the asynchronous batch query by the host thread. */
4306 mlx5_set_query_alarm(priv->sh);
4307 TAILQ_REMOVE(&pool->counters, cnt_free, next);
4308 if (TAILQ_EMPTY(&pool->counters)) {
4309 /* Move the pool to the end of the container pool list. */
4310 TAILQ_REMOVE(&cont->pool_list, pool, next);
4311 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4317 * Release a flow counter.
4320 * Pointer to the Ethernet device structure.
4321 * @param[in] counter
4322 * Pointer to the counter handler.
/*
 * Release a flow counter: fallback path delegates to the fallback
 * release; otherwise, when the last reference is dropped, the counter
 * returns to the tail of its pool's free list stamped with the current
 * query generation (so it is not re-used before one refresh cycle).
 */
4325 flow_dv_counter_release(struct rte_eth_dev *dev,
4326 struct mlx5_flow_counter *counter)
4328 struct mlx5_priv *priv = dev->data->dev_private;
4332 if (priv->counter_fallback) {
4333 flow_dv_counter_release_fallback(dev, counter);
4336 if (--counter->ref_cnt == 0) {
4337 struct mlx5_flow_counter_pool *pool =
4338 flow_dv_counter_pool_get(counter);
4340 /* Put the counter in the end - the last updated one. */
4341 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
4342 counter->query_gen = rte_atomic64_read(&pool->query_gen);
4347 * Verify the @p attributes will be correctly understood by the NIC and store
4348 * them in the @p flow if everything is correct.
4351 * Pointer to dev struct.
4352 * @param[in] attributes
4353 * Pointer to flow attributes
4354 * @param[in] external
4355 * This flow rule is created by request external to PMD.
4357 * Pointer to error structure.
4360 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate flow attributes for the DV path: group support, priority
 * range, E-Switch (transfer) constraints, and the exactly-one-direction
 * rule for ingress/egress.
 * NOTE(review): the group-to-table translation result check and some
 * error-set argument lines are elided in this listing.
 */
4363 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4364 const struct rte_flow_attr *attributes,
4365 bool external __rte_unused,
4366 struct rte_flow_error *error)
4368 struct mlx5_priv *priv = dev->data->dev_private;
/* Highest usable priority is flow_prio - 1. */
4369 uint32_t priority_max = priv->config.flow_prio - 1;
/* Without DR support only group 0 exists. */
4371 #ifndef HAVE_MLX5DV_DR
4372 if (attributes->group)
4373 return rte_flow_error_set(error, ENOTSUP,
4374 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4376 "groups are not supported");
4381 ret = mlx5_flow_group_to_table(attributes, external,
/* MLX5_FLOW_PRIO_RSVD is the "let PMD choose" sentinel. */
4387 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4388 attributes->priority >= priority_max)
4389 return rte_flow_error_set(error, ENOTSUP,
4390 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4392 "priority out of range");
4393 if (attributes->transfer) {
4394 if (!priv->config.dv_esw_en)
4395 return rte_flow_error_set
4397 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4398 "E-Switch dr is not supported");
4399 if (!(priv->representor || priv->master))
4400 return rte_flow_error_set
4401 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4402 NULL, "E-Switch configuration can only be"
4403 " done by a master or a representor device");
4404 if (attributes->egress)
4405 return rte_flow_error_set
4407 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4408 "egress is not supported");
/* Exactly one of ingress/egress must be set for non-transfer flows. */
4410 if (!(attributes->egress ^ attributes->ingress))
4411 return rte_flow_error_set(error, ENOTSUP,
4412 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4413 "must specify exactly one of "
4414 "ingress or egress");
4419 * Internal validation function. For validating both actions and items.
4422 * Pointer to the rte_eth_dev structure.
4424 * Pointer to the flow attributes.
4426 * Pointer to the list of items.
4427 * @param[in] actions
4428 * Pointer to the list of actions.
4429 * @param[in] external
4430 * This flow rule is created by request external to PMD.
4432 * Pointer to the error structure.
4435 * 0 on success, a negative errno value otherwise and rte_errno is set.
4438 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4439 const struct rte_flow_item items[],
4440 const struct rte_flow_action actions[],
4441 bool external, struct rte_flow_error *error)
4444 uint64_t action_flags = 0;
4445 uint64_t item_flags = 0;
4446 uint64_t last_item = 0;
4447 uint8_t next_protocol = 0xff;
4448 uint16_t ether_type = 0;
4450 const struct rte_flow_item *gre_item = NULL;
4451 struct rte_flow_item_tcp nic_tcp_mask = {
4454 .src_port = RTE_BE16(UINT16_MAX),
4455 .dst_port = RTE_BE16(UINT16_MAX),
4458 struct mlx5_priv *priv = dev->data->dev_private;
4459 struct mlx5_dev_config *dev_conf = &priv->config;
4463 ret = flow_dv_validate_attributes(dev, attr, external, error);
4466 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4467 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4468 int type = items->type;
4471 case RTE_FLOW_ITEM_TYPE_VOID:
4473 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4474 ret = flow_dv_validate_item_port_id
4475 (dev, items, attr, item_flags, error);
4478 last_item = MLX5_FLOW_ITEM_PORT_ID;
4480 case RTE_FLOW_ITEM_TYPE_ETH:
4481 ret = mlx5_flow_validate_item_eth(items, item_flags,
4485 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4486 MLX5_FLOW_LAYER_OUTER_L2;
4487 if (items->mask != NULL && items->spec != NULL) {
4489 ((const struct rte_flow_item_eth *)
4492 ((const struct rte_flow_item_eth *)
4494 ether_type = rte_be_to_cpu_16(ether_type);
4499 case RTE_FLOW_ITEM_TYPE_VLAN:
4500 ret = mlx5_flow_validate_item_vlan(items, item_flags,
4504 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4505 MLX5_FLOW_LAYER_OUTER_VLAN;
4506 if (items->mask != NULL && items->spec != NULL) {
4508 ((const struct rte_flow_item_vlan *)
4509 items->spec)->inner_type;
4511 ((const struct rte_flow_item_vlan *)
4512 items->mask)->inner_type;
4513 ether_type = rte_be_to_cpu_16(ether_type);
4518 case RTE_FLOW_ITEM_TYPE_IPV4:
4519 mlx5_flow_tunnel_ip_check(items, next_protocol,
4520 &item_flags, &tunnel);
4521 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
4527 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4528 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4529 if (items->mask != NULL &&
4530 ((const struct rte_flow_item_ipv4 *)
4531 items->mask)->hdr.next_proto_id) {
4533 ((const struct rte_flow_item_ipv4 *)
4534 (items->spec))->hdr.next_proto_id;
4536 ((const struct rte_flow_item_ipv4 *)
4537 (items->mask))->hdr.next_proto_id;
4539 /* Reset for inner layer. */
4540 next_protocol = 0xff;
4543 case RTE_FLOW_ITEM_TYPE_IPV6:
4544 mlx5_flow_tunnel_ip_check(items, next_protocol,
4545 &item_flags, &tunnel);
4546 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
4552 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4553 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4554 if (items->mask != NULL &&
4555 ((const struct rte_flow_item_ipv6 *)
4556 items->mask)->hdr.proto) {
4558 ((const struct rte_flow_item_ipv6 *)
4559 items->spec)->hdr.proto;
4561 ((const struct rte_flow_item_ipv6 *)
4562 items->mask)->hdr.proto;
4564 /* Reset for inner layer. */
4565 next_protocol = 0xff;
4568 case RTE_FLOW_ITEM_TYPE_TCP:
4569 ret = mlx5_flow_validate_item_tcp
4576 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4577 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4579 case RTE_FLOW_ITEM_TYPE_UDP:
4580 ret = mlx5_flow_validate_item_udp(items, item_flags,
4585 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4586 MLX5_FLOW_LAYER_OUTER_L4_UDP;
4588 case RTE_FLOW_ITEM_TYPE_GRE:
4589 ret = mlx5_flow_validate_item_gre(items, item_flags,
4590 next_protocol, error);
4594 last_item = MLX5_FLOW_LAYER_GRE;
4596 case RTE_FLOW_ITEM_TYPE_NVGRE:
4597 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
4602 last_item = MLX5_FLOW_LAYER_NVGRE;
4604 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4605 ret = mlx5_flow_validate_item_gre_key
4606 (items, item_flags, gre_item, error);
4609 last_item = MLX5_FLOW_LAYER_GRE_KEY;
4611 case RTE_FLOW_ITEM_TYPE_VXLAN:
4612 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
4616 last_item = MLX5_FLOW_LAYER_VXLAN;
4618 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4619 ret = mlx5_flow_validate_item_vxlan_gpe(items,
4624 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4626 case RTE_FLOW_ITEM_TYPE_GENEVE:
4627 ret = mlx5_flow_validate_item_geneve(items,
4632 last_item = MLX5_FLOW_LAYER_GENEVE;
4634 case RTE_FLOW_ITEM_TYPE_MPLS:
4635 ret = mlx5_flow_validate_item_mpls(dev, items,
4640 last_item = MLX5_FLOW_LAYER_MPLS;
4643 case RTE_FLOW_ITEM_TYPE_MARK:
4644 ret = flow_dv_validate_item_mark(dev, items, attr,
4648 last_item = MLX5_FLOW_ITEM_MARK;
4650 case RTE_FLOW_ITEM_TYPE_META:
4651 ret = flow_dv_validate_item_meta(dev, items, attr,
4655 last_item = MLX5_FLOW_ITEM_METADATA;
4657 case RTE_FLOW_ITEM_TYPE_ICMP:
4658 ret = mlx5_flow_validate_item_icmp(items, item_flags,
4663 last_item = MLX5_FLOW_LAYER_ICMP;
4665 case RTE_FLOW_ITEM_TYPE_ICMP6:
4666 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
4671 last_item = MLX5_FLOW_LAYER_ICMP6;
4673 case RTE_FLOW_ITEM_TYPE_TAG:
4674 ret = flow_dv_validate_item_tag(dev, items,
4678 last_item = MLX5_FLOW_ITEM_TAG;
4680 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4681 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
4683 case RTE_FLOW_ITEM_TYPE_GTP:
4684 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
4688 last_item = MLX5_FLOW_LAYER_GTP;
4691 return rte_flow_error_set(error, ENOTSUP,
4692 RTE_FLOW_ERROR_TYPE_ITEM,
4693 NULL, "item not supported");
4695 item_flags |= last_item;
4697 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4698 int type = actions->type;
4699 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4700 return rte_flow_error_set(error, ENOTSUP,
4701 RTE_FLOW_ERROR_TYPE_ACTION,
4702 actions, "too many actions");
4704 case RTE_FLOW_ACTION_TYPE_VOID:
4706 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4707 ret = flow_dv_validate_action_port_id(dev,
4714 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4717 case RTE_FLOW_ACTION_TYPE_FLAG:
4718 ret = flow_dv_validate_action_flag(dev, action_flags,
4722 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4723 /* Count all modify-header actions as one. */
4724 if (!(action_flags &
4725 MLX5_FLOW_MODIFY_HDR_ACTIONS))
4727 action_flags |= MLX5_FLOW_ACTION_FLAG |
4728 MLX5_FLOW_ACTION_MARK_EXT;
4730 action_flags |= MLX5_FLOW_ACTION_FLAG;
4734 case RTE_FLOW_ACTION_TYPE_MARK:
4735 ret = flow_dv_validate_action_mark(dev, actions,
4740 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4741 /* Count all modify-header actions as one. */
4742 if (!(action_flags &
4743 MLX5_FLOW_MODIFY_HDR_ACTIONS))
4745 action_flags |= MLX5_FLOW_ACTION_MARK |
4746 MLX5_FLOW_ACTION_MARK_EXT;
4748 action_flags |= MLX5_FLOW_ACTION_MARK;
4752 case RTE_FLOW_ACTION_TYPE_SET_META:
4753 ret = flow_dv_validate_action_set_meta(dev, actions,
4758 /* Count all modify-header actions as one action. */
4759 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4761 action_flags |= MLX5_FLOW_ACTION_SET_META;
4763 case RTE_FLOW_ACTION_TYPE_SET_TAG:
4764 ret = flow_dv_validate_action_set_tag(dev, actions,
4769 /* Count all modify-header actions as one action. */
4770 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4772 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
4774 case RTE_FLOW_ACTION_TYPE_DROP:
4775 ret = mlx5_flow_validate_action_drop(action_flags,
4779 action_flags |= MLX5_FLOW_ACTION_DROP;
4782 case RTE_FLOW_ACTION_TYPE_QUEUE:
4783 ret = mlx5_flow_validate_action_queue(actions,
4788 action_flags |= MLX5_FLOW_ACTION_QUEUE;
4791 case RTE_FLOW_ACTION_TYPE_RSS:
4792 ret = mlx5_flow_validate_action_rss(actions,
4798 action_flags |= MLX5_FLOW_ACTION_RSS;
4801 case RTE_FLOW_ACTION_TYPE_COUNT:
4802 ret = flow_dv_validate_action_count(dev, error);
4805 action_flags |= MLX5_FLOW_ACTION_COUNT;
4808 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4809 if (flow_dv_validate_action_pop_vlan(dev,
4815 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4818 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4819 ret = flow_dv_validate_action_push_vlan(action_flags,
4825 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4828 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4829 ret = flow_dv_validate_action_set_vlan_pcp
4830 (action_flags, actions, error);
4833 /* Count PCP with push_vlan command. */
4834 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
4836 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4837 ret = flow_dv_validate_action_set_vlan_vid
4838 (item_flags, action_flags,
4842 /* Count VID with push_vlan command. */
4843 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4845 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4846 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4847 ret = flow_dv_validate_action_l2_encap(action_flags,
4852 action_flags |= actions->type ==
4853 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4854 MLX5_FLOW_ACTION_VXLAN_ENCAP :
4855 MLX5_FLOW_ACTION_NVGRE_ENCAP;
4858 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4859 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4860 ret = flow_dv_validate_action_l2_decap(action_flags,
4864 action_flags |= actions->type ==
4865 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4866 MLX5_FLOW_ACTION_VXLAN_DECAP :
4867 MLX5_FLOW_ACTION_NVGRE_DECAP;
4870 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4871 ret = flow_dv_validate_action_raw_encap(action_flags,
4876 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4879 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4880 ret = flow_dv_validate_action_raw_decap(action_flags,
4885 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4888 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4889 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4890 ret = flow_dv_validate_action_modify_mac(action_flags,
4896 /* Count all modify-header actions as one action. */
4897 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4899 action_flags |= actions->type ==
4900 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4901 MLX5_FLOW_ACTION_SET_MAC_SRC :
4902 MLX5_FLOW_ACTION_SET_MAC_DST;
4905 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4906 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4907 ret = flow_dv_validate_action_modify_ipv4(action_flags,
4913 /* Count all modify-header actions as one action. */
4914 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4916 action_flags |= actions->type ==
4917 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4918 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4919 MLX5_FLOW_ACTION_SET_IPV4_DST;
4921 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4922 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4923 ret = flow_dv_validate_action_modify_ipv6(action_flags,
4929 /* Count all modify-header actions as one action. */
4930 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4932 action_flags |= actions->type ==
4933 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4934 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4935 MLX5_FLOW_ACTION_SET_IPV6_DST;
4937 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4938 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4939 ret = flow_dv_validate_action_modify_tp(action_flags,
4945 /* Count all modify-header actions as one action. */
4946 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4948 action_flags |= actions->type ==
4949 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4950 MLX5_FLOW_ACTION_SET_TP_SRC :
4951 MLX5_FLOW_ACTION_SET_TP_DST;
4953 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4954 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4955 ret = flow_dv_validate_action_modify_ttl(action_flags,
4961 /* Count all modify-header actions as one action. */
4962 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4964 action_flags |= actions->type ==
4965 RTE_FLOW_ACTION_TYPE_SET_TTL ?
4966 MLX5_FLOW_ACTION_SET_TTL :
4967 MLX5_FLOW_ACTION_DEC_TTL;
4969 case RTE_FLOW_ACTION_TYPE_JUMP:
4970 ret = flow_dv_validate_action_jump(actions,
4977 action_flags |= MLX5_FLOW_ACTION_JUMP;
4979 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4980 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4981 ret = flow_dv_validate_action_modify_tcp_seq
4988 /* Count all modify-header actions as one action. */
4989 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4991 action_flags |= actions->type ==
4992 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4993 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4994 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4996 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4997 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4998 ret = flow_dv_validate_action_modify_tcp_ack
5005 /* Count all modify-header actions as one action. */
5006 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5008 action_flags |= actions->type ==
5009 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5010 MLX5_FLOW_ACTION_INC_TCP_ACK :
5011 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5013 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5014 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5015 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5017 case RTE_FLOW_ACTION_TYPE_METER:
5018 ret = mlx5_flow_validate_action_meter(dev,
5024 action_flags |= MLX5_FLOW_ACTION_METER;
5027 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5028 ret = flow_dv_validate_action_modify_ipv4_dscp
5035 /* Count all modify-header actions as one action. */
5036 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5038 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5040 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5041 ret = flow_dv_validate_action_modify_ipv6_dscp
5048 /* Count all modify-header actions as one action. */
5049 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5051 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5054 return rte_flow_error_set(error, ENOTSUP,
5055 RTE_FLOW_ERROR_TYPE_ACTION,
5057 "action not supported");
5060 if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
5061 (action_flags & MLX5_FLOW_VLAN_ACTIONS))
5062 return rte_flow_error_set(error, ENOTSUP,
5063 RTE_FLOW_ERROR_TYPE_ACTION,
5065 "can't have vxlan and vlan"
5066 " actions in the same rule");
5067 /* Eswitch has few restrictions on using items and actions */
5068 if (attr->transfer) {
5069 if (!mlx5_flow_ext_mreg_supported(dev) &&
5070 action_flags & MLX5_FLOW_ACTION_FLAG)
5071 return rte_flow_error_set(error, ENOTSUP,
5072 RTE_FLOW_ERROR_TYPE_ACTION,
5074 "unsupported action FLAG");
5075 if (!mlx5_flow_ext_mreg_supported(dev) &&
5076 action_flags & MLX5_FLOW_ACTION_MARK)
5077 return rte_flow_error_set(error, ENOTSUP,
5078 RTE_FLOW_ERROR_TYPE_ACTION,
5080 "unsupported action MARK");
5081 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5082 return rte_flow_error_set(error, ENOTSUP,
5083 RTE_FLOW_ERROR_TYPE_ACTION,
5085 "unsupported action QUEUE");
5086 if (action_flags & MLX5_FLOW_ACTION_RSS)
5087 return rte_flow_error_set(error, ENOTSUP,
5088 RTE_FLOW_ERROR_TYPE_ACTION,
5090 "unsupported action RSS");
5091 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5092 return rte_flow_error_set(error, EINVAL,
5093 RTE_FLOW_ERROR_TYPE_ACTION,
5095 "no fate action is found");
5097 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5098 return rte_flow_error_set(error, EINVAL,
5099 RTE_FLOW_ERROR_TYPE_ACTION,
5101 "no fate action is found");
5107 * Internal preparation function. Allocates the DV flow size,
5108 * this size is constant.
5111 * Pointer to the flow attributes.
5113 * Pointer to the list of items.
5114 * @param[in] actions
5115 * Pointer to the list of actions.
5117 * Pointer to the error structure.
5120 * Pointer to mlx5_flow object on success,
5121 * otherwise NULL and rte_errno is set.
5123 static struct mlx5_flow *
5124 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
5125 const struct rte_flow_item items[] __rte_unused,
5126 const struct rte_flow_action actions[] __rte_unused,
5127 struct rte_flow_error *error)
5129 size_t size = sizeof(struct mlx5_flow);
5130 struct mlx5_flow *dev_flow;
/* Zero-initialized allocation; tagged with the function name for tracing. */
5132 dev_flow = rte_calloc(__func__, 1, size, 0);
/* On allocation failure report ENOMEM through the rte_flow error struct. */
5134 rte_flow_error_set(error, ENOMEM,
5135 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5136 "not enough memory to create flow");
/* Match-value buffer covers the full fte_match_param HW layout. */
5139 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
/* Cache direction/transfer attributes on the device flow for translation. */
5140 dev_flow->ingress = attr->ingress;
5141 dev_flow->transfer = attr->transfer;
5147 * Sanity check for match mask and value. Similar to check_valid_spec() in
5148 * kernel driver. If unmasked bit is present in value, it returns failure.
5151 * pointer to match mask buffer.
5152 * @param match_value
5153 * pointer to match value buffer.
5156 * 0 if valid, -EINVAL otherwise.
5159 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5161 uint8_t *m = match_mask;
5162 uint8_t *v = match_value;
/*
 * Byte-wise scan of the whole fte_match_param area: any value bit set
 * outside its corresponding mask bit indicates an inconsistent spec.
 */
5165 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5168 "match_value differs from match_criteria"
5169 " %p[%u] != %p[%u]",
5170 match_value, i, match_mask, i);
5179 * Add Ethernet item to matcher and to the value.
5181 * @param[in, out] matcher
5183 * @param[in, out] key
5184 * Flow matcher value.
5186 * Flow pattern to translate.
5188 * Item is inner pattern.
5191 flow_dv_translate_item_eth(void *matcher, void *key,
5192 const struct rte_flow_item *item, int inner)
5194 const struct rte_flow_item_eth *eth_m = item->mask;
5195 const struct rte_flow_item_eth *eth_v = item->spec;
/* Widest mask supported by the NIC for an Ethernet item. */
5196 const struct rte_flow_item_eth nic_mask = {
5197 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5198 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5199 .type = RTE_BE16(0xffff),
/* Select inner vs. outer header area depending on tunnel position. */
5211 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5213 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5215 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5217 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* NOTE(review): "ð" below looks like mojibake of "&eth" — confirm source. */
5219 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5220 ð_m->dst, sizeof(eth_m->dst));
5221 /* The value must be in the range of the mask. */
5222 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5223 for (i = 0; i < sizeof(eth_m->dst); ++i)
5224 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5225 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5226 ð_m->src, sizeof(eth_m->src));
5227 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5228 /* The value must be in the range of the mask. */
/* NOTE(review): loop bound uses sizeof(eth_m->dst) for the src copy;
 * benign since both MAC fields are the same size, but src would be clearer. */
5229 for (i = 0; i < sizeof(eth_m->dst); ++i)
5230 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
/* Ethertype is matched in CPU byte order in the HW layout. */
5231 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5232 rte_be_to_cpu_16(eth_m->type));
5233 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
5234 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5238 * Add VLAN item to matcher and to the value.
5240 * @param[in, out] dev_flow
5242 * @param[in, out] matcher
5244 * @param[in, out] key
5245 * Flow matcher value.
5247 * Flow pattern to translate.
5249 * Item is inner pattern.
5252 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5253 void *matcher, void *key,
5254 const struct rte_flow_item *item,
5257 const struct rte_flow_item_vlan *vlan_m = item->mask;
5258 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* No explicit mask: fall back to the rte_flow default VLAN mask. */
5267 vlan_m = &rte_flow_item_vlan_mask;
5269 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5271 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5273 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5275 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5277 * This is workaround, masks are not supported,
5278 * and pre-validated.
/* Keep the VID (low 12 bits of TCI) for the VF VLAN workaround path. */
5280 dev_flow->dv.vf_vlan.tag =
5281 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5283 tci_m = rte_be_to_cpu_16(vlan_m->tci);
5284 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of a C-VLAN tag is matched unconditionally for this item. */
5285 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5286 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
/* TCI layout: VID = bits 0-11, CFI/DEI = bit 12, PCP = bits 13-15. */
5287 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5288 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5289 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5290 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5291 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5292 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/* The VLAN item also constrains the encapsulated ethertype. */
5293 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5294 rte_be_to_cpu_16(vlan_m->inner_type));
5295 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
5296 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
5300 * Add IPV4 item to matcher and to the value.
5302 * @param[in, out] matcher
5304 * @param[in, out] key
5305 * Flow matcher value.
5307 * Flow pattern to translate.
5309 * Item is inner pattern.
5311 * The group to insert the rule.
5314 flow_dv_translate_item_ipv4(void *matcher, void *key,
5315 const struct rte_flow_item *item,
5316 int inner, uint32_t group)
5318 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
5319 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Widest IPv4 mask the NIC supports (addresses, ToS, next protocol). */
5320 const struct rte_flow_item_ipv4 nic_mask = {
5322 .src_addr = RTE_BE32(0xffffffff),
5323 .dst_addr = RTE_BE32(0xffffffff),
5324 .type_of_service = 0xff,
5325 .next_proto_id = 0xff,
5335 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5337 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5339 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5341 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version: mask 0xf vs 0x4 differs by group — presumably root-table
 * restriction; TODO confirm against elided condition. */
5344 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5346 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
5347 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Value bytes are pre-masked so value never exceeds the mask. */
5352 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5353 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5354 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5355 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5356 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
5357 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
5358 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5359 src_ipv4_src_ipv6.ipv4_layout.ipv4);
5360 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5361 src_ipv4_src_ipv6.ipv4_layout.ipv4);
5362 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
5363 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* ToS is split in HW: ECN = low 2 bits, DSCP = upper 6 bits (>> 2). */
5364 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
5365 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
5366 ipv4_m->hdr.type_of_service);
5367 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
5368 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
5369 ipv4_m->hdr.type_of_service >> 2);
5370 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
5371 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5372 ipv4_m->hdr.next_proto_id);
5373 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5374 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
5378 * Add IPV6 item to matcher and to the value.
5380 * @param[in, out] matcher
5382 * @param[in, out] key
5383 * Flow matcher value.
5385 * Flow pattern to translate.
5387 * Item is inner pattern.
5389 * The group to insert the rule.
5392 flow_dv_translate_item_ipv6(void *matcher, void *key,
5393 const struct rte_flow_item *item,
5394 int inner, uint32_t group)
5396 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
5397 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest IPv6 mask the NIC supports (full addresses + vtc_flow word). */
5398 const struct rte_flow_item_ipv6 nic_mask = {
5401 "\xff\xff\xff\xff\xff\xff\xff\xff"
5402 "\xff\xff\xff\xff\xff\xff\xff\xff",
5404 "\xff\xff\xff\xff\xff\xff\xff\xff"
5405 "\xff\xff\xff\xff\xff\xff\xff\xff",
5406 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in misc parameters, not in the L2-L4 header area. */
5413 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5414 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5423 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5425 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5427 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5429 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version: mask 0xf vs 0x6 differs by group — presumably root-table
 * restriction; TODO confirm against elided condition. */
5432 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5434 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
5435 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Copy 16-byte addresses; value bytes are pre-masked per byte. */
5440 size = sizeof(ipv6_m->hdr.dst_addr);
5441 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5442 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5443 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5444 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5445 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
5446 for (i = 0; i < size; ++i)
5447 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
5448 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5449 src_ipv4_src_ipv6.ipv6_layout.ipv6);
5450 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5451 src_ipv4_src_ipv6.ipv6_layout.ipv6);
5452 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
5453 for (i = 0; i < size; ++i)
5454 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow word: TC at bits 20-27 → ECN = >>20 (low 2), DSCP = >>22. */
5456 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
5457 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
5458 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
5459 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
5460 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
5461 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to the inner or outer misc field per tunnel position. */
5464 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
5466 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
5469 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
5471 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
5475 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5477 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5478 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
5482 * Add TCP item to matcher and to the value.
5484 * @param[in, out] matcher
5486 * @param[in, out] key
5487 * Flow matcher value.
5489 * Flow pattern to translate.
5491 * Item is inner pattern.
5494 flow_dv_translate_item_tcp(void *matcher, void *key,
5495 const struct rte_flow_item *item,
5498 const struct rte_flow_item_tcp *tcp_m = item->mask;
5499 const struct rte_flow_item_tcp *tcp_v = item->spec;
5504 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5506 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5508 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5510 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the L4 protocol to TCP with a full-byte mask. */
5512 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5513 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* No explicit mask: fall back to the rte_flow default TCP mask. */
5517 tcp_m = &rte_flow_item_tcp_mask;
/* Ports and flags are written value&mask so value stays within mask. */
5518 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
5519 rte_be_to_cpu_16(tcp_m->hdr.src_port));
5520 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
5521 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
5522 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
5523 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
5524 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
5525 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
5526 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
5527 tcp_m->hdr.tcp_flags);
5528 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
5529 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
5533 * Add UDP item to matcher and to the value.
5535 * @param[in, out] matcher
5537 * @param[in, out] key
5538 * Flow matcher value.
5540 * Flow pattern to translate.
5542 * Item is inner pattern.
5545 flow_dv_translate_item_udp(void *matcher, void *key,
5546 const struct rte_flow_item *item,
5549 const struct rte_flow_item_udp *udp_m = item->mask;
5550 const struct rte_flow_item_udp *udp_v = item->spec;
5555 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5557 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5559 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5561 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the L4 protocol to UDP with a full-byte mask. */
5563 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5564 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* No explicit mask: fall back to the rte_flow default UDP mask. */
5568 udp_m = &rte_flow_item_udp_mask;
/* Ports are written value&mask so value stays within mask. */
5569 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
5570 rte_be_to_cpu_16(udp_m->hdr.src_port));
5571 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
5572 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
5573 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
5574 rte_be_to_cpu_16(udp_m->hdr.dst_port));
5575 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5576 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
5580 * Add GRE optional Key item to matcher and to the value.
5582 * @param[in, out] matcher
5584 * @param[in, out] key
5585 * Flow matcher value.
5587 * Flow pattern to translate.
5589 * Item is inner pattern.
5592 flow_dv_translate_item_gre_key(void *matcher, void *key,
5593 const struct rte_flow_item *item)
5595 const rte_be32_t *key_m = item->mask;
5596 const rte_be32_t *key_v = item->spec;
5597 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5598 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Default mask: match the GRE key exactly when no mask is supplied. */
5599 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5604 key_m = &gre_key_default_mask;
5605 /* GRE K bit must be on and should already be validated */
5606 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
5607 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
/* HW splits the 32-bit key: gre_key_h = top 24 bits, gre_key_l = low 8. */
5608 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
5609 rte_be_to_cpu_32(*key_m) >> 8);
5610 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
5611 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
5612 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
5613 rte_be_to_cpu_32(*key_m) & 0xFF);
5614 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
5615 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
5619 * Add GRE item to matcher and to the value.
5621 * @param[in, out] matcher
5623 * @param[in, out] key
5624 * Flow matcher value.
5626 * Flow pattern to translate.
5628 * Item is inner pattern.
5631 flow_dv_translate_item_gre(void *matcher, void *key,
5632 const struct rte_flow_item *item,
5635 const struct rte_flow_item_gre *gre_m = item->mask;
5636 const struct rte_flow_item_gre *gre_v = item->spec;
5639 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5640 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/*
 * Bit-field view of the GRE c_rsvd0_ver word used to pick out the
 * C (checksum), K (key) and S (sequence) present bits individually.
 */
5647 uint16_t s_present:1;
5648 uint16_t k_present:1;
5649 uint16_t rsvd_bit1:1;
5650 uint16_t c_present:1;
5654 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
5657 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5659 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5661 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5663 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the L3 next-protocol to GRE. */
5665 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5666 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* No explicit mask: fall back to the rte_flow default GRE mask. */
5670 gre_m = &rte_flow_item_gre_mask;
5671 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
5672 rte_be_to_cpu_16(gre_m->protocol));
5673 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5674 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
/* Decompose C/K/S flag bits and match each, value masked by its mask. */
5675 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
5676 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
5677 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
5678 gre_crks_rsvd0_ver_m.c_present);
5679 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
5680 gre_crks_rsvd0_ver_v.c_present &
5681 gre_crks_rsvd0_ver_m.c_present);
5682 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
5683 gre_crks_rsvd0_ver_m.k_present);
5684 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
5685 gre_crks_rsvd0_ver_v.k_present &
5686 gre_crks_rsvd0_ver_m.k_present);
5687 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
5688 gre_crks_rsvd0_ver_m.s_present);
5689 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
5690 gre_crks_rsvd0_ver_v.s_present &
5691 gre_crks_rsvd0_ver_m.s_present);
5695 * Add NVGRE item to matcher and to the value.
5697 * @param[in, out] matcher
5699 * @param[in, out] key
5700 * Flow matcher value.
5702 * Flow pattern to translate.
5704 * Item is inner pattern.
5707 flow_dv_translate_item_nvgre(void *matcher, void *key,
5708 const struct rte_flow_item *item,
5711 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
5712 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
5713 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5714 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5715 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
5716 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
5722 /* For NVGRE, GRE header fields must be set with defined values. */
5723 const struct rte_flow_item_gre gre_spec = {
5724 .c_rsvd0_ver = RTE_BE16(0x2000),
5725 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
5727 const struct rte_flow_item_gre gre_mask = {
5728 .c_rsvd0_ver = RTE_BE16(0xB000),
5729 .protocol = RTE_BE16(UINT16_MAX),
5731 const struct rte_flow_item gre_item = {
/* Reuse the GRE translator for the fixed K-bit/TEB-protocol header. */
5736 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
/* No explicit mask: fall back to the rte_flow default NVGRE mask. */
5740 nvgre_m = &rte_flow_item_nvgre_mask;
/* TNI (3 bytes) + flow_id (1 byte) occupy the GRE key field in HW. */
5741 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
5742 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
5743 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
5744 memcpy(gre_key_m, tni_flow_id_m, size);
5745 for (i = 0; i < size; ++i)
5746 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
5750 * Add VXLAN item to matcher and to the value.
5752 * @param[in, out] matcher
5754 * @param[in, out] key
5755 * Flow matcher value.
5757 * Flow pattern to translate.
5759 * Item is inner pattern.
5762 flow_dv_translate_item_vxlan(void *matcher, void *key,
5763 const struct rte_flow_item *item,
5766 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
5767 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
5770 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5771 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5779 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5781 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5783 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5785 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port for VXLAN vs. VXLAN-GPE. */
5787 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
5788 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the default port if the UDP item didn't already match one. */
5789 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5790 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5791 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* No explicit mask: fall back to the rte_flow default VXLAN mask. */
5796 vxlan_m = &rte_flow_item_vxlan_mask;
/* Copy the 3-byte VNI; value bytes are pre-masked per byte. */
5797 size = sizeof(vxlan_m->vni);
5798 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
5799 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
5800 memcpy(vni_m, vxlan_m->vni, size);
5801 for (i = 0; i < size; ++i)
5802 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
5806 * Add Geneve item to matcher and to the value.
5808 * @param[in, out] matcher
5810 * @param[in, out] key
5811 * Flow matcher value.
5813 * Flow pattern to translate.
5815 * Item is inner pattern.
5819 flow_dv_translate_item_geneve(void *matcher, void *key,
5820 const struct rte_flow_item *item, int inner)
5822 const struct rte_flow_item_geneve *geneve_m = item->mask;
5823 const struct rte_flow_item_geneve *geneve_v = item->spec;
5826 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5827 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5836 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5838 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5840 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5842 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Only force the default Geneve UDP port if not already matched. */
5844 dport = MLX5_UDP_PORT_GENEVE;
5845 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5846 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5847 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* No explicit mask: fall back to the rte_flow default Geneve mask. */
5852 geneve_m = &rte_flow_item_geneve_mask;
/* Copy the 3-byte VNI; value bytes are pre-masked per byte. */
5853 size = sizeof(geneve_m->vni);
5854 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
5855 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
5856 memcpy(vni_m, geneve_m->vni, size);
5857 for (i = 0; i < size; ++i)
5858 vni_v[i] = vni_m[i] & geneve_v->vni[i];
5859 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
5860 rte_be_to_cpu_16(geneve_m->protocol));
5861 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
5862 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
/* Extract OAM flag and option-length from the ver/opt-len/flags word. */
5863 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
5864 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
5865 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
5866 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5867 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
5868 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5869 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
5870 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5871 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
5872 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
5873 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5877 * Add MPLS item to matcher and to the value.
5879 * @param[in, out] matcher
5881 * @param[in, out] key
5882 * Flow matcher value.
5884 * Flow pattern to translate.
5885 * @param[in] prev_layer
5886 * The protocol layer indicated in previous item.
5888 * Item is inner pattern.
5891 flow_dv_translate_item_mpls(void *matcher, void *key,
5892 const struct rte_flow_item *item,
5893 uint64_t prev_layer,
5896 const uint32_t *in_mpls_m = item->mask;
5897 const uint32_t *in_mpls_v = item->spec;
5898 uint32_t *out_mpls_m = 0;
5899 uint32_t *out_mpls_v = 0;
5900 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5901 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5902 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
5904 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
5905 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
5906 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First pass: pin the carrier protocol per the previous layer. */
5908 switch (prev_layer) {
5909 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
/* MPLS-over-UDP: match the well-known MPLS UDP port. */
5910 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
5911 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5912 MLX5_UDP_PORT_MPLS);
5914 case MLX5_FLOW_LAYER_GRE:
/* MPLS-over-GRE: match the MPLS ethertype in the GRE protocol field. */
5915 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
5916 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5917 RTE_ETHER_TYPE_MPLS);
5920 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5921 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* No explicit mask: fall back to the rte_flow default MPLS mask. */
5928 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second pass: select which misc2 MPLS field receives the label word. */
5929 switch (prev_layer) {
5930 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
5932 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5933 outer_first_mpls_over_udp);
5935 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5936 outer_first_mpls_over_udp);
5938 case MLX5_FLOW_LAYER_GRE:
5940 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5941 outer_first_mpls_over_gre);
5943 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5944 outer_first_mpls_over_gre);
5947 /* Inner MPLS not over GRE is not supported. */
5950 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
5954 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the 32-bit MPLS word; value is pre-masked. */
5960 if (out_mpls_m && out_mpls_v) {
5961 *out_mpls_m = *in_mpls_m;
5962 *out_mpls_v = *in_mpls_v & *in_mpls_m;
5967 * Add metadata register item to matcher
5969 * @param[in, out] matcher
5971 * @param[in, out] key
5972 * Flow matcher value.
5973 * @param[in] reg_type
5974 * Type of device metadata register
5981 flow_dv_match_meta_reg(void *matcher, void *key,
5982 enum modify_reg reg_type,
5983 uint32_t data, uint32_t mask)
5986 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
5988 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
/* Dispatch on the register: each maps to its own misc2 field. */
5994 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
5995 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
5998 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
5999 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6003 * The metadata register C0 field might be divided into
6004 * source vport index and META item value, we should set
6005 * this field according to specified mask, not as whole one.
/* Read-modify-write: merge only the masked bits, preserving any vport
 * match already present in reg C0. */
6007 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6009 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6010 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6013 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6016 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6017 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6020 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6021 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6024 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6025 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6028 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6029 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6032 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6033 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6036 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6037 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6040 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6041 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6050 * Add MARK item to matcher
6053 * The device to configure through.
6054 * @param[in, out] matcher
6056 * @param[in, out] key
6057 * Flow matcher value.
6059 * Flow pattern to translate.
6062 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6063 void *matcher, void *key,
6064 const struct rte_flow_item *item)
6066 struct mlx5_priv *priv = dev->data->dev_private;
6067 const struct rte_flow_item_mark *mark;
/* Use the default MARK mask when none is given; clamp to the bits the
 * device actually supports for mark matching. */
6071 mark = item->mask ? (const void *)item->mask :
6072 &rte_flow_item_mark_mask;
6073 mask = mark->id & priv->sh->dv_mark_mask;
6074 mark = (const void *)item->spec;
6076 value = mark->id & priv->sh->dv_mark_mask & mask;
6078 enum modify_reg reg;
6080 /* Get the metadata register index for the mark. */
6081 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
/* REG_C_0 is shared with vport metadata: shift into the usable bits. */
6083 if (reg == REG_C_0) {
6084 struct mlx5_priv *priv = dev->data->dev_private;
6085 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6086 uint32_t shl_c0 = rte_bsf32(msk_c0);
/* Delegate the actual misc2 register match to the common helper. */
6092 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6097 * Add META item to matcher
6100 * The device to configure through.
6101 * @param[in, out] matcher
6103 * @param[in, out] key
6104 * Flow matcher value.
6106 * Attributes of flow that includes this item.
6108 * Flow pattern to translate.
6111 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6112 void *matcher, void *key,
6113 const struct rte_flow_attr *attr,
6114 const struct rte_flow_item *item)
6116 const struct rte_flow_item_meta *meta_m;
6117 const struct rte_flow_item_meta *meta_v;
/* Use the default META mask when none is given. */
6119 meta_m = (const void *)item->mask;
6121 meta_m = &rte_flow_item_meta_mask;
6122 meta_v = (const void *)item->spec;
6124 enum modify_reg reg;
6125 uint32_t value = meta_v->data;
6126 uint32_t mask = meta_m->data;
/* Resolve which metadata register carries META for these attributes. */
6128 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6132 * In datapath code there is no endianness
6133 * conversions for performance reasons, all
6134 * pattern conversions are done in rte_flow.
6136 value = rte_cpu_to_be_32(value);
6137 mask = rte_cpu_to_be_32(mask);
/* REG_C_0 is shared with vport metadata: align value/mask into the
 * device-owned bit range before matching. */
6138 if (reg == REG_C_0) {
6139 struct mlx5_priv *priv = dev->data->dev_private;
6140 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6141 uint32_t shl_c0 = rte_bsf32(msk_c0);
6142 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6143 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
/* Sanity: the shifted mask must fit inside the REG_C_0 usable bits. */
6151 assert(!(~msk_c0 & mask));
6153 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6158 * Add vport metadata Reg C0 item to matcher
6160 * @param[in, out] matcher
6162 * @param[in, out] key
6163 * Flow matcher value.
6165 * Flow pattern to translate.
6168 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6169 uint32_t value, uint32_t mask)
/* Source-vport metadata is always carried in register C0. */
6171 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6175 * Add tag item to matcher
6178 * The device to configure through.
6179 * @param[in, out] matcher
6181 * @param[in, out] key
6182 * Flow matcher value.
6184 * Flow pattern to translate.
6187 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6188 void *matcher, void *key,
6189 const struct rte_flow_item *item)
6191 const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
6192 const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
6193 uint32_t mask, value;
6196 value = tag_v->data;
/* No mask in the item means match all bits of the tag. */
6197 mask = tag_m ? tag_m->data : UINT32_MAX;
/*
 * NOTE(review): REG_C_0 is shared; msk_c0/shl_c0 delimit the
 * driver-owned bits — the shift of value/mask into that range is in
 * lines not shown here; confirm against the full source.
 */
6198 if (tag_v->id == REG_C_0) {
6199 struct mlx5_priv *priv = dev->data->dev_private;
6200 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6201 uint32_t shl_c0 = rte_bsf32(msk_c0);
6207 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
6211 * Add TAG item to matcher
6214 * The device to configure through.
6215 * @param[in, out] matcher
6217 * @param[in, out] key
6218 * Flow matcher value.
6220 * Flow pattern to translate.
6223 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
6224 void *matcher, void *key,
6225 const struct rte_flow_item *item)
6227 const struct rte_flow_item_tag *tag_v = item->spec;
6228 const struct rte_flow_item_tag *tag_m = item->mask;
6229 enum modify_reg reg;
/* Default mask matches all bits of the tag data. */
6232 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
6233 /* Get the metadata register index for the tag. */
6234 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
6236 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
6240 * Add source vport match to the specified matcher.
6242 * @param[in, out] matcher
6244 * @param[in, out] key
6245 * Flow matcher value.
6247 * Source vport value to match
6252 flow_dv_translate_item_source_vport(void *matcher, void *key,
6253 int16_t port, uint16_t mask)
6255 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6256 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Match on the source_port field of the misc parameters. */
6258 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
6259 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
6263 * Translate port-id item to eswitch match on port-id.
6266 * The device to configure through.
6267 * @param[in, out] matcher
6269 * @param[in, out] key
6270 * Flow matcher value.
6272 * Flow pattern to translate.
6275 * 0 on success, a negative errno value otherwise.
6278 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
6279 void *key, const struct rte_flow_item *item)
6281 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
6282 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
6283 struct mlx5_priv *priv;
/* With no item, default to matching this device's own port. */
6286 mask = pid_m ? pid_m->id : 0xffff;
6287 id = pid_v ? pid_v->id : dev->data->port_id;
6288 priv = mlx5_port_to_eswitch_info(id, item == NULL);
6291 /* Translate to vport field or to metadata, depending on mode. */
6292 if (priv->vport_meta_mask)
6293 flow_dv_translate_item_meta_vport(matcher, key,
6294 priv->vport_meta_tag,
6295 priv->vport_meta_mask);
6297 flow_dv_translate_item_source_vport(matcher, key,
6298 priv->vport_id, mask);
6303 * Add ICMP6 item to matcher and to the value.
6305 * @param[in, out] matcher
6307 * @param[in, out] key
6308 * Flow matcher value.
6310 * Flow pattern to translate.
6312 * Item is inner pattern.
6315 flow_dv_translate_item_icmp6(void *matcher, void *key,
6316 const struct rte_flow_item *item,
6319 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
6320 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
6323 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6325 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
/* Select inner or outer headers according to the item position. */
6327 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6329 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6331 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6333 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* An ICMP6 item implies matching the IP protocol field exactly. */
6335 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6336 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
/* The default ICMP6 mask applies when the item carries none. */
6340 icmp6_m = &rte_flow_item_icmp6_mask;
6341 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
6342 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
6343 icmp6_v->type & icmp6_m->type);
6344 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
6345 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
6346 icmp6_v->code & icmp6_m->code);
6350 * Add ICMP item to matcher and to the value.
6352 * @param[in, out] matcher
6354 * @param[in, out] key
6355 * Flow matcher value.
6357 * Flow pattern to translate.
6359 * Item is inner pattern.
6362 flow_dv_translate_item_icmp(void *matcher, void *key,
6363 const struct rte_flow_item *item,
6366 const struct rte_flow_item_icmp *icmp_m = item->mask;
6367 const struct rte_flow_item_icmp *icmp_v = item->spec;
6370 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6372 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
/* Select inner or outer headers according to the item position. */
6374 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6376 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6378 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6380 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* An ICMP item implies matching the IP protocol field exactly. */
6382 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6383 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
/* The default ICMP mask applies when the item carries none. */
6387 icmp_m = &rte_flow_item_icmp_mask;
6388 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
6389 icmp_m->hdr.icmp_type);
6390 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
6391 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
6392 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
6393 icmp_m->hdr.icmp_code);
6394 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
6395 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
6399 * Add GTP item to matcher and to the value.
6401 * @param[in, out] matcher
6403 * @param[in, out] key
6404 * Flow matcher value.
6406 * Flow pattern to translate.
6408 * Item is inner pattern.
6411 flow_dv_translate_item_gtp(void *matcher, void *key,
6412 const struct rte_flow_item *item, int inner)
6414 const struct rte_flow_item_gtp *gtp_m = item->mask;
6415 const struct rte_flow_item_gtp *gtp_v = item->spec;
6418 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6420 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6421 uint16_t dport = RTE_GTPU_UDP_PORT;
/* Select inner or outer headers according to the item position. */
6424 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6426 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6428 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6430 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the UDP destination port to GTP-U unless already constrained. */
6432 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6433 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6434 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* The default GTP mask applies when the item carries none. */
6439 gtp_m = &rte_flow_item_gtp_mask;
6440 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
6441 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
6442 gtp_v->msg_type & gtp_m->msg_type);
/* TEID arrives big-endian in the pattern; convert for MLX5_SET. */
6443 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
6444 rte_be_to_cpu_32(gtp_m->teid));
6445 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
6446 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
/* All-zero reference pattern used to detect empty match criteria. */
6449 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Evaluates true when the given criteria section is all zeros. */
6451 #define HEADER_IS_ZERO(match_criteria, headers) \
6452 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
6453 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
6456 * Calculate flow matcher enable bitmap.
6458 * @param match_criteria
6459 * Pointer to flow matcher criteria.
6462 * Bitmap of enabled fields.
6465 flow_dv_matcher_enable(uint32_t *match_criteria)
6467 uint8_t match_criteria_enable;
/* Set one enable bit per criteria section that is not all zeros. */
6469 match_criteria_enable =
6470 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
6471 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
6472 match_criteria_enable |=
6473 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
6474 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
6475 match_criteria_enable |=
6476 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
6477 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
6478 match_criteria_enable |=
6479 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
6480 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
6481 match_criteria_enable |=
6482 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
6483 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
6484 return match_criteria_enable;
6491 * @param[in, out] dev
6492 * Pointer to rte_eth_dev structure.
6493 * @param[in] table_id
6496 * Direction of the table.
6497 * @param[in] transfer
6498 * E-Switch or NIC flow.
6500 * pointer to error structure.
6503 * Returns tables resource based on the index, NULL in case of failed.
6505 static struct mlx5_flow_tbl_resource *
6506 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
6507 uint32_t table_id, uint8_t egress,
6509 struct rte_flow_error *error)
6511 struct mlx5_priv *priv = dev->data->dev_private;
6512 struct mlx5_ibv_shared *sh = priv->sh;
6513 struct mlx5_flow_tbl_resource *tbl;
/* Hash key combines table id, domain (FDB/NIC) and direction. */
6514 union mlx5_flow_tbl_key table_key = {
6516 .table_id = table_id,
6518 .domain = !!transfer,
6519 .direction = !!egress,
6522 struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
6524 struct mlx5_flow_tbl_data_entry *tbl_data;
/* Fast path: table already registered — take a reference and return. */
6529 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
6531 tbl = &tbl_data->tbl;
6532 rte_atomic32_inc(&tbl->refcnt);
6535 tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
6537 rte_flow_error_set(error, ENOMEM,
6538 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6540 "cannot allocate flow table data entry");
6543 tbl = &tbl_data->tbl;
6544 pos = &tbl_data->entry;
/* Select the DR domain according to the transfer/egress flags. */
6546 domain = sh->fdb_domain;
6548 domain = sh->tx_domain;
6550 domain = sh->rx_domain;
6551 tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
6553 rte_flow_error_set(error, ENOMEM,
6554 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6555 NULL, "cannot create flow table object");
6560 * No multi-threads now, but still better to initialize the reference
6561 * count before insert it into the hash list.
6563 rte_atomic32_init(&tbl->refcnt);
6564 /* Jump action reference count is initialized here. */
6565 rte_atomic32_init(&tbl_data->jump.refcnt)
6566 pos->key = table_key.v64;
6567 ret = mlx5_hlist_insert(sh->flow_tbls, pos);
/* On insertion failure undo the DR table creation. */
6569 rte_flow_error_set(error, -ret,
6570 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6571 "cannot insert flow table data entry");
6572 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6575 rte_atomic32_inc(&tbl->refcnt);
6580 * Release a flow table.
6583 * Pointer to rte_eth_dev structure.
6585 * Table resource to be released.
6588 * Returns 0 if table was released, else return 1;
6591 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
6592 struct mlx5_flow_tbl_resource *tbl)
6594 struct mlx5_priv *priv = dev->data->dev_private;
6595 struct mlx5_ibv_shared *sh = priv->sh;
6596 struct mlx5_flow_tbl_data_entry *tbl_data =
6597 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
/* On the last reference destroy the DR table and drop the hash entry. */
6601 if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
6602 struct mlx5_hlist_entry *pos = &tbl_data->entry;
6604 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6606 /* remove the entry from the hash list and free memory. */
6607 mlx5_hlist_remove(sh->flow_tbls, pos);
6615 * Register the flow matcher.
6617 * @param[in, out] dev
6618 * Pointer to rte_eth_dev structure.
6619 * @param[in, out] matcher
6620 * Pointer to flow matcher.
6621 * @param[in, out] key
6622 * Pointer to flow table key.
6623 * @param[in, out] dev_flow
6624 * Pointer to the dev_flow.
6626 * pointer to error structure.
6629 * 0 on success otherwise -errno and errno is set.
6632 flow_dv_matcher_register(struct rte_eth_dev *dev,
6633 struct mlx5_flow_dv_matcher *matcher,
6634 union mlx5_flow_tbl_key *key,
6635 struct mlx5_flow *dev_flow,
6636 struct rte_flow_error *error)
6638 struct mlx5_priv *priv = dev->data->dev_private;
6639 struct mlx5_ibv_shared *sh = priv->sh;
6640 struct mlx5_flow_dv_matcher *cache_matcher;
6641 struct mlx5dv_flow_matcher_attr dv_attr = {
6642 .type = IBV_FLOW_ATTR_NORMAL,
6643 .match_mask = (void *)&matcher->mask,
6645 struct mlx5_flow_tbl_resource *tbl;
6646 struct mlx5_flow_tbl_data_entry *tbl_data;
/* Get/create the table; this takes a table reference on success. */
6648 tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
6649 key->domain, error);
6651 return -rte_errno; /* No need to refill the error info */
6652 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6653 /* Lookup from cache. */
6654 LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
/* Equality requires the same CRC, priority and mask contents. */
6655 if (matcher->crc == cache_matcher->crc &&
6656 matcher->priority == cache_matcher->priority &&
6657 !memcmp((const void *)matcher->mask.buf,
6658 (const void *)cache_matcher->mask.buf,
6659 cache_matcher->mask.size)) {
6661 "%s group %u priority %hd use %s "
6662 "matcher %p: refcnt %d++",
6663 key->domain ? "FDB" : "NIC", key->table_id,
6664 cache_matcher->priority,
6665 key->direction ? "tx" : "rx",
6666 (void *)cache_matcher,
6667 rte_atomic32_read(&cache_matcher->refcnt));
6668 rte_atomic32_inc(&cache_matcher->refcnt);
6669 dev_flow->dv.matcher = cache_matcher;
6670 /* old matcher should not make the table ref++. */
6671 flow_dv_tbl_resource_release(dev, tbl);
6675 /* Register new matcher. */
6676 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
6677 if (!cache_matcher) {
/* Undo the table reference taken above before bailing out. */
6678 flow_dv_tbl_resource_release(dev, tbl);
6679 return rte_flow_error_set(error, ENOMEM,
6680 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6681 "cannot allocate matcher memory");
6683 *cache_matcher = *matcher;
6684 dv_attr.match_criteria_enable =
6685 flow_dv_matcher_enable(cache_matcher->mask.buf);
6686 dv_attr.priority = matcher->priority;
6688 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
6689 cache_matcher->matcher_object =
6690 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
6691 if (!cache_matcher->matcher_object) {
6692 rte_free(cache_matcher);
6693 #ifdef HAVE_MLX5DV_DR
6694 flow_dv_tbl_resource_release(dev, tbl);
6696 return rte_flow_error_set(error, ENOMEM,
6697 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6698 NULL, "cannot create matcher");
6700 /* Save the table information */
6701 cache_matcher->tbl = tbl;
6702 rte_atomic32_init(&cache_matcher->refcnt);
6703 /* only matcher ref++, table ref++ already done above in get API. */
6704 rte_atomic32_inc(&cache_matcher->refcnt);
6705 LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
6706 dev_flow->dv.matcher = cache_matcher;
6707 DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
6708 key->domain ? "FDB" : "NIC", key->table_id,
6709 cache_matcher->priority,
6710 key->direction ? "tx" : "rx", (void *)cache_matcher,
6711 rte_atomic32_read(&cache_matcher->refcnt));
6716 * Find existing tag resource or create and register a new one.
6718 * @param dev[in, out]
6719 * Pointer to rte_eth_dev structure.
6720 * @param[in, out] tag_be24
6721 * Tag value in big endian then R-shift 8.
6722 * @param[in, out] dev_flow
6723 * Pointer to the dev_flow.
6725 * pointer to error structure.
6728 * 0 on success otherwise -errno and errno is set.
6731 flow_dv_tag_resource_register
6732 (struct rte_eth_dev *dev,
6734 struct mlx5_flow *dev_flow,
6735 struct rte_flow_error *error)
6737 struct mlx5_priv *priv = dev->data->dev_private;
6738 struct mlx5_ibv_shared *sh = priv->sh;
6739 struct mlx5_flow_dv_tag_resource *cache_resource;
6740 struct mlx5_hlist_entry *entry;
6742 /* Lookup a matching resource from cache. */
6743 entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
6745 cache_resource = container_of
6746 (entry, struct mlx5_flow_dv_tag_resource, entry);
6747 rte_atomic32_inc(&cache_resource->refcnt);
6748 dev_flow->dv.tag_resource = cache_resource;
6749 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
6750 (void *)cache_resource,
6751 rte_atomic32_read(&cache_resource->refcnt));
6754 /* Register new resource. */
6755 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
6756 if (!cache_resource)
6757 return rte_flow_error_set(error, ENOMEM,
6758 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6759 "cannot allocate resource memory");
6760 cache_resource->entry.key = (uint64_t)tag_be24;
6761 cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
6762 if (!cache_resource->action) {
6763 rte_free(cache_resource);
6764 return rte_flow_error_set(error, ENOMEM,
6765 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6766 NULL, "cannot create action");
6768 rte_atomic32_init(&cache_resource->refcnt);
6769 rte_atomic32_inc(&cache_resource->refcnt);
/* Insertion fails only if an entry with the same key already exists. */
6770 if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
6771 mlx5_glue->destroy_flow_action(cache_resource->action);
6772 rte_free(cache_resource);
6773 return rte_flow_error_set(error, EEXIST,
6774 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6775 NULL, "cannot insert tag");
6777 dev_flow->dv.tag_resource = cache_resource;
6778 DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
6779 (void *)cache_resource,
6780 rte_atomic32_read(&cache_resource->refcnt));
6788 * Pointer to Ethernet device.
6790 * Pointer to mlx5_flow.
6793 * 1 while a reference on it exists, 0 when freed.
6796 flow_dv_tag_release(struct rte_eth_dev *dev,
6797 struct mlx5_flow_dv_tag_resource *tag)
6799 struct mlx5_priv *priv = dev->data->dev_private;
6800 struct mlx5_ibv_shared *sh = priv->sh;
6803 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
6804 dev->data->port_id, (void *)tag,
6805 rte_atomic32_read(&tag->refcnt));
/* On the last reference destroy the action and drop the hash entry. */
6806 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
6807 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
6808 mlx5_hlist_remove(sh->tag_table, &tag->entry);
6809 DRV_LOG(DEBUG, "port %u tag %p: removed",
6810 dev->data->port_id, (void *)tag);
6818 * Translate port ID action to vport.
6821 * Pointer to rte_eth_dev structure.
6823 * Pointer to the port ID action.
6824 * @param[out] dst_port_id
6825 * The target port ID.
6827 * Pointer to the error structure.
6830 * 0 on success, a negative errno value otherwise and rte_errno is set.
6833 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
6834 const struct rte_flow_action *action,
6835 uint32_t *dst_port_id,
6836 struct rte_flow_error *error)
6839 struct mlx5_priv *priv;
6840 const struct rte_flow_action_port_id *conf =
6841 (const struct rte_flow_action_port_id *)action->conf;
/* "original" selects this device's own port instead of conf->id. */
6843 port = conf->original ? dev->data->port_id : conf->id;
6844 priv = mlx5_port_to_eswitch_info(port, false);
6846 return rte_flow_error_set(error, -rte_errno,
6847 RTE_FLOW_ERROR_TYPE_ACTION,
6849 "No eswitch info was found for port");
6850 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
6852 * This parameter is transferred to
6853 * mlx5dv_dr_action_create_dest_ib_port().
6855 *dst_port_id = priv->ibv_port;
6858 * Legacy mode, no LAG configuration is supported.
6859 * This parameter is transferred to
6860 * mlx5dv_dr_action_create_dest_vport().
6862 *dst_port_id = priv->vport_id;
6868 * Add Tx queue matcher
6871 * Pointer to the dev struct.
6872 * @param[in, out] matcher
6874 * @param[in, out] key
6875 * Flow matcher value.
6877 * Flow pattern to translate.
6879 * Item is inner pattern.
6882 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
6883 void *matcher, void *key,
6884 const struct rte_flow_item *item)
6886 const struct mlx5_rte_flow_item_tx_queue *queue_m;
6887 const struct mlx5_rte_flow_item_tx_queue *queue_v;
6889 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6891 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6892 struct mlx5_txq_ctrl *txq;
6896 queue_m = (const void *)item->mask;
6899 queue_v = (const void *)item->spec;
/* Map the Tx queue index to the hardware SQ number for matching. */
6902 txq = mlx5_txq_get(dev, queue_v->queue);
6905 queue = txq->obj->sq->id;
6906 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
6907 MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
6908 queue & queue_m->queue);
/* Drop the queue reference taken by mlx5_txq_get(). */
6909 mlx5_txq_release(dev, queue_v->queue);
6913 * Set the hash fields according to the @p flow information.
6915 * @param[in] dev_flow
6916 * Pointer to the mlx5_flow.
6919 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
6921 struct rte_flow *flow = dev_flow->flow;
6922 uint64_t items = dev_flow->layers;
6924 uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
6926 dev_flow->hash_fields = 0;
6927 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
/* RSS level >= 2 requests hashing on the inner (tunneled) headers. */
6928 if (flow->rss.level >= 2) {
6929 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
/* L3: pick IPv4 or IPv6 hash fields from the matched layer. */
6933 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
6934 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
6935 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
/* SRC_ONLY/DST_ONLY restrict the hash to a single address. */
6936 if (rss_types & ETH_RSS_L3_SRC_ONLY)
6937 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
6938 else if (rss_types & ETH_RSS_L3_DST_ONLY)
6939 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
6941 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
6943 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
6944 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
6945 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
6946 if (rss_types & ETH_RSS_L3_SRC_ONLY)
6947 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
6948 else if (rss_types & ETH_RSS_L3_DST_ONLY)
6949 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
6951 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
/* L4: same src/dst selection logic applied to UDP/TCP ports. */
6954 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
6955 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
6956 if (rss_types & ETH_RSS_UDP) {
6957 if (rss_types & ETH_RSS_L4_SRC_ONLY)
6958 dev_flow->hash_fields |=
6959 IBV_RX_HASH_SRC_PORT_UDP;
6960 else if (rss_types & ETH_RSS_L4_DST_ONLY)
6961 dev_flow->hash_fields |=
6962 IBV_RX_HASH_DST_PORT_UDP;
6964 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
6966 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
6967 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
6968 if (rss_types & ETH_RSS_TCP) {
6969 if (rss_types & ETH_RSS_L4_SRC_ONLY)
6970 dev_flow->hash_fields |=
6971 IBV_RX_HASH_SRC_PORT_TCP;
6972 else if (rss_types & ETH_RSS_L4_DST_ONLY)
6973 dev_flow->hash_fields |=
6974 IBV_RX_HASH_DST_PORT_TCP;
6976 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
6982 * Fill the flow with DV spec, lock free
6983 * (mutex should be acquired by caller).
6986 * Pointer to rte_eth_dev structure.
6987 * @param[in, out] dev_flow
6988 * Pointer to the sub flow.
6990 * Pointer to the flow attributes.
6992 * Pointer to the list of items.
6993 * @param[in] actions
6994 * Pointer to the list of actions.
6996 * Pointer to the error structure.
6999 * 0 on success, a negative errno value otherwise and rte_errno is set.
7002 __flow_dv_translate(struct rte_eth_dev *dev,
7003 struct mlx5_flow *dev_flow,
7004 const struct rte_flow_attr *attr,
7005 const struct rte_flow_item items[],
7006 const struct rte_flow_action actions[],
7007 struct rte_flow_error *error)
7009 struct mlx5_priv *priv = dev->data->dev_private;
7010 struct mlx5_dev_config *dev_conf = &priv->config;
7011 struct rte_flow *flow = dev_flow->flow;
7012 uint64_t item_flags = 0;
7013 uint64_t last_item = 0;
7014 uint64_t action_flags = 0;
7015 uint64_t priority = attr->priority;
7016 struct mlx5_flow_dv_matcher matcher = {
7018 .size = sizeof(matcher.mask.buf),
7022 bool actions_end = false;
7023 struct mlx5_flow_dv_modify_hdr_resource mhdr_res = {
7024 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
7025 MLX5DV_FLOW_TABLE_TYPE_NIC_RX
7027 union flow_dv_attr flow_attr = { .attr = 0 };
7029 union mlx5_flow_tbl_key tbl_key;
7030 uint32_t modify_action_position = UINT32_MAX;
7031 void *match_mask = matcher.mask.buf;
7032 void *match_value = dev_flow->dv.value.buf;
7033 uint8_t next_protocol = 0xff;
7034 struct rte_vlan_hdr vlan = { 0 };
7038 ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
7042 dev_flow->group = table;
7044 mhdr_res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
7045 if (priority == MLX5_FLOW_PRIO_RSVD)
7046 priority = dev_conf->flow_prio - 1;
7047 for (; !actions_end ; actions++) {
7048 const struct rte_flow_action_queue *queue;
7049 const struct rte_flow_action_rss *rss;
7050 const struct rte_flow_action *action = actions;
7051 const struct rte_flow_action_count *count = action->conf;
7052 const uint8_t *rss_key;
7053 const struct rte_flow_action_jump *jump_data;
7054 const struct rte_flow_action_meter *mtr;
7055 struct mlx5_flow_tbl_resource *tbl;
7056 uint32_t port_id = 0;
7057 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
7058 int action_type = actions->type;
7059 const struct rte_flow_action *found_action = NULL;
7061 switch (action_type) {
7062 case RTE_FLOW_ACTION_TYPE_VOID:
7064 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7065 if (flow_dv_translate_action_port_id(dev, action,
7068 port_id_resource.port_id = port_id;
7069 if (flow_dv_port_id_action_resource_register
7070 (dev, &port_id_resource, dev_flow, error))
7072 dev_flow->dv.actions[actions_n++] =
7073 dev_flow->dv.port_id_action->action;
7074 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7076 case RTE_FLOW_ACTION_TYPE_FLAG:
7077 action_flags |= MLX5_FLOW_ACTION_FLAG;
7078 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7079 struct rte_flow_action_mark mark = {
7080 .id = MLX5_FLOW_MARK_DEFAULT,
7083 if (flow_dv_convert_action_mark(dev, &mark,
7087 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7090 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
7091 if (!dev_flow->dv.tag_resource)
7092 if (flow_dv_tag_resource_register
7093 (dev, tag_be, dev_flow, error))
7095 dev_flow->dv.actions[actions_n++] =
7096 dev_flow->dv.tag_resource->action;
7098 case RTE_FLOW_ACTION_TYPE_MARK:
7099 action_flags |= MLX5_FLOW_ACTION_MARK;
7100 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7101 const struct rte_flow_action_mark *mark =
7102 (const struct rte_flow_action_mark *)
7105 if (flow_dv_convert_action_mark(dev, mark,
7109 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7113 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7114 /* Legacy (non-extensive) MARK action. */
7115 tag_be = mlx5_flow_mark_set
7116 (((const struct rte_flow_action_mark *)
7117 (actions->conf))->id);
7118 if (!dev_flow->dv.tag_resource)
7119 if (flow_dv_tag_resource_register
7120 (dev, tag_be, dev_flow, error))
7122 dev_flow->dv.actions[actions_n++] =
7123 dev_flow->dv.tag_resource->action;
7125 case RTE_FLOW_ACTION_TYPE_SET_META:
7126 if (flow_dv_convert_action_set_meta
7127 (dev, &mhdr_res, attr,
7128 (const struct rte_flow_action_set_meta *)
7129 actions->conf, error))
7131 action_flags |= MLX5_FLOW_ACTION_SET_META;
7133 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7134 if (flow_dv_convert_action_set_tag
7136 (const struct rte_flow_action_set_tag *)
7137 actions->conf, error))
7139 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7141 case RTE_FLOW_ACTION_TYPE_DROP:
7142 action_flags |= MLX5_FLOW_ACTION_DROP;
7144 case RTE_FLOW_ACTION_TYPE_QUEUE:
7145 assert(flow->rss.queue);
7146 queue = actions->conf;
7147 flow->rss.queue_num = 1;
7148 (*flow->rss.queue)[0] = queue->index;
7149 action_flags |= MLX5_FLOW_ACTION_QUEUE;
7151 case RTE_FLOW_ACTION_TYPE_RSS:
7152 assert(flow->rss.queue);
7153 rss = actions->conf;
7154 if (flow->rss.queue)
7155 memcpy((*flow->rss.queue), rss->queue,
7156 rss->queue_num * sizeof(uint16_t));
7157 flow->rss.queue_num = rss->queue_num;
7158 /* NULL RSS key indicates default RSS key. */
7159 rss_key = !rss->key ? rss_hash_default_key : rss->key;
7160 memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
7162 * rss->level and rss.types should be set in advance
7163 * when expanding items for RSS.
7165 action_flags |= MLX5_FLOW_ACTION_RSS;
7167 case RTE_FLOW_ACTION_TYPE_COUNT:
7168 if (!dev_conf->devx) {
7169 rte_errno = ENOTSUP;
7172 flow->counter = flow_dv_counter_alloc(dev,
7176 if (flow->counter == NULL)
7178 dev_flow->dv.actions[actions_n++] =
7179 flow->counter->action;
7180 action_flags |= MLX5_FLOW_ACTION_COUNT;
7183 if (rte_errno == ENOTSUP)
7184 return rte_flow_error_set
7186 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7188 "count action not supported");
7190 return rte_flow_error_set
7192 RTE_FLOW_ERROR_TYPE_ACTION,
7194 "cannot create counter"
7197 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7198 dev_flow->dv.actions[actions_n++] =
7199 priv->sh->pop_vlan_action;
7200 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7202 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7203 flow_dev_get_vlan_info_from_items(items, &vlan);
7204 vlan.eth_proto = rte_be_to_cpu_16
7205 ((((const struct rte_flow_action_of_push_vlan *)
7206 actions->conf)->ethertype));
7207 found_action = mlx5_flow_find_action
7209 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
7211 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7212 found_action = mlx5_flow_find_action
7214 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
7216 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7217 if (flow_dv_create_action_push_vlan
7218 (dev, attr, &vlan, dev_flow, error))
7220 dev_flow->dv.actions[actions_n++] =
7221 dev_flow->dv.push_vlan_res->action;
7222 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7224 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7225 /* of_vlan_push action handled this action */
7226 assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
7228 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7229 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7231 flow_dev_get_vlan_info_from_items(items, &vlan);
7232 mlx5_update_vlan_vid_pcp(actions, &vlan);
7233 /* If no VLAN push - this is a modify header action */
7234 if (flow_dv_convert_action_modify_vlan_vid
7235 (&mhdr_res, actions, error))
7237 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7239 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7240 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7241 if (flow_dv_create_action_l2_encap(dev, actions,
7246 dev_flow->dv.actions[actions_n++] =
7247 dev_flow->dv.encap_decap->verbs_action;
7248 action_flags |= actions->type ==
7249 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
7250 MLX5_FLOW_ACTION_VXLAN_ENCAP :
7251 MLX5_FLOW_ACTION_NVGRE_ENCAP;
7253 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7254 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7255 if (flow_dv_create_action_l2_decap(dev, dev_flow,
7259 dev_flow->dv.actions[actions_n++] =
7260 dev_flow->dv.encap_decap->verbs_action;
7261 action_flags |= actions->type ==
7262 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7263 MLX5_FLOW_ACTION_VXLAN_DECAP :
7264 MLX5_FLOW_ACTION_NVGRE_DECAP;
7266 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7267 /* Handle encap with preceding decap. */
7268 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
7269 if (flow_dv_create_action_raw_encap
7270 (dev, actions, dev_flow, attr, error))
7272 dev_flow->dv.actions[actions_n++] =
7273 dev_flow->dv.encap_decap->verbs_action;
7275 /* Handle encap without preceding decap. */
7276 if (flow_dv_create_action_l2_encap
7277 (dev, actions, dev_flow, attr->transfer,
7280 dev_flow->dv.actions[actions_n++] =
7281 dev_flow->dv.encap_decap->verbs_action;
7283 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
7285 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7286 /* Check if this decap is followed by encap. */
7287 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
7288 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
7291 /* Handle decap only if it isn't followed by encap. */
7292 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7293 if (flow_dv_create_action_l2_decap
7294 (dev, dev_flow, attr->transfer, error))
7296 dev_flow->dv.actions[actions_n++] =
7297 dev_flow->dv.encap_decap->verbs_action;
7299 /* If decap is followed by encap, handle it at encap. */
7300 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
7302 case RTE_FLOW_ACTION_TYPE_JUMP:
7303 jump_data = action->conf;
7304 ret = mlx5_flow_group_to_table(attr, dev_flow->external,
7305 jump_data->group, &table,
7309 tbl = flow_dv_tbl_resource_get(dev, table,
7311 attr->transfer, error);
7313 return rte_flow_error_set
7315 RTE_FLOW_ERROR_TYPE_ACTION,
7317 "cannot create jump action.");
7318 if (flow_dv_jump_tbl_resource_register
7319 (dev, tbl, dev_flow, error)) {
7320 flow_dv_tbl_resource_release(dev, tbl);
7321 return rte_flow_error_set
7323 RTE_FLOW_ERROR_TYPE_ACTION,
7325 "cannot create jump action.");
7327 dev_flow->dv.actions[actions_n++] =
7328 dev_flow->dv.jump->action;
7329 action_flags |= MLX5_FLOW_ACTION_JUMP;
7331 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7332 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7333 if (flow_dv_convert_action_modify_mac
7334 (&mhdr_res, actions, error))
7336 action_flags |= actions->type ==
7337 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7338 MLX5_FLOW_ACTION_SET_MAC_SRC :
7339 MLX5_FLOW_ACTION_SET_MAC_DST;
7341 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7342 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7343 if (flow_dv_convert_action_modify_ipv4
7344 (&mhdr_res, actions, error))
7346 action_flags |= actions->type ==
7347 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7348 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7349 MLX5_FLOW_ACTION_SET_IPV4_DST;
7351 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7352 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7353 if (flow_dv_convert_action_modify_ipv6
7354 (&mhdr_res, actions, error))
7356 action_flags |= actions->type ==
7357 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7358 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7359 MLX5_FLOW_ACTION_SET_IPV6_DST;
7361 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7362 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7363 if (flow_dv_convert_action_modify_tp
7364 (&mhdr_res, actions, items,
7367 action_flags |= actions->type ==
7368 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7369 MLX5_FLOW_ACTION_SET_TP_SRC :
7370 MLX5_FLOW_ACTION_SET_TP_DST;
7372 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7373 if (flow_dv_convert_action_modify_dec_ttl
7374 (&mhdr_res, items, &flow_attr, error))
7376 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
7378 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7379 if (flow_dv_convert_action_modify_ttl
7380 (&mhdr_res, actions, items,
7383 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
7385 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7386 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7387 if (flow_dv_convert_action_modify_tcp_seq
7388 (&mhdr_res, actions, error))
7390 action_flags |= actions->type ==
7391 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7392 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7393 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7396 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7397 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7398 if (flow_dv_convert_action_modify_tcp_ack
7399 (&mhdr_res, actions, error))
7401 action_flags |= actions->type ==
7402 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7403 MLX5_FLOW_ACTION_INC_TCP_ACK :
7404 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7406 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7407 if (flow_dv_convert_action_set_reg
7408 (&mhdr_res, actions, error))
7410 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7412 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7413 if (flow_dv_convert_action_copy_mreg
7414 (dev, &mhdr_res, actions, error))
7416 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7418 case RTE_FLOW_ACTION_TYPE_METER:
7419 mtr = actions->conf;
7421 flow->meter = mlx5_flow_meter_attach(priv,
7425 return rte_flow_error_set(error,
7427 RTE_FLOW_ERROR_TYPE_ACTION,
7430 "or invalid parameters");
7432 /* Set the meter action. */
7433 dev_flow->dv.actions[actions_n++] =
7434 flow->meter->mfts->meter_action;
7435 action_flags |= MLX5_FLOW_ACTION_METER;
7437 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7438 if (flow_dv_convert_action_modify_ipv4_dscp(&mhdr_res,
7441 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7443 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7444 if (flow_dv_convert_action_modify_ipv6_dscp(&mhdr_res,
7447 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7449 case RTE_FLOW_ACTION_TYPE_END:
7451 if (mhdr_res.actions_num) {
7452 /* create modify action if needed. */
7453 if (flow_dv_modify_hdr_resource_register
7454 (dev, &mhdr_res, dev_flow, error))
7456 dev_flow->dv.actions[modify_action_position] =
7457 dev_flow->dv.modify_hdr->verbs_action;
7463 if (mhdr_res.actions_num &&
7464 modify_action_position == UINT32_MAX)
7465 modify_action_position = actions_n++;
7467 dev_flow->dv.actions_n = actions_n;
7468 dev_flow->actions = action_flags;
7469 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7470 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7471 int item_type = items->type;
7473 switch (item_type) {
7474 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7475 flow_dv_translate_item_port_id(dev, match_mask,
7476 match_value, items);
7477 last_item = MLX5_FLOW_ITEM_PORT_ID;
7479 case RTE_FLOW_ITEM_TYPE_ETH:
7480 flow_dv_translate_item_eth(match_mask, match_value,
7482 matcher.priority = MLX5_PRIORITY_MAP_L2;
7483 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7484 MLX5_FLOW_LAYER_OUTER_L2;
7486 case RTE_FLOW_ITEM_TYPE_VLAN:
7487 flow_dv_translate_item_vlan(dev_flow,
7488 match_mask, match_value,
7490 matcher.priority = MLX5_PRIORITY_MAP_L2;
7491 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
7492 MLX5_FLOW_LAYER_INNER_VLAN) :
7493 (MLX5_FLOW_LAYER_OUTER_L2 |
7494 MLX5_FLOW_LAYER_OUTER_VLAN);
7496 case RTE_FLOW_ITEM_TYPE_IPV4:
7497 mlx5_flow_tunnel_ip_check(items, next_protocol,
7498 &item_flags, &tunnel);
7499 flow_dv_translate_item_ipv4(match_mask, match_value,
7502 matcher.priority = MLX5_PRIORITY_MAP_L3;
7503 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7504 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7505 if (items->mask != NULL &&
7506 ((const struct rte_flow_item_ipv4 *)
7507 items->mask)->hdr.next_proto_id) {
7509 ((const struct rte_flow_item_ipv4 *)
7510 (items->spec))->hdr.next_proto_id;
7512 ((const struct rte_flow_item_ipv4 *)
7513 (items->mask))->hdr.next_proto_id;
7515 /* Reset for inner layer. */
7516 next_protocol = 0xff;
7519 case RTE_FLOW_ITEM_TYPE_IPV6:
7520 mlx5_flow_tunnel_ip_check(items, next_protocol,
7521 &item_flags, &tunnel);
7522 flow_dv_translate_item_ipv6(match_mask, match_value,
7525 matcher.priority = MLX5_PRIORITY_MAP_L3;
7526 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7527 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7528 if (items->mask != NULL &&
7529 ((const struct rte_flow_item_ipv6 *)
7530 items->mask)->hdr.proto) {
7532 ((const struct rte_flow_item_ipv6 *)
7533 items->spec)->hdr.proto;
7535 ((const struct rte_flow_item_ipv6 *)
7536 items->mask)->hdr.proto;
7538 /* Reset for inner layer. */
7539 next_protocol = 0xff;
7542 case RTE_FLOW_ITEM_TYPE_TCP:
7543 flow_dv_translate_item_tcp(match_mask, match_value,
7545 matcher.priority = MLX5_PRIORITY_MAP_L4;
7546 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7547 MLX5_FLOW_LAYER_OUTER_L4_TCP;
7549 case RTE_FLOW_ITEM_TYPE_UDP:
7550 flow_dv_translate_item_udp(match_mask, match_value,
7552 matcher.priority = MLX5_PRIORITY_MAP_L4;
7553 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7554 MLX5_FLOW_LAYER_OUTER_L4_UDP;
7556 case RTE_FLOW_ITEM_TYPE_GRE:
7557 flow_dv_translate_item_gre(match_mask, match_value,
7559 last_item = MLX5_FLOW_LAYER_GRE;
7561 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7562 flow_dv_translate_item_gre_key(match_mask,
7563 match_value, items);
7564 last_item = MLX5_FLOW_LAYER_GRE_KEY;
7566 case RTE_FLOW_ITEM_TYPE_NVGRE:
7567 flow_dv_translate_item_nvgre(match_mask, match_value,
7569 last_item = MLX5_FLOW_LAYER_GRE;
7571 case RTE_FLOW_ITEM_TYPE_VXLAN:
7572 flow_dv_translate_item_vxlan(match_mask, match_value,
7574 last_item = MLX5_FLOW_LAYER_VXLAN;
7576 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7577 flow_dv_translate_item_vxlan(match_mask, match_value,
7579 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7581 case RTE_FLOW_ITEM_TYPE_GENEVE:
7582 flow_dv_translate_item_geneve(match_mask, match_value,
7584 last_item = MLX5_FLOW_LAYER_GENEVE;
7586 case RTE_FLOW_ITEM_TYPE_MPLS:
7587 flow_dv_translate_item_mpls(match_mask, match_value,
7588 items, last_item, tunnel);
7589 last_item = MLX5_FLOW_LAYER_MPLS;
7591 case RTE_FLOW_ITEM_TYPE_MARK:
7592 flow_dv_translate_item_mark(dev, match_mask,
7593 match_value, items);
7594 last_item = MLX5_FLOW_ITEM_MARK;
7596 case RTE_FLOW_ITEM_TYPE_META:
7597 flow_dv_translate_item_meta(dev, match_mask,
7598 match_value, attr, items);
7599 last_item = MLX5_FLOW_ITEM_METADATA;
7601 case RTE_FLOW_ITEM_TYPE_ICMP:
7602 flow_dv_translate_item_icmp(match_mask, match_value,
7604 last_item = MLX5_FLOW_LAYER_ICMP;
7606 case RTE_FLOW_ITEM_TYPE_ICMP6:
7607 flow_dv_translate_item_icmp6(match_mask, match_value,
7609 last_item = MLX5_FLOW_LAYER_ICMP6;
7611 case RTE_FLOW_ITEM_TYPE_TAG:
7612 flow_dv_translate_item_tag(dev, match_mask,
7613 match_value, items);
7614 last_item = MLX5_FLOW_ITEM_TAG;
7616 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7617 flow_dv_translate_mlx5_item_tag(dev, match_mask,
7618 match_value, items);
7619 last_item = MLX5_FLOW_ITEM_TAG;
7621 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7622 flow_dv_translate_item_tx_queue(dev, match_mask,
7625 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7627 case RTE_FLOW_ITEM_TYPE_GTP:
7628 flow_dv_translate_item_gtp(match_mask, match_value,
7630 last_item = MLX5_FLOW_LAYER_GTP;
7635 item_flags |= last_item;
7638 * In case of ingress traffic when E-Switch mode is enabled,
7639 * we have two cases where we need to set the source port manually.
7640 * The first one, is in case of Nic steering rule, and the second is
7641 * E-Switch rule where no port_id item was found. In both cases
7642 * the source port is set according the current port in use.
7644 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
7645 (priv->representor || priv->master)) {
7646 if (flow_dv_translate_item_port_id(dev, match_mask,
7650 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
7651 dev_flow->dv.value.buf));
7652 dev_flow->layers = item_flags;
7653 if (action_flags & MLX5_FLOW_ACTION_RSS)
7654 flow_dv_hashfields_set(dev_flow);
7655 /* Register matcher. */
7656 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
7658 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
7660 /* The reserved field does not need to be set to 0 here. */
7661 tbl_key.domain = attr->transfer;
7662 tbl_key.direction = attr->egress;
7663 tbl_key.table_id = dev_flow->group;
7664 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
7670 * Apply the flow to the NIC, lock free,
7671 * (mutex should be acquired by caller).
7674 * Pointer to the Ethernet device structure.
7675 * @param[in, out] flow
7676 * Pointer to flow structure.
7678 * Pointer to error structure.
7681 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Apply (create in hardware) every DV sub-flow of @flow: resolve the fate
 * action (drop / queue / RSS hash Rx queue), create the DV flow rule via
 * the glue layer, then handle the VF VLAN workaround.  On failure, all
 * already-acquired hash Rx queues and VF VLAN interfaces are rolled back.
 * NOTE(review): this chunk is a partial extraction — several statements
 * (braces, error returns) are not visible; comments describe visible code only.
 */
7684 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
7685 struct rte_flow_error *error)
7687 struct mlx5_flow_dv *dv;
7688 struct mlx5_flow *dev_flow;
7689 struct mlx5_priv *priv = dev->data->dev_private;
/* Walk all device sub-flows attached to this rte_flow. */
7693 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7696 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
/* Transfer (E-Switch) rules use the shared esw drop action ... */
7697 if (dev_flow->transfer) {
7698 dv->actions[n++] = priv->sh->esw_drop_action;
/* ... NIC rules need a dedicated drop hash Rx queue. */
7700 dv->hrxq = mlx5_hrxq_drop_new(dev);
7704 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7706 "cannot get drop hash queue");
7709 dv->actions[n++] = dv->hrxq->action;
7711 } else if (dev_flow->actions &
7712 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
7713 struct mlx5_hrxq *hrxq;
7715 assert(flow->rss.queue);
/* Reuse an existing hash Rx queue matching key/fields/queues. */
7716 hrxq = mlx5_hrxq_get(dev, flow->rss.key,
7717 MLX5_RSS_HASH_KEY_LEN,
7718 dev_flow->hash_fields,
7720 flow->rss.queue_num);
/* None cached: create a new one (tunnel flag from flow layers). */
7722 hrxq = mlx5_hrxq_new
7723 (dev, flow->rss.key,
7724 MLX5_RSS_HASH_KEY_LEN,
7725 dev_flow->hash_fields,
7727 flow->rss.queue_num,
7728 !!(dev_flow->layers &
7729 MLX5_FLOW_LAYER_TUNNEL));
7734 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7735 "cannot get hash queue");
7739 dv->actions[n++] = dv->hrxq->action;
/* Create the actual DV flow rule from matcher + value + actions. */
7742 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
7743 (void *)&dv->value, n,
7746 rte_flow_error_set(error, errno,
7747 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7749 "hardware refuses to create flow");
7752 if (priv->vmwa_context &&
7753 dev_flow->dv.vf_vlan.tag &&
7754 !dev_flow->dv.vf_vlan.created) {
7756 * The rule contains the VLAN pattern.
7757 * For VF we are going to create VLAN
7758 * interface to make hypervisor set correct
7759 * e-Switch vport context.
7761 mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
/* Error path: undo partially acquired resources for every sub-flow. */
7766 err = rte_errno; /* Save rte_errno before cleanup. */
7767 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7768 struct mlx5_flow_dv *dv = &dev_flow->dv;
7770 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7771 mlx5_hrxq_drop_release(dev);
7773 mlx5_hrxq_release(dev, dv->hrxq);
7776 if (dev_flow->dv.vf_vlan.tag &&
7777 dev_flow->dv.vf_vlan.created)
7778 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7780 rte_errno = err; /* Restore rte_errno. */
7785 * Release the flow matcher.
7788 * Pointer to Ethernet device.
7790 * Pointer to mlx5_flow.
7793 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow matcher attached to @flow.  When the
 * refcount reaches zero the matcher object is destroyed, the matcher is
 * unlinked from its cache list and the owning table reference is dropped.
 * Returns 1 while references remain, 0 when freed (per header comment).
 */
7796 flow_dv_matcher_release(struct rte_eth_dev *dev,
7797 struct mlx5_flow *flow)
7799 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
7801 assert(matcher->matcher_object);
7802 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
7803 dev->data->port_id, (void *)matcher,
7804 rte_atomic32_read(&matcher->refcnt));
/* Last reference: destroy HW object and release the cache entry. */
7805 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
7806 claim_zero(mlx5_glue->dv_destroy_flow_matcher
7807 (matcher->matcher_object));
7808 LIST_REMOVE(matcher, next);
7809 /* table ref-- in release interface. */
7810 flow_dv_tbl_resource_release(dev, matcher->tbl);
7812 DRV_LOG(DEBUG, "port %u matcher %p: removed",
7813 dev->data->port_id, (void *)matcher);
7820 * Release an encap/decap resource.
7823 * Pointer to mlx5_flow.
7826 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached encap/decap action resource.
 * On the last reference the verbs action is destroyed and the cache
 * entry is unlinked and freed.
 */
7829 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
7831 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
7832 flow->dv.encap_decap;
7834 assert(cache_resource->verbs_action);
7835 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
7836 (void *)cache_resource,
7837 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the verbs action and free the cache entry. */
7838 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7839 claim_zero(mlx5_glue->destroy_flow_action
7840 (cache_resource->verbs_action));
7841 LIST_REMOVE(cache_resource, next);
7842 rte_free(cache_resource);
7843 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
7844 (void *)cache_resource);
7851 * Release a jump-to-table action resource.
7854 * Pointer to Ethernet device.
7856 * Pointer to mlx5_flow.
7859 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's jump-to-table action resource.  The
 * jump resource is embedded in its table data entry (recovered here via
 * container_of), so on the last reference the action is destroyed and the
 * memory is reclaimed by releasing the owning table — no explicit free.
 */
7862 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
7863 struct mlx5_flow *flow)
7865 struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
7866 struct mlx5_flow_tbl_data_entry *tbl_data =
7867 container_of(cache_resource,
7868 struct mlx5_flow_tbl_data_entry, jump);
7870 assert(cache_resource->action);
7871 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
7872 (void *)cache_resource,
7873 rte_atomic32_read(&cache_resource->refcnt));
7874 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7875 claim_zero(mlx5_glue->destroy_flow_action
7876 (cache_resource->action));
7877 /* jump action memory free is inside the table release. */
7878 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
7879 DRV_LOG(DEBUG, "jump table resource %p: removed",
7880 (void *)cache_resource);
7887 * Release a modify-header resource.
7890 * Pointer to mlx5_flow.
7893 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached modify-header action resource.
 * On the last reference the verbs action is destroyed and the cache
 * entry is unlinked and freed.
 */
7896 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
7898 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
7899 flow->dv.modify_hdr;
7901 assert(cache_resource->verbs_action);
7902 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
7903 (void *)cache_resource,
7904 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the verbs action and free the cache entry. */
7905 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7906 claim_zero(mlx5_glue->destroy_flow_action
7907 (cache_resource->verbs_action));
7908 LIST_REMOVE(cache_resource, next);
7909 rte_free(cache_resource);
7910 DRV_LOG(DEBUG, "modify-header resource %p: removed",
7911 (void *)cache_resource);
7918 * Release port ID action resource.
7921 * Pointer to mlx5_flow.
7924 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached port-ID action resource.
 * On the last reference the flow action is destroyed and the cache
 * entry is unlinked and freed.
 */
7927 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
7929 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
7930 flow->dv.port_id_action;
7932 assert(cache_resource->action);
7933 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
7934 (void *)cache_resource,
7935 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the action and free the cache entry. */
7936 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7937 claim_zero(mlx5_glue->destroy_flow_action
7938 (cache_resource->action));
7939 LIST_REMOVE(cache_resource, next);
7940 rte_free(cache_resource);
7941 DRV_LOG(DEBUG, "port id action resource %p: removed",
7942 (void *)cache_resource);
7949 * Release push vlan action resource.
7952 * Pointer to mlx5_flow.
7955 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached push-VLAN action resource.
 * On the last reference the flow action is destroyed and the cache
 * entry is unlinked and freed.
 */
7958 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
7960 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
7961 flow->dv.push_vlan_res;
7963 assert(cache_resource->action);
7964 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
7965 (void *)cache_resource,
7966 rte_atomic32_read(&cache_resource->refcnt));
/* Last reference: destroy the action and free the cache entry. */
7967 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7968 claim_zero(mlx5_glue->destroy_flow_action
7969 (cache_resource->action));
7970 LIST_REMOVE(cache_resource, next);
7971 rte_free(cache_resource);
7972 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
7973 (void *)cache_resource);
7980 * Remove the flow from the NIC but keeps it in memory.
7981 * Lock free, (mutex should be acquired by caller).
7984 * Pointer to Ethernet device.
7985 * @param[in, out] flow
7986 * Pointer to flow structure.
/*
 * Detach @flow from hardware without freeing its memory: destroy each
 * sub-flow's DV rule, release its hash Rx queue (drop or regular) and
 * release the VF VLAN interface if one was created for it.
 */
7989 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
7991 struct mlx5_flow_dv *dv;
7992 struct mlx5_flow *dev_flow;
7996 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
/* Destroy the HW rule for this sub-flow. */
7999 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
8003 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
8004 mlx5_hrxq_drop_release(dev);
8006 mlx5_hrxq_release(dev, dv->hrxq);
8009 if (dev_flow->dv.vf_vlan.tag &&
8010 dev_flow->dv.vf_vlan.created)
8011 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
8016 * Remove the flow from the NIC and the memory.
8017 * Lock free, (mutex should be acquired by caller).
8020 * Pointer to the Ethernet device structure.
8021 * @param[in, out] flow
8022 * Pointer to flow structure.
/*
 * Fully destroy @flow: remove it from hardware, release its counter and
 * meter, then pop and free each sub-flow while dropping every cached
 * resource it references (matcher, encap/decap, modify-header, jump,
 * port-ID, push-VLAN, tag).
 */
8025 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8027 struct mlx5_flow *dev_flow;
/* Detach from HW first (rules, hrxqs, VF VLAN). */
8031 __flow_dv_remove(dev, flow);
8032 if (flow->counter) {
8033 flow_dv_counter_release(dev, flow->counter);
8034 flow->counter = NULL;
8037 mlx5_flow_meter_detach(flow->meter);
/* Release per-sub-flow cached resources; each release is refcounted. */
8040 while (!LIST_EMPTY(&flow->dev_flows)) {
8041 dev_flow = LIST_FIRST(&flow->dev_flows);
8042 LIST_REMOVE(dev_flow, next);
8043 if (dev_flow->dv.matcher)
8044 flow_dv_matcher_release(dev, dev_flow);
8045 if (dev_flow->dv.encap_decap)
8046 flow_dv_encap_decap_resource_release(dev_flow);
8047 if (dev_flow->dv.modify_hdr)
8048 flow_dv_modify_hdr_resource_release(dev_flow);
8049 if (dev_flow->dv.jump)
8050 flow_dv_jump_tbl_resource_release(dev, dev_flow);
8051 if (dev_flow->dv.port_id_action)
8052 flow_dv_port_id_action_resource_release(dev_flow);
8053 if (dev_flow->dv.push_vlan_res)
8054 flow_dv_push_vlan_action_resource_release(dev_flow);
8055 if (dev_flow->dv.tag_resource)
8056 flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
8062 * Query a dv flow rule for its statistics via devx.
8065 * Pointer to Ethernet device.
8067 * Pointer to the sub flow.
8069 * data retrieved by the query.
8071 * Perform verbose error reporting if not NULL.
8074 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query the DevX counter attached to @flow and report the delta since the
 * last query in @data (struct rte_flow_query_count).  Requires DevX
 * support; fails with ENOTSUP otherwise and EINVAL when the flow has no
 * counter.  The cached hits/bytes baselines are updated (visible only
 * when the reset branch executes; surrounding lines are missing in this
 * extraction — NOTE(review): reset condition not visible here).
 */
8077 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
8078 void *data, struct rte_flow_error *error)
8080 struct mlx5_priv *priv = dev->data->dev_private;
8081 struct rte_flow_query_count *qc = data;
8083 if (!priv->config.devx)
8084 return rte_flow_error_set(error, ENOTSUP,
8085 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8087 "counters are not supported");
8088 if (flow->counter) {
8089 uint64_t pkts, bytes;
/* Read raw totals from the device counter. */
8090 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
8094 return rte_flow_error_set(error, -err,
8095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8096 NULL, "cannot read counters");
/* Report deltas relative to the last stored baseline. */
8099 qc->hits = pkts - flow->counter->hits;
8100 qc->bytes = bytes - flow->counter->bytes;
8102 flow->counter->hits = pkts;
8103 flow->counter->bytes = bytes;
8107 return rte_flow_error_set(error, EINVAL,
8108 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8110 "counters are not available");
8116 * @see rte_flow_query()
/*
 * rte_flow query entry point for the DV driver: iterate the query action
 * list, dispatching COUNT to flow_dv_query_count(), ignoring VOID and
 * rejecting anything else with ENOTSUP.
 */
8120 flow_dv_query(struct rte_eth_dev *dev,
8121 struct rte_flow *flow __rte_unused,
8122 const struct rte_flow_action *actions __rte_unused,
8123 void *data __rte_unused,
8124 struct rte_flow_error *error __rte_unused)
8128 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8129 switch (actions->type) {
8130 case RTE_FLOW_ACTION_TYPE_VOID:
8132 case RTE_FLOW_ACTION_TYPE_COUNT:
8133 ret = flow_dv_query_count(dev, flow, data, error);
8136 return rte_flow_error_set(error, ENOTSUP,
8137 RTE_FLOW_ERROR_TYPE_ACTION,
8139 "action not supported");
8146 * Destroy the meter table set.
8147 * Lock free, (mutex should be acquired by caller).
8150 * Pointer to Ethernet device.
8152 * Pointer to the meter table set.
/*
 * Tear down a meter table set: for each domain (ingress, egress,
 * transfer) destroy the default DROPPED policer rule, the color and
 * any-match matchers and the meter table itself, then the shared drop
 * action.  Safe no-op when @tbl is NULL or DV flows are disabled.
 */
8158 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
8159 struct mlx5_meter_domains_infos *tbl)
8161 struct mlx5_priv *priv = dev->data->dev_private;
8162 struct mlx5_meter_domains_infos *mtd =
8163 (struct mlx5_meter_domains_infos *)tbl;
8165 if (!mtd || !priv->config.dv_flow_en)
/* Destroy the per-domain default drop rules first. */
8167 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
8168 claim_zero(mlx5_glue->dv_destroy_flow
8169 (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
8170 if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
8171 claim_zero(mlx5_glue->dv_destroy_flow
8172 (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
8173 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
8174 claim_zero(mlx5_glue->dv_destroy_flow
8175 (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
/* Then matchers and tables, domain by domain. */
8176 if (mtd->egress.color_matcher)
8177 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8178 (mtd->egress.color_matcher));
8179 if (mtd->egress.any_matcher)
8180 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8181 (mtd->egress.any_matcher));
8182 if (mtd->egress.tbl)
8183 claim_zero(flow_dv_tbl_resource_release(dev,
8185 if (mtd->ingress.color_matcher)
8186 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8187 (mtd->ingress.color_matcher));
8188 if (mtd->ingress.any_matcher)
8189 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8190 (mtd->ingress.any_matcher));
8191 if (mtd->ingress.tbl)
8192 claim_zero(flow_dv_tbl_resource_release(dev,
8194 if (mtd->transfer.color_matcher)
8195 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8196 (mtd->transfer.color_matcher));
8197 if (mtd->transfer.any_matcher)
8198 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8199 (mtd->transfer.any_matcher));
8200 if (mtd->transfer.tbl)
8201 claim_zero(flow_dv_tbl_resource_release(dev,
8202 mtd->transfer.tbl));
/* Finally the shared drop action. */
8204 claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
8209 /* Number of meter flow actions, count and jump or count and drop. */
8210 #define METER_ACTIONS 2
8213 * Create specify domain meter table and suffix table.
8216 * Pointer to Ethernet device.
8217 * @param[in,out] mtb
8218 * Pointer to DV meter table set.
8221 * @param[in] transfer
8223 * @param[in] color_reg_c_idx
8224 * Reg C index for color match.
8227 * 0 on success, -1 otherwise and rte_errno is set.
/*
 * Build the meter infrastructure for one domain (selected by
 * @egress/@transfer): ensure the shared suffix table exists, create the
 * METER-level table, an any-match matcher (lowest priority) and a color
 * matcher on the given REG_C index, then install the default
 * lowest-priority rule that counts (if available) and drops.
 * Returns 0 on success, -1 otherwise (per header comment).
 */
8230 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
8231 struct mlx5_meter_domains_infos *mtb,
8232 uint8_t egress, uint8_t transfer,
8233 uint32_t color_reg_c_idx)
8235 struct mlx5_priv *priv = dev->data->dev_private;
8236 struct mlx5_ibv_shared *sh = priv->sh;
8237 struct mlx5_flow_dv_match_params mask = {
8238 .size = sizeof(mask.buf),
8240 struct mlx5_flow_dv_match_params value = {
8241 .size = sizeof(value.buf),
8243 struct mlx5dv_flow_matcher_attr dv_attr = {
8244 .type = IBV_FLOW_ATTR_NORMAL,
8246 .match_criteria_enable = 0,
8247 .match_mask = (void *)&mask,
8249 void *actions[METER_ACTIONS];
8250 struct mlx5_flow_tbl_resource **sfx_tbl;
8251 struct mlx5_meter_domain_info *dtb;
8252 struct rte_flow_error error;
/* Select the domain-specific suffix table slot and table-set entry. */
8256 sfx_tbl = &sh->fdb_mtr_sfx_tbl;
8257 dtb = &mtb->transfer;
8258 } else if (egress) {
8259 sfx_tbl = &sh->tx_mtr_sfx_tbl;
8262 sfx_tbl = &sh->rx_mtr_sfx_tbl;
8263 dtb = &mtb->ingress;
8265 /* If the suffix table is missing, create it. */
8267 *sfx_tbl = flow_dv_tbl_resource_get(dev,
8268 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
8269 egress, transfer, &error);
8271 DRV_LOG(ERR, "Failed to create meter suffix table.");
8275 /* Create the meter table with METER level. */
8276 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
8277 egress, transfer, &error);
8279 DRV_LOG(ERR, "Failed to create meter policer table.");
8282 /* Create matchers, Any and Color. */
8283 dv_attr.priority = 3;
8284 dv_attr.match_criteria_enable = 0;
8285 dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8288 if (!dtb->any_matcher) {
8289 DRV_LOG(ERR, "Failed to create meter"
8290 " policer default matcher.");
/* Color matcher: highest priority, match on the color REG_C bits. */
8293 dv_attr.priority = 0;
8294 dv_attr.match_criteria_enable =
8295 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8296 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
8297 rte_col_2_mlx5_col(RTE_COLORS), UINT32_MAX);
8298 dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8301 if (!dtb->color_matcher) {
8302 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
/* Default rule actions: optional drop-counter, then drop. */
8305 if (mtb->count_actns[RTE_MTR_DROPPED])
8306 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
8307 actions[i++] = mtb->drop_actn;
8308 /* Default rule: lowest priority, match any, actions: drop. */
8309 dtb->policer_rules[RTE_MTR_DROPPED] =
8310 mlx5_glue->dv_create_flow(dtb->any_matcher,
8311 (void *)&value, i, actions);
8312 if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
8313 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
8322 * Create the needed meter and suffix tables.
8323 * Lock free, (mutex should be acquired by caller).
8326 * Pointer to Ethernet device.
8328 * Pointer to the flow meter.
8331 * Pointer to table set on success, NULL otherwise and rte_errno is set.
8333 static struct mlx5_meter_domains_infos *
8334 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
8335 const struct mlx5_flow_meter *fm)
8337 struct mlx5_priv *priv = dev->data->dev_private;
8338 struct mlx5_meter_domains_infos *mtb;
8342 if (!priv->mtr_en) {
8343 rte_errno = ENOTSUP;
8346 mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
8348 DRV_LOG(ERR, "Failed to allocate memory for meter.");
8351 /* Create meter count actions */
8352 for (i = 0; i <= RTE_MTR_DROPPED; i++) {
8353 if (!fm->policer_stats.cnt[i])
8355 mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
8357 /* Create drop action. */
8358 mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
8359 if (!mtb->drop_actn) {
8360 DRV_LOG(ERR, "Failed to create drop action.");
8363 /* Egress meter table. */
8364 ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
8366 DRV_LOG(ERR, "Failed to prepare egress meter table.");
8369 /* Ingress meter table. */
8370 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
8372 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
8375 /* FDB meter table. */
8376 if (priv->config.dv_esw_en) {
8377 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
8378 priv->mtr_color_reg);
8380 DRV_LOG(ERR, "Failed to prepare fdb meter table.");
8386 flow_dv_destroy_mtr_tbl(dev, mtb);
8391 * Destroy domain policer rule.
8394 * Pointer to domain table.
/*
 * Destroy one domain's per-color policer rules (colors below
 * RTE_MTR_DROPPED only) and its jump action, NULLing the pointers so the
 * function is safe to call repeatedly.
 */
8397 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
8401 for (i = 0; i < RTE_MTR_DROPPED; i++) {
8402 if (dt->policer_rules[i]) {
8403 claim_zero(mlx5_glue->dv_destroy_flow
8404 (dt->policer_rules[i]));
8405 dt->policer_rules[i] = NULL;
8408 if (dt->jump_actn) {
8409 claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
8410 dt->jump_actn = NULL;
8415 * Destroy policer rules.
8418 * Pointer to Ethernet device.
8420 * Pointer to flow meter structure.
8422 * Pointer to flow attributes.
/*
 * Destroy the policer rules of all three domains (egress, ingress,
 * transfer) of @fm's meter table set.  Tolerates a NULL @fm.
 * NOTE(review): the visible lines destroy all three domains; any
 * attribute-based gating is in lines missing from this extraction.
 */
8428 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
8429 const struct mlx5_flow_meter *fm,
8430 const struct rte_flow_attr *attr)
8432 struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
8437 flow_dv_destroy_domain_policer_rule(&mtb->egress);
8439 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
8441 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
8446 * Create specify domain meter policer rule.
8449 * Pointer to flow meter structure.
8451 * Pointer to DV meter table set.
8453 * Pointer to suffix table.
8454 * @param[in] mtr_reg_c
8455 * Color match REG_C.
8458 * 0 on success, -1 otherwise.
/*
 * Install one domain's per-color policer rules: create (once) the jump
 * action to the suffix table, then for each color below RTE_MTR_DROPPED
 * match the color value in the configured REG_C and attach an optional
 * count action plus either drop (policer action DROP) or the jump.
 * Returns 0 on success, -1 otherwise (per header comment).
 */
8461 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
8462 struct mlx5_meter_domain_info *dtb,
8463 struct mlx5_flow_tbl_resource *sfx_tb,
8466 struct mlx5_flow_dv_match_params matcher = {
8467 .size = sizeof(matcher.buf),
8469 struct mlx5_flow_dv_match_params value = {
8470 .size = sizeof(value.buf),
8472 struct mlx5_meter_domains_infos *mtb = fm->mfts;
8473 void *actions[METER_ACTIONS];
8476 /* Create jump action. */
8479 if (!dtb->jump_actn)
8481 mlx5_glue->dr_create_flow_action_dest_flow_tbl
8483 if (!dtb->jump_actn) {
8484 DRV_LOG(ERR, "Failed to create policer jump action.");
8487 for (i = 0; i < RTE_MTR_DROPPED; i++) {
/* Match this color's value in the meter color register. */
8490 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
8491 rte_col_2_mlx5_col(i), UINT32_MAX);
8492 if (mtb->count_actns[i])
8493 actions[j++] = mtb->count_actns[i];
/* DROP policer action drops; otherwise jump to the suffix table. */
8494 if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
8495 actions[j++] = mtb->drop_actn;
8497 actions[j++] = dtb->jump_actn;
8498 dtb->policer_rules[i] =
8499 mlx5_glue->dv_create_flow(dtb->color_matcher,
8502 if (!dtb->policer_rules[i]) {
8503 DRV_LOG(ERR, "Failed to create policer rule.");
8514 * Create policer rules.
8517 * Pointer to Ethernet device.
8519 * Pointer to flow meter structure.
8521 * Pointer to flow attributes.
8524 * 0 on success, -1 otherwise.
/*
 * Create policer rules for the domains requested by @attr (egress is
 * created first, then ingress and transfer when enabled), each forwarding
 * to its domain's meter suffix table.  On failure everything already
 * created is rolled back via flow_dv_destroy_policer_rules().
 */
8527 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
8528 struct mlx5_flow_meter *fm,
8529 const struct rte_flow_attr *attr)
8531 struct mlx5_priv *priv = dev->data->dev_private;
8532 struct mlx5_meter_domains_infos *mtb = fm->mfts;
8536 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
8537 priv->sh->tx_mtr_sfx_tbl,
8538 priv->mtr_color_reg);
8540 DRV_LOG(ERR, "Failed to create egress policer.");
8544 if (attr->ingress) {
8545 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
8546 priv->sh->rx_mtr_sfx_tbl,
8547 priv->mtr_color_reg);
8549 DRV_LOG(ERR, "Failed to create ingress policer.");
8553 if (attr->transfer) {
8554 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
8555 priv->sh->fdb_mtr_sfx_tbl,
8556 priv->mtr_color_reg);
8558 DRV_LOG(ERR, "Failed to create transfer policer.");
/* Error path: tear down any policer rules created above. */
8564 flow_dv_destroy_policer_rules(dev, fm, attr);
8569 * Query a devx counter.
8572 * Pointer to the Ethernet device structure.
8574 * Pointer to the flow counter.
8576 * Set to clear the counter statistics.
8578 * The statistics value of packets.
8580 * The statistics value of bytes.
8583 * 0 on success, otherwise return -1.
8586 flow_dv_counter_query(struct rte_eth_dev *dev,
8587 struct mlx5_flow_counter *cnt, bool clear,
8588 uint64_t *pkts, uint64_t *bytes)
8590 struct mlx5_priv *priv = dev->data->dev_private;
8591 uint64_t inn_pkts, inn_bytes;
8594 if (!priv->config.devx)
8596 ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
8599 *pkts = inn_pkts - cnt->hits;
8600 *bytes = inn_bytes - cnt->bytes;
8602 cnt->hits = inn_pkts;
8603 cnt->bytes = inn_bytes;
8609 * Mutex-protected thunk to lock-free __flow_dv_translate().
8612 flow_dv_translate(struct rte_eth_dev *dev,
8613 struct mlx5_flow *dev_flow,
8614 const struct rte_flow_attr *attr,
8615 const struct rte_flow_item items[],
8616 const struct rte_flow_action actions[],
8617 struct rte_flow_error *error)
8621 flow_dv_shared_lock(dev);
8622 ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
8623 flow_dv_shared_unlock(dev);
/**
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int rc;

	/* Serialize against other DV shared-resource mutators. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
/**
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other DV shared-resource mutators. */
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
/**
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other DV shared-resource mutators. */
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
/**
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static struct mlx5_flow_counter *
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	struct mlx5_flow_counter *counter;

	/* Serialize against other DV shared-resource mutators. */
	flow_dv_shared_lock(dev);
	/* Non-shared, no id, batch allocation from the counter pools. */
	counter = flow_dv_counter_alloc(dev, 0, 0, 1);
	flow_dv_shared_unlock(dev);
	return counter;
}
/**
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
{
	/* Serialize against other DV shared-resource mutators. */
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}
8690 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
8691 .validate = flow_dv_validate,
8692 .prepare = flow_dv_prepare,
8693 .translate = flow_dv_translate,
8694 .apply = flow_dv_apply,
8695 .remove = flow_dv_remove,
8696 .destroy = flow_dv_destroy,
8697 .query = flow_dv_query,
8698 .create_mtr_tbls = flow_dv_create_mtr_tbl,
8699 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
8700 .create_policer_rules = flow_dv_create_policer_rules,
8701 .destroy_policer_rules = flow_dv_destroy_policer_rules,
8702 .counter_alloc = flow_dv_counter_allocate,
8703 .counter_free = flow_dv_counter_free,
8704 .counter_query = flow_dv_counter_query,
8707 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */