1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
20 #include <rte_vxlan.h>
22 #include <rte_eal_paging.h>
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
28 #include <mlx5_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
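/*
 * For example, with these masks a TCI carrying PCP = 5 and VID = 100
 * (DEI = 0) is built as (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100 =
 * 0xa000 | 0x064 = 0xa064.
 */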
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75 struct mlx5_flow_tbl_resource *tbl);
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79 uint32_t encap_decap_idx);
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
86 * Initialize flow attributes structure according to flow items' types.
88 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
89 * mode. For tunnel mode, the items to be modified are the outermost ones.
92 * Pointer to item specification.
94 * Pointer to flow attributes structure.
96 * Pointer to the sub flow.
97 * @param[in] tunnel_decap
98 * Whether action is after tunnel decapsulation.
101 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
102 struct mlx5_flow *dev_flow, bool tunnel_decap)
104 uint64_t layers = dev_flow->handle->layers;
107 * If layers is already initialized, it means this dev_flow is the
108 * suffix flow and the layer flags were set by the prefix flow. Use
109 * the layer flags from the prefix flow, as the suffix flow may not
110 * have the user-defined items because the flow was split.
113 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
115 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
117 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
119 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
124 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
125 uint8_t next_protocol = 0xff;
126 switch (item->type) {
127 case RTE_FLOW_ITEM_TYPE_GRE:
128 case RTE_FLOW_ITEM_TYPE_NVGRE:
129 case RTE_FLOW_ITEM_TYPE_VXLAN:
130 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
131 case RTE_FLOW_ITEM_TYPE_GENEVE:
132 case RTE_FLOW_ITEM_TYPE_MPLS:
136 case RTE_FLOW_ITEM_TYPE_IPV4:
139 if (item->mask != NULL &&
140 ((const struct rte_flow_item_ipv4 *)
141 item->mask)->hdr.next_proto_id)
143 ((const struct rte_flow_item_ipv4 *)
144 (item->spec))->hdr.next_proto_id &
145 ((const struct rte_flow_item_ipv4 *)
146 (item->mask))->hdr.next_proto_id;
147 if ((next_protocol == IPPROTO_IPIP ||
148 next_protocol == IPPROTO_IPV6) && tunnel_decap)
151 case RTE_FLOW_ITEM_TYPE_IPV6:
154 if (item->mask != NULL &&
155 ((const struct rte_flow_item_ipv6 *)
156 item->mask)->hdr.proto)
158 ((const struct rte_flow_item_ipv6 *)
159 (item->spec))->hdr.proto &
160 ((const struct rte_flow_item_ipv6 *)
161 (item->mask))->hdr.proto;
162 if ((next_protocol == IPPROTO_IPIP ||
163 next_protocol == IPPROTO_IPV6) && tunnel_decap)
166 case RTE_FLOW_ITEM_TYPE_UDP:
170 case RTE_FLOW_ITEM_TYPE_TCP:
182 * Convert rte_mtr_color to mlx5 color.
191 rte_col_2_mlx5_col(enum rte_color rcol)
194 case RTE_COLOR_GREEN:
195 return MLX5_FLOW_COLOR_GREEN;
196 case RTE_COLOR_YELLOW:
197 return MLX5_FLOW_COLOR_YELLOW;
199 return MLX5_FLOW_COLOR_RED;
203 return MLX5_FLOW_COLOR_UNDEFINED;
206 struct field_modify_info {
207 uint32_t size; /* Size of field in protocol header, in bytes. */
208 uint32_t offset; /* Offset of field in protocol header, in bytes. */
209 enum mlx5_modification_field id;
212 struct field_modify_info modify_eth[] = {
213 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
214 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
215 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
216 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
220 struct field_modify_info modify_vlan_out_first_vid[] = {
221 /* Size in bits !!! */
222 {12, 0, MLX5_MODI_OUT_FIRST_VID},
226 struct field_modify_info modify_ipv4[] = {
227 {1, 1, MLX5_MODI_OUT_IP_DSCP},
228 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
229 {4, 12, MLX5_MODI_OUT_SIPV4},
230 {4, 16, MLX5_MODI_OUT_DIPV4},
234 struct field_modify_info modify_ipv6[] = {
235 {1, 0, MLX5_MODI_OUT_IP_DSCP},
236 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
237 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
238 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
239 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
240 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
241 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
242 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
243 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
244 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
248 struct field_modify_info modify_udp[] = {
249 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
250 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
254 struct field_modify_info modify_tcp[] = {
255 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
256 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
257 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
258 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264 uint8_t next_protocol, uint64_t *item_flags,
267 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
269 if (next_protocol == IPPROTO_IPIP) {
270 *item_flags |= MLX5_FLOW_LAYER_IPIP;
273 if (next_protocol == IPPROTO_IPV6) {
274 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
280 * Acquire the synchronizing object to protect multithreaded access
281 * to shared dv context. Lock occurs only if context is actually
282 * shared, i.e. we have multiport IB device and representors are
286 * Pointer to the rte_eth_dev structure.
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
291 struct mlx5_priv *priv = dev->data->dev_private;
292 struct mlx5_dev_ctx_shared *sh = priv->sh;
294 if (sh->refcnt > 1) {
297 ret = pthread_mutex_lock(&sh->dv_mutex);
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
306 struct mlx5_priv *priv = dev->data->dev_private;
307 struct mlx5_dev_ctx_shared *sh = priv->sh;
309 if (sh->refcnt > 1) {
312 ret = pthread_mutex_unlock(&sh->dv_mutex);
318 /* Update VLAN's VID/PCP based on input rte_flow_action.
321 * Pointer to struct rte_flow_action.
323 * Pointer to struct rte_vlan_hdr.
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327 struct rte_vlan_hdr *vlan)
330 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
332 ((const struct rte_flow_action_of_set_vlan_pcp *)
333 action->conf)->vlan_pcp;
334 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336 vlan->vlan_tci |= vlan_tci;
337 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
338 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339 vlan->vlan_tci |= rte_be_to_cpu_16
340 (((const struct rte_flow_action_of_set_vlan_vid *)
341 action->conf)->vlan_vid);
346 * Fetch 1, 2, 3 or 4 byte field from the byte array
347 * and return as unsigned integer in host-endian format.
350 * Pointer to data array.
352 * Size of field to extract.
355 * converted field in host endian format.
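 * For example, for data = {0x11, 0x22, 0x33, 0x44} and size 4 the bytes
 * are read as a big-endian word and 0x11223344 is returned regardless of
 * the host byte order.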
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
367 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
370 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371 ret = (ret << 8) | *(data + sizeof(uint16_t));
374 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
385 * Convert modify-header action to DV specification.
387 * Data length of each action is determined by the provided field
388 * description and the item mask. Data bit offset and width of each
389 * action are determined by the provided item mask.
392 * Pointer to item specification.
394 * Pointer to field modification information.
395 * For MLX5_MODIFICATION_TYPE_SET specifies destination field.
396 * For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
397 * For MLX5_MODIFICATION_TYPE_COPY specifies source field.
399 * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
400 * Negative offset value sets the same offset as source offset.
401 * size field is ignored, value is taken from source field.
402 * @param[in,out] resource
403 * Pointer to the modify-header resource.
405 * Type of modification.
407 * Pointer to the error structure.
410 * 0 on success, a negative errno value otherwise and rte_errno is set.
413 flow_dv_convert_modify_action(struct rte_flow_item *item,
414 struct field_modify_info *field,
415 struct field_modify_info *dcopy,
416 struct mlx5_flow_dv_modify_hdr_resource *resource,
417 uint32_t type, struct rte_flow_error *error)
419 uint32_t i = resource->actions_num;
420 struct mlx5_modification_cmd *actions = resource->actions;
423 * The item and mask are provided in big-endian format.
424 * The fields should be presented in big-endian format as well.
425 * The mask must always be present; it defines the actual field width.
427 MLX5_ASSERT(item->mask);
428 MLX5_ASSERT(field->size);
435 if (i >= MLX5_MAX_MODIFY_NUM)
436 return rte_flow_error_set(error, EINVAL,
437 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
438 "too many items to modify");
439 /* Fetch variable byte size mask from the array. */
440 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
441 field->offset, field->size);
446 /* Deduce actual data width in bits from mask value. */
447 off_b = rte_bsf32(mask);
448 size_b = sizeof(uint32_t) * CHAR_BIT -
449 off_b - __builtin_clz(mask);
451 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
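		/*
		 * For example, a mask of 0x00fff000 gives off_b = 12 and
		 * size_b = 32 - 12 - 8 = 12, i.e. a 12-bit field starting at
		 * bit 12; a full 32-bit mask is encoded as length 0.
		 */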
452 actions[i] = (struct mlx5_modification_cmd) {
458 /* Convert entire record to expected big-endian format. */
459 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
460 if (type == MLX5_MODIFICATION_TYPE_COPY) {
462 actions[i].dst_field = dcopy->id;
463 actions[i].dst_offset =
464 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
465 /* Convert entire record to big-endian format. */
466 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
468 MLX5_ASSERT(item->spec);
469 data = flow_dv_fetch_field((const uint8_t *)item->spec +
470 field->offset, field->size);
471 /* Shift out the trailing masked bits from data. */
472 data = (data & mask) >> off_b;
473 actions[i].data1 = rte_cpu_to_be_32(data);
477 } while (field->size);
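	/*
	 * A field wider than 32 bits spans several consecutive table entries,
	 * e.g. setting a full IPv6 source address produces four 32-bit SET
	 * commands (MLX5_MODI_OUT_SIPV6_127_96 .. MLX5_MODI_OUT_SIPV6_31_0),
	 * one per word of the address.
	 */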
478 if (resource->actions_num == i)
479 return rte_flow_error_set(error, EINVAL,
480 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
481 "invalid modification flow item");
482 resource->actions_num = i;
487 * Convert modify-header set IPv4 address action to DV specification.
489 * @param[in,out] resource
490 * Pointer to the modify-header resource.
492 * Pointer to action specification.
494 * Pointer to the error structure.
497 * 0 on success, a negative errno value otherwise and rte_errno is set.
500 flow_dv_convert_action_modify_ipv4
501 (struct mlx5_flow_dv_modify_hdr_resource *resource,
502 const struct rte_flow_action *action,
503 struct rte_flow_error *error)
505 const struct rte_flow_action_set_ipv4 *conf =
506 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508 struct rte_flow_item_ipv4 ipv4;
509 struct rte_flow_item_ipv4 ipv4_mask;
511 memset(&ipv4, 0, sizeof(ipv4));
512 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
513 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514 ipv4.hdr.src_addr = conf->ipv4_addr;
515 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
517 ipv4.hdr.dst_addr = conf->ipv4_addr;
518 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
521 item.mask = &ipv4_mask;
522 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523 MLX5_MODIFICATION_TYPE_SET, error);
527 * Convert modify-header set IPv6 address action to DV specification.
529 * @param[in,out] resource
530 * Pointer to the modify-header resource.
532 * Pointer to action specification.
534 * Pointer to the error structure.
537 * 0 on success, a negative errno value otherwise and rte_errno is set.
540 flow_dv_convert_action_modify_ipv6
541 (struct mlx5_flow_dv_modify_hdr_resource *resource,
542 const struct rte_flow_action *action,
543 struct rte_flow_error *error)
545 const struct rte_flow_action_set_ipv6 *conf =
546 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548 struct rte_flow_item_ipv6 ipv6;
549 struct rte_flow_item_ipv6 ipv6_mask;
551 memset(&ipv6, 0, sizeof(ipv6));
552 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
553 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555 sizeof(ipv6.hdr.src_addr));
556 memcpy(&ipv6_mask.hdr.src_addr,
557 &rte_flow_item_ipv6_mask.hdr.src_addr,
558 sizeof(ipv6.hdr.src_addr));
560 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561 sizeof(ipv6.hdr.dst_addr));
562 memcpy(&ipv6_mask.hdr.dst_addr,
563 &rte_flow_item_ipv6_mask.hdr.dst_addr,
564 sizeof(ipv6.hdr.dst_addr));
567 item.mask = &ipv6_mask;
568 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569 MLX5_MODIFICATION_TYPE_SET, error);
573 * Convert modify-header set MAC address action to DV specification.
575 * @param[in,out] resource
576 * Pointer to the modify-header resource.
578 * Pointer to action specification.
580 * Pointer to the error structure.
583 * 0 on success, a negative errno value otherwise and rte_errno is set.
586 flow_dv_convert_action_modify_mac
587 (struct mlx5_flow_dv_modify_hdr_resource *resource,
588 const struct rte_flow_action *action,
589 struct rte_flow_error *error)
591 const struct rte_flow_action_set_mac *conf =
592 (const struct rte_flow_action_set_mac *)(action->conf);
593 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594 struct rte_flow_item_eth eth;
595 struct rte_flow_item_eth eth_mask;
597 memset(ð, 0, sizeof(eth));
598 memset(ð_mask, 0, sizeof(eth_mask));
599 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600 memcpy(ð.src.addr_bytes, &conf->mac_addr,
601 sizeof(eth.src.addr_bytes));
602 memcpy(ð_mask.src.addr_bytes,
603 &rte_flow_item_eth_mask.src.addr_bytes,
604 sizeof(eth_mask.src.addr_bytes));
606 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
607 sizeof(eth.dst.addr_bytes));
608 memcpy(ð_mask.dst.addr_bytes,
609 &rte_flow_item_eth_mask.dst.addr_bytes,
610 sizeof(eth_mask.dst.addr_bytes));
613 item.mask = ð_mask;
614 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615 MLX5_MODIFICATION_TYPE_SET, error);
619 * Convert modify-header set VLAN VID action to DV specification.
621 * @param[in,out] resource
622 * Pointer to the modify-header resource.
624 * Pointer to action specification.
626 * Pointer to the error structure.
629 * 0 on success, a negative errno value otherwise and rte_errno is set.
632 flow_dv_convert_action_modify_vlan_vid
633 (struct mlx5_flow_dv_modify_hdr_resource *resource,
634 const struct rte_flow_action *action,
635 struct rte_flow_error *error)
637 const struct rte_flow_action_of_set_vlan_vid *conf =
638 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
639 int i = resource->actions_num;
640 struct mlx5_modification_cmd *actions = resource->actions;
641 struct field_modify_info *field = modify_vlan_out_first_vid;
643 if (i >= MLX5_MAX_MODIFY_NUM)
644 return rte_flow_error_set(error, EINVAL,
645 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
646 "too many items to modify");
647 actions[i] = (struct mlx5_modification_cmd) {
648 .action_type = MLX5_MODIFICATION_TYPE_SET,
650 .length = field->size,
651 .offset = field->offset,
653 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
654 actions[i].data1 = conf->vlan_vid;
655 actions[i].data1 = actions[i].data1 << 16;
656 resource->actions_num = ++i;
661 * Convert modify-header set TP action to DV specification.
663 * @param[in,out] resource
664 * Pointer to the modify-header resource.
666 * Pointer to action specification.
668 * Pointer to rte_flow_item objects list.
670 * Pointer to flow attributes structure.
671 * @param[in] dev_flow
672 * Pointer to the sub flow.
673 * @param[in] tunnel_decap
674 * Whether action is after tunnel decapsulation.
676 * Pointer to the error structure.
679 * 0 on success, a negative errno value otherwise and rte_errno is set.
682 flow_dv_convert_action_modify_tp
683 (struct mlx5_flow_dv_modify_hdr_resource *resource,
684 const struct rte_flow_action *action,
685 const struct rte_flow_item *items,
686 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
687 bool tunnel_decap, struct rte_flow_error *error)
689 const struct rte_flow_action_set_tp *conf =
690 (const struct rte_flow_action_set_tp *)(action->conf);
691 struct rte_flow_item item;
692 struct rte_flow_item_udp udp;
693 struct rte_flow_item_udp udp_mask;
694 struct rte_flow_item_tcp tcp;
695 struct rte_flow_item_tcp tcp_mask;
696 struct field_modify_info *field;
699 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
701 memset(&udp, 0, sizeof(udp));
702 memset(&udp_mask, 0, sizeof(udp_mask));
703 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
704 udp.hdr.src_port = conf->port;
705 udp_mask.hdr.src_port =
706 rte_flow_item_udp_mask.hdr.src_port;
708 udp.hdr.dst_port = conf->port;
709 udp_mask.hdr.dst_port =
710 rte_flow_item_udp_mask.hdr.dst_port;
712 item.type = RTE_FLOW_ITEM_TYPE_UDP;
714 item.mask = &udp_mask;
717 MLX5_ASSERT(attr->tcp);
718 memset(&tcp, 0, sizeof(tcp));
719 memset(&tcp_mask, 0, sizeof(tcp_mask));
720 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
721 tcp.hdr.src_port = conf->port;
722 tcp_mask.hdr.src_port =
723 rte_flow_item_tcp_mask.hdr.src_port;
725 tcp.hdr.dst_port = conf->port;
726 tcp_mask.hdr.dst_port =
727 rte_flow_item_tcp_mask.hdr.dst_port;
729 item.type = RTE_FLOW_ITEM_TYPE_TCP;
731 item.mask = &tcp_mask;
734 return flow_dv_convert_modify_action(&item, field, NULL, resource,
735 MLX5_MODIFICATION_TYPE_SET, error);
739 * Convert modify-header set TTL action to DV specification.
741 * @param[in,out] resource
742 * Pointer to the modify-header resource.
744 * Pointer to action specification.
746 * Pointer to rte_flow_item objects list.
748 * Pointer to flow attributes structure.
749 * @param[in] dev_flow
750 * Pointer to the sub flow.
751 * @param[in] tunnel_decap
752 * Whether action is after tunnel decapsulation.
754 * Pointer to the error structure.
757 * 0 on success, a negative errno value otherwise and rte_errno is set.
760 flow_dv_convert_action_modify_ttl
761 (struct mlx5_flow_dv_modify_hdr_resource *resource,
762 const struct rte_flow_action *action,
763 const struct rte_flow_item *items,
764 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
765 bool tunnel_decap, struct rte_flow_error *error)
767 const struct rte_flow_action_set_ttl *conf =
768 (const struct rte_flow_action_set_ttl *)(action->conf);
769 struct rte_flow_item item;
770 struct rte_flow_item_ipv4 ipv4;
771 struct rte_flow_item_ipv4 ipv4_mask;
772 struct rte_flow_item_ipv6 ipv6;
773 struct rte_flow_item_ipv6 ipv6_mask;
774 struct field_modify_info *field;
777 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
779 memset(&ipv4, 0, sizeof(ipv4));
780 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
781 ipv4.hdr.time_to_live = conf->ttl_value;
782 ipv4_mask.hdr.time_to_live = 0xFF;
783 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
785 item.mask = &ipv4_mask;
788 MLX5_ASSERT(attr->ipv6);
789 memset(&ipv6, 0, sizeof(ipv6));
790 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
791 ipv6.hdr.hop_limits = conf->ttl_value;
792 ipv6_mask.hdr.hop_limits = 0xFF;
793 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
795 item.mask = &ipv6_mask;
798 return flow_dv_convert_modify_action(&item, field, NULL, resource,
799 MLX5_MODIFICATION_TYPE_SET, error);
803 * Convert modify-header decrement TTL action to DV specification.
805 * @param[in,out] resource
806 * Pointer to the modify-header resource.
808 * Pointer to action specification.
810 * Pointer to rte_flow_item objects list.
812 * Pointer to flow attributes structure.
813 * @param[in] dev_flow
814 * Pointer to the sub flow.
815 * @param[in] tunnel_decap
816 * Whether action is after tunnel decapsulation.
818 * Pointer to the error structure.
821 * 0 on success, a negative errno value otherwise and rte_errno is set.
824 flow_dv_convert_action_modify_dec_ttl
825 (struct mlx5_flow_dv_modify_hdr_resource *resource,
826 const struct rte_flow_item *items,
827 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
828 bool tunnel_decap, struct rte_flow_error *error)
830 struct rte_flow_item item;
831 struct rte_flow_item_ipv4 ipv4;
832 struct rte_flow_item_ipv4 ipv4_mask;
833 struct rte_flow_item_ipv6 ipv6;
834 struct rte_flow_item_ipv6 ipv6_mask;
835 struct field_modify_info *field;
838 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
840 memset(&ipv4, 0, sizeof(ipv4));
841 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
842 ipv4.hdr.time_to_live = 0xFF;
843 ipv4_mask.hdr.time_to_live = 0xFF;
844 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
846 item.mask = &ipv4_mask;
849 MLX5_ASSERT(attr->ipv6);
850 memset(&ipv6, 0, sizeof(ipv6));
851 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
852 ipv6.hdr.hop_limits = 0xFF;
853 ipv6_mask.hdr.hop_limits = 0xFF;
854 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
856 item.mask = &ipv6_mask;
859 return flow_dv_convert_modify_action(&item, field, NULL, resource,
860 MLX5_MODIFICATION_TYPE_ADD, error);
864 * Convert modify-header increment/decrement TCP Sequence number
865 * to DV specification.
867 * @param[in,out] resource
868 * Pointer to the modify-header resource.
870 * Pointer to action specification.
872 * Pointer to the error structure.
875 * 0 on success, a negative errno value otherwise and rte_errno is set.
878 flow_dv_convert_action_modify_tcp_seq
879 (struct mlx5_flow_dv_modify_hdr_resource *resource,
880 const struct rte_flow_action *action,
881 struct rte_flow_error *error)
883 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884 uint64_t value = rte_be_to_cpu_32(*conf);
885 struct rte_flow_item item;
886 struct rte_flow_item_tcp tcp;
887 struct rte_flow_item_tcp tcp_mask;
889 memset(&tcp, 0, sizeof(tcp));
890 memset(&tcp_mask, 0, sizeof(tcp_mask));
891 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
893 * The HW has no decrement operation, only an increment operation.
894 * To simulate decrementing Y by X using the increment operation,
895 * we need to add UINT32_MAX to Y X times.
896 * Each addition of UINT32_MAX decrements Y by 1.
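 * For example, to decrement the sequence number by 3, 3 * UINT32_MAX is
 * added; since UINT32_MAX equals -1 modulo 2^32, this is the same as
 * subtracting 3.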
899 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901 item.type = RTE_FLOW_ITEM_TYPE_TCP;
903 item.mask = &tcp_mask;
904 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905 MLX5_MODIFICATION_TYPE_ADD, error);
909 * Convert modify-header increment/decrement TCP Acknowledgment number
910 * to DV specification.
912 * @param[in,out] resource
913 * Pointer to the modify-header resource.
915 * Pointer to action specification.
917 * Pointer to the error structure.
920 * 0 on success, a negative errno value otherwise and rte_errno is set.
923 flow_dv_convert_action_modify_tcp_ack
924 (struct mlx5_flow_dv_modify_hdr_resource *resource,
925 const struct rte_flow_action *action,
926 struct rte_flow_error *error)
928 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929 uint64_t value = rte_be_to_cpu_32(*conf);
930 struct rte_flow_item item;
931 struct rte_flow_item_tcp tcp;
932 struct rte_flow_item_tcp tcp_mask;
934 memset(&tcp, 0, sizeof(tcp));
935 memset(&tcp_mask, 0, sizeof(tcp_mask));
936 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
938 * The HW has no decrement operation, only an increment operation.
939 * To simulate decrementing Y by X using the increment operation,
940 * we need to add UINT32_MAX to Y X times.
941 * Each addition of UINT32_MAX decrements Y by 1.
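 * For example, a decrement by 1 becomes a single addition of UINT32_MAX,
 * which modulo 2^32 subtracts 1 from the acknowledgment number.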
944 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946 item.type = RTE_FLOW_ITEM_TYPE_TCP;
948 item.mask = &tcp_mask;
949 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950 MLX5_MODIFICATION_TYPE_ADD, error);
953 static enum mlx5_modification_field reg_to_field[] = {
954 [REG_NON] = MLX5_MODI_OUT_NONE,
955 [REG_A] = MLX5_MODI_META_DATA_REG_A,
956 [REG_B] = MLX5_MODI_META_DATA_REG_B,
957 [REG_C_0] = MLX5_MODI_META_REG_C_0,
958 [REG_C_1] = MLX5_MODI_META_REG_C_1,
959 [REG_C_2] = MLX5_MODI_META_REG_C_2,
960 [REG_C_3] = MLX5_MODI_META_REG_C_3,
961 [REG_C_4] = MLX5_MODI_META_REG_C_4,
962 [REG_C_5] = MLX5_MODI_META_REG_C_5,
963 [REG_C_6] = MLX5_MODI_META_REG_C_6,
964 [REG_C_7] = MLX5_MODI_META_REG_C_7,
968 * Convert register set to DV specification.
970 * @param[in,out] resource
971 * Pointer to the modify-header resource.
973 * Pointer to action specification.
975 * Pointer to the error structure.
978 * 0 on success, a negative errno value otherwise and rte_errno is set.
981 flow_dv_convert_action_set_reg
982 (struct mlx5_flow_dv_modify_hdr_resource *resource,
983 const struct rte_flow_action *action,
984 struct rte_flow_error *error)
986 const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
987 struct mlx5_modification_cmd *actions = resource->actions;
988 uint32_t i = resource->actions_num;
990 if (i >= MLX5_MAX_MODIFY_NUM)
991 return rte_flow_error_set(error, EINVAL,
992 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
993 "too many items to modify");
994 MLX5_ASSERT(conf->id != REG_NON);
995 MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
996 actions[i] = (struct mlx5_modification_cmd) {
997 .action_type = MLX5_MODIFICATION_TYPE_SET,
998 .field = reg_to_field[conf->id],
1000 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1001 actions[i].data1 = rte_cpu_to_be_32(conf->data);
1003 resource->actions_num = i;
1008 * Convert SET_TAG action to DV specification.
1011 * Pointer to the rte_eth_dev structure.
1012 * @param[in,out] resource
1013 * Pointer to the modify-header resource.
1015 * Pointer to action specification.
1017 * Pointer to the error structure.
1020 * 0 on success, a negative errno value otherwise and rte_errno is set.
1023 flow_dv_convert_action_set_tag
1024 (struct rte_eth_dev *dev,
1025 struct mlx5_flow_dv_modify_hdr_resource *resource,
1026 const struct rte_flow_action_set_tag *conf,
1027 struct rte_flow_error *error)
1029 rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030 rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031 struct rte_flow_item item = {
1035 struct field_modify_info reg_c_x[] = {
1038 enum mlx5_modification_field reg_type;
1041 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1044 MLX5_ASSERT(ret != REG_NON);
1045 MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046 reg_type = reg_to_field[ret];
1047 MLX5_ASSERT(reg_type > 0);
1048 reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050 MLX5_MODIFICATION_TYPE_SET, error);
1054 * Convert internal COPY_REG action to DV specification.
1057 * Pointer to the rte_eth_dev structure.
1058 * @param[in,out] res
1059 * Pointer to the modify-header resource.
1061 * Pointer to action specification.
1063 * Pointer to the error structure.
1066 * 0 on success, a negative errno value otherwise and rte_errno is set.
1069 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1070 struct mlx5_flow_dv_modify_hdr_resource *res,
1071 const struct rte_flow_action *action,
1072 struct rte_flow_error *error)
1074 const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1075 rte_be32_t mask = RTE_BE32(UINT32_MAX);
1076 struct rte_flow_item item = {
1080 struct field_modify_info reg_src[] = {
1081 {4, 0, reg_to_field[conf->src]},
1084 struct field_modify_info reg_dst = {
1086 .id = reg_to_field[conf->dst],
1088 /* Adjust reg_c[0] usage according to reported mask. */
1089 if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1090 struct mlx5_priv *priv = dev->data->dev_private;
1091 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1093 MLX5_ASSERT(reg_c0);
1094 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1095 if (conf->dst == REG_C_0) {
1096 /* Copy to reg_c[0], within mask only. */
1097 reg_dst.offset = rte_bsf32(reg_c0);
1099 * The mask ignores the endianness, because
1100 * there is no conversion in datapath.
1102 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1103 /* Copy from destination lower bits to reg_c[0]. */
1104 mask = reg_c0 >> reg_dst.offset;
1106 /* Copy from destination upper bits to reg_c[0]. */
1107 mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1108 rte_fls_u32(reg_c0));
1111 mask = rte_cpu_to_be_32(reg_c0);
1112 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1113 /* Copy from reg_c[0] to destination lower bits. */
1116 /* Copy from reg_c[0] to destination upper bits. */
1117 reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1118 (rte_fls_u32(reg_c0) -
1123 return flow_dv_convert_modify_action(&item,
1124 reg_src, ®_dst, res,
1125 MLX5_MODIFICATION_TYPE_COPY,
1130 * Convert MARK action to DV specification. This routine is used
1131 * in extensive metadata only and requires metadata register to be
1132 * handled. In legacy mode hardware tag resource is engaged.
1135 * Pointer to the rte_eth_dev structure.
1137 * Pointer to MARK action specification.
1138 * @param[in,out] resource
1139 * Pointer to the modify-header resource.
1141 * Pointer to the error structure.
1144 * 0 on success, a negative errno value otherwise and rte_errno is set.
1147 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1148 const struct rte_flow_action_mark *conf,
1149 struct mlx5_flow_dv_modify_hdr_resource *resource,
1150 struct rte_flow_error *error)
1152 struct mlx5_priv *priv = dev->data->dev_private;
1153 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1154 priv->sh->dv_mark_mask);
1155 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1156 struct rte_flow_item item = {
1160 struct field_modify_info reg_c_x[] = {
1166 return rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1168 NULL, "zero mark action mask");
1169 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1172 MLX5_ASSERT(reg > 0);
1173 if (reg == REG_C_0) {
1174 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1175 uint32_t shl_c0 = rte_bsf32(msk_c0);
1177 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1178 mask = rte_cpu_to_be_32(mask) & msk_c0;
1179 mask = rte_cpu_to_be_32(mask << shl_c0);
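		/*
		 * For example, if dv_regc0_mask is 0xffff0000 then shl_c0 is
		 * 16 and the MARK value and mask end up in the upper 16 bits
		 * of reg_c[0], i.e. in the bits reported as available by
		 * dv_regc0_mask.
		 */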
1181 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1182 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1183 MLX5_MODIFICATION_TYPE_SET, error);
1187 * Get metadata register index for specified steering domain.
1190 * Pointer to the rte_eth_dev structure.
1192 * Attributes of flow to determine steering domain.
1194 * Pointer to the error structure.
1197 * positive index on success, a negative errno value otherwise
1198 * and rte_errno is set.
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202 const struct rte_flow_attr *attr,
1203 struct rte_flow_error *error)
1206 mlx5_flow_get_reg_id(dev, attr->transfer ?
1210 MLX5_METADATA_RX, 0, error);
1212 return rte_flow_error_set(error,
1213 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214 NULL, "unavailable "
1215 "metadata register");
1220 * Convert SET_META action to DV specification.
1223 * Pointer to the rte_eth_dev structure.
1224 * @param[in,out] resource
1225 * Pointer to the modify-header resource.
1227 * Attributes of flow that includes this item.
1229 * Pointer to action specification.
1231 * Pointer to the error structure.
1234 * 0 on success, a negative errno value otherwise and rte_errno is set.
1237 flow_dv_convert_action_set_meta
1238 (struct rte_eth_dev *dev,
1239 struct mlx5_flow_dv_modify_hdr_resource *resource,
1240 const struct rte_flow_attr *attr,
1241 const struct rte_flow_action_set_meta *conf,
1242 struct rte_flow_error *error)
1244 uint32_t data = conf->data;
1245 uint32_t mask = conf->mask;
1246 struct rte_flow_item item = {
1250 struct field_modify_info reg_c_x[] = {
1253 int reg = flow_dv_get_metadata_reg(dev, attr, error);
1258 * In the datapath code there are no endianness
1259 * conversions for performance reasons; all
1260 * pattern conversions are done in rte_flow.
1262 if (reg == REG_C_0) {
1263 struct mlx5_priv *priv = dev->data->dev_private;
1264 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1267 MLX5_ASSERT(msk_c0);
1268 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1269 shl_c0 = rte_bsf32(msk_c0);
1271 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1275 MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1277 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1278 /* The routine expects parameters in memory as big-endian ones. */
1279 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1280 MLX5_MODIFICATION_TYPE_SET, error);
1284 * Convert modify-header set IPv4 DSCP action to DV specification.
1286 * @param[in,out] resource
1287 * Pointer to the modify-header resource.
1289 * Pointer to action specification.
1291 * Pointer to the error structure.
1294 * 0 on success, a negative errno value otherwise and rte_errno is set.
1297 flow_dv_convert_action_modify_ipv4_dscp
1298 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299 const struct rte_flow_action *action,
1300 struct rte_flow_error *error)
1302 const struct rte_flow_action_set_dscp *conf =
1303 (const struct rte_flow_action_set_dscp *)(action->conf);
1304 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305 struct rte_flow_item_ipv4 ipv4;
1306 struct rte_flow_item_ipv4 ipv4_mask;
1308 memset(&ipv4, 0, sizeof(ipv4));
1309 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310 ipv4.hdr.type_of_service = conf->dscp;
1311 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1313 item.mask = &ipv4_mask;
1314 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315 MLX5_MODIFICATION_TYPE_SET, error);
1319 * Convert modify-header set IPv6 DSCP action to DV specification.
1321 * @param[in,out] resource
1322 * Pointer to the modify-header resource.
1324 * Pointer to action specification.
1326 * Pointer to the error structure.
1329 * 0 on success, a negative errno value otherwise and rte_errno is set.
1332 flow_dv_convert_action_modify_ipv6_dscp
1333 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334 const struct rte_flow_action *action,
1335 struct rte_flow_error *error)
1337 const struct rte_flow_action_set_dscp *conf =
1338 (const struct rte_flow_action_set_dscp *)(action->conf);
1339 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340 struct rte_flow_item_ipv6 ipv6;
1341 struct rte_flow_item_ipv6 ipv6_mask;
1343 memset(&ipv6, 0, sizeof(ipv6));
1344 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1346 * Even though the DSCP bit offset of IPv6 is not byte aligned,
1347 * rdma-core only accepts byte-aligned DSCP bits placed in bits
1348 * 0 to 5, to be compatible with IPv4. There is no need to shift the
1349 * bits in the IPv6 case, as rdma-core requires a byte-aligned value.
1351 ipv6.hdr.vtc_flow = conf->dscp;
1352 ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1354 item.mask = &ipv6_mask;
1355 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356 MLX5_MODIFICATION_TYPE_SET, error);
1360 * Validate MARK item.
1363 * Pointer to the rte_eth_dev structure.
1365 * Item specification.
1367 * Attributes of flow that includes this item.
1369 * Pointer to error structure.
1372 * 0 on success, a negative errno value otherwise and rte_errno is set.
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376 const struct rte_flow_item *item,
1377 const struct rte_flow_attr *attr __rte_unused,
1378 struct rte_flow_error *error)
1380 struct mlx5_priv *priv = dev->data->dev_private;
1381 struct mlx5_dev_config *config = &priv->config;
1382 const struct rte_flow_item_mark *spec = item->spec;
1383 const struct rte_flow_item_mark *mask = item->mask;
1384 const struct rte_flow_item_mark nic_mask = {
1385 .id = priv->sh->dv_mark_mask,
1389 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390 return rte_flow_error_set(error, ENOTSUP,
1391 RTE_FLOW_ERROR_TYPE_ITEM, item,
1392 "extended metadata feature"
1394 if (!mlx5_flow_ext_mreg_supported(dev))
1395 return rte_flow_error_set(error, ENOTSUP,
1396 RTE_FLOW_ERROR_TYPE_ITEM, item,
1397 "extended metadata register"
1398 " isn't supported");
1400 return rte_flow_error_set(error, ENOTSUP,
1401 RTE_FLOW_ERROR_TYPE_ITEM, item,
1402 "extended metadata register"
1403 " isn't available");
1404 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1408 return rte_flow_error_set(error, EINVAL,
1409 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1411 "data cannot be empty");
1412 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413 return rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1416 "mark id exceeds the limit");
1420 return rte_flow_error_set(error, EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422 "mask cannot be zero");
1424 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425 (const uint8_t *)&nic_mask,
1426 sizeof(struct rte_flow_item_mark),
1427 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1434 * Validate META item.
1437 * Pointer to the rte_eth_dev structure.
1439 * Item specification.
1441 * Attributes of flow that includes this item.
1443 * Pointer to error structure.
1446 * 0 on success, a negative errno value otherwise and rte_errno is set.
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1450 const struct rte_flow_item *item,
1451 const struct rte_flow_attr *attr,
1452 struct rte_flow_error *error)
1454 struct mlx5_priv *priv = dev->data->dev_private;
1455 struct mlx5_dev_config *config = &priv->config;
1456 const struct rte_flow_item_meta *spec = item->spec;
1457 const struct rte_flow_item_meta *mask = item->mask;
1458 struct rte_flow_item_meta nic_mask = {
1465 return rte_flow_error_set(error, EINVAL,
1466 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1468 "data cannot be empty");
1469 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470 if (!mlx5_flow_ext_mreg_supported(dev))
1471 return rte_flow_error_set(error, ENOTSUP,
1472 RTE_FLOW_ERROR_TYPE_ITEM, item,
1473 "extended metadata register"
1474 " isn't supported");
1475 reg = flow_dv_get_metadata_reg(dev, attr, error);
1479 return rte_flow_error_set(error, ENOTSUP,
1480 RTE_FLOW_ERROR_TYPE_ITEM, item,
1484 nic_mask.data = priv->sh->dv_meta_mask;
1485 } else if (attr->transfer) {
1486 return rte_flow_error_set(error, ENOTSUP,
1487 RTE_FLOW_ERROR_TYPE_ITEM, item,
1488 "extended metadata feature "
1489 "should be enabled when "
1490 "meta item is requested "
1491 "with e-switch mode ");
1494 mask = &rte_flow_item_meta_mask;
1496 return rte_flow_error_set(error, EINVAL,
1497 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498 "mask cannot be zero");
1500 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501 (const uint8_t *)&nic_mask,
1502 sizeof(struct rte_flow_item_meta),
1503 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1508 * Validate TAG item.
1511 * Pointer to the rte_eth_dev structure.
1513 * Item specification.
1515 * Attributes of flow that includes this item.
1517 * Pointer to error structure.
1520 * 0 on success, a negative errno value otherwise and rte_errno is set.
1523 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1524 const struct rte_flow_item *item,
1525 const struct rte_flow_attr *attr __rte_unused,
1526 struct rte_flow_error *error)
1528 const struct rte_flow_item_tag *spec = item->spec;
1529 const struct rte_flow_item_tag *mask = item->mask;
1530 const struct rte_flow_item_tag nic_mask = {
1531 .data = RTE_BE32(UINT32_MAX),
1536 if (!mlx5_flow_ext_mreg_supported(dev))
1537 return rte_flow_error_set(error, ENOTSUP,
1538 RTE_FLOW_ERROR_TYPE_ITEM, item,
1539 "extensive metadata register"
1540 " isn't supported");
1542 return rte_flow_error_set(error, EINVAL,
1543 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1545 "data cannot be empty");
1547 mask = &rte_flow_item_tag_mask;
1549 return rte_flow_error_set(error, EINVAL,
1550 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551 "mask cannot be zero");
1553 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1554 (const uint8_t *)&nic_mask,
1555 sizeof(struct rte_flow_item_tag),
1556 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1559 if (mask->index != 0xff)
1560 return rte_flow_error_set(error, EINVAL,
1561 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1562 "partial mask for tag index"
1563 " is not supported");
1564 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1567 MLX5_ASSERT(ret != REG_NON);
1572 * Validate vport item.
1575 * Pointer to the rte_eth_dev structure.
1577 * Item specification.
1579 * Attributes of flow that includes this item.
1580 * @param[in] item_flags
1581 * Bit-fields that holds the items detected until now.
1583 * Pointer to error structure.
1586 * 0 on success, a negative errno value otherwise and rte_errno is set.
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590 const struct rte_flow_item *item,
1591 const struct rte_flow_attr *attr,
1592 uint64_t item_flags,
1593 struct rte_flow_error *error)
1595 const struct rte_flow_item_port_id *spec = item->spec;
1596 const struct rte_flow_item_port_id *mask = item->mask;
1597 const struct rte_flow_item_port_id switch_mask = {
1600 struct mlx5_priv *esw_priv;
1601 struct mlx5_priv *dev_priv;
1604 if (!attr->transfer)
1605 return rte_flow_error_set(error, EINVAL,
1606 RTE_FLOW_ERROR_TYPE_ITEM,
1608 "match on port id is valid only"
1609 " when transfer flag is enabled");
1610 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611 return rte_flow_error_set(error, ENOTSUP,
1612 RTE_FLOW_ERROR_TYPE_ITEM, item,
1613 "multiple source ports are not"
1616 mask = &switch_mask;
1617 if (mask->id != 0xffffffff)
1618 return rte_flow_error_set(error, ENOTSUP,
1619 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1621 "no support for partial mask on"
1623 ret = mlx5_flow_item_acceptable
1624 (item, (const uint8_t *)mask,
1625 (const uint8_t *)&rte_flow_item_port_id_mask,
1626 sizeof(struct rte_flow_item_port_id),
1627 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1632 esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1634 return rte_flow_error_set(error, rte_errno,
1635 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636 "failed to obtain E-Switch info for"
1638 dev_priv = mlx5_dev_to_eswitch_info(dev);
1640 return rte_flow_error_set(error, rte_errno,
1641 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1643 "failed to obtain E-Switch info");
1644 if (esw_priv->domain_id != dev_priv->domain_id)
1645 return rte_flow_error_set(error, EINVAL,
1646 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647 "cannot match on a port from a"
1648 " different E-Switch");
1653 * Validate VLAN item.
1656 * Item specification.
1657 * @param[in] item_flags
1658 * Bit-fields that holds the items detected until now.
1660 * Ethernet device flow is being created on.
1662 * Pointer to error structure.
1665 * 0 on success, a negative errno value otherwise and rte_errno is set.
1668 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1669 uint64_t item_flags,
1670 struct rte_eth_dev *dev,
1671 struct rte_flow_error *error)
1673 const struct rte_flow_item_vlan *mask = item->mask;
1674 const struct rte_flow_item_vlan nic_mask = {
1675 .tci = RTE_BE16(UINT16_MAX),
1676 .inner_type = RTE_BE16(UINT16_MAX),
1679 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1681 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682 MLX5_FLOW_LAYER_INNER_L4) :
1683 (MLX5_FLOW_LAYER_OUTER_L3 |
1684 MLX5_FLOW_LAYER_OUTER_L4);
1685 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686 MLX5_FLOW_LAYER_OUTER_VLAN;
1688 if (item_flags & vlanm)
1689 return rte_flow_error_set(error, EINVAL,
1690 RTE_FLOW_ERROR_TYPE_ITEM, item,
1691 "multiple VLAN layers not supported");
1692 else if ((item_flags & l34m) != 0)
1693 return rte_flow_error_set(error, EINVAL,
1694 RTE_FLOW_ERROR_TYPE_ITEM, item,
1695 "VLAN cannot follow L3/L4 layer");
1697 mask = &rte_flow_item_vlan_mask;
1698 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699 (const uint8_t *)&nic_mask,
1700 sizeof(struct rte_flow_item_vlan),
1701 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1704 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705 struct mlx5_priv *priv = dev->data->dev_private;
1707 if (priv->vmwa_context) {
1709 * Non-NULL context means we have a virtual machine
1710 * and SR-IOV enabled, we have to create VLAN interface
1711 * to make hypervisor to setup E-Switch vport
1712 * context correctly. We avoid creating the multiple
1713 * VLAN interfaces, so we cannot support VLAN tag mask.
1715 return rte_flow_error_set(error, EINVAL,
1716 RTE_FLOW_ERROR_TYPE_ITEM,
1718 "VLAN tag mask is not"
1719 " supported in virtual"
1727 * GTP flags are contained in 1 byte of the format:
1728 * -------------------------------------------
1729 * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
1730 * |-----------------------------------------|
1731 * | value | Version | PT | Res | E | S | PN |
1732 * -------------------------------------------
1734 * Matching is supported only for GTP flags E, S, PN.
1736 #define MLX5_GTP_FLAGS_MASK 0x07
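/*
 * For example, 0x07 is binary 0000 0111 and covers only the E, S and PN
 * bits from the table above; the Version, PT and reserved bits cannot be
 * matched.
 */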
1739 * Validate GTP item.
1742 * Pointer to the rte_eth_dev structure.
1744 * Item specification.
1745 * @param[in] item_flags
1746 * Bit-fields that holds the items detected until now.
1748 * Pointer to error structure.
1751 * 0 on success, a negative errno value otherwise and rte_errno is set.
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755 const struct rte_flow_item *item,
1756 uint64_t item_flags,
1757 struct rte_flow_error *error)
1759 struct mlx5_priv *priv = dev->data->dev_private;
1760 const struct rte_flow_item_gtp *spec = item->spec;
1761 const struct rte_flow_item_gtp *mask = item->mask;
1762 const struct rte_flow_item_gtp nic_mask = {
1763 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1765 .teid = RTE_BE32(0xffffffff),
1768 if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769 return rte_flow_error_set(error, ENOTSUP,
1770 RTE_FLOW_ERROR_TYPE_ITEM, item,
1771 "GTP support is not enabled");
1772 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773 return rte_flow_error_set(error, ENOTSUP,
1774 RTE_FLOW_ERROR_TYPE_ITEM, item,
1775 "multiple tunnel layers not"
1777 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778 return rte_flow_error_set(error, EINVAL,
1779 RTE_FLOW_ERROR_TYPE_ITEM, item,
1780 "no outer UDP layer found");
1782 mask = &rte_flow_item_gtp_mask;
1783 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1784 return rte_flow_error_set(error, ENOTSUP,
1785 RTE_FLOW_ERROR_TYPE_ITEM, item,
1786 "Match is supported for GTP"
1788 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789 (const uint8_t *)&nic_mask,
1790 sizeof(struct rte_flow_item_gtp),
1791 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1795 * Validate IPV4 item.
1796 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1797 * add specific validation of fragment_offset field,
1800 * Item specification.
1801 * @param[in] item_flags
1802 * Bit-fields that holds the items detected until now.
1804 * Pointer to error structure.
1807 * 0 on success, a negative errno value otherwise and rte_errno is set.
1810 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1811 uint64_t item_flags,
1813 uint16_t ether_type,
1814 struct rte_flow_error *error)
1817 const struct rte_flow_item_ipv4 *spec = item->spec;
1818 const struct rte_flow_item_ipv4 *last = item->last;
1819 const struct rte_flow_item_ipv4 *mask = item->mask;
1820 rte_be16_t fragment_offset_spec = 0;
1821 rte_be16_t fragment_offset_last = 0;
1822 const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1824 .src_addr = RTE_BE32(0xffffffff),
1825 .dst_addr = RTE_BE32(0xffffffff),
1826 .type_of_service = 0xff,
1827 .fragment_offset = RTE_BE16(0xffff),
1828 .next_proto_id = 0xff,
1829 .time_to_live = 0xff,
1833 ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1834 ether_type, &nic_ipv4_mask,
1835 MLX5_ITEM_RANGE_ACCEPTED, error);
1839 fragment_offset_spec = spec->hdr.fragment_offset &
1840 mask->hdr.fragment_offset;
1841 if (!fragment_offset_spec)
1844 * spec and mask are valid, enforce using full mask to make sure the
1845 * complete value is used correctly.
1847 if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1848 != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1849 return rte_flow_error_set(error, EINVAL,
1850 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1851 item, "must use full mask for"
1852 " fragment_offset");
1854 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1855 * indicating this is the 1st fragment of a fragmented packet.
1856 * This is not yet supported in MLX5; return an appropriate error message.
1858 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1859 return rte_flow_error_set(error, ENOTSUP,
1860 RTE_FLOW_ERROR_TYPE_ITEM, item,
1861 "match on first fragment not "
1863 if (fragment_offset_spec && !last)
1864 return rte_flow_error_set(error, ENOTSUP,
1865 RTE_FLOW_ERROR_TYPE_ITEM, item,
1866 "specified value not supported");
1867 /* spec and last are valid, validate the specified range. */
1868 fragment_offset_last = last->hdr.fragment_offset &
1869 mask->hdr.fragment_offset;
1871 * Match on fragment_offset spec 0x2001 and last 0x3fff
1872 * means MF is 1 and frag-offset is > 0.
1873 * This packet is the 2nd fragment or later, excluding the last.
1874 * This is not yet supported in MLX5, return appropriate
1877 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1878 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1879 return rte_flow_error_set(error, ENOTSUP,
1880 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1881 last, "match on following "
1882 "fragments not supported");
1884 * Match on fragment_offset spec 0x0001 and last 0x1fff
1885 * means MF is 0 and frag-offset is > 0.
1886 * This packet is the last fragment of a fragmented packet.
1887 * This is not yet supported in MLX5, return appropriate
1890 if (fragment_offset_spec == RTE_BE16(1) &&
1891 fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1892 return rte_flow_error_set(error, ENOTSUP,
1893 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1894 last, "match on last "
1895 "fragment not supported");
1897 * Match on fragment_offset spec 0x0001 and last 0x3fff
1898 * means MF and/or frag-offset is not 0.
1899 * This is a fragmented packet.
1900 * Other range values are invalid and rejected.
1902 if (!(fragment_offset_spec == RTE_BE16(1) &&
1903 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1904 return rte_flow_error_set(error, ENOTSUP,
1905 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1906 "specified range not supported");
1911 * Validate IPV6 fragment extension item.
1914 * Item specification.
1915 * @param[in] item_flags
1916 * Bit-fields that holds the items detected until now.
1918 * Pointer to error structure.
1921 * 0 on success, a negative errno value otherwise and rte_errno is set.
1924 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1925 uint64_t item_flags,
1926 struct rte_flow_error *error)
1928 const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1929 const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1930 const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1931 rte_be16_t frag_data_spec = 0;
1932 rte_be16_t frag_data_last = 0;
1933 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1934 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1935 MLX5_FLOW_LAYER_OUTER_L4;
1937 struct rte_flow_item_ipv6_frag_ext nic_mask = {
1939 .next_header = 0xff,
1940 .frag_data = RTE_BE16(0xffff),
1944 if (item_flags & l4m)
1945 return rte_flow_error_set(error, EINVAL,
1946 RTE_FLOW_ERROR_TYPE_ITEM, item,
1947 "ipv6 fragment extension item cannot "
1949 if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1950 (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1951 return rte_flow_error_set(error, EINVAL,
1952 RTE_FLOW_ERROR_TYPE_ITEM, item,
1953 "ipv6 fragment extension item must "
1954 "follow ipv6 item");
1956 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1957 if (!frag_data_spec)
1960 * spec and mask are valid, enforce using full mask to make sure the
1961 * complete value is used correctly.
1963 if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1964 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1965 return rte_flow_error_set(error, EINVAL,
1966 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1967 item, "must use full mask for"
1970 * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
1971 * This is the 1st fragment of a fragmented packet.
1973 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1974 return rte_flow_error_set(error, ENOTSUP,
1975 RTE_FLOW_ERROR_TYPE_ITEM, item,
1976 "match on first fragment not "
1978 if (frag_data_spec && !last)
1979 return rte_flow_error_set(error, EINVAL,
1980 RTE_FLOW_ERROR_TYPE_ITEM, item,
1981 "specified value not supported");
1982 ret = mlx5_flow_item_acceptable
1983 (item, (const uint8_t *)mask,
1984 (const uint8_t *)&nic_mask,
1985 sizeof(struct rte_flow_item_ipv6_frag_ext),
1986 MLX5_ITEM_RANGE_ACCEPTED, error);
1989 /* spec and last are valid, validate the specified range. */
1990 frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1992 * Match on frag_data spec 0x0009 and last 0xfff9
1993 * means M is 1 and frag-offset is > 0.
1994 * This packet is the 2nd fragment or later, excluding the last.
1995 * This is not yet supported in MLX5, return appropriate
1998 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1999 RTE_IPV6_EHDR_MF_MASK) &&
2000 frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2001 return rte_flow_error_set(error, ENOTSUP,
2002 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2003 last, "match on following "
2004 "fragments not supported");
2006 * Match on frag_data spec 0x0008 and last 0xfff8
2007 * means M is 0 and frag-offset is > 0.
2008 * This packet is the last fragment of a fragmented packet.
2009 * This is not yet supported in MLX5, return appropriate
2012 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2013 frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2014 return rte_flow_error_set(error, ENOTSUP,
2015 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2016 last, "match on last "
2017 "fragment not supported");
2018 /* Other range values are invalid and rejected. */
2019 return rte_flow_error_set(error, EINVAL,
2020 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2021 "specified range not supported");
2025 * Validate the pop VLAN action.
2028 * Pointer to the rte_eth_dev structure.
2029 * @param[in] action_flags
2030 * Holds the actions detected until now.
2032 * Pointer to the pop vlan action.
2033 * @param[in] item_flags
2034 * The items found in this flow rule.
2036 * Pointer to flow attributes.
2038 * Pointer to error structure.
2041 * 0 on success, a negative errno value otherwise and rte_errno is set.
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045 uint64_t action_flags,
2046 const struct rte_flow_action *action,
2047 uint64_t item_flags,
2048 const struct rte_flow_attr *attr,
2049 struct rte_flow_error *error)
2051 const struct mlx5_priv *priv = dev->data->dev_private;
2055 if (!priv->sh->pop_vlan_action)
2056 return rte_flow_error_set(error, ENOTSUP,
2057 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2059 "pop vlan action is not supported");
2061 return rte_flow_error_set(error, ENOTSUP,
2062 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2064 "pop vlan action not supported for "
2066 if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067 return rte_flow_error_set(error, ENOTSUP,
2068 RTE_FLOW_ERROR_TYPE_ACTION, action,
2069 "no support for multiple VLAN "
2071 /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072 if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074 return rte_flow_error_set(error, ENOTSUP,
2075 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2077 "cannot pop vlan after decap without "
2078 "match on inner vlan in the flow");
2079 /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082 return rte_flow_error_set(error, ENOTSUP,
2083 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2085 "cannot pop vlan without a "
2086 "match on (outer) vlan in the flow");
2087 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088 return rte_flow_error_set(error, EINVAL,
2089 RTE_FLOW_ERROR_TYPE_ACTION, action,
2090 "wrong action order, port_id should "
2091 "be after pop VLAN action");
2092 if (!attr->transfer && priv->representor)
2093 return rte_flow_error_set(error, ENOTSUP,
2094 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095 "pop vlan action for VF representor "
2096 "not supported on NIC table");
2101 * Get the default VLAN info from the VLAN match info.
2104 * The list of item specifications.
2106 * Pointer to the VLAN info to fill.
2109 * 0 on success, a negative errno value otherwise and rte_errno is set.
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113 struct rte_vlan_hdr *vlan)
2115 const struct rte_flow_item_vlan nic_mask = {
2116 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117 MLX5DV_FLOW_VLAN_VID_MASK),
2118 .inner_type = RTE_BE16(0xffff),
2123 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124 int type = items->type;
2126 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2130 if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132 const struct rte_flow_item_vlan *vlan_v = items->spec;
2134 /* If VLAN item in pattern doesn't contain data, return here. */
2139 /* Only full match values are accepted */
2140 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2144 rte_be_to_cpu_16(vlan_v->tci &
2145 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2147 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2151 rte_be_to_cpu_16(vlan_v->tci &
2152 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2154 if (vlan_m->inner_type == nic_mask.inner_type)
2155 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156 vlan_m->inner_type);
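/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * TCI layout relied upon above - PCP in bits 15:13, DEI in bit 12 and the
 * VID in bits 11:0 of the 16-bit tag control information.
 */
static inline void
example_parse_tci(rte_be16_t tci_be, uint8_t *pcp, uint16_t *vid)
{
	uint16_t tci = rte_be_to_cpu_16(tci_be);

	*pcp = (tci >> 13) & 0x7;	/* Priority code point. */
	*vid = tci & 0x0fff;		/* VLAN identifier. */
}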
2161 * Validate the push VLAN action.
2164 * Pointer to the rte_eth_dev structure.
2165 * @param[in] action_flags
2166 * Holds the actions detected until now.
2167 * @param[in] item_flags
2168 * The items found in this flow rule.
2170 * Pointer to the action structure.
2172 * Pointer to flow attributes
2174 * Pointer to error structure.
2177 * 0 on success, a negative errno value otherwise and rte_errno is set.
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181 uint64_t action_flags,
2182 const struct rte_flow_item_vlan *vlan_m,
2183 const struct rte_flow_action *action,
2184 const struct rte_flow_attr *attr,
2185 struct rte_flow_error *error)
2187 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188 const struct mlx5_priv *priv = dev->data->dev_private;
2190 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192 return rte_flow_error_set(error, EINVAL,
2193 RTE_FLOW_ERROR_TYPE_ACTION, action,
2194 "invalid vlan ethertype");
2195 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196 return rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ACTION, action,
2198 "wrong action order, port_id should "
2199 "be after push VLAN");
2200 if (!attr->transfer && priv->representor)
2201 return rte_flow_error_set(error, ENOTSUP,
2202 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203 "push vlan action for VF representor "
2204 "not supported on NIC table");
2206 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210 !(mlx5_flow_find_action
2211 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212 return rte_flow_error_set(error, EINVAL,
2213 RTE_FLOW_ERROR_TYPE_ACTION, action,
2214 "not full match mask on VLAN PCP and "
2215 "there is no of_set_vlan_pcp action, "
2216 "push VLAN action cannot figure out "
2219 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223 !(mlx5_flow_find_action
2224 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225 return rte_flow_error_set(error, EINVAL,
2226 RTE_FLOW_ERROR_TYPE_ACTION, action,
2227 "not full match mask on VLAN VID and "
2228 "there is no of_set_vlan_vid action, "
2229 "push VLAN action cannot figure out "
2236 * Validate the set VLAN PCP.
2238 * @param[in] action_flags
2239 * Holds the actions detected until now.
2240 * @param[in] actions
2241 * Pointer to the list of actions remaining in the flow rule.
2243 * Pointer to error structure.
2246 * 0 on success, a negative errno value otherwise and rte_errno is set.
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250 const struct rte_flow_action actions[],
2251 struct rte_flow_error *error)
2253 const struct rte_flow_action *action = actions;
2254 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2256 if (conf->vlan_pcp > 7)
2257 return rte_flow_error_set(error, EINVAL,
2258 RTE_FLOW_ERROR_TYPE_ACTION, action,
2259 "VLAN PCP value is too big");
2260 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261 return rte_flow_error_set(error, ENOTSUP,
2262 RTE_FLOW_ERROR_TYPE_ACTION, action,
2263 "set VLAN PCP action must follow "
2264 "the push VLAN action");
2265 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266 return rte_flow_error_set(error, ENOTSUP,
2267 RTE_FLOW_ERROR_TYPE_ACTION, action,
2268 "Multiple VLAN PCP modification are "
2270 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271 return rte_flow_error_set(error, EINVAL,
2272 RTE_FLOW_ERROR_TYPE_ACTION, action,
2273 "wrong action order, port_id should "
2274 "be after set VLAN PCP");
2279 * Validate the set VLAN VID.
2281 * @param[in] item_flags
2282 * Holds the items detected in this rule.
2283 * @param[in] action_flags
2284 * Holds the actions detected until now.
2285 * @param[in] actions
2286 * Pointer to the list of actions remaining in the flow rule.
2288 * Pointer to error structure.
2291 * 0 on success, a negative errno value otherwise and rte_errno is set.
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295 uint64_t action_flags,
2296 const struct rte_flow_action actions[],
2297 struct rte_flow_error *error)
2299 const struct rte_flow_action *action = actions;
2300 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2302 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303 return rte_flow_error_set(error, EINVAL,
2304 RTE_FLOW_ERROR_TYPE_ACTION, action,
2305 "VLAN VID value is too big");
2306 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308 return rte_flow_error_set(error, ENOTSUP,
2309 RTE_FLOW_ERROR_TYPE_ACTION, action,
2310 "set VLAN VID action must follow push"
2311 " VLAN action or match on VLAN item");
2312 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313 return rte_flow_error_set(error, ENOTSUP,
2314 RTE_FLOW_ERROR_TYPE_ACTION, action,
2315 "Multiple VLAN VID modifications are "
2317 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318 return rte_flow_error_set(error, EINVAL,
2319 RTE_FLOW_ERROR_TYPE_ACTION, action,
2320 "wrong action order, port_id should "
2321 "be after set VLAN VID");
2326 * Validate the FLAG action.
2329 * Pointer to the rte_eth_dev structure.
2330 * @param[in] action_flags
2331 * Holds the actions detected until now.
2333 * Pointer to flow attributes
2335 * Pointer to error structure.
2338 * 0 on success, a negative errno value otherwise and rte_errno is set.
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342 uint64_t action_flags,
2343 const struct rte_flow_attr *attr,
2344 struct rte_flow_error *error)
2346 struct mlx5_priv *priv = dev->data->dev_private;
2347 struct mlx5_dev_config *config = &priv->config;
2350 /* Fall back if no extended metadata register support. */
2351 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352 return mlx5_flow_validate_action_flag(action_flags, attr,
2354 /* Extensive metadata mode requires registers. */
2355 if (!mlx5_flow_ext_mreg_supported(dev))
2356 return rte_flow_error_set(error, ENOTSUP,
2357 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358 "no metadata registers "
2359 "to support flag action");
2360 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361 return rte_flow_error_set(error, ENOTSUP,
2362 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363 "extended metadata register"
2364 " isn't available");
2365 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2368 MLX5_ASSERT(ret > 0);
2369 if (action_flags & MLX5_FLOW_ACTION_MARK)
2370 return rte_flow_error_set(error, EINVAL,
2371 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372 "can't mark and flag in same flow");
2373 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374 return rte_flow_error_set(error, EINVAL,
2375 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2377 " actions in same flow");
2382 * Validate the MARK action.
2385 * Pointer to the rte_eth_dev structure.
2387 * Pointer to action.
2388 * @param[in] action_flags
2389 * Holds the actions detected until now.
2391 * Pointer to flow attributes
2393 * Pointer to error structure.
2396 * 0 on success, a negative errno value otherwise and rte_errno is set.
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400 const struct rte_flow_action *action,
2401 uint64_t action_flags,
2402 const struct rte_flow_attr *attr,
2403 struct rte_flow_error *error)
2405 struct mlx5_priv *priv = dev->data->dev_private;
2406 struct mlx5_dev_config *config = &priv->config;
2407 const struct rte_flow_action_mark *mark = action->conf;
2410 /* Fall back if no extended metadata register support. */
2411 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412 return mlx5_flow_validate_action_mark(action, action_flags,
2414 /* Extensive metadata mode requires registers. */
2415 if (!mlx5_flow_ext_mreg_supported(dev))
2416 return rte_flow_error_set(error, ENOTSUP,
2417 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418 "no metadata registers "
2419 "to support mark action");
2420 if (!priv->sh->dv_mark_mask)
2421 return rte_flow_error_set(error, ENOTSUP,
2422 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423 "extended metadata register"
2424 " isn't available");
2425 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2428 MLX5_ASSERT(ret > 0);
2430 return rte_flow_error_set(error, EINVAL,
2431 RTE_FLOW_ERROR_TYPE_ACTION, action,
2432 "configuration cannot be null");
2433 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434 return rte_flow_error_set(error, EINVAL,
2435 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2437 "mark id exceeds the limit");
2438 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439 return rte_flow_error_set(error, EINVAL,
2440 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441 "can't flag and mark in same flow");
2442 if (action_flags & MLX5_FLOW_ACTION_MARK)
2443 return rte_flow_error_set(error, EINVAL,
2444 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445 "can't have 2 mark actions in same"
2451 * Validate SET_META action.
2454 * Pointer to the rte_eth_dev structure.
2456 * Pointer to the action structure.
2457 * @param[in] action_flags
2458 * Holds the actions detected until now.
2460 * Pointer to flow attributes
2462 * Pointer to error structure.
2465 * 0 on success, a negative errno value otherwise and rte_errno is set.
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469 const struct rte_flow_action *action,
2470 uint64_t action_flags __rte_unused,
2471 const struct rte_flow_attr *attr,
2472 struct rte_flow_error *error)
2474 const struct rte_flow_action_set_meta *conf;
2475 uint32_t nic_mask = UINT32_MAX;
2478 if (!mlx5_flow_ext_mreg_supported(dev))
2479 return rte_flow_error_set(error, ENOTSUP,
2480 RTE_FLOW_ERROR_TYPE_ACTION, action,
2481 "extended metadata register"
2482 " isn't supported");
2483 reg = flow_dv_get_metadata_reg(dev, attr, error);
2486 if (reg != REG_A && reg != REG_B) {
2487 struct mlx5_priv *priv = dev->data->dev_private;
2489 nic_mask = priv->sh->dv_meta_mask;
2491 if (!(action->conf))
2492 return rte_flow_error_set(error, EINVAL,
2493 RTE_FLOW_ERROR_TYPE_ACTION, action,
2494 "configuration cannot be null");
2495 conf = (const struct rte_flow_action_set_meta *)action->conf;
2497 return rte_flow_error_set(error, EINVAL,
2498 RTE_FLOW_ERROR_TYPE_ACTION, action,
2499 "zero mask doesn't have any effect");
2500 if (conf->mask & ~nic_mask)
2501 return rte_flow_error_set(error, EINVAL,
2502 RTE_FLOW_ERROR_TYPE_ACTION, action,
2503 "meta data must be within reg C0");
2508 * Validate SET_TAG action.
2511 * Pointer to the rte_eth_dev structure.
2513 * Pointer to the action structure.
2514 * @param[in] action_flags
2515 * Holds the actions detected until now.
2517 * Pointer to flow attributes
2519 * Pointer to error structure.
2522 * 0 on success, a negative errno value otherwise and rte_errno is set.
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526 const struct rte_flow_action *action,
2527 uint64_t action_flags,
2528 const struct rte_flow_attr *attr,
2529 struct rte_flow_error *error)
2531 const struct rte_flow_action_set_tag *conf;
2532 const uint64_t terminal_action_flags =
2533 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534 MLX5_FLOW_ACTION_RSS;
2537 if (!mlx5_flow_ext_mreg_supported(dev))
2538 return rte_flow_error_set(error, ENOTSUP,
2539 RTE_FLOW_ERROR_TYPE_ACTION, action,
2540 "extensive metadata register"
2541 " isn't supported");
2542 if (!(action->conf))
2543 return rte_flow_error_set(error, EINVAL,
2544 RTE_FLOW_ERROR_TYPE_ACTION, action,
2545 "configuration cannot be null");
2546 conf = (const struct rte_flow_action_set_tag *)action->conf;
2548 return rte_flow_error_set(error, EINVAL,
2549 RTE_FLOW_ERROR_TYPE_ACTION, action,
2550 "zero mask doesn't have any effect");
2551 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2554 if (!attr->transfer && attr->ingress &&
2555 (action_flags & terminal_action_flags))
2556 return rte_flow_error_set(error, EINVAL,
2557 RTE_FLOW_ERROR_TYPE_ACTION, action,
2558 "set_tag has no effect"
2559 " with terminal actions");
2564 * Validate count action.
2567 * Pointer to rte_eth_dev structure.
2569 * Pointer to error structure.
2572 * 0 on success, a negative errno value otherwise and rte_errno is set.
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576 struct rte_flow_error *error)
2578 struct mlx5_priv *priv = dev->data->dev_private;
2580 if (!priv->config.devx)
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2586 return rte_flow_error_set
2588 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2590 "count action not supported");
2594 * Validate the L2 encap action.
2597 * Pointer to the rte_eth_dev structure.
2598 * @param[in] action_flags
2599 * Holds the actions detected until now.
2601 * Pointer to the action structure.
2603 * Pointer to flow attributes.
2605 * Pointer to error structure.
2608 * 0 on success, a negative errno value otherwise and rte_errno is set.
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612 uint64_t action_flags,
2613 const struct rte_flow_action *action,
2614 const struct rte_flow_attr *attr,
2615 struct rte_flow_error *error)
2617 const struct mlx5_priv *priv = dev->data->dev_private;
2619 if (!(action->conf))
2620 return rte_flow_error_set(error, EINVAL,
2621 RTE_FLOW_ERROR_TYPE_ACTION, action,
2622 "configuration cannot be null");
2623 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624 return rte_flow_error_set(error, EINVAL,
2625 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626 "can only have a single encap action "
2628 if (!attr->transfer && priv->representor)
2629 return rte_flow_error_set(error, ENOTSUP,
2630 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631 "encap action for VF representor "
2632 "not supported on NIC table");
2637 * Validate a decap action.
2640 * Pointer to the rte_eth_dev structure.
2641 * @param[in] action_flags
2642 * Holds the actions detected until now.
2644 * Pointer to flow attributes
2646 * Pointer to error structure.
2649 * 0 on success, a negative errno value otherwise and rte_errno is set.
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653 uint64_t action_flags,
2654 const struct rte_flow_attr *attr,
2655 struct rte_flow_error *error)
2657 const struct mlx5_priv *priv = dev->data->dev_private;
2659 if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660 !priv->config.decap_en)
2661 return rte_flow_error_set(error, ENOTSUP,
2662 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663 "decap is not enabled");
2664 if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665 return rte_flow_error_set(error, ENOTSUP,
2666 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2668 MLX5_FLOW_ACTION_DECAP ? "can only "
2669 "have a single decap action" : "decap "
2670 "after encap is not supported");
2671 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672 return rte_flow_error_set(error, EINVAL,
2673 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674 "can't have decap action after"
2677 return rte_flow_error_set(error, ENOTSUP,
2678 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2680 "decap action not supported for "
2682 if (!attr->transfer && priv->representor)
2683 return rte_flow_error_set(error, ENOTSUP,
2684 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685 "decap action for VF representor "
2686 "not supported on NIC table");
2690 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2693 * Validate the raw encap and decap actions.
2696 * Pointer to the rte_eth_dev structure.
2698 * Pointer to the decap action.
2700 * Pointer to the encap action.
2702 * Pointer to flow attributes
2703 * @param[in/out] action_flags
2704 * Holds the actions detected until now.
2705 * @param[out] actions_n
2706 * pointer to the number of actions counter.
2708 * Pointer to error structure.
2711 * 0 on success, a negative errno value otherwise and rte_errno is set.
2714 flow_dv_validate_action_raw_encap_decap
2715 (struct rte_eth_dev *dev,
2716 const struct rte_flow_action_raw_decap *decap,
2717 const struct rte_flow_action_raw_encap *encap,
2718 const struct rte_flow_attr *attr, uint64_t *action_flags,
2719 int *actions_n, struct rte_flow_error *error)
2721 const struct mlx5_priv *priv = dev->data->dev_private;
2724 if (encap && (!encap->size || !encap->data))
2725 return rte_flow_error_set(error, EINVAL,
2726 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727 "raw encap data cannot be empty");
2728 if (decap && encap) {
2729 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2730 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2733 else if (encap->size <=
2734 MLX5_ENCAPSULATION_DECISION_SIZE &&
2736 MLX5_ENCAPSULATION_DECISION_SIZE)
2739 else if (encap->size >
2740 MLX5_ENCAPSULATION_DECISION_SIZE &&
2742 MLX5_ENCAPSULATION_DECISION_SIZE)
2743 /* 2 L2 actions: encap and decap. */
2746 return rte_flow_error_set(error,
2748 RTE_FLOW_ERROR_TYPE_ACTION,
2749 NULL, "unsupported too small "
2750 "raw decap and too small raw "
2751 "encap combination");
2754 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2758 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2762 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2763 return rte_flow_error_set(error, ENOTSUP,
2764 RTE_FLOW_ERROR_TYPE_ACTION,
2766 "small raw encap size");
2767 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2768 return rte_flow_error_set(error, EINVAL,
2769 RTE_FLOW_ERROR_TYPE_ACTION,
2771 "more than one encap action");
2772 if (!attr->transfer && priv->representor)
2773 return rte_flow_error_set
2775 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776 "encap action for VF representor "
2777 "not supported on NIC table");
2778 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2785 * Match encap_decap resource.
2788 * Pointer to the hash list.
2790 * Pointer to the existing resource entry object.
2792 * Key of the new entry.
2794 * Pointer to new encap_decap resource.
2797 * 0 on match, non-zero otherwise.
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801 struct mlx5_hlist_entry *entry,
2802 uint64_t key __rte_unused, void *cb_ctx)
2804 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2808 cache_resource = container_of(entry,
2809 struct mlx5_flow_dv_encap_decap_resource,
2811 if (resource->entry.key == cache_resource->entry.key &&
2812 resource->reformat_type == cache_resource->reformat_type &&
2813 resource->ft_type == cache_resource->ft_type &&
2814 resource->flags == cache_resource->flags &&
2815 resource->size == cache_resource->size &&
2816 !memcmp((const void *)resource->buf,
2817 (const void *)cache_resource->buf,
2824 * Allocate encap_decap resource.
2827 * Pointer to the hash list.
2829 * Pointer to the existing resource entry object.
2831 * Pointer to new encap_decap resource.
2834 * Pointer to the new entry on success, NULL otherwise.
2836 struct mlx5_hlist_entry *
2837 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2838 uint64_t key __rte_unused,
2841 struct mlx5_dev_ctx_shared *sh = list->ctx;
2842 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2843 struct mlx5dv_dr_domain *domain;
2844 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2845 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2849 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2850 domain = sh->fdb_domain;
2851 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2852 domain = sh->rx_domain;
2854 domain = sh->tx_domain;
2855 /* Register new encap/decap resource. */
2856 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2858 if (!cache_resource) {
2859 rte_flow_error_set(ctx->error, ENOMEM,
2860 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2861 "cannot allocate resource memory");
2864 *cache_resource = *resource;
2865 cache_resource->idx = idx;
2866 ret = mlx5_flow_os_create_flow_action_packet_reformat
2867 (sh->ctx, domain, cache_resource,
2868 &cache_resource->action);
2870 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2871 rte_flow_error_set(ctx->error, ENOMEM,
2872 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2873 NULL, "cannot create action");
2877 return &cache_resource->entry;
2881 * Find existing encap/decap resource or create and register a new one.
2883 * @param[in, out] dev
2884 * Pointer to rte_eth_dev structure.
2885 * @param[in, out] resource
2886 * Pointer to encap/decap resource.
2887 * @parm[in, out] dev_flow
2888 * Pointer to the dev_flow.
2890 * pointer to error structure.
2893 * 0 on success otherwise -errno and errno is set.
2896 flow_dv_encap_decap_resource_register
2897 (struct rte_eth_dev *dev,
2898 struct mlx5_flow_dv_encap_decap_resource *resource,
2899 struct mlx5_flow *dev_flow,
2900 struct rte_flow_error *error)
2902 struct mlx5_priv *priv = dev->data->dev_private;
2903 struct mlx5_dev_ctx_shared *sh = priv->sh;
2904 struct mlx5_hlist_entry *entry;
2905 union mlx5_flow_encap_decap_key encap_decap_key = {
2907 .ft_type = resource->ft_type,
2908 .refmt_type = resource->reformat_type,
2909 .buf_size = resource->size,
2910 .table_level = !!dev_flow->dv.group,
2914 struct mlx5_flow_cb_ctx ctx = {
2919 resource->flags = dev_flow->dv.group ? 0 : 1;
2920 encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2922 resource->entry.key = encap_decap_key.v64;
2923 entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2927 resource = container_of(entry, typeof(*resource), entry);
2928 dev_flow->dv.encap_decap = resource;
2929 dev_flow->handle->dvh.rix_encap_decap = resource->idx;
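/*
 * Illustrative sketch (hypothetical mirror of the idea, see union
 * mlx5_flow_encap_decap_key in mlx5_flow.h for the actual layout): the
 * 64-bit hash key built above packs the small reformat attributes together
 * with a raw checksum of the header buffer, so the hash list only falls
 * back to the memcmp() in the match callback on checksum collisions.
 */
union example_encap_key {
	struct {
		uint32_t ft_type:8;
		uint32_t refmt_type:8;
		uint32_t buf_size:8;
		uint32_t table_level:8;
		uint32_t cksum;
	};
	uint64_t v64;
};

static inline uint64_t
example_encap_key_make(uint8_t ft_type, uint8_t refmt_type,
		       const uint8_t *buf, uint8_t buf_size, uint32_t group)
{
	union example_encap_key key = {
		.ft_type = ft_type,
		.refmt_type = refmt_type,
		.buf_size = buf_size,
		.table_level = !!group,
	};

	key.cksum = __rte_raw_cksum(buf, buf_size, 0);
	return key.v64;
}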
2934 * Find existing table jump resource or create and register a new one.
2936 * @param[in, out] dev
2937 * Pointer to rte_eth_dev structure.
2938 * @param[in, out] tbl
2939 * Pointer to flow table resource.
2940 * @parm[in, out] dev_flow
2941 * Pointer to the dev_flow.
2943 * pointer to error structure.
2946 * 0 on success otherwise -errno and errno is set.
2949 flow_dv_jump_tbl_resource_register
2950 (struct rte_eth_dev *dev __rte_unused,
2951 struct mlx5_flow_tbl_resource *tbl,
2952 struct mlx5_flow *dev_flow,
2953 struct rte_flow_error *error __rte_unused)
2955 struct mlx5_flow_tbl_data_entry *tbl_data =
2956 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2959 MLX5_ASSERT(tbl_data->jump.action);
2960 dev_flow->handle->rix_jump = tbl_data->idx;
2961 dev_flow->dv.jump = &tbl_data->jump;
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967 struct mlx5_cache_entry *entry, void *cb_ctx)
2969 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971 struct mlx5_flow_dv_port_id_action_resource *res =
2972 container_of(entry, typeof(*res), entry);
2974 return ref->port_id != res->port_id;
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979 struct mlx5_cache_entry *entry __rte_unused,
2982 struct mlx5_dev_ctx_shared *sh = list->ctx;
2983 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985 struct mlx5_flow_dv_port_id_action_resource *cache;
2989 /* Register new port id action resource. */
2990 cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2992 rte_flow_error_set(ctx->error, ENOMEM,
2993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994 "cannot allocate port_id action cache memory");
2998 ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3002 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003 rte_flow_error_set(ctx->error, ENOMEM,
3004 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005 "cannot create action");
3008 return &cache->entry;
3012 * Find existing table port ID resource or create and register a new one.
3014 * @param[in, out] dev
3015 * Pointer to rte_eth_dev structure.
3016 * @param[in, out] resource
3017 * Pointer to port ID action resource.
3018 * @parm[in, out] dev_flow
3019 * Pointer to the dev_flow.
3021 * pointer to error structure.
3024 * 0 on success otherwise -errno and errno is set.
3027 flow_dv_port_id_action_resource_register
3028 (struct rte_eth_dev *dev,
3029 struct mlx5_flow_dv_port_id_action_resource *resource,
3030 struct mlx5_flow *dev_flow,
3031 struct rte_flow_error *error)
3033 struct mlx5_priv *priv = dev->data->dev_private;
3034 struct mlx5_cache_entry *entry;
3035 struct mlx5_flow_dv_port_id_action_resource *cache;
3036 struct mlx5_flow_cb_ctx ctx = {
3041 entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3044 cache = container_of(entry, typeof(*cache), entry);
3045 dev_flow->dv.port_id_action = cache;
3046 dev_flow->handle->rix_port_id_action = cache->idx;
3051 * Find existing push vlan resource or create and register a new one.
3053 * @param [in, out] dev
3054 * Pointer to rte_eth_dev structure.
3055 * @param[in, out] resource
3056 * Pointer to port ID action resource.
3057 * @parm[in, out] dev_flow
3058 * Pointer to the dev_flow.
3060 * pointer to error structure.
3063 * 0 on success otherwise -errno and errno is set.
3066 flow_dv_push_vlan_action_resource_register
3067 (struct rte_eth_dev *dev,
3068 struct mlx5_flow_dv_push_vlan_action_resource *resource,
3069 struct mlx5_flow *dev_flow,
3070 struct rte_flow_error *error)
3072 struct mlx5_priv *priv = dev->data->dev_private;
3073 struct mlx5_dev_ctx_shared *sh = priv->sh;
3074 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
3075 struct mlx5dv_dr_domain *domain;
3079 /* Lookup a matching resource from cache. */
3080 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3081 sh->push_vlan_action_list, idx, cache_resource, next) {
3082 if (resource->vlan_tag == cache_resource->vlan_tag &&
3083 resource->ft_type == cache_resource->ft_type) {
3084 DRV_LOG(DEBUG, "push-VLAN action resource %p: "
3086 (void *)cache_resource,
3087 __atomic_load_n(&cache_resource->refcnt,
3089 __atomic_fetch_add(&cache_resource->refcnt, 1,
3091 dev_flow->handle->dvh.rix_push_vlan = idx;
3092 dev_flow->dv.push_vlan_res = cache_resource;
3096 /* Register new push_vlan action resource. */
3097 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3098 &dev_flow->handle->dvh.rix_push_vlan);
3099 if (!cache_resource)
3100 return rte_flow_error_set(error, ENOMEM,
3101 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3102 "cannot allocate resource memory");
3103 *cache_resource = *resource;
3104 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3105 domain = sh->fdb_domain;
3106 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3107 domain = sh->rx_domain;
3109 domain = sh->tx_domain;
3110 ret = mlx5_flow_os_create_flow_action_push_vlan
3111 (domain, resource->vlan_tag,
3112 &cache_resource->action);
3114 mlx5_free(cache_resource);
3115 return rte_flow_error_set(error, ENOMEM,
3116 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3117 NULL, "cannot create action");
3119 __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
3120 ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3121 &sh->push_vlan_action_list,
3122 dev_flow->handle->dvh.rix_push_vlan,
3123 cache_resource, next);
3124 dev_flow->dv.push_vlan_res = cache_resource;
3125 DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
3126 (void *)cache_resource,
3127 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
3131 * Get the header size of a specific rte_flow_item_type.
3133 * @param[in] item_type
3134 * Tested rte_flow_item_type.
3137 * Size of the item's header struct, or 0 if void or irrelevant.
3140 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3144 switch (item_type) {
3145 case RTE_FLOW_ITEM_TYPE_ETH:
3146 retval = sizeof(struct rte_ether_hdr);
3148 case RTE_FLOW_ITEM_TYPE_VLAN:
3149 retval = sizeof(struct rte_vlan_hdr);
3151 case RTE_FLOW_ITEM_TYPE_IPV4:
3152 retval = sizeof(struct rte_ipv4_hdr);
3154 case RTE_FLOW_ITEM_TYPE_IPV6:
3155 retval = sizeof(struct rte_ipv6_hdr);
3157 case RTE_FLOW_ITEM_TYPE_UDP:
3158 retval = sizeof(struct rte_udp_hdr);
3160 case RTE_FLOW_ITEM_TYPE_TCP:
3161 retval = sizeof(struct rte_tcp_hdr);
3163 case RTE_FLOW_ITEM_TYPE_VXLAN:
3164 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3165 retval = sizeof(struct rte_vxlan_hdr);
3167 case RTE_FLOW_ITEM_TYPE_GRE:
3168 case RTE_FLOW_ITEM_TYPE_NVGRE:
3169 retval = sizeof(struct rte_gre_hdr);
3171 case RTE_FLOW_ITEM_TYPE_MPLS:
3172 retval = sizeof(struct rte_mpls_hdr);
3174 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3182 #define MLX5_ENCAP_IPV4_VERSION 0x40
3183 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
3184 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
3185 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
3186 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
3187 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
3188 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
3191 * Convert the encap action data from list of rte_flow_item to raw buffer
3194 * Pointer to rte_flow_item objects list.
3196 * Pointer to the output buffer.
3198 * Pointer to the output buffer size.
3200 * Pointer to the error structure.
3203 * 0 on success, a negative errno value otherwise and rte_errno is set.
3206 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3207 size_t *size, struct rte_flow_error *error)
3209 struct rte_ether_hdr *eth = NULL;
3210 struct rte_vlan_hdr *vlan = NULL;
3211 struct rte_ipv4_hdr *ipv4 = NULL;
3212 struct rte_ipv6_hdr *ipv6 = NULL;
3213 struct rte_udp_hdr *udp = NULL;
3214 struct rte_vxlan_hdr *vxlan = NULL;
3215 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3216 struct rte_gre_hdr *gre = NULL;
3218 size_t temp_size = 0;
3221 return rte_flow_error_set(error, EINVAL,
3222 RTE_FLOW_ERROR_TYPE_ACTION,
3223 NULL, "invalid empty data");
3224 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3225 len = flow_dv_get_item_hdr_len(items->type);
3226 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3227 return rte_flow_error_set(error, EINVAL,
3228 RTE_FLOW_ERROR_TYPE_ACTION,
3229 (void *)items->type,
3230 "items total size is too big"
3231 " for encap action");
3232 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3233 switch (items->type) {
3234 case RTE_FLOW_ITEM_TYPE_ETH:
3235 eth = (struct rte_ether_hdr *)&buf[temp_size];
3237 case RTE_FLOW_ITEM_TYPE_VLAN:
3238 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3240 return rte_flow_error_set(error, EINVAL,
3241 RTE_FLOW_ERROR_TYPE_ACTION,
3242 (void *)items->type,
3243 "eth header not found");
3244 if (!eth->ether_type)
3245 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3247 case RTE_FLOW_ITEM_TYPE_IPV4:
3248 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3250 return rte_flow_error_set(error, EINVAL,
3251 RTE_FLOW_ERROR_TYPE_ACTION,
3252 (void *)items->type,
3253 "neither eth nor vlan"
3255 if (vlan && !vlan->eth_proto)
3256 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3257 else if (eth && !eth->ether_type)
3258 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3259 if (!ipv4->version_ihl)
3260 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3261 MLX5_ENCAP_IPV4_IHL_MIN;
3262 if (!ipv4->time_to_live)
3263 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3265 case RTE_FLOW_ITEM_TYPE_IPV6:
3266 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3268 return rte_flow_error_set(error, EINVAL,
3269 RTE_FLOW_ERROR_TYPE_ACTION,
3270 (void *)items->type,
3271 "neither eth nor vlan"
3273 if (vlan && !vlan->eth_proto)
3274 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3275 else if (eth && !eth->ether_type)
3276 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3277 if (!ipv6->vtc_flow)
3279 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3280 if (!ipv6->hop_limits)
3281 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3283 case RTE_FLOW_ITEM_TYPE_UDP:
3284 udp = (struct rte_udp_hdr *)&buf[temp_size];
3286 return rte_flow_error_set(error, EINVAL,
3287 RTE_FLOW_ERROR_TYPE_ACTION,
3288 (void *)items->type,
3289 "ip header not found");
3290 if (ipv4 && !ipv4->next_proto_id)
3291 ipv4->next_proto_id = IPPROTO_UDP;
3292 else if (ipv6 && !ipv6->proto)
3293 ipv6->proto = IPPROTO_UDP;
3295 case RTE_FLOW_ITEM_TYPE_VXLAN:
3296 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3298 return rte_flow_error_set(error, EINVAL,
3299 RTE_FLOW_ERROR_TYPE_ACTION,
3300 (void *)items->type,
3301 "udp header not found");
3303 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3304 if (!vxlan->vx_flags)
3306 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3308 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3309 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3311 return rte_flow_error_set(error, EINVAL,
3312 RTE_FLOW_ERROR_TYPE_ACTION,
3313 (void *)items->type,
3314 "udp header not found");
3315 if (!vxlan_gpe->proto)
3316 return rte_flow_error_set(error, EINVAL,
3317 RTE_FLOW_ERROR_TYPE_ACTION,
3318 (void *)items->type,
3319 "next protocol not found");
3322 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3323 if (!vxlan_gpe->vx_flags)
3324 vxlan_gpe->vx_flags =
3325 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3327 case RTE_FLOW_ITEM_TYPE_GRE:
3328 case RTE_FLOW_ITEM_TYPE_NVGRE:
3329 gre = (struct rte_gre_hdr *)&buf[temp_size];
3331 return rte_flow_error_set(error, EINVAL,
3332 RTE_FLOW_ERROR_TYPE_ACTION,
3333 (void *)items->type,
3334 "next protocol not found");
3336 return rte_flow_error_set(error, EINVAL,
3337 RTE_FLOW_ERROR_TYPE_ACTION,
3338 (void *)items->type,
3339 "ip header not found");
3340 if (ipv4 && !ipv4->next_proto_id)
3341 ipv4->next_proto_id = IPPROTO_GRE;
3342 else if (ipv6 && !ipv6->proto)
3343 ipv6->proto = IPPROTO_GRE;
3345 case RTE_FLOW_ITEM_TYPE_VOID:
3348 return rte_flow_error_set(error, EINVAL,
3349 RTE_FLOW_ERROR_TYPE_ACTION,
3350 (void *)items->type,
3351 "unsupported item type");
3361 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3363 struct rte_ether_hdr *eth = NULL;
3364 struct rte_vlan_hdr *vlan = NULL;
3365 struct rte_ipv6_hdr *ipv6 = NULL;
3366 struct rte_udp_hdr *udp = NULL;
3370 eth = (struct rte_ether_hdr *)data;
3371 next_hdr = (char *)(eth + 1);
3372 proto = RTE_BE16(eth->ether_type);
3375 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3376 vlan = (struct rte_vlan_hdr *)next_hdr;
3377 proto = RTE_BE16(vlan->eth_proto);
3378 next_hdr += sizeof(struct rte_vlan_hdr);
3381 /* HW calculates IPv4 csum. no need to proceed */
3382 if (proto == RTE_ETHER_TYPE_IPV4)
3385 /* Non IPv4/IPv6 header, not supported. */
3386 if (proto != RTE_ETHER_TYPE_IPV6) {
3387 return rte_flow_error_set(error, ENOTSUP,
3388 RTE_FLOW_ERROR_TYPE_ACTION,
3389 NULL, "Cannot offload non IPv4/IPv6");
3392 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3394 /* ignore non UDP */
3395 if (ipv6->proto != IPPROTO_UDP)
3398 udp = (struct rte_udp_hdr *)(ipv6 + 1);
3399 udp->dgram_cksum = 0;
3405 * Convert L2 encap action to DV specification.
3408 * Pointer to rte_eth_dev structure.
3410 * Pointer to action structure.
3411 * @param[in, out] dev_flow
3412 * Pointer to the mlx5_flow.
3413 * @param[in] transfer
3414 * Mark if the flow is E-Switch flow.
3416 * Pointer to the error structure.
3419 * 0 on success, a negative errno value otherwise and rte_errno is set.
3422 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3423 const struct rte_flow_action *action,
3424 struct mlx5_flow *dev_flow,
3426 struct rte_flow_error *error)
3428 const struct rte_flow_item *encap_data;
3429 const struct rte_flow_action_raw_encap *raw_encap_data;
3430 struct mlx5_flow_dv_encap_decap_resource res = {
3432 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3433 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3434 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3437 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3439 (const struct rte_flow_action_raw_encap *)action->conf;
3440 res.size = raw_encap_data->size;
3441 memcpy(res.buf, raw_encap_data->data, res.size);
3443 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3445 ((const struct rte_flow_action_vxlan_encap *)
3446 action->conf)->definition;
3449 ((const struct rte_flow_action_nvgre_encap *)
3450 action->conf)->definition;
3451 if (flow_dv_convert_encap_data(encap_data, res.buf,
3455 if (flow_dv_zero_encap_udp_csum(res.buf, error))
3457 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3458 return rte_flow_error_set(error, EINVAL,
3459 RTE_FLOW_ERROR_TYPE_ACTION,
3460 NULL, "can't create L2 encap action");
3465 * Convert L2 decap action to DV specification.
3468 * Pointer to rte_eth_dev structure.
3469 * @param[in, out] dev_flow
3470 * Pointer to the mlx5_flow.
3471 * @param[in] transfer
3472 * Mark if the flow is E-Switch flow.
3474 * Pointer to the error structure.
3477 * 0 on success, a negative errno value otherwise and rte_errno is set.
3480 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3481 struct mlx5_flow *dev_flow,
3483 struct rte_flow_error *error)
3485 struct mlx5_flow_dv_encap_decap_resource res = {
3488 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3489 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3490 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3493 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3494 return rte_flow_error_set(error, EINVAL,
3495 RTE_FLOW_ERROR_TYPE_ACTION,
3496 NULL, "can't create L2 decap action");
3501 * Convert raw decap/encap (L3 tunnel) action to DV specification.
3504 * Pointer to rte_eth_dev structure.
3506 * Pointer to action structure.
3507 * @param[in, out] dev_flow
3508 * Pointer to the mlx5_flow.
3510 * Pointer to the flow attributes.
3512 * Pointer to the error structure.
3515 * 0 on success, a negative errno value otherwise and rte_errno is set.
3518 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3519 const struct rte_flow_action *action,
3520 struct mlx5_flow *dev_flow,
3521 const struct rte_flow_attr *attr,
3522 struct rte_flow_error *error)
3524 const struct rte_flow_action_raw_encap *encap_data;
3525 struct mlx5_flow_dv_encap_decap_resource res;
3527 memset(&res, 0, sizeof(res));
3528 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3529 res.size = encap_data->size;
3530 memcpy(res.buf, encap_data->data, res.size);
3531 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3532 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3533 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3535 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3537 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3538 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3539 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3540 return rte_flow_error_set(error, EINVAL,
3541 RTE_FLOW_ERROR_TYPE_ACTION,
3542 NULL, "can't create encap action");
3547 * Create action push VLAN.
3550 * Pointer to rte_eth_dev structure.
3552 * Pointer to the flow attributes.
3554 * Pointer to the vlan to push to the Ethernet header.
3555 * @param[in, out] dev_flow
3556 * Pointer to the mlx5_flow.
3558 * Pointer to the error structure.
3561 * 0 on success, a negative errno value otherwise and rte_errno is set.
3564 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3565 const struct rte_flow_attr *attr,
3566 const struct rte_vlan_hdr *vlan,
3567 struct mlx5_flow *dev_flow,
3568 struct rte_flow_error *error)
3570 struct mlx5_flow_dv_push_vlan_action_resource res;
3572 memset(&res, 0, sizeof(res));
3574 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3577 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3579 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3580 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3581 return flow_dv_push_vlan_action_resource_register
3582 (dev, &res, dev_flow, error);
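/*
 * Illustrative sketch (hypothetical values): the 32-bit vlan_tag handed to
 * the push VLAN action resource above is the TPID in the upper 16 bits and
 * the TCI in the lower 16 bits, converted to big endian as one word.
 */
static inline rte_be32_t
example_push_vlan_tag(void)
{
	const uint16_t tci = (1 << 13) | 100;	/* PCP 1, DEI 0, VID 100. */

	return rte_cpu_to_be_32((uint32_t)RTE_ETHER_TYPE_VLAN << 16 | tci);
}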
3585 static int fdb_mirror;
3588 * Validate the modify-header actions.
3590 * @param[in] action_flags
3591 * Holds the actions detected until now.
3593 * Pointer to the modify action.
3595 * Pointer to error structure.
3598 * 0 on success, a negative errno value otherwise and rte_errno is set.
3601 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3602 const struct rte_flow_action *action,
3603 struct rte_flow_error *error)
3605 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3606 return rte_flow_error_set(error, EINVAL,
3607 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3608 NULL, "action configuration not set");
3609 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3610 return rte_flow_error_set(error, EINVAL,
3611 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3612 "can't have encap action before"
3614 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3615 return rte_flow_error_set(error, EINVAL,
3616 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3617 "can't support sample action before"
3618 " modify action for E-Switch"
3624 * Validate the modify-header MAC address actions.
3626 * @param[in] action_flags
3627 * Holds the actions detected until now.
3629 * Pointer to the modify action.
3630 * @param[in] item_flags
3631 * Holds the items detected.
3633 * Pointer to error structure.
3636 * 0 on success, a negative errno value otherwise and rte_errno is set.
3639 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3640 const struct rte_flow_action *action,
3641 const uint64_t item_flags,
3642 struct rte_flow_error *error)
3646 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3648 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3649 return rte_flow_error_set(error, EINVAL,
3650 RTE_FLOW_ERROR_TYPE_ACTION,
3652 "no L2 item in pattern");
3658 * Validate the modify-header IPv4 address actions.
3660 * @param[in] action_flags
3661 * Holds the actions detected until now.
3663 * Pointer to the modify action.
3664 * @param[in] item_flags
3665 * Holds the items detected.
3667 * Pointer to error structure.
3670 * 0 on success, a negative errno value otherwise and rte_errno is set.
3673 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3674 const struct rte_flow_action *action,
3675 const uint64_t item_flags,
3676 struct rte_flow_error *error)
3681 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3683 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3684 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3685 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3686 if (!(item_flags & layer))
3687 return rte_flow_error_set(error, EINVAL,
3688 RTE_FLOW_ERROR_TYPE_ACTION,
3690 "no ipv4 item in pattern");
3696 * Validate the modify-header IPv6 address actions.
3698 * @param[in] action_flags
3699 * Holds the actions detected until now.
3701 * Pointer to the modify action.
3702 * @param[in] item_flags
3703 * Holds the items detected.
3705 * Pointer to error structure.
3708 * 0 on success, a negative errno value otherwise and rte_errno is set.
3711 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3712 const struct rte_flow_action *action,
3713 const uint64_t item_flags,
3714 struct rte_flow_error *error)
3719 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3721 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3722 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3723 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3724 if (!(item_flags & layer))
3725 return rte_flow_error_set(error, EINVAL,
3726 RTE_FLOW_ERROR_TYPE_ACTION,
3728 "no ipv6 item in pattern");
3734 * Validate the modify-header TP actions.
3736 * @param[in] action_flags
3737 * Holds the actions detected until now.
3739 * Pointer to the modify action.
3740 * @param[in] item_flags
3741 * Holds the items detected.
3743 * Pointer to error structure.
3746 * 0 on success, a negative errno value otherwise and rte_errno is set.
3749 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3750 const struct rte_flow_action *action,
3751 const uint64_t item_flags,
3752 struct rte_flow_error *error)
3757 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3759 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3760 MLX5_FLOW_LAYER_INNER_L4 :
3761 MLX5_FLOW_LAYER_OUTER_L4;
3762 if (!(item_flags & layer))
3763 return rte_flow_error_set(error, EINVAL,
3764 RTE_FLOW_ERROR_TYPE_ACTION,
3765 NULL, "no transport layer "
3772 * Validate the modify-header actions of increment/decrement
3773 * TCP Sequence-number.
3775 * @param[in] action_flags
3776 * Holds the actions detected until now.
3778 * Pointer to the modify action.
3779 * @param[in] item_flags
3780 * Holds the items detected.
3782 * Pointer to error structure.
3785 * 0 on success, a negative errno value otherwise and rte_errno is set.
3788 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3789 const struct rte_flow_action *action,
3790 const uint64_t item_flags,
3791 struct rte_flow_error *error)
3796 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3798 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3799 MLX5_FLOW_LAYER_INNER_L4_TCP :
3800 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3801 if (!(item_flags & layer))
3802 return rte_flow_error_set(error, EINVAL,
3803 RTE_FLOW_ERROR_TYPE_ACTION,
3804 NULL, "no TCP item in"
3806 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3807 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3808 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3809 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3810 return rte_flow_error_set(error, EINVAL,
3811 RTE_FLOW_ERROR_TYPE_ACTION,
3813 "cannot decrease and increase"
3814 " TCP sequence number"
3815 " at the same time");
3821 * Validate the modify-header actions of increment/decrement
3822 * TCP Acknowledgment number.
3824 * @param[in] action_flags
3825 * Holds the actions detected until now.
3827 * Pointer to the modify action.
3828 * @param[in] item_flags
3829 * Holds the items detected.
3831 * Pointer to error structure.
3834 * 0 on success, a negative errno value otherwise and rte_errno is set.
3837 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3838 const struct rte_flow_action *action,
3839 const uint64_t item_flags,
3840 struct rte_flow_error *error)
3845 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3847 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3848 MLX5_FLOW_LAYER_INNER_L4_TCP :
3849 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3850 if (!(item_flags & layer))
3851 return rte_flow_error_set(error, EINVAL,
3852 RTE_FLOW_ERROR_TYPE_ACTION,
3853 NULL, "no TCP item in"
3855 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3856 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3857 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3858 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3859 return rte_flow_error_set(error, EINVAL,
3860 RTE_FLOW_ERROR_TYPE_ACTION,
3862 "cannot decrease and increase"
3863 " TCP acknowledgment number"
3864 " at the same time");
3870 * Validate the modify-header TTL actions.
3872 * @param[in] action_flags
3873 * Holds the actions detected until now.
3875 * Pointer to the modify action.
3876 * @param[in] item_flags
3877 * Holds the items detected.
3879 * Pointer to error structure.
3882 * 0 on success, a negative errno value otherwise and rte_errno is set.
3885 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3886 const struct rte_flow_action *action,
3887 const uint64_t item_flags,
3888 struct rte_flow_error *error)
3893 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3895 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3896 MLX5_FLOW_LAYER_INNER_L3 :
3897 MLX5_FLOW_LAYER_OUTER_L3;
3898 if (!(item_flags & layer))
3899 return rte_flow_error_set(error, EINVAL,
3900 RTE_FLOW_ERROR_TYPE_ACTION,
3902 "no IP protocol in pattern");
3908 * Validate jump action.
3911 * Pointer to the jump action.
3912 * @param[in] action_flags
3913 * Holds the actions detected until now.
3914 * @param[in] attributes
3915 * Pointer to flow attributes
3916 * @param[in] external
3917 * Action belongs to flow rule created by request external to PMD.
3919 * Pointer to error structure.
3922 * 0 on success, a negative errno value otherwise and rte_errno is set.
3925 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3926 const struct mlx5_flow_tunnel *tunnel,
3927 const struct rte_flow_action *action,
3928 uint64_t action_flags,
3929 const struct rte_flow_attr *attributes,
3930 bool external, struct rte_flow_error *error)
3932 uint32_t target_group, table;
3934 struct flow_grp_info grp_info = {
3935 .external = !!external,
3936 .transfer = !!attributes->transfer,
3940 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3941 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3942 return rte_flow_error_set(error, EINVAL,
3943 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3944 "can't have 2 fate actions in"
3946 if (action_flags & MLX5_FLOW_ACTION_METER)
3947 return rte_flow_error_set(error, ENOTSUP,
3948 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3949 "jump with meter not support");
3950 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3951 return rte_flow_error_set(error, EINVAL,
3952 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3953 "E-Switch mirroring can't support"
3954 " Sample action and jump action in"
3957 return rte_flow_error_set(error, EINVAL,
3958 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3959 NULL, "action configuration not set");
3961 ((const struct rte_flow_action_jump *)action->conf)->group;
3962 ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3966 if (attributes->group == target_group &&
3967 !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3968 MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3969 return rte_flow_error_set(error, EINVAL,
3970 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3971 "target group must be other than"
3972 " the current flow group");
3977 * Validate the port_id action.
3980 * Pointer to rte_eth_dev structure.
3981 * @param[in] action_flags
3982 * Bit-fields that holds the actions detected until now.
3984 * Port_id RTE action structure.
3986 * Attributes of flow that includes this action.
3988 * Pointer to error structure.
3991 * 0 on success, a negative errno value otherwise and rte_errno is set.
3994 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3995 uint64_t action_flags,
3996 const struct rte_flow_action *action,
3997 const struct rte_flow_attr *attr,
3998 struct rte_flow_error *error)
4000 const struct rte_flow_action_port_id *port_id;
4001 struct mlx5_priv *act_priv;
4002 struct mlx5_priv *dev_priv;
4005 if (!attr->transfer)
4006 return rte_flow_error_set(error, ENOTSUP,
4007 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4009 "port id action is valid in transfer"
4011 if (!action || !action->conf)
4012 return rte_flow_error_set(error, ENOTSUP,
4013 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4015 "port id action parameters must be"
4017 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4018 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4019 return rte_flow_error_set(error, EINVAL,
4020 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4021 "can have only one fate actions in"
4023 dev_priv = mlx5_dev_to_eswitch_info(dev);
4025 return rte_flow_error_set(error, rte_errno,
4026 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4028 "failed to obtain E-Switch info");
4029 port_id = action->conf;
4030 port = port_id->original ? dev->data->port_id : port_id->id;
4031 act_priv = mlx5_port_to_eswitch_info(port, false);
4033 return rte_flow_error_set
4035 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4036 "failed to obtain E-Switch port id for port");
4037 if (act_priv->domain_id != dev_priv->domain_id)
4038 return rte_flow_error_set
4040 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4041 "port does not belong to"
4042 " E-Switch being configured");
4047 * Get the maximum number of modify header actions.
4050 * Pointer to rte_eth_dev structure.
4052 * Flags bits to check if root level.
4055 * Max number of modify header actions device can support.
4057 static inline unsigned int
4058 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4062 * There's no way to directly query the max capacity from FW.
4063 * The maximal value on root table should be assumed to be supported.
4065 if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4066 return MLX5_MAX_MODIFY_NUM;
4068 return MLX5_ROOT_TBL_MODIFY_NUM;
4072 * Validate the meter action.
4075 * Pointer to rte_eth_dev structure.
4076 * @param[in] action_flags
4077 * Bit-fields that holds the actions detected until now.
4079 * Pointer to the meter action.
4081 * Attributes of flow that includes this action.
4083 * Pointer to error structure.
4086 * 0 on success, a negative errno value otherwise and rte_ernno is set.
4089 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4090 uint64_t action_flags,
4091 const struct rte_flow_action *action,
4092 const struct rte_flow_attr *attr,
4093 struct rte_flow_error *error)
4095 struct mlx5_priv *priv = dev->data->dev_private;
4096 const struct rte_flow_action_meter *am = action->conf;
4097 struct mlx5_flow_meter *fm;
4100 return rte_flow_error_set(error, EINVAL,
4101 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4102 "meter action conf is NULL");
4104 if (action_flags & MLX5_FLOW_ACTION_METER)
4105 return rte_flow_error_set(error, ENOTSUP,
4106 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4107 "meter chaining not support");
4108 if (action_flags & MLX5_FLOW_ACTION_JUMP)
4109 return rte_flow_error_set(error, ENOTSUP,
4110 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4111 "meter with jump not support");
4113 return rte_flow_error_set(error, ENOTSUP,
4114 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4116 "meter action not supported");
4117 fm = mlx5_flow_meter_find(priv, am->mtr_id);
4119 return rte_flow_error_set(error, EINVAL,
4120 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4122 if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4123 (!fm->ingress && !attr->ingress && attr->egress) ||
4124 (!fm->egress && !attr->egress && attr->ingress))))
4125 return rte_flow_error_set(error, EINVAL,
4126 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4127 "Flow attributes are either invalid "
4128 "or have a conflict with current "
4129 "meter attributes");
4134 * Validate the age action.
4136 * @param[in] action_flags
4137 * Holds the actions detected until now.
4139 * Pointer to the age action.
4141 * Pointer to the Ethernet device structure.
4143 * Pointer to error structure.
4146 * 0 on success, a negative errno value otherwise and rte_errno is set.
4149 flow_dv_validate_action_age(uint64_t action_flags,
4150 const struct rte_flow_action *action,
4151 struct rte_eth_dev *dev,
4152 struct rte_flow_error *error)
4154 struct mlx5_priv *priv = dev->data->dev_private;
4155 const struct rte_flow_action_age *age = action->conf;
4157 if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4158 return rte_flow_error_set(error, ENOTSUP,
4159 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4161 "age action not supported");
4162 if (!(action->conf))
4163 return rte_flow_error_set(error, EINVAL,
4164 RTE_FLOW_ERROR_TYPE_ACTION, action,
4165 "configuration cannot be null");
4166 if (!(age->timeout))
4167 return rte_flow_error_set(error, EINVAL,
4168 RTE_FLOW_ERROR_TYPE_ACTION, action,
4169 "invalid timeout value 0");
4170 if (action_flags & MLX5_FLOW_ACTION_AGE)
4171 return rte_flow_error_set(error, EINVAL,
4172 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4173 "duplicate age actions set");
4178 * Validate the modify-header IPv4 DSCP actions.
4180 * @param[in] action_flags
4181 * Holds the actions detected until now.
4183 * Pointer to the modify action.
4184 * @param[in] item_flags
4185 * Holds the items detected.
4187 * Pointer to error structure.
4190 * 0 on success, a negative errno value otherwise and rte_errno is set.
4193 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4194 const struct rte_flow_action *action,
4195 const uint64_t item_flags,
4196 struct rte_flow_error *error)
4200 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4202 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4203 return rte_flow_error_set(error, EINVAL,
4204 RTE_FLOW_ERROR_TYPE_ACTION,
4206 "no ipv4 item in pattern");
4212 * Validate the modify-header IPv6 DSCP actions.
4214 * @param[in] action_flags
4215 * Holds the actions detected until now.
4217 * Pointer to the modify action.
4218 * @param[in] item_flags
4219 * Holds the items detected.
4221 * Pointer to error structure.
4224 * 0 on success, a negative errno value otherwise and rte_errno is set.
4227 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4228 const struct rte_flow_action *action,
4229 const uint64_t item_flags,
4230 struct rte_flow_error *error)
4234 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4236 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4237 return rte_flow_error_set(error, EINVAL,
4238 RTE_FLOW_ERROR_TYPE_ACTION,
4240 "no ipv6 item in pattern");
4246 * Match modify-header resource.
4249 * Pointer to the hash list.
4251 * Pointer to exist resource entry object.
4253 * Key of the new entry.
4255 * Pointer to new modify-header resource.
4258 * 0 on matching, non-zero otherwise.
4261 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4262 struct mlx5_hlist_entry *entry,
4263 uint64_t key __rte_unused, void *cb_ctx)
4265 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4266 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4267 struct mlx5_flow_dv_modify_hdr_resource *resource =
4268 container_of(entry, typeof(*resource), entry);
4269 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4271 key_len += ref->actions_num * sizeof(ref->actions[0]);
4272 return ref->actions_num != resource->actions_num ||
4273 memcmp(&ref->ft_type, &resource->ft_type, key_len);
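/*
 * Illustrative sketch (not upstream code): the comparison key covers
 * everything from the ft_type field to the end of the trailing actions
 * array, so two resources match only when both the fixed fields and
 * every modification action are identical:
 *
 *   key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *             + ref->actions_num * sizeof(ref->actions[0]);
 *   match   = ref->actions_num == resource->actions_num &&
 *             memcmp(&ref->ft_type, &resource->ft_type, key_len) == 0;
 */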
4276 struct mlx5_hlist_entry *
4277 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4280 struct mlx5_dev_ctx_shared *sh = list->ctx;
4281 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4282 struct mlx5dv_dr_domain *ns;
4283 struct mlx5_flow_dv_modify_hdr_resource *entry;
4284 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4286 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4287 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4289 entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4292 rte_flow_error_set(ctx->error, ENOMEM,
4293 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4294 "cannot allocate resource memory");
4297 rte_memcpy(&entry->ft_type,
4298 RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4299 key_len + data_len);
4300 if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4301 ns = sh->fdb_domain;
4302 else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4306 ret = mlx5_flow_os_create_flow_action_modify_header
4307 (sh->ctx, ns, entry,
4308 data_len, &entry->action);
4311 rte_flow_error_set(ctx->error, ENOMEM,
4312 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4313 NULL, "cannot create modification action");
4316 return &entry->entry;
4320 * Validate the sample action.
4322 * @param[in] action_flags
4323 * Holds the actions detected until now.
4325 * Pointer to the sample action.
4327 * Pointer to the Ethernet device structure.
4329 * Attributes of flow that includes this action.
4331 * Pointer to error structure.
4334 * 0 on success, a negative errno value otherwise and rte_errno is set.
4337 flow_dv_validate_action_sample(uint64_t action_flags,
4338 const struct rte_flow_action *action,
4339 struct rte_eth_dev *dev,
4340 const struct rte_flow_attr *attr,
4341 struct rte_flow_error *error)
4343 struct mlx5_priv *priv = dev->data->dev_private;
4344 struct mlx5_dev_config *dev_conf = &priv->config;
4345 const struct rte_flow_action_sample *sample = action->conf;
4346 const struct rte_flow_action *act;
4347 uint64_t sub_action_flags = 0;
4348 uint16_t queue_index = 0xFFFF;
4354 return rte_flow_error_set(error, EINVAL,
4355 RTE_FLOW_ERROR_TYPE_ACTION, action,
4356 "configuration cannot be NULL");
4357 if (sample->ratio == 0)
4358 return rte_flow_error_set(error, EINVAL,
4359 RTE_FLOW_ERROR_TYPE_ACTION, action,
4360 "ratio value starts from 1");
4361 if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4362 return rte_flow_error_set(error, ENOTSUP,
4363 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4365 "sample action not supported");
4366 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4367 return rte_flow_error_set(error, EINVAL,
4368 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4369 "Multiple sample actions not "
4371 if (action_flags & MLX5_FLOW_ACTION_METER)
4372 return rte_flow_error_set(error, EINVAL,
4373 RTE_FLOW_ERROR_TYPE_ACTION, action,
4374 "wrong action order, meter should "
4375 "be after sample action");
4376 if (action_flags & MLX5_FLOW_ACTION_JUMP)
4377 return rte_flow_error_set(error, EINVAL,
4378 RTE_FLOW_ERROR_TYPE_ACTION, action,
4379 "wrong action order, jump should "
4380 "be after sample action");
4381 act = sample->actions;
4382 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4383 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4384 return rte_flow_error_set(error, ENOTSUP,
4385 RTE_FLOW_ERROR_TYPE_ACTION,
4386 act, "too many actions");
4387 switch (act->type) {
4388 case RTE_FLOW_ACTION_TYPE_QUEUE:
4389 ret = mlx5_flow_validate_action_queue(act,
4395 queue_index = ((const struct rte_flow_action_queue *)
4396 (act->conf))->index;
4397 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4400 case RTE_FLOW_ACTION_TYPE_MARK:
4401 ret = flow_dv_validate_action_mark(dev, act,
4406 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4407 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4408 MLX5_FLOW_ACTION_MARK_EXT;
4410 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4413 case RTE_FLOW_ACTION_TYPE_COUNT:
4414 ret = flow_dv_validate_action_count(dev, error);
4417 sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4420 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4421 ret = flow_dv_validate_action_port_id(dev,
4428 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4431 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4432 ret = flow_dv_validate_action_raw_encap_decap
4433 (dev, NULL, act->conf, attr, &sub_action_flags,
4440 return rte_flow_error_set(error, ENOTSUP,
4441 RTE_FLOW_ERROR_TYPE_ACTION,
4443 "Doesn't support optional "
4447 if (attr->ingress && !attr->transfer) {
4448 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4449 return rte_flow_error_set(error, EINVAL,
4450 RTE_FLOW_ERROR_TYPE_ACTION,
4452 "Ingress must has a dest "
4453 "QUEUE for Sample");
4454 } else if (attr->egress && !attr->transfer) {
4455 return rte_flow_error_set(error, ENOTSUP,
4456 RTE_FLOW_ERROR_TYPE_ACTION,
4458 "Sample Only support Ingress "
4460 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4461 MLX5_ASSERT(attr->transfer);
4462 if (sample->ratio > 1)
4463 return rte_flow_error_set(error, ENOTSUP,
4464 RTE_FLOW_ERROR_TYPE_ACTION,
4466 "E-Switch doesn't support "
4467 "any optional action "
4470 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4471 return rte_flow_error_set(error, ENOTSUP,
4472 RTE_FLOW_ERROR_TYPE_ACTION,
4474 "unsupported action QUEUE");
4475 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4476 return rte_flow_error_set(error, EINVAL,
4477 RTE_FLOW_ERROR_TYPE_ACTION,
4479 "E-Switch must has a dest "
4480 "port for mirroring");
4482 /* Continue validation for Xcap actions.*/
4483 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4484 (queue_index == 0xFFFF ||
4485 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4486 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4487 MLX5_FLOW_XCAP_ACTIONS)
4488 return rte_flow_error_set(error, ENOTSUP,
4489 RTE_FLOW_ERROR_TYPE_ACTION,
4490 NULL, "encap and decap "
4491 "combination aren't "
4493 if (!attr->transfer && attr->ingress && (sub_action_flags &
4494 MLX5_FLOW_ACTION_ENCAP))
4495 return rte_flow_error_set(error, ENOTSUP,
4496 RTE_FLOW_ERROR_TYPE_ACTION,
4497 NULL, "encap is not supported"
4498 " for ingress traffic");
4504 * Find existing modify-header resource or create and register a new one.
4506 * @param dev[in, out]
4507 * Pointer to rte_eth_dev structure.
4508 * @param[in, out] resource
4509 * Pointer to modify-header resource.
4510 * @param[in, out] dev_flow
4511 * Pointer to the dev_flow.
4513 * Pointer to error structure.
4516 * 0 on success, otherwise -errno and errno is set.
4519 flow_dv_modify_hdr_resource_register
4520 (struct rte_eth_dev *dev,
4521 struct mlx5_flow_dv_modify_hdr_resource *resource,
4522 struct mlx5_flow *dev_flow,
4523 struct rte_flow_error *error)
4525 struct mlx5_priv *priv = dev->data->dev_private;
4526 struct mlx5_dev_ctx_shared *sh = priv->sh;
4527 uint32_t key_len = sizeof(*resource) -
4528 offsetof(typeof(*resource), ft_type) +
4529 resource->actions_num * sizeof(resource->actions[0]);
4530 struct mlx5_hlist_entry *entry;
4531 struct mlx5_flow_cb_ctx ctx = {
4536 resource->flags = dev_flow->dv.group ? 0 :
4537 MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4538 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4540 return rte_flow_error_set(error, EOVERFLOW,
4541 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4542 "too many modify header items");
4543 resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4544 entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4547 resource = container_of(entry, typeof(*resource), entry);
4548 dev_flow->handle->dvh.modify_hdr = resource;
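/*
 * Illustrative note (assumes the mlx5_hlist semantics used above): the
 * 64-bit hash-list key is a raw checksum over the same ft_type..actions
 * region that flow_dv_modify_match_cb() compares, so colliding checksums
 * are disambiguated by the match callback before an existing entry is
 * reused:
 *
 *   resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *   entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
 */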
4553 * Get DV flow counter by index.
4556 * Pointer to the Ethernet device structure.
4558 * mlx5 flow counter index in the container.
4560 * mlx5 flow counter pool in the container,
4563 * Pointer to the counter, NULL otherwise.
4565 static struct mlx5_flow_counter *
4566 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4568 struct mlx5_flow_counter_pool **ppool)
4570 struct mlx5_priv *priv = dev->data->dev_private;
4571 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4572 struct mlx5_flow_counter_pool *pool;
4574 /* Decrease to original index and clear shared bit. */
4575 idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4576 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4577 pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4581 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
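/*
 * Illustrative sketch (assumption, based on the decoding above and on the
 * MLX5_MAKE_CNT_IDX() usage later in this file): a counter index is
 * composed as pool_index * MLX5_COUNTERS_PER_POOL + offset + 1, with the
 * MLX5_CNT_SHARED_OFFSET bit optionally OR-ed in for shared counters:
 *
 *   idx  = MLX5_MAKE_CNT_IDX(pool->index, offset);
 *   idx  = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 *   pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
 *   cnt  = MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 */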
4585 * Check whether the devx counter belongs to the pool.
4588 * Pointer to the counter pool.
4590 * The counter devx ID.
4593 * True if counter belongs to the pool, false otherwise.
4596 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4598 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4599 MLX5_COUNTERS_PER_POOL;
4601 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4607 * Get a pool by devx counter ID.
4610 * Pointer to the counter management.
4612 * The counter devx ID.
4615 * The counter pool pointer if it exists, NULL otherwise.
4617 static struct mlx5_flow_counter_pool *
4618 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4621 struct mlx5_flow_counter_pool *pool = NULL;
4623 rte_spinlock_lock(&cmng->pool_update_sl);
4624 /* Check last used pool. */
4625 if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4626 flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4627 pool = cmng->pools[cmng->last_pool_idx];
4630 /* ID out of range means no suitable pool in the container. */
4631 if (id > cmng->max_id || id < cmng->min_id)
4634 * Search the pools from the end of the container, since counter
4635 * IDs are mostly sequentially increasing, and the last pool should be the needed
4640 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4642 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4648 rte_spinlock_unlock(&cmng->pool_update_sl);
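/*
 * Illustrative note (not upstream text): the lookup is a hint-then-scan
 * scheme. The last used pool index is tried first; otherwise the pools
 * are scanned backwards from the newest one, because DevX counter IDs
 * are typically allocated in increasing order, so a freshly queried ID
 * most likely belongs to one of the most recently created pools. The
 * worst case is a full O(n_valid) scan under the pool_update_sl
 * spinlock.
 */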
4653 * Resize a counter container.
4656 * Pointer to the Ethernet device structure.
4659 * 0 on success, otherwise negative errno value and rte_errno is set.
4662 flow_dv_container_resize(struct rte_eth_dev *dev)
4664 struct mlx5_priv *priv = dev->data->dev_private;
4665 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4666 void *old_pools = cmng->pools;
4667 uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4668 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4669 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4676 memcpy(pools, old_pools, cmng->n *
4677 sizeof(struct mlx5_flow_counter_pool *));
4679 cmng->pools = pools;
4681 mlx5_free(old_pools);
4686 * Query a devx flow counter.
4689 * Pointer to the Ethernet device structure.
4691 * Index to the flow counter.
4693 * The statistics value of packets.
4695 * The statistics value of bytes.
4698 * 0 on success, otherwise a negative errno value and rte_errno is set.
4701 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4704 struct mlx5_priv *priv = dev->data->dev_private;
4705 struct mlx5_flow_counter_pool *pool = NULL;
4706 struct mlx5_flow_counter *cnt;
4709 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4711 if (priv->sh->cmng.counter_fallback)
4712 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4713 0, pkts, bytes, 0, NULL, NULL, 0);
4714 rte_spinlock_lock(&pool->sl);
4719 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4720 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4721 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4723 rte_spinlock_unlock(&pool->sl);
4728 * Create and initialize a new counter pool.
4731 * Pointer to the Ethernet device structure.
4733 * The devX counter handle.
4735 * Whether the pool is for a counter that was allocated for aging.
4736 * @param[in, out] cont_cur
4737 * Pointer to the container pointer; it will be updated on pool resize.
4740 * The pool container pointer on success, NULL otherwise and rte_errno is set.
4742 static struct mlx5_flow_counter_pool *
4743 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4746 struct mlx5_priv *priv = dev->data->dev_private;
4747 struct mlx5_flow_counter_pool *pool;
4748 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4749 bool fallback = priv->sh->cmng.counter_fallback;
4750 uint32_t size = sizeof(*pool);
4752 size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4753 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4754 pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4760 pool->is_aged = !!age;
4761 pool->query_gen = 0;
4762 pool->min_dcs = dcs;
4763 rte_spinlock_init(&pool->sl);
4764 rte_spinlock_init(&pool->csl);
4765 TAILQ_INIT(&pool->counters[0]);
4766 TAILQ_INIT(&pool->counters[1]);
4767 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4768 rte_spinlock_lock(&cmng->pool_update_sl);
4769 pool->index = cmng->n_valid;
4770 if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4772 rte_spinlock_unlock(&cmng->pool_update_sl);
4775 cmng->pools[pool->index] = pool;
4777 if (unlikely(fallback)) {
4778 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4780 if (base < cmng->min_id)
4781 cmng->min_id = base;
4782 if (base > cmng->max_id)
4783 cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4784 cmng->last_pool_idx = pool->index;
4786 rte_spinlock_unlock(&cmng->pool_update_sl);
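/*
 * Illustrative note (assumption about the fallback scheme): in fallback
 * mode each counter comes from its own single-counter DevX object, and
 * the pool groups the counters whose IDs fall into the same
 * MLX5_COUNTERS_PER_POOL-aligned window. The [min_id, max_id] range and
 * the last_pool_idx hint recorded here let flow_dv_find_pool_by_id()
 * reject out-of-range IDs cheaply and resolve in-range IDs without
 * walking all pools in the common case.
 */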
4791 * Prepare a new counter and/or a new counter pool.
4794 * Pointer to the Ethernet device structure.
4795 * @param[out] cnt_free
4796 * Where to put the pointer of a new counter.
4798 * Whether the pool is for a counter that was allocated for aging.
4801 * The counter pool pointer and @p cnt_free is set on success,
4802 * NULL otherwise and rte_errno is set.
4804 static struct mlx5_flow_counter_pool *
4805 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4806 struct mlx5_flow_counter **cnt_free,
4809 struct mlx5_priv *priv = dev->data->dev_private;
4810 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4811 struct mlx5_flow_counter_pool *pool;
4812 struct mlx5_counters tmp_tq;
4813 struct mlx5_devx_obj *dcs = NULL;
4814 struct mlx5_flow_counter *cnt;
4815 enum mlx5_counter_type cnt_type =
4816 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4817 bool fallback = priv->sh->cmng.counter_fallback;
4821 /* bulk_bitmap must be 0 for single counter allocation. */
4822 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4825 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4827 pool = flow_dv_pool_create(dev, dcs, age);
4829 mlx5_devx_cmd_destroy(dcs);
4833 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4834 cnt = MLX5_POOL_GET_CNT(pool, i);
4836 cnt->dcs_when_free = dcs;
4840 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4842 rte_errno = ENODATA;
4845 pool = flow_dv_pool_create(dev, dcs, age);
4847 mlx5_devx_cmd_destroy(dcs);
4850 TAILQ_INIT(&tmp_tq);
4851 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4852 cnt = MLX5_POOL_GET_CNT(pool, i);
4854 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4856 rte_spinlock_lock(&cmng->csl[cnt_type]);
4857 TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4858 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4859 *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4860 (*cnt_free)->pool = pool;
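/*
 * Illustrative note (not upstream text): in the non-fallback (bulk) path
 * a whole pool worth of counters is obtained from one DevX allocation
 * (bulk_bitmap 0x4), counter 0 is handed back through *cnt_free, and the
 * remaining counters are linked into a temporary queue and then spliced
 * into the global per-type free list under its spinlock, so later
 * allocations can be served without touching firmware.
 */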
4865 * Allocate a flow counter.
4868 * Pointer to the Ethernet device structure.
4870 * Whether the counter was allocated for aging.
4873 * Index to flow counter on success, 0 otherwise and rte_errno is set.
4876 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4878 struct mlx5_priv *priv = dev->data->dev_private;
4879 struct mlx5_flow_counter_pool *pool = NULL;
4880 struct mlx5_flow_counter *cnt_free = NULL;
4881 bool fallback = priv->sh->cmng.counter_fallback;
4882 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4883 enum mlx5_counter_type cnt_type =
4884 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4887 if (!priv->config.devx) {
4888 rte_errno = ENOTSUP;
4891 /* Get free counters from container. */
4892 rte_spinlock_lock(&cmng->csl[cnt_type]);
4893 cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4895 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4896 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4897 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4899 pool = cnt_free->pool;
4901 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4902 /* Create a DV counter action only on first-time usage. */
4903 if (!cnt_free->action) {
4905 struct mlx5_devx_obj *dcs;
4909 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4910 dcs = pool->min_dcs;
4913 dcs = cnt_free->dcs_when_free;
4915 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4922 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4923 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4924 /* Update the counter reset values. */
4925 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4928 if (!fallback && !priv->sh->cmng.query_thread_on)
4929 /* Start the asynchronous batch query by the host thread. */
4930 mlx5_set_query_alarm(priv->sh);
4934 cnt_free->pool = pool;
4936 cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4937 rte_spinlock_lock(&cmng->csl[cnt_type]);
4938 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4939 rte_spinlock_unlock(&cmng->csl[cnt_type]);
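/*
 * Illustrative note (assumption, based on the query just above): the
 * hits/bytes pair read right after allocation is kept in the counter as
 * a reset baseline, so later reads can report statistics relative to the
 * moment the counter was (re)allocated rather than the raw hardware
 * totals.
 */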
4945 * Allocate a shared flow counter.
4948 * Pointer to the shared counter configuration.
4950 * Pointer to save the allocated counter index.
4953 * Index to flow counter on success, 0 otherwise and rte_errno is set.
4957 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4959 struct mlx5_shared_counter_conf *conf = ctx;
4960 struct rte_eth_dev *dev = conf->dev;
4961 struct mlx5_flow_counter *cnt;
4963 data->dword = flow_dv_counter_alloc(dev, 0);
4964 data->dword |= MLX5_CNT_SHARED_OFFSET;
4965 cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4966 cnt->shared_info.id = conf->id;
4971 * Get a shared flow counter.
4974 * Pointer to the Ethernet device structure.
4976 * Counter identifier.
4979 * Index to flow counter on success, 0 otherwise and rte_errno is set.
4982 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4984 struct mlx5_priv *priv = dev->data->dev_private;
4985 struct mlx5_shared_counter_conf conf = {
4989 union mlx5_l3t_data data = {
4993 mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4994 flow_dv_counter_alloc_shared_cb, &conf);
4999 * Get age param from counter index.
5002 * Pointer to the Ethernet device structure.
5003 * @param[in] counter
5004 * Index to the counter handler.
5007 * The aging parameter specified for the counter index.
5009 static struct mlx5_age_param*
5010 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5013 struct mlx5_flow_counter *cnt;
5014 struct mlx5_flow_counter_pool *pool = NULL;
5016 flow_dv_counter_get_by_idx(dev, counter, &pool);
5017 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5018 cnt = MLX5_POOL_GET_CNT(pool, counter);
5019 return MLX5_CNT_TO_AGE(cnt);
5023 * Remove a flow counter from aged counter list.
5026 * Pointer to the Ethernet device structure.
5027 * @param[in] counter
5028 * Index to the counter handler.
5030 * Pointer to the counter handler.
5033 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5034 uint32_t counter, struct mlx5_flow_counter *cnt)
5036 struct mlx5_age_info *age_info;
5037 struct mlx5_age_param *age_param;
5038 struct mlx5_priv *priv = dev->data->dev_private;
5039 uint16_t expected = AGE_CANDIDATE;
5041 age_info = GET_PORT_AGE_INFO(priv);
5042 age_param = flow_dv_counter_idx_get_age(dev, counter);
5043 if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5044 AGE_FREE, false, __ATOMIC_RELAXED,
5045 __ATOMIC_RELAXED)) {
5047 * We need the lock even if the age timeout has expired,
5048 * since the counter may still be in process.
5050 rte_spinlock_lock(&age_info->aged_sl);
5051 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5052 rte_spinlock_unlock(&age_info->aged_sl);
5053 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5058 * Release a flow counter.
5061 * Pointer to the Ethernet device structure.
5062 * @param[in] counter
5063 * Index to the counter handler.
5066 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5068 struct mlx5_priv *priv = dev->data->dev_private;
5069 struct mlx5_flow_counter_pool *pool = NULL;
5070 struct mlx5_flow_counter *cnt;
5071 enum mlx5_counter_type cnt_type;
5075 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5077 if (IS_SHARED_CNT(counter) &&
5078 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5081 flow_dv_counter_remove_from_age(dev, counter, cnt);
5084 * Put the counter back to the list to be updated in non-fallback mode.
5085 * Currently, we are using two lists alternately: while one is being
5086 * queried, add the freed counter to the other list based on the pool
5087 * query_gen value. After the query finishes, add the counter list to
5088 * the global container counter list. The list changes when a query
5089 * starts. In this case, no lock is needed as the query callback and
5090 * the release function operate on different lists.
5093 if (!priv->sh->cmng.counter_fallback) {
5094 rte_spinlock_lock(&pool->csl);
5095 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5096 rte_spinlock_unlock(&pool->csl);
5098 cnt->dcs_when_free = cnt->dcs_when_active;
5099 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5100 MLX5_COUNTER_TYPE_ORIGIN;
5101 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5102 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5104 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5109 * Verify the @p attributes will be correctly understood by the NIC and store
5110 * them in the @p flow if everything is correct.
5113 * Pointer to dev struct.
5114 * @param[in] attributes
5115 * Pointer to flow attributes
5116 * @param[in] external
5117 * This flow rule is created by a request external to the PMD.
5119 * Pointer to error structure.
5122 * - 0 on success and non-root table.
5123 * - 1 on success and root table.
5124 * - a negative errno value otherwise and rte_errno is set.
5127 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5128 const struct mlx5_flow_tunnel *tunnel,
5129 const struct rte_flow_attr *attributes,
5130 struct flow_grp_info grp_info,
5131 struct rte_flow_error *error)
5133 struct mlx5_priv *priv = dev->data->dev_private;
5134 uint32_t priority_max = priv->config.flow_prio - 1;
5137 #ifndef HAVE_MLX5DV_DR
5138 RTE_SET_USED(tunnel);
5139 RTE_SET_USED(grp_info);
5140 if (attributes->group)
5141 return rte_flow_error_set(error, ENOTSUP,
5142 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5144 "groups are not supported");
5148 ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5153 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5155 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5156 attributes->priority >= priority_max)
5157 return rte_flow_error_set(error, ENOTSUP,
5158 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5160 "priority out of range");
5161 if (attributes->transfer) {
5162 if (!priv->config.dv_esw_en)
5163 return rte_flow_error_set
5165 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5166 "E-Switch dr is not supported");
5167 if (!(priv->representor || priv->master))
5168 return rte_flow_error_set
5169 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5170 NULL, "E-Switch configuration can only be"
5171 " done by a master or a representor device");
5172 if (attributes->egress)
5173 return rte_flow_error_set
5175 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5176 "egress is not supported");
5178 if (!(attributes->egress ^ attributes->ingress))
5179 return rte_flow_error_set(error, ENOTSUP,
5180 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5181 "must specify exactly one of "
5182 "ingress or egress");
5187 * Internal validation function for validating both actions and items.
5190 * Pointer to the rte_eth_dev structure.
5192 * Pointer to the flow attributes.
5194 * Pointer to the list of items.
5195 * @param[in] actions
5196 * Pointer to the list of actions.
5197 * @param[in] external
5198 * This flow rule is created by a request external to the PMD.
5199 * @param[in] hairpin
5200 * Number of hairpin TX actions, 0 means classic flow.
5202 * Pointer to the error structure.
5205 * 0 on success, a negative errno value otherwise and rte_errno is set.
5208 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5209 const struct rte_flow_item items[],
5210 const struct rte_flow_action actions[],
5211 bool external, int hairpin, struct rte_flow_error *error)
5214 uint64_t action_flags = 0;
5215 uint64_t item_flags = 0;
5216 uint64_t last_item = 0;
5217 uint8_t next_protocol = 0xff;
5218 uint16_t ether_type = 0;
5220 uint8_t item_ipv6_proto = 0;
5221 const struct rte_flow_item *gre_item = NULL;
5222 const struct rte_flow_action_raw_decap *decap;
5223 const struct rte_flow_action_raw_encap *encap;
5224 const struct rte_flow_action_rss *rss;
5225 const struct rte_flow_item_tcp nic_tcp_mask = {
5228 .src_port = RTE_BE16(UINT16_MAX),
5229 .dst_port = RTE_BE16(UINT16_MAX),
5232 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5235 "\xff\xff\xff\xff\xff\xff\xff\xff"
5236 "\xff\xff\xff\xff\xff\xff\xff\xff",
5238 "\xff\xff\xff\xff\xff\xff\xff\xff"
5239 "\xff\xff\xff\xff\xff\xff\xff\xff",
5240 .vtc_flow = RTE_BE32(0xffffffff),
5246 const struct rte_flow_item_ecpri nic_ecpri_mask = {
5250 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5254 .dummy[0] = 0xffffffff,
5257 struct mlx5_priv *priv = dev->data->dev_private;
5258 struct mlx5_dev_config *dev_conf = &priv->config;
5259 uint16_t queue_index = 0xFFFF;
5260 const struct rte_flow_item_vlan *vlan_m = NULL;
5261 int16_t rw_act_num = 0;
5263 const struct mlx5_flow_tunnel *tunnel;
5264 struct flow_grp_info grp_info = {
5265 .external = !!external,
5266 .transfer = !!attr->transfer,
5267 .fdb_def_rule = !!priv->fdb_def_rule,
5269 const struct rte_eth_hairpin_conf *conf;
5273 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5274 tunnel = flow_items_to_tunnel(items);
5275 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5276 MLX5_FLOW_ACTION_DECAP;
5277 } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5278 tunnel = flow_actions_to_tunnel(actions);
5279 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5283 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5284 (dev, tunnel, attr, items, actions);
5285 ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5288 is_root = (uint64_t)ret;
5289 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5290 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5291 int type = items->type;
5293 if (!mlx5_flow_os_item_supported(type))
5294 return rte_flow_error_set(error, ENOTSUP,
5295 RTE_FLOW_ERROR_TYPE_ITEM,
5296 NULL, "item not supported");
5298 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5299 if (items[0].type != (typeof(items[0].type))
5300 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5301 return rte_flow_error_set
5303 RTE_FLOW_ERROR_TYPE_ITEM,
5304 NULL, "MLX5 private items "
5305 "must be the first");
5307 case RTE_FLOW_ITEM_TYPE_VOID:
5309 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5310 ret = flow_dv_validate_item_port_id
5311 (dev, items, attr, item_flags, error);
5314 last_item = MLX5_FLOW_ITEM_PORT_ID;
5316 case RTE_FLOW_ITEM_TYPE_ETH:
5317 ret = mlx5_flow_validate_item_eth(items, item_flags,
5321 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5322 MLX5_FLOW_LAYER_OUTER_L2;
5323 if (items->mask != NULL && items->spec != NULL) {
5325 ((const struct rte_flow_item_eth *)
5328 ((const struct rte_flow_item_eth *)
5330 ether_type = rte_be_to_cpu_16(ether_type);
5335 case RTE_FLOW_ITEM_TYPE_VLAN:
5336 ret = flow_dv_validate_item_vlan(items, item_flags,
5340 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5341 MLX5_FLOW_LAYER_OUTER_VLAN;
5342 if (items->mask != NULL && items->spec != NULL) {
5344 ((const struct rte_flow_item_vlan *)
5345 items->spec)->inner_type;
5347 ((const struct rte_flow_item_vlan *)
5348 items->mask)->inner_type;
5349 ether_type = rte_be_to_cpu_16(ether_type);
5353 /* Store outer VLAN mask for of_push_vlan action. */
5355 vlan_m = items->mask;
5357 case RTE_FLOW_ITEM_TYPE_IPV4:
5358 mlx5_flow_tunnel_ip_check(items, next_protocol,
5359 &item_flags, &tunnel);
5360 ret = flow_dv_validate_item_ipv4(items, item_flags,
5361 last_item, ether_type,
5365 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5366 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5367 if (items->mask != NULL &&
5368 ((const struct rte_flow_item_ipv4 *)
5369 items->mask)->hdr.next_proto_id) {
5371 ((const struct rte_flow_item_ipv4 *)
5372 (items->spec))->hdr.next_proto_id;
5374 ((const struct rte_flow_item_ipv4 *)
5375 (items->mask))->hdr.next_proto_id;
5377 /* Reset for inner layer. */
5378 next_protocol = 0xff;
5381 case RTE_FLOW_ITEM_TYPE_IPV6:
5382 mlx5_flow_tunnel_ip_check(items, next_protocol,
5383 &item_flags, &tunnel);
5384 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5391 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5392 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5393 if (items->mask != NULL &&
5394 ((const struct rte_flow_item_ipv6 *)
5395 items->mask)->hdr.proto) {
5397 ((const struct rte_flow_item_ipv6 *)
5398 items->spec)->hdr.proto;
5400 ((const struct rte_flow_item_ipv6 *)
5401 items->spec)->hdr.proto;
5403 ((const struct rte_flow_item_ipv6 *)
5404 items->mask)->hdr.proto;
5406 /* Reset for inner layer. */
5407 next_protocol = 0xff;
5410 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5411 ret = flow_dv_validate_item_ipv6_frag_ext(items,
5416 last_item = tunnel ?
5417 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5418 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5419 if (items->mask != NULL &&
5420 ((const struct rte_flow_item_ipv6_frag_ext *)
5421 items->mask)->hdr.next_header) {
5423 ((const struct rte_flow_item_ipv6_frag_ext *)
5424 items->spec)->hdr.next_header;
5426 ((const struct rte_flow_item_ipv6_frag_ext *)
5427 items->mask)->hdr.next_header;
5429 /* Reset for inner layer. */
5430 next_protocol = 0xff;
5433 case RTE_FLOW_ITEM_TYPE_TCP:
5434 ret = mlx5_flow_validate_item_tcp
5441 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5442 MLX5_FLOW_LAYER_OUTER_L4_TCP;
5444 case RTE_FLOW_ITEM_TYPE_UDP:
5445 ret = mlx5_flow_validate_item_udp(items, item_flags,
5450 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5451 MLX5_FLOW_LAYER_OUTER_L4_UDP;
5453 case RTE_FLOW_ITEM_TYPE_GRE:
5454 ret = mlx5_flow_validate_item_gre(items, item_flags,
5455 next_protocol, error);
5459 last_item = MLX5_FLOW_LAYER_GRE;
5461 case RTE_FLOW_ITEM_TYPE_NVGRE:
5462 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5467 last_item = MLX5_FLOW_LAYER_NVGRE;
5469 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5470 ret = mlx5_flow_validate_item_gre_key
5471 (items, item_flags, gre_item, error);
5474 last_item = MLX5_FLOW_LAYER_GRE_KEY;
5476 case RTE_FLOW_ITEM_TYPE_VXLAN:
5477 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5481 last_item = MLX5_FLOW_LAYER_VXLAN;
5483 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5484 ret = mlx5_flow_validate_item_vxlan_gpe(items,
5489 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5491 case RTE_FLOW_ITEM_TYPE_GENEVE:
5492 ret = mlx5_flow_validate_item_geneve(items,
5497 last_item = MLX5_FLOW_LAYER_GENEVE;
5499 case RTE_FLOW_ITEM_TYPE_MPLS:
5500 ret = mlx5_flow_validate_item_mpls(dev, items,
5505 last_item = MLX5_FLOW_LAYER_MPLS;
5508 case RTE_FLOW_ITEM_TYPE_MARK:
5509 ret = flow_dv_validate_item_mark(dev, items, attr,
5513 last_item = MLX5_FLOW_ITEM_MARK;
5515 case RTE_FLOW_ITEM_TYPE_META:
5516 ret = flow_dv_validate_item_meta(dev, items, attr,
5520 last_item = MLX5_FLOW_ITEM_METADATA;
5522 case RTE_FLOW_ITEM_TYPE_ICMP:
5523 ret = mlx5_flow_validate_item_icmp(items, item_flags,
5528 last_item = MLX5_FLOW_LAYER_ICMP;
5530 case RTE_FLOW_ITEM_TYPE_ICMP6:
5531 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5536 item_ipv6_proto = IPPROTO_ICMPV6;
5537 last_item = MLX5_FLOW_LAYER_ICMP6;
5539 case RTE_FLOW_ITEM_TYPE_TAG:
5540 ret = flow_dv_validate_item_tag(dev, items,
5544 last_item = MLX5_FLOW_ITEM_TAG;
5546 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5547 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5549 case RTE_FLOW_ITEM_TYPE_GTP:
5550 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5554 last_item = MLX5_FLOW_LAYER_GTP;
5556 case RTE_FLOW_ITEM_TYPE_ECPRI:
5557 /* Capacity will be checked in the translate stage. */
5558 ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5565 last_item = MLX5_FLOW_LAYER_ECPRI;
5568 return rte_flow_error_set(error, ENOTSUP,
5569 RTE_FLOW_ERROR_TYPE_ITEM,
5570 NULL, "item not supported");
5572 item_flags |= last_item;
5574 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5575 int type = actions->type;
5577 if (!mlx5_flow_os_action_supported(type))
5578 return rte_flow_error_set(error, ENOTSUP,
5579 RTE_FLOW_ERROR_TYPE_ACTION,
5581 "action not supported");
5582 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5583 return rte_flow_error_set(error, ENOTSUP,
5584 RTE_FLOW_ERROR_TYPE_ACTION,
5585 actions, "too many actions");
5587 case RTE_FLOW_ACTION_TYPE_VOID:
5589 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5590 ret = flow_dv_validate_action_port_id(dev,
5597 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5600 case RTE_FLOW_ACTION_TYPE_FLAG:
5601 ret = flow_dv_validate_action_flag(dev, action_flags,
5605 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5606 /* Count all modify-header actions as one. */
5607 if (!(action_flags &
5608 MLX5_FLOW_MODIFY_HDR_ACTIONS))
5610 action_flags |= MLX5_FLOW_ACTION_FLAG |
5611 MLX5_FLOW_ACTION_MARK_EXT;
5613 action_flags |= MLX5_FLOW_ACTION_FLAG;
5616 rw_act_num += MLX5_ACT_NUM_SET_MARK;
5618 case RTE_FLOW_ACTION_TYPE_MARK:
5619 ret = flow_dv_validate_action_mark(dev, actions,
5624 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5625 /* Count all modify-header actions as one. */
5626 if (!(action_flags &
5627 MLX5_FLOW_MODIFY_HDR_ACTIONS))
5629 action_flags |= MLX5_FLOW_ACTION_MARK |
5630 MLX5_FLOW_ACTION_MARK_EXT;
5632 action_flags |= MLX5_FLOW_ACTION_MARK;
5635 rw_act_num += MLX5_ACT_NUM_SET_MARK;
5637 case RTE_FLOW_ACTION_TYPE_SET_META:
5638 ret = flow_dv_validate_action_set_meta(dev, actions,
5643 /* Count all modify-header actions as one action. */
5644 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5646 action_flags |= MLX5_FLOW_ACTION_SET_META;
5647 rw_act_num += MLX5_ACT_NUM_SET_META;
5649 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5650 ret = flow_dv_validate_action_set_tag(dev, actions,
5655 /* Count all modify-header actions as one action. */
5656 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5658 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5659 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5661 case RTE_FLOW_ACTION_TYPE_DROP:
5662 ret = mlx5_flow_validate_action_drop(action_flags,
5666 action_flags |= MLX5_FLOW_ACTION_DROP;
5669 case RTE_FLOW_ACTION_TYPE_QUEUE:
5670 ret = mlx5_flow_validate_action_queue(actions,
5675 queue_index = ((const struct rte_flow_action_queue *)
5676 (actions->conf))->index;
5677 action_flags |= MLX5_FLOW_ACTION_QUEUE;
5680 case RTE_FLOW_ACTION_TYPE_RSS:
5681 rss = actions->conf;
5682 ret = mlx5_flow_validate_action_rss(actions,
5688 if (rss != NULL && rss->queue_num)
5689 queue_index = rss->queue[0];
5690 action_flags |= MLX5_FLOW_ACTION_RSS;
5693 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5695 mlx5_flow_validate_action_default_miss(action_flags,
5699 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5702 case RTE_FLOW_ACTION_TYPE_COUNT:
5703 ret = flow_dv_validate_action_count(dev, error);
5706 action_flags |= MLX5_FLOW_ACTION_COUNT;
5709 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5710 if (flow_dv_validate_action_pop_vlan(dev,
5716 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5719 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5720 ret = flow_dv_validate_action_push_vlan(dev,
5727 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5730 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5731 ret = flow_dv_validate_action_set_vlan_pcp
5732 (action_flags, actions, error);
5735 /* Count PCP with push_vlan command. */
5736 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5738 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5739 ret = flow_dv_validate_action_set_vlan_vid
5740 (item_flags, action_flags,
5744 /* Count VID with push_vlan command. */
5745 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5746 rw_act_num += MLX5_ACT_NUM_MDF_VID;
5748 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5749 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5750 ret = flow_dv_validate_action_l2_encap(dev,
5756 action_flags |= MLX5_FLOW_ACTION_ENCAP;
5759 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5760 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5761 ret = flow_dv_validate_action_decap(dev, action_flags,
5765 action_flags |= MLX5_FLOW_ACTION_DECAP;
5768 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5769 ret = flow_dv_validate_action_raw_encap_decap
5770 (dev, NULL, actions->conf, attr, &action_flags,
5775 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5776 decap = actions->conf;
5777 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5779 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5783 encap = actions->conf;
5785 ret = flow_dv_validate_action_raw_encap_decap
5787 decap ? decap : &empty_decap, encap,
5788 attr, &action_flags, &actions_n,
5793 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5794 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5795 ret = flow_dv_validate_action_modify_mac(action_flags,
5801 /* Count all modify-header actions as one action. */
5802 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5804 action_flags |= actions->type ==
5805 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5806 MLX5_FLOW_ACTION_SET_MAC_SRC :
5807 MLX5_FLOW_ACTION_SET_MAC_DST;
5809 * Even if the source and destination MAC addresses overlap
5810 * in the header with 4B alignment, the convert function will
5811 * handle them separately and 4 SW actions will be created.
5812 * 2 actions will be added each time, no matter how many
5813 * bytes of the address are set.
5815 rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5817 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5818 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5819 ret = flow_dv_validate_action_modify_ipv4(action_flags,
5825 /* Count all modify-header actions as one action. */
5826 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5828 action_flags |= actions->type ==
5829 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5830 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5831 MLX5_FLOW_ACTION_SET_IPV4_DST;
5832 rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5834 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5835 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5836 ret = flow_dv_validate_action_modify_ipv6(action_flags,
5842 if (item_ipv6_proto == IPPROTO_ICMPV6)
5843 return rte_flow_error_set(error, ENOTSUP,
5844 RTE_FLOW_ERROR_TYPE_ACTION,
5846 "Can't change header "
5847 "with ICMPv6 proto");
5848 /* Count all modify-header actions as one action. */
5849 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5851 action_flags |= actions->type ==
5852 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5853 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5854 MLX5_FLOW_ACTION_SET_IPV6_DST;
5855 rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5857 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5858 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5859 ret = flow_dv_validate_action_modify_tp(action_flags,
5865 /* Count all modify-header actions as one action. */
5866 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5868 action_flags |= actions->type ==
5869 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5870 MLX5_FLOW_ACTION_SET_TP_SRC :
5871 MLX5_FLOW_ACTION_SET_TP_DST;
5872 rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5874 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5875 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5876 ret = flow_dv_validate_action_modify_ttl(action_flags,
5882 /* Count all modify-header actions as one action. */
5883 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5885 action_flags |= actions->type ==
5886 RTE_FLOW_ACTION_TYPE_SET_TTL ?
5887 MLX5_FLOW_ACTION_SET_TTL :
5888 MLX5_FLOW_ACTION_DEC_TTL;
5889 rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5891 case RTE_FLOW_ACTION_TYPE_JUMP:
5892 ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5899 action_flags |= MLX5_FLOW_ACTION_JUMP;
5901 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5902 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5903 ret = flow_dv_validate_action_modify_tcp_seq
5910 /* Count all modify-header actions as one action. */
5911 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5913 action_flags |= actions->type ==
5914 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5915 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5916 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5917 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5919 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5920 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5921 ret = flow_dv_validate_action_modify_tcp_ack
5928 /* Count all modify-header actions as one action. */
5929 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5931 action_flags |= actions->type ==
5932 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5933 MLX5_FLOW_ACTION_INC_TCP_ACK :
5934 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5935 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5937 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5939 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5940 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5941 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5943 case RTE_FLOW_ACTION_TYPE_METER:
5944 ret = mlx5_flow_validate_action_meter(dev,
5950 action_flags |= MLX5_FLOW_ACTION_METER;
5952 /* Meter action will add one more TAG action. */
5953 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5955 case RTE_FLOW_ACTION_TYPE_AGE:
5956 ret = flow_dv_validate_action_age(action_flags,
5961 action_flags |= MLX5_FLOW_ACTION_AGE;
5964 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5965 ret = flow_dv_validate_action_modify_ipv4_dscp
5972 /* Count all modify-header actions as one action. */
5973 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5975 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5976 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5978 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5979 ret = flow_dv_validate_action_modify_ipv6_dscp
5986 /* Count all modify-header actions as one action. */
5987 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5989 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5990 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5992 case RTE_FLOW_ACTION_TYPE_SAMPLE:
5993 ret = flow_dv_validate_action_sample(action_flags,
5998 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6001 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6002 if (actions[0].type != (typeof(actions[0].type))
6003 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6004 return rte_flow_error_set
6006 RTE_FLOW_ERROR_TYPE_ACTION,
6007 NULL, "MLX5 private action "
6008 "must be the first");
6010 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6013 return rte_flow_error_set(error, ENOTSUP,
6014 RTE_FLOW_ERROR_TYPE_ACTION,
6016 "action not supported");
6020 * Validate actions in flow rules
6021 * - Explicit decap action is prohibited by the tunnel offload API.
6022 * - Drop action in tunnel steer rule is prohibited by the API.
6023 * - Application cannot use MARK action because its value can mask
6024 * tunnel default miss notification.
6025 * - JUMP in tunnel match rule has no support in the current PMD
6027 * - TAG & META are reserved for future uses.
6029 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6030 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
6031 MLX5_FLOW_ACTION_MARK |
6032 MLX5_FLOW_ACTION_SET_TAG |
6033 MLX5_FLOW_ACTION_SET_META |
6034 MLX5_FLOW_ACTION_DROP;
6036 if (action_flags & bad_actions_mask)
6037 return rte_flow_error_set
6039 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6040 "Invalid RTE action in tunnel "
6042 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6043 return rte_flow_error_set
6045 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6046 "tunnel set decap rule must terminate "
6049 return rte_flow_error_set
6051 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052 "tunnel flows for ingress traffic only");
6054 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6055 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
6056 MLX5_FLOW_ACTION_MARK |
6057 MLX5_FLOW_ACTION_SET_TAG |
6058 MLX5_FLOW_ACTION_SET_META;
6060 if (action_flags & bad_actions_mask)
6061 return rte_flow_error_set
6063 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064 "Invalid RTE action in tunnel "
6068 * Validate the drop action mutual exclusion with other actions.
6069 * Drop action is mutually-exclusive with any other action, except for
6072 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6073 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6074 return rte_flow_error_set(error, EINVAL,
6075 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076 "Drop action is mutually-exclusive "
6077 "with any other action, except for "
6079 /* E-Switch has a few restrictions on using items and actions. */
6080 if (attr->transfer) {
6081 if (!mlx5_flow_ext_mreg_supported(dev) &&
6082 action_flags & MLX5_FLOW_ACTION_FLAG)
6083 return rte_flow_error_set(error, ENOTSUP,
6084 RTE_FLOW_ERROR_TYPE_ACTION,
6086 "unsupported action FLAG");
6087 if (!mlx5_flow_ext_mreg_supported(dev) &&
6088 action_flags & MLX5_FLOW_ACTION_MARK)
6089 return rte_flow_error_set(error, ENOTSUP,
6090 RTE_FLOW_ERROR_TYPE_ACTION,
6092 "unsupported action MARK");
6093 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6094 return rte_flow_error_set(error, ENOTSUP,
6095 RTE_FLOW_ERROR_TYPE_ACTION,
6097 "unsupported action QUEUE");
6098 if (action_flags & MLX5_FLOW_ACTION_RSS)
6099 return rte_flow_error_set(error, ENOTSUP,
6100 RTE_FLOW_ERROR_TYPE_ACTION,
6102 "unsupported action RSS");
6103 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6104 return rte_flow_error_set(error, EINVAL,
6105 RTE_FLOW_ERROR_TYPE_ACTION,
6107 "no fate action is found");
6109 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6110 return rte_flow_error_set(error, EINVAL,
6111 RTE_FLOW_ERROR_TYPE_ACTION,
6113 "no fate action is found");
6116 * Continue validation for Xcap and VLAN actions.
6117 * If hairpin is working in explicit TX rule mode, there is no action
6118 * splitting and the validation of hairpin ingress flow should be the
6119 * same as other standard flows.
6121 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6122 MLX5_FLOW_VLAN_ACTIONS)) &&
6123 (queue_index == 0xFFFF ||
6124 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6125 ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6126 conf->tx_explicit != 0))) {
6127 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6128 MLX5_FLOW_XCAP_ACTIONS)
6129 return rte_flow_error_set(error, ENOTSUP,
6130 RTE_FLOW_ERROR_TYPE_ACTION,
6131 NULL, "encap and decap "
6132 "combination aren't supported");
6133 if (!attr->transfer && attr->ingress) {
6134 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6135 return rte_flow_error_set
6137 RTE_FLOW_ERROR_TYPE_ACTION,
6138 NULL, "encap is not supported"
6139 " for ingress traffic");
6140 else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6141 return rte_flow_error_set
6143 RTE_FLOW_ERROR_TYPE_ACTION,
6144 NULL, "push VLAN action not "
6145 "supported for ingress");
6146 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6147 MLX5_FLOW_VLAN_ACTIONS)
6148 return rte_flow_error_set
6150 RTE_FLOW_ERROR_TYPE_ACTION,
6151 NULL, "no support for "
6152 "multiple VLAN actions");
6156 * Hairpin flow will add one more TAG action in TX implicit mode.
6157 * In TX explicit mode, there will be no hairpin flow ID.
6160 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6161 /* Extra metadata enabled: one more TAG action will be added. */
6162 if (dev_conf->dv_flow_en &&
6163 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6164 mlx5_flow_ext_mreg_supported(dev))
6165 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6166 if ((uint32_t)rw_act_num >
6167 flow_dv_modify_hdr_action_max(dev, is_root)) {
6168 return rte_flow_error_set(error, ENOTSUP,
6169 RTE_FLOW_ERROR_TYPE_ACTION,
6170 NULL, "too many header modify"
6171 " actions to support");
6177 * Internal preparation function. Allocates the DV flow size;
6178 * this size is constant.
6181 * Pointer to the rte_eth_dev structure.
6183 * Pointer to the flow attributes.
6185 * Pointer to the list of items.
6186 * @param[in] actions
6187 * Pointer to the list of actions.
6189 * Pointer to the error structure.
6192 * Pointer to mlx5_flow object on success,
6193 * otherwise NULL and rte_errno is set.
6195 static struct mlx5_flow *
6196 flow_dv_prepare(struct rte_eth_dev *dev,
6197 const struct rte_flow_attr *attr __rte_unused,
6198 const struct rte_flow_item items[] __rte_unused,
6199 const struct rte_flow_action actions[] __rte_unused,
6200 struct rte_flow_error *error)
6202 uint32_t handle_idx = 0;
6203 struct mlx5_flow *dev_flow;
6204 struct mlx5_flow_handle *dev_handle;
6205 struct mlx5_priv *priv = dev->data->dev_private;
6206 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6209 /* Guard against corrupting the memory. */
6210 if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6211 rte_flow_error_set(error, ENOSPC,
6212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6213 "not free temporary device flow");
6216 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6219 rte_flow_error_set(error, ENOMEM,
6220 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6221 "not enough memory to create flow handle");
6224 MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6225 dev_flow = &wks->flows[wks->flow_idx++];
6226 dev_flow->handle = dev_handle;
6227 dev_flow->handle_idx = handle_idx;
6229 * In some old rdma-core releases, before continuing, the length of the
6230 * matching parameter is checked at first. The length without the misc4
6231 * param needs to be used. If the flow has misc4 support, then
6232 * the length needs to be adjusted accordingly. Each param member is
6233 * naturally aligned to a 64B boundary.
6235 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6236 MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6238 * The matching value needs to be cleared to 0 before use. In the
6239 * past, it was cleared automatically by the rte_*alloc
6240 * API. The time consumption will be almost the same as before.
6242 memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6243 dev_flow->ingress = attr->ingress;
6244 dev_flow->dv.transfer = attr->transfer;
6248 #ifdef RTE_LIBRTE_MLX5_DEBUG
6250 * Sanity check for match mask and value. Similar to check_valid_spec() in
6251 * the kernel driver. If an unmasked bit is present in the value, it returns failure.
6254 * pointer to match mask buffer.
6255 * @param match_value
6256 * pointer to match value buffer.
6259 * 0 if valid, -EINVAL otherwise.
6262 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6264 uint8_t *m = match_mask;
6265 uint8_t *v = match_value;
6268 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6271 "match_value differs from match_criteria"
6272 " %p[%u] != %p[%u]",
6273 match_value, i, match_mask, i);
6282 * Add match of ip_version.
6286 * @param[in] headers_v
6287 * Values header pointer.
6288 * @param[in] headers_m
6289 * Masks header pointer.
6290 * @param[in] ip_version
6291 * The IP version to set.
6294 flow_dv_set_match_ip_version(uint32_t group,
6300 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6302 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6304 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6305 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6306 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
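/*
 * Illustrative note (assumption for the elided branch above): on the
 * root table (group 0) the ip_version mask must be the full 0xf nibble,
 * while non-root tables may use the value itself as the mask; in both
 * cases the ethertype match is cleared because ip_version already
 * identifies IPv4 vs. IPv6 for the hardware.
 */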
6310 * Add Ethernet item to matcher and to the value.
6312 * @param[in, out] matcher
6314 * @param[in, out] key
6315 * Flow matcher value.
6317 * Flow pattern to translate.
6319 * Item is inner pattern.
6322 flow_dv_translate_item_eth(void *matcher, void *key,
6323 const struct rte_flow_item *item, int inner,
6326 const struct rte_flow_item_eth *eth_m = item->mask;
6327 const struct rte_flow_item_eth *eth_v = item->spec;
6328 const struct rte_flow_item_eth nic_mask = {
6329 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6330 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6331 .type = RTE_BE16(0xffff),
6344 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6346 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6348 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6350 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6352 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6353 &eth_m->dst, sizeof(eth_m->dst));
6354 /* The value must be in the range of the mask. */
6355 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6356 for (i = 0; i < sizeof(eth_m->dst); ++i)
6357 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6358 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6359 &eth_m->src, sizeof(eth_m->src));
6360 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6361 /* The value must be in the range of the mask. */
6362 for (i = 0; i < sizeof(eth_m->dst); ++i)
6363 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6365 * HW supports match on one Ethertype, the Ethertype following the last
6366 * VLAN tag of the packet (see PRM).
6367 * Set match on ethertype only if ETH header is not followed by VLAN.
6368 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6369 * ethertype, and use ip_version field instead.
6370 * eCPRI over the Ethernet layer will use type value 0xAEFE.
6372 if (eth_m->type == 0xFFFF) {
6373 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6374 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6375 switch (eth_v->type) {
6376 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6377 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6379 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6380 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6381 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6383 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6384 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6386 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6387 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6393 if (eth_m->has_vlan) {
6394 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6395 if (eth_v->has_vlan) {
6397 * Here, when also has_more_vlan field in VLAN item is
6398 * not set, only single-tagged packets will be matched.
6400 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6404 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6405 rte_be_to_cpu_16(eth_m->type));
6406 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6407 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6411 * Add VLAN item to matcher and to the value.
6413 * @param[in, out] dev_flow
6415 * @param[in, out] matcher
6417 * @param[in, out] key
6418 * Flow matcher value.
6420 * Flow pattern to translate.
6422 * Item is inner pattern.
6425 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6426 void *matcher, void *key,
6427 const struct rte_flow_item *item,
6428 int inner, uint32_t group)
6430 const struct rte_flow_item_vlan *vlan_m = item->mask;
6431 const struct rte_flow_item_vlan *vlan_v = item->spec;
6438 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6440 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6442 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6444 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6446 * This is a workaround; masks are not supported,
6447 * and pre-validated.
6450 dev_flow->handle->vf_vlan.tag =
6451 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6454 * When a VLAN item exists in the flow, mark the packet as tagged,
6455 * even if TCI is not specified.
6457 if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6458 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6459 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6464 vlan_m = &rte_flow_item_vlan_mask;
6465 tci_m = rte_be_to_cpu_16(vlan_m->tci);
6466 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6467 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6468 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6469 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6470 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6471 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6472 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
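	/*
	 * Illustration (added, not from the original source): the 802.1Q TCI
	 * is PCP[15:13] | DEI/CFI[12] | VID[11:0], so e.g. TCI 0xA064 yields
	 * first_prio = 5, first_cfi = 0 and first_vid = 0x064; MLX5_SET()
	 * truncates the shifted values to the width of each PRM field.
	 */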
6474 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6475 * ethertype, and use ip_version field instead.
6477 if (vlan_m->inner_type == 0xFFFF) {
6478 switch (vlan_v->inner_type) {
6479 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6480 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6481 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6482 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6484 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6485 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6487 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6488 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6494 if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6495 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6496 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6497 /* Only one vlan_tag bit can be set. */
6498 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6501 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6502 rte_be_to_cpu_16(vlan_m->inner_type));
6503 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6504 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6508 * Add IPV4 item to matcher and to the value.
6510 * @param[in, out] matcher
6512 * @param[in, out] key
6513 * Flow matcher value.
6515 * Flow pattern to translate.
6517 * Item is inner pattern.
6519 * The group to insert the rule.
6522 flow_dv_translate_item_ipv4(void *matcher, void *key,
6523 const struct rte_flow_item *item,
6524 int inner, uint32_t group)
6526 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6527 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6528 const struct rte_flow_item_ipv4 nic_mask = {
6530 .src_addr = RTE_BE32(0xffffffff),
6531 .dst_addr = RTE_BE32(0xffffffff),
6532 .type_of_service = 0xff,
6533 .next_proto_id = 0xff,
6534 .time_to_live = 0xff,
6544 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6546 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6548 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6550 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6552 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6557 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6558 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6559 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6560 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6561 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6562 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6563 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6564 src_ipv4_src_ipv6.ipv4_layout.ipv4);
6565 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6566 src_ipv4_src_ipv6.ipv4_layout.ipv4);
6567 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6568 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6569 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6570 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6571 ipv4_m->hdr.type_of_service);
6572 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6573 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6574 ipv4_m->hdr.type_of_service >> 2);
6575 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6576 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6577 ipv4_m->hdr.next_proto_id);
6578 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6579 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6580 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6581 ipv4_m->hdr.time_to_live);
6582 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6583 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6584 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6585 !!(ipv4_m->hdr.fragment_offset));
6586 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6587 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6591 * Add IPV6 item to matcher and to the value.
6593 * @param[in, out] matcher
6595 * @param[in, out] key
6596 * Flow matcher value.
6598 * Flow pattern to translate.
6600 * Item is inner pattern.
6602 * The group to insert the rule.
6605 flow_dv_translate_item_ipv6(void *matcher, void *key,
6606 const struct rte_flow_item *item,
6607 int inner, uint32_t group)
6609 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6610 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6611 const struct rte_flow_item_ipv6 nic_mask = {
6614 "\xff\xff\xff\xff\xff\xff\xff\xff"
6615 "\xff\xff\xff\xff\xff\xff\xff\xff",
6617 "\xff\xff\xff\xff\xff\xff\xff\xff"
6618 "\xff\xff\xff\xff\xff\xff\xff\xff",
6619 .vtc_flow = RTE_BE32(0xffffffff),
6626 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6627 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6636 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6638 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6640 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6642 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6644 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6649 size = sizeof(ipv6_m->hdr.dst_addr);
6650 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6651 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6652 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6653 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6654 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6655 for (i = 0; i < size; ++i)
6656 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6657 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6658 src_ipv4_src_ipv6.ipv6_layout.ipv6);
6659 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6660 src_ipv4_src_ipv6.ipv6_layout.ipv6);
6661 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6662 for (i = 0; i < size; ++i)
6663 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6665 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6666 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6667 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6668 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6669 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6670 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6673 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6675 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6678 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6680 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6684 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6686 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6687 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6689 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6690 ipv6_m->hdr.hop_limits);
6691 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6692 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6693 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6694 !!(ipv6_m->has_frag_ext));
6695 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6696 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6700 * Add IPV6 fragment extension item to matcher and to the value.
6702 * @param[in, out] matcher
6704 * @param[in, out] key
6705 * Flow matcher value.
6707 * Flow pattern to translate.
6709 * Item is inner pattern.
6712 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6713 const struct rte_flow_item *item,
6716 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6717 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6718 const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6720 .next_header = 0xff,
6721 .frag_data = RTE_BE16(0xffff),
6728 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6730 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6732 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6734 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6736 /* IPv6 fragment extension item exists, so packet is IP fragment. */
6737 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6738 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6739 if (!ipv6_frag_ext_v)
6741 if (!ipv6_frag_ext_m)
6742 ipv6_frag_ext_m = &nic_mask;
6743 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6744 ipv6_frag_ext_m->hdr.next_header);
6745 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6746 ipv6_frag_ext_v->hdr.next_header &
6747 ipv6_frag_ext_m->hdr.next_header);
6751 * Add TCP item to matcher and to the value.
6753 * @param[in, out] matcher
6755 * @param[in, out] key
6756 * Flow matcher value.
6758 * Flow pattern to translate.
6760 * Item is inner pattern.
6763 flow_dv_translate_item_tcp(void *matcher, void *key,
6764 const struct rte_flow_item *item,
6767 const struct rte_flow_item_tcp *tcp_m = item->mask;
6768 const struct rte_flow_item_tcp *tcp_v = item->spec;
6773 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6775 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6777 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6779 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6781 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6782 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6786 tcp_m = &rte_flow_item_tcp_mask;
6787 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6788 rte_be_to_cpu_16(tcp_m->hdr.src_port));
6789 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6790 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6791 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6792 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6793 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6794 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6795 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6796 tcp_m->hdr.tcp_flags);
6797 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6798 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6802 * Add UDP item to matcher and to the value.
6804 * @param[in, out] matcher
6806 * @param[in, out] key
6807 * Flow matcher value.
6809 * Flow pattern to translate.
6811 * Item is inner pattern.
6814 flow_dv_translate_item_udp(void *matcher, void *key,
6815 const struct rte_flow_item *item,
6818 const struct rte_flow_item_udp *udp_m = item->mask;
6819 const struct rte_flow_item_udp *udp_v = item->spec;
6824 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6826 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6828 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6830 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6832 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6833 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6837 udp_m = &rte_flow_item_udp_mask;
6838 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6839 rte_be_to_cpu_16(udp_m->hdr.src_port));
6840 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6841 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6842 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6843 rte_be_to_cpu_16(udp_m->hdr.dst_port));
6844 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6845 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6849 * Add GRE optional Key item to matcher and to the value.
6851 * @param[in, out] matcher
6853 * @param[in, out] key
6854 * Flow matcher value.
6856 * Flow pattern to translate.
6858 * Item is inner pattern.
6861 flow_dv_translate_item_gre_key(void *matcher, void *key,
6862 const struct rte_flow_item *item)
6864 const rte_be32_t *key_m = item->mask;
6865 const rte_be32_t *key_v = item->spec;
6866 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6867 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6868 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6870 /* GRE K bit must be on and should already be validated */
6871 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6872 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
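	/*
	 * Note (added): the PRM splits the 32-bit GRE key into gre_key_h
	 * (upper 24 bits) and gre_key_l (lower 8 bits), hence the >> 8 and
	 * & 0xFF decomposition below.
	 */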
6876 key_m = &gre_key_default_mask;
6877 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6878 rte_be_to_cpu_32(*key_m) >> 8);
6879 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6880 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6881 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6882 rte_be_to_cpu_32(*key_m) & 0xFF);
6883 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6884 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6888 * Add GRE item to matcher and to the value.
6890 * @param[in, out] matcher
6892 * @param[in, out] key
6893 * Flow matcher value.
6895 * Flow pattern to translate.
6897 * Item is inner pattern.
6900 flow_dv_translate_item_gre(void *matcher, void *key,
6901 const struct rte_flow_item *item,
6904 const struct rte_flow_item_gre *gre_m = item->mask;
6905 const struct rte_flow_item_gre *gre_v = item->spec;
6908 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6909 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6916 uint16_t s_present:1;
6917 uint16_t k_present:1;
6918 uint16_t rsvd_bit1:1;
6919 uint16_t c_present:1;
6923 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6926 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6928 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6930 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6932 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6934 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6935 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6939 gre_m = &rte_flow_item_gre_mask;
6940 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6941 rte_be_to_cpu_16(gre_m->protocol));
6942 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6943 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6944 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6945 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6946 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6947 gre_crks_rsvd0_ver_m.c_present);
6948 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6949 gre_crks_rsvd0_ver_v.c_present &
6950 gre_crks_rsvd0_ver_m.c_present);
6951 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6952 gre_crks_rsvd0_ver_m.k_present);
6953 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6954 gre_crks_rsvd0_ver_v.k_present &
6955 gre_crks_rsvd0_ver_m.k_present);
6956 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6957 gre_crks_rsvd0_ver_m.s_present);
6958 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6959 gre_crks_rsvd0_ver_v.s_present &
6960 gre_crks_rsvd0_ver_m.s_present);
6964 * Add NVGRE item to matcher and to the value.
6966 * @param[in, out] matcher
6968 * @param[in, out] key
6969 * Flow matcher value.
6971 * Flow pattern to translate.
6973 * Item is inner pattern.
6976 flow_dv_translate_item_nvgre(void *matcher, void *key,
6977 const struct rte_flow_item *item,
6980 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6981 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6982 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6983 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6984 const char *tni_flow_id_m;
6985 const char *tni_flow_id_v;
6991 /* For NVGRE, GRE header fields must be set with defined values. */
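	/*
	 * Note (added): in the GRE c_rsvd0_ver word the C, K and S flags sit
	 * at bits 15, 13 and 12, so spec 0x2000 with mask 0xB000 matches
	 * K = 1 while forcing C = 0 and S = 0, as NVGRE requires.
	 */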
6992 const struct rte_flow_item_gre gre_spec = {
6993 .c_rsvd0_ver = RTE_BE16(0x2000),
6994 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6996 const struct rte_flow_item_gre gre_mask = {
6997 .c_rsvd0_ver = RTE_BE16(0xB000),
6998 .protocol = RTE_BE16(UINT16_MAX),
7000 const struct rte_flow_item gre_item = {
7005 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7009 nvgre_m = &rte_flow_item_nvgre_mask;
7010 tni_flow_id_m = (const char *)nvgre_m->tni;
7011 tni_flow_id_v = (const char *)nvgre_v->tni;
7012 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
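	/*
	 * Note (added): the 24-bit TNI and the 8-bit flow_id together occupy
	 * the 32-bit GRE key, so both are copied as a single 4-byte block
	 * starting at gre_key_h.
	 */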
7013 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7014 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7015 memcpy(gre_key_m, tni_flow_id_m, size);
7016 for (i = 0; i < size; ++i)
7017 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7021 * Add VXLAN item to matcher and to the value.
7023 * @param[in, out] matcher
7025 * @param[in, out] key
7026 * Flow matcher value.
7028 * Flow pattern to translate.
7030 * Item is inner pattern.
7033 flow_dv_translate_item_vxlan(void *matcher, void *key,
7034 const struct rte_flow_item *item,
7037 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7038 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7041 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7042 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7050 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7052 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7054 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7056 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7058 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7059 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7060 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7061 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7062 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
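		/*
		 * Note (added): when the preceding UDP item did not constrain
		 * the destination port, match on the well-known VXLAN /
		 * VXLAN-GPE port so that the tunnel type is unambiguous.
		 */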
7067 vxlan_m = &rte_flow_item_vxlan_mask;
7068 size = sizeof(vxlan_m->vni);
7069 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7070 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7071 memcpy(vni_m, vxlan_m->vni, size);
7072 for (i = 0; i < size; ++i)
7073 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7077 * Add VXLAN-GPE item to matcher and to the value.
7079 * @param[in, out] matcher
7081 * @param[in, out] key
7082 * Flow matcher value.
7084 * Flow pattern to translate.
7086 * Item is inner pattern.
7090 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7091 const struct rte_flow_item *item, int inner)
7093 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7094 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7098 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7100 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7106 uint8_t flags_m = 0xff;
7107 uint8_t flags_v = 0xc;
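	/*
	 * Note (added): 0x0c is assumed here to be the default VXLAN-GPE
	 * flags value, i.e. the I (VNI valid) and P (next protocol present)
	 * bits set, used when the pattern does not specify flags explicitly.
	 */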
7110 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7112 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7114 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7116 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7118 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7119 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7120 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7121 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7122 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7127 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7128 size = sizeof(vxlan_m->vni);
7129 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7130 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7131 memcpy(vni_m, vxlan_m->vni, size);
7132 for (i = 0; i < size; ++i)
7133 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7134 if (vxlan_m->flags) {
7135 flags_m = vxlan_m->flags;
7136 flags_v = vxlan_v->flags;
7138 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7139 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7140 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7142 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7147 * Add Geneve item to matcher and to the value.
7149 * @param[in, out] matcher
7151 * @param[in, out] key
7152 * Flow matcher value.
7154 * Flow pattern to translate.
7156 * Item is inner pattern.
7160 flow_dv_translate_item_geneve(void *matcher, void *key,
7161 const struct rte_flow_item *item, int inner)
7163 const struct rte_flow_item_geneve *geneve_m = item->mask;
7164 const struct rte_flow_item_geneve *geneve_v = item->spec;
7167 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7168 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7177 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7179 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7181 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7183 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7185 dport = MLX5_UDP_PORT_GENEVE;
7186 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7187 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7188 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7193 geneve_m = &rte_flow_item_geneve_mask;
7194 size = sizeof(geneve_m->vni);
7195 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7196 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7197 memcpy(vni_m, geneve_m->vni, size);
7198 for (i = 0; i < size; ++i)
7199 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7200 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7201 rte_be_to_cpu_16(geneve_m->protocol));
7202 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7203 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7204 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7205 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
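	/*
	 * Note (added): in the first Geneve word, Ver occupies bits 15-14,
	 * Opt Len bits 13-8, O bit 7 and C bit 6; the MLX5_GENEVE_*_VAL()
	 * helpers extract the OAM flag and the options length from the
	 * CPU-order value.
	 */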
7206 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7207 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7208 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7209 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7210 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7211 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7212 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7213 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7214 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7218 * Add MPLS item to matcher and to the value.
7220 * @param[in, out] matcher
7222 * @param[in, out] key
7223 * Flow matcher value.
7225 * Flow pattern to translate.
7226 * @param[in] prev_layer
7227 * The protocol layer indicated in previous item.
7229 * Item is inner pattern.
7232 flow_dv_translate_item_mpls(void *matcher, void *key,
7233 const struct rte_flow_item *item,
7234 uint64_t prev_layer,
7237 const uint32_t *in_mpls_m = item->mask;
7238 const uint32_t *in_mpls_v = item->spec;
7239 uint32_t *out_mpls_m = 0;
7240 uint32_t *out_mpls_v = 0;
7241 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7242 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7243 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7245 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7246 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7247 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7249 switch (prev_layer) {
7250 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7251 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7252 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7253 MLX5_UDP_PORT_MPLS);
7255 case MLX5_FLOW_LAYER_GRE:
7256 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7257 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7258 RTE_ETHER_TYPE_MPLS);
7261 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7262 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7269 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7270 switch (prev_layer) {
7271 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7273 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7274 outer_first_mpls_over_udp);
7276 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7277 outer_first_mpls_over_udp);
7279 case MLX5_FLOW_LAYER_GRE:
7281 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7282 outer_first_mpls_over_gre);
7284 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7285 outer_first_mpls_over_gre);
7288 /* Inner MPLS not over GRE is not supported. */
7291 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7295 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7301 if (out_mpls_m && out_mpls_v) {
7302 *out_mpls_m = *in_mpls_m;
7303 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7308 * Add metadata register item to matcher
7310 * @param[in, out] matcher
7312 * @param[in, out] key
7313 * Flow matcher value.
7314 * @param[in] reg_type
7315 * Type of device metadata register
7322 flow_dv_match_meta_reg(void *matcher, void *key,
7323 enum modify_reg reg_type,
7324 uint32_t data, uint32_t mask)
7327 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7329 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7335 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7336 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7339 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7340 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7344 		 * The metadata register C0 field might be divided into
7345 		 * source vport index and META item value; we should set
7346 		 * this field according to the specified mask, not as a whole.
7348 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7350 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7351 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7354 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
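		/*
		 * Note (added): the read-modify-write above only updates the
		 * bits covered by the supplied mask, so the REG_C_0 bits that
		 * carry the source vport index are left untouched.
		 */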
7357 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7358 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7361 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7362 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7365 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7366 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7369 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7370 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7373 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7374 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7377 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7378 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7381 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7382 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7391 * Add MARK item to matcher
7394 * The device to configure through.
7395 * @param[in, out] matcher
7397 * @param[in, out] key
7398 * Flow matcher value.
7400 * Flow pattern to translate.
7403 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7404 void *matcher, void *key,
7405 const struct rte_flow_item *item)
7407 struct mlx5_priv *priv = dev->data->dev_private;
7408 const struct rte_flow_item_mark *mark;
7412 mark = item->mask ? (const void *)item->mask :
7413 &rte_flow_item_mark_mask;
7414 mask = mark->id & priv->sh->dv_mark_mask;
7415 mark = (const void *)item->spec;
7417 value = mark->id & priv->sh->dv_mark_mask & mask;
7419 enum modify_reg reg;
7421 /* Get the metadata register index for the mark. */
7422 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7423 MLX5_ASSERT(reg > 0);
7424 if (reg == REG_C_0) {
7425 struct mlx5_priv *priv = dev->data->dev_private;
7426 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7427 uint32_t shl_c0 = rte_bsf32(msk_c0);
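			/*
			 * Note (added): rte_bsf32() returns the index of the
			 * least significant set bit of dv_regc0_mask, i.e. how
			 * far the MARK value and mask must be shifted to land
			 * in the REG_C_0 bits reserved for the application.
			 */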
7433 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7438 * Add META item to matcher
7441  * The device to configure through.
7442 * @param[in, out] matcher
7444 * @param[in, out] key
7445 * Flow matcher value.
7447 * Attributes of flow that includes this item.
7449 * Flow pattern to translate.
7452 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7453 void *matcher, void *key,
7454 const struct rte_flow_attr *attr,
7455 const struct rte_flow_item *item)
7457 const struct rte_flow_item_meta *meta_m;
7458 const struct rte_flow_item_meta *meta_v;
7460 meta_m = (const void *)item->mask;
7462 meta_m = &rte_flow_item_meta_mask;
7463 meta_v = (const void *)item->spec;
7466 uint32_t value = meta_v->data;
7467 uint32_t mask = meta_m->data;
7469 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7473 * In datapath code there is no endianness
7474 		 * conversions for performance reasons, all
7475 * pattern conversions are done in rte_flow.
7477 value = rte_cpu_to_be_32(value);
7478 mask = rte_cpu_to_be_32(mask);
7479 if (reg == REG_C_0) {
7480 struct mlx5_priv *priv = dev->data->dev_private;
7481 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7482 uint32_t shl_c0 = rte_bsf32(msk_c0);
7483 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7484 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7491 MLX5_ASSERT(msk_c0);
7492 MLX5_ASSERT(!(~msk_c0 & mask));
7494 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7499 * Add vport metadata Reg C0 item to matcher
7501 * @param[in, out] matcher
7503 * @param[in, out] key
7504 * Flow matcher value.
7506 * Flow pattern to translate.
7509 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7510 uint32_t value, uint32_t mask)
7512 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7516 * Add tag item to matcher
7519  * The device to configure through.
7520 * @param[in, out] matcher
7522 * @param[in, out] key
7523 * Flow matcher value.
7525 * Flow pattern to translate.
7528 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7529 void *matcher, void *key,
7530 const struct rte_flow_item *item)
7532 const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7533 const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7534 uint32_t mask, value;
7537 value = tag_v->data;
7538 mask = tag_m ? tag_m->data : UINT32_MAX;
7539 if (tag_v->id == REG_C_0) {
7540 struct mlx5_priv *priv = dev->data->dev_private;
7541 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7542 uint32_t shl_c0 = rte_bsf32(msk_c0);
7548 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7552 * Add TAG item to matcher
7555  * The device to configure through.
7556 * @param[in, out] matcher
7558 * @param[in, out] key
7559 * Flow matcher value.
7561 * Flow pattern to translate.
7564 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7565 void *matcher, void *key,
7566 const struct rte_flow_item *item)
7568 const struct rte_flow_item_tag *tag_v = item->spec;
7569 const struct rte_flow_item_tag *tag_m = item->mask;
7570 enum modify_reg reg;
7573 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7574 /* Get the metadata register index for the tag. */
7575 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7576 MLX5_ASSERT(reg > 0);
7577 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7581 * Add source vport match to the specified matcher.
7583 * @param[in, out] matcher
7585 * @param[in, out] key
7586 * Flow matcher value.
7588 * Source vport value to match
7593 flow_dv_translate_item_source_vport(void *matcher, void *key,
7594 int16_t port, uint16_t mask)
7596 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7597 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7599 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7600 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7604 * Translate port-id item to eswitch match on port-id.
7607  * The device to configure through.
7608 * @param[in, out] matcher
7610 * @param[in, out] key
7611 * Flow matcher value.
7613 * Flow pattern to translate.
7616 * 0 on success, a negative errno value otherwise.
7619 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7620 void *key, const struct rte_flow_item *item)
7622 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7623 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7624 struct mlx5_priv *priv;
7627 mask = pid_m ? pid_m->id : 0xffff;
7628 id = pid_v ? pid_v->id : dev->data->port_id;
7629 priv = mlx5_port_to_eswitch_info(id, item == NULL);
7632 /* Translate to vport field or to metadata, depending on mode. */
7633 if (priv->vport_meta_mask)
7634 flow_dv_translate_item_meta_vport(matcher, key,
7635 priv->vport_meta_tag,
7636 priv->vport_meta_mask);
7638 flow_dv_translate_item_source_vport(matcher, key,
7639 priv->vport_id, mask);
7644 * Add ICMP6 item to matcher and to the value.
7646 * @param[in, out] matcher
7648 * @param[in, out] key
7649 * Flow matcher value.
7651 * Flow pattern to translate.
7653 * Item is inner pattern.
7656 flow_dv_translate_item_icmp6(void *matcher, void *key,
7657 const struct rte_flow_item *item,
7660 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7661 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7664 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7666 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7668 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7670 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7672 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7674 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7676 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7677 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7681 icmp6_m = &rte_flow_item_icmp6_mask;
7682 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7683 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7684 icmp6_v->type & icmp6_m->type);
7685 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7686 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7687 icmp6_v->code & icmp6_m->code);
7691 * Add ICMP item to matcher and to the value.
7693 * @param[in, out] matcher
7695 * @param[in, out] key
7696 * Flow matcher value.
7698 * Flow pattern to translate.
7700 * Item is inner pattern.
7703 flow_dv_translate_item_icmp(void *matcher, void *key,
7704 const struct rte_flow_item *item,
7707 const struct rte_flow_item_icmp *icmp_m = item->mask;
7708 const struct rte_flow_item_icmp *icmp_v = item->spec;
7709 uint32_t icmp_header_data_m = 0;
7710 uint32_t icmp_header_data_v = 0;
7713 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7715 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7717 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7719 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7721 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7723 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7725 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7726 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7730 icmp_m = &rte_flow_item_icmp_mask;
7731 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7732 icmp_m->hdr.icmp_type);
7733 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7734 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7735 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7736 icmp_m->hdr.icmp_code);
7737 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7738 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7739 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7740 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
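	/*
	 * Note (added): the PRM icmp_header_data field carries the second
	 * 32-bit word of the ICMP header, so the identifier is placed in the
	 * upper 16 bits and the sequence number in the lower 16 bits.
	 */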
7741 if (icmp_header_data_m) {
7742 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7743 icmp_header_data_v |=
7744 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7745 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7746 icmp_header_data_m);
7747 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7748 icmp_header_data_v & icmp_header_data_m);
7753 * Add GTP item to matcher and to the value.
7755 * @param[in, out] matcher
7757 * @param[in, out] key
7758 * Flow matcher value.
7760 * Flow pattern to translate.
7762 * Item is inner pattern.
7765 flow_dv_translate_item_gtp(void *matcher, void *key,
7766 const struct rte_flow_item *item, int inner)
7768 const struct rte_flow_item_gtp *gtp_m = item->mask;
7769 const struct rte_flow_item_gtp *gtp_v = item->spec;
7772 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7774 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7775 uint16_t dport = RTE_GTPU_UDP_PORT;
7778 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7780 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7782 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7784 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7786 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7787 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7788 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7793 gtp_m = &rte_flow_item_gtp_mask;
7794 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7795 gtp_m->v_pt_rsv_flags);
7796 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7797 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7798 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7799 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7800 gtp_v->msg_type & gtp_m->msg_type);
7801 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7802 rte_be_to_cpu_32(gtp_m->teid));
7803 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7804 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7808 * Add eCPRI item to matcher and to the value.
7811  * The device to configure through.
7812 * @param[in, out] matcher
7814 * @param[in, out] key
7815 * Flow matcher value.
7817 * Flow pattern to translate.
7818 * @param[in] samples
7819 * Sample IDs to be used in the matching.
7822 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7823 void *key, const struct rte_flow_item *item)
7825 struct mlx5_priv *priv = dev->data->dev_private;
7826 const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7827 const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7828 void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7830 void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7838 ecpri_m = &rte_flow_item_ecpri_mask;
7840 	 * At most four DW samples are supported in a single matching now.
7841 	 * Two are currently used for eCPRI matching:
7842 * 1. Type: one byte, mask should be 0x00ff0000 in network order
7843 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7846 if (!ecpri_m->hdr.common.u32)
7848 samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7849 /* Need to take the whole DW as the mask to fill the entry. */
7850 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7851 prog_sample_field_value_0);
7852 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7853 prog_sample_field_value_0);
7854 /* Already big endian (network order) in the header. */
7855 *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7856 *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7857 /* Sample#0, used for matching type, offset 0. */
7858 MLX5_SET(fte_match_set_misc4, misc4_m,
7859 prog_sample_field_id_0, samples[0]);
7860 /* It makes no sense to set the sample ID in the mask field. */
7861 MLX5_SET(fte_match_set_misc4, misc4_v,
7862 prog_sample_field_id_0, samples[0]);
7864 	 * Check whether the message body part needs to be matched.
7865 	 * Wildcard rules matching only the type field should also be supported.
7867 if (ecpri_m->hdr.dummy[0]) {
7868 switch (ecpri_v->hdr.common.type) {
7869 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7870 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7871 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7872 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7873 prog_sample_field_value_1);
7874 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7875 prog_sample_field_value_1);
7876 *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7877 *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7878 /* Sample#1, to match message body, offset 4. */
7879 MLX5_SET(fte_match_set_misc4, misc4_m,
7880 prog_sample_field_id_1, samples[1]);
7881 MLX5_SET(fte_match_set_misc4, misc4_v,
7882 prog_sample_field_id_1, samples[1]);
7885 /* Others, do not match any sample ID. */
7891 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
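/*
 * Note (added): HEADER_IS_ZERO() below evaluates to 1 when the given header
 * block inside the match criteria is entirely zero, i.e. no field of that
 * block takes part in the match; flow_dv_matcher_enable() uses it to build
 * the match_criteria_enable bitmap consumed by the device.
 */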
7893 #define HEADER_IS_ZERO(match_criteria, headers) \
7894 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
7895 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7898 * Calculate flow matcher enable bitmap.
7900 * @param match_criteria
7901 * Pointer to flow matcher criteria.
7904 * Bitmap of enabled fields.
7907 flow_dv_matcher_enable(uint32_t *match_criteria)
7909 uint8_t match_criteria_enable;
7911 match_criteria_enable =
7912 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7913 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7914 match_criteria_enable |=
7915 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7916 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7917 match_criteria_enable |=
7918 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7919 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7920 match_criteria_enable |=
7921 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7922 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7923 match_criteria_enable |=
7924 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7925 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7926 match_criteria_enable |=
7927 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7928 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7929 return match_criteria_enable;
7932 struct mlx5_hlist_entry *
7933 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7935 struct mlx5_dev_ctx_shared *sh = list->ctx;
7936 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7937 struct rte_eth_dev *dev = ctx->dev;
7938 struct mlx5_flow_tbl_data_entry *tbl_data;
7939 struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7940 struct rte_flow_error *error = ctx->error;
7941 union mlx5_flow_tbl_key key = { .v64 = key64 };
7942 struct mlx5_flow_tbl_resource *tbl;
7947 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7949 rte_flow_error_set(error, ENOMEM,
7950 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7952 "cannot allocate flow table data entry");
7955 tbl_data->idx = idx;
7956 tbl_data->tunnel = tt_prm->tunnel;
7957 tbl_data->group_id = tt_prm->group_id;
7958 tbl_data->external = tt_prm->external;
7959 tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7960 tbl_data->is_egress = !!key.direction;
7961 tbl = &tbl_data->tbl;
7963 return &tbl_data->entry;
7965 domain = sh->fdb_domain;
7966 else if (key.direction)
7967 domain = sh->tx_domain;
7969 domain = sh->rx_domain;
7970 ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
7972 rte_flow_error_set(error, ENOMEM,
7973 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7974 NULL, "cannot create flow table object");
7975 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7979 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
7980 (tbl->obj, &tbl_data->jump.action);
7982 rte_flow_error_set(error, ENOMEM,
7983 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7985 "cannot create flow jump action");
7986 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7987 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7991 MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
7992 key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
7994 mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
7995 flow_dv_matcher_create_cb,
7996 flow_dv_matcher_match_cb,
7997 flow_dv_matcher_remove_cb);
7998 return &tbl_data->entry;
8004 * @param[in, out] dev
8005 * Pointer to rte_eth_dev structure.
8006 * @param[in] table_id
8009 * Direction of the table.
8010 * @param[in] transfer
8011 * E-Switch or NIC flow.
8013 * Dummy entry for dv API.
8015 * pointer to error structure.
8018  * Returns the table resource based on the index, or NULL in case of failure.
8020 struct mlx5_flow_tbl_resource *
8021 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8022 uint32_t table_id, uint8_t egress,
8025 const struct mlx5_flow_tunnel *tunnel,
8026 uint32_t group_id, uint8_t dummy,
8027 struct rte_flow_error *error)
8029 struct mlx5_priv *priv = dev->data->dev_private;
8030 union mlx5_flow_tbl_key table_key = {
8032 .table_id = table_id,
8034 .domain = !!transfer,
8035 .direction = !!egress,
8038 struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8040 .group_id = group_id,
8041 .external = external,
8043 struct mlx5_flow_cb_ctx ctx = {
8048 struct mlx5_hlist_entry *entry;
8049 struct mlx5_flow_tbl_data_entry *tbl_data;
8051 entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8053 rte_flow_error_set(error, ENOMEM,
8054 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8055 "cannot get table");
8058 tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8059 return &tbl_data->tbl;
8063 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8064 struct mlx5_hlist_entry *entry)
8066 struct mlx5_dev_ctx_shared *sh = list->ctx;
8067 struct mlx5_flow_tbl_data_entry *tbl_data =
8068 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8070 MLX5_ASSERT(entry && sh);
8071 if (tbl_data->jump.action)
8072 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8073 if (tbl_data->tbl.obj)
8074 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8075 if (tbl_data->tunnel_offload && tbl_data->external) {
8076 struct mlx5_hlist_entry *he;
8077 struct mlx5_hlist *tunnel_grp_hash;
8078 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8079 union tunnel_tbl_key tunnel_key = {
8080 .tunnel_id = tbl_data->tunnel ?
8081 tbl_data->tunnel->tunnel_id : 0,
8082 .group = tbl_data->group_id
8084 union mlx5_flow_tbl_key table_key = {
8087 uint32_t table_id = table_key.table_id;
8089 tunnel_grp_hash = tbl_data->tunnel ?
8090 tbl_data->tunnel->groups :
8092 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8094 struct tunnel_tbl_entry *tte;
8095 tte = container_of(he, typeof(*tte), hash);
8096 MLX5_ASSERT(tte->flow_table == table_id);
8097 mlx5_hlist_remove(tunnel_grp_hash, he);
8100 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8101 tunnel_flow_tbl_to_id(table_id));
8103 "Table_id %#x tunnel %u group %u released.",
8106 tbl_data->tunnel->tunnel_id : 0,
8107 tbl_data->group_id);
8109 mlx5_cache_list_destroy(&tbl_data->matchers);
8110 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8114 * Release a flow table.
8117 * Pointer to device shared structure.
8119 * Table resource to be released.
8122  * Returns 0 if the table was released, 1 otherwise.
8125 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8126 struct mlx5_flow_tbl_resource *tbl)
8128 struct mlx5_flow_tbl_data_entry *tbl_data =
8129 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8133 return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8137 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8138 struct mlx5_cache_entry *entry, void *cb_ctx)
8140 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8141 struct mlx5_flow_dv_matcher *ref = ctx->data;
8142 struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8145 return cur->crc != ref->crc ||
8146 cur->priority != ref->priority ||
8147 memcmp((const void *)cur->mask.buf,
8148 (const void *)ref->mask.buf, ref->mask.size);
8151 struct mlx5_cache_entry *
8152 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8153 struct mlx5_cache_entry *entry __rte_unused,
8156 struct mlx5_dev_ctx_shared *sh = list->ctx;
8157 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8158 struct mlx5_flow_dv_matcher *ref = ctx->data;
8159 struct mlx5_flow_dv_matcher *cache;
8160 struct mlx5dv_flow_matcher_attr dv_attr = {
8161 .type = IBV_FLOW_ATTR_NORMAL,
8162 .match_mask = (void *)&ref->mask,
8164 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8168 cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8170 rte_flow_error_set(ctx->error, ENOMEM,
8171 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8172 "cannot create matcher");
8176 dv_attr.match_criteria_enable =
8177 flow_dv_matcher_enable(cache->mask.buf);
8178 dv_attr.priority = ref->priority;
8180 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8181 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8182 &cache->matcher_object);
8185 rte_flow_error_set(ctx->error, ENOMEM,
8186 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8187 "cannot create matcher");
8190 return &cache->entry;
8194 * Register the flow matcher.
8196 * @param[in, out] dev
8197 * Pointer to rte_eth_dev structure.
8198 * @param[in, out] matcher
8199 * Pointer to flow matcher.
8200 * @param[in, out] key
8201 * Pointer to flow table key.
8202  * @param[in, out] dev_flow
8203 * Pointer to the dev_flow.
8205 * pointer to error structure.
8208 * 0 on success otherwise -errno and errno is set.
8211 flow_dv_matcher_register(struct rte_eth_dev *dev,
8212 struct mlx5_flow_dv_matcher *ref,
8213 union mlx5_flow_tbl_key *key,
8214 struct mlx5_flow *dev_flow,
8215 struct rte_flow_error *error)
8217 struct mlx5_cache_entry *entry;
8218 struct mlx5_flow_dv_matcher *cache;
8219 struct mlx5_flow_tbl_resource *tbl;
8220 struct mlx5_flow_tbl_data_entry *tbl_data;
8221 struct mlx5_flow_cb_ctx ctx = {
8226 tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8227 key->domain, false, NULL, 0, 0, error);
8229 return -rte_errno; /* No need to refill the error info */
8230 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8232 entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8234 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8235 return rte_flow_error_set(error, ENOMEM,
8236 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8237 "cannot allocate ref memory");
8239 cache = container_of(entry, typeof(*cache), entry);
8240 dev_flow->handle->dvh.matcher = cache;
8244 struct mlx5_hlist_entry *
8245 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8247 struct mlx5_dev_ctx_shared *sh = list->ctx;
8248 struct rte_flow_error *error = ctx;
8249 struct mlx5_flow_dv_tag_resource *entry;
8253 entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8255 rte_flow_error_set(error, ENOMEM,
8256 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8257 "cannot allocate resource memory");
8261 ret = mlx5_flow_os_create_flow_action_tag(key,
8264 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8265 rte_flow_error_set(error, ENOMEM,
8266 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8267 NULL, "cannot create action");
8270 return &entry->entry;
8274 * Find existing tag resource or create and register a new one.
8276  * @param[in, out] dev
8277 * Pointer to rte_eth_dev structure.
8278 * @param[in, out] tag_be24
8279  * Tag value in big endian, then right-shifted by 8 bits.
8280  * @param[in, out] dev_flow
8281 * Pointer to the dev_flow.
8283 * pointer to error structure.
8286 * 0 on success otherwise -errno and errno is set.
8289 flow_dv_tag_resource_register
8290 (struct rte_eth_dev *dev,
8292 struct mlx5_flow *dev_flow,
8293 struct rte_flow_error *error)
8295 struct mlx5_priv *priv = dev->data->dev_private;
8296 struct mlx5_flow_dv_tag_resource *cache_resource;
8297 struct mlx5_hlist_entry *entry;
8299 entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8301 cache_resource = container_of
8302 (entry, struct mlx5_flow_dv_tag_resource, entry);
8303 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8304 dev_flow->dv.tag_resource = cache_resource;
8311 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8312 struct mlx5_hlist_entry *entry)
8314 struct mlx5_dev_ctx_shared *sh = list->ctx;
8315 struct mlx5_flow_dv_tag_resource *tag =
8316 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8318 MLX5_ASSERT(tag && sh && tag->action);
8319 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8320 DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8321 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8328 * Pointer to Ethernet device.
8333 * 1 while a reference on it exists, 0 when freed.
8336 flow_dv_tag_release(struct rte_eth_dev *dev,
8339 struct mlx5_priv *priv = dev->data->dev_private;
8340 struct mlx5_flow_dv_tag_resource *tag;
8342 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8345 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8346 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8347 return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8351 * Translate port ID action to vport.
8354 * Pointer to rte_eth_dev structure.
8356 * Pointer to the port ID action.
8357 * @param[out] dst_port_id
8358 * The target port ID.
8360 * Pointer to the error structure.
8363 * 0 on success, a negative errno value otherwise and rte_errno is set.
8366 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8367 const struct rte_flow_action *action,
8368 uint32_t *dst_port_id,
8369 struct rte_flow_error *error)
8372 struct mlx5_priv *priv;
8373 const struct rte_flow_action_port_id *conf =
8374 (const struct rte_flow_action_port_id *)action->conf;
8376 port = conf->original ? dev->data->port_id : conf->id;
8377 priv = mlx5_port_to_eswitch_info(port, false);
8379 return rte_flow_error_set(error, -rte_errno,
8380 RTE_FLOW_ERROR_TYPE_ACTION,
8382 "No eswitch info was found for port");
8383 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8385 * This parameter is transferred to
8386 * mlx5dv_dr_action_create_dest_ib_port().
8388 *dst_port_id = priv->dev_port;
8391 	 * Legacy mode, no LAG configuration is supported.
8392 * This parameter is transferred to
8393 * mlx5dv_dr_action_create_dest_vport().
8395 *dst_port_id = priv->vport_id;
8401 * Create a counter with aging configuration.
8404 * Pointer to rte_eth_dev structure.
8406 * Pointer to the counter action configuration.
8408 * Pointer to the aging action configuration.
8411 * Index to flow counter on success, 0 otherwise.
8414 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8415 struct mlx5_flow *dev_flow,
8416 const struct rte_flow_action_count *count,
8417 const struct rte_flow_action_age *age)
8420 struct mlx5_age_param *age_param;
8422 if (count && count->shared)
8423 counter = flow_dv_counter_get_shared(dev, count->id);
8425 counter = flow_dv_counter_alloc(dev, !!age);
8426 if (!counter || age == NULL)
8428 age_param = flow_dv_counter_idx_get_age(dev, counter);
8429 age_param->context = age->context ? age->context :
8430 (void *)(uintptr_t)(dev_flow->flow_idx);
8431 age_param->timeout = age->timeout;
8432 age_param->port_id = dev->data->port_id;
8433 __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8434 __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8438 * Add Tx queue matcher
8441 * Pointer to the dev struct.
8442 * @param[in, out] matcher
8444 * @param[in, out] key
8445 * Flow matcher value.
8447 * Flow pattern to translate.
8449 * Item is inner pattern.
8452 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8453 void *matcher, void *key,
8454 const struct rte_flow_item *item)
8456 const struct mlx5_rte_flow_item_tx_queue *queue_m;
8457 const struct mlx5_rte_flow_item_tx_queue *queue_v;
8459 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8461 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8462 struct mlx5_txq_ctrl *txq;
8466 queue_m = (const void *)item->mask;
8469 queue_v = (const void *)item->spec;
8472 txq = mlx5_txq_get(dev, queue_v->queue);
8475 queue = txq->obj->sq->id;
8476 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8477 MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8478 queue & queue_m->queue);
8479 mlx5_txq_release(dev, queue_v->queue);
8483 * Set the hash fields according to the @p flow information.
8485 * @param[in] dev_flow
8486 * Pointer to the mlx5_flow.
8487 * @param[in] rss_desc
8488 * Pointer to the mlx5_flow_rss_desc.
8491 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8492 struct mlx5_flow_rss_desc *rss_desc)
8494 uint64_t items = dev_flow->handle->layers;
8496 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8498 dev_flow->hash_fields = 0;
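	/*
	 * Note (added): an RSS level of 2 or higher requests hashing on the
	 * inner (post-decapsulation) headers, which requires tunnel RSS
	 * support from the Verbs/DevX layer.
	 */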
8499 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8500 if (rss_desc->level >= 2) {
8501 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8505 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8506 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8507 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8508 if (rss_types & ETH_RSS_L3_SRC_ONLY)
8509 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8510 else if (rss_types & ETH_RSS_L3_DST_ONLY)
8511 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8513 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8515 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8516 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8517 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8518 if (rss_types & ETH_RSS_L3_SRC_ONLY)
8519 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8520 else if (rss_types & ETH_RSS_L3_DST_ONLY)
8521 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8523 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8526 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8527 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8528 if (rss_types & ETH_RSS_UDP) {
8529 if (rss_types & ETH_RSS_L4_SRC_ONLY)
8530 dev_flow->hash_fields |=
8531 IBV_RX_HASH_SRC_PORT_UDP;
8532 else if (rss_types & ETH_RSS_L4_DST_ONLY)
8533 dev_flow->hash_fields |=
8534 IBV_RX_HASH_DST_PORT_UDP;
8536 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8538 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8539 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8540 if (rss_types & ETH_RSS_TCP) {
8541 if (rss_types & ETH_RSS_L4_SRC_ONLY)
8542 dev_flow->hash_fields |=
8543 IBV_RX_HASH_SRC_PORT_TCP;
8544 else if (rss_types & ETH_RSS_L4_DST_ONLY)
8545 dev_flow->hash_fields |=
8546 IBV_RX_HASH_DST_PORT_TCP;
8548 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
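/*
 * Illustrative sketch, not part of the PMD: the rss_desc->types and
 * rss_desc->level values consumed by flow_dv_hashfields_set() originate
 * from an application RSS action such as the one below, which requests
 * hashing on the inner IPv4 source address only (level 2 selects the inner
 * headers of a tunnel). The function name and queue list are hypothetical.
 */
static __rte_unused struct rte_flow *
example_flow_with_inner_rss(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.level = 2,			/* Hash on the inner headers. */
		.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
		.key = NULL,			/* Use the default RSS key. */
		.key_len = 0,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}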
8554 * Prepare an Rx Hash queue.
8557 * Pointer to Ethernet device.
8558 * @param[in] dev_flow
8559 * Pointer to the mlx5_flow.
8560 * @param[in] rss_desc
8561 * Pointer to the mlx5_flow_rss_desc.
8562 * @param[out] hrxq_idx
8563 * Hash Rx queue index.
8566 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8568 static struct mlx5_hrxq *
8569 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8570 struct mlx5_flow *dev_flow,
8571 struct mlx5_flow_rss_desc *rss_desc,
8574 struct mlx5_priv *priv = dev->data->dev_private;
8575 struct mlx5_flow_handle *dh = dev_flow->handle;
8576 struct mlx5_hrxq *hrxq;
8578 MLX5_ASSERT(rss_desc->queue_num);
8579 rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8580 rss_desc->hash_fields = dev_flow->hash_fields;
8581 rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8582 rss_desc->standalone = false;
8583 *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8586 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8592 * Find existing sample resource or create and register a new one.
8594 * @param[in, out] dev
8595 * Pointer to rte_eth_dev structure.
8597 * Attributes of flow that includes this item.
8598 * @param[in] resource
8599 * Pointer to sample resource.
8600 * @param[in, out] dev_flow
8601 * Pointer to the dev_flow.
8602 * @param[in, out] sample_dv_actions
8603 * Pointer to sample actions list.
8605 * Pointer to error structure.
8608 * 0 on success, otherwise -errno and errno is set.
8611 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8612 const struct rte_flow_attr *attr,
8613 struct mlx5_flow_dv_sample_resource *resource,
8614 struct mlx5_flow *dev_flow,
8615 void **sample_dv_actions,
8616 struct rte_flow_error *error)
8618 struct mlx5_flow_dv_sample_resource *cache_resource;
8619 struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8620 struct mlx5_priv *priv = dev->data->dev_private;
8621 struct mlx5_dev_ctx_shared *sh = priv->sh;
8622 struct mlx5_flow_tbl_resource *tbl;
8624 const uint32_t next_ft_step = 1;
8625 uint32_t next_ft_id = resource->ft_id + next_ft_step;
8627 /* Lookup a matching resource from cache. */
8628 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
8629 idx, cache_resource, next) {
8630 if (resource->ratio == cache_resource->ratio &&
8631 resource->ft_type == cache_resource->ft_type &&
8632 resource->ft_id == cache_resource->ft_id &&
8633 resource->set_action == cache_resource->set_action &&
8634 !memcmp((void *)&resource->sample_act,
8635 (void *)&cache_resource->sample_act,
8636 sizeof(struct mlx5_flow_sub_actions_list))) {
8637 DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
8638 (void *)cache_resource,
8639 __atomic_load_n(&cache_resource->refcnt,
8641 __atomic_fetch_add(&cache_resource->refcnt, 1,
8643 dev_flow->handle->dvh.rix_sample = idx;
8644 dev_flow->dv.sample_res = cache_resource;
8648 /* Register new sample resource. */
8649 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
8650 &dev_flow->handle->dvh.rix_sample);
8651 if (!cache_resource)
8652 return rte_flow_error_set(error, ENOMEM,
8653 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8655 "cannot allocate resource memory");
8656 *cache_resource = *resource;
8657 /* Create normal path table level */
8658 tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8659 attr->egress, attr->transfer,
8660 dev_flow->external, NULL, 0, 0, error);
8662 rte_flow_error_set(error, ENOMEM,
8663 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8665 "fail to create normal path table "
8669 cache_resource->normal_path_tbl = tbl;
8670 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8671 cache_resource->default_miss =
8672 mlx5_glue->dr_create_flow_action_default_miss();
8673 if (!cache_resource->default_miss) {
8674 rte_flow_error_set(error, ENOMEM,
8675 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8677 "cannot create default miss "
8681 sample_dv_actions[resource->sample_act.actions_num++] =
8682 cache_resource->default_miss;
8684 /* Create a DR sample action */
8685 sampler_attr.sample_ratio = cache_resource->ratio;
8686 sampler_attr.default_next_table = tbl->obj;
8687 sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8688 sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8689 &sample_dv_actions[0];
8690 sampler_attr.action = cache_resource->set_action;
8691 cache_resource->verbs_action =
8692 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8693 if (!cache_resource->verbs_action) {
8694 rte_flow_error_set(error, ENOMEM,
8695 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8696 NULL, "cannot create sample action");
8699 __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8700 ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
8701 dev_flow->handle->dvh.rix_sample, cache_resource,
8703 dev_flow->dv.sample_res = cache_resource;
8704 DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
8705 (void *)cache_resource,
8706 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8709 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8710 if (cache_resource->default_miss)
8711 claim_zero(mlx5_glue->destroy_flow_action
8712 (cache_resource->default_miss));
8714 if (cache_resource->sample_idx.rix_hrxq &&
8715 !mlx5_hrxq_release(dev,
8716 cache_resource->sample_idx.rix_hrxq))
8717 cache_resource->sample_idx.rix_hrxq = 0;
8718 if (cache_resource->sample_idx.rix_tag &&
8719 !flow_dv_tag_release(dev,
8720 cache_resource->sample_idx.rix_tag))
8721 cache_resource->sample_idx.rix_tag = 0;
8722 if (cache_resource->sample_idx.cnt) {
8723 flow_dv_counter_release(dev,
8724 cache_resource->sample_idx.cnt);
8725 cache_resource->sample_idx.cnt = 0;
8728 if (cache_resource->normal_path_tbl)
8729 flow_dv_tbl_resource_release(MLX5_SH(dev),
8730 cache_resource->normal_path_tbl);
8731 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
8732 dev_flow->handle->dvh.rix_sample);
8733 dev_flow->handle->dvh.rix_sample = 0;
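/*
 * Illustrative sketch, not part of the PMD: the *_resource_register()
 * helpers in this file share a lookup-or-create pattern guarded by a
 * relaxed atomic reference count. A stripped-down generic version of that
 * pattern is shown below; the example_res type and list are hypothetical.
 */
struct example_res {
	struct example_res *next;
	uint32_t refcnt;
	uint32_t key;
};

static __rte_unused struct example_res *
example_res_register(struct example_res **list, struct example_res *tmpl)
{
	struct example_res *res;

	/* Reuse a matching cached resource if one exists. */
	for (res = *list; res != NULL; res = res->next) {
		if (res->key == tmpl->key) {
			__atomic_fetch_add(&res->refcnt, 1, __ATOMIC_RELAXED);
			return res;
		}
	}
	/* Otherwise allocate, take the first reference and insert. */
	res = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*res), 0, SOCKET_ID_ANY);
	if (res == NULL)
		return NULL;
	res->key = tmpl->key;
	__atomic_store_n(&res->refcnt, 1, __ATOMIC_RELAXED);
	res->next = *list;
	*list = res;
	return res;
}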
8738 * Find existing destination array resource or create and register a new one.
8740 * @param[in, out] dev
8741 * Pointer to rte_eth_dev structure.
8743 * Attributes of flow that includes this item.
8744 * @param[in] resource
8745 * Pointer to destination array resource.
8746 * @param[in, out] dev_flow
8747 * Pointer to the dev_flow.
8749 * Pointer to error structure.
8752 * 0 on success, otherwise -errno and errno is set.
8755 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8756 const struct rte_flow_attr *attr,
8757 struct mlx5_flow_dv_dest_array_resource *resource,
8758 struct mlx5_flow *dev_flow,
8759 struct rte_flow_error *error)
8761 struct mlx5_flow_dv_dest_array_resource *cache_resource;
8762 struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8763 struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8764 struct mlx5_priv *priv = dev->data->dev_private;
8765 struct mlx5_dev_ctx_shared *sh = priv->sh;
8766 struct mlx5_flow_sub_actions_list *sample_act;
8767 struct mlx5dv_dr_domain *domain;
8770 /* Lookup a matching resource from cache. */
8771 ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8772 sh->dest_array_list,
8773 idx, cache_resource, next) {
8774 if (resource->num_of_dest == cache_resource->num_of_dest &&
8775 resource->ft_type == cache_resource->ft_type &&
8776 !memcmp((void *)cache_resource->sample_act,
8777 (void *)resource->sample_act,
8778 (resource->num_of_dest *
8779 sizeof(struct mlx5_flow_sub_actions_list)))) {
8780 DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
8781 (void *)cache_resource,
8782 __atomic_load_n(&cache_resource->refcnt,
8784 __atomic_fetch_add(&cache_resource->refcnt, 1,
8786 dev_flow->handle->dvh.rix_dest_array = idx;
8787 dev_flow->dv.dest_array_res = cache_resource;
8791 /* Register new destination array resource. */
8792 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8793 &dev_flow->handle->dvh.rix_dest_array);
8794 if (!cache_resource)
8795 return rte_flow_error_set(error, ENOMEM,
8796 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8798 "cannot allocate resource memory");
8799 *cache_resource = *resource;
8801 domain = sh->fdb_domain;
8802 else if (attr->ingress)
8803 domain = sh->rx_domain;
8805 domain = sh->tx_domain;
8806 for (idx = 0; idx < resource->num_of_dest; idx++) {
8807 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8808 mlx5_malloc(MLX5_MEM_ZERO,
8809 sizeof(struct mlx5dv_dr_action_dest_attr),
8811 if (!dest_attr[idx]) {
8812 rte_flow_error_set(error, ENOMEM,
8813 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8815 "cannot allocate resource memory");
8818 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8819 sample_act = &resource->sample_act[idx];
8820 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8821 dest_attr[idx]->dest = sample_act->dr_queue_action;
8822 } else if (sample_act->action_flags ==
8823 (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8824 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8825 dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8826 dest_attr[idx]->dest_reformat->reformat =
8827 sample_act->dr_encap_action;
8828 dest_attr[idx]->dest_reformat->dest =
8829 sample_act->dr_port_id_action;
8830 } else if (sample_act->action_flags ==
8831 MLX5_FLOW_ACTION_PORT_ID) {
8832 dest_attr[idx]->dest = sample_act->dr_port_id_action;
8835 /* Create a dest array action */
8836 cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8838 cache_resource->num_of_dest,
8840 if (!cache_resource->action) {
8841 rte_flow_error_set(error, ENOMEM,
8842 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8844 "cannot create destination array action");
8847 __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8848 ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8849 &sh->dest_array_list,
8850 dev_flow->handle->dvh.rix_dest_array, cache_resource,
8852 dev_flow->dv.dest_array_res = cache_resource;
8853 DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
8854 (void *)cache_resource,
8855 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8856 for (idx = 0; idx < resource->num_of_dest; idx++)
8857 mlx5_free(dest_attr[idx]);
8860 for (idx = 0; idx < resource->num_of_dest; idx++) {
8861 struct mlx5_flow_sub_actions_idx *act_res =
8862 &cache_resource->sample_idx[idx];
8863 if (act_res->rix_hrxq &&
8864 !mlx5_hrxq_release(dev,
8866 act_res->rix_hrxq = 0;
8867 if (act_res->rix_encap_decap &&
8868 !flow_dv_encap_decap_resource_release(dev,
8869 act_res->rix_encap_decap))
8870 act_res->rix_encap_decap = 0;
8871 if (act_res->rix_port_id_action &&
8872 !flow_dv_port_id_action_resource_release(dev,
8873 act_res->rix_port_id_action))
8874 act_res->rix_port_id_action = 0;
8876 mlx5_free(dest_attr[idx]);
8879 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8880 dev_flow->handle->dvh.rix_dest_array);
8881 dev_flow->handle->dvh.rix_dest_array = 0;
8886 * Convert Sample action to DV specification.
8889 * Pointer to rte_eth_dev structure.
8891 * Pointer to action structure.
8892 * @param[in, out] dev_flow
8893 * Pointer to the mlx5_flow.
8895 * Pointer to the flow attributes.
8896 * @param[in, out] num_of_dest
8897 * Pointer to the num of destination.
8898 * @param[in, out] sample_actions
8899 * Pointer to sample actions list.
8900 * @param[in, out] res
8901 * Pointer to sample resource.
8903 * Pointer to the error structure.
8906 * 0 on success, a negative errno value otherwise and rte_errno is set.
8909 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8910 const struct rte_flow_action *action,
8911 struct mlx5_flow *dev_flow,
8912 const struct rte_flow_attr *attr,
8913 uint32_t *num_of_dest,
8914 void **sample_actions,
8915 struct mlx5_flow_dv_sample_resource *res,
8916 struct rte_flow_error *error)
8918 struct mlx5_priv *priv = dev->data->dev_private;
8919 const struct rte_flow_action_sample *sample_action;
8920 const struct rte_flow_action *sub_actions;
8921 const struct rte_flow_action_queue *queue;
8922 struct mlx5_flow_sub_actions_list *sample_act;
8923 struct mlx5_flow_sub_actions_idx *sample_idx;
8924 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8925 struct mlx5_flow_rss_desc *rss_desc;
8926 uint64_t action_flags = 0;
8929 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8930 sample_act = &res->sample_act;
8931 sample_idx = &res->sample_idx;
8932 sample_action = (const struct rte_flow_action_sample *)action->conf;
8933 res->ratio = sample_action->ratio;
8934 sub_actions = sample_action->actions;
8935 for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8936 int type = sub_actions->type;
8937 uint32_t pre_rix = 0;
8940 case RTE_FLOW_ACTION_TYPE_QUEUE:
8942 struct mlx5_hrxq *hrxq;
8945 queue = sub_actions->conf;
8946 rss_desc->queue_num = 1;
8947 rss_desc->queue[0] = queue->index;
8948 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
8949 rss_desc, &hrxq_idx);
8951 return rte_flow_error_set
8953 RTE_FLOW_ERROR_TYPE_ACTION,
8955 "cannot create fate queue");
8956 sample_act->dr_queue_action = hrxq->action;
8957 sample_idx->rix_hrxq = hrxq_idx;
8958 sample_actions[sample_act->actions_num++] =
8961 action_flags |= MLX5_FLOW_ACTION_QUEUE;
8962 if (action_flags & MLX5_FLOW_ACTION_MARK)
8963 dev_flow->handle->rix_hrxq = hrxq_idx;
8964 dev_flow->handle->fate_action =
8965 MLX5_FLOW_FATE_QUEUE;
8968 case RTE_FLOW_ACTION_TYPE_MARK:
8970 uint32_t tag_be = mlx5_flow_mark_set
8971 (((const struct rte_flow_action_mark *)
8972 (sub_actions->conf))->id);
8974 dev_flow->handle->mark = 1;
8975 pre_rix = dev_flow->handle->dvh.rix_tag;
8976 /* Save the mark resource before sample */
8977 pre_r = dev_flow->dv.tag_resource;
8978 if (flow_dv_tag_resource_register(dev, tag_be,
8981 MLX5_ASSERT(dev_flow->dv.tag_resource);
8982 sample_act->dr_tag_action =
8983 dev_flow->dv.tag_resource->action;
8984 sample_idx->rix_tag =
8985 dev_flow->handle->dvh.rix_tag;
8986 sample_actions[sample_act->actions_num++] =
8987 sample_act->dr_tag_action;
8988 /* Recover the mark resource after sample */
8989 dev_flow->dv.tag_resource = pre_r;
8990 dev_flow->handle->dvh.rix_tag = pre_rix;
8991 action_flags |= MLX5_FLOW_ACTION_MARK;
8994 case RTE_FLOW_ACTION_TYPE_COUNT:
8998 counter = flow_dv_translate_create_counter(dev,
8999 dev_flow, sub_actions->conf, 0);
9001 return rte_flow_error_set
9003 RTE_FLOW_ERROR_TYPE_ACTION,
9005 "cannot create counter"
9007 sample_idx->cnt = counter;
9008 sample_act->dr_cnt_action =
9009 (flow_dv_counter_get_by_idx(dev,
9010 counter, NULL))->action;
9011 sample_actions[sample_act->actions_num++] =
9012 sample_act->dr_cnt_action;
9013 action_flags |= MLX5_FLOW_ACTION_COUNT;
9016 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9018 struct mlx5_flow_dv_port_id_action_resource
9020 uint32_t port_id = 0;
9022 memset(&port_id_resource, 0, sizeof(port_id_resource));
9023 /* Save the port id resource before sample */
9024 pre_rix = dev_flow->handle->rix_port_id_action;
9025 pre_r = dev_flow->dv.port_id_action;
9026 if (flow_dv_translate_action_port_id(dev, sub_actions,
9029 port_id_resource.port_id = port_id;
9030 if (flow_dv_port_id_action_resource_register
9031 (dev, &port_id_resource, dev_flow, error))
9033 sample_act->dr_port_id_action =
9034 dev_flow->dv.port_id_action->action;
9035 sample_idx->rix_port_id_action =
9036 dev_flow->handle->rix_port_id_action;
9037 sample_actions[sample_act->actions_num++] =
9038 sample_act->dr_port_id_action;
9039 /* Recover the port id resource after sample */
9040 dev_flow->dv.port_id_action = pre_r;
9041 dev_flow->handle->rix_port_id_action = pre_rix;
9043 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9046 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9047 /* Save the encap resource before sample */
9048 pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9049 pre_r = dev_flow->dv.encap_decap;
9050 if (flow_dv_create_action_l2_encap(dev, sub_actions,
9055 sample_act->dr_encap_action =
9056 dev_flow->dv.encap_decap->action;
9057 sample_idx->rix_encap_decap =
9058 dev_flow->handle->dvh.rix_encap_decap;
9059 sample_actions[sample_act->actions_num++] =
9060 sample_act->dr_encap_action;
9061 /* Recover the encap resource after sample */
9062 dev_flow->dv.encap_decap = pre_r;
9063 dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9064 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9067 return rte_flow_error_set(error, EINVAL,
9068 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9070 "Unsupported action for sampler");
9073 sample_act->action_flags = action_flags;
9074 res->ft_id = dev_flow->dv.group;
9075 if (attr->transfer) {
9077 uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9078 uint64_t set_action;
9079 } action_ctx = { .set_action = 0 };
9081 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9082 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9083 MLX5_MODIFICATION_TYPE_SET);
9084 MLX5_SET(set_action_in, action_ctx.action_in, field,
9085 MLX5_MODI_META_REG_C_0);
9086 MLX5_SET(set_action_in, action_ctx.action_in, data,
9087 priv->vport_meta_tag);
9088 res->set_action = action_ctx.set_action;
9089 } else if (attr->ingress) {
9090 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
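/*
 * Illustrative sketch, not part of the PMD: the SAMPLE action translated
 * above is built by the application roughly as below - here 1 out of every
 * 1000 packets is duplicated to queue 0 with a MARK, while all packets
 * continue to a jump group. The function name, mark id, ratio, queue and
 * group values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_flow_with_sample(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_mark mark = { .id = 0xcafe };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action sample_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action_sample sample = {
		.ratio = 1000,		/* Sample 1 of every 1000 packets. */
		.actions = sample_sub_actions,
	};
	const struct rte_flow_action_jump jump = { .group = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}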
9096 * Create and register the sample action and destination array resources.
9099 * Pointer to rte_eth_dev structure.
9100 * @param[in, out] dev_flow
9101 * Pointer to the mlx5_flow.
9103 * Pointer to the flow attributes.
9104 * @param[in] num_of_dest
9105 * The num of destination.
9106 * @param[in, out] res
9107 * Pointer to sample resource.
9108 * @param[in, out] mdest_res
9109 * Pointer to destination array resource.
9110 * @param[in] sample_actions
9111 * Pointer to sample path actions list.
9112 * @param[in] action_flags
9113 * Holds the actions detected until now.
9115 * Pointer to the error structure.
9118 * 0 on success, a negative errno value otherwise and rte_errno is set.
9121 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9122 struct mlx5_flow *dev_flow,
9123 const struct rte_flow_attr *attr,
9124 uint32_t num_of_dest,
9125 struct mlx5_flow_dv_sample_resource *res,
9126 struct mlx5_flow_dv_dest_array_resource *mdest_res,
9127 void **sample_actions,
9128 uint64_t action_flags,
9129 struct rte_flow_error *error)
9131 /* update normal path action resource into last index of array */
9132 uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9133 struct mlx5_flow_sub_actions_list *sample_act =
9134 &mdest_res->sample_act[dest_index];
9135 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9136 struct mlx5_flow_rss_desc *rss_desc;
9137 uint32_t normal_idx = 0;
9138 struct mlx5_hrxq *hrxq;
9142 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9143 if (num_of_dest > 1) {
9144 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9145 /* Handle QP action for mirroring */
9146 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9147 rss_desc, &hrxq_idx);
9149 return rte_flow_error_set
9151 RTE_FLOW_ERROR_TYPE_ACTION,
9153 "cannot create rx queue");
9155 mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9156 sample_act->dr_queue_action = hrxq->action;
9157 if (action_flags & MLX5_FLOW_ACTION_MARK)
9158 dev_flow->handle->rix_hrxq = hrxq_idx;
9159 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9161 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9163 mdest_res->sample_idx[dest_index].rix_encap_decap =
9164 dev_flow->handle->dvh.rix_encap_decap;
9165 sample_act->dr_encap_action =
9166 dev_flow->dv.encap_decap->action;
9168 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9170 mdest_res->sample_idx[dest_index].rix_port_id_action =
9171 dev_flow->handle->rix_port_id_action;
9172 sample_act->dr_port_id_action =
9173 dev_flow->dv.port_id_action->action;
9175 sample_act->actions_num = normal_idx;
9176 /* update sample action resource into first index of array */
9177 mdest_res->ft_type = res->ft_type;
9178 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9179 sizeof(struct mlx5_flow_sub_actions_idx));
9180 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9181 sizeof(struct mlx5_flow_sub_actions_list));
9182 mdest_res->num_of_dest = num_of_dest;
9183 if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
9185 return rte_flow_error_set(error, EINVAL,
9186 RTE_FLOW_ERROR_TYPE_ACTION,
9187 NULL, "can't create sample "
9190 if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
9191 sample_actions, error))
9192 return rte_flow_error_set(error, EINVAL,
9193 RTE_FLOW_ERROR_TYPE_ACTION,
9195 "can't create sample action");
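/*
 * Illustrative sketch, not part of the PMD: the multi-destination path
 * above (num_of_dest > 1) is taken for mirroring, i.e. a SAMPLE action
 * with ratio 1 whose sub-action adds a second destination. A transfer
 * rule mirroring every packet to another port while forwarding the
 * original could be requested as below; the function name and port ids
 * are hypothetical.
 */
static __rte_unused struct rte_flow *
example_flow_with_mirror(uint16_t port_id, uint16_t mirror_port,
			 uint16_t dst_port, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_port_id mirror_dst = { .id = mirror_port };
	const struct rte_flow_action sample_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_dst },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action_sample sample = {
		.ratio = 1,		/* Mirror every packet. */
		.actions = sample_sub_actions,
	};
	const struct rte_flow_action_port_id fwd_dst = { .id = dst_port };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &fwd_dst },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}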
9201 * Fill the flow with DV spec, lock free
9202 * (mutex should be acquired by caller).
9205 * Pointer to rte_eth_dev structure.
9206 * @param[in, out] dev_flow
9207 * Pointer to the sub flow.
9209 * Pointer to the flow attributes.
9211 * Pointer to the list of items.
9212 * @param[in] actions
9213 * Pointer to the list of actions.
9215 * Pointer to the error structure.
9218 * 0 on success, a negative errno value otherwise and rte_errno is set.
9221 __flow_dv_translate(struct rte_eth_dev *dev,
9222 struct mlx5_flow *dev_flow,
9223 const struct rte_flow_attr *attr,
9224 const struct rte_flow_item items[],
9225 const struct rte_flow_action actions[],
9226 struct rte_flow_error *error)
9228 struct mlx5_priv *priv = dev->data->dev_private;
9229 struct mlx5_dev_config *dev_conf = &priv->config;
9230 struct rte_flow *flow = dev_flow->flow;
9231 struct mlx5_flow_handle *handle = dev_flow->handle;
9232 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9233 struct mlx5_flow_rss_desc *rss_desc;
9234 uint64_t item_flags = 0;
9235 uint64_t last_item = 0;
9236 uint64_t action_flags = 0;
9237 uint64_t priority = attr->priority;
9238 struct mlx5_flow_dv_matcher matcher = {
9240 .size = sizeof(matcher.mask.buf) -
9241 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9245 bool actions_end = false;
9247 struct mlx5_flow_dv_modify_hdr_resource res;
9248 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9249 sizeof(struct mlx5_modification_cmd) *
9250 (MLX5_MAX_MODIFY_NUM + 1)];
9252 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9253 const struct rte_flow_action_count *count = NULL;
9254 const struct rte_flow_action_age *age = NULL;
9255 union flow_dv_attr flow_attr = { .attr = 0 };
9257 union mlx5_flow_tbl_key tbl_key;
9258 uint32_t modify_action_position = UINT32_MAX;
9259 void *match_mask = matcher.mask.buf;
9260 void *match_value = dev_flow->dv.value.buf;
9261 uint8_t next_protocol = 0xff;
9262 struct rte_vlan_hdr vlan = { 0 };
9263 struct mlx5_flow_dv_dest_array_resource mdest_res;
9264 struct mlx5_flow_dv_sample_resource sample_res;
9265 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9266 struct mlx5_flow_sub_actions_list *sample_act;
9267 uint32_t sample_act_pos = UINT32_MAX;
9268 uint32_t num_of_dest = 0;
9269 int tmp_actions_n = 0;
9272 const struct mlx5_flow_tunnel *tunnel;
9273 struct flow_grp_info grp_info = {
9274 .external = !!dev_flow->external,
9275 .transfer = !!attr->transfer,
9276 .fdb_def_rule = !!priv->fdb_def_rule,
9280 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9281 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9282 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9283 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9284 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9285 /* update normal path action resource into last index of array */
9286 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9287 tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9288 flow_items_to_tunnel(items) :
9289 is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9290 flow_actions_to_tunnel(actions) :
9291 dev_flow->tunnel ? dev_flow->tunnel : NULL;
9292 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9293 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9294 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9295 (dev, tunnel, attr, items, actions);
9296 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9300 dev_flow->dv.group = table;
9302 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9303 if (priority == MLX5_FLOW_PRIO_RSVD)
9304 priority = dev_conf->flow_prio - 1;
9305 /* Reset the number of actions to 0 in case of a dirty stack. */
9306 mhdr_res->actions_num = 0;
9307 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9309 * do not add decap action if match rule drops packet
9310 * HW rejects rules with decap & drop
9312 bool add_decap = true;
9313 const struct rte_flow_action *ptr = actions;
9314 struct mlx5_flow_tbl_resource *tbl;
9316 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9317 if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9323 if (flow_dv_create_action_l2_decap(dev, dev_flow,
9327 dev_flow->dv.actions[actions_n++] =
9328 dev_flow->dv.encap_decap->action;
9329 action_flags |= MLX5_FLOW_ACTION_DECAP;
9332 * Bind table_id with <group, table> for the tunnel match rule.
9333 * The tunnel set rule establishes that binding in the JUMP action handler.
9334 * This is required for the scenario when the application creates the
9335 * tunnel match rule before the tunnel set rule.
9337 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9339 !!dev_flow->external, tunnel,
9340 attr->group, 0, error);
9342 return rte_flow_error_set
9343 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9344 actions, "cannot register tunnel group");
9346 for (; !actions_end ; actions++) {
9347 const struct rte_flow_action_queue *queue;
9348 const struct rte_flow_action_rss *rss;
9349 const struct rte_flow_action *action = actions;
9350 const uint8_t *rss_key;
9351 const struct rte_flow_action_meter *mtr;
9352 struct mlx5_flow_tbl_resource *tbl;
9353 uint32_t port_id = 0;
9354 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9355 int action_type = actions->type;
9356 const struct rte_flow_action *found_action = NULL;
9357 struct mlx5_flow_meter *fm = NULL;
9358 uint32_t jump_group = 0;
9360 if (!mlx5_flow_os_action_supported(action_type))
9361 return rte_flow_error_set(error, ENOTSUP,
9362 RTE_FLOW_ERROR_TYPE_ACTION,
9364 "action not supported");
9365 switch (action_type) {
9366 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9367 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9369 case RTE_FLOW_ACTION_TYPE_VOID:
9371 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9372 if (flow_dv_translate_action_port_id(dev, action,
9375 port_id_resource.port_id = port_id;
9376 MLX5_ASSERT(!handle->rix_port_id_action);
9377 if (flow_dv_port_id_action_resource_register
9378 (dev, &port_id_resource, dev_flow, error))
9380 dev_flow->dv.actions[actions_n++] =
9381 dev_flow->dv.port_id_action->action;
9382 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9383 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9384 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9387 case RTE_FLOW_ACTION_TYPE_FLAG:
9388 action_flags |= MLX5_FLOW_ACTION_FLAG;
9389 dev_flow->handle->mark = 1;
9390 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9391 struct rte_flow_action_mark mark = {
9392 .id = MLX5_FLOW_MARK_DEFAULT,
9395 if (flow_dv_convert_action_mark(dev, &mark,
9399 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9402 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9404 * Only one FLAG or MARK is supported per device flow
9405 * right now. So the pointer to the tag resource must be
9406 * zero before the register process.
9408 MLX5_ASSERT(!handle->dvh.rix_tag);
9409 if (flow_dv_tag_resource_register(dev, tag_be,
9412 MLX5_ASSERT(dev_flow->dv.tag_resource);
9413 dev_flow->dv.actions[actions_n++] =
9414 dev_flow->dv.tag_resource->action;
9416 case RTE_FLOW_ACTION_TYPE_MARK:
9417 action_flags |= MLX5_FLOW_ACTION_MARK;
9418 dev_flow->handle->mark = 1;
9419 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9420 const struct rte_flow_action_mark *mark =
9421 (const struct rte_flow_action_mark *)
9424 if (flow_dv_convert_action_mark(dev, mark,
9428 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9432 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9433 /* Legacy (non-extensive) MARK action. */
9434 tag_be = mlx5_flow_mark_set
9435 (((const struct rte_flow_action_mark *)
9436 (actions->conf))->id);
9437 MLX5_ASSERT(!handle->dvh.rix_tag);
9438 if (flow_dv_tag_resource_register(dev, tag_be,
9441 MLX5_ASSERT(dev_flow->dv.tag_resource);
9442 dev_flow->dv.actions[actions_n++] =
9443 dev_flow->dv.tag_resource->action;
9445 case RTE_FLOW_ACTION_TYPE_SET_META:
9446 if (flow_dv_convert_action_set_meta
9447 (dev, mhdr_res, attr,
9448 (const struct rte_flow_action_set_meta *)
9449 actions->conf, error))
9451 action_flags |= MLX5_FLOW_ACTION_SET_META;
9453 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9454 if (flow_dv_convert_action_set_tag
9456 (const struct rte_flow_action_set_tag *)
9457 actions->conf, error))
9459 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9461 case RTE_FLOW_ACTION_TYPE_DROP:
9462 action_flags |= MLX5_FLOW_ACTION_DROP;
9463 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9465 case RTE_FLOW_ACTION_TYPE_QUEUE:
9466 queue = actions->conf;
9467 rss_desc->queue_num = 1;
9468 rss_desc->queue[0] = queue->index;
9469 action_flags |= MLX5_FLOW_ACTION_QUEUE;
9470 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9471 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9474 case RTE_FLOW_ACTION_TYPE_RSS:
9475 rss = actions->conf;
9476 memcpy(rss_desc->queue, rss->queue,
9477 rss->queue_num * sizeof(uint16_t));
9478 rss_desc->queue_num = rss->queue_num;
9479 /* NULL RSS key indicates default RSS key. */
9480 rss_key = !rss->key ? rss_hash_default_key : rss->key;
9481 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9483 * rss->level and rss.types should be set in advance
9484 * when expanding items for RSS.
9486 action_flags |= MLX5_FLOW_ACTION_RSS;
9487 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9489 case RTE_FLOW_ACTION_TYPE_AGE:
9490 case RTE_FLOW_ACTION_TYPE_COUNT:
9491 if (!dev_conf->devx) {
9492 return rte_flow_error_set
9494 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9496 "count action not supported");
9498 /* Save information first, will apply later. */
9499 if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9500 count = action->conf;
9503 action_flags |= MLX5_FLOW_ACTION_COUNT;
9505 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9506 dev_flow->dv.actions[actions_n++] =
9507 priv->sh->pop_vlan_action;
9508 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9510 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9511 if (!(action_flags &
9512 MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9513 flow_dev_get_vlan_info_from_items(items, &vlan);
9514 vlan.eth_proto = rte_be_to_cpu_16
9515 ((((const struct rte_flow_action_of_push_vlan *)
9516 actions->conf)->ethertype));
9517 found_action = mlx5_flow_find_action
9519 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9521 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9522 found_action = mlx5_flow_find_action
9524 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9526 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9527 if (flow_dv_create_action_push_vlan
9528 (dev, attr, &vlan, dev_flow, error))
9530 dev_flow->dv.actions[actions_n++] =
9531 dev_flow->dv.push_vlan_res->action;
9532 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9534 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9535 /* of_vlan_push action handled this action */
9536 MLX5_ASSERT(action_flags &
9537 MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9539 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9540 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9542 flow_dev_get_vlan_info_from_items(items, &vlan);
9543 mlx5_update_vlan_vid_pcp(actions, &vlan);
9544 /* If no VLAN push - this is a modify header action */
9545 if (flow_dv_convert_action_modify_vlan_vid
9546 (mhdr_res, actions, error))
9548 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9550 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9551 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9552 if (flow_dv_create_action_l2_encap(dev, actions,
9557 dev_flow->dv.actions[actions_n++] =
9558 dev_flow->dv.encap_decap->action;
9559 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9560 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9561 sample_act->action_flags |=
9562 MLX5_FLOW_ACTION_ENCAP;
9564 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9565 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9566 if (flow_dv_create_action_l2_decap(dev, dev_flow,
9570 dev_flow->dv.actions[actions_n++] =
9571 dev_flow->dv.encap_decap->action;
9572 action_flags |= MLX5_FLOW_ACTION_DECAP;
9574 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9575 /* Handle encap with preceding decap. */
9576 if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9577 if (flow_dv_create_action_raw_encap
9578 (dev, actions, dev_flow, attr, error))
9580 dev_flow->dv.actions[actions_n++] =
9581 dev_flow->dv.encap_decap->action;
9583 /* Handle encap without preceding decap. */
9584 if (flow_dv_create_action_l2_encap
9585 (dev, actions, dev_flow, attr->transfer,
9588 dev_flow->dv.actions[actions_n++] =
9589 dev_flow->dv.encap_decap->action;
9591 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9592 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9593 sample_act->action_flags |=
9594 MLX5_FLOW_ACTION_ENCAP;
9596 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9597 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9599 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9600 if (flow_dv_create_action_l2_decap
9601 (dev, dev_flow, attr->transfer, error))
9603 dev_flow->dv.actions[actions_n++] =
9604 dev_flow->dv.encap_decap->action;
9606 /* If decap is followed by encap, handle it at encap. */
9607 action_flags |= MLX5_FLOW_ACTION_DECAP;
9609 case RTE_FLOW_ACTION_TYPE_JUMP:
9610 jump_group = ((const struct rte_flow_action_jump *)
9611 action->conf)->group;
9612 grp_info.std_tbl_fix = 0;
9613 ret = mlx5_flow_group_to_table(dev, tunnel,
9619 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9621 !!dev_flow->external,
9622 tunnel, jump_group, 0,
9625 return rte_flow_error_set
9627 RTE_FLOW_ERROR_TYPE_ACTION,
9629 "cannot create jump action.");
9630 if (flow_dv_jump_tbl_resource_register
9631 (dev, tbl, dev_flow, error)) {
9632 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9633 return rte_flow_error_set
9635 RTE_FLOW_ERROR_TYPE_ACTION,
9637 "cannot create jump action.");
9639 dev_flow->dv.actions[actions_n++] =
9640 dev_flow->dv.jump->action;
9641 action_flags |= MLX5_FLOW_ACTION_JUMP;
9642 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9644 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9645 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9646 if (flow_dv_convert_action_modify_mac
9647 (mhdr_res, actions, error))
9649 action_flags |= actions->type ==
9650 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9651 MLX5_FLOW_ACTION_SET_MAC_SRC :
9652 MLX5_FLOW_ACTION_SET_MAC_DST;
9654 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9655 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9656 if (flow_dv_convert_action_modify_ipv4
9657 (mhdr_res, actions, error))
9659 action_flags |= actions->type ==
9660 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9661 MLX5_FLOW_ACTION_SET_IPV4_SRC :
9662 MLX5_FLOW_ACTION_SET_IPV4_DST;
9664 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9665 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9666 if (flow_dv_convert_action_modify_ipv6
9667 (mhdr_res, actions, error))
9669 action_flags |= actions->type ==
9670 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9671 MLX5_FLOW_ACTION_SET_IPV6_SRC :
9672 MLX5_FLOW_ACTION_SET_IPV6_DST;
9674 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9675 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9676 if (flow_dv_convert_action_modify_tp
9677 (mhdr_res, actions, items,
9678 &flow_attr, dev_flow, !!(action_flags &
9679 MLX5_FLOW_ACTION_DECAP), error))
9681 action_flags |= actions->type ==
9682 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9683 MLX5_FLOW_ACTION_SET_TP_SRC :
9684 MLX5_FLOW_ACTION_SET_TP_DST;
9686 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9687 if (flow_dv_convert_action_modify_dec_ttl
9688 (mhdr_res, items, &flow_attr, dev_flow,
9690 MLX5_FLOW_ACTION_DECAP), error))
9692 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9694 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9695 if (flow_dv_convert_action_modify_ttl
9696 (mhdr_res, actions, items, &flow_attr,
9697 dev_flow, !!(action_flags &
9698 MLX5_FLOW_ACTION_DECAP), error))
9700 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9702 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9703 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9704 if (flow_dv_convert_action_modify_tcp_seq
9705 (mhdr_res, actions, error))
9707 action_flags |= actions->type ==
9708 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9709 MLX5_FLOW_ACTION_INC_TCP_SEQ :
9710 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9713 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9714 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9715 if (flow_dv_convert_action_modify_tcp_ack
9716 (mhdr_res, actions, error))
9718 action_flags |= actions->type ==
9719 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9720 MLX5_FLOW_ACTION_INC_TCP_ACK :
9721 MLX5_FLOW_ACTION_DEC_TCP_ACK;
9723 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9724 if (flow_dv_convert_action_set_reg
9725 (mhdr_res, actions, error))
9727 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9729 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9730 if (flow_dv_convert_action_copy_mreg
9731 (dev, mhdr_res, actions, error))
9733 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9735 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9736 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9737 dev_flow->handle->fate_action =
9738 MLX5_FLOW_FATE_DEFAULT_MISS;
9740 case RTE_FLOW_ACTION_TYPE_METER:
9741 mtr = actions->conf;
9743 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9746 return rte_flow_error_set(error,
9748 RTE_FLOW_ERROR_TYPE_ACTION,
9751 "or invalid parameters");
9752 flow->meter = fm->idx;
9754 /* Set the meter action. */
9756 fm = mlx5_ipool_get(priv->sh->ipool
9757 [MLX5_IPOOL_MTR], flow->meter);
9759 return rte_flow_error_set(error,
9761 RTE_FLOW_ERROR_TYPE_ACTION,
9764 "or invalid parameters");
9766 dev_flow->dv.actions[actions_n++] =
9767 fm->mfts->meter_action;
9768 action_flags |= MLX5_FLOW_ACTION_METER;
9770 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9771 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9774 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9776 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9777 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9780 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9782 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9783 sample_act_pos = actions_n;
9784 ret = flow_dv_translate_action_sample(dev,
9794 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9795 /* Put encap action into group action if used with port id action. */
9796 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9797 (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9798 sample_act->action_flags |=
9799 MLX5_FLOW_ACTION_ENCAP;
9801 case RTE_FLOW_ACTION_TYPE_END:
9803 if (mhdr_res->actions_num) {
9804 /* create modify action if needed. */
9805 if (flow_dv_modify_hdr_resource_register
9806 (dev, mhdr_res, dev_flow, error))
9808 dev_flow->dv.actions[modify_action_position] =
9809 handle->dvh.modify_hdr->action;
9811 if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9813 flow_dv_translate_create_counter(dev,
9814 dev_flow, count, age);
9817 return rte_flow_error_set
9819 RTE_FLOW_ERROR_TYPE_ACTION,
9821 "cannot create counter"
9823 dev_flow->dv.actions[actions_n] =
9824 (flow_dv_counter_get_by_idx(dev,
9825 flow->counter, NULL))->action;
9828 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9829 ret = flow_dv_create_action_sample(dev,
9838 return rte_flow_error_set
9840 RTE_FLOW_ERROR_TYPE_ACTION,
9842 "cannot create sample action");
9843 if (num_of_dest > 1) {
9844 dev_flow->dv.actions[sample_act_pos] =
9845 dev_flow->dv.dest_array_res->action;
9847 dev_flow->dv.actions[sample_act_pos] =
9848 dev_flow->dv.sample_res->verbs_action;
9855 if (mhdr_res->actions_num &&
9856 modify_action_position == UINT32_MAX)
9857 modify_action_position = actions_n++;
9860 * For multiple destinations (sample action with ratio=1), the encap
9861 * action and the port id action will be combined into a group action.
9862 * The original actions must therefore be removed from the flow and
9863 * only the sample action is used instead.
9865 if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9867 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9869 for (i = 0; i < actions_n; i++) {
9870 if ((sample_act->dr_encap_action &&
9871 sample_act->dr_encap_action ==
9872 dev_flow->dv.actions[i]) ||
9873 (sample_act->dr_port_id_action &&
9874 sample_act->dr_port_id_action ==
9875 dev_flow->dv.actions[i]))
9877 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9879 memcpy((void *)dev_flow->dv.actions,
9880 (void *)temp_actions,
9881 tmp_actions_n * sizeof(void *));
9882 actions_n = tmp_actions_n;
9884 dev_flow->dv.actions_n = actions_n;
9885 dev_flow->act_flags = action_flags;
9886 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9887 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9888 int item_type = items->type;
9890 if (!mlx5_flow_os_item_supported(item_type))
9891 return rte_flow_error_set(error, ENOTSUP,
9892 RTE_FLOW_ERROR_TYPE_ITEM,
9893 NULL, "item not supported");
9894 switch (item_type) {
9895 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9896 flow_dv_translate_item_port_id(dev, match_mask,
9897 match_value, items);
9898 last_item = MLX5_FLOW_ITEM_PORT_ID;
9900 case RTE_FLOW_ITEM_TYPE_ETH:
9901 flow_dv_translate_item_eth(match_mask, match_value,
9903 dev_flow->dv.group);
9904 matcher.priority = action_flags &
9905 MLX5_FLOW_ACTION_DEFAULT_MISS &&
9906 !dev_flow->external ?
9907 MLX5_PRIORITY_MAP_L3 :
9908 MLX5_PRIORITY_MAP_L2;
9909 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9910 MLX5_FLOW_LAYER_OUTER_L2;
9912 case RTE_FLOW_ITEM_TYPE_VLAN:
9913 flow_dv_translate_item_vlan(dev_flow,
9914 match_mask, match_value,
9916 dev_flow->dv.group);
9917 matcher.priority = MLX5_PRIORITY_MAP_L2;
9918 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9919 MLX5_FLOW_LAYER_INNER_VLAN) :
9920 (MLX5_FLOW_LAYER_OUTER_L2 |
9921 MLX5_FLOW_LAYER_OUTER_VLAN);
9923 case RTE_FLOW_ITEM_TYPE_IPV4:
9924 mlx5_flow_tunnel_ip_check(items, next_protocol,
9925 &item_flags, &tunnel);
9926 flow_dv_translate_item_ipv4(match_mask, match_value,
9928 dev_flow->dv.group);
9929 matcher.priority = MLX5_PRIORITY_MAP_L3;
9930 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
9931 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
9932 if (items->mask != NULL &&
9933 ((const struct rte_flow_item_ipv4 *)
9934 items->mask)->hdr.next_proto_id) {
9936 ((const struct rte_flow_item_ipv4 *)
9937 (items->spec))->hdr.next_proto_id;
9939 ((const struct rte_flow_item_ipv4 *)
9940 (items->mask))->hdr.next_proto_id;
9942 /* Reset for inner layer. */
9943 next_protocol = 0xff;
9946 case RTE_FLOW_ITEM_TYPE_IPV6:
9947 mlx5_flow_tunnel_ip_check(items, next_protocol,
9948 &item_flags, &tunnel);
9949 flow_dv_translate_item_ipv6(match_mask, match_value,
9951 dev_flow->dv.group);
9952 matcher.priority = MLX5_PRIORITY_MAP_L3;
9953 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
9954 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
9955 if (items->mask != NULL &&
9956 ((const struct rte_flow_item_ipv6 *)
9957 items->mask)->hdr.proto) {
9959 ((const struct rte_flow_item_ipv6 *)
9960 items->spec)->hdr.proto;
9962 ((const struct rte_flow_item_ipv6 *)
9963 items->mask)->hdr.proto;
9965 /* Reset for inner layer. */
9966 next_protocol = 0xff;
9969 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
9970 flow_dv_translate_item_ipv6_frag_ext(match_mask,
9973 last_item = tunnel ?
9974 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
9975 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
9976 if (items->mask != NULL &&
9977 ((const struct rte_flow_item_ipv6_frag_ext *)
9978 items->mask)->hdr.next_header) {
9980 ((const struct rte_flow_item_ipv6_frag_ext *)
9981 items->spec)->hdr.next_header;
9983 ((const struct rte_flow_item_ipv6_frag_ext *)
9984 items->mask)->hdr.next_header;
9986 /* Reset for inner layer. */
9987 next_protocol = 0xff;
9990 case RTE_FLOW_ITEM_TYPE_TCP:
9991 flow_dv_translate_item_tcp(match_mask, match_value,
9993 matcher.priority = MLX5_PRIORITY_MAP_L4;
9994 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
9995 MLX5_FLOW_LAYER_OUTER_L4_TCP;
9997 case RTE_FLOW_ITEM_TYPE_UDP:
9998 flow_dv_translate_item_udp(match_mask, match_value,
10000 matcher.priority = MLX5_PRIORITY_MAP_L4;
10001 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10002 MLX5_FLOW_LAYER_OUTER_L4_UDP;
10004 case RTE_FLOW_ITEM_TYPE_GRE:
10005 flow_dv_translate_item_gre(match_mask, match_value,
10007 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10008 last_item = MLX5_FLOW_LAYER_GRE;
10010 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10011 flow_dv_translate_item_gre_key(match_mask,
10012 match_value, items);
10013 last_item = MLX5_FLOW_LAYER_GRE_KEY;
10015 case RTE_FLOW_ITEM_TYPE_NVGRE:
10016 flow_dv_translate_item_nvgre(match_mask, match_value,
10018 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10019 last_item = MLX5_FLOW_LAYER_GRE;
10021 case RTE_FLOW_ITEM_TYPE_VXLAN:
10022 flow_dv_translate_item_vxlan(match_mask, match_value,
10024 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10025 last_item = MLX5_FLOW_LAYER_VXLAN;
10027 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10028 flow_dv_translate_item_vxlan_gpe(match_mask,
10029 match_value, items,
10031 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10032 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10034 case RTE_FLOW_ITEM_TYPE_GENEVE:
10035 flow_dv_translate_item_geneve(match_mask, match_value,
10037 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10038 last_item = MLX5_FLOW_LAYER_GENEVE;
10040 case RTE_FLOW_ITEM_TYPE_MPLS:
10041 flow_dv_translate_item_mpls(match_mask, match_value,
10042 items, last_item, tunnel);
10043 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10044 last_item = MLX5_FLOW_LAYER_MPLS;
10046 case RTE_FLOW_ITEM_TYPE_MARK:
10047 flow_dv_translate_item_mark(dev, match_mask,
10048 match_value, items);
10049 last_item = MLX5_FLOW_ITEM_MARK;
10051 case RTE_FLOW_ITEM_TYPE_META:
10052 flow_dv_translate_item_meta(dev, match_mask,
10053 match_value, attr, items);
10054 last_item = MLX5_FLOW_ITEM_METADATA;
10056 case RTE_FLOW_ITEM_TYPE_ICMP:
10057 flow_dv_translate_item_icmp(match_mask, match_value,
10059 last_item = MLX5_FLOW_LAYER_ICMP;
10061 case RTE_FLOW_ITEM_TYPE_ICMP6:
10062 flow_dv_translate_item_icmp6(match_mask, match_value,
10064 last_item = MLX5_FLOW_LAYER_ICMP6;
10066 case RTE_FLOW_ITEM_TYPE_TAG:
10067 flow_dv_translate_item_tag(dev, match_mask,
10068 match_value, items);
10069 last_item = MLX5_FLOW_ITEM_TAG;
10071 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10072 flow_dv_translate_mlx5_item_tag(dev, match_mask,
10073 match_value, items);
10074 last_item = MLX5_FLOW_ITEM_TAG;
10076 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10077 flow_dv_translate_item_tx_queue(dev, match_mask,
10080 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10082 case RTE_FLOW_ITEM_TYPE_GTP:
10083 flow_dv_translate_item_gtp(match_mask, match_value,
10085 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10086 last_item = MLX5_FLOW_LAYER_GTP;
10088 case RTE_FLOW_ITEM_TYPE_ECPRI:
10089 if (!mlx5_flex_parser_ecpri_exist(dev)) {
10090 /* Create it only the first time it is used. */
10091 ret = mlx5_flex_parser_ecpri_alloc(dev);
10093 return rte_flow_error_set
10095 RTE_FLOW_ERROR_TYPE_ITEM,
10097 "cannot create eCPRI parser");
10099 /* Adjust the length matcher and device flow value. */
10100 matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10101 dev_flow->dv.value.size =
10102 MLX5_ST_SZ_BYTES(fte_match_param);
10103 flow_dv_translate_item_ecpri(dev, match_mask,
10104 match_value, items);
10105 /* No other protocol should follow eCPRI layer. */
10106 last_item = MLX5_FLOW_LAYER_ECPRI;
10111 item_flags |= last_item;
10114 * When E-Switch mode is enabled, we have two cases where we need to
10115 * set the source port manually.
10116 * The first one is the case of a NIC steering rule, and the second is
10117 * an E-Switch rule where no port_id item was found. In both cases
10118 * the source port is set according to the current port in use.
10120 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10121 (priv->representor || priv->master)) {
10122 if (flow_dv_translate_item_port_id(dev, match_mask,
10123 match_value, NULL))
10126 #ifdef RTE_LIBRTE_MLX5_DEBUG
10127 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10128 dev_flow->dv.value.buf));
10131 * Layers may already be initialized from the prefix flow if this
10132 * dev_flow is the suffix flow.
10134 handle->layers |= item_flags;
10135 if (action_flags & MLX5_FLOW_ACTION_RSS)
10136 flow_dv_hashfields_set(dev_flow, rss_desc);
10137 /* Register matcher. */
10138 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10139 matcher.mask.size);
10140 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10142 /* The reserved field doesn't need to be set to 0 here. */
10143 tbl_key.domain = attr->transfer;
10144 tbl_key.direction = attr->egress;
10145 tbl_key.table_id = dev_flow->dv.group;
10146 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
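/*
 * Illustrative sketch, not part of the PMD: the OF_PUSH_VLAN handling in
 * __flow_dv_translate() above folds a following OF_SET_VLAN_VID (and
 * OF_SET_VLAN_PCP) into the pushed header, so an application typically
 * emits the actions together as below. The function name and the VLAN ID
 * are hypothetical.
 */
static __rte_unused struct rte_flow *
example_flow_with_vlan_push(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .egress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_of_push_vlan push_vlan = {
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	const struct rte_flow_action_of_set_vlan_vid vlan_vid = {
		.vlan_vid = RTE_BE16(100),
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push_vlan },
		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vlan_vid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}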
10152 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10155 * @param[in, out] action
10156 * Shared RSS action holding hash RX queue objects.
10157 * @param[in] hash_fields
10158 * Defines combination of packet fields to participate in RX hash.
10159 * @param[in] tunnel
10161 * @param[in] hrxq_idx
10162 * Hash RX queue index to set.
10165 * 0 on success, otherwise negative errno value.
10168 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10169 const uint64_t hash_fields,
10173 uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10175 switch (hash_fields & ~IBV_RX_HASH_INNER) {
10176 case MLX5_RSS_HASH_IPV4:
10177 hrxqs[0] = hrxq_idx;
10179 case MLX5_RSS_HASH_IPV4_TCP:
10180 hrxqs[1] = hrxq_idx;
10182 case MLX5_RSS_HASH_IPV4_UDP:
10183 hrxqs[2] = hrxq_idx;
10185 case MLX5_RSS_HASH_IPV6:
10186 hrxqs[3] = hrxq_idx;
10188 case MLX5_RSS_HASH_IPV6_TCP:
10189 hrxqs[4] = hrxq_idx;
10191 case MLX5_RSS_HASH_IPV6_UDP:
10192 hrxqs[5] = hrxq_idx;
10194 case MLX5_RSS_HASH_NONE:
10195 hrxqs[6] = hrxq_idx;
10203 * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10206 * @param[in] action
10207 * Shared RSS action holding hash RX queue objects.
10208 * @param[in] hash_fields
10209 * Defines combination of packet fields to participate in RX hash.
10210 * @param[in] tunnel
10214 * Valid hash RX queue index, otherwise 0.
10217 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10218 const uint64_t hash_fields,
10221 const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10223 switch (hash_fields & ~IBV_RX_HASH_INNER) {
10224 case MLX5_RSS_HASH_IPV4:
10226 case MLX5_RSS_HASH_IPV4_TCP:
10228 case MLX5_RSS_HASH_IPV4_UDP:
10230 case MLX5_RSS_HASH_IPV6:
10232 case MLX5_RSS_HASH_IPV6_TCP:
10234 case MLX5_RSS_HASH_IPV6_UDP:
10236 case MLX5_RSS_HASH_NONE:
10244 * Retrieves a hash RX queue suitable for the *flow*.
10245 * If a shared action is configured for the *flow*, a suitable hash RX queue
10246 * will be retrieved from the attached shared action.
10249 * Shared RSS action holding hash RX queue objects.
10250 * @param[in] dev_flow
10251 * Pointer to the sub flow.
10253 * Pointer to retrieved hash RX queue object.
10256 * Valid hash RX queue index, otherwise 0 and rte_errno is set.
10259 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10260 struct mlx5_flow *dev_flow,
10261 struct mlx5_hrxq **hrxq)
10263 struct mlx5_priv *priv = dev->data->dev_private;
10264 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10267 if (flow->shared_rss) {
10268 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10269 (flow->shared_rss, dev_flow->hash_fields,
10270 !!(dev_flow->handle->layers &
10271 MLX5_FLOW_LAYER_TUNNEL));
10273 *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10275 __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10279 struct mlx5_flow_rss_desc *rss_desc =
10280 &wks->rss_desc[!!wks->flow_nested_idx];
10282 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10289 * Apply the flow to the NIC, lock free,
10290 * (mutex should be acquired by caller).
10293 * Pointer to the Ethernet device structure.
10294 * @param[in, out] flow
10295 * Pointer to flow structure.
10296 * @param[out] error
10297 * Pointer to error structure.
10300 * 0 on success, a negative errno value otherwise and rte_errno is set.
10303 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10304 struct rte_flow_error *error)
10306 struct mlx5_flow_dv_workspace *dv;
10307 struct mlx5_flow_handle *dh;
10308 struct mlx5_flow_handle_dv *dv_h;
10309 struct mlx5_flow *dev_flow;
10310 struct mlx5_priv *priv = dev->data->dev_private;
10311 uint32_t handle_idx;
10315 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10318 for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10319 dev_flow = &wks->flows[idx];
10320 dv = &dev_flow->dv;
10321 dh = dev_flow->handle;
10324 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10325 if (dv->transfer) {
10326 dv->actions[n++] = priv->sh->esw_drop_action;
10328 MLX5_ASSERT(priv->drop_queue.hrxq);
10330 priv->drop_queue.hrxq->action;
10332 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10333 !dv_h->rix_sample && !dv_h->rix_dest_array) {
10334 struct mlx5_hrxq *hrxq = NULL;
10335 uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10336 (dev, flow, dev_flow, &hrxq);
10340 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10341 "cannot get hash queue");
10344 dh->rix_hrxq = hrxq_idx;
10345 dv->actions[n++] = hrxq->action;
10346 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10347 if (!priv->sh->default_miss_action) {
10350 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10351 "default miss action was not created.");
10354 dv->actions[n++] = priv->sh->default_miss_action;
10356 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10357 (void *)&dv->value, n,
10358 dv->actions, &dh->drv_flow);
10360 rte_flow_error_set(error, errno,
10361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10363 "hardware refuses to create flow");
10366 if (priv->vmwa_context &&
10367 dh->vf_vlan.tag && !dh->vf_vlan.created) {
10369 * The rule contains the VLAN pattern.
10370 * For a VF we are going to create a VLAN
10371 * interface to make the hypervisor set the correct
10372 * e-Switch vport context.
10374 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10379 err = rte_errno; /* Save rte_errno before cleanup. */
10380 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10381 handle_idx, dh, next) {
10382 /* hrxq is a union, don't clear it if the flag is not set. */
10383 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10384 mlx5_hrxq_release(dev, dh->rix_hrxq);
10387 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10388 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10390 rte_errno = err; /* Restore rte_errno. */
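/*
 * Illustrative sketch, not part of the PMD: the error path of
 * __flow_dv_apply() above saves rte_errno before releasing resources and
 * restores it afterwards, because the release helpers may overwrite it.
 * A minimal generic version of that pattern; the callbacks passed in are
 * hypothetical.
 */
static __rte_unused int
example_apply_with_cleanup(struct rte_eth_dev *dev,
			   int (*do_apply)(struct rte_eth_dev *),
			   void (*release)(struct rte_eth_dev *))
{
	int err;

	if (do_apply(dev) == 0)
		return 0;
	err = rte_errno;	/* Save rte_errno before cleanup. */
	release(dev);		/* The release path may overwrite rte_errno. */
	rte_errno = err;	/* Restore the original failure code. */
	return -rte_errno;
}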
10395 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10396 struct mlx5_cache_entry *entry)
10398 struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10401 claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10406 * Release the flow matcher.
10409 * Pointer to Ethernet device.
10411 * Pointer to mlx5_flow_handle.
10414 * 1 while a reference on it exists, 0 when freed.
10417 flow_dv_matcher_release(struct rte_eth_dev *dev,
10418 struct mlx5_flow_handle *handle)
10420 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10421 struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10422 typeof(*tbl), tbl);
10425 MLX5_ASSERT(matcher->matcher_object);
10426 ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10427 flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10432 * Release encap_decap resource.
10435 * Pointer to the hash list.
10437 * Pointer to exist resource entry object.
10440 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10441 struct mlx5_hlist_entry *entry)
10443 struct mlx5_dev_ctx_shared *sh = list->ctx;
10444 struct mlx5_flow_dv_encap_decap_resource *res =
10445 container_of(entry, typeof(*res), entry);
10447 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10448 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10452 * Release an encap/decap resource.
10455 * Pointer to Ethernet device.
10456 * @param encap_decap_idx
10457 * Index of encap decap resource.
10460 * 1 while a reference on it exists, 0 when freed.
10463 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10464 uint32_t encap_decap_idx)
10466 struct mlx5_priv *priv = dev->data->dev_private;
10467 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10469 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10471 if (!cache_resource)
10473 MLX5_ASSERT(cache_resource->action);
10474 return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10475 &cache_resource->entry);
10479 * Release a jump-to-table action resource.
10482 * Pointer to Ethernet device.
10484 * Pointer to mlx5_flow_handle.
10487 * 1 while a reference on it exists, 0 when freed.
10490 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10491 struct mlx5_flow_handle *handle)
10493 struct mlx5_priv *priv = dev->data->dev_private;
10494 struct mlx5_flow_tbl_data_entry *tbl_data;
10496 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10500 return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10504 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10505 struct mlx5_hlist_entry *entry)
10507 struct mlx5_flow_dv_modify_hdr_resource *res =
10508 container_of(entry, typeof(*res), entry);
10510 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10515 * Release a modify-header resource.
10518 * Pointer to Ethernet device.
10520 * Pointer to mlx5_flow_handle.
10523 * 1 while a reference on it exists, 0 when freed.
10526 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10527 struct mlx5_flow_handle *handle)
10529 struct mlx5_priv *priv = dev->data->dev_private;
10530 struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10532 MLX5_ASSERT(entry->action);
10533 return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10537 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10538 struct mlx5_cache_entry *entry)
10540 struct mlx5_dev_ctx_shared *sh = list->ctx;
10541 struct mlx5_flow_dv_port_id_action_resource *cache =
10542 container_of(entry, typeof(*cache), entry);
10544 claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10545 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10549 * Release port ID action resource.
10552 * Pointer to Ethernet device.
10554 * Pointer to mlx5_flow_handle.
10557 * 1 while a reference on it exists, 0 when freed.
10560 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10563 struct mlx5_priv *priv = dev->data->dev_private;
10564 struct mlx5_flow_dv_port_id_action_resource *cache;
10566 cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10569 MLX5_ASSERT(cache->action);
10570 return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10575 * Release push vlan action resource.
10578 * Pointer to Ethernet device.
10580 * Pointer to mlx5_flow_handle.
10583 * 1 while a reference on it exists, 0 when freed.
10586 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10587 struct mlx5_flow_handle *handle)
10589 struct mlx5_priv *priv = dev->data->dev_private;
10590 uint32_t idx = handle->dvh.rix_push_vlan;
10591 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
10593 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10595 if (!cache_resource)
10597 MLX5_ASSERT(cache_resource->action);
10598 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
10599 (void *)cache_resource,
10600 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10601 if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10602 __ATOMIC_RELAXED) == 0) {
10603 claim_zero(mlx5_flow_os_destroy_flow_action
10604 (cache_resource->action));
10605 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10606 &priv->sh->push_vlan_action_list, idx,
10607 cache_resource, next);
10608 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10609 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
10610 (void *)cache_resource);
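/*
 * Illustrative sketch (not driver code) of the refcounted-release pattern
 * shared by the push VLAN, sample and destination array resources in this
 * file: atomically decrement the reference count and destroy the hardware
 * object only when the last reference is dropped. All names below are
 * hypothetical.
 */
#if 0
struct example_cached_resource {
	uint32_t refcnt;	/* Managed with the __atomic builtins. */
	void *hw_action;	/* Underlying rdma-core/DevX object. */
};

/* Return 1 while a reference still exists, 0 when the object was freed. */
static int
example_cached_resource_release(struct example_cached_resource *res)
{
	if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	example_destroy_hw_action(res->hw_action);	/* Hypothetical helper. */
	example_unlink_and_free(res);			/* Hypothetical helper. */
	return 0;
}
#endif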
10617 * Release the fate resource.
10620 * Pointer to Ethernet device.
10622 * Pointer to mlx5_flow_handle.
10625 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10626 struct mlx5_flow_handle *handle)
10628 if (!handle->rix_fate)
10630 switch (handle->fate_action) {
10631 case MLX5_FLOW_FATE_QUEUE:
10632 mlx5_hrxq_release(dev, handle->rix_hrxq);
10634 case MLX5_FLOW_FATE_JUMP:
10635 flow_dv_jump_tbl_resource_release(dev, handle);
10637 case MLX5_FLOW_FATE_PORT_ID:
10638 flow_dv_port_id_action_resource_release(dev,
10639 handle->rix_port_id_action);
10642 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
10645 handle->rix_fate = 0;
10649 * Release a sample resource.
10652 * Pointer to Ethernet device.
10654 * Pointer to mlx5_flow_handle.
10657 * 1 while a reference on it exists, 0 when freed.
10660 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10661 struct mlx5_flow_handle *handle)
10663 struct mlx5_priv *priv = dev->data->dev_private;
10664 uint32_t idx = handle->dvh.rix_sample;
10665 struct mlx5_flow_dv_sample_resource *cache_resource;
10667 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10669 if (!cache_resource)
10671 MLX5_ASSERT(cache_resource->verbs_action);
10672 DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
10673 (void *)cache_resource,
10674 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10675 if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10676 __ATOMIC_RELAXED) == 0) {
10677 if (cache_resource->verbs_action)
10678 claim_zero(mlx5_glue->destroy_flow_action
10679 (cache_resource->verbs_action));
10680 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10681 if (cache_resource->default_miss)
10682 claim_zero(mlx5_glue->destroy_flow_action
10683 (cache_resource->default_miss));
10685 if (cache_resource->normal_path_tbl)
10686 flow_dv_tbl_resource_release(MLX5_SH(dev),
10687 cache_resource->normal_path_tbl);
10689 if (cache_resource->sample_idx.rix_hrxq &&
10690 !mlx5_hrxq_release(dev,
10691 cache_resource->sample_idx.rix_hrxq))
10692 cache_resource->sample_idx.rix_hrxq = 0;
10693 if (cache_resource->sample_idx.rix_tag &&
10694 !flow_dv_tag_release(dev,
10695 cache_resource->sample_idx.rix_tag))
10696 cache_resource->sample_idx.rix_tag = 0;
10697 if (cache_resource->sample_idx.cnt) {
10698 flow_dv_counter_release(dev,
10699 cache_resource->sample_idx.cnt);
10700 cache_resource->sample_idx.cnt = 0;
10702 if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
10703 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10704 &priv->sh->sample_action_list, idx,
10705 cache_resource, next);
10706 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10707 DRV_LOG(DEBUG, "sample resource %p: removed",
10708 (void *)cache_resource);
10715 * Release a destination array resource.
10718 * Pointer to Ethernet device.
10720 * Pointer to mlx5_flow_handle.
10723 * 1 while a reference on it exists, 0 when freed.
10726 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10727 struct mlx5_flow_handle *handle)
10729 struct mlx5_priv *priv = dev->data->dev_private;
10730 struct mlx5_flow_dv_dest_array_resource *cache_resource;
10731 struct mlx5_flow_sub_actions_idx *mdest_act_res;
10732 uint32_t idx = handle->dvh.rix_dest_array;
10735 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10737 if (!cache_resource)
10739 MLX5_ASSERT(cache_resource->action);
10740 DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
10741 (void *)cache_resource,
10742 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10743 if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10744 __ATOMIC_RELAXED) == 0) {
10745 if (cache_resource->action)
10746 claim_zero(mlx5_glue->destroy_flow_action
10747 (cache_resource->action));
10748 for (; i < cache_resource->num_of_dest; i++) {
10749 mdest_act_res = &cache_resource->sample_idx[i];
10750 if (mdest_act_res->rix_hrxq) {
10751 mlx5_hrxq_release(dev,
10752 mdest_act_res->rix_hrxq);
10753 mdest_act_res->rix_hrxq = 0;
10755 if (mdest_act_res->rix_encap_decap) {
10756 flow_dv_encap_decap_resource_release(dev,
10757 mdest_act_res->rix_encap_decap);
10758 mdest_act_res->rix_encap_decap = 0;
10760 if (mdest_act_res->rix_port_id_action) {
10761 flow_dv_port_id_action_resource_release(dev,
10762 mdest_act_res->rix_port_id_action);
10763 mdest_act_res->rix_port_id_action = 0;
10765 if (mdest_act_res->rix_tag) {
10766 flow_dv_tag_release(dev,
10767 mdest_act_res->rix_tag);
10768 mdest_act_res->rix_tag = 0;
10771 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10772 &priv->sh->dest_array_list, idx,
10773 cache_resource, next);
10774 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
10775 DRV_LOG(DEBUG, "destination array resource %p: removed",
10776 (void *)cache_resource);
10783 * Remove the flow from the NIC but keep it in memory.
10784 * Lock-free (mutex should be acquired by the caller).
10787 * Pointer to Ethernet device.
10788 * @param[in, out] flow
10789 * Pointer to flow structure.
10792 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10794 struct mlx5_flow_handle *dh;
10795 uint32_t handle_idx;
10796 struct mlx5_priv *priv = dev->data->dev_private;
10800 handle_idx = flow->dev_handles;
10801 while (handle_idx) {
10802 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10806 if (dh->drv_flow) {
10807 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10808 dh->drv_flow = NULL;
10810 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10811 flow_dv_fate_resource_release(dev, dh);
10812 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10813 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10814 handle_idx = dh->next.next;
10819 * Remove the flow from the NIC and the memory.
10820 * Lock-free (mutex should be acquired by the caller).
10823 * Pointer to the Ethernet device structure.
10824 * @param[in, out] flow
10825 * Pointer to flow structure.
10828 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10830 struct rte_flow_shared_action *shared;
10831 struct mlx5_flow_handle *dev_handle;
10832 struct mlx5_priv *priv = dev->data->dev_private;
10836 __flow_dv_remove(dev, flow);
10837 shared = mlx5_flow_get_shared_rss(flow);
10839 __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10840 if (flow->counter) {
10841 flow_dv_counter_release(dev, flow->counter);
10845 struct mlx5_flow_meter *fm;
10847 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10850 mlx5_flow_meter_detach(fm);
10853 while (flow->dev_handles) {
10854 uint32_t tmp_idx = flow->dev_handles;
10856 dev_handle = mlx5_ipool_get(priv->sh->ipool
10857 [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10860 flow->dev_handles = dev_handle->next.next;
10861 if (dev_handle->dvh.matcher)
10862 flow_dv_matcher_release(dev, dev_handle);
10863 if (dev_handle->dvh.rix_sample)
10864 flow_dv_sample_resource_release(dev, dev_handle);
10865 if (dev_handle->dvh.rix_dest_array)
10866 flow_dv_dest_array_resource_release(dev, dev_handle);
10867 if (dev_handle->dvh.rix_encap_decap)
10868 flow_dv_encap_decap_resource_release(dev,
10869 dev_handle->dvh.rix_encap_decap);
10870 if (dev_handle->dvh.modify_hdr)
10871 flow_dv_modify_hdr_resource_release(dev, dev_handle);
10872 if (dev_handle->dvh.rix_push_vlan)
10873 flow_dv_push_vlan_action_resource_release(dev,
10875 if (dev_handle->dvh.rix_tag)
10876 flow_dv_tag_release(dev,
10877 dev_handle->dvh.rix_tag);
10878 flow_dv_fate_resource_release(dev, dev_handle);
10879 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10885 * Release array of hash RX queue objects.
10889 * Pointer to the Ethernet device structure.
10890 * @param[in, out] hrxqs
10891 * Array of hash RX queue objects.
10894 * Total number of references to hash RX queue objects in *hrxqs* array
10895 * after this operation.
10898 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10899 uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10904 for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10905 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10915 * Release all hash RX queue objects representing shared RSS action.
10918 * Pointer to the Ethernet device structure.
10919 * @param[in, out] action
10920 * Shared RSS action to remove hash RX queue objects from.
10923 * Total number of references to hash RX queue objects stored in *action*
10924 * after this operation.
10925 * Expected to be 0 if no external references are held.
10928 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10929 struct mlx5_shared_action_rss *action)
10931 return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10932 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10936 * Set up the shared RSS action.
10937 * Prepare a set of hash RX queue objects sufficient to handle all valid
10938 * hash_fields combinations (see enum ibv_rx_hash_fields).
10941 * Pointer to the Ethernet device structure.
10942 * @param[in, out] action
10943 * Partially initialized shared RSS action.
10944 * @param[out] error
10945 * Perform verbose error reporting if not NULL. Initialized in case of
10949 * 0 on success, otherwise negative errno value.
10952 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
10953 struct mlx5_shared_action_rss *action,
10954 struct rte_flow_error *error)
10956 struct mlx5_flow_rss_desc rss_desc = { 0 };
10960 memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
10961 rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
10962 rss_desc.const_q = action->origin.queue;
10963 rss_desc.queue_num = action->origin.queue_num;
10964 rss_desc.standalone = true;
10965 for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
10967 uint64_t hash_fields = mlx5_rss_hash_fields[i];
10970 for (tunnel = 0; tunnel < 2; tunnel++) {
10971 rss_desc.tunnel = tunnel;
10972 rss_desc.hash_fields = hash_fields;
10973 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
10977 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10978 "cannot get hash queue");
10979 goto error_hrxq_new;
10981 err = __flow_dv_action_rss_hrxq_set
10982 (action, hash_fields, tunnel, hrxq_idx);
10989 __flow_dv_action_rss_hrxqs_release(dev, action);
10995 * Create shared RSS action.
10998 * Pointer to the Ethernet device structure.
11000 * Shared action configuration.
11002 * RSS action specification used to create shared action.
11003 * @param[out] error
11004 * Perform verbose error reporting if not NULL. Initialized in case of
11008 * A valid shared action handle in case of success, NULL otherwise and
11009 * rte_errno is set.
11011 static struct rte_flow_shared_action *
11012 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11013 const struct rte_flow_shared_action_conf *conf,
11014 const struct rte_flow_action_rss *rss,
11015 struct rte_flow_error *error)
11017 struct rte_flow_shared_action *shared_action = NULL;
11018 void *queue = NULL;
11019 struct mlx5_shared_action_rss *shared_rss;
11020 struct rte_flow_action_rss *origin;
11021 const uint8_t *rss_key;
11022 uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11024 RTE_SET_USED(conf);
11025 queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11027 shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11029 if (!shared_action || !queue) {
11030 rte_flow_error_set(error, ENOMEM,
11031 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11032 "cannot allocate resource memory");
11033 goto error_rss_init;
11035 shared_rss = &shared_action->rss;
11036 shared_rss->queue = queue;
11037 origin = &shared_rss->origin;
11038 origin->func = rss->func;
11039 origin->level = rss->level;
11040 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11041 origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11042 /* NULL RSS key indicates default RSS key. */
11043 rss_key = !rss->key ? rss_hash_default_key : rss->key;
11044 memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11045 origin->key = &shared_rss->key[0];
11046 origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11047 memcpy(shared_rss->queue, rss->queue, queue_size);
11048 origin->queue = shared_rss->queue;
11049 origin->queue_num = rss->queue_num;
11050 if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11051 goto error_rss_init;
11052 shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11053 return shared_action;
11055 mlx5_free(shared_action);
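/*
 * Usage sketch (application side, not driver code): creating a shared RSS
 * action, which is served by __flow_dv_action_rss_create() above. This
 * assumes the experimental rte_flow_shared_action_create() API of this DPDK
 * generation; the port, queue list and RSS parameters are example values.
 */
#if 0
static struct rte_flow_shared_action *
example_create_shared_rss(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IP,	/* 0 would also select ETH_RSS_IP. */
		.key = NULL,		/* NULL selects the default RSS key. */
		.key_len = 0,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	struct rte_flow_shared_action_conf conf = {
		.ingress = 1,
	};
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};

	return rte_flow_shared_action_create(port_id, &conf, &action, error);
}
#endif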
11061 * Destroy the shared RSS action.
11062 * Release related hash RX queue objects.
11065 * Pointer to the Ethernet device structure.
11066 * @param[in] shared_rss
11067 * The shared RSS action object to be removed.
11068 * @param[out] error
11069 * Perform verbose error reporting if not NULL. Initialized in case of
11073 * 0 on success, otherwise negative errno value.
11076 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11077 struct mlx5_shared_action_rss *shared_rss,
11078 struct rte_flow_error *error)
11080 struct rte_flow_shared_action *shared_action = NULL;
11081 uint32_t old_refcnt = 1;
11082 int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11085 return rte_flow_error_set(error, ETOOMANYREFS,
11086 RTE_FLOW_ERROR_TYPE_ACTION,
11088 "shared rss hrxq has references");
11090 shared_action = container_of(shared_rss,
11091 struct rte_flow_shared_action, rss);
11092 if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11094 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11095 return rte_flow_error_set(error, ETOOMANYREFS,
11096 RTE_FLOW_ERROR_TYPE_ACTION,
11098 "shared rss has references");
11100 mlx5_free(shared_rss->queue);
11105 * Create a shared action, lock-free
11106 * (mutex should be acquired by the caller).
11107 * Dispatcher for the action-type-specific call.
11110 * Pointer to the Ethernet device structure.
11112 * Shared action configuration.
11113 * @param[in] action
11114 * Action specification used to create shared action.
11115 * @param[out] error
11116 * Perform verbose error reporting if not NULL. Initialized in case of
11120 * A valid shared action handle in case of success, NULL otherwise and
11121 * rte_errno is set.
11123 static struct rte_flow_shared_action *
11124 __flow_dv_action_create(struct rte_eth_dev *dev,
11125 const struct rte_flow_shared_action_conf *conf,
11126 const struct rte_flow_action *action,
11127 struct rte_flow_error *error)
11129 struct rte_flow_shared_action *shared_action = NULL;
11130 struct mlx5_priv *priv = dev->data->dev_private;
11132 switch (action->type) {
11133 case RTE_FLOW_ACTION_TYPE_RSS:
11134 shared_action = __flow_dv_action_rss_create(dev, conf,
11139 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11140 NULL, "action type not supported");
11143 if (shared_action) {
11144 __atomic_add_fetch(&shared_action->refcnt, 1,
11146 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11148 return shared_action;
11152 * Destroy the shared action.
11153 * Release action-related resources on the NIC and in memory.
11154 * Lock-free (mutex should be acquired by the caller).
11155 * Dispatcher for the action-type-specific call.
11158 * Pointer to the Ethernet device structure.
11159 * @param[in] action
11160 * The shared action object to be removed.
11161 * @param[out] error
11162 * Perform verbose error reporting if not NULL. Initialized in case of
11166 * 0 on success, otherwise negative errno value.
11169 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11170 struct rte_flow_shared_action *action,
11171 struct rte_flow_error *error)
11175 switch (action->type) {
11176 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11177 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11180 return rte_flow_error_set(error, ENOTSUP,
11181 RTE_FLOW_ERROR_TYPE_ACTION,
11183 "action type not supported");
11187 LIST_REMOVE(action, next);
11193 * Update the shared RSS action configuration in place.
11196 * Pointer to the Ethernet device structure.
11197 * @param[in] shared_rss
11198 * The shared RSS action object to be updated.
11199 * @param[in] action_conf
11200 * RSS action specification used to modify *shared_rss*.
11201 * @param[out] error
11202 * Perform verbose error reporting if not NULL. Initialized in case of
11206 * 0 on success, otherwise negative errno value.
11207 * @note: currently only updating the RSS queues is supported.
11210 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11211 struct mlx5_shared_action_rss *shared_rss,
11212 const struct rte_flow_action_rss *action_conf,
11213 struct rte_flow_error *error)
11217 void *queue = NULL;
11218 const uint8_t *rss_key;
11219 uint32_t rss_key_len;
11220 uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11222 queue = mlx5_malloc(MLX5_MEM_ZERO,
11223 RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11226 return rte_flow_error_set(error, ENOMEM,
11227 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11229 "cannot allocate resource memory");
11230 if (action_conf->key) {
11231 rss_key = action_conf->key;
11232 rss_key_len = action_conf->key_len;
11234 rss_key = rss_hash_default_key;
11235 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11237 for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11239 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11242 for (tunnel = 0; tunnel < 2; tunnel++) {
11243 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11244 (shared_rss, hash_fields, tunnel);
11245 MLX5_ASSERT(hrxq_idx);
11246 ret = mlx5_hrxq_modify
11248 rss_key, rss_key_len,
11250 action_conf->queue, action_conf->queue_num);
11253 return rte_flow_error_set
11255 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11256 "cannot update hash queue");
11260 mlx5_free(shared_rss->queue);
11261 shared_rss->queue = queue;
11262 memcpy(shared_rss->queue, action_conf->queue, queue_size);
11263 shared_rss->origin.queue = shared_rss->queue;
11264 shared_rss->origin.queue_num = action_conf->queue_num;
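/*
 * Usage sketch (application side, not driver code): switching the queue set
 * of an existing shared RSS action, which ends up in
 * __flow_dv_action_rss_update() above. This assumes the experimental
 * rte_flow_shared_action_update() prototype of this DPDK generation, which
 * takes a full RSS action as the update; only the queue list is honoured by
 * the note above. The queue values are examples.
 */
#if 0
static int
example_update_shared_rss_queues(uint16_t port_id,
				 struct rte_flow_shared_action *handle,
				 struct rte_flow_error *error)
{
	static const uint16_t new_queues[] = { 4, 5, 6, 7 };
	struct rte_flow_action_rss rss = {
		.queue = new_queues,
		.queue_num = RTE_DIM(new_queues),
	};
	struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};

	return rte_flow_shared_action_update(port_id, handle, &update, error);
}
#endif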
11269 * Update the shared action configuration in place, lock-free
11270 * (mutex should be acquired by the caller).
11273 * Pointer to the Ethernet device structure.
11274 * @param[in] action
11275 * The shared action object to be updated.
11276 * @param[in] action_conf
11277 * Action specification used to modify *action*.
11278 * *action_conf* should be of a type matching the type of *action*,
11279 * otherwise it is considered invalid.
11280 * @param[out] error
11281 * Perform verbose error reporting if not NULL. Initialized in case of
11285 * 0 on success, otherwise negative errno value.
11288 __flow_dv_action_update(struct rte_eth_dev *dev,
11289 struct rte_flow_shared_action *action,
11290 const void *action_conf,
11291 struct rte_flow_error *error)
11293 switch (action->type) {
11294 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11295 return __flow_dv_action_rss_update(dev, &action->rss,
11296 action_conf, error);
11298 return rte_flow_error_set(error, ENOTSUP,
11299 RTE_FLOW_ERROR_TYPE_ACTION,
11301 "action type not supported");
11305 * Query a dv flow rule for its statistics via devx.
11308 * Pointer to Ethernet device.
11310 * Pointer to the sub flow.
11312 * data retrieved by the query.
11313 * @param[out] error
11314 * Perform verbose error reporting if not NULL.
11317 * 0 on success, a negative errno value otherwise and rte_errno is set.
11320 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11321 void *data, struct rte_flow_error *error)
11323 struct mlx5_priv *priv = dev->data->dev_private;
11324 struct rte_flow_query_count *qc = data;
11326 if (!priv->config.devx)
11327 return rte_flow_error_set(error, ENOTSUP,
11328 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11330 "counters are not supported");
11331 if (flow->counter) {
11332 uint64_t pkts, bytes;
11333 struct mlx5_flow_counter *cnt;
11335 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11337 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11341 return rte_flow_error_set(error, -err,
11342 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11343 NULL, "cannot read counters");
11346 qc->hits = pkts - cnt->hits;
11347 qc->bytes = bytes - cnt->bytes;
11350 cnt->bytes = bytes;
11354 return rte_flow_error_set(error, EINVAL,
11355 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11357 "counters are not available");
11361 * Query a flow rule AGE action for aging information.
11364 * Pointer to Ethernet device.
11366 * Pointer to the sub flow.
11368 * data retrieved by the query.
11369 * @param[out] error
11370 * Perform verbose error reporting if not NULL.
11373 * 0 on success, a negative errno value otherwise and rte_errno is set.
11376 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11377 void *data, struct rte_flow_error *error)
11379 struct rte_flow_query_age *resp = data;
11381 if (flow->counter) {
11382 struct mlx5_age_param *age_param =
11383 flow_dv_counter_idx_get_age(dev, flow->counter);
11385 if (!age_param || !age_param->timeout)
11386 return rte_flow_error_set
11388 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11389 NULL, "cannot read age data");
11390 resp->aged = __atomic_load_n(&age_param->state,
11391 __ATOMIC_RELAXED) ==
11393 resp->sec_since_last_hit_valid = !resp->aged;
11394 if (resp->sec_since_last_hit_valid)
11395 resp->sec_since_last_hit =
11396 __atomic_load_n(&age_param->sec_since_last_hit,
11400 return rte_flow_error_set(error, EINVAL,
11401 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11403 "age data not available");
11409 * @see rte_flow_query()
11410 * @see rte_flow_ops
11413 flow_dv_query(struct rte_eth_dev *dev,
11414 struct rte_flow *flow __rte_unused,
11415 const struct rte_flow_action *actions __rte_unused,
11416 void *data __rte_unused,
11417 struct rte_flow_error *error __rte_unused)
11421 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11422 switch (actions->type) {
11423 case RTE_FLOW_ACTION_TYPE_VOID:
11425 case RTE_FLOW_ACTION_TYPE_COUNT:
11426 ret = flow_dv_query_count(dev, flow, data, error);
11428 case RTE_FLOW_ACTION_TYPE_AGE:
11429 ret = flow_dv_query_age(dev, flow, data, error);
11432 return rte_flow_error_set(error, ENOTSUP,
11433 RTE_FLOW_ERROR_TYPE_ACTION,
11435 "action not supported");
11442 * Destroy the meter table set.
11443 * Lock-free (mutex should be acquired by the caller).
11446 * Pointer to Ethernet device.
11448 * Pointer to the meter table set.
11454 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11455 struct mlx5_meter_domains_infos *tbl)
11457 struct mlx5_priv *priv = dev->data->dev_private;
11458 struct mlx5_meter_domains_infos *mtd =
11459 (struct mlx5_meter_domains_infos *)tbl;
11461 if (!mtd || !priv->config.dv_flow_en)
11463 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11464 claim_zero(mlx5_flow_os_destroy_flow
11465 (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11466 if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11467 claim_zero(mlx5_flow_os_destroy_flow
11468 (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11469 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11470 claim_zero(mlx5_flow_os_destroy_flow
11471 (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11472 if (mtd->egress.color_matcher)
11473 claim_zero(mlx5_flow_os_destroy_flow_matcher
11474 (mtd->egress.color_matcher));
11475 if (mtd->egress.any_matcher)
11476 claim_zero(mlx5_flow_os_destroy_flow_matcher
11477 (mtd->egress.any_matcher));
11478 if (mtd->egress.tbl)
11479 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11480 if (mtd->egress.sfx_tbl)
11481 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11482 if (mtd->ingress.color_matcher)
11483 claim_zero(mlx5_flow_os_destroy_flow_matcher
11484 (mtd->ingress.color_matcher));
11485 if (mtd->ingress.any_matcher)
11486 claim_zero(mlx5_flow_os_destroy_flow_matcher
11487 (mtd->ingress.any_matcher));
11488 if (mtd->ingress.tbl)
11489 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11490 if (mtd->ingress.sfx_tbl)
11491 flow_dv_tbl_resource_release(MLX5_SH(dev),
11492 mtd->ingress.sfx_tbl);
11493 if (mtd->transfer.color_matcher)
11494 claim_zero(mlx5_flow_os_destroy_flow_matcher
11495 (mtd->transfer.color_matcher));
11496 if (mtd->transfer.any_matcher)
11497 claim_zero(mlx5_flow_os_destroy_flow_matcher
11498 (mtd->transfer.any_matcher));
11499 if (mtd->transfer.tbl)
11500 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11501 if (mtd->transfer.sfx_tbl)
11502 flow_dv_tbl_resource_release(MLX5_SH(dev),
11503 mtd->transfer.sfx_tbl);
11504 if (mtd->drop_actn)
11505 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11510 /* Number of meter flow actions, count and jump or count and drop. */
11511 #define METER_ACTIONS 2
11514 * Create the meter table and suffix table for the specified domain.
11517 * Pointer to Ethernet device.
11518 * @param[in,out] mtb
11519 * Pointer to DV meter table set.
11520 * @param[in] egress
11522 * @param[in] transfer
11524 * @param[in] color_reg_c_idx
11525 * Reg C index for color match.
11528 * 0 on success, -1 otherwise and rte_errno is set.
11531 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11532 struct mlx5_meter_domains_infos *mtb,
11533 uint8_t egress, uint8_t transfer,
11534 uint32_t color_reg_c_idx)
11536 struct mlx5_priv *priv = dev->data->dev_private;
11537 struct mlx5_dev_ctx_shared *sh = priv->sh;
11538 struct mlx5_flow_dv_match_params mask = {
11539 .size = sizeof(mask.buf),
11541 struct mlx5_flow_dv_match_params value = {
11542 .size = sizeof(value.buf),
11544 struct mlx5dv_flow_matcher_attr dv_attr = {
11545 .type = IBV_FLOW_ATTR_NORMAL,
11547 .match_criteria_enable = 0,
11548 .match_mask = (void *)&mask,
11550 void *actions[METER_ACTIONS];
11551 struct mlx5_meter_domain_info *dtb;
11552 struct rte_flow_error error;
11557 dtb = &mtb->transfer;
11559 dtb = &mtb->egress;
11561 dtb = &mtb->ingress;
11562 /* Create the meter table with METER level. */
11563 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11564 egress, transfer, false, NULL, 0,
11567 DRV_LOG(ERR, "Failed to create meter policer table.");
11570 /* Create the meter suffix table with SUFFIX level. */
11571 dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11572 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11573 egress, transfer, false, NULL, 0,
11575 if (!dtb->sfx_tbl) {
11576 DRV_LOG(ERR, "Failed to create meter suffix table.");
11579 /* Create matchers, Any and Color. */
11580 dv_attr.priority = 3;
11581 dv_attr.match_criteria_enable = 0;
11582 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11583 &dtb->any_matcher);
11585 DRV_LOG(ERR, "Failed to create meter"
11586 " policer default matcher.");
11589 dv_attr.priority = 0;
11590 dv_attr.match_criteria_enable =
11591 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11592 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11593 rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11594 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11595 &dtb->color_matcher);
11597 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11600 if (mtb->count_actns[RTE_MTR_DROPPED])
11601 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11602 actions[i++] = mtb->drop_actn;
11603 /* Default rule: lowest priority, match any, actions: drop. */
11604 ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11606 &dtb->policer_rules[RTE_MTR_DROPPED]);
11608 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11617 * Create the needed meter and suffix tables.
11618 * Lock-free (mutex should be acquired by the caller).
11621 * Pointer to Ethernet device.
11623 * Pointer to the flow meter.
11626 * Pointer to table set on success, NULL otherwise and rte_errno is set.
11628 static struct mlx5_meter_domains_infos *
11629 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11630 const struct mlx5_flow_meter *fm)
11632 struct mlx5_priv *priv = dev->data->dev_private;
11633 struct mlx5_meter_domains_infos *mtb;
11637 if (!priv->mtr_en) {
11638 rte_errno = ENOTSUP;
11641 mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11643 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11646 /* Create meter count actions */
11647 for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11648 struct mlx5_flow_counter *cnt;
11649 if (!fm->policer_stats.cnt[i])
11651 cnt = flow_dv_counter_get_by_idx(dev,
11652 fm->policer_stats.cnt[i], NULL);
11653 mtb->count_actns[i] = cnt->action;
11655 /* Create drop action. */
11656 ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11658 DRV_LOG(ERR, "Failed to create drop action.");
11661 /* Egress meter table. */
11662 ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11664 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11667 /* Ingress meter table. */
11668 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11670 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11673 /* FDB meter table. */
11674 if (priv->config.dv_esw_en) {
11675 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11676 priv->mtr_color_reg);
11678 DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11684 flow_dv_destroy_mtr_tbl(dev, mtb);
11689 * Destroy domain policer rule.
11692 * Pointer to domain table.
11695 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11699 for (i = 0; i < RTE_MTR_DROPPED; i++) {
11700 if (dt->policer_rules[i]) {
11701 claim_zero(mlx5_flow_os_destroy_flow
11702 (dt->policer_rules[i]));
11703 dt->policer_rules[i] = NULL;
11706 if (dt->jump_actn) {
11707 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11708 dt->jump_actn = NULL;
11713 * Destroy policer rules.
11716 * Pointer to Ethernet device.
11718 * Pointer to flow meter structure.
11720 * Pointer to flow attributes.
11726 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11727 const struct mlx5_flow_meter *fm,
11728 const struct rte_flow_attr *attr)
11730 struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11735 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11737 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11738 if (attr->transfer)
11739 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11744 * Create the meter policer rule for the specified domain.
11747 * Pointer to flow meter structure.
11749 * Pointer to DV meter table set.
11750 * @param[in] mtr_reg_c
11751 * Color match REG_C.
11754 * 0 on success, -1 otherwise.
11757 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
11758 struct mlx5_meter_domain_info *dtb,
11761 struct mlx5_flow_dv_match_params matcher = {
11762 .size = sizeof(matcher.buf),
11764 struct mlx5_flow_dv_match_params value = {
11765 .size = sizeof(value.buf),
11767 struct mlx5_meter_domains_infos *mtb = fm->mfts;
11768 void *actions[METER_ACTIONS];
11772 /* Create jump action. */
11773 if (!dtb->jump_actn)
11774 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11775 (dtb->sfx_tbl->obj, &dtb->jump_actn);
11777 DRV_LOG(ERR, "Failed to create policer jump action.");
11780 for (i = 0; i < RTE_MTR_DROPPED; i++) {
11783 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
11784 rte_col_2_mlx5_col(i), UINT8_MAX);
11785 if (mtb->count_actns[i])
11786 actions[j++] = mtb->count_actns[i];
11787 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
11788 actions[j++] = mtb->drop_actn;
11790 actions[j++] = dtb->jump_actn;
11791 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
11792 (void *)&value, j, actions,
11793 &dtb->policer_rules[i]);
11795 DRV_LOG(ERR, "Failed to create policer rule.");
11806 * Create policer rules.
11809 * Pointer to Ethernet device.
11811 * Pointer to flow meter structure.
11813 * Pointer to flow attributes.
11816 * 0 on success, -1 otherwise.
11819 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11820 struct mlx5_flow_meter *fm,
11821 const struct rte_flow_attr *attr)
11823 struct mlx5_priv *priv = dev->data->dev_private;
11824 struct mlx5_meter_domains_infos *mtb = fm->mfts;
11827 if (attr->egress) {
11828 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11829 priv->mtr_color_reg);
11831 DRV_LOG(ERR, "Failed to create egress policer.");
11835 if (attr->ingress) {
11836 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11837 priv->mtr_color_reg);
11839 DRV_LOG(ERR, "Failed to create ingress policer.");
11843 if (attr->transfer) {
11844 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11845 priv->mtr_color_reg);
11847 DRV_LOG(ERR, "Failed to create transfer policer.");
11853 flow_dv_destroy_policer_rules(dev, fm, attr);
11858 * Validate batch counter support in the root table.
11860 * Create a simple flow with an invalid counter and a drop action on the root
11861 * table to check whether batch counters with offset are supported there.
11864 * Pointer to rte_eth_dev structure.
11867 * 0 on success, a negative errno value otherwise and rte_errno is set.
11870 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11872 struct mlx5_priv *priv = dev->data->dev_private;
11873 struct mlx5_dev_ctx_shared *sh = priv->sh;
11874 struct mlx5_flow_dv_match_params mask = {
11875 .size = sizeof(mask.buf),
11877 struct mlx5_flow_dv_match_params value = {
11878 .size = sizeof(value.buf),
11880 struct mlx5dv_flow_matcher_attr dv_attr = {
11881 .type = IBV_FLOW_ATTR_NORMAL,
11883 .match_criteria_enable = 0,
11884 .match_mask = (void *)&mask,
11886 void *actions[2] = { 0 };
11887 struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11888 struct mlx5_devx_obj *dcs = NULL;
11889 void *matcher = NULL;
11893 tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11896 dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11900 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11903 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11907 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11908 (dest_tbl->obj, &actions[1]);
11911 dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11912 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11916 ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11920 * If batch counters with offset are not supported, the driver does not
11921 * validate the invalid offset value and flow creation should succeed.
11922 * In this case, batch counters are not supported in the root table.
11924 * Otherwise, if flow creation fails, counter offset is supported.
11927 DRV_LOG(INFO, "Batch counter is not supported in root "
11928 "table. Switch to fallback mode.");
11929 rte_errno = ENOTSUP;
11931 claim_zero(mlx5_flow_os_destroy_flow(flow));
11933 /* Check the matcher to make sure validation failed at flow creation. */
11934 if (!matcher || errno != EINVAL)
11935 DRV_LOG(ERR, "Unexpected error in counter offset "
11936 "support detection");
11939 for (i = 0; i < 2; i++) {
11941 claim_zero(mlx5_flow_os_destroy_flow_action
11945 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11947 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11949 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
11951 claim_zero(mlx5_devx_cmd_destroy(dcs));
11956 * Query a devx counter.
11959 * Pointer to the Ethernet device structure.
11961 * Index to the flow counter.
11963 * Set to clear the counter statistics.
11965 * The statistics value of packets.
11966 * @param[out] bytes
11967 * The statistics value of bytes.
11970 * 0 on success, otherwise return -1.
11973 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
11974 uint64_t *pkts, uint64_t *bytes)
11976 struct mlx5_priv *priv = dev->data->dev_private;
11977 struct mlx5_flow_counter *cnt;
11978 uint64_t inn_pkts, inn_bytes;
11981 if (!priv->config.devx)
11984 ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
11987 cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
11988 *pkts = inn_pkts - cnt->hits;
11989 *bytes = inn_bytes - cnt->bytes;
11991 cnt->hits = inn_pkts;
11992 cnt->bytes = inn_bytes;
11998 * Get aged-out flows.
12001 * Pointer to the Ethernet device structure.
12002 * @param[in] context
12003 * The address of an array of pointers to the aged-out flows contexts.
12004 * @param[in] nb_contexts
12005 * The length of context array pointers.
12006 * @param[out] error
12007 * Perform verbose error reporting if not NULL. Initialized in case of
12011 * The number of contexts retrieved on success, otherwise a negative errno value.
12012 * If nb_contexts is 0, return the number of all aged contexts.
12013 * If nb_contexts is not 0, return the number of aged flows reported
12014 * in the context array.
12015 * @note: only stub for now
12018 flow_get_aged_flows(struct rte_eth_dev *dev,
12020 uint32_t nb_contexts,
12021 struct rte_flow_error *error)
12023 struct mlx5_priv *priv = dev->data->dev_private;
12024 struct mlx5_age_info *age_info;
12025 struct mlx5_age_param *age_param;
12026 struct mlx5_flow_counter *counter;
12029 if (nb_contexts && !context)
12030 return rte_flow_error_set(error, EINVAL,
12031 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12033 "Should assign at least one flow or"
12034 " context to get if nb_contexts != 0");
12035 age_info = GET_PORT_AGE_INFO(priv);
12036 rte_spinlock_lock(&age_info->aged_sl);
12037 TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12040 age_param = MLX5_CNT_TO_AGE(counter);
12041 context[nb_flows - 1] = age_param->context;
12042 if (!(--nb_contexts))
12046 rte_spinlock_unlock(&age_info->aged_sl);
12047 MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
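/*
 * Usage sketch (application side, not driver code): draining the aged-out
 * flow contexts collected by flow_get_aged_flows() above. A first call with
 * nb_contexts == 0 returns how many contexts are pending; applications
 * typically trigger this from an RTE_ETH_EVENT_FLOW_AGED callback. calloc()
 * and free() need <stdlib.h>.
 */
#if 0
static void
example_drain_aged_flows(uint16_t port_id)
{
	struct rte_flow_error error;
	int total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);

	if (total <= 0)
		return;
	void **contexts = calloc(total, sizeof(*contexts));
	if (contexts == NULL)
		return;
	total = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	for (int i = 0; i < total; i++) {
		/* contexts[i] is the AGE action's user context pointer. */
	}
	free(contexts);
}
#endif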
12052 * Mutex-protected thunk to lock-free __flow_dv_translate().
12055 flow_dv_translate(struct rte_eth_dev *dev,
12056 struct mlx5_flow *dev_flow,
12057 const struct rte_flow_attr *attr,
12058 const struct rte_flow_item items[],
12059 const struct rte_flow_action actions[],
12060 struct rte_flow_error *error)
12064 flow_dv_shared_lock(dev);
12065 ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12066 flow_dv_shared_unlock(dev);
12071 * Mutex-protected thunk to lock-free __flow_dv_apply().
12074 flow_dv_apply(struct rte_eth_dev *dev,
12075 struct rte_flow *flow,
12076 struct rte_flow_error *error)
12080 flow_dv_shared_lock(dev);
12081 ret = __flow_dv_apply(dev, flow, error);
12082 flow_dv_shared_unlock(dev);
12087 * Mutex-protected thunk to lock-free __flow_dv_remove().
12090 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12092 flow_dv_shared_lock(dev);
12093 __flow_dv_remove(dev, flow);
12094 flow_dv_shared_unlock(dev);
12098 * Mutex-protected thunk to lock-free __flow_dv_destroy().
12101 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12103 flow_dv_shared_lock(dev);
12104 __flow_dv_destroy(dev, flow);
12105 flow_dv_shared_unlock(dev);
12109 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12112 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12116 flow_dv_shared_lock(dev);
12117 cnt = flow_dv_counter_alloc(dev, 0);
12118 flow_dv_shared_unlock(dev);
12123 * Mutex-protected thunk to lock-free flow_dv_counter_release().
12126 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
12128 flow_dv_shared_lock(dev);
12129 flow_dv_counter_release(dev, cnt);
12130 flow_dv_shared_unlock(dev);
12134 * Validate shared action.
12135 * Dispatcher for action type specific validation.
12138 * Pointer to the Ethernet device structure.
12140 * Shared action configuration.
12141 * @param[in] action
12142 * The shared action object to validate.
12143 * @param[out] error
12144 * Perform verbose error reporting if not NULL. Initialized in case of
12148 * 0 on success, otherwise negative errno value.
12151 flow_dv_action_validate(struct rte_eth_dev *dev,
12152 const struct rte_flow_shared_action_conf *conf,
12153 const struct rte_flow_action *action,
12154 struct rte_flow_error *error)
12156 RTE_SET_USED(conf);
12157 switch (action->type) {
12158 case RTE_FLOW_ACTION_TYPE_RSS:
12159 return mlx5_validate_action_rss(dev, action, error);
12161 return rte_flow_error_set(error, ENOTSUP,
12162 RTE_FLOW_ERROR_TYPE_ACTION,
12164 "action type not supported");
12169 * Mutex-protected thunk to lock-free __flow_dv_action_create().
12171 static struct rte_flow_shared_action *
12172 flow_dv_action_create(struct rte_eth_dev *dev,
12173 const struct rte_flow_shared_action_conf *conf,
12174 const struct rte_flow_action *action,
12175 struct rte_flow_error *error)
12177 struct rte_flow_shared_action *shared_action = NULL;
12179 flow_dv_shared_lock(dev);
12180 shared_action = __flow_dv_action_create(dev, conf, action, error);
12181 flow_dv_shared_unlock(dev);
12182 return shared_action;
12186 * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
12189 flow_dv_action_destroy(struct rte_eth_dev *dev,
12190 struct rte_flow_shared_action *action,
12191 struct rte_flow_error *error)
12195 flow_dv_shared_lock(dev);
12196 ret = __flow_dv_action_destroy(dev, action, error);
12197 flow_dv_shared_unlock(dev);
12202 * Mutex-protected thunk to lock-free __flow_dv_action_update().
12205 flow_dv_action_update(struct rte_eth_dev *dev,
12206 struct rte_flow_shared_action *action,
12207 const void *action_conf,
12208 struct rte_flow_error *error)
12212 flow_dv_shared_lock(dev);
12213 ret = __flow_dv_action_update(dev, action, action_conf,
12215 flow_dv_shared_unlock(dev);
12220 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12222 struct mlx5_priv *priv = dev->data->dev_private;
12225 if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12226 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12231 if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12232 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12236 if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12237 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
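/*
 * Usage sketch (application side, not driver code): requesting a hardware
 * sync of the steering domains, which reaches flow_dv_sync_domain() above.
 * This assumes the experimental rte_pmd_mlx5_sync_flow() helper and the
 * MLX5_DOMAIN_BIT_* flags declared in rte_pmd_mlx5.h in this tree.
 */
#if 0
static int
example_sync_all_domains(uint16_t port_id)
{
	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX |
			   MLX5_DOMAIN_BIT_FDB;

	return rte_pmd_mlx5_sync_flow(port_id, domains);
}
#endif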
12244 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12245 .validate = flow_dv_validate,
12246 .prepare = flow_dv_prepare,
12247 .translate = flow_dv_translate,
12248 .apply = flow_dv_apply,
12249 .remove = flow_dv_remove,
12250 .destroy = flow_dv_destroy,
12251 .query = flow_dv_query,
12252 .create_mtr_tbls = flow_dv_create_mtr_tbl,
12253 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12254 .create_policer_rules = flow_dv_create_policer_rules,
12255 .destroy_policer_rules = flow_dv_destroy_policer_rules,
12256 .counter_alloc = flow_dv_counter_allocate,
12257 .counter_free = flow_dv_counter_free,
12258 .counter_query = flow_dv_counter_query,
12259 .get_aged_flows = flow_get_aged_flows,
12260 .action_validate = flow_dv_action_validate,
12261 .action_create = flow_dv_action_create,
12262 .action_destroy = flow_dv_action_destroy,
12263 .action_update = flow_dv_action_update,
12264 .sync_domain = flow_dv_sync_domain,
12267 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */