1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
20 #include <rte_vxlan.h>
22 #include <rte_eal_paging.h>
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
28 #include <mlx5_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75 struct mlx5_flow_tbl_resource *tbl);
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79 uint32_t encap_decap_idx);
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
86 * Initialize flow attributes structure according to flow items' types.
88 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
89 * mode. For tunnel mode, the items to be modified are the outermost ones.
92 * Pointer to item specification.
94 * Pointer to flow attributes structure.
96 * Pointer to the sub flow.
97 * @param[in] tunnel_decap
98 * Whether action is after tunnel decapsulation.
101 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
102 struct mlx5_flow *dev_flow, bool tunnel_decap)
104 uint64_t layers = dev_flow->handle->layers;
107 * If layers is already initialized, it means this dev_flow is the
108 * suffix flow, the layers flags is set by the prefix flow. Need to
109 * use the layer flags from prefix flow as the suffix flow may not
110 * have the user defined items as the flow is split.
113 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
115 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
117 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
119 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
124 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
125 uint8_t next_protocol = 0xff;
126 switch (item->type) {
127 case RTE_FLOW_ITEM_TYPE_GRE:
128 case RTE_FLOW_ITEM_TYPE_NVGRE:
129 case RTE_FLOW_ITEM_TYPE_VXLAN:
130 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
131 case RTE_FLOW_ITEM_TYPE_GENEVE:
132 case RTE_FLOW_ITEM_TYPE_MPLS:
136 case RTE_FLOW_ITEM_TYPE_IPV4:
139 if (item->mask != NULL &&
140 ((const struct rte_flow_item_ipv4 *)
141 item->mask)->hdr.next_proto_id)
143 ((const struct rte_flow_item_ipv4 *)
144 (item->spec))->hdr.next_proto_id &
145 ((const struct rte_flow_item_ipv4 *)
146 (item->mask))->hdr.next_proto_id;
147 if ((next_protocol == IPPROTO_IPIP ||
148 next_protocol == IPPROTO_IPV6) && tunnel_decap)
151 case RTE_FLOW_ITEM_TYPE_IPV6:
154 if (item->mask != NULL &&
155 ((const struct rte_flow_item_ipv6 *)
156 item->mask)->hdr.proto)
158 ((const struct rte_flow_item_ipv6 *)
159 (item->spec))->hdr.proto &
160 ((const struct rte_flow_item_ipv6 *)
161 (item->mask))->hdr.proto;
162 if ((next_protocol == IPPROTO_IPIP ||
163 next_protocol == IPPROTO_IPV6) && tunnel_decap)
166 case RTE_FLOW_ITEM_TYPE_UDP:
170 case RTE_FLOW_ITEM_TYPE_TCP:
182 * Convert rte_mtr_color to mlx5 color.
/* Map an rte_mtr meter color to the mlx5 internal color encoding. */
191 rte_col_2_mlx5_col(enum rte_color rcol)
194 case RTE_COLOR_GREEN:
195 return MLX5_FLOW_COLOR_GREEN;
196 case RTE_COLOR_YELLOW:
197 return MLX5_FLOW_COLOR_YELLOW;
199 return MLX5_FLOW_COLOR_RED;
/* Any value outside the known colors falls through to UNDEFINED. */
203 return MLX5_FLOW_COLOR_UNDEFINED;
/*
 * Descriptor of one protocol-header field as consumed by the
 * modify-header conversion routine: byte size, byte offset within the
 * header, and the corresponding HW modification field id.
 */
206 struct field_modify_info {
207 uint32_t size; /* Size of field in protocol header, in bytes. */
208 uint32_t offset; /* Offset of field in protocol header, in bytes. */
209 enum mlx5_modification_field id;
/* Ethernet: each MAC is split into a 32-bit high part (bits 47..16)
 * and a 16-bit low part (bits 15..0). */
212 struct field_modify_info modify_eth[] = {
213 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
214 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
215 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
216 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* Outer VLAN VID — note the size below is expressed in BITS, not
 * bytes, unlike every other table here (see original comment). */
220 struct field_modify_info modify_vlan_out_first_vid[] = {
221 /* Size in bits !!! */
222 {12, 0, MLX5_MODI_OUT_FIRST_VID},
/* IPv4 header fields reachable by modify-header actions. */
226 struct field_modify_info modify_ipv4[] = {
227 {1, 1, MLX5_MODI_OUT_IP_DSCP},
228 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
229 {4, 12, MLX5_MODI_OUT_SIPV4},
230 {4, 16, MLX5_MODI_OUT_DIPV4},
/* IPv6: the 128-bit addresses are covered by four 32-bit chunks each. */
234 struct field_modify_info modify_ipv6[] = {
235 {1, 0, MLX5_MODI_OUT_IP_DSCP},
236 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
237 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
238 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
239 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
240 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
241 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
242 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
243 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
244 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* UDP source/destination ports. */
248 struct field_modify_info modify_udp[] = {
249 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
250 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* TCP ports plus sequence/acknowledgment numbers. */
254 struct field_modify_info modify_tcp[] = {
255 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
256 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
257 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
258 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
/*
 * Record IP-over-IP tunnel layer flags for an IPv4/IPv6 item whose
 * next-protocol indicates an encapsulated IP header.
 */
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264 uint8_t next_protocol, uint64_t *item_flags,
267 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
/* IPv4-in-IP tunnel. */
269 if (next_protocol == IPPROTO_IPIP) {
270 *item_flags |= MLX5_FLOW_LAYER_IPIP;
/* IPv6-in-IP tunnel. */
273 if (next_protocol == IPPROTO_IPV6) {
274 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
280 * Acquire the synchronizing object to protect multithreaded access
281 * to shared dv context. Lock occurs only if context is actually
282 * shared, i.e. we have multiport IB device and representors are
286 * Pointer to the rte_eth_dev structure.
/*
 * Lock the shared DV context mutex, but only when the context is
 * actually shared between ports (refcnt > 1) — single-port devices
 * skip the mutex entirely.
 */
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
291 struct mlx5_priv *priv = dev->data->dev_private;
292 struct mlx5_dev_ctx_shared *sh = priv->sh;
294 if (sh->refcnt > 1) {
297 ret = pthread_mutex_lock(&sh->dv_mutex);
/* Counterpart of flow_dv_shared_lock(): release dv_mutex when shared. */
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
306 struct mlx5_priv *priv = dev->data->dev_private;
307 struct mlx5_dev_ctx_shared *sh = priv->sh;
309 if (sh->refcnt > 1) {
312 ret = pthread_mutex_unlock(&sh->dv_mutex);
318 /* Update VLAN's VID/PCP based on input rte_flow_action.
321 * Pointer to struct rte_flow_action.
323 * Pointer to struct rte_vlan_hdr.
/*
 * Apply an OF_SET_VLAN_PCP or OF_SET_VLAN_VID action to a VLAN header:
 * only the relevant TCI bits (PCP = bits 15..13, VID = bits 11..0) are
 * replaced, the rest of vlan_tci is preserved.
 */
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327 struct rte_vlan_hdr *vlan)
330 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
332 ((const struct rte_flow_action_of_set_vlan_pcp *)
333 action->conf)->vlan_pcp;
/* Move PCP into its TCI position and splice it in. */
334 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336 vlan->vlan_tci |= vlan_tci;
337 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
/* The action carries the VID big-endian; convert before merging. */
338 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339 vlan->vlan_tci |= rte_be_to_cpu_16
340 (((const struct rte_flow_action_of_set_vlan_vid *)
341 action->conf)->vlan_vid);
346 * Fetch 1, 2, 3 or 4 byte field from the byte array
347 * and return as unsigned integer in host-endian format.
350 * Pointer to data array.
352 * Size of field to extract.
355 * converted field in host endian format.
/*
 * Load a 1/2/3/4-byte big-endian field from a byte array and return it
 * as a host-endian uint32_t (switch-case skeleton elided in this
 * extract; the visible lines are the 2-, 3- and 4-byte loads).
 */
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
367 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
/* 3-byte case: 16-bit big-endian head, then the trailing byte. */
370 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371 ret = (ret << 8) | *(data + sizeof(uint16_t));
374 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
385 * Convert modify-header action to DV specification.
387 * Data length of each action is determined by provided field description
388 * and the item mask. Data bit offset and width of each action is determined
389 * by provided item mask.
392 * Pointer to item specification.
394 * Pointer to field modification information.
395 * For MLX5_MODIFICATION_TYPE_SET specifies destination field.
396 * For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
397 * For MLX5_MODIFICATION_TYPE_COPY specifies source field.
399 * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
400 * Negative offset value sets the same offset as source offset.
401 * size field is ignored, value is taken from source field.
402 * @param[in,out] resource
403 * Pointer to the modify-header resource.
405 * Type of modification.
407 * Pointer to the error structure.
410 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Translate an item + field-descriptor table into one or more HW
 * modification commands appended to @resource.  The mask deduces the
 * actual bit offset/width of each command.  NOTE(review): loop header
 * and parts of the command initializer are elided in this extract.
 */
413 flow_dv_convert_modify_action(struct rte_flow_item *item,
414 struct field_modify_info *field,
415 struct field_modify_info *dcopy,
416 struct mlx5_flow_dv_modify_hdr_resource *resource,
417 uint32_t type, struct rte_flow_error *error)
419 uint32_t i = resource->actions_num;
420 struct mlx5_modification_cmd *actions = resource->actions;
423 * The item and mask are provided in big-endian format.
424 * The fields should be presented as in big-endian format either.
425 * Mask must be always present, it defines the actual field width.
427 MLX5_ASSERT(item->mask);
428 MLX5_ASSERT(field->size);
/* Hard limit on commands per modify-header resource. */
435 if (i >= MLX5_MAX_MODIFY_NUM)
436 return rte_flow_error_set(error, EINVAL,
437 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
438 "too many items to modify");
439 /* Fetch variable byte size mask from the array. */
440 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
441 field->offset, field->size)
446 /* Deduce actual data width in bits from mask value. */
447 off_b = rte_bsf32(mask);
448 size_b = sizeof(uint32_t) * CHAR_BIT -
449 off_b - __builtin_clz(mask);
/* A full 32-bit width is encoded as length 0 — presumably the HW
 * convention for "entire dword"; confirm against the PRM. */
451 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
452 actions[i] = (struct mlx5_modification_cmd) {
458 /* Convert entire record to expected big-endian format. */
459 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
460 if (type == MLX5_MODIFICATION_TYPE_COPY) {
/* COPY: dcopy describes the destination field; a negative offset
 * means "mirror the source offset". */
462 actions[i].dst_field = dcopy->id;
463 actions[i].dst_offset =
464 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
465 /* Convert entire record to big-endian format. */
466 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
/* SET/ADD: immediate data comes from item->spec, masked and
 * right-aligned to the deduced bit offset. */
468 MLX5_ASSERT(item->spec);
469 data = flow_dv_fetch_field((const uint8_t *)item->spec +
470 field->offset, field->size);
471 /* Shift out the trailing masked bits from data. */
472 data = (data & mask) >> off_b;
473 actions[i].data1 = rte_cpu_to_be_32(data);
477 } while (field->size);
/* No command was emitted — every field mask was zero. */
478 if (resource->actions_num == i)
479 return rte_flow_error_set(error, EINVAL,
480 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
481 "invalid modification flow item");
482 resource->actions_num = i;
487 * Convert modify-header set IPv4 address action to DV specification.
489 * @param[in,out] resource
490 * Pointer to the modify-header resource.
492 * Pointer to action specification.
494 * Pointer to the error structure.
497 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv4 item carrying the new source or destination
 * address and hand it to the generic modify-action converter (SET).
 */
500 flow_dv_convert_action_modify_ipv4
501 (struct mlx5_flow_dv_modify_hdr_resource *resource,
502 const struct rte_flow_action *action,
503 struct rte_flow_error *error)
505 const struct rte_flow_action_set_ipv4 *conf =
506 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508 struct rte_flow_item_ipv4 ipv4;
509 struct rte_flow_item_ipv4 ipv4_mask;
511 memset(&ipv4, 0, sizeof(ipv4));
512 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
/* Select src vs dst from the action type; the default rte_flow item
 * mask marks the whole address field as significant. */
513 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514 ipv4.hdr.src_addr = conf->ipv4_addr;
515 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
517 ipv4.hdr.dst_addr = conf->ipv4_addr;
518 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
521 item.mask = &ipv4_mask;
522 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523 MLX5_MODIFICATION_TYPE_SET, error);
527 * Convert modify-header set IPv6 address action to DV specification.
529 * @param[in,out] resource
530 * Pointer to the modify-header resource.
532 * Pointer to action specification.
534 * Pointer to the error structure.
537 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic IPv6 item carrying the new source or destination
 * address and hand it to the generic modify-action converter (SET).
 */
540 flow_dv_convert_action_modify_ipv6
541 (struct mlx5_flow_dv_modify_hdr_resource *resource,
542 const struct rte_flow_action *action,
543 struct rte_flow_error *error)
545 const struct rte_flow_action_set_ipv6 *conf =
546 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548 struct rte_flow_item_ipv6 ipv6;
549 struct rte_flow_item_ipv6 ipv6_mask;
551 memset(&ipv6, 0, sizeof(ipv6));
552 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
/* 128-bit addresses are copied bytewise; the default item mask marks
 * the whole address as significant. */
553 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555 sizeof(ipv6.hdr.src_addr));
556 memcpy(&ipv6_mask.hdr.src_addr,
557 &rte_flow_item_ipv6_mask.hdr.src_addr,
558 sizeof(ipv6.hdr.src_addr));
560 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561 sizeof(ipv6.hdr.dst_addr));
562 memcpy(&ipv6_mask.hdr.dst_addr,
563 &rte_flow_item_ipv6_mask.hdr.dst_addr,
564 sizeof(ipv6.hdr.dst_addr));
567 item.mask = &ipv6_mask;
568 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569 MLX5_MODIFICATION_TYPE_SET, error);
573 * Convert modify-header set MAC address action to DV specification.
575 * @param[in,out] resource
576 * Pointer to the modify-header resource.
578 * Pointer to action specification.
580 * Pointer to the error structure.
583 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a synthetic Ethernet item carrying the new source or
 * destination MAC and hand it to the generic converter (SET).
 */
586 flow_dv_convert_action_modify_mac
587 (struct mlx5_flow_dv_modify_hdr_resource *resource,
588 const struct rte_flow_action *action,
589 struct rte_flow_error *error)
591 const struct rte_flow_action_set_mac *conf =
592 (const struct rte_flow_action_set_mac *)(action->conf);
593 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594 struct rte_flow_item_eth eth;
595 struct rte_flow_item_eth eth_mask;
597 memset(&eth, 0, sizeof(eth));
598 memset(&eth_mask, 0, sizeof(eth_mask));
/* Select src vs dst from the action type; copy the 6 address bytes
 * and the corresponding full default mask. */
599 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
601 sizeof(eth.src.addr_bytes));
602 memcpy(&eth_mask.src.addr_bytes,
603 &rte_flow_item_eth_mask.src.addr_bytes,
604 sizeof(eth_mask.src.addr_bytes));
606 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
607 sizeof(eth.dst.addr_bytes));
608 memcpy(&eth_mask.dst.addr_bytes,
609 &rte_flow_item_eth_mask.dst.addr_bytes,
610 sizeof(eth_mask.dst.addr_bytes));
613 item.mask = &eth_mask;
614 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615 MLX5_MODIFICATION_TYPE_SET, error);
619 * Convert modify-header set VLAN VID action to DV specification.
621 * @param[in,out] resource
622 * Pointer to the modify-header resource.
624 * Pointer to action specification.
626 * Pointer to the error structure.
629 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Emit a SET command for the outer VLAN VID directly (bypassing the
 * generic converter because the VID field descriptor is expressed in
 * bits, not bytes).
 */
632 flow_dv_convert_action_modify_vlan_vid
633 (struct mlx5_flow_dv_modify_hdr_resource *resource,
634 const struct rte_flow_action *action,
635 struct rte_flow_error *error)
637 const struct rte_flow_action_of_set_vlan_vid *conf =
638 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
639 int i = resource->actions_num;
640 struct mlx5_modification_cmd *actions = resource->actions;
641 struct field_modify_info *field = modify_vlan_out_first_vid;
643 if (i >= MLX5_MAX_MODIFY_NUM)
644 return rte_flow_error_set(error, EINVAL,
645 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
646 "too many items to modify");
647 actions[i] = (struct mlx5_modification_cmd) {
648 .action_type = MLX5_MODIFICATION_TYPE_SET,
650 .length = field->size,
651 .offset = field->offset,
653 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
/* The VID (already big-endian from the action) goes into the upper
 * 16 bits of data1; no byte-swap of data1 here, unlike the generic
 * path — presumably intentional for this field, confirm vs PRM. */
654 actions[i].data1 = conf->vlan_vid;
655 actions[i].data1 = actions[i].data1 << 16;
656 resource->actions_num = ++i;
661 * Convert modify-header set TP action to DV specification.
663 * @param[in,out] resource
664 * Pointer to the modify-header resource.
666 * Pointer to action specification.
668 * Pointer to rte_flow_item objects list.
670 * Pointer to flow attributes structure.
671 * @param[in] dev_flow
672 * Pointer to the sub flow.
673 * @param[in] tunnel_decap
674 * Whether action is after tunnel decapsulation.
676 * Pointer to the error structure.
679 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_TP_SRC/SET_TP_DST action to a SET command.  Uses
 * flow_dv_attr_init() to decide whether the flow's transport layer is
 * UDP or TCP and builds the matching synthetic item.  NOTE(review):
 * the UDP/TCP branch headers and `field = modify_udp/tcp` assignments
 * are elided in this extract.
 */
682 flow_dv_convert_action_modify_tp
683 (struct mlx5_flow_dv_modify_hdr_resource *resource,
684 const struct rte_flow_action *action,
685 const struct rte_flow_item *items,
686 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
687 bool tunnel_decap, struct rte_flow_error *error)
689 const struct rte_flow_action_set_tp *conf =
690 (const struct rte_flow_action_set_tp *)(action->conf);
691 struct rte_flow_item item;
692 struct rte_flow_item_udp udp;
693 struct rte_flow_item_udp udp_mask;
694 struct rte_flow_item_tcp tcp;
695 struct rte_flow_item_tcp tcp_mask;
696 struct field_modify_info *field;
/* Classify the flow's layers (outer, or inner after decap). */
699 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
/* UDP branch: set src or dst port per the action type. */
701 memset(&udp, 0, sizeof(udp));
702 memset(&udp_mask, 0, sizeof(udp_mask));
703 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
704 udp.hdr.src_port = conf->port;
705 udp_mask.hdr.src_port =
706 rte_flow_item_udp_mask.hdr.src_port;
708 udp.hdr.dst_port = conf->port;
709 udp_mask.hdr.dst_port =
710 rte_flow_item_udp_mask.hdr.dst_port;
712 item.type = RTE_FLOW_ITEM_TYPE_UDP;
714 item.mask = &udp_mask;
/* TCP branch: same shape for the TCP port fields. */
717 MLX5_ASSERT(attr->tcp);
718 memset(&tcp, 0, sizeof(tcp));
719 memset(&tcp_mask, 0, sizeof(tcp_mask));
720 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
721 tcp.hdr.src_port = conf->port;
722 tcp_mask.hdr.src_port =
723 rte_flow_item_tcp_mask.hdr.src_port;
725 tcp.hdr.dst_port = conf->port;
726 tcp_mask.hdr.dst_port =
727 rte_flow_item_tcp_mask.hdr.dst_port;
729 item.type = RTE_FLOW_ITEM_TYPE_TCP;
731 item.mask = &tcp_mask;
734 return flow_dv_convert_modify_action(&item, field, NULL, resource,
735 MLX5_MODIFICATION_TYPE_SET, error);
739 * Convert modify-header set TTL action to DV specification.
741 * @param[in,out] resource
742 * Pointer to the modify-header resource.
744 * Pointer to action specification.
746 * Pointer to rte_flow_item objects list.
748 * Pointer to flow attributes structure.
749 * @param[in] dev_flow
750 * Pointer to the sub flow.
751 * @param[in] tunnel_decap
752 * Whether action is after tunnel decapsulation.
754 * Pointer to the error structure.
757 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_TTL action to a SET command on IPv4 TTL or IPv6
 * hop-limit, chosen from the flow's L3 attribute.  NOTE(review): the
 * IPv4/IPv6 branch headers and `field = modify_ipv4/ipv6` assignments
 * are elided in this extract.
 */
760 flow_dv_convert_action_modify_ttl
761 (struct mlx5_flow_dv_modify_hdr_resource *resource,
762 const struct rte_flow_action *action,
763 const struct rte_flow_item *items,
764 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
765 bool tunnel_decap, struct rte_flow_error *error)
767 const struct rte_flow_action_set_ttl *conf =
768 (const struct rte_flow_action_set_ttl *)(action->conf);
769 struct rte_flow_item item;
770 struct rte_flow_item_ipv4 ipv4;
771 struct rte_flow_item_ipv4 ipv4_mask;
772 struct rte_flow_item_ipv6 ipv6;
773 struct rte_flow_item_ipv6 ipv6_mask;
774 struct field_modify_info *field;
/* Classify the flow's layers (outer, or inner after decap). */
777 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
/* IPv4: write the new TTL, full 8-bit mask. */
779 memset(&ipv4, 0, sizeof(ipv4));
780 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
781 ipv4.hdr.time_to_live = conf->ttl_value;
782 ipv4_mask.hdr.time_to_live = 0xFF;
783 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
785 item.mask = &ipv4_mask;
/* IPv6: same, targeting the hop-limit field. */
788 MLX5_ASSERT(attr->ipv6);
789 memset(&ipv6, 0, sizeof(ipv6));
790 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
791 ipv6.hdr.hop_limits = conf->ttl_value;
792 ipv6_mask.hdr.hop_limits = 0xFF;
793 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
795 item.mask = &ipv6_mask;
798 return flow_dv_convert_modify_action(&item, field, NULL, resource,
799 MLX5_MODIFICATION_TYPE_SET, error);
803 * Convert modify-header decrement TTL action to DV specification.
805 * @param[in,out] resource
806 * Pointer to the modify-header resource.
808 * Pointer to action specification.
810 * Pointer to rte_flow_item objects list.
812 * Pointer to flow attributes structure.
813 * @param[in] dev_flow
814 * Pointer to the sub flow.
815 * @param[in] tunnel_decap
816 * Whether action is after tunnel decapsulation.
818 * Pointer to the error structure.
821 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a DEC_TTL action: TTL/hop-limit decrement is implemented as
 * an ADD of 0xFF (add 255 == subtract 1 modulo 256 on the 8-bit
 * field).  NOTE(review): branch headers and `field =` assignments are
 * elided in this extract.
 */
824 flow_dv_convert_action_modify_dec_ttl
825 (struct mlx5_flow_dv_modify_hdr_resource *resource,
826 const struct rte_flow_item *items,
827 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
828 bool tunnel_decap, struct rte_flow_error *error)
830 struct rte_flow_item item;
831 struct rte_flow_item_ipv4 ipv4;
832 struct rte_flow_item_ipv4 ipv4_mask;
833 struct rte_flow_item_ipv6 ipv6;
834 struct rte_flow_item_ipv6 ipv6_mask;
835 struct field_modify_info *field;
/* Classify the flow's layers (outer, or inner after decap). */
838 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
/* IPv4: ADD 0xFF to the 8-bit TTL to decrement it by one. */
840 memset(&ipv4, 0, sizeof(ipv4));
841 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
842 ipv4.hdr.time_to_live = 0xFF;
843 ipv4_mask.hdr.time_to_live = 0xFF;
844 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
846 item.mask = &ipv4_mask;
/* IPv6: same trick on the hop-limit field. */
849 MLX5_ASSERT(attr->ipv6);
850 memset(&ipv6, 0, sizeof(ipv6));
851 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
852 ipv6.hdr.hop_limits = 0xFF;
853 ipv6_mask.hdr.hop_limits = 0xFF;
854 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
856 item.mask = &ipv6_mask;
859 return flow_dv_convert_modify_action(&item, field, NULL, resource,
860 MLX5_MODIFICATION_TYPE_ADD, error);
864 * Convert modify-header increment/decrement TCP Sequence number
865 * to DV specification.
867 * @param[in,out] resource
868 * Pointer to the modify-header resource.
870 * Pointer to action specification.
872 * Pointer to the error structure.
875 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert INC/DEC_TCP_SEQ into an ADD command on the TCP sequence
 * number.  NOTE(review): the statement negating `value` for the
 * decrement case is elided in this extract (see inline comment).
 */
878 flow_dv_convert_action_modify_tcp_seq
879 (struct mlx5_flow_dv_modify_hdr_resource *resource,
880 const struct rte_flow_action *action,
881 struct rte_flow_error *error)
883 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884 uint64_t value = rte_be_to_cpu_32(*conf);
885 struct rte_flow_item item;
886 struct rte_flow_item_tcp tcp;
887 struct rte_flow_item_tcp tcp_mask;
889 memset(&tcp, 0, sizeof(tcp));
890 memset(&tcp_mask, 0, sizeof(tcp_mask));
891 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
893 * The HW has no decrement operation, only increment operation.
894 * To simulate decrement X from Y using increment operation
895 * we need to add UINT32_MAX X times to Y.
896 * Each adding of UINT32_MAX decrements Y by 1.
/* Full 32-bit mask: the entire sequence number participates. */
899 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901 item.type = RTE_FLOW_ITEM_TYPE_TCP;
903 item.mask = &tcp_mask;
904 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905 MLX5_MODIFICATION_TYPE_ADD, error);
909 * Convert modify-header increment/decrement TCP Acknowledgment number
910 * to DV specification.
912 * @param[in,out] resource
913 * Pointer to the modify-header resource.
915 * Pointer to action specification.
917 * Pointer to the error structure.
920 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert INC/DEC_TCP_ACK into an ADD command on the TCP
 * acknowledgment number — mirror image of the tcp_seq converter.
 * NOTE(review): the decrement negation statement is elided here too.
 */
923 flow_dv_convert_action_modify_tcp_ack
924 (struct mlx5_flow_dv_modify_hdr_resource *resource,
925 const struct rte_flow_action *action,
926 struct rte_flow_error *error)
928 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929 uint64_t value = rte_be_to_cpu_32(*conf);
930 struct rte_flow_item item;
931 struct rte_flow_item_tcp tcp;
932 struct rte_flow_item_tcp tcp_mask;
934 memset(&tcp, 0, sizeof(tcp));
935 memset(&tcp_mask, 0, sizeof(tcp_mask));
936 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
938 * The HW has no decrement operation, only increment operation.
939 * To simulate decrement X from Y using increment operation
940 * we need to add UINT32_MAX X times to Y.
941 * Each adding of UINT32_MAX decrements Y by 1.
/* Full 32-bit mask: the entire ack number participates. */
944 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946 item.type = RTE_FLOW_ITEM_TYPE_TCP;
948 item.mask = &tcp_mask;
949 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950 MLX5_MODIFICATION_TYPE_ADD, error);
/* Lookup table: metadata register enum -> HW modification field id. */
953 static enum mlx5_modification_field reg_to_field[] = {
954 [REG_NON] = MLX5_MODI_OUT_NONE,
955 [REG_A] = MLX5_MODI_META_DATA_REG_A,
956 [REG_B] = MLX5_MODI_META_DATA_REG_B,
957 [REG_C_0] = MLX5_MODI_META_REG_C_0,
958 [REG_C_1] = MLX5_MODI_META_REG_C_1,
959 [REG_C_2] = MLX5_MODI_META_REG_C_2,
960 [REG_C_3] = MLX5_MODI_META_REG_C_3,
961 [REG_C_4] = MLX5_MODI_META_REG_C_4,
962 [REG_C_5] = MLX5_MODI_META_REG_C_5,
963 [REG_C_6] = MLX5_MODI_META_REG_C_6,
964 [REG_C_7] = MLX5_MODI_META_REG_C_7,
968 * Convert register set to DV specification.
970 * @param[in,out] resource
971 * Pointer to the modify-header resource.
973 * Pointer to action specification.
975 * Pointer to the error structure.
978 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Append a SET command writing an immediate value into a metadata
 * register, built directly without the generic converter.
 */
981 flow_dv_convert_action_set_reg
982 (struct mlx5_flow_dv_modify_hdr_resource *resource,
983 const struct rte_flow_action *action,
984 struct rte_flow_error *error)
986 const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
987 struct mlx5_modification_cmd *actions = resource->actions;
988 uint32_t i = resource->actions_num;
990 if (i >= MLX5_MAX_MODIFY_NUM)
991 return rte_flow_error_set(error, EINVAL,
992 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
993 "too many items to modify");
/* A valid, mapped register id is a precondition. */
994 MLX5_ASSERT(conf->id != REG_NON);
995 MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
996 actions[i] = (struct mlx5_modification_cmd) {
997 .action_type = MLX5_MODIFICATION_TYPE_SET,
998 .field = reg_to_field[conf->id],
/* Whole command record converted to big-endian for the HW. */
1000 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1001 actions[i].data1 = rte_cpu_to_be_32(conf->data);
1003 resource->actions_num = i;
1008 * Convert SET_TAG action to DV specification.
1011 * Pointer to the rte_eth_dev structure.
1012 * @param[in,out] resource
1013 * Pointer to the modify-header resource.
1015 * Pointer to action specification.
1017 * Pointer to the error structure.
1020 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert an application SET_TAG action into a SET on the metadata
 * register allocated for the given tag index.  NOTE(review): the item
 * spec/mask wiring inside the initializer is elided in this extract.
 */
1023 flow_dv_convert_action_set_tag
1024 (struct rte_eth_dev *dev,
1025 struct mlx5_flow_dv_modify_hdr_resource *resource,
1026 const struct rte_flow_action_set_tag *conf,
1027 struct rte_flow_error *error)
1029 rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030 rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031 struct rte_flow_item item = {
1035 struct field_modify_info reg_c_x[] = {
1038 enum mlx5_modification_field reg_type;
/* Resolve which register backs application tag index conf->index. */
1041 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1044 MLX5_ASSERT(ret != REG_NON);
1045 MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046 reg_type = reg_to_field[ret];
1047 MLX5_ASSERT(reg_type > 0);
1048 reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050 MLX5_MODIFICATION_TYPE_SET, error);
1054 * Convert internal COPY_REG action to DV specification.
1057 * Pointer to the rte_eth_dev structure.
1058 * @param[in,out] res
1059 * Pointer to the modify-header resource.
1061 * Pointer to action specification.
1063 * Pointer to the error structure.
1066 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert the internal register-to-register COPY action.  When
 * reg_c[0] is involved, only the bits reported available by the kernel
 * (dv_regc0_mask) may be touched, so the mask and destination offset
 * are adjusted accordingly.  NOTE(review): several lines inside the
 * endianness-conditional sections are elided in this extract.
 */
1069 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1070 struct mlx5_flow_dv_modify_hdr_resource *res,
1071 const struct rte_flow_action *action,
1072 struct rte_flow_error *error)
1074 const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1075 rte_be32_t mask = RTE_BE32(UINT32_MAX);
1076 struct rte_flow_item item = {
1080 struct field_modify_info reg_src[] = {
1081 {4, 0, reg_to_field[conf->src]},
1084 struct field_modify_info reg_dst = {
1086 .id = reg_to_field[conf->dst],
1088 /* Adjust reg_c[0] usage according to reported mask. */
1089 if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1090 struct mlx5_priv *priv = dev->data->dev_private;
1091 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
/* reg_c[0] sharing is only valid in extended metadata modes. */
1093 MLX5_ASSERT(reg_c0);
1094 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1095 if (conf->dst == REG_C_0) {
1096 /* Copy to reg_c[0], within mask only. */
1097 reg_dst.offset = rte_bsf32(reg_c0);
1099 * Mask is ignoring the enianness, because
1100 * there is no conversion in datapath.
1102 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1103 /* Copy from destination lower bits to reg_c[0]. */
1104 mask = reg_c0 >> reg_dst.offset;
1106 /* Copy from destination upper bits to reg_c[0]. */
1107 mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1108 rte_fls_u32(reg_c0));
/* Source is reg_c[0]: restrict the copied bits to the usable mask. */
1111 mask = rte_cpu_to_be_32(reg_c0);
1112 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1113 /* Copy from reg_c[0] to destination lower bits. */
1116 /* Copy from reg_c[0] to destination upper bits. */
1117 reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1118 (rte_fls_u32(reg_c0) -
1123 return flow_dv_convert_modify_action(&item,
1124 reg_src, &reg_dst, res,
1125 MLX5_MODIFICATION_TYPE_COPY,
1130 * Convert MARK action to DV specification. This routine is used
1131 * in extensive metadata only and requires metadata register to be
1132 * handled. In legacy mode hardware tag resource is engaged.
1135 * Pointer to the rte_eth_dev structure.
1137 * Pointer to MARK action specification.
1138 * @param[in,out] resource
1139 * Pointer to the modify-header resource.
1141 * Pointer to the error structure.
1144 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a MARK action (extensive-metadata mode) into a SET on the
 * register that carries the flow mark.  When the mark lands in
 * reg_c[0], the value and mask are shifted into the bits reported
 * usable by dv_regc0_mask.
 */
1147 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1148 const struct rte_flow_action_mark *conf,
1149 struct mlx5_flow_dv_modify_hdr_resource *resource,
1150 struct rte_flow_error *error)
1152 struct mlx5_priv *priv = dev->data->dev_private;
/* Clamp the mark id to the supported mark mask (big-endian). */
1153 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1154 priv->sh->dv_mark_mask);
1155 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1156 struct rte_flow_item item = {
1160 struct field_modify_info reg_c_x[] = {
/* An all-zero effective mask means MARK cannot be supported. */
1166 return rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1168 NULL, "zero mark action mask");
1169 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1172 MLX5_ASSERT(reg > 0);
1173 if (reg == REG_C_0) {
1174 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1175 uint32_t shl_c0 = rte_bsf32(msk_c0);
/* Shift value/mask into the usable bit range of reg_c[0]. */
1177 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1178 mask = rte_cpu_to_be_32(mask) & msk_c0;
1179 mask = rte_cpu_to_be_32(mask << shl_c0);
1181 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1182 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1183 MLX5_MODIFICATION_TYPE_SET, error);
1187 * Get metadata register index for specified steering domain.
1190 * Pointer to the rte_eth_dev structure.
1192 * Attributes of flow to determine steering domain.
1194 * Pointer to the error structure.
1197 * positive index on success, a negative errno value otherwise
1198 * and rte_errno is set.
/*
 * Resolve the metadata register for the flow's steering domain:
 * FDB for transfer, else TX or RX — returns ENOTSUP via error when no
 * register is available.
 */
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202 const struct rte_flow_attr *attr,
1203 struct rte_flow_error *error)
1206 mlx5_flow_get_reg_id(dev, attr->transfer ?
1210 MLX5_METADATA_RX, 0, error);
1212 return rte_flow_error_set(error,
1213 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214 NULL, "unavailable "
1215 "metadata register");
1220 * Convert SET_META action to DV specification.
1223 * Pointer to the rte_eth_dev structure.
1224 * @param[in,out] resource
1225 * Pointer to the modify-header resource.
1227 * Attributes of flow that includes this item.
1229 * Pointer to action specification.
1231 * Pointer to the error structure.
1234 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Convert a SET_META action into a SET on the per-domain metadata
 * register, with reg_c[0]-specific bit shifting when that register is
 * selected.  NOTE(review): the shift application to data/mask after
 * computing shl_c0 is elided in this extract.
 */
1237 flow_dv_convert_action_set_meta
1238 (struct rte_eth_dev *dev,
1239 struct mlx5_flow_dv_modify_hdr_resource *resource,
1240 const struct rte_flow_attr *attr,
1241 const struct rte_flow_action_set_meta *conf,
1242 struct rte_flow_error *error)
1244 uint32_t data = conf->data;
1245 uint32_t mask = conf->mask;
1246 struct rte_flow_item item = {
1250 struct field_modify_info reg_c_x[] = {
1253 int reg = flow_dv_get_metadata_reg(dev, attr, error);
1258 * In datapath code there is no endianness
1259 * coversions for perfromance reasons, all
1260 * pattern conversions are done in rte_flow.
1262 if (reg == REG_C_0) {
1263 struct mlx5_priv *priv = dev->data->dev_private;
1264 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1267 MLX5_ASSERT(msk_c0);
/* Shift amount differs by host endianness (bit numbering flips). */
1268 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1269 shl_c0 = rte_bsf32(msk_c0);
1271 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
/* The shifted mask must stay within the usable reg_c[0] bits. */
1275 MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1277 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1278 /* The routine expects parameters in memory as big-endian ones. */
1279 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1280 MLX5_MODIFICATION_TYPE_SET, error);
/* NOTE(review): interior lines elided in this extract. */
1284 * Convert modify-header set IPv4 DSCP action to DV specification.
1286 * @param[in,out] resource
1287 * Pointer to the modify-header resource.
1289 * Pointer to action specification.
1291 * Pointer to the error structure.
1294 * 0 on success, a negative errno value otherwise and rte_errno is set.
1297 flow_dv_convert_action_modify_ipv4_dscp
1298 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299 const struct rte_flow_action *action,
1300 struct rte_flow_error *error)
1302 const struct rte_flow_action_set_dscp *conf =
1303 (const struct rte_flow_action_set_dscp *)(action->conf);
/* Build a synthetic IPv4 item carrying only the ToS/DSCP field. */
1304 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305 struct rte_flow_item_ipv4 ipv4;
1306 struct rte_flow_item_ipv4 ipv4_mask;
1308 memset(&ipv4, 0, sizeof(ipv4));
1309 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310 ipv4.hdr.type_of_service = conf->dscp;
/* DSCP occupies the top 6 bits of ToS; >> 2 aligns the mask to bit 0
 * as the modify-header interface expects. */
1311 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1313 item.mask = &ipv4_mask;
1314 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315 MLX5_MODIFICATION_TYPE_SET, error);
/* NOTE(review): interior lines elided in this extract. */
1319 * Convert modify-header set IPv6 DSCP action to DV specification.
1321 * @param[in,out] resource
1322 * Pointer to the modify-header resource.
1324 * Pointer to action specification.
1326 * Pointer to the error structure.
1329 * 0 on success, a negative errno value otherwise and rte_errno is set.
1332 flow_dv_convert_action_modify_ipv6_dscp
1333 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334 const struct rte_flow_action *action,
1335 struct rte_flow_error *error)
1337 const struct rte_flow_action_set_dscp *conf =
1338 (const struct rte_flow_action_set_dscp *)(action->conf);
/* Build a synthetic IPv6 item carrying only the DSCP bits. */
1339 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340 struct rte_flow_item_ipv6 ipv6;
1341 struct rte_flow_item_ipv6 ipv6_mask;
1343 memset(&ipv6, 0, sizeof(ipv6));
1344 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1346 * Even though the DSCP bits offset of IPv6 is not byte aligned,
1347 * rdma-core only accept the DSCP bits byte aligned start from
1348 * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1349 * bits in IPv6 case as rdma-core requires byte aligned value.
1351 ipv6.hdr.vtc_flow = conf->dscp;
/* >> 22 moves the DSCP mask from its vtc_flow position down to bit 0,
 * matching the byte-aligned form rdma-core expects (see note above). */
1352 ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1354 item.mask = &ipv6_mask;
1355 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356 MLX5_MODIFICATION_TYPE_SET, error);
/* NOTE(review): interior lines elided in this extract; comments cover
 * visible code only. */
1360 * Validate MARK item.
1363 * Pointer to the rte_eth_dev structure.
1365 * Item specification.
1367 * Attributes of flow that includes this item.
1369 * Pointer to error structure.
1372 * 0 on success, a negative errno value otherwise and rte_errno is set.
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376 const struct rte_flow_item *item,
1377 const struct rte_flow_attr *attr __rte_unused,
1378 struct rte_flow_error *error)
1380 struct mlx5_priv *priv = dev->data->dev_private;
1381 struct mlx5_dev_config *config = &priv->config;
1382 const struct rte_flow_item_mark *spec = item->spec;
1383 const struct rte_flow_item_mark *mask = item->mask;
/* Device-supported mark bits: limit matching to dv_mark_mask. */
1384 const struct rte_flow_item_mark nic_mask = {
1385 .id = priv->sh->dv_mark_mask,
/* Matching on MARK requires extended metadata mode (dv_xmeta_en). */
1389 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390 return rte_flow_error_set(error, ENOTSUP,
1391 RTE_FLOW_ERROR_TYPE_ITEM, item,
1392 "extended metadata feature"
1394 if (!mlx5_flow_ext_mreg_supported(dev))
1395 return rte_flow_error_set(error, ENOTSUP,
1396 RTE_FLOW_ERROR_TYPE_ITEM, item,
1397 "extended metadata register"
1398 " isn't supported");
1400 return rte_flow_error_set(error, ENOTSUP,
1401 RTE_FLOW_ERROR_TYPE_ITEM, item,
1402 "extended metadata register"
1403 " isn't available");
/* A register must be reserved for MARK; error path elided here. */
1404 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1408 return rte_flow_error_set(error, EINVAL,
1409 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1411 "data cannot be empty");
1412 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413 return rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1416 "mark id exceeds the limit");
1420 return rte_flow_error_set(error, EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422 "mask cannot be zero");
1424 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425 (const uint8_t *)&nic_mask,
1426 sizeof(struct rte_flow_item_mark),
1427 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* NOTE(review): interior lines elided in this extract. */
1434 * Validate META item.
1437 * Pointer to the rte_eth_dev structure.
1439 * Item specification.
1441 * Attributes of flow that includes this item.
1443 * Pointer to error structure.
1446 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): 'dev' is annotated __rte_unused but IS dereferenced
 * below (dev->data->dev_private) — the attribute is stale and should
 * be dropped.
 */
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1450 const struct rte_flow_item *item,
1451 const struct rte_flow_attr *attr,
1452 struct rte_flow_error *error)
1454 struct mlx5_priv *priv = dev->data->dev_private;
1455 struct mlx5_dev_config *config = &priv->config;
1456 const struct rte_flow_item_meta *spec = item->spec;
1457 const struct rte_flow_item_meta *mask = item->mask;
1458 struct rte_flow_item_meta nic_mask = {
1465 return rte_flow_error_set(error, EINVAL,
1466 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1468 "data cannot be empty");
/* Extended metadata mode: a register must back the META item. */
1469 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470 if (!mlx5_flow_ext_mreg_supported(dev))
1471 return rte_flow_error_set(error, ENOTSUP,
1472 RTE_FLOW_ERROR_TYPE_ITEM, item,
1473 "extended metadata register"
1474 " isn't supported");
1475 reg = flow_dv_get_metadata_reg(dev, attr, error);
1479 return rte_flow_error_set(error, ENOTSUP,
1480 RTE_FLOW_ERROR_TYPE_ITEM, item,
/* Clamp the acceptable match mask to the device's meta capability. */
1484 nic_mask.data = priv->sh->dv_meta_mask;
/* Legacy mode cannot carry META through the e-switch. */
1485 } else if (attr->transfer) {
1486 return rte_flow_error_set(error, ENOTSUP,
1487 RTE_FLOW_ERROR_TYPE_ITEM, item,
1488 "extended metadata feature "
1489 "should be enabled when "
1490 "meta item is requested "
1491 "with e-switch mode ");
1494 mask = &rte_flow_item_meta_mask;
1496 return rte_flow_error_set(error, EINVAL,
1497 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498 "mask cannot be zero");
1500 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501 (const uint8_t *)&nic_mask,
1502 sizeof(struct rte_flow_item_meta),
1503 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* NOTE(review): interior lines elided in this extract. */
1508 * Validate TAG item.
1511 * Pointer to the rte_eth_dev structure.
1513 * Item specification.
1515 * Attributes of flow that includes this item.
1517 * Pointer to error structure.
1520 * 0 on success, a negative errno value otherwise and rte_errno is set.
1523 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1524 const struct rte_flow_item *item,
1525 const struct rte_flow_attr *attr __rte_unused,
1526 struct rte_flow_error *error)
1528 const struct rte_flow_item_tag *spec = item->spec;
1529 const struct rte_flow_item_tag *mask = item->mask;
/* Full 32-bit data match is allowed; index handling is separate. */
1530 const struct rte_flow_item_tag nic_mask = {
1531 .data = RTE_BE32(UINT32_MAX),
1536 if (!mlx5_flow_ext_mreg_supported(dev))
1537 return rte_flow_error_set(error, ENOTSUP,
1538 RTE_FLOW_ERROR_TYPE_ITEM, item,
1539 "extensive metadata register"
1540 " isn't supported");
1542 return rte_flow_error_set(error, EINVAL,
1543 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1545 "data cannot be empty");
1547 mask = &rte_flow_item_tag_mask;
1549 return rte_flow_error_set(error, EINVAL,
1550 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551 "mask cannot be zero");
1553 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1554 (const uint8_t *)&nic_mask,
1555 sizeof(struct rte_flow_item_tag),
1556 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* The tag index selects a concrete register; partial index match is
 * meaningless, require the full 0xff mask. */
1559 if (mask->index != 0xff)
1560 return rte_flow_error_set(error, EINVAL,
1561 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1562 "partial mask for tag index"
1563 " is not supported");
1564 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1567 MLX5_ASSERT(ret != REG_NON);
/* NOTE(review): interior lines elided in this extract. */
1572 * Validate vport item.
1575 * Pointer to the rte_eth_dev structure.
1577 * Item specification.
1579 * Attributes of flow that includes this item.
1580 * @param[in] item_flags
1581 * Bit-fields that holds the items detected until now.
1583 * Pointer to error structure.
1586 * 0 on success, a negative errno value otherwise and rte_errno is set.
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590 const struct rte_flow_item *item,
1591 const struct rte_flow_attr *attr,
1592 uint64_t item_flags,
1593 struct rte_flow_error *error)
1595 const struct rte_flow_item_port_id *spec = item->spec;
1596 const struct rte_flow_item_port_id *mask = item->mask;
1597 const struct rte_flow_item_port_id switch_mask = {
1600 struct mlx5_priv *esw_priv;
1601 struct mlx5_priv *dev_priv;
/* PORT_ID is an e-switch match — only valid with attr->transfer. */
1604 if (!attr->transfer)
1605 return rte_flow_error_set(error, EINVAL,
1606 RTE_FLOW_ERROR_TYPE_ITEM,
1608 "match on port id is valid only"
1609 " when transfer flag is enabled");
1610 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611 return rte_flow_error_set(error, ENOTSUP,
1612 RTE_FLOW_ERROR_TYPE_ITEM, item,
1613 "multiple source ports are not"
1616 mask = &switch_mask;
/* Only an exact port match is supported. */
1617 if (mask->id != 0xffffffff)
1618 return rte_flow_error_set(error, ENOTSUP,
1619 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1621 "no support for partial mask on"
1623 ret = mlx5_flow_item_acceptable
1624 (item, (const uint8_t *)mask,
1625 (const uint8_t *)&rte_flow_item_port_id_mask,
1626 sizeof(struct rte_flow_item_port_id),
1627 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* The matched port must belong to the same e-switch domain as dev. */
1632 esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1634 return rte_flow_error_set(error, rte_errno,
1635 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636 "failed to obtain E-Switch info for"
1638 dev_priv = mlx5_dev_to_eswitch_info(dev);
1640 return rte_flow_error_set(error, rte_errno,
1641 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1643 "failed to obtain E-Switch info");
1644 if (esw_priv->domain_id != dev_priv->domain_id)
1645 return rte_flow_error_set(error, EINVAL,
1646 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647 "cannot match on a port from a"
1648 " different E-Switch");
/* NOTE(review): interior lines elided in this extract. */
1653 * Validate VLAN item.
1656 * Item specification.
1657 * @param[in] item_flags
1658 * Bit-fields that holds the items detected until now.
1660 * Ethernet device flow is being created on.
1662 * Pointer to error structure.
1665 * 0 on success, a negative errno value otherwise and rte_errno is set.
1668 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1669 uint64_t item_flags,
1670 struct rte_eth_dev *dev,
1671 struct rte_flow_error *error)
1673 const struct rte_flow_item_vlan *mask = item->mask;
1674 const struct rte_flow_item_vlan nic_mask = {
1675 .tci = RTE_BE16(UINT16_MAX),
1676 .inner_type = RTE_BE16(UINT16_MAX),
/* Inner vs. outer layer flags depend on being inside a tunnel. */
1679 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1681 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682 MLX5_FLOW_LAYER_INNER_L4) :
1683 (MLX5_FLOW_LAYER_OUTER_L3 |
1684 MLX5_FLOW_LAYER_OUTER_L4);
1685 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686 MLX5_FLOW_LAYER_OUTER_VLAN;
1688 if (item_flags & vlanm)
1689 return rte_flow_error_set(error, EINVAL,
1690 RTE_FLOW_ERROR_TYPE_ITEM, item,
1691 "multiple VLAN layers not supported");
/* Ordering check: VLAN must precede any L3/L4 item. */
1692 else if ((item_flags & l34m) != 0)
1693 return rte_flow_error_set(error, EINVAL,
1694 RTE_FLOW_ERROR_TYPE_ITEM, item,
1695 "VLAN cannot follow L3/L4 layer");
1697 mask = &rte_flow_item_vlan_mask;
1698 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699 (const uint8_t *)&nic_mask,
1700 sizeof(struct rte_flow_item_vlan),
1701 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* SR-IOV VM workaround: with a vmwa context only full-VID (0x0fff)
 * TCI masks are supported, see comment below. */
1704 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705 struct mlx5_priv *priv = dev->data->dev_private;
1707 if (priv->vmwa_context) {
1709 * Non-NULL context means we have a virtual machine
1710 * and SR-IOV enabled, we have to create VLAN interface
1711 * to make hypervisor to setup E-Switch vport
1712 * context correctly. We avoid creating the multiple
1713 * VLAN interfaces, so we cannot support VLAN tag mask.
1715 return rte_flow_error_set(error, EINVAL,
1716 RTE_FLOW_ERROR_TYPE_ITEM,
1718 "VLAN tag mask is not"
1719 " supported in virtual"
1727 * GTP flags are contained in 1 byte of the format:
1728 * -------------------------------------------
1729 * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
1730 * |-----------------------------------------|
1731 * | value | Version | PT | Res | E | S | PN |
1732 * -------------------------------------------
1734 * Matching is supported only for GTP flags E, S, PN.
/* 0x07 keeps only the three low-order bits of the flags byte, i.e.
 * E/S/PN per the table above (Version/PT/Res must not be matched). */
1736 #define MLX5_GTP_FLAGS_MASK 0x07
/* NOTE(review): interior lines elided in this extract. */
1739 * Validate GTP item.
1742 * Pointer to the rte_eth_dev structure.
1744 * Item specification.
1745 * @param[in] item_flags
1746 * Bit-fields that holds the items detected until now.
1748 * Pointer to error structure.
1751 * 0 on success, a negative errno value otherwise and rte_errno is set.
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755 const struct rte_flow_item *item,
1756 uint64_t item_flags,
1757 struct rte_flow_error *error)
1759 struct mlx5_priv *priv = dev->data->dev_private;
1760 const struct rte_flow_item_gtp *spec = item->spec;
1761 const struct rte_flow_item_gtp *mask = item->mask;
/* Supported match fields: E/S/PN flags and the full 32-bit TEID. */
1762 const struct rte_flow_item_gtp nic_mask = {
1763 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1765 .teid = RTE_BE32(0xffffffff),
/* GTP matching is gated on the HCA capability bit. */
1768 if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769 return rte_flow_error_set(error, ENOTSUP,
1770 RTE_FLOW_ERROR_TYPE_ITEM, item,
1771 "GTP support is not enabled");
1772 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773 return rte_flow_error_set(error, ENOTSUP,
1774 RTE_FLOW_ERROR_TYPE_ITEM, item,
1775 "multiple tunnel layers not"
/* GTP-U rides on UDP; the pattern must contain an outer UDP item. */
1777 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778 return rte_flow_error_set(error, EINVAL,
1779 RTE_FLOW_ERROR_TYPE_ITEM, item,
1780 "no outer UDP layer found");
1782 mask = &rte_flow_item_gtp_mask;
1783 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1784 return rte_flow_error_set(error, ENOTSUP,
1785 RTE_FLOW_ERROR_TYPE_ITEM, item,
1786 "Match is supported for GTP"
1788 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789 (const uint8_t *)&nic_mask,
1790 sizeof(struct rte_flow_item_gtp),
1791 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
/* NOTE(review): interior lines elided in this extract; comments cover
 * visible code only. */
1795 * Validate IPV4 item.
1796 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1797 * add specific validation of fragment_offset field,
1800 * Item specification.
1801 * @param[in] item_flags
1802 * Bit-fields that holds the items detected until now.
1804 * Pointer to error structure.
1807 * 0 on success, a negative errno value otherwise and rte_errno is set.
1810 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1811 uint64_t item_flags,
1813 uint16_t ether_type,
1814 struct rte_flow_error *error)
1817 const struct rte_flow_item_ipv4 *spec = item->spec;
1818 const struct rte_flow_item_ipv4 *last = item->last;
1819 const struct rte_flow_item_ipv4 *mask = item->mask;
1820 rte_be16_t fragment_offset_spec = 0;
1821 rte_be16_t fragment_offset_last = 0;
/* DV path accepts a wider mask than the generic one, including the
 * fragment_offset field validated below. */
1822 const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1824 .src_addr = RTE_BE32(0xffffffff),
1825 .dst_addr = RTE_BE32(0xffffffff),
1826 .type_of_service = 0xff,
1827 .fragment_offset = RTE_BE16(0xffff),
1828 .next_proto_id = 0xff,
1829 .time_to_live = 0xff,
/* Generic IPv4 validation first; fragment logic is DV-specific. */
1833 ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1834 ether_type, &nic_ipv4_mask,
1835 MLX5_ITEM_RANGE_ACCEPTED, error);
1839 fragment_offset_spec = spec->hdr.fragment_offset &
1840 mask->hdr.fragment_offset;
/* spec/mask yield 0: no fragment match requested, nothing to check. */
1841 if (!fragment_offset_spec)
1844 * spec and mask are valid, enforce using full mask to make sure the
1845 * complete value is used correctly.
1847 if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1848 != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1849 return rte_flow_error_set(error, EINVAL,
1850 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1851 item, "must use full mask for"
1852 " fragment_offset");
1854 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1855 * indicating this is 1st fragment of fragmented packet.
1856 * This is not yet supported in MLX5, return appropriate error message.
1858 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1859 return rte_flow_error_set(error, ENOTSUP,
1860 RTE_FLOW_ERROR_TYPE_ITEM, item,
1861 "match on first fragment not "
/* Any other non-zero spec needs a 'last' to define a range. */
1863 if (fragment_offset_spec && !last)
1864 return rte_flow_error_set(error, ENOTSUP,
1865 RTE_FLOW_ERROR_TYPE_ITEM, item,
1866 "specified value not supported");
1867 /* spec and last are valid, validate the specified range. */
1868 fragment_offset_last = last->hdr.fragment_offset &
1869 mask->hdr.fragment_offset;
1871 * Match on fragment_offset spec 0x2001 and last 0x3fff
1872 * means MF is 1 and frag-offset is > 0.
1873 * This packet is fragment 2nd and onward, excluding last.
1874 * This is not yet supported in MLX5, return appropriate
1877 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1878 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1879 return rte_flow_error_set(error, ENOTSUP,
1880 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1881 last, "match on following "
1882 "fragments not supported");
1884 * Match on fragment_offset spec 0x0001 and last 0x1fff
1885 * means MF is 0 and frag-offset is > 0.
1886 * This packet is last fragment of fragmented packet.
1887 * This is not yet supported in MLX5, return appropriate
1890 if (fragment_offset_spec == RTE_BE16(1) &&
1891 fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1892 return rte_flow_error_set(error, ENOTSUP,
1893 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1894 last, "match on last "
1895 "fragment not supported");
1897 * Match on fragment_offset spec 0x0001 and last 0x3fff
1898 * means MF and/or frag-offset is not 0.
1899 * This is a fragmented packet.
1900 * Other range values are invalid and rejected.
1902 if (!(fragment_offset_spec == RTE_BE16(1) &&
1903 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1904 return rte_flow_error_set(error, ENOTSUP,
1905 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1906 "specified range not supported");
/* NOTE(review): interior lines elided in this extract; comments cover
 * visible code only. */
1911 * Validate IPV6 fragment extension item.
1914 * Item specification.
1915 * @param[in] item_flags
1916 * Bit-fields that holds the items detected until now.
1918 * Pointer to error structure.
1921 * 0 on success, a negative errno value otherwise and rte_errno is set.
1924 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1925 uint64_t item_flags,
1926 struct rte_flow_error *error)
1928 const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1929 const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1930 const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1931 rte_be16_t frag_data_spec = 0;
1932 rte_be16_t frag_data_last = 0;
1933 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1934 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1935 MLX5_FLOW_LAYER_OUTER_L4;
1937 struct rte_flow_item_ipv6_frag_ext nic_mask = {
1939 .next_header = 0xff,
1940 .frag_data = RTE_BE16(0xffff),
/* The fragment extension header sits between L3 and L4: reject if an
 * L4 item was already seen, and require a preceding IPv6 item. */
1944 if (item_flags & l4m)
1945 return rte_flow_error_set(error, EINVAL,
1946 RTE_FLOW_ERROR_TYPE_ITEM, item,
1947 "ipv6 fragment extension item cannot "
1949 if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1950 (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1951 return rte_flow_error_set(error, EINVAL,
1952 RTE_FLOW_ERROR_TYPE_ITEM, item,
1953 "ipv6 fragment extension item must "
1954 "follow ipv6 item");
1956 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
/* No fragment-data match requested: nothing more to validate here. */
1957 if (!frag_data_spec)
1960 * spec and mask are valid, enforce using full mask to make sure the
1961 * complete value is used correctly.
1963 if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1964 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1965 return rte_flow_error_set(error, EINVAL,
1966 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1967 item, "must use full mask for"
1970 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
1971 * This is 1st fragment of fragmented packet.
1973 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1974 return rte_flow_error_set(error, ENOTSUP,
1975 RTE_FLOW_ERROR_TYPE_ITEM, item,
1976 "match on first fragment not "
1978 if (frag_data_spec && !last)
1979 return rte_flow_error_set(error, EINVAL,
1980 RTE_FLOW_ERROR_TYPE_ITEM, item,
1981 "specified value not supported");
/* Range matching on frag_data is explicitly accepted here. */
1982 ret = mlx5_flow_item_acceptable
1983 (item, (const uint8_t *)mask,
1984 (const uint8_t *)&nic_mask,
1985 sizeof(struct rte_flow_item_ipv6_frag_ext),
1986 MLX5_ITEM_RANGE_ACCEPTED, error);
1989 /* spec and last are valid, validate the specified range. */
1990 frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1992 * Match on frag_data spec 0x0009 and last 0xfff9
1993 * means M is 1 and frag-offset is > 0.
1994 * This packet is fragment 2nd and onward, excluding last.
1995 * This is not yet supported in MLX5, return appropriate
1998 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1999 RTE_IPV6_EHDR_MF_MASK) &&
2000 frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2001 return rte_flow_error_set(error, ENOTSUP,
2002 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2003 last, "match on following "
2004 "fragments not supported");
2006 * Match on frag_data spec 0x0008 and last 0xfff8
2007 * means M is 0 and frag-offset is > 0.
2008 * This packet is last fragment of fragmented packet.
2009 * This is not yet supported in MLX5, return appropriate
2012 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2013 frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2014 return rte_flow_error_set(error, ENOTSUP,
2015 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2016 last, "match on last "
2017 "fragment not supported");
2018 /* Other range values are invalid and rejected. */
2019 return rte_flow_error_set(error, EINVAL,
2020 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2021 "specified range not supported");
/* NOTE(review): interior lines elided in this extract. */
2025 * Validate the pop VLAN action.
2028 * Pointer to the rte_eth_dev structure.
2029 * @param[in] action_flags
2030 * Holds the actions detected until now.
2032 * Pointer to the pop vlan action.
2033 * @param[in] item_flags
2034 * The items found in this flow rule.
2036 * Pointer to flow attributes.
2038 * Pointer to error structure.
2041 * 0 on success, a negative errno value otherwise and rte_errno is set.
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045 uint64_t action_flags,
2046 const struct rte_flow_action *action,
2047 uint64_t item_flags,
2048 const struct rte_flow_attr *attr,
2049 struct rte_flow_error *error)
2051 const struct mlx5_priv *priv = dev->data->dev_private;
/* Requires the shared pop-VLAN DR action to have been created. */
2055 if (!priv->sh->pop_vlan_action)
2056 return rte_flow_error_set(error, ENOTSUP,
2057 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2059 "pop vlan action is not supported");
2061 return rte_flow_error_set(error, ENOTSUP,
2062 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2064 "pop vlan action not supported for "
2066 if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067 return rte_flow_error_set(error, ENOTSUP,
2068 RTE_FLOW_ERROR_TYPE_ACTION, action,
2069 "no support for multiple VLAN "
2071 /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072 if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074 return rte_flow_error_set(error, ENOTSUP,
2075 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2077 "cannot pop vlan after decap without "
2078 "match on inner vlan in the flow");
2079 /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082 return rte_flow_error_set(error, ENOTSUP,
2083 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2085 "cannot pop vlan without a "
2086 "match on (outer) vlan in the flow");
/* Ordering: port_id must come after all VLAN manipulations. */
2087 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088 return rte_flow_error_set(error, EINVAL,
2089 RTE_FLOW_ERROR_TYPE_ACTION, action,
2090 "wrong action order, port_id should "
2091 "be after pop VLAN action");
2092 if (!attr->transfer && priv->representor)
2093 return rte_flow_error_set(error, ENOTSUP,
2094 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095 "pop vlan action for VF representor "
2096 "not supported on NIC table");
/* NOTE(review): interior lines elided in this extract. */
2101 * Get VLAN default info from vlan match info.
2104 * the list of item specifications.
2106 * pointer VLAN info to fill to.
2109 * 0 on success, a negative errno value otherwise and rte_errno is set.
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113 struct rte_vlan_hdr *vlan)
2115 const struct rte_flow_item_vlan nic_mask = {
2116 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117 MLX5DV_FLOW_VLAN_VID_MASK),
2118 .inner_type = RTE_BE16(0xffff),
/* Scan the pattern for the (first) VLAN item, incl. the PMD-internal
 * MLX5_RTE_FLOW_ITEM_TYPE_VLAN variant. */
2123 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124 int type = items->type;
2126 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2130 if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132 const struct rte_flow_item_vlan *vlan_v = items->spec;
2134 /* If VLAN item in pattern doesn't contain data, return here. */
2139 /* Only full match values are accepted */
/* Fully-masked PCP in the pattern overrides the default PCP bits. */
2140 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2144 rte_be_to_cpu_16(vlan_v->tci &
2145 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
/* Likewise, a fully-masked VID overrides the default VID bits. */
2147 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2151 rte_be_to_cpu_16(vlan_v->tci &
2152 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2154 if (vlan_m->inner_type == nic_mask.inner_type)
2155 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156 vlan_m->inner_type);
/* NOTE(review): interior lines elided in this extract. */
2161 * Validate the push VLAN action.
2164 * Pointer to the rte_eth_dev structure.
2165 * @param[in] action_flags
2166 * Holds the actions detected until now.
2167 * @param[in] item_flags
2168 * The items found in this flow rule.
2170 * Pointer to the action structure.
2172 * Pointer to flow attributes
2174 * Pointer to error structure.
2177 * 0 on success, a negative errno value otherwise and rte_errno is set.
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181 uint64_t action_flags,
2182 const struct rte_flow_item_vlan *vlan_m,
2183 const struct rte_flow_action *action,
2184 const struct rte_flow_attr *attr,
2185 struct rte_flow_error *error)
2187 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188 const struct mlx5_priv *priv = dev->data->dev_private;
/* Only 802.1Q (0x8100) and 802.1ad/QinQ (0x88A8) TPIDs are valid. */
2190 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192 return rte_flow_error_set(error, EINVAL,
2193 RTE_FLOW_ERROR_TYPE_ACTION, action,
2194 "invalid vlan ethertype");
2195 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196 return rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ACTION, action,
2198 "wrong action order, port_id should "
2199 "be after push VLAN");
2200 if (!attr->transfer && priv->representor)
2201 return rte_flow_error_set(error, ENOTSUP,
2202 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203 "push vlan action for VF representor "
2204 "not supported on NIC table");
/*
 * If the VLAN item's PCP mask is partial, the pushed header's PCP is
 * ambiguous unless an of_set_vlan_pcp action supplies it (either
 * already seen in action_flags or later in the action list).
 */
2206 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210 !(mlx5_flow_find_action
2211 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212 return rte_flow_error_set(error, EINVAL,
2213 RTE_FLOW_ERROR_TYPE_ACTION, action,
2214 "not full match mask on VLAN PCP and "
2215 "there is no of_set_vlan_pcp action, "
2216 "push VLAN action cannot figure out "
/* Same ambiguity rule for a partially-masked VID. */
2219 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223 !(mlx5_flow_find_action
2224 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225 return rte_flow_error_set(error, EINVAL,
2226 RTE_FLOW_ERROR_TYPE_ACTION, action,
2227 "not full match mask on VLAN VID and "
2228 "there is no of_set_vlan_vid action, "
2229 "push VLAN action cannot figure out "
/* NOTE(review): interior lines elided in this extract. */
2236 * Validate the set VLAN PCP.
2238 * @param[in] action_flags
2239 * Holds the actions detected until now.
2240 * @param[in] actions
2241 * Pointer to the list of actions remaining in the flow rule.
2243 * Pointer to error structure.
2246 * 0 on success, a negative errno value otherwise and rte_errno is set.
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250 const struct rte_flow_action actions[],
2251 struct rte_flow_error *error)
2253 const struct rte_flow_action *action = actions;
2254 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
/* PCP is a 3-bit field: valid values are 0..7. */
2256 if (conf->vlan_pcp > 7)
2257 return rte_flow_error_set(error, EINVAL,
2258 RTE_FLOW_ERROR_TYPE_ACTION, action,
2259 "VLAN PCP value is too big");
/* set_vlan_pcp only makes sense on a header pushed by push_vlan. */
2260 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261 return rte_flow_error_set(error, ENOTSUP,
2262 RTE_FLOW_ERROR_TYPE_ACTION, action,
2263 "set VLAN PCP action must follow "
2264 "the push VLAN action");
2265 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266 return rte_flow_error_set(error, ENOTSUP,
2267 RTE_FLOW_ERROR_TYPE_ACTION, action,
2268 "Multiple VLAN PCP modification are "
2270 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271 return rte_flow_error_set(error, EINVAL,
2272 RTE_FLOW_ERROR_TYPE_ACTION, action,
2273 "wrong action order, port_id should "
2274 "be after set VLAN PCP");
/* NOTE(review): interior lines elided in this extract. */
2279 * Validate the set VLAN VID.
2281 * @param[in] item_flags
2282 * Holds the items detected in this rule.
2283 * @param[in] action_flags
2284 * Holds the actions detected until now.
2285 * @param[in] actions
2286 * Pointer to the list of actions remaining in the flow rule.
2288 * Pointer to error structure.
2291 * 0 on success, a negative errno value otherwise and rte_errno is set.
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295 uint64_t action_flags,
2296 const struct rte_flow_action actions[],
2297 struct rte_flow_error *error)
2299 const struct rte_flow_action *action = actions;
2300 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
/* VID is 12 bits; 0xFFF is reserved, so the maximum usable is 0xFFE. */
2302 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303 return rte_flow_error_set(error, EINVAL,
2304 RTE_FLOW_ERROR_TYPE_ACTION, action,
2305 "VLAN VID value is too big");
/* Needs a VLAN header to rewrite: either pushed by push_vlan or
 * matched as an outer VLAN item. */
2306 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308 return rte_flow_error_set(error, ENOTSUP,
2309 RTE_FLOW_ERROR_TYPE_ACTION, action,
2310 "set VLAN VID action must follow push"
2311 " VLAN action or match on VLAN item");
2312 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313 return rte_flow_error_set(error, ENOTSUP,
2314 RTE_FLOW_ERROR_TYPE_ACTION, action,
2315 "Multiple VLAN VID modifications are "
2317 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318 return rte_flow_error_set(error, EINVAL,
2319 RTE_FLOW_ERROR_TYPE_ACTION, action,
2320 "wrong action order, port_id should "
2321 "be after set VLAN VID");
2326 * Validate the FLAG action.
2329 * Pointer to the rte_eth_dev structure.
2330 * @param[in] action_flags
2331 * Holds the actions detected until now.
2333 * Pointer to flow attributes
2335 * Pointer to error structure.
2338 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the FLAG action in extensive-metadata mode.  Falls back to the
 * generic validator when dv_xmeta_en is legacy; otherwise requires metadata
 * register support, a default-mark-capable dv_mark_mask, an available
 * MLX5_FLOW_MARK register, and rejects mixing with MARK or a second FLAG.
 */
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342 uint64_t action_flags,
2343 const struct rte_flow_attr *attr,
2344 struct rte_flow_error *error)
2346 struct mlx5_priv *priv = dev->data->dev_private;
2347 struct mlx5_dev_config *config = &priv->config;
2350 /* Fall back if no extended metadata register support. */
2351 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352 return mlx5_flow_validate_action_flag(action_flags, attr,
2354 /* Extensive metadata mode requires registers. */
2355 if (!mlx5_flow_ext_mreg_supported(dev))
2356 return rte_flow_error_set(error, ENOTSUP,
2357 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358 "no metadata registers "
2359 "to support flag action");
2360 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361 return rte_flow_error_set(error, ENOTSUP,
2362 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363 "extended metadata register"
2364 " isn't available");
/* Resolve the register backing MLX5_FLOW_MARK; must succeed here. */
2365 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2368 MLX5_ASSERT(ret > 0);
2369 if (action_flags & MLX5_FLOW_ACTION_MARK)
2370 return rte_flow_error_set(error, EINVAL,
2371 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372 "can't mark and flag in same flow");
2373 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374 return rte_flow_error_set(error, EINVAL,
2375 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2377 " actions in same flow");
2382 * Validate MARK action.
2385 * Pointer to the rte_eth_dev structure.
2387 * Pointer to action.
2388 * @param[in] action_flags
2389 * Holds the actions detected until now.
2391 * Pointer to flow attributes
2393 * Pointer to error structure.
2396 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the MARK action in extensive-metadata mode.  Legacy xmeta mode
 * falls back to the generic validator.  Otherwise: metadata registers must
 * be supported and dv_mark_mask non-zero, the configuration must be
 * non-NULL, the mark id must fit within MLX5_FLOW_MARK_MAX masked by the
 * available register bits, and MARK must not be combined with FLAG or a
 * second MARK.
 */
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400 const struct rte_flow_action *action,
2401 uint64_t action_flags,
2402 const struct rte_flow_attr *attr,
2403 struct rte_flow_error *error)
2405 struct mlx5_priv *priv = dev->data->dev_private;
2406 struct mlx5_dev_config *config = &priv->config;
2407 const struct rte_flow_action_mark *mark = action->conf;
2410 /* Fall back if no extended metadata register support. */
2411 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412 return mlx5_flow_validate_action_mark(action, action_flags,
2414 /* Extensive metadata mode requires registers. */
2415 if (!mlx5_flow_ext_mreg_supported(dev))
2416 return rte_flow_error_set(error, ENOTSUP,
2417 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418 "no metadata registers "
2419 "to support mark action");
2420 if (!priv->sh->dv_mark_mask)
2421 return rte_flow_error_set(error, ENOTSUP,
2422 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423 "extended metadata register"
2424 " isn't available");
2425 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2428 MLX5_ASSERT(ret > 0);
2430 return rte_flow_error_set(error, EINVAL,
2431 RTE_FLOW_ERROR_TYPE_ACTION, action,
2432 "configuration cannot be null");
/* Mark id is bounded by what the HW register can actually carry. */
2433 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434 return rte_flow_error_set(error, EINVAL,
2435 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2437 "mark id exceeds the limit");
2438 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439 return rte_flow_error_set(error, EINVAL,
2440 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441 "can't flag and mark in same flow");
2442 if (action_flags & MLX5_FLOW_ACTION_MARK)
2443 return rte_flow_error_set(error, EINVAL,
2444 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445 "can't have 2 mark actions in same"
2451 * Validate SET_META action.
2454 * Pointer to the rte_eth_dev structure.
2456 * Pointer to the action structure.
2457 * @param[in] action_flags
2458 * Holds the actions detected until now.
2460 * Pointer to flow attributes
2462 * Pointer to error structure.
2465 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the SET_META action.  Requires extended metadata register
 * support; when the resolved metadata register is a reg-C (not REG_A or
 * REG_B) the mask is narrowed to the bits the device exposes
 * (dv_meta_mask).  Rejects NULL configuration, an all-zero mask, and mask
 * bits outside the usable register bits.
 */
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469 const struct rte_flow_action *action,
2470 uint64_t action_flags __rte_unused,
2471 const struct rte_flow_attr *attr,
2472 struct rte_flow_error *error)
2474 const struct rte_flow_action_set_meta *conf;
2475 uint32_t nic_mask = UINT32_MAX;
2478 if (!mlx5_flow_ext_mreg_supported(dev))
2479 return rte_flow_error_set(error, ENOTSUP,
2480 RTE_FLOW_ERROR_TYPE_ACTION, action,
2481 "extended metadata register"
2482 " isn't supported");
2483 reg = flow_dv_get_metadata_reg(dev, attr, error);
/* reg-C registers may expose only some bits; restrict the mask. */
2486 if (reg != REG_A && reg != REG_B) {
2487 struct mlx5_priv *priv = dev->data->dev_private;
2489 nic_mask = priv->sh->dv_meta_mask;
2491 if (!(action->conf))
2492 return rte_flow_error_set(error, EINVAL,
2493 RTE_FLOW_ERROR_TYPE_ACTION, action,
2494 "configuration cannot be null");
2495 conf = (const struct rte_flow_action_set_meta *)action->conf;
2497 return rte_flow_error_set(error, EINVAL,
2498 RTE_FLOW_ERROR_TYPE_ACTION, action,
2499 "zero mask doesn't have any effect");
2500 if (conf->mask & ~nic_mask)
2501 return rte_flow_error_set(error, EINVAL,
2502 RTE_FLOW_ERROR_TYPE_ACTION, action,
2503 "meta data must be within reg C0");
2508 * Validate SET_TAG action.
2511 * Pointer to the rte_eth_dev structure.
2513 * Pointer to the action structure.
2514 * @param[in] action_flags
2515 * Holds the actions detected until now.
2517 * Pointer to flow attributes
2519 * Pointer to error structure.
2522 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the SET_TAG action.  Requires extensive metadata register
 * support and a non-NULL, non-zero-mask configuration; the tag index must
 * map to a valid MLX5_APP_TAG register.  On ingress (non-transfer),
 * set_tag combined with a terminal action (DROP/QUEUE/RSS) is rejected
 * since the tag could never be consumed afterwards.
 */
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526 const struct rte_flow_action *action,
2527 uint64_t action_flags,
2528 const struct rte_flow_attr *attr,
2529 struct rte_flow_error *error)
2531 const struct rte_flow_action_set_tag *conf;
2532 const uint64_t terminal_action_flags =
2533 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534 MLX5_FLOW_ACTION_RSS;
2537 if (!mlx5_flow_ext_mreg_supported(dev))
2538 return rte_flow_error_set(error, ENOTSUP,
2539 RTE_FLOW_ERROR_TYPE_ACTION, action,
2540 "extensive metadata register"
2541 " isn't supported");
2542 if (!(action->conf))
2543 return rte_flow_error_set(error, EINVAL,
2544 RTE_FLOW_ERROR_TYPE_ACTION, action,
2545 "configuration cannot be null");
2546 conf = (const struct rte_flow_action_set_tag *)action->conf;
2548 return rte_flow_error_set(error, EINVAL,
2549 RTE_FLOW_ERROR_TYPE_ACTION, action,
2550 "zero mask doesn't have any effect");
2551 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2554 if (!attr->transfer && attr->ingress &&
2555 (action_flags & terminal_action_flags))
2556 return rte_flow_error_set(error, EINVAL,
2557 RTE_FLOW_ERROR_TYPE_ACTION, action,
2558 "set_tag has no effect"
2559 " with terminal actions");
2564 * Validate count action.
2567 * Pointer to rte_eth_dev structure.
2569 * Pointer to error structure.
2572 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the COUNT action: DevX must be enabled and the build must have
 * DevX flow counter support (HAVE_IBV_FLOW_DEVX_COUNTERS), otherwise the
 * action is reported as unsupported.
 */
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576 struct rte_flow_error *error)
2578 struct mlx5_priv *priv = dev->data->dev_private;
2580 if (!priv->config.devx)
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2586 return rte_flow_error_set
2588 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2590 "count action not supported");
2594 * Validate the L2 encap action.
2597 * Pointer to the rte_eth_dev structure.
2598 * @param[in] action_flags
2599 * Holds the actions detected until now.
2601 * Pointer to the action structure.
2603 * Pointer to flow attributes.
2605 * Pointer to error structure.
2608 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an L2 encap action: configuration must be non-NULL, only a
 * single encap action is allowed per flow, and encap on a NIC table is
 * not supported for VF representors (non-transfer rules).
 */
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612 uint64_t action_flags,
2613 const struct rte_flow_action *action,
2614 const struct rte_flow_attr *attr,
2615 struct rte_flow_error *error)
2617 const struct mlx5_priv *priv = dev->data->dev_private;
2619 if (!(action->conf))
2620 return rte_flow_error_set(error, EINVAL,
2621 RTE_FLOW_ERROR_TYPE_ACTION, action,
2622 "configuration cannot be null");
2623 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624 return rte_flow_error_set(error, EINVAL,
2625 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626 "can only have a single encap action "
2628 if (!attr->transfer && priv->representor)
2629 return rte_flow_error_set(error, ENOTSUP,
2630 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631 "encap action for VF representor "
2632 "not supported on NIC table");
2637 * Validate a decap action.
2640 * Pointer to the rte_eth_dev structure.
2641 * @param[in] action_flags
2642 * Holds the actions detected until now.
2644 * Pointer to flow attributes
2646 * Pointer to error structure.
2649 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a decap action.  Rejected when decap is administratively
 * disabled (decap_en devarg) while scatter-FCS-with-decap is disabled in
 * HW caps, when an encap/decap action is already present (XCAP), when a
 * modify-header action precedes it, on egress, and for VF representors
 * on NIC tables.
 */
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653 uint64_t action_flags,
2654 const struct rte_flow_attr *attr,
2655 struct rte_flow_error *error)
2657 const struct mlx5_priv *priv = dev->data->dev_private;
2659 if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660 !priv->config.decap_en)
2661 return rte_flow_error_set(error, ENOTSUP,
2662 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663 "decap is not enabled");
/* Distinguish duplicate-decap from decap-after-encap in the message. */
2664 if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665 return rte_flow_error_set(error, ENOTSUP,
2666 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2668 MLX5_FLOW_ACTION_DECAP ? "can only "
2669 "have a single decap action" : "decap "
2670 "after encap is not supported");
2671 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672 return rte_flow_error_set(error, EINVAL,
2673 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674 "can't have decap action after"
2677 return rte_flow_error_set(error, ENOTSUP,
2678 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2680 "decap action not supported for "
2682 if (!attr->transfer && priv->representor)
2683 return rte_flow_error_set(error, ENOTSUP,
2684 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685 "decap action for VF representor "
2686 "not supported on NIC table");
/* Canonical empty raw-decap configuration (no data, zero size). */
2690 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2693 * Validate the raw encap and decap actions.
2696 * Pointer to the rte_eth_dev structure.
2698 * Pointer to the decap action.
2700 * Pointer to the encap action.
2702 * Pointer to flow attributes
2703 * @param[in/out] action_flags
2704 * Holds the actions detected until now.
2705 * @param[out] actions_n
2706 * pointer to the number of actions counter.
2708 * Pointer to error structure.
2711 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a raw encap and/or raw decap action pair.  When both are
 * present, the combination of sizes relative to
 * MLX5_ENCAPSULATION_DECISION_SIZE determines whether the pair maps to an
 * L3 decap + encap, a decap + L3 encap, or two plain L2 reformat actions;
 * a small-decap + small-encap combination is unsupported.  Updates
 * *action_flags (and, per the doxygen above, *actions_n) for the caller.
 */
2714 flow_dv_validate_action_raw_encap_decap
2715 (struct rte_eth_dev *dev,
2716 const struct rte_flow_action_raw_decap *decap,
2717 const struct rte_flow_action_raw_encap *encap,
2718 const struct rte_flow_attr *attr, uint64_t *action_flags,
2719 int *actions_n, struct rte_flow_error *error)
2721 const struct mlx5_priv *priv = dev->data->dev_private;
2724 if (encap && (!encap->size || !encap->data))
2725 return rte_flow_error_set(error, EINVAL,
2726 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727 "raw encap data cannot be empty");
2728 if (decap && encap) {
/* Classify the reformat by which sides cross the L2/L3 size boundary. */
2729 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2730 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2733 else if (encap->size <=
2734 MLX5_ENCAPSULATION_DECISION_SIZE &&
2736 MLX5_ENCAPSULATION_DECISION_SIZE)
2739 else if (encap->size >
2740 MLX5_ENCAPSULATION_DECISION_SIZE &&
2742 MLX5_ENCAPSULATION_DECISION_SIZE)
2743 /* 2 L2 actions: encap and decap. */
2746 return rte_flow_error_set(error,
2748 RTE_FLOW_ERROR_TYPE_ACTION,
2749 NULL, "unsupported too small "
2750 "raw decap and too small raw "
2751 "encap combination");
2754 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2758 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2762 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2763 return rte_flow_error_set(error, ENOTSUP,
2764 RTE_FLOW_ERROR_TYPE_ACTION,
2766 "small raw encap size");
2767 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2768 return rte_flow_error_set(error, EINVAL,
2769 RTE_FLOW_ERROR_TYPE_ACTION,
2771 "more than one encap action");
2772 if (!attr->transfer && priv->representor)
2773 return rte_flow_error_set
2775 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776 "encap action for VF representor "
2777 "not supported on NIC table");
2778 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2785 * Match encap_decap resource.
2788 * Pointer to the hash list.
2790 * Pointer to exist resource entry object.
2792 * Key of the new entry.
2794 * Pointer to new encap_decap resource.
2797 * 0 on matching, none-zero otherwise.
/*
 * Hash-list match callback for encap/decap resources: compares key,
 * reformat type, flow table type, flags, size and the raw reformat
 * buffer bytes.  Returns 0 on a full match (hash-list convention).
 */
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801 struct mlx5_hlist_entry *entry,
2802 uint64_t key __rte_unused, void *cb_ctx)
2804 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2808 cache_resource = container_of(entry,
2809 struct mlx5_flow_dv_encap_decap_resource,
2811 if (resource->entry.key == cache_resource->entry.key &&
2812 resource->reformat_type == cache_resource->reformat_type &&
2813 resource->ft_type == cache_resource->ft_type &&
2814 resource->flags == cache_resource->flags &&
2815 resource->size == cache_resource->size &&
2816 !memcmp((const void *)resource->buf,
2817 (const void *)cache_resource->buf,
2824 * Allocate encap_decap resource.
2827 * Pointer to the hash list.
2829 * Pointer to exist resource entry object.
2831 * Pointer to new encap_decap resource.
2834 * 0 on matching, none-zero otherwise.
/*
 * Hash-list create callback for encap/decap resources: selects the DR
 * domain from the flow table type (FDB/RX/TX), allocates a resource from
 * the DECAP_ENCAP ipool, copies the template, and creates the packet
 * reformat action.  On action-creation failure the ipool entry is freed.
 * Returns the new entry, or (per the early error paths) reports ENOMEM
 * via ctx->error.
 */
2836 struct mlx5_hlist_entry *
2837 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2838 uint64_t key __rte_unused,
2841 struct mlx5_dev_ctx_shared *sh = list->ctx;
2842 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2843 struct mlx5dv_dr_domain *domain;
2844 struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2845 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2849 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2850 domain = sh->fdb_domain;
2851 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2852 domain = sh->rx_domain;
2854 domain = sh->tx_domain;
2855 /* Register new encap/decap resource. */
2856 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2858 if (!cache_resource) {
2859 rte_flow_error_set(ctx->error, ENOMEM,
2860 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2861 "cannot allocate resource memory");
2864 *cache_resource = *resource;
2865 cache_resource->idx = idx;
2866 ret = mlx5_flow_os_create_flow_action_packet_reformat
2867 (sh->ctx, domain, cache_resource,
2868 &cache_resource->action);
2870 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2871 rte_flow_error_set(ctx->error, ENOMEM,
2872 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2873 NULL, "cannot create action");
2877 return &cache_resource->entry;
2881 * Find existing encap/decap resource or create and register a new one.
2883 * @param[in, out] dev
2884 * Pointer to rte_eth_dev structure.
2885 * @param[in, out] resource
2886 * Pointer to encap/decap resource.
2887 * @parm[in, out] dev_flow
2888 * Pointer to the dev_flow.
2890 * pointer to error structure.
2893 * 0 on success otherwise -errno and errno is set.
/*
 * Look up or create an encap/decap resource in the shared hash list.
 * Builds a 64-bit key from ft_type, reformat type, buffer size, table
 * level (group!=0) and a checksum of the reformat buffer, then registers
 * it; on success stores the resource pointer and its ipool index in the
 * dev_flow handle.
 */
2896 flow_dv_encap_decap_resource_register
2897 (struct rte_eth_dev *dev,
2898 struct mlx5_flow_dv_encap_decap_resource *resource,
2899 struct mlx5_flow *dev_flow,
2900 struct rte_flow_error *error)
2902 struct mlx5_priv *priv = dev->data->dev_private;
2903 struct mlx5_dev_ctx_shared *sh = priv->sh;
2904 struct mlx5_hlist_entry *entry;
2905 union mlx5_flow_encap_decap_key encap_decap_key = {
2907 .ft_type = resource->ft_type,
2908 .refmt_type = resource->reformat_type,
2909 .buf_size = resource->size,
2910 .table_level = !!dev_flow->dv.group,
2914 struct mlx5_flow_cb_ctx ctx = {
/* flags==1 marks a root-table (group 0) resource. */
2919 resource->flags = dev_flow->dv.group ? 0 : 1;
2920 encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2922 resource->entry.key = encap_decap_key.v64;
2923 entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2927 resource = container_of(entry, typeof(*resource), entry);
2928 dev_flow->dv.encap_decap = resource;
2929 dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2934 * Find existing table jump resource or create and register a new one.
2936 * @param[in, out] dev
2937 * Pointer to rte_eth_dev structure.
2938 * @param[in, out] tbl
2939 * Pointer to flow table resource.
2940 * @parm[in, out] dev_flow
2941 * Pointer to the dev_flow.
2943 * pointer to error structure.
2946 * 0 on success otherwise -errno and errno is set.
/*
 * Attach the jump action embedded in the flow table entry to the
 * dev_flow: the jump action is created together with the table, so this
 * only records the table's index and jump pointer in the dev_flow.
 */
2949 flow_dv_jump_tbl_resource_register
2950 (struct rte_eth_dev *dev __rte_unused,
2951 struct mlx5_flow_tbl_resource *tbl,
2952 struct mlx5_flow *dev_flow,
2953 struct rte_flow_error *error __rte_unused)
2955 struct mlx5_flow_tbl_data_entry *tbl_data =
2956 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2959 MLX5_ASSERT(tbl_data->jump.action);
2960 dev_flow->handle->rix_jump = tbl_data->idx;
2961 dev_flow->dv.jump = &tbl_data->jump;
/*
 * Cache-list match callback for port_id actions: entries are equal iff
 * the port_id values match (returns 0 on match, non-zero otherwise).
 */
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967 struct mlx5_cache_entry *entry, void *cb_ctx)
2969 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971 struct mlx5_flow_dv_port_id_action_resource *res =
2972 container_of(entry, typeof(*res), entry);
2974 return ref->port_id != res->port_id;
/*
 * Cache-list create callback for port_id actions: allocates from the
 * PORT_ID ipool and creates an FDB dest-port action for the referenced
 * port.  Frees the ipool entry and reports ENOMEM via ctx->error on
 * failure.
 */
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979 struct mlx5_cache_entry *entry __rte_unused,
2982 struct mlx5_dev_ctx_shared *sh = list->ctx;
2983 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985 struct mlx5_flow_dv_port_id_action_resource *cache;
2989 /* Register new port id action resource. */
2990 cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2992 rte_flow_error_set(ctx->error, ENOMEM,
2993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994 "cannot allocate port_id action cache memory");
2998 ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3002 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003 rte_flow_error_set(ctx->error, ENOMEM,
3004 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005 "cannot create action");
3008 return &cache->entry;
3012 * Find existing table port ID resource or create and register a new one.
3014 * @param[in, out] dev
3015 * Pointer to rte_eth_dev structure.
3016 * @param[in, out] resource
3017 * Pointer to port ID action resource.
3018 * @parm[in, out] dev_flow
3019 * Pointer to the dev_flow.
3021 * pointer to error structure.
3024 * 0 on success otherwise -errno and errno is set.
/*
 * Look up or create a port_id action via the shared cache list and record
 * the resulting resource pointer and ipool index in the dev_flow handle.
 */
3027 flow_dv_port_id_action_resource_register
3028 (struct rte_eth_dev *dev,
3029 struct mlx5_flow_dv_port_id_action_resource *resource,
3030 struct mlx5_flow *dev_flow,
3031 struct rte_flow_error *error)
3033 struct mlx5_priv *priv = dev->data->dev_private;
3034 struct mlx5_cache_entry *entry;
3035 struct mlx5_flow_dv_port_id_action_resource *cache;
3036 struct mlx5_flow_cb_ctx ctx = {
3041 entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3044 cache = container_of(entry, typeof(*cache), entry);
3045 dev_flow->dv.port_id_action = cache;
3046 dev_flow->handle->rix_port_id_action = cache->idx;
/*
 * Cache-list match callback for push-VLAN actions: entries are equal iff
 * both the VLAN tag and the flow table type match (returns 0 on match).
 */
3051 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3052 struct mlx5_cache_entry *entry, void *cb_ctx)
3054 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056 struct mlx5_flow_dv_push_vlan_action_resource *res =
3057 container_of(entry, typeof(*res), entry);
3059 return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
/*
 * Cache-list create callback for push-VLAN actions: allocates from the
 * PUSH_VLAN ipool, selects the DR domain from the flow table type
 * (FDB/RX/TX) and creates the push-VLAN action for the requested tag.
 * Frees the ipool entry and reports ENOMEM via ctx->error on failure.
 */
3062 struct mlx5_cache_entry *
3063 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3064 struct mlx5_cache_entry *entry __rte_unused,
3067 struct mlx5_dev_ctx_shared *sh = list->ctx;
3068 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3069 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3070 struct mlx5_flow_dv_push_vlan_action_resource *cache;
3071 struct mlx5dv_dr_domain *domain;
3075 /* Register new port id action resource. */
3076 cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3078 rte_flow_error_set(ctx->error, ENOMEM,
3079 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3080 "cannot allocate push_vlan action cache memory");
3084 if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3085 domain = sh->fdb_domain;
3086 else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3087 domain = sh->rx_domain;
3089 domain = sh->tx_domain;
3090 ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3093 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3094 rte_flow_error_set(ctx->error, ENOMEM,
3095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096 "cannot create push vlan action");
3099 return &cache->entry;
3103 * Find existing push vlan resource or create and register a new one.
3105 * @param [in, out] dev
3106 * Pointer to rte_eth_dev structure.
3107 * @param[in, out] resource
3108 * Pointer to port ID action resource.
3109 * @parm[in, out] dev_flow
3110 * Pointer to the dev_flow.
3112 * pointer to error structure.
3115 * 0 on success otherwise -errno and errno is set.
/*
 * Look up or create a push-VLAN action via the shared cache list and
 * record the resulting resource pointer and ipool index in the dev_flow
 * handle.
 */
3118 flow_dv_push_vlan_action_resource_register
3119 (struct rte_eth_dev *dev,
3120 struct mlx5_flow_dv_push_vlan_action_resource *resource,
3121 struct mlx5_flow *dev_flow,
3122 struct rte_flow_error *error)
3124 struct mlx5_priv *priv = dev->data->dev_private;
3125 struct mlx5_flow_dv_push_vlan_action_resource *cache;
3126 struct mlx5_cache_entry *entry;
3127 struct mlx5_flow_cb_ctx ctx = {
3132 entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3135 cache = container_of(entry, typeof(*cache), entry);
3137 dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3138 dev_flow->dv.push_vlan_res = cache;
3143 * Get the size of specific rte_flow_item_type hdr size
3145 * @param[in] item_type
3146 * Tested rte_flow_item_type.
3149 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * Map an rte_flow item type to the size of its wire header, used to
 * lay out the raw encapsulation buffer.  GRE and NVGRE both map to
 * sizeof(struct rte_gre_hdr); VOID (and, per the doxygen above,
 * irrelevant types) yield 0.
 */
3152 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3156 switch (item_type) {
3157 case RTE_FLOW_ITEM_TYPE_ETH:
3158 retval = sizeof(struct rte_ether_hdr);
3160 case RTE_FLOW_ITEM_TYPE_VLAN:
3161 retval = sizeof(struct rte_vlan_hdr);
3163 case RTE_FLOW_ITEM_TYPE_IPV4:
3164 retval = sizeof(struct rte_ipv4_hdr);
3166 case RTE_FLOW_ITEM_TYPE_IPV6:
3167 retval = sizeof(struct rte_ipv6_hdr);
3169 case RTE_FLOW_ITEM_TYPE_UDP:
3170 retval = sizeof(struct rte_udp_hdr);
3172 case RTE_FLOW_ITEM_TYPE_TCP:
3173 retval = sizeof(struct rte_tcp_hdr);
3175 case RTE_FLOW_ITEM_TYPE_VXLAN:
3176 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3177 retval = sizeof(struct rte_vxlan_hdr);
3179 case RTE_FLOW_ITEM_TYPE_GRE:
3180 case RTE_FLOW_ITEM_TYPE_NVGRE:
3181 retval = sizeof(struct rte_gre_hdr);
3183 case RTE_FLOW_ITEM_TYPE_MPLS:
3184 retval = sizeof(struct rte_mpls_hdr);
3186 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/*
 * Default header-field values filled in by flow_dv_convert_encap_data()
 * when the user-supplied encap items leave them zero.
 */
3194 #define MLX5_ENCAP_IPV4_VERSION 0x40
3195 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
3196 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
3197 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
3198 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
3199 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
3200 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
3203 * Convert the encap action data from list of rte_flow_item to raw buffer
3206 * Pointer to rte_flow_item objects list.
3208 * Pointer to the output buffer.
3210 * Pointer to the output buffer size.
3212 * Pointer to the error structure.
3215 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Serialize an encap item list (ETH/VLAN/IPv4/IPv6/UDP/VXLAN/VXLAN-GPE/
 * GRE/NVGRE) into a contiguous raw header buffer for the HW reformat
 * action.  Each item's spec is memcpy'd at the running offset; fields a
 * lower layer must agree on (ether_type, IP next-proto, UDP dst port,
 * VXLAN flags) and sane defaults (IPv4 version/IHL/TTL, IPv6
 * vtc_flow/hop_limit) are filled in only when the user left them zero.
 * Total size is bounded by MLX5_ENCAP_MAX_LEN.
 */
3218 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3219 size_t *size, struct rte_flow_error *error)
3221 struct rte_ether_hdr *eth = NULL;
3222 struct rte_vlan_hdr *vlan = NULL;
3223 struct rte_ipv4_hdr *ipv4 = NULL;
3224 struct rte_ipv6_hdr *ipv6 = NULL;
3225 struct rte_udp_hdr *udp = NULL;
3226 struct rte_vxlan_hdr *vxlan = NULL;
3227 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3228 struct rte_gre_hdr *gre = NULL;
3230 size_t temp_size = 0;
3233 return rte_flow_error_set(error, EINVAL,
3234 RTE_FLOW_ERROR_TYPE_ACTION,
3235 NULL, "invalid empty data");
3236 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3237 len = flow_dv_get_item_hdr_len(items->type);
3238 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3239 return rte_flow_error_set(error, EINVAL,
3240 RTE_FLOW_ERROR_TYPE_ACTION,
3241 (void *)items->type,
3242 "items total size is too big"
3243 " for encap action");
/* Copy the item spec verbatim, then patch inter-layer fields below. */
3244 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3245 switch (items->type) {
3246 case RTE_FLOW_ITEM_TYPE_ETH:
3247 eth = (struct rte_ether_hdr *)&buf[temp_size];
3249 case RTE_FLOW_ITEM_TYPE_VLAN:
3250 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3252 return rte_flow_error_set(error, EINVAL,
3253 RTE_FLOW_ERROR_TYPE_ACTION,
3254 (void *)items->type,
3255 "eth header not found");
3256 if (!eth->ether_type)
3257 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3259 case RTE_FLOW_ITEM_TYPE_IPV4:
3260 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3262 return rte_flow_error_set(error, EINVAL,
3263 RTE_FLOW_ERROR_TYPE_ACTION,
3264 (void *)items->type,
3265 "neither eth nor vlan"
3267 if (vlan && !vlan->eth_proto)
3268 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3269 else if (eth && !eth->ether_type)
3270 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3271 if (!ipv4->version_ihl)
3272 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3273 MLX5_ENCAP_IPV4_IHL_MIN;
3274 if (!ipv4->time_to_live)
3275 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3277 case RTE_FLOW_ITEM_TYPE_IPV6:
3278 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3280 return rte_flow_error_set(error, EINVAL,
3281 RTE_FLOW_ERROR_TYPE_ACTION,
3282 (void *)items->type,
3283 "neither eth nor vlan"
3285 if (vlan && !vlan->eth_proto)
3286 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3287 else if (eth && !eth->ether_type)
3288 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3289 if (!ipv6->vtc_flow)
3291 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3292 if (!ipv6->hop_limits)
3293 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3295 case RTE_FLOW_ITEM_TYPE_UDP:
3296 udp = (struct rte_udp_hdr *)&buf[temp_size];
3298 return rte_flow_error_set(error, EINVAL,
3299 RTE_FLOW_ERROR_TYPE_ACTION,
3300 (void *)items->type,
3301 "ip header not found");
3302 if (ipv4 && !ipv4->next_proto_id)
3303 ipv4->next_proto_id = IPPROTO_UDP;
3304 else if (ipv6 && !ipv6->proto)
3305 ipv6->proto = IPPROTO_UDP;
3307 case RTE_FLOW_ITEM_TYPE_VXLAN:
3308 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3310 return rte_flow_error_set(error, EINVAL,
3311 RTE_FLOW_ERROR_TYPE_ACTION,
3312 (void *)items->type,
3313 "udp header not found");
3315 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3316 if (!vxlan->vx_flags)
3318 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3320 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3321 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3323 return rte_flow_error_set(error, EINVAL,
3324 RTE_FLOW_ERROR_TYPE_ACTION,
3325 (void *)items->type,
3326 "udp header not found");
/* GPE carries an explicit next-protocol; the user must supply it. */
3327 if (!vxlan_gpe->proto)
3328 return rte_flow_error_set(error, EINVAL,
3329 RTE_FLOW_ERROR_TYPE_ACTION,
3330 (void *)items->type,
3331 "next protocol not found");
3334 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3335 if (!vxlan_gpe->vx_flags)
3336 vxlan_gpe->vx_flags =
3337 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3339 case RTE_FLOW_ITEM_TYPE_GRE:
3340 case RTE_FLOW_ITEM_TYPE_NVGRE:
3341 gre = (struct rte_gre_hdr *)&buf[temp_size];
3343 return rte_flow_error_set(error, EINVAL,
3344 RTE_FLOW_ERROR_TYPE_ACTION,
3345 (void *)items->type,
3346 "next protocol not found");
3348 return rte_flow_error_set(error, EINVAL,
3349 RTE_FLOW_ERROR_TYPE_ACTION,
3350 (void *)items->type,
3351 "ip header not found");
3352 if (ipv4 && !ipv4->next_proto_id)
3353 ipv4->next_proto_id = IPPROTO_GRE;
3354 else if (ipv6 && !ipv6->proto)
3355 ipv6->proto = IPPROTO_GRE;
3357 case RTE_FLOW_ITEM_TYPE_VOID:
3360 return rte_flow_error_set(error, EINVAL,
3361 RTE_FLOW_ERROR_TYPE_ACTION,
3362 (void *)items->type,
3363 "unsupported item type");
/*
 * Zero the UDP checksum of an IPv6 encapsulation header template.  Walks
 * past the Ethernet header and any VLAN/QinQ tags; IPv4 is left alone
 * (HW computes its checksum), non-IP payloads are rejected with ENOTSUP,
 * and for IPv6+UDP the dgram_cksum is cleared.  Non-UDP IPv6 payloads
 * are ignored.
 */
3373 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3375 struct rte_ether_hdr *eth = NULL;
3376 struct rte_vlan_hdr *vlan = NULL;
3377 struct rte_ipv6_hdr *ipv6 = NULL;
3378 struct rte_udp_hdr *udp = NULL;
3382 eth = (struct rte_ether_hdr *)data;
3383 next_hdr = (char *)(eth + 1);
3384 proto = RTE_BE16(eth->ether_type);
/* Skip over any number of stacked VLAN/QinQ tags. */
3387 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3388 vlan = (struct rte_vlan_hdr *)next_hdr;
3389 proto = RTE_BE16(vlan->eth_proto);
3390 next_hdr += sizeof(struct rte_vlan_hdr);
3393 /* HW calculates IPv4 csum. no need to proceed */
3394 if (proto == RTE_ETHER_TYPE_IPV4)
3397 /* non IPv4/IPv6 header. not supported */
3398 if (proto != RTE_ETHER_TYPE_IPV6) {
3399 return rte_flow_error_set(error, ENOTSUP,
3400 RTE_FLOW_ERROR_TYPE_ACTION,
3401 NULL, "Cannot offload non IPv4/IPv6");
3404 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3406 /* ignore non UDP */
3407 if (ipv6->proto != IPPROTO_UDP)
3410 udp = (struct rte_udp_hdr *)(ipv6 + 1);
3411 udp->dgram_cksum = 0;
3417 * Convert L2 encap action to DV specification.
3420 * Pointer to rte_eth_dev structure.
3422 * Pointer to action structure.
3423 * @param[in, out] dev_flow
3424 * Pointer to the mlx5_flow.
3425 * @param[in] transfer
3426 * Mark if the flow is E-Switch flow.
3428 * Pointer to the error structure.
3431 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build an L2-to-L2-tunnel encap resource from a RAW_ENCAP buffer or a
 * VXLAN/NVGRE item-list definition, zero the template's UDP checksum for
 * IPv6, and register the resource (FDB table for transfer flows, NIC TX
 * otherwise).
 */
3434 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3435 const struct rte_flow_action *action,
3436 struct mlx5_flow *dev_flow,
3438 struct rte_flow_error *error)
3440 const struct rte_flow_item *encap_data;
3441 const struct rte_flow_action_raw_encap *raw_encap_data;
3442 struct mlx5_flow_dv_encap_decap_resource res = {
3444 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3445 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3446 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
/* RAW_ENCAP supplies the buffer directly; tunnel items are converted. */
3449 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3451 (const struct rte_flow_action_raw_encap *)action->conf;
3452 res.size = raw_encap_data->size;
3453 memcpy(res.buf, raw_encap_data->data, res.size);
3455 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3457 ((const struct rte_flow_action_vxlan_encap *)
3458 action->conf)->definition;
3461 ((const struct rte_flow_action_nvgre_encap *)
3462 action->conf)->definition;
3463 if (flow_dv_convert_encap_data(encap_data, res.buf,
3467 if (flow_dv_zero_encap_udp_csum(res.buf, error))
3469 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3470 return rte_flow_error_set(error, EINVAL,
3471 RTE_FLOW_ERROR_TYPE_ACTION,
3472 NULL, "can't create L2 encap action");
3477 * Convert L2 decap action to DV specification.
3480 * Pointer to rte_eth_dev structure.
3481 * @param[in, out] dev_flow
3482 * Pointer to the mlx5_flow.
3483 * @param[in] transfer
3484 * Mark if the flow is E-Switch flow.
3486 * Pointer to the error structure.
3489 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build and register an L2-tunnel-to-L2 decap resource (no buffer
 * needed).  Targets the FDB table for transfer flows, NIC RX otherwise.
 */
3492 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3493 struct mlx5_flow *dev_flow,
3495 struct rte_flow_error *error)
3497 struct mlx5_flow_dv_encap_decap_resource res = {
3500 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3501 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3502 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3505 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3506 return rte_flow_error_set(error, EINVAL,
3507 RTE_FLOW_ERROR_TYPE_ACTION,
3508 NULL, "can't create L2 decap action");
3513 * Convert raw decap/encap (L3 tunnel) action to DV specification.
3516 * Pointer to rte_eth_dev structure.
3518 * Pointer to action structure.
3519 * @param[in, out] dev_flow
3520 * Pointer to the mlx5_flow.
3522 * Pointer to the flow attributes.
3524 * Pointer to the error structure.
3527 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build and register an L3-tunnel raw reformat resource from a RAW_ENCAP
 * action.  The reformat direction is inferred from the buffer size:
 * below MLX5_ENCAPSULATION_DECISION_SIZE it is L3-tunnel-to-L2 (decap),
 * otherwise L2-to-L3-tunnel (encap).  Table type follows the flow
 * attributes (FDB for transfer, else NIC TX/RX by direction).
 */
3530 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3531 const struct rte_flow_action *action,
3532 struct mlx5_flow *dev_flow,
3533 const struct rte_flow_attr *attr,
3534 struct rte_flow_error *error)
3536 const struct rte_flow_action_raw_encap *encap_data;
3537 struct mlx5_flow_dv_encap_decap_resource res;
3539 memset(&res, 0, sizeof(res));
3540 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3541 res.size = encap_data->size;
3542 memcpy(res.buf, encap_data->data, res.size);
3543 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3544 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3545 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3547 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3549 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3550 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3551 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3552 return rte_flow_error_set(error, EINVAL,
3553 RTE_FLOW_ERROR_TYPE_ACTION,
3554 NULL, "can't create encap action");
3559 * Create action push VLAN.
3562 * Pointer to rte_eth_dev structure.
3564 * Pointer to the flow attributes.
3566 * Pointer to the vlan to push to the Ethernet header.
3567 * @param[in, out] dev_flow
3568 * Pointer to the mlx5_flow.
3570 * Pointer to the error structure.
3573 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Build a push-VLAN action resource (TPID in the upper 16 bits of the
 * big-endian tag word) and register it for the device flow.
 * NOTE(review): listing has gaps; the line combining vlan_tci into the tag
 * and the 'if (attr->transfer)' selector are not fully visible — confirm.
 */
3576 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3577 const struct rte_flow_attr *attr,
3578 const struct rte_vlan_hdr *vlan,
3579 struct mlx5_flow *dev_flow,
3580 struct rte_flow_error *error)
3582 struct mlx5_flow_dv_push_vlan_action_resource res;
3584 memset(&res, 0, sizeof(res));
3586 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3589 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3591 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3592 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3593 return flow_dv_push_vlan_action_resource_register
3594 (dev, &res, dev_flow, error);
/*
 * Non-zero while an E-Switch (FDB) mirror/sample flow is active; consulted
 * by the validators below to reject orderings the hardware cannot mirror.
 * NOTE(review): a single file-scope flag is shared across all ports and
 * flows of the process — confirm this global scope is intended.
 */
3597 static int fdb_mirror;
3600 * Validate the modify-header actions.
3602 * @param[in] action_flags
3603 * Holds the actions detected until now.
3605 * Pointer to the modify action.
3607 * Pointer to error structure.
3610 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Generic validation shared by all modify-header actions: the action must
 * carry a configuration (DEC_TTL is exempt), must not follow an encap, and
 * must not follow a sample action while an FDB mirror is active.
 * NOTE(review): listing has gaps; braces and the final 'return 0;' are not
 * visible here.
 */
3613 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3614 const struct rte_flow_action *action,
3615 struct rte_flow_error *error)
/* DEC_TTL is the only modify action allowed to have a NULL conf. */
3617 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3618 return rte_flow_error_set(error, EINVAL,
3619 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3620 NULL, "action configuration not set");
3621 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3622 return rte_flow_error_set(error, EINVAL,
3623 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3624 "can't have encap action before"
3626 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3627 return rte_flow_error_set(error, EINVAL,
3628 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3629 "can't support sample action before"
3630 " modify action for E-Switch"
3636 * Validate the modify-header MAC address actions.
3638 * @param[in] action_flags
3639 * Holds the actions detected until now.
3641 * Pointer to the modify action.
3642 * @param[in] item_flags
3643 * Holds the items detected.
3645 * Pointer to error structure.
3648 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-MAC modify action: generic modify-header checks plus the
 * pattern must contain an L2 (Ethernet) item.
 * NOTE(review): listing has gaps; the 'int ret;' declaration, the
 * 'if (ret) return ret;' guard and the success return are not visible.
 */
3651 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3652 const struct rte_flow_action *action,
3653 const uint64_t item_flags,
3654 struct rte_flow_error *error)
3658 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3660 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3661 return rte_flow_error_set(error, EINVAL,
3662 RTE_FLOW_ERROR_TYPE_ACTION,
3664 "no L2 item in pattern");
3670 * Validate the modify-header IPv4 address actions.
3672 * @param[in] action_flags
3673 * Holds the actions detected until now.
3675 * Pointer to the modify action.
3676 * @param[in] item_flags
3677 * Holds the items detected.
3679 * Pointer to error structure.
3682 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv4-address modify action: after a decap the inner IPv4
 * header is the one modified, otherwise the outer one must be matched.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3685 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3686 const struct rte_flow_action *action,
3687 const uint64_t item_flags,
3688 struct rte_flow_error *error)
3693 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
/* Decap shifts the modified header from outer to inner IPv4. */
3695 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3696 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3697 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3698 if (!(item_flags & layer))
3699 return rte_flow_error_set(error, EINVAL,
3700 RTE_FLOW_ERROR_TYPE_ACTION,
3702 "no ipv4 item in pattern");
3708 * Validate the modify-header IPv6 address actions.
3710 * @param[in] action_flags
3711 * Holds the actions detected until now.
3713 * Pointer to the modify action.
3714 * @param[in] item_flags
3715 * Holds the items detected.
3717 * Pointer to error structure.
3720 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv6-address modify action: mirrors the IPv4 variant —
 * the (inner after decap, outer otherwise) IPv6 item must be present.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3723 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3724 const struct rte_flow_action *action,
3725 const uint64_t item_flags,
3726 struct rte_flow_error *error)
3731 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3733 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3734 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3735 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3736 if (!(item_flags & layer))
3737 return rte_flow_error_set(error, EINVAL,
3738 RTE_FLOW_ERROR_TYPE_ACTION,
3740 "no ipv6 item in pattern");
3746 * Validate the modify-header TP actions.
3748 * @param[in] action_flags
3749 * Holds the actions detected until now.
3751 * Pointer to the modify action.
3752 * @param[in] item_flags
3753 * Holds the items detected.
3755 * Pointer to error structure.
3758 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-TP (transport port) modify action: requires an L4 item,
 * inner after decap, outer otherwise.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3761 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3762 const struct rte_flow_action *action,
3763 const uint64_t item_flags,
3764 struct rte_flow_error *error)
3769 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3771 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772 MLX5_FLOW_LAYER_INNER_L4 :
3773 MLX5_FLOW_LAYER_OUTER_L4;
3774 if (!(item_flags & layer))
3775 return rte_flow_error_set(error, EINVAL,
3776 RTE_FLOW_ERROR_TYPE_ACTION,
3777 NULL, "no transport layer "
3784 * Validate the modify-header actions of increment/decrement
3785 * TCP Sequence-number.
3787 * @param[in] action_flags
3788 * Holds the actions detected until now.
3790 * Pointer to the modify action.
3791 * @param[in] item_flags
3792 * Holds the items detected.
3794 * Pointer to error structure.
3797 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an INC/DEC TCP sequence-number action: requires a TCP item
 * (inner after decap) and rejects mixing increment with decrement of the
 * sequence number within the same flow.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3800 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3801 const struct rte_flow_action *action,
3802 const uint64_t item_flags,
3803 struct rte_flow_error *error)
3808 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3810 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3811 MLX5_FLOW_LAYER_INNER_L4_TCP :
3812 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3813 if (!(item_flags & layer))
3814 return rte_flow_error_set(error, EINVAL,
3815 RTE_FLOW_ERROR_TYPE_ACTION,
3816 NULL, "no TCP item in"
/* INC and DEC of the same field are mutually exclusive in one flow. */
3818 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3819 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3820 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3821 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3822 return rte_flow_error_set(error, EINVAL,
3823 RTE_FLOW_ERROR_TYPE_ACTION,
3825 "cannot decrease and increase"
3826 " TCP sequence number"
3827 " at the same time");
3833 * Validate the modify-header actions of increment/decrement
3834 * TCP Acknowledgment number.
3836 * @param[in] action_flags
3837 * Holds the actions detected until now.
3839 * Pointer to the modify action.
3840 * @param[in] item_flags
3841 * Holds the items detected.
3843 * Pointer to error structure.
3846 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an INC/DEC TCP acknowledgment-number action: same structure as
 * the sequence-number validator — TCP item required, INC/DEC exclusive.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3849 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3850 const struct rte_flow_action *action,
3851 const uint64_t item_flags,
3852 struct rte_flow_error *error)
3857 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3859 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3860 MLX5_FLOW_LAYER_INNER_L4_TCP :
3861 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3862 if (!(item_flags & layer))
3863 return rte_flow_error_set(error, EINVAL,
3864 RTE_FLOW_ERROR_TYPE_ACTION,
3865 NULL, "no TCP item in"
3867 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3868 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3869 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3870 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3871 return rte_flow_error_set(error, EINVAL,
3872 RTE_FLOW_ERROR_TYPE_ACTION,
3874 "cannot decrease and increase"
3875 " TCP acknowledgment number"
3876 " at the same time");
3882 * Validate the modify-header TTL actions.
3884 * @param[in] action_flags
3885 * Holds the actions detected until now.
3887 * Pointer to the modify action.
3888 * @param[in] item_flags
3889 * Holds the items detected.
3891 * Pointer to error structure.
3894 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set/dec TTL modify action: requires an L3 item, inner after
 * decap, outer otherwise.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
3897 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3898 const struct rte_flow_action *action,
3899 const uint64_t item_flags,
3900 struct rte_flow_error *error)
3905 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3907 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3908 MLX5_FLOW_LAYER_INNER_L3 :
3909 MLX5_FLOW_LAYER_OUTER_L3;
3910 if (!(item_flags & layer))
3911 return rte_flow_error_set(error, EINVAL,
3912 RTE_FLOW_ERROR_TYPE_ACTION,
3914 "no IP protocol in pattern");
3920 * Validate jump action.
3923 * Pointer to the jump action.
3924 * @param[in] action_flags
3925 * Holds the actions detected until now.
3926 * @param[in] attributes
3927 * Pointer to flow attributes
3928 * @param[in] external
3929 * Action belongs to flow rule created by request external to PMD.
3931 * Pointer to error structure.
3934 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a JUMP action: only one fate action per flow, no combination
 * with meter, no jump after a sampled FDB mirror, the target group must
 * translate to a valid table, and a flow may not jump to its own group
 * unless tunnel-offload set/match is involved.
 * NOTE(review): listing has gaps; 'int ret;', the NULL-conf check
 * condition, 'if (ret) return ret;' and 'return 0;' are not visible.
 */
3937 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3938 const struct mlx5_flow_tunnel *tunnel,
3939 const struct rte_flow_action *action,
3940 uint64_t action_flags,
3941 const struct rte_flow_attr *attributes,
3942 bool external, struct rte_flow_error *error)
3944 uint32_t target_group, table;
3946 struct flow_grp_info grp_info = {
3947 .external = !!external,
3948 .transfer = !!attributes->transfer,
/* JUMP is a fate action: reject if another fate action already seen. */
3952 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3953 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3954 return rte_flow_error_set(error, EINVAL,
3955 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3956 "can't have 2 fate actions in"
3958 if (action_flags & MLX5_FLOW_ACTION_METER)
3959 return rte_flow_error_set(error, ENOTSUP,
3960 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3961 "jump with meter not support")
3962 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3963 return rte_flow_error_set(error, EINVAL,
3964 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3965 "E-Switch mirroring can't support"
3966 " Sample action and jump action in"
3969 return rte_flow_error_set(error, EINVAL,
3970 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3971 NULL, "action configuration not set");
3973 ((const struct rte_flow_action_jump *)action->conf)->group;
/* Translate the rte_flow group number into the device table id. */
3974 ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
/* Self-jump allowed only for tunnel-offload set/match flows. */
3978 if (attributes->group == target_group &&
3979 !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3980 MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3981 return rte_flow_error_set(error, EINVAL,
3982 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983 "target group must be other than"
3984 " the current flow group");
3989 * Validate the port_id action.
3992 * Pointer to rte_eth_dev structure.
3993 * @param[in] action_flags
3994 * Bit-fields that holds the actions detected until now.
3996 * Port_id RTE action structure.
3998 * Attributes of flow that includes this action.
4000 * Pointer to error structure.
4003 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a PORT_ID action: only legal on transfer (E-Switch) flows, the
 * configuration must be present, it is a fate action (only one allowed),
 * and the destination port must belong to the same E-Switch domain as the
 * device the rule is created on.
 * NOTE(review): listing has gaps; 'uint16_t port;', NULL checks on
 * dev_priv/act_priv and the success return are not visible.
 */
4006 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4007 uint64_t action_flags,
4008 const struct rte_flow_action *action,
4009 const struct rte_flow_attr *attr,
4010 struct rte_flow_error *error)
4012 const struct rte_flow_action_port_id *port_id;
4013 struct mlx5_priv *act_priv;
4014 struct mlx5_priv *dev_priv;
4017 if (!attr->transfer)
4018 return rte_flow_error_set(error, ENOTSUP,
4019 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4021 "port id action is valid in transfer"
4023 if (!action || !action->conf)
4024 return rte_flow_error_set(error, ENOTSUP,
4025 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4027 "port id action parameters must be"
4029 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4030 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4031 return rte_flow_error_set(error, EINVAL,
4032 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033 "can have only one fate actions in"
4035 dev_priv = mlx5_dev_to_eswitch_info(dev);
4037 return rte_flow_error_set(error, rte_errno,
4038 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4040 "failed to obtain E-Switch info");
4041 port_id = action->conf;
/* 'original' selects the issuing port itself as the destination. */
4042 port = port_id->original ? dev->data->port_id : port_id->id;
4043 act_priv = mlx5_port_to_eswitch_info(port, false);
4045 return rte_flow_error_set
4047 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4048 "failed to obtain E-Switch port id for port");
/* Cross-domain redirection is not supported by the E-Switch. */
4049 if (act_priv->domain_id != dev_priv->domain_id)
4050 return rte_flow_error_set
4052 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053 "port does not belong to"
4054 " E-Switch being configured");
4059 * Get the maximum number of modify header actions.
4062 * Pointer to rte_eth_dev structure.
4064 * Flags bits to check if root level.
4067 * Max number of modify header actions device can support.
/*
 * Return the maximum number of modify-header sub-actions supported: the
 * larger MLX5_MAX_MODIFY_NUM off the root table, the conservative
 * MLX5_ROOT_TBL_MODIFY_NUM on the root table (no FW query exists).
 */
4069 static inline unsigned int
4070 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4074 * There's no way to directly query the max capacity from FW.
4075 * The maximal value on root table should be assumed to be supported.
4077 if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4078 return MLX5_MAX_MODIFY_NUM;
4080 return MLX5_ROOT_TBL_MODIFY_NUM;
4084 * Validate the meter action.
4087 * Pointer to rte_eth_dev structure.
4088 * @param[in] action_flags
4089 * Bit-fields that holds the actions detected until now.
4091 * Pointer to the meter action.
4093 * Attributes of flow that includes this action.
4095 * Pointer to error structure.
4098 * 0 on success, a negative errno value otherwise and rte_ernno is set.
/*
 * Validate a METER action: configuration required, no meter chaining, no
 * combination with JUMP, the meter id must resolve to an existing meter,
 * and a shared (referenced) meter's direction/transfer attributes must be
 * compatible with this flow's attributes.
 * NOTE(review): listing has gaps; the meter-support capability check, the
 * NULL-fm check, the "Meter not found" message line and the success
 * return are not visible here.
 */
4101 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4102 uint64_t action_flags,
4103 const struct rte_flow_action *action,
4104 const struct rte_flow_attr *attr,
4105 struct rte_flow_error *error)
4107 struct mlx5_priv *priv = dev->data->dev_private;
4108 const struct rte_flow_action_meter *am = action->conf;
4109 struct mlx5_flow_meter *fm;
4112 return rte_flow_error_set(error, EINVAL,
4113 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4114 "meter action conf is NULL");
4116 if (action_flags & MLX5_FLOW_ACTION_METER)
4117 return rte_flow_error_set(error, ENOTSUP,
4118 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119 "meter chaining not support");
4120 if (action_flags & MLX5_FLOW_ACTION_JUMP)
4121 return rte_flow_error_set(error, ENOTSUP,
4122 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4123 "meter with jump not support");
4125 return rte_flow_error_set(error, ENOTSUP,
4126 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4128 "meter action not supported");
4129 fm = mlx5_flow_meter_find(priv, am->mtr_id);
4131 return rte_flow_error_set(error, EINVAL,
4132 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
/*
 * A meter already attached to other flows (ref_cnt) must keep direction
 * and transfer attributes consistent with the new flow.
 */
4134 if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4135 (!fm->ingress && !attr->ingress && attr->egress) ||
4136 (!fm->egress && !attr->egress && attr->ingress))))
4137 return rte_flow_error_set(error, EINVAL,
4138 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4139 "Flow attributes are either invalid "
4140 "or have a conflict with current "
4141 "meter attributes");
4146 * Validate the age action.
4148 * @param[in] action_flags
4149 * Holds the actions detected until now.
4151 * Pointer to the age action.
4153 * Pointer to the Ethernet device structure.
4155 * Pointer to error structure.
4158 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate an AGE action: requires DevX counters without the fallback
 * query mode, a non-NULL configuration, a non-zero timeout, and at most
 * one AGE action per flow.
 * NOTE(review): listing has gaps; the final 'return 0;' is not visible.
 */
4161 flow_dv_validate_action_age(uint64_t action_flags,
4162 const struct rte_flow_action *action,
4163 struct rte_eth_dev *dev,
4164 struct rte_flow_error *error)
4166 struct mlx5_priv *priv = dev->data->dev_private;
4167 const struct rte_flow_action_age *age = action->conf;
/* Aging relies on batched DevX counter queries; fallback mode can't. */
4169 if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4170 return rte_flow_error_set(error, ENOTSUP,
4171 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4173 "age action not supported");
4174 if (!(action->conf))
4175 return rte_flow_error_set(error, EINVAL,
4176 RTE_FLOW_ERROR_TYPE_ACTION, action,
4177 "configuration cannot be null");
4178 if (!(age->timeout))
4179 return rte_flow_error_set(error, EINVAL,
4180 RTE_FLOW_ERROR_TYPE_ACTION, action,
4181 "invalid timeout value 0");
4182 if (action_flags & MLX5_FLOW_ACTION_AGE)
4183 return rte_flow_error_set(error, EINVAL,
4184 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185 "duplicate age actions set");
4190 * Validate the modify-header IPv4 DSCP actions.
4192 * @param[in] action_flags
4193 * Holds the actions detected until now.
4195 * Pointer to the modify action.
4196 * @param[in] item_flags
4197 * Holds the items detected.
4199 * Pointer to error structure.
4202 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv4-DSCP modify action: generic modify-header checks
 * plus an IPv4 item must be present in the pattern.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
4205 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4206 const struct rte_flow_action *action,
4207 const uint64_t item_flags,
4208 struct rte_flow_error *error)
4212 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4214 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4215 return rte_flow_error_set(error, EINVAL,
4216 RTE_FLOW_ERROR_TYPE_ACTION,
4218 "no ipv4 item in pattern");
4224 * Validate the modify-header IPv6 DSCP actions.
4226 * @param[in] action_flags
4227 * Holds the actions detected until now.
4229 * Pointer to the modify action.
4230 * @param[in] item_flags
4231 * Holds the items detected.
4233 * Pointer to error structure.
4236 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a set-IPv6-DSCP modify action: generic modify-header checks
 * plus an IPv6 item must be present in the pattern.
 * NOTE(review): listing has gaps; declarations and early returns omitted.
 */
4239 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4240 const struct rte_flow_action *action,
4241 const uint64_t item_flags,
4242 struct rte_flow_error *error)
4246 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4248 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4249 return rte_flow_error_set(error, EINVAL,
4250 RTE_FLOW_ERROR_TYPE_ACTION,
4252 "no ipv6 item in pattern");
4258 * Match modify-header resource.
4261 * Pointer to the hash list.
4263 * Pointer to exist resource entry object.
4265 * Key of the new entry.
4267 * Pointer to new modify-header resource.
4270 * 0 on matching, non-zero otherwise.
/*
 * Hash-list match callback for modify-header resources: two entries match
 * when their actions_num is equal and the key region (everything from
 * ft_type to the end of the actions array) compares equal byte-wise.
 * Returns 0 on match, non-zero otherwise (memcmp semantics).
 */
4273 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4274 struct mlx5_hlist_entry *entry,
4275 uint64_t key __rte_unused, void *cb_ctx)
4277 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4278 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4279 struct mlx5_flow_dv_modify_hdr_resource *resource =
4280 container_of(entry, typeof(*resource), entry);
/* Fixed part of the key: struct tail starting at ft_type. */
4281 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
/* Variable part: the trailing array of modification actions. */
4283 key_len += ref->actions_num * sizeof(ref->actions[0]);
4284 return ref->actions_num != resource->actions_num ||
4285 memcmp(&ref->ft_type, &resource->ft_type, key_len);
/*
 * Hash-list create callback: allocate a modify-header resource, copy the
 * key+actions from the reference, choose the DR domain by table type
 * (FDB / NIC TX / NIC RX) and create the HW modification action.
 * Returns the new hlist entry, or (per the visible error paths) reports
 * ENOMEM through ctx->error on failure.
 * NOTE(review): listing has gaps; the NULL-entry check, the NIC_TX/RX
 * domain assignments and the error-path 'mlx5_free(entry); return NULL;'
 * lines are not visible here.
 */
4288 struct mlx5_hlist_entry *
4289 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4292 struct mlx5_dev_ctx_shared *sh = list->ctx;
4293 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4294 struct mlx5dv_dr_domain *ns;
4295 struct mlx5_flow_dv_modify_hdr_resource *entry;
4296 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4298 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4299 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
/* Trailing flexible storage for the actions array. */
4301 entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4304 rte_flow_error_set(ctx->error, ENOMEM,
4305 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4306 "cannot allocate resource memory");
4309 rte_memcpy(&entry->ft_type,
4310 RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4311 key_len + data_len);
4312 if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4313 ns = sh->fdb_domain;
4314 else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4318 ret = mlx5_flow_os_create_flow_action_modify_header
4319 (sh->ctx, ns, entry,
4320 data_len, &entry->action);
4323 rte_flow_error_set(ctx->error, ENOMEM,
4324 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4325 NULL, "cannot create modification action");
4328 return &entry->entry;
4332 * Validate the sample action.
4334 * @param[in] action_flags
4335 * Holds the actions detected until now.
4337 * Pointer to the sample action.
4339 * Pointer to the Ethernet device structure.
4341 * Attributes of flow that includes this action.
4343 * Pointer to error structure.
4346 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a SAMPLE action and its sub-action list.
 * Top-level checks: conf present, ratio >= 1, DevX + sampler capability,
 * no duplicate sample, and sample must precede meter/jump. Then each
 * sub-action (QUEUE/MARK/COUNT/PORT_ID/RAW_ENCAP visible here) is
 * validated and accumulated into sub_action_flags. Finally the attribute
 * combination is checked: NIC ingress needs a QUEUE fate, NIC egress is
 * unsupported, and E-Switch mirroring (ratio 1, non-empty sub-actions)
 * requires a PORT_ID destination and forbids QUEUE; Xcap sub-actions are
 * re-checked for non-hairpin queues.
 * NOTE(review): listing has gaps; 'int actions_n = 0;', several
 * 'if (ret < 0) return ret;' guards, 'break;' statements, the default
 * case opening and the final 'return 0;' are not visible here.
 */
4349 flow_dv_validate_action_sample(uint64_t action_flags,
4350 const struct rte_flow_action *action,
4351 struct rte_eth_dev *dev,
4352 const struct rte_flow_attr *attr,
4353 struct rte_flow_error *error)
4355 struct mlx5_priv *priv = dev->data->dev_private;
4356 struct mlx5_dev_config *dev_conf = &priv->config;
4357 const struct rte_flow_action_sample *sample = action->conf;
4358 const struct rte_flow_action *act;
4359 uint64_t sub_action_flags = 0;
4360 uint16_t queue_index = 0xFFFF;
4366 return rte_flow_error_set(error, EINVAL,
4367 RTE_FLOW_ERROR_TYPE_ACTION, action,
4368 "configuration cannot be NULL");
4369 if (sample->ratio == 0)
4370 return rte_flow_error_set(error, EINVAL,
4371 RTE_FLOW_ERROR_TYPE_ACTION, action,
4372 "ratio value starts from 1");
4373 if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4374 return rte_flow_error_set(error, ENOTSUP,
4375 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4377 "sample action not supported");
4378 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4379 return rte_flow_error_set(error, EINVAL,
4380 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4381 "Multiple sample actions not "
4383 if (action_flags & MLX5_FLOW_ACTION_METER)
4384 return rte_flow_error_set(error, EINVAL,
4385 RTE_FLOW_ERROR_TYPE_ACTION, action,
4386 "wrong action order, meter should "
4387 "be after sample action");
4388 if (action_flags & MLX5_FLOW_ACTION_JUMP)
4389 return rte_flow_error_set(error, EINVAL,
4390 RTE_FLOW_ERROR_TYPE_ACTION, action,
4391 "wrong action order, jump should "
4392 "be after sample action");
/* Walk the embedded sub-action list up to the END terminator. */
4393 act = sample->actions;
4394 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4395 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4396 return rte_flow_error_set(error, ENOTSUP,
4397 RTE_FLOW_ERROR_TYPE_ACTION,
4398 act, "too many actions");
4399 switch (act->type) {
4400 case RTE_FLOW_ACTION_TYPE_QUEUE:
4401 ret = mlx5_flow_validate_action_queue(act,
4407 queue_index = ((const struct rte_flow_action_queue *)
4408 (act->conf))->index;
4409 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4412 case RTE_FLOW_ACTION_TYPE_MARK:
4413 ret = flow_dv_validate_action_mark(dev, act,
/* Extended-metadata mode marks both MARK and MARK_EXT. */
4418 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4419 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4420 MLX5_FLOW_ACTION_MARK_EXT;
4422 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4425 case RTE_FLOW_ACTION_TYPE_COUNT:
4426 ret = flow_dv_validate_action_count(dev, error);
4429 sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4432 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4433 ret = flow_dv_validate_action_port_id(dev,
4440 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4443 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4444 ret = flow_dv_validate_action_raw_encap_decap
4445 (dev, NULL, act->conf, attr, &sub_action_flags,
4452 return rte_flow_error_set(error, ENOTSUP,
4453 RTE_FLOW_ERROR_TYPE_ACTION,
4455 "Doesn't support optional "
/* NIC ingress sampling must carry a destination QUEUE. */
4459 if (attr->ingress && !attr->transfer) {
4460 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4461 return rte_flow_error_set(error, EINVAL,
4462 RTE_FLOW_ERROR_TYPE_ACTION,
4464 "Ingress must has a dest "
4465 "QUEUE for Sample");
4466 } else if (attr->egress && !attr->transfer) {
4467 return rte_flow_error_set(error, ENOTSUP,
4468 RTE_FLOW_ERROR_TYPE_ACTION,
4470 "Sample Only support Ingress "
/* E-Switch mirror: ratio must be 1, destination must be PORT_ID. */
4472 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4473 MLX5_ASSERT(attr->transfer);
4474 if (sample->ratio > 1)
4475 return rte_flow_error_set(error, ENOTSUP,
4476 RTE_FLOW_ERROR_TYPE_ACTION,
4478 "E-Switch doesn't support "
4479 "any optional action "
4482 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4483 return rte_flow_error_set(error, ENOTSUP,
4484 RTE_FLOW_ERROR_TYPE_ACTION,
4486 "unsupported action QUEUE");
4487 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4488 return rte_flow_error_set(error, EINVAL,
4489 RTE_FLOW_ERROR_TYPE_ACTION,
4491 "E-Switch must has a dest "
4492 "port for mirroring");
4494 /* Continue validation for Xcap actions.*/
4495 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4496 (queue_index == 0xFFFF ||
4497 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4498 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4499 MLX5_FLOW_XCAP_ACTIONS)
4500 return rte_flow_error_set(error, ENOTSUP,
4501 RTE_FLOW_ERROR_TYPE_ACTION,
4502 NULL, "encap and decap "
4503 "combination aren't "
4505 if (!attr->transfer && attr->ingress && (sub_action_flags &
4506 MLX5_FLOW_ACTION_ENCAP))
4507 return rte_flow_error_set(error, ENOTSUP,
4508 RTE_FLOW_ERROR_TYPE_ACTION,
4509 NULL, "encap is not supported"
4510 " for ingress traffic");
4516 * Find existing modify-header resource or create and register a new one.
4518 * @param dev[in, out]
4519 * Pointer to rte_eth_dev structure.
4520 * @param[in, out] resource
4521 * Pointer to modify-header resource.
4522 * @parm[in, out] dev_flow
4523 * Pointer to the dev_flow.
4525 * pointer to error structure.
4528 * 0 on success otherwise -errno and errno is set.
/*
 * Find an existing modify-header resource in the shared hash list or
 * create and register a new one (via flow_dv_modify_create_cb). The hash
 * key is a raw checksum over the region from ft_type through the actions
 * array. Root-table flows get the ROOT_LEVEL flag and a tighter cap on
 * the number of modification actions.
 * NOTE(review): listing has gaps; the ctx initializers, the NULL-entry
 * check and the final 'return 0;' are not visible here.
 */
4531 flow_dv_modify_hdr_resource_register
4532 (struct rte_eth_dev *dev,
4533 struct mlx5_flow_dv_modify_hdr_resource *resource,
4534 struct mlx5_flow *dev_flow,
4535 struct rte_flow_error *error)
4537 struct mlx5_priv *priv = dev->data->dev_private;
4538 struct mlx5_dev_ctx_shared *sh = priv->sh;
4539 uint32_t key_len = sizeof(*resource) -
4540 offsetof(typeof(*resource), ft_type) +
4541 resource->actions_num * sizeof(resource->actions[0]);
4542 struct mlx5_hlist_entry *entry;
4543 struct mlx5_flow_cb_ctx ctx = {
/* Group 0 is the root table: flag it for the capacity check below. */
4548 resource->flags = dev_flow->dv.group ? 0 :
4549 MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4550 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4552 return rte_flow_error_set(error, EOVERFLOW,
4553 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4554 "too many modify header items");
4555 resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4556 entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
/* Use the (possibly pre-existing) registered resource from the list. */
4559 resource = container_of(entry, typeof(*resource), entry);
4560 dev_flow->handle->dvh.modify_hdr = resource;
4565 * Get DV flow counter by index.
4568 * Pointer to the Ethernet device structure.
4570 * mlx5 flow counter index in the container.
4572 * mlx5 flow counter pool in the container,
4575 * Pointer to the counter, NULL otherwise.
/*
 * Resolve a 1-based flow counter index (with the shared bit encoded in
 * the high bits) into the counter object, optionally returning its pool.
 * NOTE(review): listing has gaps; the 'if (ppool) *ppool = pool;' style
 * assignment and the pool NULL assertion are not visible here.
 */
4577 static struct mlx5_flow_counter *
4578 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4580 struct mlx5_flow_counter_pool **ppool)
4582 struct mlx5_priv *priv = dev->data->dev_private;
4583 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4584 struct mlx5_flow_counter_pool *pool;
4586 /* Decrease to original index and clear shared bit. */
4587 idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4588 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4589 pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4593 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4597 * Check the devx counter belongs to the pool.
4600 * Pointer to the counter pool.
4602 * The counter devx ID.
4605 * True if counter belongs to the pool, false otherwise.
/*
 * Return whether devx counter 'id' falls inside the pool's id range: the
 * pool covers MLX5_COUNTERS_PER_POOL ids aligned down from min_dcs->id.
 * NOTE(review): listing has gaps; the 'return true;'/'return false;'
 * lines are not visible here.
 */
4608 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4610 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4611 MLX5_COUNTERS_PER_POOL;
4613 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4619 * Get a pool by devx counter ID.
4622 * Pointer to the counter management.
4624 * The counter devx ID.
4627 * The counter pool pointer if exists, NULL otherwise,
/*
 * Look up the counter pool containing devx counter 'id' under the
 * pool_update_sl spinlock: check the cached last-used pool, reject ids
 * outside [min_id, max_id], then scan pools from the end (ids are mostly
 * allocated sequentially, so the newest pool is the likely hit).
 * NOTE(review): listing has gaps; the 'goto out' jumps, the backwards
 * for-loop header and the 'return pool;' are not visible here.
 */
4629 static struct mlx5_flow_counter_pool *
4630 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4633 struct mlx5_flow_counter_pool *pool = NULL;
4635 rte_spinlock_lock(&cmng->pool_update_sl);
4636 /* Check last used pool. */
4637 if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4638 flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4639 pool = cmng->pools[cmng->last_pool_idx];
4642 /* ID out of range means no suitable pool in the container. */
4643 if (id > cmng->max_id || id < cmng->min_id)
4646 * Find the pool from the end of the container, since mostly counter
4647 * ID is sequence increasing, and the last pool should be the needed
4652 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4654 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4660 rte_spinlock_unlock(&cmng->pool_update_sl);
4665 * Resize a counter container.
4668 * Pointer to the Ethernet device structure.
4671 * 0 on success, otherwise negative errno value and rte_errno is set.
/*
 * Grow the counter-pool pointer array by MLX5_CNT_CONTAINER_RESIZE slots:
 * allocate a zeroed larger array, copy the existing pointers, publish it
 * and free the old array.
 * NOTE(review): listing has gaps; the allocation-failure check, the
 * 'cmng->n = resize;' update and the success return are not visible.
 */
4674 flow_dv_container_resize(struct rte_eth_dev *dev)
4676 struct mlx5_priv *priv = dev->data->dev_private;
4677 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4678 void *old_pools = cmng->pools;
4679 uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4680 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4681 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4688 memcpy(pools, old_pools, cmng->n *
4689 sizeof(struct mlx5_flow_counter_pool *));
4691 cmng->pools = pools;
4693 mlx5_free(old_pools);
4698 * Query a devx flow counter.
4701 * Pointer to the Ethernet device structure.
4703 * Index to the flow counter.
4705 * The statistics value of packets.
4707 * The statistics value of bytes.
4710 * 0 on success, otherwise a negative errno value and rte_errno is set.
/*
 * Read a flow counter's packet/byte statistics. In fallback mode query
 * the device directly per counter; otherwise read the batch-queried raw
 * data cached in the pool, under the pool spinlock.
 * NOTE(review): listing has gaps; the offset declaration, the raw-data
 * availability branch and the return value are not visible here.
 */
4713 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4716 struct mlx5_priv *priv = dev->data->dev_private;
4717 struct mlx5_flow_counter_pool *pool = NULL;
4718 struct mlx5_flow_counter *cnt;
4721 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
/* Fallback mode: one devx query per counter, no cached raw data. */
4723 if (priv->sh->cmng.counter_fallback)
4724 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4725 0, pkts, bytes, 0, NULL, NULL, 0);
4726 rte_spinlock_lock(&pool->sl);
4731 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4732 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4733 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4735 rte_spinlock_unlock(&pool->sl);
4740 * Create and initialize a new counter pool.
4743 * Pointer to the Ethernet device structure.
4745 * The devX counter handle.
4747 * Whether the pool is for counter that was allocated for aging.
4748 * @param[in/out] cont_cur
4749 * Pointer to the container pointer, it will be update in pool resize.
4752 * The pool container pointer on success, NULL otherwise and rte_errno is set.
/*
 * Allocate and initialize a new counter pool (counter slots plus optional
 * per-counter age slots), insert it into the container under the
 * pool_update_sl lock — resizing the container when full — and, in
 * fallback mode, widen the [min_id, max_id] range and cache the pool as
 * last used.
 * NOTE(review): listing has gaps; the allocation-failure branch, the
 * 'cmng->n_valid++' style update and the 'return pool;' are not visible.
 */
4754 static struct mlx5_flow_counter_pool *
4755 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4758 struct mlx5_priv *priv = dev->data->dev_private;
4759 struct mlx5_flow_counter_pool *pool;
4760 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4761 bool fallback = priv->sh->cmng.counter_fallback;
4762 uint32_t size = sizeof(*pool);
4764 size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
/* Aging pools carry an extra age record per counter slot. */
4765 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4766 pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4772 pool->is_aged = !!age;
4773 pool->query_gen = 0;
4774 pool->min_dcs = dcs;
4775 rte_spinlock_init(&pool->sl);
4776 rte_spinlock_init(&pool->csl);
4777 TAILQ_INIT(&pool->counters[0]);
4778 TAILQ_INIT(&pool->counters[1]);
4779 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4780 rte_spinlock_lock(&cmng->pool_update_sl);
4781 pool->index = cmng->n_valid;
/* Container full: grow it before inserting; bail out on failure. */
4782 if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4784 rte_spinlock_unlock(&cmng->pool_update_sl);
4787 cmng->pools[pool->index] = pool;
4789 if (unlikely(fallback)) {
4790 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4792 if (base < cmng->min_id)
4793 cmng->min_id = base;
4794 if (base > cmng->max_id)
4795 cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4796 cmng->last_pool_idx = pool->index;
4798 rte_spinlock_unlock(&cmng->pool_update_sl);
4803 * Prepare a new counter and/or a new counter pool.
4806 * @param[in] dev Pointer to the Ethernet device structure.
4807 * @param[out] cnt_free
4808 * Where to put the pointer of a new counter.
4810 * @param[in] age Whether the pool is for counters that were allocated for aging.
4813 * @return The counter pool pointer and @p cnt_free is set on success,
4814 * NULL otherwise and rte_errno is set.
4816 static struct mlx5_flow_counter_pool *
4817 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4818 struct mlx5_flow_counter **cnt_free,
4821 struct mlx5_priv *priv = dev->data->dev_private;
4822 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4823 struct mlx5_flow_counter_pool *pool;
4824 struct mlx5_counters tmp_tq;
4825 struct mlx5_devx_obj *dcs = NULL;
4826 struct mlx5_flow_counter *cnt;
4827 enum mlx5_counter_type cnt_type =
4828 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4829 bool fallback = priv->sh->cmng.counter_fallback;
/*
 * Fallback path: allocate one DevX counter at a time and attach it to
 * the pool covering its ID range, creating that pool on demand.
 * NOTE(review): the `if (fallback)` branch guard itself is in lines
 * elided from this view — confirm against the full file.
 */
4833 /* bulk_bitmap must be 0 for single counter allocation. */
4834 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4837 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4839 pool = flow_dv_pool_create(dev, dcs, age);
4841 mlx5_devx_cmd_destroy(dcs);
/* Map the single counter ID to its slot inside the pool. */
4845 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4846 cnt = MLX5_POOL_GET_CNT(pool, i);
4848 cnt->dcs_when_free = dcs;
/*
 * Bulk path: allocate a whole pool's worth of counters in one DevX
 * call (bulk bitmap 0x4), then publish every counter except index 0
 * on the global per-type free list; index 0 goes back to the caller.
 */
4852 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4854 rte_errno = ENODATA;
4857 pool = flow_dv_pool_create(dev, dcs, age);
4859 mlx5_devx_cmd_destroy(dcs);
4862 TAILQ_INIT(&tmp_tq);
4863 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4864 cnt = MLX5_POOL_GET_CNT(pool, i);
4866 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
/* Publish the new free counters under the per-type list lock. */
4868 rte_spinlock_lock(&cmng->csl[cnt_type]);
4869 TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4870 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4871 *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4872 (*cnt_free)->pool = pool;
4877 * Allocate a flow counter.
4880 * @param[in] dev Pointer to the Ethernet device structure.
4882 * @param[in] age Whether the counter was allocated for aging.
4885 * @return Index to flow counter on success, 0 otherwise and rte_errno is set.
4888 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4890 struct mlx5_priv *priv = dev->data->dev_private;
4891 struct mlx5_flow_counter_pool *pool = NULL;
4892 struct mlx5_flow_counter *cnt_free = NULL;
4893 bool fallback = priv->sh->cmng.counter_fallback;
4894 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4895 enum mlx5_counter_type cnt_type =
4896 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
/* Flow counters require DevX support. */
4899 if (!priv->config.devx) {
4900 rte_errno = ENOTSUP;
4903 /* Get free counters from container. */
4904 rte_spinlock_lock(&cmng->csl[cnt_type]);
4905 cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4907 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4908 rte_spinlock_unlock(&cmng->csl[cnt_type]);
/* Free list empty: allocate a new counter (and pool if needed). */
4909 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4911 pool = cnt_free->pool;
4913 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4914 /* Create a DV counter action only in the first time usage. */
4915 if (!cnt_free->action) {
4917 struct mlx5_devx_obj *dcs;
4921 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4922 dcs = pool->min_dcs;
/*
 * NOTE(review): branch selecting between pool->min_dcs (bulk) and
 * dcs_when_free (fallback) is partially elided here — confirm.
 */
4925 dcs = cnt_free->dcs_when_free;
4927 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
/* Encode pool index + slot into the public counter index. */
4934 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4935 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4936 /* Update the counter reset values. */
4937 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4940 if (!fallback && !priv->sh->cmng.query_thread_on)
4941 /* Start the asynchronous batch query by the host thread. */
4942 mlx5_set_query_alarm(priv->sh);
/*
 * Error path: return the counter to the per-type free list.
 * NOTE(review): the label/goto reaching this code is elided.
 */
4946 cnt_free->pool = pool;
4948 cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4949 rte_spinlock_lock(&cmng->csl[cnt_type]);
4950 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4951 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4957 * Allocate a shared flow counter. Callback used by the three-level
4958 * table (l3t) code to create the entry on first lookup.
4960 * @param[in] ctx Pointer to the shared counter configuration.
4962 * @param[out] data Pointer to save the allocated counter index.
4965 * @return Index to flow counter on success, 0 otherwise and rte_errno is set.
4969 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4971 struct mlx5_shared_counter_conf *conf = ctx;
4972 struct rte_eth_dev *dev = conf->dev;
4973 struct mlx5_flow_counter *cnt;
/* Allocate a regular (non-aging) counter and mark its index as shared. */
4975 data->dword = flow_dv_counter_alloc(dev, 0);
4976 data->dword |= MLX5_CNT_SHARED_OFFSET;
4977 cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
/* Record the user-visible shared counter ID for later release. */
4978 cnt->shared_info.id = conf->id;
4983 * Get a shared flow counter.
4986 * @param[in] dev Pointer to the Ethernet device structure.
4988 * @param[in] id Counter identifier.
4991 * @return Index to flow counter on success, 0 otherwise and rte_errno is set.
4994 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4996 struct mlx5_priv *priv = dev->data->dev_private;
4997 struct mlx5_shared_counter_conf conf = {
5001 union mlx5_l3t_data data = {
/*
 * Look the shared counter up by ID in the three-level table;
 * flow_dv_counter_alloc_shared_cb() allocates it on first use.
 */
5005 mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5006 flow_dv_counter_alloc_shared_cb, &conf);
5011 * Get age param from counter index.
5014 * @param[in] dev Pointer to the Ethernet device structure.
5015 * @param[in] counter
5016 * Index to the counter handler.
5019 * @return The aging parameter specified for the counter index.
5021 static struct mlx5_age_param*
5022 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5025 struct mlx5_flow_counter *cnt;
5026 struct mlx5_flow_counter_pool *pool = NULL;
/* Only the pool lookup is needed; the slot is recomputed below. */
5028 flow_dv_counter_get_by_idx(dev, counter, &pool);
/* Counter indexes are 1-based; map back to the slot within the pool. */
5029 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5030 cnt = MLX5_POOL_GET_CNT(pool, counter);
5031 return MLX5_CNT_TO_AGE(cnt);
5035 * Remove a flow counter from aged counter list.
5038 * @param[in] dev Pointer to the Ethernet device structure.
5039 * @param[in] counter
5040 * Index to the counter handler.
5042 * @param[in] cnt Pointer to the counter handler.
5045 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5046 uint32_t counter, struct mlx5_flow_counter *cnt)
5048 struct mlx5_age_info *age_info;
5049 struct mlx5_age_param *age_param;
5050 struct mlx5_priv *priv = dev->data->dev_private;
5051 uint16_t expected = AGE_CANDIDATE;
5053 age_info = GET_PORT_AGE_INFO(priv);
5054 age_param = flow_dv_counter_idx_get_age(dev, counter);
/*
 * Fast path: if the counter is still only an aging candidate it is
 * not on the aged list yet, so a lock-free CAS to AGE_FREE suffices.
 * Otherwise it must be unlinked from the aged list under the lock.
 */
5055 if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5056 AGE_FREE, false, __ATOMIC_RELAXED,
5057 __ATOMIC_RELAXED)) {
5059 * We need the lock even when the age timeout already fired,
5060 * since the counter may still be in process on the aged list.
5062 rte_spinlock_lock(&age_info->aged_sl);
5063 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5064 rte_spinlock_unlock(&age_info->aged_sl);
5065 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5070 * Release a flow counter.
5073 * @param[in] dev Pointer to the Ethernet device structure.
5074 * @param[in] counter
5075 * Index to the counter handler.
5078 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5080 struct mlx5_priv *priv = dev->data->dev_private;
5081 struct mlx5_flow_counter_pool *pool = NULL;
5082 struct mlx5_flow_counter *cnt;
5083 enum mlx5_counter_type cnt_type;
5087 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
/*
 * A shared counter is released through the three-level table; a
 * non-zero return from the clear means other references remain.
 */
5089 if (IS_SHARED_CNT(counter) &&
5090 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
/* Detach the counter from the aging machinery, if it was aged. */
5093 flow_dv_counter_remove_from_age(dev, counter, cnt)
5096 * Put the counter back to the list to be updated in non-fallback mode.
5097 * Two lists are used alternately: while one is being queried, a freed
5098 * counter is added to the other list, selected by the pool's query_gen
5099 * value. After the query finishes, that list is appended to the global
5100 * container counter list. The active list switches when a query starts,
5101 * so no lock against the query callback is needed here: the query
5102 * callback and this release function operate on different lists.
5105 if (!priv->sh->cmng.counter_fallback) {
5106 rte_spinlock_lock(&pool->csl);
5107 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5108 rte_spinlock_unlock(&pool->csl);
/* Fallback mode: return the counter straight to the global free list. */
5110 cnt->dcs_when_free = cnt->dcs_when_active;
5111 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5112 MLX5_COUNTER_TYPE_ORIGIN;
5113 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5114 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5116 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5121 * Verify the @p attributes will be correctly understood by the NIC and store
5122 * them in the @p flow if everything is correct.
5125 * @param[in] dev Pointer to dev struct.
5126 * @param[in] attributes
5127 * Pointer to flow attributes.
5128 * @param[in] external
5129 * This flow rule is created by request external to PMD.
5131 * @param[out] error Pointer to error structure.
5134 * @return - 0 on success and non root table.
5135 * - 1 on success and root table.
5136 * - a negative errno value otherwise and rte_errno is set.
5139 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5140 const struct mlx5_flow_tunnel *tunnel,
5141 const struct rte_flow_attr *attributes,
5142 struct flow_grp_info grp_info,
5143 struct rte_flow_error *error)
5145 struct mlx5_priv *priv = dev->data->dev_private;
5146 uint32_t priority_max = priv->config.flow_prio - 1;
/* Without Direct Rules support only the root group (0) is usable. */
5149 #ifndef HAVE_MLX5DV_DR
5150 RTE_SET_USED(tunnel);
5151 RTE_SET_USED(grp_info);
5152 if (attributes->group)
5153 return rte_flow_error_set(error, ENOTSUP,
5154 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5156 "groups are not supported");
/* DR path: translate the rte_flow group into a device table ID. */
5160 ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
/* A zero table ID means the rule lands in the root table. */
5165 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5167 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5168 attributes->priority >= priority_max)
5169 return rte_flow_error_set(error, ENOTSUP,
5170 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5172 "priority out of range");
/* Transfer (E-Switch) rules have additional restrictions. */
5173 if (attributes->transfer) {
5174 if (!priv->config.dv_esw_en)
5175 return rte_flow_error_set
5177 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5178 "E-Switch dr is not supported");
5179 if (!(priv->representor || priv->master))
5180 return rte_flow_error_set
5181 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5182 NULL, "E-Switch configuration can only be"
5183 " done by a master or a representor device");
5184 if (attributes->egress)
5185 return rte_flow_error_set
5187 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5188 "egress is not supported");
/* Non-transfer rules must be exactly one of ingress or egress. */
5190 if (!(attributes->egress ^ attributes->ingress))
5191 return rte_flow_error_set(error, ENOTSUP,
5192 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5193 "must specify exactly one of "
5194 "ingress or egress");
5199 * Internal validation function. For validating both actions and items.
5202 * Pointer to the rte_eth_dev structure.
5204 * Pointer to the flow attributes.
5206 * Pointer to the list of items.
5207 * @param[in] actions
5208 * Pointer to the list of actions.
5209 * @param[in] external
5210 * This flow rule is created by request external to PMD.
5211 * @param[in] hairpin
5212 * Number of hairpin TX actions, 0 means classic flow.
5214 * Pointer to the error structure.
5217 * 0 on success, a negative errno value otherwise and rte_errno is set.
5220 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5221 const struct rte_flow_item items[],
5222 const struct rte_flow_action actions[],
5223 bool external, int hairpin, struct rte_flow_error *error)
5226 uint64_t action_flags = 0;
5227 uint64_t item_flags = 0;
5228 uint64_t last_item = 0;
5229 uint8_t next_protocol = 0xff;
5230 uint16_t ether_type = 0;
5232 uint8_t item_ipv6_proto = 0;
5233 const struct rte_flow_item *gre_item = NULL;
5234 const struct rte_flow_action_raw_decap *decap;
5235 const struct rte_flow_action_raw_encap *encap;
5236 const struct rte_flow_action_rss *rss;
5237 const struct rte_flow_item_tcp nic_tcp_mask = {
5240 .src_port = RTE_BE16(UINT16_MAX),
5241 .dst_port = RTE_BE16(UINT16_MAX),
5244 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5247 "\xff\xff\xff\xff\xff\xff\xff\xff"
5248 "\xff\xff\xff\xff\xff\xff\xff\xff",
5250 "\xff\xff\xff\xff\xff\xff\xff\xff"
5251 "\xff\xff\xff\xff\xff\xff\xff\xff",
5252 .vtc_flow = RTE_BE32(0xffffffff),
5258 const struct rte_flow_item_ecpri nic_ecpri_mask = {
5262 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5266 .dummy[0] = 0xffffffff,
5269 struct mlx5_priv *priv = dev->data->dev_private;
5270 struct mlx5_dev_config *dev_conf = &priv->config;
5271 uint16_t queue_index = 0xFFFF;
5272 const struct rte_flow_item_vlan *vlan_m = NULL;
5273 int16_t rw_act_num = 0;
5275 const struct mlx5_flow_tunnel *tunnel;
5276 struct flow_grp_info grp_info = {
5277 .external = !!external,
5278 .transfer = !!attr->transfer,
5279 .fdb_def_rule = !!priv->fdb_def_rule,
5281 const struct rte_eth_hairpin_conf *conf;
5285 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5286 tunnel = flow_items_to_tunnel(items);
5287 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5288 MLX5_FLOW_ACTION_DECAP;
5289 } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5290 tunnel = flow_actions_to_tunnel(actions);
5291 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5295 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5296 (dev, tunnel, attr, items, actions);
5297 ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5300 is_root = (uint64_t)ret;
5301 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5302 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5303 int type = items->type;
5305 if (!mlx5_flow_os_item_supported(type))
5306 return rte_flow_error_set(error, ENOTSUP,
5307 RTE_FLOW_ERROR_TYPE_ITEM,
5308 NULL, "item not supported");
5310 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5311 if (items[0].type != (typeof(items[0].type))
5312 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5313 return rte_flow_error_set
5315 RTE_FLOW_ERROR_TYPE_ITEM,
5316 NULL, "MLX5 private items "
5317 "must be the first");
5319 case RTE_FLOW_ITEM_TYPE_VOID:
5321 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5322 ret = flow_dv_validate_item_port_id
5323 (dev, items, attr, item_flags, error);
5326 last_item = MLX5_FLOW_ITEM_PORT_ID;
5328 case RTE_FLOW_ITEM_TYPE_ETH:
5329 ret = mlx5_flow_validate_item_eth(items, item_flags,
5333 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5334 MLX5_FLOW_LAYER_OUTER_L2;
5335 if (items->mask != NULL && items->spec != NULL) {
5337 ((const struct rte_flow_item_eth *)
5340 ((const struct rte_flow_item_eth *)
5342 ether_type = rte_be_to_cpu_16(ether_type);
5347 case RTE_FLOW_ITEM_TYPE_VLAN:
5348 ret = flow_dv_validate_item_vlan(items, item_flags,
5352 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5353 MLX5_FLOW_LAYER_OUTER_VLAN;
5354 if (items->mask != NULL && items->spec != NULL) {
5356 ((const struct rte_flow_item_vlan *)
5357 items->spec)->inner_type;
5359 ((const struct rte_flow_item_vlan *)
5360 items->mask)->inner_type;
5361 ether_type = rte_be_to_cpu_16(ether_type);
5365 /* Store outer VLAN mask for of_push_vlan action. */
5367 vlan_m = items->mask;
5369 case RTE_FLOW_ITEM_TYPE_IPV4:
5370 mlx5_flow_tunnel_ip_check(items, next_protocol,
5371 &item_flags, &tunnel);
5372 ret = flow_dv_validate_item_ipv4(items, item_flags,
5373 last_item, ether_type,
5377 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5378 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5379 if (items->mask != NULL &&
5380 ((const struct rte_flow_item_ipv4 *)
5381 items->mask)->hdr.next_proto_id) {
5383 ((const struct rte_flow_item_ipv4 *)
5384 (items->spec))->hdr.next_proto_id;
5386 ((const struct rte_flow_item_ipv4 *)
5387 (items->mask))->hdr.next_proto_id;
5389 /* Reset for inner layer. */
5390 next_protocol = 0xff;
5393 case RTE_FLOW_ITEM_TYPE_IPV6:
5394 mlx5_flow_tunnel_ip_check(items, next_protocol,
5395 &item_flags, &tunnel);
5396 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5403 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5404 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5405 if (items->mask != NULL &&
5406 ((const struct rte_flow_item_ipv6 *)
5407 items->mask)->hdr.proto) {
5409 ((const struct rte_flow_item_ipv6 *)
5410 items->spec)->hdr.proto;
5412 ((const struct rte_flow_item_ipv6 *)
5413 items->spec)->hdr.proto;
5415 ((const struct rte_flow_item_ipv6 *)
5416 items->mask)->hdr.proto;
5418 /* Reset for inner layer. */
5419 next_protocol = 0xff;
5422 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5423 ret = flow_dv_validate_item_ipv6_frag_ext(items,
5428 last_item = tunnel ?
5429 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5430 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5431 if (items->mask != NULL &&
5432 ((const struct rte_flow_item_ipv6_frag_ext *)
5433 items->mask)->hdr.next_header) {
5435 ((const struct rte_flow_item_ipv6_frag_ext *)
5436 items->spec)->hdr.next_header;
5438 ((const struct rte_flow_item_ipv6_frag_ext *)
5439 items->mask)->hdr.next_header;
5441 /* Reset for inner layer. */
5442 next_protocol = 0xff;
5445 case RTE_FLOW_ITEM_TYPE_TCP:
5446 ret = mlx5_flow_validate_item_tcp
5453 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5454 MLX5_FLOW_LAYER_OUTER_L4_TCP;
5456 case RTE_FLOW_ITEM_TYPE_UDP:
5457 ret = mlx5_flow_validate_item_udp(items, item_flags,
5462 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5463 MLX5_FLOW_LAYER_OUTER_L4_UDP;
5465 case RTE_FLOW_ITEM_TYPE_GRE:
5466 ret = mlx5_flow_validate_item_gre(items, item_flags,
5467 next_protocol, error);
5471 last_item = MLX5_FLOW_LAYER_GRE;
5473 case RTE_FLOW_ITEM_TYPE_NVGRE:
5474 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5479 last_item = MLX5_FLOW_LAYER_NVGRE;
5481 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5482 ret = mlx5_flow_validate_item_gre_key
5483 (items, item_flags, gre_item, error);
5486 last_item = MLX5_FLOW_LAYER_GRE_KEY;
5488 case RTE_FLOW_ITEM_TYPE_VXLAN:
5489 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5493 last_item = MLX5_FLOW_LAYER_VXLAN;
5495 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5496 ret = mlx5_flow_validate_item_vxlan_gpe(items,
5501 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5503 case RTE_FLOW_ITEM_TYPE_GENEVE:
5504 ret = mlx5_flow_validate_item_geneve(items,
5509 last_item = MLX5_FLOW_LAYER_GENEVE;
5511 case RTE_FLOW_ITEM_TYPE_MPLS:
5512 ret = mlx5_flow_validate_item_mpls(dev, items,
5517 last_item = MLX5_FLOW_LAYER_MPLS;
5520 case RTE_FLOW_ITEM_TYPE_MARK:
5521 ret = flow_dv_validate_item_mark(dev, items, attr,
5525 last_item = MLX5_FLOW_ITEM_MARK;
5527 case RTE_FLOW_ITEM_TYPE_META:
5528 ret = flow_dv_validate_item_meta(dev, items, attr,
5532 last_item = MLX5_FLOW_ITEM_METADATA;
5534 case RTE_FLOW_ITEM_TYPE_ICMP:
5535 ret = mlx5_flow_validate_item_icmp(items, item_flags,
5540 last_item = MLX5_FLOW_LAYER_ICMP;
5542 case RTE_FLOW_ITEM_TYPE_ICMP6:
5543 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5548 item_ipv6_proto = IPPROTO_ICMPV6;
5549 last_item = MLX5_FLOW_LAYER_ICMP6;
5551 case RTE_FLOW_ITEM_TYPE_TAG:
5552 ret = flow_dv_validate_item_tag(dev, items,
5556 last_item = MLX5_FLOW_ITEM_TAG;
5558 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5559 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5561 case RTE_FLOW_ITEM_TYPE_GTP:
5562 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5566 last_item = MLX5_FLOW_LAYER_GTP;
5568 case RTE_FLOW_ITEM_TYPE_ECPRI:
5569 /* Capacity will be checked in the translate stage. */
5570 ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5577 last_item = MLX5_FLOW_LAYER_ECPRI;
5580 return rte_flow_error_set(error, ENOTSUP,
5581 RTE_FLOW_ERROR_TYPE_ITEM,
5582 NULL, "item not supported");
5584 item_flags |= last_item;
5586 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5587 int type = actions->type;
5589 if (!mlx5_flow_os_action_supported(type))
5590 return rte_flow_error_set(error, ENOTSUP,
5591 RTE_FLOW_ERROR_TYPE_ACTION,
5593 "action not supported");
5594 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5595 return rte_flow_error_set(error, ENOTSUP,
5596 RTE_FLOW_ERROR_TYPE_ACTION,
5597 actions, "too many actions");
5599 case RTE_FLOW_ACTION_TYPE_VOID:
5601 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5602 ret = flow_dv_validate_action_port_id(dev,
5609 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5612 case RTE_FLOW_ACTION_TYPE_FLAG:
5613 ret = flow_dv_validate_action_flag(dev, action_flags,
5617 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5618 /* Count all modify-header actions as one. */
5619 if (!(action_flags &
5620 MLX5_FLOW_MODIFY_HDR_ACTIONS))
5622 action_flags |= MLX5_FLOW_ACTION_FLAG |
5623 MLX5_FLOW_ACTION_MARK_EXT;
5625 action_flags |= MLX5_FLOW_ACTION_FLAG;
5628 rw_act_num += MLX5_ACT_NUM_SET_MARK;
5630 case RTE_FLOW_ACTION_TYPE_MARK:
5631 ret = flow_dv_validate_action_mark(dev, actions,
5636 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5637 /* Count all modify-header actions as one. */
5638 if (!(action_flags &
5639 MLX5_FLOW_MODIFY_HDR_ACTIONS))
5641 action_flags |= MLX5_FLOW_ACTION_MARK |
5642 MLX5_FLOW_ACTION_MARK_EXT;
5644 action_flags |= MLX5_FLOW_ACTION_MARK;
5647 rw_act_num += MLX5_ACT_NUM_SET_MARK;
5649 case RTE_FLOW_ACTION_TYPE_SET_META:
5650 ret = flow_dv_validate_action_set_meta(dev, actions,
5655 /* Count all modify-header actions as one action. */
5656 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5658 action_flags |= MLX5_FLOW_ACTION_SET_META;
5659 rw_act_num += MLX5_ACT_NUM_SET_META;
5661 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5662 ret = flow_dv_validate_action_set_tag(dev, actions,
5667 /* Count all modify-header actions as one action. */
5668 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5670 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5671 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5673 case RTE_FLOW_ACTION_TYPE_DROP:
5674 ret = mlx5_flow_validate_action_drop(action_flags,
5678 action_flags |= MLX5_FLOW_ACTION_DROP;
5681 case RTE_FLOW_ACTION_TYPE_QUEUE:
5682 ret = mlx5_flow_validate_action_queue(actions,
5687 queue_index = ((const struct rte_flow_action_queue *)
5688 (actions->conf))->index;
5689 action_flags |= MLX5_FLOW_ACTION_QUEUE;
5692 case RTE_FLOW_ACTION_TYPE_RSS:
5693 rss = actions->conf;
5694 ret = mlx5_flow_validate_action_rss(actions,
5700 if (rss != NULL && rss->queue_num)
5701 queue_index = rss->queue[0];
5702 action_flags |= MLX5_FLOW_ACTION_RSS;
5705 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5707 mlx5_flow_validate_action_default_miss(action_flags,
5711 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5714 case RTE_FLOW_ACTION_TYPE_COUNT:
5715 ret = flow_dv_validate_action_count(dev, error);
5718 action_flags |= MLX5_FLOW_ACTION_COUNT;
5721 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5722 if (flow_dv_validate_action_pop_vlan(dev,
5728 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5731 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5732 ret = flow_dv_validate_action_push_vlan(dev,
5739 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5742 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5743 ret = flow_dv_validate_action_set_vlan_pcp
5744 (action_flags, actions, error);
5747 /* Count PCP with push_vlan command. */
5748 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5750 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5751 ret = flow_dv_validate_action_set_vlan_vid
5752 (item_flags, action_flags,
5756 /* Count VID with push_vlan command. */
5757 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5758 rw_act_num += MLX5_ACT_NUM_MDF_VID;
5760 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5761 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5762 ret = flow_dv_validate_action_l2_encap(dev,
5768 action_flags |= MLX5_FLOW_ACTION_ENCAP;
5771 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5772 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5773 ret = flow_dv_validate_action_decap(dev, action_flags,
5777 action_flags |= MLX5_FLOW_ACTION_DECAP;
5780 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5781 ret = flow_dv_validate_action_raw_encap_decap
5782 (dev, NULL, actions->conf, attr, &action_flags,
5787 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5788 decap = actions->conf;
5789 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5791 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5795 encap = actions->conf;
5797 ret = flow_dv_validate_action_raw_encap_decap
5799 decap ? decap : &empty_decap, encap,
5800 attr, &action_flags, &actions_n,
5805 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5806 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5807 ret = flow_dv_validate_action_modify_mac(action_flags,
5813 /* Count all modify-header actions as one action. */
5814 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5816 action_flags |= actions->type ==
5817 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5818 MLX5_FLOW_ACTION_SET_MAC_SRC :
5819 MLX5_FLOW_ACTION_SET_MAC_DST;
5821 * Even if the source and destination MAC addresses have
5822 * overlap in the header with 4B alignment, the convert
5823 * function will handle them separately and 4 SW actions
5824 * will be created. And 2 actions will be added each
5825 * time no matter how many bytes of address will be set.
5827 rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5829 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5830 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5831 ret = flow_dv_validate_action_modify_ipv4(action_flags,
5837 /* Count all modify-header actions as one action. */
5838 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5840 action_flags |= actions->type ==
5841 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5842 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5843 MLX5_FLOW_ACTION_SET_IPV4_DST;
5844 rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5846 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5847 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5848 ret = flow_dv_validate_action_modify_ipv6(action_flags,
5854 if (item_ipv6_proto == IPPROTO_ICMPV6)
5855 return rte_flow_error_set(error, ENOTSUP,
5856 RTE_FLOW_ERROR_TYPE_ACTION,
5858 "Can't change header "
5859 "with ICMPv6 proto");
5860 /* Count all modify-header actions as one action. */
5861 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5863 action_flags |= actions->type ==
5864 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5865 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5866 MLX5_FLOW_ACTION_SET_IPV6_DST;
5867 rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5869 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5870 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5871 ret = flow_dv_validate_action_modify_tp(action_flags,
5877 /* Count all modify-header actions as one action. */
5878 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5880 action_flags |= actions->type ==
5881 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5882 MLX5_FLOW_ACTION_SET_TP_SRC :
5883 MLX5_FLOW_ACTION_SET_TP_DST;
5884 rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5886 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5887 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5888 ret = flow_dv_validate_action_modify_ttl(action_flags,
5894 /* Count all modify-header actions as one action. */
5895 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5897 action_flags |= actions->type ==
5898 RTE_FLOW_ACTION_TYPE_SET_TTL ?
5899 MLX5_FLOW_ACTION_SET_TTL :
5900 MLX5_FLOW_ACTION_DEC_TTL;
5901 rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5903 case RTE_FLOW_ACTION_TYPE_JUMP:
5904 ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5911 action_flags |= MLX5_FLOW_ACTION_JUMP;
5913 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5914 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5915 ret = flow_dv_validate_action_modify_tcp_seq
5922 /* Count all modify-header actions as one action. */
5923 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5925 action_flags |= actions->type ==
5926 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5927 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5928 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5929 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5931 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5932 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5933 ret = flow_dv_validate_action_modify_tcp_ack
5940 /* Count all modify-header actions as one action. */
5941 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5943 action_flags |= actions->type ==
5944 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5945 MLX5_FLOW_ACTION_INC_TCP_ACK :
5946 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5947 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5949 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5951 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5952 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5953 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5955 case RTE_FLOW_ACTION_TYPE_METER:
5956 ret = mlx5_flow_validate_action_meter(dev,
5962 action_flags |= MLX5_FLOW_ACTION_METER;
5964 /* Meter action will add one more TAG action. */
5965 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5967 case RTE_FLOW_ACTION_TYPE_AGE:
5968 ret = flow_dv_validate_action_age(action_flags,
5973 action_flags |= MLX5_FLOW_ACTION_AGE;
5976 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5977 ret = flow_dv_validate_action_modify_ipv4_dscp
5984 /* Count all modify-header actions as one action. */
5985 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5987 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5988 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5990 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5991 ret = flow_dv_validate_action_modify_ipv6_dscp
5998 /* Count all modify-header actions as one action. */
5999 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6001 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6002 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6004 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6005 ret = flow_dv_validate_action_sample(action_flags,
6010 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6013 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6014 if (actions[0].type != (typeof(actions[0].type))
6015 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6016 return rte_flow_error_set
6018 RTE_FLOW_ERROR_TYPE_ACTION,
6019 NULL, "MLX5 private action "
6020 "must be the first");
6022 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6025 return rte_flow_error_set(error, ENOTSUP,
6026 RTE_FLOW_ERROR_TYPE_ACTION,
6028 "action not supported");
6032 * Validate actions in flow rules
6033 * - Explicit decap action is prohibited by the tunnel offload API.
6034 * - Drop action in tunnel steer rule is prohibited by the API.
6035 * - Application cannot use MARK action because it's value can mask
6036 * tunnel default miss nitification.
6037 * - JUMP in tunnel match rule has no support in current PMD
6039 * - TAG & META are reserved for future uses.
6041 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6042 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
6043 MLX5_FLOW_ACTION_MARK |
6044 MLX5_FLOW_ACTION_SET_TAG |
6045 MLX5_FLOW_ACTION_SET_META |
6046 MLX5_FLOW_ACTION_DROP;
6048 if (action_flags & bad_actions_mask)
6049 return rte_flow_error_set
6051 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052 "Invalid RTE action in tunnel "
6054 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6055 return rte_flow_error_set
6057 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6058 "tunnel set decap rule must terminate "
6061 return rte_flow_error_set
6063 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064 "tunnel flows for ingress traffic only");
6066 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6067 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
6068 MLX5_FLOW_ACTION_MARK |
6069 MLX5_FLOW_ACTION_SET_TAG |
6070 MLX5_FLOW_ACTION_SET_META;
6072 if (action_flags & bad_actions_mask)
6073 return rte_flow_error_set
6075 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076 "Invalid RTE action in tunnel "
6080 * Validate the drop action mutual exclusion with other actions.
6081 * Drop action is mutually-exclusive with any other action, except for
6084 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6085 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6086 return rte_flow_error_set(error, EINVAL,
6087 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6088 "Drop action is mutually-exclusive "
6089 "with any other action, except for "
6091 /* Eswitch has few restrictions on using items and actions */
6092 if (attr->transfer) {
6093 if (!mlx5_flow_ext_mreg_supported(dev) &&
6094 action_flags & MLX5_FLOW_ACTION_FLAG)
6095 return rte_flow_error_set(error, ENOTSUP,
6096 RTE_FLOW_ERROR_TYPE_ACTION,
6098 "unsupported action FLAG");
6099 if (!mlx5_flow_ext_mreg_supported(dev) &&
6100 action_flags & MLX5_FLOW_ACTION_MARK)
6101 return rte_flow_error_set(error, ENOTSUP,
6102 RTE_FLOW_ERROR_TYPE_ACTION,
6104 "unsupported action MARK");
6105 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6106 return rte_flow_error_set(error, ENOTSUP,
6107 RTE_FLOW_ERROR_TYPE_ACTION,
6109 "unsupported action QUEUE");
6110 if (action_flags & MLX5_FLOW_ACTION_RSS)
6111 return rte_flow_error_set(error, ENOTSUP,
6112 RTE_FLOW_ERROR_TYPE_ACTION,
6114 "unsupported action RSS");
6115 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6116 return rte_flow_error_set(error, EINVAL,
6117 RTE_FLOW_ERROR_TYPE_ACTION,
6119 "no fate action is found");
6121 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6122 return rte_flow_error_set(error, EINVAL,
6123 RTE_FLOW_ERROR_TYPE_ACTION,
6125 "no fate action is found");
6128 * Continue validation for Xcap and VLAN actions.
6129 * If hairpin is working in explicit TX rule mode, there is no actions
6130 * splitting and the validation of hairpin ingress flow should be the
6131 * same as other standard flows.
6133 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6134 MLX5_FLOW_VLAN_ACTIONS)) &&
6135 (queue_index == 0xFFFF ||
6136 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6137 ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6138 conf->tx_explicit != 0))) {
6139 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6140 MLX5_FLOW_XCAP_ACTIONS)
6141 return rte_flow_error_set(error, ENOTSUP,
6142 RTE_FLOW_ERROR_TYPE_ACTION,
6143 NULL, "encap and decap "
6144 "combination aren't supported");
6145 if (!attr->transfer && attr->ingress) {
6146 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6147 return rte_flow_error_set
6149 RTE_FLOW_ERROR_TYPE_ACTION,
6150 NULL, "encap is not supported"
6151 " for ingress traffic");
6152 else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6153 return rte_flow_error_set
6155 RTE_FLOW_ERROR_TYPE_ACTION,
6156 NULL, "push VLAN action not "
6157 "supported for ingress");
6158 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6159 MLX5_FLOW_VLAN_ACTIONS)
6160 return rte_flow_error_set
6162 RTE_FLOW_ERROR_TYPE_ACTION,
6163 NULL, "no support for "
6164 "multiple VLAN actions");
6168 * Hairpin flow will add one more TAG action in TX implicit mode.
6169 * In TX explicit mode, there will be no hairpin flow ID.
6172 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6173 /* extra metadata enabled: one more TAG action will be add. */
6174 if (dev_conf->dv_flow_en &&
6175 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6176 mlx5_flow_ext_mreg_supported(dev))
6177 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6178 if ((uint32_t)rw_act_num >
6179 flow_dv_modify_hdr_action_max(dev, is_root)) {
6180 return rte_flow_error_set(error, ENOTSUP,
6181 RTE_FLOW_ERROR_TYPE_ACTION,
6182 NULL, "too many header modify"
6183 " actions to support");
6189 * Internal preparation function. Allocates the DV flow size,
6190 * this size is constant.
6193 * Pointer to the rte_eth_dev structure.
6195 * Pointer to the flow attributes.
6197 * Pointer to the list of items.
6198 * @param[in] actions
6199 * Pointer to the list of actions.
6201 * Pointer to the error structure.
6204 * Pointer to mlx5_flow object on success,
6205 * otherwise NULL and rte_errno is set.
6207 static struct mlx5_flow *
6208 flow_dv_prepare(struct rte_eth_dev *dev,
6209 const struct rte_flow_attr *attr __rte_unused,
6210 const struct rte_flow_item items[] __rte_unused,
6211 const struct rte_flow_action actions[] __rte_unused,
6212 struct rte_flow_error *error)
6214 uint32_t handle_idx = 0;
6215 struct mlx5_flow *dev_flow;
6216 struct mlx5_flow_handle *dev_handle;
6217 struct mlx5_priv *priv = dev->data->dev_private;
6218 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6221 /* In case of corrupting the memory. */
6222 if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6223 rte_flow_error_set(error, ENOSPC,
6224 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6225 "not free temporary device flow");
6228 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6231 rte_flow_error_set(error, ENOMEM,
6232 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6233 "not enough memory to create flow handle");
6236 MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6237 dev_flow = &wks->flows[wks->flow_idx++];
6238 dev_flow->handle = dev_handle;
6239 dev_flow->handle_idx = handle_idx;
6241 * In some old rdma-core releases, before continuing, a check of the
6242 * length of matching parameter will be done at first. It needs to use
6243 * the length without misc4 param. If the flow has misc4 support, then
6244 * the length needs to be adjusted accordingly. Each param member is
6245 * aligned with a 64B boundary naturally.
6247 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6248 MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6250 * The matching value needs to be cleared to 0 before using. In the
6251 * past, it will be automatically cleared when using rte_*alloc
6252 * API. The time consumption will be almost the same as before.
6254 memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6255 dev_flow->ingress = attr->ingress;
6256 dev_flow->dv.transfer = attr->transfer;
6260 #ifdef RTE_LIBRTE_MLX5_DEBUG
6262 * Sanity check for match mask and value. Similar to check_valid_spec() in
6263 * kernel driver. If unmasked bit is present in value, it returns failure.
6266 * pointer to match mask buffer.
6267 * @param match_value
6268 * pointer to match value buffer.
6271 * 0 if valid, -EINVAL otherwise.
6274 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6276 uint8_t *m = match_mask;
6277 uint8_t *v = match_value;
6280 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6283 "match_value differs from match_criteria"
6284 " %p[%u] != %p[%u]",
6285 match_value, i, match_mask, i);
6294 * Add match of ip_version.
6298 * @param[in] headers_v
6299 * Values header pointer.
6300 * @param[in] headers_m
6301 * Masks header pointer.
6302 * @param[in] ip_version
6303 * The IP version to set.
6306 flow_dv_set_match_ip_version(uint32_t group,
6312 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6314 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6316 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6317 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6318 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6322 * Add Ethernet item to matcher and to the value.
6324 * @param[in, out] matcher
6326 * @param[in, out] key
6327 * Flow matcher value.
6329 * Flow pattern to translate.
6331 * Item is inner pattern.
6334 flow_dv_translate_item_eth(void *matcher, void *key,
6335 const struct rte_flow_item *item, int inner,
6338 const struct rte_flow_item_eth *eth_m = item->mask;
6339 const struct rte_flow_item_eth *eth_v = item->spec;
6340 const struct rte_flow_item_eth nic_mask = {
6341 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6343 .type = RTE_BE16(0xffff),
6356 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6358 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6360 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6362 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6364 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6365 ð_m->dst, sizeof(eth_m->dst));
6366 /* The value must be in the range of the mask. */
6367 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6368 for (i = 0; i < sizeof(eth_m->dst); ++i)
6369 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6370 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6371 ð_m->src, sizeof(eth_m->src));
6372 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6373 /* The value must be in the range of the mask. */
6374 for (i = 0; i < sizeof(eth_m->dst); ++i)
6375 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6377 * HW supports match on one Ethertype, the Ethertype following the last
6378 * VLAN tag of the packet (see PRM).
6379 * Set match on ethertype only if ETH header is not followed by VLAN.
6380 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6381 * ethertype, and use ip_version field instead.
6382 * eCPRI over Ether layer will use type value 0xAEFE.
6384 if (eth_m->type == 0xFFFF) {
6385 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
6386 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6387 switch (eth_v->type) {
6388 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6389 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6391 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6392 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6393 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6395 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6396 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6398 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6399 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6405 if (eth_m->has_vlan) {
6406 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6407 if (eth_v->has_vlan) {
6409 * Here, when also has_more_vlan field in VLAN item is
6410 * not set, only single-tagged packets will be matched.
6412 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6416 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6417 rte_be_to_cpu_16(eth_m->type));
6418 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6419 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6423 * Add VLAN item to matcher and to the value.
6425 * @param[in, out] dev_flow
6427 * @param[in, out] matcher
6429 * @param[in, out] key
6430 * Flow matcher value.
6432 * Flow pattern to translate.
6434 * Item is inner pattern.
6437 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6438 void *matcher, void *key,
6439 const struct rte_flow_item *item,
6440 int inner, uint32_t group)
6442 const struct rte_flow_item_vlan *vlan_m = item->mask;
6443 const struct rte_flow_item_vlan *vlan_v = item->spec;
6450 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6452 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6454 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6456 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6458 * This is workaround, masks are not supported,
6459 * and pre-validated.
6462 dev_flow->handle->vf_vlan.tag =
6463 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6466 * When VLAN item exists in flow, mark packet as tagged,
6467 * even if TCI is not specified.
6469 if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6470 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6471 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6476 vlan_m = &rte_flow_item_vlan_mask;
6477 tci_m = rte_be_to_cpu_16(vlan_m->tci);
6478 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6479 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6480 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6481 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6482 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6483 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6484 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6486 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6487 * ethertype, and use ip_version field instead.
6489 if (vlan_m->inner_type == 0xFFFF) {
6490 switch (vlan_v->inner_type) {
6491 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6492 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6493 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6494 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6496 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6497 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6499 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6500 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6506 if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6507 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6508 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6509 /* Only one vlan_tag bit can be set. */
6510 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6513 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6514 rte_be_to_cpu_16(vlan_m->inner_type));
6515 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6516 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6520 * Add IPV4 item to matcher and to the value.
6522 * @param[in, out] matcher
6524 * @param[in, out] key
6525 * Flow matcher value.
6527 * Flow pattern to translate.
6529 * Item is inner pattern.
6531 * The group to insert the rule.
6534 flow_dv_translate_item_ipv4(void *matcher, void *key,
6535 const struct rte_flow_item *item,
6536 int inner, uint32_t group)
6538 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6539 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6540 const struct rte_flow_item_ipv4 nic_mask = {
6542 .src_addr = RTE_BE32(0xffffffff),
6543 .dst_addr = RTE_BE32(0xffffffff),
6544 .type_of_service = 0xff,
6545 .next_proto_id = 0xff,
6546 .time_to_live = 0xff,
6556 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6558 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6560 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6562 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6564 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6569 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6570 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6571 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6572 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6573 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6574 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6575 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6576 src_ipv4_src_ipv6.ipv4_layout.ipv4);
6577 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6578 src_ipv4_src_ipv6.ipv4_layout.ipv4);
6579 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6580 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6581 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6582 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6583 ipv4_m->hdr.type_of_service);
6584 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6585 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6586 ipv4_m->hdr.type_of_service >> 2);
6587 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6588 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6589 ipv4_m->hdr.next_proto_id);
6590 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6591 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6592 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6593 ipv4_m->hdr.time_to_live);
6594 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6595 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6596 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6597 !!(ipv4_m->hdr.fragment_offset));
6598 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6599 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6603 * Add IPV6 item to matcher and to the value.
6605 * @param[in, out] matcher
6607 * @param[in, out] key
6608 * Flow matcher value.
6610 * Flow pattern to translate.
6612 * Item is inner pattern.
6614 * The group to insert the rule.
6617 flow_dv_translate_item_ipv6(void *matcher, void *key,
6618 const struct rte_flow_item *item,
6619 int inner, uint32_t group)
6621 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6622 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6623 const struct rte_flow_item_ipv6 nic_mask = {
6626 "\xff\xff\xff\xff\xff\xff\xff\xff"
6627 "\xff\xff\xff\xff\xff\xff\xff\xff",
6629 "\xff\xff\xff\xff\xff\xff\xff\xff"
6630 "\xff\xff\xff\xff\xff\xff\xff\xff",
6631 .vtc_flow = RTE_BE32(0xffffffff),
6638 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6639 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6648 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6650 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6652 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6654 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6656 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6661 size = sizeof(ipv6_m->hdr.dst_addr);
6662 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6663 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6664 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6665 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6666 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6667 for (i = 0; i < size; ++i)
6668 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6669 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6670 src_ipv4_src_ipv6.ipv6_layout.ipv6);
6671 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6672 src_ipv4_src_ipv6.ipv6_layout.ipv6);
6673 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6674 for (i = 0; i < size; ++i)
6675 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6677 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6678 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6679 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6680 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6681 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6682 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6685 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6687 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6690 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6692 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6696 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6698 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6699 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6701 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6702 ipv6_m->hdr.hop_limits);
6703 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6704 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6705 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6706 !!(ipv6_m->has_frag_ext));
6707 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6708 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6712 * Add IPV6 fragment extension item to matcher and to the value.
6714 * @param[in, out] matcher
6716 * @param[in, out] key
6717 * Flow matcher value.
6719 * Flow pattern to translate.
6721 * Item is inner pattern.
6724 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6725 const struct rte_flow_item *item,
6728 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6729 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6730 const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6732 .next_header = 0xff,
6733 .frag_data = RTE_BE16(0xffff),
6740 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6742 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6744 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6746 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6748 /* IPv6 fragment extension item exists, so packet is IP fragment. */
6749 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6750 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6751 if (!ipv6_frag_ext_v)
6753 if (!ipv6_frag_ext_m)
6754 ipv6_frag_ext_m = &nic_mask;
6755 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6756 ipv6_frag_ext_m->hdr.next_header);
6757 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6758 ipv6_frag_ext_v->hdr.next_header &
6759 ipv6_frag_ext_m->hdr.next_header);
6763 * Add TCP item to matcher and to the value.
6765 * @param[in, out] matcher
6767 * @param[in, out] key
6768 * Flow matcher value.
6770 * Flow pattern to translate.
6772 * Item is inner pattern.
6775 flow_dv_translate_item_tcp(void *matcher, void *key,
6776 const struct rte_flow_item *item,
6779 const struct rte_flow_item_tcp *tcp_m = item->mask;
6780 const struct rte_flow_item_tcp *tcp_v = item->spec;
6785 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6787 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6789 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6791 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6793 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6794 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6798 tcp_m = &rte_flow_item_tcp_mask;
6799 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6800 rte_be_to_cpu_16(tcp_m->hdr.src_port));
6801 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6802 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6803 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6804 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6805 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6806 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6807 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6808 tcp_m->hdr.tcp_flags);
6809 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6810 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6814 * Add UDP item to matcher and to the value.
6816 * @param[in, out] matcher
6818 * @param[in, out] key
6819 * Flow matcher value.
6821 * Flow pattern to translate.
6823 * Item is inner pattern.
6826 flow_dv_translate_item_udp(void *matcher, void *key,
6827 const struct rte_flow_item *item,
6830 const struct rte_flow_item_udp *udp_m = item->mask;
6831 const struct rte_flow_item_udp *udp_v = item->spec;
6836 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6838 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6840 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6842 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6844 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6845 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6849 udp_m = &rte_flow_item_udp_mask;
6850 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6851 rte_be_to_cpu_16(udp_m->hdr.src_port));
6852 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6853 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6854 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6855 rte_be_to_cpu_16(udp_m->hdr.dst_port));
6856 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6857 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6861 * Add GRE optional Key item to matcher and to the value.
6863 * @param[in, out] matcher
6865 * @param[in, out] key
6866 * Flow matcher value.
6868 * Flow pattern to translate.
6870 * Item is inner pattern.
6873 flow_dv_translate_item_gre_key(void *matcher, void *key,
6874 const struct rte_flow_item *item)
6876 const rte_be32_t *key_m = item->mask;
6877 const rte_be32_t *key_v = item->spec;
6878 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6879 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6880 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6882 /* GRE K bit must be on and should already be validated */
6883 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6884 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6888 key_m = &gre_key_default_mask;
6889 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6890 rte_be_to_cpu_32(*key_m) >> 8);
6891 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6892 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6893 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6894 rte_be_to_cpu_32(*key_m) & 0xFF);
6895 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6896 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6900 * Add GRE item to matcher and to the value.
6902 * @param[in, out] matcher
6904 * @param[in, out] key
6905 * Flow matcher value.
6907 * Flow pattern to translate.
6909 * Item is inner pattern.
6912 flow_dv_translate_item_gre(void *matcher, void *key,
6913 const struct rte_flow_item *item,
6916 const struct rte_flow_item_gre *gre_m = item->mask;
6917 const struct rte_flow_item_gre *gre_v = item->spec;
6920 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6921 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6928 uint16_t s_present:1;
6929 uint16_t k_present:1;
6930 uint16_t rsvd_bit1:1;
6931 uint16_t c_present:1;
6935 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6938 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6940 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6942 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6944 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6946 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6947 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6951 gre_m = &rte_flow_item_gre_mask;
6952 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6953 rte_be_to_cpu_16(gre_m->protocol));
6954 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6955 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6956 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6957 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6958 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6959 gre_crks_rsvd0_ver_m.c_present);
6960 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6961 gre_crks_rsvd0_ver_v.c_present &
6962 gre_crks_rsvd0_ver_m.c_present);
6963 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6964 gre_crks_rsvd0_ver_m.k_present);
6965 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6966 gre_crks_rsvd0_ver_v.k_present &
6967 gre_crks_rsvd0_ver_m.k_present);
6968 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6969 gre_crks_rsvd0_ver_m.s_present);
6970 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6971 gre_crks_rsvd0_ver_v.s_present &
6972 gre_crks_rsvd0_ver_m.s_present);
6976 * Add NVGRE item to matcher and to the value.
6978 * @param[in, out] matcher
6980 * @param[in, out] key
6981 * Flow matcher value.
6983 * Flow pattern to translate.
6985 * Item is inner pattern.
6988 flow_dv_translate_item_nvgre(void *matcher, void *key,
6989 const struct rte_flow_item *item,
6992 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6993 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6994 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6995 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6996 const char *tni_flow_id_m;
6997 const char *tni_flow_id_v;
7003 /* For NVGRE, GRE header fields must be set with defined values. */
7004 const struct rte_flow_item_gre gre_spec = {
7005 .c_rsvd0_ver = RTE_BE16(0x2000),
7006 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7008 const struct rte_flow_item_gre gre_mask = {
7009 .c_rsvd0_ver = RTE_BE16(0xB000),
7010 .protocol = RTE_BE16(UINT16_MAX),
7012 const struct rte_flow_item gre_item = {
7017 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7021 nvgre_m = &rte_flow_item_nvgre_mask;
7022 tni_flow_id_m = (const char *)nvgre_m->tni;
7023 tni_flow_id_v = (const char *)nvgre_v->tni;
7024 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7025 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7026 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7027 memcpy(gre_key_m, tni_flow_id_m, size);
7028 for (i = 0; i < size; ++i)
7029 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7033 * Add VXLAN item to matcher and to the value.
7035 * @param[in, out] matcher
7037 * @param[in, out] key
7038 * Flow matcher value.
7040 * Flow pattern to translate.
7042 * Item is inner pattern.
7045 flow_dv_translate_item_vxlan(void *matcher, void *key,
7046 const struct rte_flow_item *item,
7049 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7050 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7053 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7054 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7062 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7064 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7066 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7068 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7070 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7071 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7072 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7073 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7074 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7079 vxlan_m = &rte_flow_item_vxlan_mask;
7080 size = sizeof(vxlan_m->vni);
7081 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7082 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7083 memcpy(vni_m, vxlan_m->vni, size);
7084 for (i = 0; i < size; ++i)
7085 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7089 * Add VXLAN-GPE item to matcher and to the value.
7091 * @param[in, out] matcher
7093 * @param[in, out] key
7094 * Flow matcher value.
7096 * Flow pattern to translate.
7098 * Item is inner pattern.
7102 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7103 const struct rte_flow_item *item, int inner)
7105 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7106 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7110 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7112 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7118 uint8_t flags_m = 0xff;
7119 uint8_t flags_v = 0xc;
7122 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7124 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7126 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7128 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7130 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7131 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7132 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7133 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7134 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7139 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7140 size = sizeof(vxlan_m->vni);
7141 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7142 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7143 memcpy(vni_m, vxlan_m->vni, size);
7144 for (i = 0; i < size; ++i)
7145 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7146 if (vxlan_m->flags) {
7147 flags_m = vxlan_m->flags;
7148 flags_v = vxlan_v->flags;
7150 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7151 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7152 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7154 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7159 * Add Geneve item to matcher and to the value.
7161 * @param[in, out] matcher
7163 * @param[in, out] key
7164 * Flow matcher value.
7166 * Flow pattern to translate.
7168 * Item is inner pattern.
7172 flow_dv_translate_item_geneve(void *matcher, void *key,
7173 const struct rte_flow_item *item, int inner)
7175 const struct rte_flow_item_geneve *geneve_m = item->mask;
7176 const struct rte_flow_item_geneve *geneve_v = item->spec;
7179 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7180 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7189 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7191 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7193 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7195 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7197 dport = MLX5_UDP_PORT_GENEVE;
7198 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7199 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7200 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7205 geneve_m = &rte_flow_item_geneve_mask;
7206 size = sizeof(geneve_m->vni);
7207 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7208 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7209 memcpy(vni_m, geneve_m->vni, size);
7210 for (i = 0; i < size; ++i)
7211 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7212 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7213 rte_be_to_cpu_16(geneve_m->protocol));
7214 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7215 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7216 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7217 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7218 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7219 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7220 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7221 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7222 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7223 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7224 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7225 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7226 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7230 * Add MPLS item to matcher and to the value.
7232 * @param[in, out] matcher
7234 * @param[in, out] key
7235 * Flow matcher value.
7237 * Flow pattern to translate.
7238 * @param[in] prev_layer
7239 * The protocol layer indicated in previous item.
7241 * Item is inner pattern.
7244 flow_dv_translate_item_mpls(void *matcher, void *key,
7245 const struct rte_flow_item *item,
7246 uint64_t prev_layer,
7249 const uint32_t *in_mpls_m = item->mask;
7250 const uint32_t *in_mpls_v = item->spec;
7251 uint32_t *out_mpls_m = 0;
7252 uint32_t *out_mpls_v = 0;
7253 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7254 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7255 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7257 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7258 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7259 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7261 switch (prev_layer) {
7262 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7263 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7264 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7265 MLX5_UDP_PORT_MPLS);
7267 case MLX5_FLOW_LAYER_GRE:
7268 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7269 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7270 RTE_ETHER_TYPE_MPLS);
7273 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7274 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7281 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7282 switch (prev_layer) {
7283 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7285 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7286 outer_first_mpls_over_udp);
7288 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7289 outer_first_mpls_over_udp);
7291 case MLX5_FLOW_LAYER_GRE:
7293 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7294 outer_first_mpls_over_gre);
7296 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7297 outer_first_mpls_over_gre);
7300 /* Inner MPLS not over GRE is not supported. */
7303 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7307 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7313 if (out_mpls_m && out_mpls_v) {
7314 *out_mpls_m = *in_mpls_m;
7315 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7320 * Add metadata register item to matcher
7322 * @param[in, out] matcher
7324 * @param[in, out] key
7325 * Flow matcher value.
7326 * @param[in] reg_type
7327 * Type of device metadata register
7334 flow_dv_match_meta_reg(void *matcher, void *key,
7335 enum modify_reg reg_type,
7336 uint32_t data, uint32_t mask)
7339 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7341 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7347 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7348 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7351 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7352 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7356 * The metadata register C0 field might be divided into
7357 * source vport index and META item value, we should set
7358 * this field according to specified mask, not as whole one.
7360 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7362 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7363 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7366 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7369 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7370 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7373 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7374 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7377 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7378 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7381 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7382 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7385 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7386 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7389 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7390 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7393 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7394 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7403 * Add MARK item to matcher
7406 * The device to configure through.
7407 * @param[in, out] matcher
7409 * @param[in, out] key
7410 * Flow matcher value.
7412 * Flow pattern to translate.
7415 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7416 void *matcher, void *key,
7417 const struct rte_flow_item *item)
7419 struct mlx5_priv *priv = dev->data->dev_private;
7420 const struct rte_flow_item_mark *mark;
7424 mark = item->mask ? (const void *)item->mask :
7425 &rte_flow_item_mark_mask;
7426 mask = mark->id & priv->sh->dv_mark_mask;
7427 mark = (const void *)item->spec;
7429 value = mark->id & priv->sh->dv_mark_mask & mask;
7431 enum modify_reg reg;
7433 /* Get the metadata register index for the mark. */
7434 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7435 MLX5_ASSERT(reg > 0);
7436 if (reg == REG_C_0) {
7437 struct mlx5_priv *priv = dev->data->dev_private;
7438 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7439 uint32_t shl_c0 = rte_bsf32(msk_c0);
7445 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7450 * Add META item to matcher
7453 * The device to configure through.
7454 * @param[in, out] matcher
7456 * @param[in, out] key
7457 * Flow matcher value.
7459 * Attributes of flow that includes this item.
7461 * Flow pattern to translate.
7464 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7465 void *matcher, void *key,
7466 const struct rte_flow_attr *attr,
7467 const struct rte_flow_item *item)
7469 const struct rte_flow_item_meta *meta_m;
7470 const struct rte_flow_item_meta *meta_v;
7472 meta_m = (const void *)item->mask;
7474 meta_m = &rte_flow_item_meta_mask;
7475 meta_v = (const void *)item->spec;
7478 uint32_t value = meta_v->data;
7479 uint32_t mask = meta_m->data;
7481 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7485 * In datapath code there is no endianness
7486 * coversions for perfromance reasons, all
7487 * pattern conversions are done in rte_flow.
7489 value = rte_cpu_to_be_32(value);
7490 mask = rte_cpu_to_be_32(mask);
7491 if (reg == REG_C_0) {
7492 struct mlx5_priv *priv = dev->data->dev_private;
7493 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7494 uint32_t shl_c0 = rte_bsf32(msk_c0);
7495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7496 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7503 MLX5_ASSERT(msk_c0);
7504 MLX5_ASSERT(!(~msk_c0 & mask));
7506 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7511 * Add vport metadata Reg C0 item to matcher
7513 * @param[in, out] matcher
7515 * @param[in, out] key
7516 * Flow matcher value.
7518 * Flow pattern to translate.
7521 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7522 uint32_t value, uint32_t mask)
7524 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7528 * Add tag item to matcher
7531 * The device to configure through.
7532 * @param[in, out] matcher
7534 * @param[in, out] key
7535 * Flow matcher value.
7537 * Flow pattern to translate.
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541 void *matcher, void *key,
7542 const struct rte_flow_item *item)
7544 const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545 const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546 uint32_t mask, value;
7549 value = tag_v->data;
7550 mask = tag_m ? tag_m->data : UINT32_MAX;
7551 if (tag_v->id == REG_C_0) {
7552 struct mlx5_priv *priv = dev->data->dev_private;
7553 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554 uint32_t shl_c0 = rte_bsf32(msk_c0);
7560 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7564 * Add TAG item to matcher
7567 * The device to configure through.
7568 * @param[in, out] matcher
7570 * @param[in, out] key
7571 * Flow matcher value.
7573 * Flow pattern to translate.
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577 void *matcher, void *key,
7578 const struct rte_flow_item *item)
7580 const struct rte_flow_item_tag *tag_v = item->spec;
7581 const struct rte_flow_item_tag *tag_m = item->mask;
7582 enum modify_reg reg;
7585 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586 /* Get the metadata register index for the tag. */
7587 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588 MLX5_ASSERT(reg > 0);
7589 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7593 * Add source vport match to the specified matcher.
7595 * @param[in, out] matcher
7597 * @param[in, out] key
7598 * Flow matcher value.
7600 * Source vport value to match
7605 flow_dv_translate_item_source_vport(void *matcher, void *key,
7606 int16_t port, uint16_t mask)
7608 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7609 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7611 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7612 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7616 * Translate port-id item to eswitch match on port-id.
7619 * The device to configure through.
7620 * @param[in, out] matcher
7622 * @param[in, out] key
7623 * Flow matcher value.
7625 * Flow pattern to translate.
7628 * 0 on success, a negative errno value otherwise.
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632 void *key, const struct rte_flow_item *item)
7634 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7635 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7636 struct mlx5_priv *priv;
7639 mask = pid_m ? pid_m->id : 0xffff;
7640 id = pid_v ? pid_v->id : dev->data->port_id;
7641 priv = mlx5_port_to_eswitch_info(id, item == NULL);
7644 /* Translate to vport field or to metadata, depending on mode. */
7645 if (priv->vport_meta_mask)
7646 flow_dv_translate_item_meta_vport(matcher, key,
7647 priv->vport_meta_tag,
7648 priv->vport_meta_mask);
7650 flow_dv_translate_item_source_vport(matcher, key,
7651 priv->vport_id, mask);
7656 * Add ICMP6 item to matcher and to the value.
7658 * @param[in, out] matcher
7660 * @param[in, out] key
7661 * Flow matcher value.
7663 * Flow pattern to translate.
7665 * Item is inner pattern.
7668 flow_dv_translate_item_icmp6(void *matcher, void *key,
7669 const struct rte_flow_item *item,
7672 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7673 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7676 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7678 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7680 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7682 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7684 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7686 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7688 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7689 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7693 icmp6_m = &rte_flow_item_icmp6_mask;
7694 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7695 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7696 icmp6_v->type & icmp6_m->type);
7697 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7698 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7699 icmp6_v->code & icmp6_m->code);
7703 * Add ICMP item to matcher and to the value.
7705 * @param[in, out] matcher
7707 * @param[in, out] key
7708 * Flow matcher value.
7710 * Flow pattern to translate.
7712 * Item is inner pattern.
7715 flow_dv_translate_item_icmp(void *matcher, void *key,
7716 const struct rte_flow_item *item,
7719 const struct rte_flow_item_icmp *icmp_m = item->mask;
7720 const struct rte_flow_item_icmp *icmp_v = item->spec;
7721 uint32_t icmp_header_data_m = 0;
7722 uint32_t icmp_header_data_v = 0;
7725 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7727 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7729 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7731 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7733 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7735 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7737 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7738 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7742 icmp_m = &rte_flow_item_icmp_mask;
7743 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7744 icmp_m->hdr.icmp_type);
7745 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7746 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7747 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7748 icmp_m->hdr.icmp_code);
7749 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7750 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7751 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7752 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7753 if (icmp_header_data_m) {
7754 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7755 icmp_header_data_v |=
7756 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7757 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7758 icmp_header_data_m);
7759 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7760 icmp_header_data_v & icmp_header_data_m);
7765 * Add GTP item to matcher and to the value.
7767 * @param[in, out] matcher
7769 * @param[in, out] key
7770 * Flow matcher value.
7772 * Flow pattern to translate.
7774 * Item is inner pattern.
7777 flow_dv_translate_item_gtp(void *matcher, void *key,
7778 const struct rte_flow_item *item, int inner)
7780 const struct rte_flow_item_gtp *gtp_m = item->mask;
7781 const struct rte_flow_item_gtp *gtp_v = item->spec;
7784 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7786 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7787 uint16_t dport = RTE_GTPU_UDP_PORT;
7790 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7792 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7794 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7796 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7798 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7799 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7800 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7805 gtp_m = &rte_flow_item_gtp_mask;
7806 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7807 gtp_m->v_pt_rsv_flags);
7808 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7809 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7810 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7811 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7812 gtp_v->msg_type & gtp_m->msg_type);
7813 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7814 rte_be_to_cpu_32(gtp_m->teid));
7815 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7816 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7820 * Add eCPRI item to matcher and to the value.
7823 * The device to configure through.
7824 * @param[in, out] matcher
7826 * @param[in, out] key
7827 * Flow matcher value.
7829 * Flow pattern to translate.
7830 * @param[in] samples
7831 * Sample IDs to be used in the matching.
7834 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7835 void *key, const struct rte_flow_item *item)
7837 struct mlx5_priv *priv = dev->data->dev_private;
7838 const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7839 const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7840 void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7842 void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7850 ecpri_m = &rte_flow_item_ecpri_mask;
7852 * Maximal four DW samples are supported in a single matching now.
7853 * Two are used now for a eCPRI matching:
7854 * 1. Type: one byte, mask should be 0x00ff0000 in network order
7855 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7858 if (!ecpri_m->hdr.common.u32)
7860 samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7861 /* Need to take the whole DW as the mask to fill the entry. */
7862 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7863 prog_sample_field_value_0);
7864 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7865 prog_sample_field_value_0);
7866 /* Already big endian (network order) in the header. */
7867 *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7868 *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7869 /* Sample#0, used for matching type, offset 0. */
7870 MLX5_SET(fte_match_set_misc4, misc4_m,
7871 prog_sample_field_id_0, samples[0]);
7872 /* It makes no sense to set the sample ID in the mask field. */
7873 MLX5_SET(fte_match_set_misc4, misc4_v,
7874 prog_sample_field_id_0, samples[0]);
7876 * Checking if message body part needs to be matched.
7877 * Some wildcard rules only matching type field should be supported.
7879 if (ecpri_m->hdr.dummy[0]) {
7880 switch (ecpri_v->hdr.common.type) {
7881 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7882 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7883 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7884 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7885 prog_sample_field_value_1);
7886 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7887 prog_sample_field_value_1);
7888 *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7889 *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7890 /* Sample#1, to match message body, offset 4. */
7891 MLX5_SET(fte_match_set_misc4, misc4_m,
7892 prog_sample_field_id_1, samples[1]);
7893 MLX5_SET(fte_match_set_misc4, misc4_v,
7894 prog_sample_field_id_1, samples[1]);
7897 /* Others, do not match any sample ID. */
7903 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7905 #define HEADER_IS_ZERO(match_criteria, headers) \
7906 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
7907 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7910 * Calculate flow matcher enable bitmap.
7912 * @param match_criteria
7913 * Pointer to flow matcher criteria.
7916 * Bitmap of enabled fields.
7919 flow_dv_matcher_enable(uint32_t *match_criteria)
7921 uint8_t match_criteria_enable;
7923 match_criteria_enable =
7924 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7925 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7926 match_criteria_enable |=
7927 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7928 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7929 match_criteria_enable |=
7930 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7931 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7932 match_criteria_enable |=
7933 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7934 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7935 match_criteria_enable |=
7936 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7937 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7938 match_criteria_enable |=
7939 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7940 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7941 return match_criteria_enable;
7944 struct mlx5_hlist_entry *
7945 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7947 struct mlx5_dev_ctx_shared *sh = list->ctx;
7948 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7949 struct rte_eth_dev *dev = ctx->dev;
7950 struct mlx5_flow_tbl_data_entry *tbl_data;
7951 struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7952 struct rte_flow_error *error = ctx->error;
7953 union mlx5_flow_tbl_key key = { .v64 = key64 };
7954 struct mlx5_flow_tbl_resource *tbl;
7959 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7961 rte_flow_error_set(error, ENOMEM,
7962 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7964 "cannot allocate flow table data entry");
7967 tbl_data->idx = idx;
7968 tbl_data->tunnel = tt_prm->tunnel;
7969 tbl_data->group_id = tt_prm->group_id;
7970 tbl_data->external = tt_prm->external;
7971 tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7972 tbl_data->is_egress = !!key.direction;
7973 tbl = &tbl_data->tbl;
7975 return &tbl_data->entry;
7977 domain = sh->fdb_domain;
7978 else if (key.direction)
7979 domain = sh->tx_domain;
7981 domain = sh->rx_domain;
7982 ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
7984 rte_flow_error_set(error, ENOMEM,
7985 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7986 NULL, "cannot create flow table object");
7987 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7991 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
7992 (tbl->obj, &tbl_data->jump.action);
7994 rte_flow_error_set(error, ENOMEM,
7995 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7997 "cannot create flow jump action");
7998 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7999 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8003 MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8004 key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8006 mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8007 flow_dv_matcher_create_cb,
8008 flow_dv_matcher_match_cb,
8009 flow_dv_matcher_remove_cb);
8010 return &tbl_data->entry;
8016 * @param[in, out] dev
8017 * Pointer to rte_eth_dev structure.
8018 * @param[in] table_id
8021 * Direction of the table.
8022 * @param[in] transfer
8023 * E-Switch or NIC flow.
8025 * Dummy entry for dv API.
8027 * pointer to error structure.
8030 * Returns tables resource based on the index, NULL in case of failed.
8032 struct mlx5_flow_tbl_resource *
8033 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8034 uint32_t table_id, uint8_t egress,
8037 const struct mlx5_flow_tunnel *tunnel,
8038 uint32_t group_id, uint8_t dummy,
8039 struct rte_flow_error *error)
8041 struct mlx5_priv *priv = dev->data->dev_private;
8042 union mlx5_flow_tbl_key table_key = {
8044 .table_id = table_id,
8046 .domain = !!transfer,
8047 .direction = !!egress,
8050 struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8052 .group_id = group_id,
8053 .external = external,
8055 struct mlx5_flow_cb_ctx ctx = {
8060 struct mlx5_hlist_entry *entry;
8061 struct mlx5_flow_tbl_data_entry *tbl_data;
8063 entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8065 rte_flow_error_set(error, ENOMEM,
8066 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8067 "cannot get table");
8070 tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8071 return &tbl_data->tbl;
8075 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8076 struct mlx5_hlist_entry *entry)
8078 struct mlx5_dev_ctx_shared *sh = list->ctx;
8079 struct mlx5_flow_tbl_data_entry *tbl_data =
8080 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8082 MLX5_ASSERT(entry && sh);
8083 if (tbl_data->jump.action)
8084 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8085 if (tbl_data->tbl.obj)
8086 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8087 if (tbl_data->tunnel_offload && tbl_data->external) {
8088 struct mlx5_hlist_entry *he;
8089 struct mlx5_hlist *tunnel_grp_hash;
8090 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8091 union tunnel_tbl_key tunnel_key = {
8092 .tunnel_id = tbl_data->tunnel ?
8093 tbl_data->tunnel->tunnel_id : 0,
8094 .group = tbl_data->group_id
8096 union mlx5_flow_tbl_key table_key = {
8099 uint32_t table_id = table_key.table_id;
8101 tunnel_grp_hash = tbl_data->tunnel ?
8102 tbl_data->tunnel->groups :
8104 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8106 mlx5_hlist_unregister(tunnel_grp_hash, he);
8108 "Table_id %#x tunnel %u group %u released.",
8111 tbl_data->tunnel->tunnel_id : 0,
8112 tbl_data->group_id);
8114 mlx5_cache_list_destroy(&tbl_data->matchers);
8115 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8119 * Release a flow table.
8122 * Pointer to device shared structure.
8124 * Table resource to be released.
8127 * Returns 0 if table was released, else return 1;
8130 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8131 struct mlx5_flow_tbl_resource *tbl)
8133 struct mlx5_flow_tbl_data_entry *tbl_data =
8134 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8138 return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8142 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8143 struct mlx5_cache_entry *entry, void *cb_ctx)
8145 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8146 struct mlx5_flow_dv_matcher *ref = ctx->data;
8147 struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8150 return cur->crc != ref->crc ||
8151 cur->priority != ref->priority ||
8152 memcmp((const void *)cur->mask.buf,
8153 (const void *)ref->mask.buf, ref->mask.size);
8156 struct mlx5_cache_entry *
8157 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8158 struct mlx5_cache_entry *entry __rte_unused,
8161 struct mlx5_dev_ctx_shared *sh = list->ctx;
8162 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8163 struct mlx5_flow_dv_matcher *ref = ctx->data;
8164 struct mlx5_flow_dv_matcher *cache;
8165 struct mlx5dv_flow_matcher_attr dv_attr = {
8166 .type = IBV_FLOW_ATTR_NORMAL,
8167 .match_mask = (void *)&ref->mask,
8169 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8173 cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8175 rte_flow_error_set(ctx->error, ENOMEM,
8176 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8177 "cannot create matcher");
8181 dv_attr.match_criteria_enable =
8182 flow_dv_matcher_enable(cache->mask.buf);
8183 dv_attr.priority = ref->priority;
8185 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8186 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8187 &cache->matcher_object);
8190 rte_flow_error_set(ctx->error, ENOMEM,
8191 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8192 "cannot create matcher");
8195 return &cache->entry;
8199 * Register the flow matcher.
8201 * @param[in, out] dev
8202 * Pointer to rte_eth_dev structure.
8203 * @param[in, out] matcher
8204 * Pointer to flow matcher.
8205 * @param[in, out] key
8206 * Pointer to flow table key.
8207 * @parm[in, out] dev_flow
8208 * Pointer to the dev_flow.
8210 * pointer to error structure.
8213 * 0 on success otherwise -errno and errno is set.
8216 flow_dv_matcher_register(struct rte_eth_dev *dev,
8217 struct mlx5_flow_dv_matcher *ref,
8218 union mlx5_flow_tbl_key *key,
8219 struct mlx5_flow *dev_flow,
8220 struct rte_flow_error *error)
8222 struct mlx5_cache_entry *entry;
8223 struct mlx5_flow_dv_matcher *cache;
8224 struct mlx5_flow_tbl_resource *tbl;
8225 struct mlx5_flow_tbl_data_entry *tbl_data;
8226 struct mlx5_flow_cb_ctx ctx = {
8231 tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8232 key->domain, false, NULL, 0, 0, error);
8234 return -rte_errno; /* No need to refill the error info */
8235 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8237 entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8239 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8240 return rte_flow_error_set(error, ENOMEM,
8241 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8242 "cannot allocate ref memory");
8244 cache = container_of(entry, typeof(*cache), entry);
8245 dev_flow->handle->dvh.matcher = cache;
8249 struct mlx5_hlist_entry *
8250 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8252 struct mlx5_dev_ctx_shared *sh = list->ctx;
8253 struct rte_flow_error *error = ctx;
8254 struct mlx5_flow_dv_tag_resource *entry;
8258 entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8260 rte_flow_error_set(error, ENOMEM,
8261 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8262 "cannot allocate resource memory");
8266 ret = mlx5_flow_os_create_flow_action_tag(key,
8269 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8270 rte_flow_error_set(error, ENOMEM,
8271 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8272 NULL, "cannot create action");
8275 return &entry->entry;
8279 * Find existing tag resource or create and register a new one.
8281 * @param dev[in, out]
8282 * Pointer to rte_eth_dev structure.
8283 * @param[in, out] tag_be24
8284 * Tag value in big endian then R-shift 8.
8285 * @parm[in, out] dev_flow
8286 * Pointer to the dev_flow.
8288 * pointer to error structure.
8291 * 0 on success otherwise -errno and errno is set.
8294 flow_dv_tag_resource_register
8295 (struct rte_eth_dev *dev,
8297 struct mlx5_flow *dev_flow,
8298 struct rte_flow_error *error)
8300 struct mlx5_priv *priv = dev->data->dev_private;
8301 struct mlx5_flow_dv_tag_resource *cache_resource;
8302 struct mlx5_hlist_entry *entry;
8304 entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8306 cache_resource = container_of
8307 (entry, struct mlx5_flow_dv_tag_resource, entry);
8308 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8309 dev_flow->dv.tag_resource = cache_resource;
8316 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8317 struct mlx5_hlist_entry *entry)
8319 struct mlx5_dev_ctx_shared *sh = list->ctx;
8320 struct mlx5_flow_dv_tag_resource *tag =
8321 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8323 MLX5_ASSERT(tag && sh && tag->action);
8324 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8325 DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8326 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8333 * Pointer to Ethernet device.
8338 * 1 while a reference on it exists, 0 when freed.
8341 flow_dv_tag_release(struct rte_eth_dev *dev,
8344 struct mlx5_priv *priv = dev->data->dev_private;
8345 struct mlx5_flow_dv_tag_resource *tag;
8347 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8350 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8351 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8352 return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8356 * Translate port ID action to vport.
8359 * Pointer to rte_eth_dev structure.
8361 * Pointer to the port ID action.
8362 * @param[out] dst_port_id
8363 * The target port ID.
8365 * Pointer to the error structure.
8368 * 0 on success, a negative errno value otherwise and rte_errno is set.
8371 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8372 const struct rte_flow_action *action,
8373 uint32_t *dst_port_id,
8374 struct rte_flow_error *error)
8377 struct mlx5_priv *priv;
8378 const struct rte_flow_action_port_id *conf =
8379 (const struct rte_flow_action_port_id *)action->conf;
/* "original" selects the current device port instead of conf->id. */
8381 port = conf->original ? dev->data->port_id : conf->id;
8382 priv = mlx5_port_to_eswitch_info(port, false);
/* NOTE(review): the NULL-check line guarding this error path is elided. */
8384 return rte_flow_error_set(error, -rte_errno,
8385 RTE_FLOW_ERROR_TYPE_ACTION,
8387 "No eswitch info was found for port");
8388 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8390 * This parameter is transferred to
8391 * mlx5dv_dr_action_create_dest_ib_port().
8393 *dst_port_id = priv->dev_port;
8396 * Legacy mode, no LAG configurations is supported.
8397 * This parameter is transferred to
8398 * mlx5dv_dr_action_create_dest_vport().
8400 *dst_port_id = priv->vport_id;
8406 * Create a counter with aging configuration.
8409 * Pointer to rte_eth_dev structure.
8411 * Pointer to the counter action configuration.
8413 * Pointer to the aging action configuration.
8416 * Index to flow counter on success, 0 otherwise.
8419 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8420 struct mlx5_flow *dev_flow,
8421 const struct rte_flow_action_count *count,
8422 const struct rte_flow_action_age *age)
8425 struct mlx5_age_param *age_param;
/* Shared counters are looked up by id; otherwise allocate a fresh one. */
8427 if (count && count->shared)
8428 counter = flow_dv_counter_get_shared(dev, count->id);
8430 counter = flow_dv_counter_alloc(dev, !!age);
/* No aging requested (or allocation failed): nothing more to set up.
 * NOTE(review): the early-return line after this check is elided. */
8431 if (!counter || age == NULL)
8433 age_param = flow_dv_counter_idx_get_age(dev, counter);
/* Fall back to the flow index as aging context when none was given. */
8434 age_param->context = age->context ? age->context :
8435 (void *)(uintptr_t)(dev_flow->flow_idx);
8436 age_param->timeout = age->timeout;
8437 age_param->port_id = dev->data->port_id;
/* Relaxed stores: aging state is polled asynchronously by the aging task. */
8438 __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8439 __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8443 * Add Tx queue matcher
8446 * Pointer to the dev struct.
8447 * @param[in, out] matcher
8449 * @param[in, out] key
8450 * Flow matcher value.
8452 * Flow pattern to translate.
8454 * Item is inner pattern.
8457 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8458 void *matcher, void *key,
8459 const struct rte_flow_item *item)
8461 const struct mlx5_rte_flow_item_tx_queue *queue_m;
8462 const struct mlx5_rte_flow_item_tx_queue *queue_v;
8464 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8466 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8467 struct mlx5_txq_ctrl *txq;
8471 queue_m = (const void *)item->mask;
8474 queue_v = (const void *)item->spec;
/* Resolve the Tx queue to its SQ number for matching on source_sqn.
 * NOTE(review): NULL checks between these lines appear elided. */
8477 txq = mlx5_txq_get(dev, queue_v->queue);
8480 queue = txq->obj->sq->id;
8481 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8482 MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8483 queue & queue_m->queue);
/* Drop the reference taken by mlx5_txq_get(). */
8484 mlx5_txq_release(dev, queue_v->queue);
8488 * Set the hash fields according to the @p flow information.
8490 * @param[in] dev_flow
8491 * Pointer to the mlx5_flow.
8492 * @param[in] rss_desc
8493 * Pointer to the mlx5_flow_rss_desc.
8496 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8497 struct mlx5_flow_rss_desc *rss_desc)
8499 uint64_t items = dev_flow->handle->layers;
8501 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8503 dev_flow->hash_fields = 0;
8504 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
/* RSS level >= 2 selects the inner headers of tunneled packets. */
8505 if (rss_desc->level >= 2) {
8506 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
/* NOTE(review): the rss_inner declaration/assignment lines are elided. */
8510 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8511 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8512 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
/* SRC_ONLY/DST_ONLY narrow the hash to one address; otherwise both. */
8513 if (rss_types & ETH_RSS_L3_SRC_ONLY)
8514 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8515 else if (rss_types & ETH_RSS_L3_DST_ONLY)
8516 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8518 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8520 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8521 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8522 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8523 if (rss_types & ETH_RSS_L3_SRC_ONLY)
8524 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8525 else if (rss_types & ETH_RSS_L3_DST_ONLY)
8526 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8528 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
/* L4 selection mirrors L3: UDP and TCP are mutually exclusive layers. */
8531 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8532 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8533 if (rss_types & ETH_RSS_UDP) {
8534 if (rss_types & ETH_RSS_L4_SRC_ONLY)
8535 dev_flow->hash_fields |=
8536 IBV_RX_HASH_SRC_PORT_UDP;
8537 else if (rss_types & ETH_RSS_L4_DST_ONLY)
8538 dev_flow->hash_fields |=
8539 IBV_RX_HASH_DST_PORT_UDP;
8541 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8543 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8544 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8545 if (rss_types & ETH_RSS_TCP) {
8546 if (rss_types & ETH_RSS_L4_SRC_ONLY)
8547 dev_flow->hash_fields |=
8548 IBV_RX_HASH_SRC_PORT_TCP;
8549 else if (rss_types & ETH_RSS_L4_DST_ONLY)
8550 dev_flow->hash_fields |=
8551 IBV_RX_HASH_DST_PORT_TCP;
8553 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8559 * Prepare an Rx Hash queue.
8562 * Pointer to Ethernet device.
8563 * @param[in] dev_flow
8564 * Pointer to the mlx5_flow.
8565 * @param[in] rss_desc
8566 * Pointer to the mlx5_flow_rss_desc.
8567 * @param[out] hrxq_idx
8568 * Hash Rx queue index.
8571 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8573 static struct mlx5_hrxq *
8574 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8575 struct mlx5_flow *dev_flow,
8576 struct mlx5_flow_rss_desc *rss_desc,
8579 struct mlx5_priv *priv = dev->data->dev_private;
8580 struct mlx5_flow_handle *dh = dev_flow->handle;
8581 struct mlx5_hrxq *hrxq;
8583 MLX5_ASSERT(rss_desc->queue_num);
/* Complete the RSS descriptor from the translated flow before lookup. */
8584 rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8585 rss_desc->hash_fields = dev_flow->hash_fields;
8586 rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8587 rss_desc->standalone = false;
/* Get-or-create the hrxq; on success fetch the object from the ipool.
 * NOTE(review): the failure check and return lines here are elided. */
8588 *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8591 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8597 * Release sample sub action resource.
8599 * @param[in, out] dev
8600 * Pointer to rte_eth_dev structure.
8601 * @param[in] act_res
8602 * Pointer to sample sub action resource.
8605 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8606 struct mlx5_flow_sub_actions_idx *act_res)
/* Each sub-resource index is released and then zeroed so a second call
 * (or a later cleanup pass) is a harmless no-op. */
8608 if (act_res->rix_hrxq) {
8609 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8610 act_res->rix_hrxq = 0;
8612 if (act_res->rix_encap_decap) {
8613 flow_dv_encap_decap_resource_release(dev,
8614 act_res->rix_encap_decap);
8615 act_res->rix_encap_decap = 0;
8617 if (act_res->rix_port_id_action) {
8618 flow_dv_port_id_action_resource_release(dev,
8619 act_res->rix_port_id_action);
8620 act_res->rix_port_id_action = 0;
8622 if (act_res->rix_tag) {
8623 flow_dv_tag_release(dev, act_res->rix_tag);
8624 act_res->rix_tag = 0;
/* NOTE(review): the guard/reset lines around the counter release are
 * elided in this listing. */
8627 flow_dv_counter_release(dev, act_res->cnt);
/*
 * Cache match callback for sample resources: compare the candidate in
 * @cb_ctx against an existing cache entry field by field.  On a match
 * the prepared sub-action references of the candidate are released,
 * since the cached entry already owns equivalent ones.
 */
8633 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8634 struct mlx5_cache_entry *entry, void *cb_ctx)
8636 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8637 struct rte_eth_dev *dev = ctx->dev;
8638 struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8639 struct mlx5_flow_dv_sample_resource *cache_resource =
8640 container_of(entry, typeof(*cache_resource), entry);
8642 if (resource->ratio == cache_resource->ratio &&
8643 resource->ft_type == cache_resource->ft_type &&
8644 resource->ft_id == cache_resource->ft_id &&
8645 resource->set_action == cache_resource->set_action &&
8646 !memcmp((void *)&resource->sample_act,
8647 (void *)&cache_resource->sample_act,
8648 sizeof(struct mlx5_flow_sub_actions_list))) {
8650 * Existing sample action should release the prepared
8651 * sub-actions reference counter.
8653 flow_dv_sample_sub_actions_release(dev,
8654 &resource->sample_idx);
/* NOTE(review): the return statements of both branches are elided. */
/*
 * Cache create callback for sample resources: allocate a resource from
 * the SAMPLE ipool, create the normal-path flow table at the next table
 * level, an FDB default-miss action when needed, and finally the DR
 * sampler action.  On any failure every partially created object is
 * rolled back and NULL is implied (the error-path return is elided here).
 */
8660 struct mlx5_cache_entry *
8661 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
8662 struct mlx5_cache_entry *entry __rte_unused,
8665 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8666 struct rte_eth_dev *dev = ctx->dev;
8667 struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8668 void **sample_dv_actions = resource->sub_actions;
8669 struct mlx5_flow_dv_sample_resource *cache_resource;
8670 struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8671 struct mlx5_priv *priv = dev->data->dev_private;
8672 struct mlx5_dev_ctx_shared *sh = priv->sh;
8673 struct mlx5_flow_tbl_resource *tbl;
8675 const uint32_t next_ft_step = 1;
/* Non-sampled traffic continues in the next table level. */
8676 uint32_t next_ft_id = resource->ft_id + next_ft_step;
8677 uint8_t is_egress = 0;
8678 uint8_t is_transfer = 0;
8679 struct rte_flow_error *error = ctx->error;
8681 /* Register new sample resource. */
8682 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
8683 if (!cache_resource) {
8684 rte_flow_error_set(error, ENOMEM,
8685 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8687 "cannot allocate resource memory");
8690 *cache_resource = *resource;
8691 /* Create normal path table level */
/* NOTE(review): the is_transfer/is_egress assignment lines are elided. */
8692 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8694 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8696 tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8697 is_egress, is_transfer,
8698 true, NULL, 0, 0, error);
8700 rte_flow_error_set(error, ENOMEM,
8701 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8703 "fail to create normal path table "
8707 cache_resource->normal_path_tbl = tbl;
8708 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
/* FDB needs an explicit default-miss as the last sample action. */
8709 cache_resource->default_miss =
8710 mlx5_glue->dr_create_flow_action_default_miss();
8711 if (!cache_resource->default_miss) {
8712 rte_flow_error_set(error, ENOMEM,
8713 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8715 "cannot create default miss "
8719 sample_dv_actions[resource->sample_act.actions_num++] =
8720 cache_resource->default_miss;
8722 /* Create a DR sample action */
8723 sampler_attr.sample_ratio = cache_resource->ratio;
8724 sampler_attr.default_next_table = tbl->obj;
8725 sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8726 sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8727 &sample_dv_actions[0];
8728 sampler_attr.action = cache_resource->set_action;
8729 cache_resource->verbs_action =
8730 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8731 if (!cache_resource->verbs_action) {
8732 rte_flow_error_set(error, ENOMEM,
8733 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8734 NULL, "cannot create sample action");
8737 cache_resource->idx = idx;
8738 return &cache_resource->entry;
/* Error rollback path (label elided): undo in reverse creation order. */
8740 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
8741 cache_resource->default_miss)
8742 claim_zero(mlx5_glue->destroy_flow_action
8743 (cache_resource->default_miss));
8745 flow_dv_sample_sub_actions_release(dev,
8746 &cache_resource->sample_idx);
8747 if (cache_resource->normal_path_tbl)
8748 flow_dv_tbl_resource_release(MLX5_SH(dev),
8749 cache_resource->normal_path_tbl);
8750 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
8756 * Find existing sample resource or create and register a new one.
8758 * @param[in, out] dev
8759 * Pointer to rte_eth_dev structure.
8760 * @param[in] resource
8761 * Pointer to sample resource.
8762 * @parm[in, out] dev_flow
8763 * Pointer to the dev_flow.
8765 * pointer to error structure.
8768 * 0 on success otherwise -errno and errno is set.
8771 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8772 struct mlx5_flow_dv_sample_resource *resource,
8773 struct mlx5_flow *dev_flow,
8774 struct rte_flow_error *error)
8776 struct mlx5_flow_dv_sample_resource *cache_resource;
8777 struct mlx5_cache_entry *entry;
8778 struct mlx5_priv *priv = dev->data->dev_private;
8779 struct mlx5_flow_cb_ctx ctx = {
/* NOTE(review): ctx initializers (.dev/.error/.data) are elided here. */
/* Lookup-or-create through the cache list; match/create callbacks are
 * flow_dv_sample_match_cb()/flow_dv_sample_create_cb() above. */
8785 entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8788 cache_resource = container_of(entry, typeof(*cache_resource), entry);
/* Record the resolved resource on the flow for later release/use. */
8789 dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8790 dev_flow->dv.sample_res = cache_resource;
/*
 * Cache match callback for destination-array resources: entries match
 * when destination count, table type and the per-destination sub-action
 * lists are all identical.  On a match, release the candidate's prepared
 * per-destination sub-action references (the cached entry owns its own).
 */
8795 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8796 struct mlx5_cache_entry *entry, void *cb_ctx)
8798 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8799 struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8800 struct rte_eth_dev *dev = ctx->dev;
8801 struct mlx5_flow_dv_dest_array_resource *cache_resource =
8802 container_of(entry, typeof(*cache_resource), entry);
8805 if (resource->num_of_dest == cache_resource->num_of_dest &&
8806 resource->ft_type == cache_resource->ft_type &&
8807 !memcmp((void *)cache_resource->sample_act,
8808 (void *)resource->sample_act,
8809 (resource->num_of_dest *
8810 sizeof(struct mlx5_flow_sub_actions_list)))) {
8812 * Existing sample action should release the prepared
8813 * sub-actions reference counter.
8815 for (idx = 0; idx < resource->num_of_dest; idx++)
8816 flow_dv_sample_sub_actions_release(dev,
8817 &resource->sample_idx[idx]);
/* NOTE(review): the return statements of both branches are elided. */
/*
 * Cache create callback for destination-array resources: allocate from
 * the DEST_ARRAY ipool, build one mlx5dv_dr_action_dest_attr per
 * destination (queue, port-id, or port-id + encap reformat), then create
 * the DR dest-array action in the proper domain.  The temporary attr
 * array is freed on both success and failure; on failure all prepared
 * sub-action references are dropped and the ipool entry is freed.
 */
8823 struct mlx5_cache_entry *
8824 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
8825 struct mlx5_cache_entry *entry __rte_unused,
8828 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8829 struct rte_eth_dev *dev = ctx->dev;
8830 struct mlx5_flow_dv_dest_array_resource *cache_resource;
8831 struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8832 struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8833 struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8834 struct mlx5_priv *priv = dev->data->dev_private;
8835 struct mlx5_dev_ctx_shared *sh = priv->sh;
8836 struct mlx5_flow_sub_actions_list *sample_act;
8837 struct mlx5dv_dr_domain *domain;
8838 uint32_t idx = 0, res_idx = 0;
8839 struct rte_flow_error *error = ctx->error;
8841 /* Register new destination array resource. */
8842 cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8844 if (!cache_resource) {
8845 rte_flow_error_set(error, ENOMEM,
8846 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8848 "cannot allocate resource memory");
8851 *cache_resource = *resource;
/* Pick the DR domain matching the flow table type. */
8852 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8853 domain = sh->fdb_domain;
8854 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8855 domain = sh->rx_domain;
8857 domain = sh->tx_domain;
8858 for (idx = 0; idx < resource->num_of_dest; idx++) {
8859 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8860 mlx5_malloc(MLX5_MEM_ZERO,
8861 sizeof(struct mlx5dv_dr_action_dest_attr),
8863 if (!dest_attr[idx]) {
8864 rte_flow_error_set(error, ENOMEM,
8865 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8867 "cannot allocate resource memory");
8870 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8871 sample_act = &resource->sample_act[idx];
8872 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8873 dest_attr[idx]->dest = sample_act->dr_queue_action;
8874 } else if (sample_act->action_flags ==
8875 (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
/* Port-id + encap becomes a DEST_REFORMAT destination. */
8876 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8877 dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8878 dest_attr[idx]->dest_reformat->reformat =
8879 sample_act->dr_encap_action;
8880 dest_attr[idx]->dest_reformat->dest =
8881 sample_act->dr_port_id_action;
8882 } else if (sample_act->action_flags ==
8883 MLX5_FLOW_ACTION_PORT_ID) {
8884 dest_attr[idx]->dest = sample_act->dr_port_id_action;
8887 /* create a dest array action */
8888 cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8890 cache_resource->num_of_dest,
8892 if (!cache_resource->action) {
8893 rte_flow_error_set(error, ENOMEM,
8894 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8896 "cannot create destination array action");
8899 cache_resource->idx = res_idx;
/* Success: the attr array was only needed for action creation. */
8900 for (idx = 0; idx < resource->num_of_dest; idx++)
8901 mlx5_free(dest_attr[idx]);
8902 return &cache_resource->entry;
/* Error rollback path (label elided): release each destination's
 * sub-action references, zeroing indexes that were freed. */
8904 for (idx = 0; idx < resource->num_of_dest; idx++) {
8905 struct mlx5_flow_sub_actions_idx *act_res =
8906 &cache_resource->sample_idx[idx];
8907 if (act_res->rix_hrxq &&
8908 !mlx5_hrxq_release(dev,
8910 act_res->rix_hrxq = 0;
8911 if (act_res->rix_encap_decap &&
8912 !flow_dv_encap_decap_resource_release(dev,
8913 act_res->rix_encap_decap))
8914 act_res->rix_encap_decap = 0;
8915 if (act_res->rix_port_id_action &&
8916 !flow_dv_port_id_action_resource_release(dev,
8917 act_res->rix_port_id_action))
8918 act_res->rix_port_id_action = 0;
8920 mlx5_free(dest_attr[idx]);
8923 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
8928 * Find existing destination array resource or create and register a new one.
8930 * @param[in, out] dev
8931 * Pointer to rte_eth_dev structure.
8932 * @param[in] resource
8933 * Pointer to destination array resource.
8934 * @parm[in, out] dev_flow
8935 * Pointer to the dev_flow.
8937 * pointer to error structure.
8940 * 0 on success otherwise -errno and errno is set.
8943 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8944 struct mlx5_flow_dv_dest_array_resource *resource,
8945 struct mlx5_flow *dev_flow,
8946 struct rte_flow_error *error)
8948 struct mlx5_flow_dv_dest_array_resource *cache_resource;
8949 struct mlx5_priv *priv = dev->data->dev_private;
8950 struct mlx5_cache_entry *entry;
8951 struct mlx5_flow_cb_ctx ctx = {
/* NOTE(review): ctx initializers (.dev/.error/.data) are elided here. */
/* Lookup-or-create via the cache list; callbacks are
 * flow_dv_dest_array_match_cb()/flow_dv_dest_array_create_cb() above. */
8957 entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8960 cache_resource = container_of(entry, typeof(*cache_resource), entry);
/* Record the resolved resource on the flow for later release/use. */
8961 dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8962 dev_flow->dv.dest_array_res = cache_resource;
8967 * Convert Sample action to DV specification.
8970 * Pointer to rte_eth_dev structure.
8972 * Pointer to action structure.
8973 * @param[in, out] dev_flow
8974 * Pointer to the mlx5_flow.
8976 * Pointer to the flow attributes.
8977 * @param[in, out] num_of_dest
8978 * Pointer to the num of destination.
8979 * @param[in, out] sample_actions
8980 * Pointer to sample actions list.
8981 * @param[in, out] res
8982 * Pointer to sample resource.
8984 * Pointer to the error structure.
8987 * 0 on success, a negative errno value otherwise and rte_errno is set.
8990 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8991 const struct rte_flow_action *action,
8992 struct mlx5_flow *dev_flow,
8993 const struct rte_flow_attr *attr,
8994 uint32_t *num_of_dest,
8995 void **sample_actions,
8996 struct mlx5_flow_dv_sample_resource *res,
8997 struct rte_flow_error *error)
8999 struct mlx5_priv *priv = dev->data->dev_private;
9000 const struct rte_flow_action_sample *sample_action;
9001 const struct rte_flow_action *sub_actions;
9002 const struct rte_flow_action_queue *queue;
9003 struct mlx5_flow_sub_actions_list *sample_act;
9004 struct mlx5_flow_sub_actions_idx *sample_idx;
9005 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9006 struct mlx5_flow_rss_desc *rss_desc;
9007 uint64_t action_flags = 0;
/* Nested flows use the second RSS descriptor slot of the workspace. */
9010 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9011 sample_act = &res->sample_act;
9012 sample_idx = &res->sample_idx;
9013 sample_action = (const struct rte_flow_action_sample *)action->conf;
9014 res->ratio = sample_action->ratio;
9015 sub_actions = sample_action->actions;
/* Translate each sub-action of the sample list into a DR action and
 * collect them into sample_actions[]. */
9016 for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9017 int type = sub_actions->type;
9018 uint32_t pre_rix = 0;
9021 case RTE_FLOW_ACTION_TYPE_QUEUE:
9023 struct mlx5_hrxq *hrxq;
9026 queue = sub_actions->conf;
9027 rss_desc->queue_num = 1;
9028 rss_desc->queue[0] = queue->index;
9029 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9030 rss_desc, &hrxq_idx);
9032 return rte_flow_error_set
9034 RTE_FLOW_ERROR_TYPE_ACTION,
9036 "cannot create fate queue");
9037 sample_act->dr_queue_action = hrxq->action;
9038 sample_idx->rix_hrxq = hrxq_idx;
9039 sample_actions[sample_act->actions_num++] =
9042 action_flags |= MLX5_FLOW_ACTION_QUEUE;
9043 if (action_flags & MLX5_FLOW_ACTION_MARK)
9044 dev_flow->handle->rix_hrxq = hrxq_idx;
9045 dev_flow->handle->fate_action =
9046 MLX5_FLOW_FATE_QUEUE;
9049 case RTE_FLOW_ACTION_TYPE_MARK:
9051 uint32_t tag_be = mlx5_flow_mark_set
9052 (((const struct rte_flow_action_mark *)
9053 (sub_actions->conf))->id);
9055 dev_flow->handle->mark = 1;
9056 pre_rix = dev_flow->handle->dvh.rix_tag;
9057 /* Save the mark resource before sample */
9058 pre_r = dev_flow->dv.tag_resource;
9059 if (flow_dv_tag_resource_register(dev, tag_be,
9062 MLX5_ASSERT(dev_flow->dv.tag_resource);
9063 sample_act->dr_tag_action =
9064 dev_flow->dv.tag_resource->action;
9065 sample_idx->rix_tag =
9066 dev_flow->handle->dvh.rix_tag;
9067 sample_actions[sample_act->actions_num++] =
9068 sample_act->dr_tag_action;
9069 /* Recover the mark resource after sample */
9070 dev_flow->dv.tag_resource = pre_r;
9071 dev_flow->handle->dvh.rix_tag = pre_rix;
9072 action_flags |= MLX5_FLOW_ACTION_MARK;
9075 case RTE_FLOW_ACTION_TYPE_COUNT:
/* Counters inside a sample list never carry aging (age = 0). */
9079 counter = flow_dv_translate_create_counter(dev,
9080 dev_flow, sub_actions->conf, 0);
9082 return rte_flow_error_set
9084 RTE_FLOW_ERROR_TYPE_ACTION,
9086 "cannot create counter"
9088 sample_idx->cnt = counter;
9089 sample_act->dr_cnt_action =
9090 (flow_dv_counter_get_by_idx(dev,
9091 counter, NULL))->action;
9092 sample_actions[sample_act->actions_num++] =
9093 sample_act->dr_cnt_action;
9094 action_flags |= MLX5_FLOW_ACTION_COUNT;
9097 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9099 struct mlx5_flow_dv_port_id_action_resource
9101 uint32_t port_id = 0;
9103 memset(&port_id_resource, 0, sizeof(port_id_resource));
9104 /* Save the port id resource before sample */
9105 pre_rix = dev_flow->handle->rix_port_id_action;
9106 pre_r = dev_flow->dv.port_id_action;
9107 if (flow_dv_translate_action_port_id(dev, sub_actions,
9110 port_id_resource.port_id = port_id;
9111 if (flow_dv_port_id_action_resource_register
9112 (dev, &port_id_resource, dev_flow, error))
9114 sample_act->dr_port_id_action =
9115 dev_flow->dv.port_id_action->action;
9116 sample_idx->rix_port_id_action =
9117 dev_flow->handle->rix_port_id_action;
9118 sample_actions[sample_act->actions_num++] =
9119 sample_act->dr_port_id_action;
9120 /* Recover the port id resource after sample */
9121 dev_flow->dv.port_id_action = pre_r;
9122 dev_flow->handle->rix_port_id_action = pre_rix;
9124 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9127 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9128 /* Save the encap resource before sample */
9129 pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9130 pre_r = dev_flow->dv.encap_decap;
9131 if (flow_dv_create_action_l2_encap(dev, sub_actions,
9136 sample_act->dr_encap_action =
9137 dev_flow->dv.encap_decap->action;
9138 sample_idx->rix_encap_decap =
9139 dev_flow->handle->dvh.rix_encap_decap;
9140 sample_actions[sample_act->actions_num++] =
9141 sample_act->dr_encap_action;
9142 /* Recover the encap resource after sample */
9143 dev_flow->dv.encap_decap = pre_r;
9144 dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9145 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9148 return rte_flow_error_set(error, EINVAL,
9149 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9151 "Not support for sampler action");
9154 sample_act->action_flags = action_flags;
9155 res->ft_id = dev_flow->dv.group;
/* For transfer flows, prepare the register-C0 SET action carrying the
 * vport metadata tag; otherwise just record the NIC table type. */
9156 if (attr->transfer) {
9158 uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9159 uint64_t set_action;
9160 } action_ctx = { .set_action = 0 };
9162 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9163 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9164 MLX5_MODIFICATION_TYPE_SET);
9165 MLX5_SET(set_action_in, action_ctx.action_in, field,
9166 MLX5_MODI_META_REG_C_0);
9167 MLX5_SET(set_action_in, action_ctx.action_in, data,
9168 priv->vport_meta_tag);
9169 res->set_action = action_ctx.set_action;
9170 } else if (attr->ingress) {
9171 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9173 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9179 * Convert Sample action to DV specification.
9182 * Pointer to rte_eth_dev structure.
9183 * @param[in, out] dev_flow
9184 * Pointer to the mlx5_flow.
9185 * @param[in] num_of_dest
9186 * The num of destination.
9187 * @param[in, out] res
9188 * Pointer to sample resource.
9189 * @param[in, out] mdest_res
9190 * Pointer to destination array resource.
9191 * @param[in] sample_actions
9192 * Pointer to sample path actions list.
9193 * @param[in] action_flags
9194 * Holds the actions detected until now.
9196 * Pointer to the error structure.
9199 * 0 on success, a negative errno value otherwise and rte_errno is set.
9202 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9203 struct mlx5_flow *dev_flow,
9204 uint32_t num_of_dest,
9205 struct mlx5_flow_dv_sample_resource *res,
9206 struct mlx5_flow_dv_dest_array_resource *mdest_res,
9207 void **sample_actions,
9208 uint64_t action_flags,
9209 struct rte_flow_error *error)
9211 /* update normal path action resource into last index of array */
9212 uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9213 struct mlx5_flow_sub_actions_list *sample_act =
9214 &mdest_res->sample_act[dest_index];
9215 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9216 struct mlx5_flow_rss_desc *rss_desc;
9217 uint32_t normal_idx = 0;
9218 struct mlx5_hrxq *hrxq;
/* Nested flows use the second RSS descriptor slot of the workspace. */
9222 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
/* Mirroring (>1 destination): go through the dest-array resource. */
9223 if (num_of_dest > 1) {
9224 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9225 /* Handle QP action for mirroring */
9226 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9227 rss_desc, &hrxq_idx);
9229 return rte_flow_error_set
9231 RTE_FLOW_ERROR_TYPE_ACTION,
9233 "cannot create rx queue");
/* NOTE(review): the normal_idx++ lines between these blocks are
 * elided in this listing. */
9235 mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9236 sample_act->dr_queue_action = hrxq->action;
9237 if (action_flags & MLX5_FLOW_ACTION_MARK)
9238 dev_flow->handle->rix_hrxq = hrxq_idx;
9239 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9241 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9243 mdest_res->sample_idx[dest_index].rix_encap_decap =
9244 dev_flow->handle->dvh.rix_encap_decap;
9245 sample_act->dr_encap_action =
9246 dev_flow->dv.encap_decap->action;
9248 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9250 mdest_res->sample_idx[dest_index].rix_port_id_action =
9251 dev_flow->handle->rix_port_id_action;
9252 sample_act->dr_port_id_action =
9253 dev_flow->dv.port_id_action->action;
9255 sample_act->actions_num = normal_idx;
9256 /* update sample action resource into first index of array */
9257 mdest_res->ft_type = res->ft_type;
9258 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9259 sizeof(struct mlx5_flow_sub_actions_idx));
9260 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9261 sizeof(struct mlx5_flow_sub_actions_list));
9262 mdest_res->num_of_dest = num_of_dest;
9263 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9265 return rte_flow_error_set(error, EINVAL,
9266 RTE_FLOW_ERROR_TYPE_ACTION,
9267 NULL, "can't create sample "
/* Single destination: register the plain sample resource instead. */
9270 res->sub_actions = sample_actions;
9271 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9272 return rte_flow_error_set(error, EINVAL,
9273 RTE_FLOW_ERROR_TYPE_ACTION,
9275 "can't create sample action");
9281 * Fill the flow with DV spec, lock free
9282 * (mutex should be acquired by caller).
9285 * Pointer to rte_eth_dev structure.
9286 * @param[in, out] dev_flow
9287 * Pointer to the sub flow.
9289 * Pointer to the flow attributes.
9291 * Pointer to the list of items.
9292 * @param[in] actions
9293 * Pointer to the list of actions.
9295 * Pointer to the error structure.
9298 * 0 on success, a negative errno value otherwise and rte_errno is set.
9301 __flow_dv_translate(struct rte_eth_dev *dev,
9302 struct mlx5_flow *dev_flow,
9303 const struct rte_flow_attr *attr,
9304 const struct rte_flow_item items[],
9305 const struct rte_flow_action actions[],
9306 struct rte_flow_error *error)
9308 struct mlx5_priv *priv = dev->data->dev_private;
9309 struct mlx5_dev_config *dev_conf = &priv->config;
9310 struct rte_flow *flow = dev_flow->flow;
9311 struct mlx5_flow_handle *handle = dev_flow->handle;
9312 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9313 struct mlx5_flow_rss_desc *rss_desc;
9314 uint64_t item_flags = 0;
9315 uint64_t last_item = 0;
9316 uint64_t action_flags = 0;
9317 uint64_t priority = attr->priority;
9318 struct mlx5_flow_dv_matcher matcher = {
9320 .size = sizeof(matcher.mask.buf) -
9321 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9325 bool actions_end = false;
9327 struct mlx5_flow_dv_modify_hdr_resource res;
9328 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9329 sizeof(struct mlx5_modification_cmd) *
9330 (MLX5_MAX_MODIFY_NUM + 1)];
9332 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9333 const struct rte_flow_action_count *count = NULL;
9334 const struct rte_flow_action_age *age = NULL;
9335 union flow_dv_attr flow_attr = { .attr = 0 };
9337 union mlx5_flow_tbl_key tbl_key;
9338 uint32_t modify_action_position = UINT32_MAX;
9339 void *match_mask = matcher.mask.buf;
9340 void *match_value = dev_flow->dv.value.buf;
9341 uint8_t next_protocol = 0xff;
9342 struct rte_vlan_hdr vlan = { 0 };
9343 struct mlx5_flow_dv_dest_array_resource mdest_res;
9344 struct mlx5_flow_dv_sample_resource sample_res;
9345 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9346 struct mlx5_flow_sub_actions_list *sample_act;
9347 uint32_t sample_act_pos = UINT32_MAX;
9348 uint32_t num_of_dest = 0;
9349 int tmp_actions_n = 0;
9352 const struct mlx5_flow_tunnel *tunnel;
9353 struct flow_grp_info grp_info = {
9354 .external = !!dev_flow->external,
9355 .transfer = !!attr->transfer,
9356 .fdb_def_rule = !!priv->fdb_def_rule,
9360 rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9361 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9362 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9363 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9364 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9365 /* update normal path action resource into last index of array */
9366 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9367 tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9368 flow_items_to_tunnel(items) :
9369 is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9370 flow_actions_to_tunnel(actions) :
9371 dev_flow->tunnel ? dev_flow->tunnel : NULL;
9372 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9373 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9374 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9375 (dev, tunnel, attr, items, actions);
9376 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9380 dev_flow->dv.group = table;
9382 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9383 if (priority == MLX5_FLOW_PRIO_RSVD)
9384 priority = dev_conf->flow_prio - 1;
9385 /* number of actions must be set to 0 in case of dirty stack. */
9386 mhdr_res->actions_num = 0;
9387 if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9389 * do not add decap action if match rule drops packet
9390 * HW rejects rules with decap & drop
9392 bool add_decap = true;
9393 const struct rte_flow_action *ptr = actions;
9394 struct mlx5_flow_tbl_resource *tbl;
9396 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9397 if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9403 if (flow_dv_create_action_l2_decap(dev, dev_flow,
9407 dev_flow->dv.actions[actions_n++] =
9408 dev_flow->dv.encap_decap->action;
9409 action_flags |= MLX5_FLOW_ACTION_DECAP;
9412 * bind table_id with <group, table> for tunnel match rule.
9413 * Tunnel set rule establishes that bind in JUMP action handler.
9414 * Required for scenario when application creates tunnel match
9415 * rule before tunnel set rule.
9417 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9419 !!dev_flow->external, tunnel,
9420 attr->group, 0, error);
9422 return rte_flow_error_set
9423 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9424 actions, "cannot register tunnel group");
9426 for (; !actions_end ; actions++) {
9427 const struct rte_flow_action_queue *queue;
9428 const struct rte_flow_action_rss *rss;
9429 const struct rte_flow_action *action = actions;
9430 const uint8_t *rss_key;
9431 const struct rte_flow_action_meter *mtr;
9432 struct mlx5_flow_tbl_resource *tbl;
9433 uint32_t port_id = 0;
9434 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9435 int action_type = actions->type;
9436 const struct rte_flow_action *found_action = NULL;
9437 struct mlx5_flow_meter *fm = NULL;
9438 uint32_t jump_group = 0;
9440 if (!mlx5_flow_os_action_supported(action_type))
9441 return rte_flow_error_set(error, ENOTSUP,
9442 RTE_FLOW_ERROR_TYPE_ACTION,
9444 "action not supported");
9445 switch (action_type) {
9446 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9447 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9449 case RTE_FLOW_ACTION_TYPE_VOID:
9451 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9452 if (flow_dv_translate_action_port_id(dev, action,
9455 port_id_resource.port_id = port_id;
9456 MLX5_ASSERT(!handle->rix_port_id_action);
9457 if (flow_dv_port_id_action_resource_register
9458 (dev, &port_id_resource, dev_flow, error))
9460 dev_flow->dv.actions[actions_n++] =
9461 dev_flow->dv.port_id_action->action;
9462 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9463 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9464 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9467 case RTE_FLOW_ACTION_TYPE_FLAG:
9468 action_flags |= MLX5_FLOW_ACTION_FLAG;
9469 dev_flow->handle->mark = 1;
9470 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9471 struct rte_flow_action_mark mark = {
9472 .id = MLX5_FLOW_MARK_DEFAULT,
9475 if (flow_dv_convert_action_mark(dev, &mark,
9479 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9482 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9484 * Only one FLAG or MARK is supported per device flow
9485 * right now. So the pointer to the tag resource must be
9486 * zero before the register process.
9488 MLX5_ASSERT(!handle->dvh.rix_tag);
9489 if (flow_dv_tag_resource_register(dev, tag_be,
9492 MLX5_ASSERT(dev_flow->dv.tag_resource);
9493 dev_flow->dv.actions[actions_n++] =
9494 dev_flow->dv.tag_resource->action;
9496 case RTE_FLOW_ACTION_TYPE_MARK:
9497 action_flags |= MLX5_FLOW_ACTION_MARK;
9498 dev_flow->handle->mark = 1;
9499 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9500 const struct rte_flow_action_mark *mark =
9501 (const struct rte_flow_action_mark *)
9504 if (flow_dv_convert_action_mark(dev, mark,
9508 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9512 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9513 /* Legacy (non-extensive) MARK action. */
9514 tag_be = mlx5_flow_mark_set
9515 (((const struct rte_flow_action_mark *)
9516 (actions->conf))->id);
9517 MLX5_ASSERT(!handle->dvh.rix_tag);
9518 if (flow_dv_tag_resource_register(dev, tag_be,
9521 MLX5_ASSERT(dev_flow->dv.tag_resource);
9522 dev_flow->dv.actions[actions_n++] =
9523 dev_flow->dv.tag_resource->action;
9525 case RTE_FLOW_ACTION_TYPE_SET_META:
9526 if (flow_dv_convert_action_set_meta
9527 (dev, mhdr_res, attr,
9528 (const struct rte_flow_action_set_meta *)
9529 actions->conf, error))
9531 action_flags |= MLX5_FLOW_ACTION_SET_META;
9533 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9534 if (flow_dv_convert_action_set_tag
9536 (const struct rte_flow_action_set_tag *)
9537 actions->conf, error))
9539 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9541 case RTE_FLOW_ACTION_TYPE_DROP:
9542 action_flags |= MLX5_FLOW_ACTION_DROP;
9543 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9545 case RTE_FLOW_ACTION_TYPE_QUEUE:
9546 queue = actions->conf;
9547 rss_desc->queue_num = 1;
9548 rss_desc->queue[0] = queue->index;
9549 action_flags |= MLX5_FLOW_ACTION_QUEUE;
9550 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9551 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9554 case RTE_FLOW_ACTION_TYPE_RSS:
9555 rss = actions->conf;
9556 memcpy(rss_desc->queue, rss->queue,
9557 rss->queue_num * sizeof(uint16_t));
9558 rss_desc->queue_num = rss->queue_num;
9559 /* NULL RSS key indicates default RSS key. */
9560 rss_key = !rss->key ? rss_hash_default_key : rss->key;
9561 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9563 * rss->level and rss.types should be set in advance
9564 * when expanding items for RSS.
9566 action_flags |= MLX5_FLOW_ACTION_RSS;
9567 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9569 case RTE_FLOW_ACTION_TYPE_AGE:
9570 case RTE_FLOW_ACTION_TYPE_COUNT:
9571 if (!dev_conf->devx) {
9572 return rte_flow_error_set
9574 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9576 "count action not supported");
9578 /* Save information first, will apply later. */
9579 if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9580 count = action->conf;
9583 action_flags |= MLX5_FLOW_ACTION_COUNT;
9585 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9586 dev_flow->dv.actions[actions_n++] =
9587 priv->sh->pop_vlan_action;
9588 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9590 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9591 if (!(action_flags &
9592 MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9593 flow_dev_get_vlan_info_from_items(items, &vlan);
9594 vlan.eth_proto = rte_be_to_cpu_16
9595 ((((const struct rte_flow_action_of_push_vlan *)
9596 actions->conf)->ethertype));
9597 found_action = mlx5_flow_find_action
9599 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9601 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9602 found_action = mlx5_flow_find_action
9604 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9606 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9607 if (flow_dv_create_action_push_vlan
9608 (dev, attr, &vlan, dev_flow, error))
9610 dev_flow->dv.actions[actions_n++] =
9611 dev_flow->dv.push_vlan_res->action;
9612 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9614 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9615 /* of_vlan_push action handled this action */
9616 MLX5_ASSERT(action_flags &
9617 MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9619 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9620 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9622 flow_dev_get_vlan_info_from_items(items, &vlan);
9623 mlx5_update_vlan_vid_pcp(actions, &vlan);
9624 /* If no VLAN push - this is a modify header action */
9625 if (flow_dv_convert_action_modify_vlan_vid
9626 (mhdr_res, actions, error))
9628 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9630 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9631 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9632 if (flow_dv_create_action_l2_encap(dev, actions,
9637 dev_flow->dv.actions[actions_n++] =
9638 dev_flow->dv.encap_decap->action;
9639 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9640 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9641 sample_act->action_flags |=
9642 MLX5_FLOW_ACTION_ENCAP;
9644 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9645 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9646 if (flow_dv_create_action_l2_decap(dev, dev_flow,
9650 dev_flow->dv.actions[actions_n++] =
9651 dev_flow->dv.encap_decap->action;
9652 action_flags |= MLX5_FLOW_ACTION_DECAP;
9654 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9655 /* Handle encap with preceding decap. */
9656 if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9657 if (flow_dv_create_action_raw_encap
9658 (dev, actions, dev_flow, attr, error))
9660 dev_flow->dv.actions[actions_n++] =
9661 dev_flow->dv.encap_decap->action;
9663 /* Handle encap without preceding decap. */
9664 if (flow_dv_create_action_l2_encap
9665 (dev, actions, dev_flow, attr->transfer,
9668 dev_flow->dv.actions[actions_n++] =
9669 dev_flow->dv.encap_decap->action;
9671 action_flags |= MLX5_FLOW_ACTION_ENCAP;
9672 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9673 sample_act->action_flags |=
9674 MLX5_FLOW_ACTION_ENCAP;
9676 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9677 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9679 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9680 if (flow_dv_create_action_l2_decap
9681 (dev, dev_flow, attr->transfer, error))
9683 dev_flow->dv.actions[actions_n++] =
9684 dev_flow->dv.encap_decap->action;
9686 /* If decap is followed by encap, handle it at encap. */
9687 action_flags |= MLX5_FLOW_ACTION_DECAP;
9689 case RTE_FLOW_ACTION_TYPE_JUMP:
9690 jump_group = ((const struct rte_flow_action_jump *)
9691 action->conf)->group;
9692 grp_info.std_tbl_fix = 0;
9693 ret = mlx5_flow_group_to_table(dev, tunnel,
9699 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9701 !!dev_flow->external,
9702 tunnel, jump_group, 0,
9705 return rte_flow_error_set
9707 RTE_FLOW_ERROR_TYPE_ACTION,
9709 "cannot create jump action.");
9710 if (flow_dv_jump_tbl_resource_register
9711 (dev, tbl, dev_flow, error)) {
9712 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9713 return rte_flow_error_set
9715 RTE_FLOW_ERROR_TYPE_ACTION,
9717 "cannot create jump action.");
9719 dev_flow->dv.actions[actions_n++] =
9720 dev_flow->dv.jump->action;
9721 action_flags |= MLX5_FLOW_ACTION_JUMP;
9722 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9724 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9725 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9726 if (flow_dv_convert_action_modify_mac
9727 (mhdr_res, actions, error))
9729 action_flags |= actions->type ==
9730 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9731 MLX5_FLOW_ACTION_SET_MAC_SRC :
9732 MLX5_FLOW_ACTION_SET_MAC_DST;
9734 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9735 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9736 if (flow_dv_convert_action_modify_ipv4
9737 (mhdr_res, actions, error))
9739 action_flags |= actions->type ==
9740 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9741 MLX5_FLOW_ACTION_SET_IPV4_SRC :
9742 MLX5_FLOW_ACTION_SET_IPV4_DST;
9744 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9745 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9746 if (flow_dv_convert_action_modify_ipv6
9747 (mhdr_res, actions, error))
9749 action_flags |= actions->type ==
9750 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9751 MLX5_FLOW_ACTION_SET_IPV6_SRC :
9752 MLX5_FLOW_ACTION_SET_IPV6_DST;
9754 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9755 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9756 if (flow_dv_convert_action_modify_tp
9757 (mhdr_res, actions, items,
9758 &flow_attr, dev_flow, !!(action_flags &
9759 MLX5_FLOW_ACTION_DECAP), error))
9761 action_flags |= actions->type ==
9762 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9763 MLX5_FLOW_ACTION_SET_TP_SRC :
9764 MLX5_FLOW_ACTION_SET_TP_DST;
9766 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9767 if (flow_dv_convert_action_modify_dec_ttl
9768 (mhdr_res, items, &flow_attr, dev_flow,
9770 MLX5_FLOW_ACTION_DECAP), error))
9772 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9774 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9775 if (flow_dv_convert_action_modify_ttl
9776 (mhdr_res, actions, items, &flow_attr,
9777 dev_flow, !!(action_flags &
9778 MLX5_FLOW_ACTION_DECAP), error))
9780 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9782 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9783 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9784 if (flow_dv_convert_action_modify_tcp_seq
9785 (mhdr_res, actions, error))
9787 action_flags |= actions->type ==
9788 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9789 MLX5_FLOW_ACTION_INC_TCP_SEQ :
9790 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9793 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9794 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9795 if (flow_dv_convert_action_modify_tcp_ack
9796 (mhdr_res, actions, error))
9798 action_flags |= actions->type ==
9799 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9800 MLX5_FLOW_ACTION_INC_TCP_ACK :
9801 MLX5_FLOW_ACTION_DEC_TCP_ACK;
9803 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9804 if (flow_dv_convert_action_set_reg
9805 (mhdr_res, actions, error))
9807 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9809 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9810 if (flow_dv_convert_action_copy_mreg
9811 (dev, mhdr_res, actions, error))
9813 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9815 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9816 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9817 dev_flow->handle->fate_action =
9818 MLX5_FLOW_FATE_DEFAULT_MISS;
9820 case RTE_FLOW_ACTION_TYPE_METER:
9821 mtr = actions->conf;
9823 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9826 return rte_flow_error_set(error,
9828 RTE_FLOW_ERROR_TYPE_ACTION,
9831 "or invalid parameters");
9832 flow->meter = fm->idx;
9834 /* Set the meter action. */
9836 fm = mlx5_ipool_get(priv->sh->ipool
9837 [MLX5_IPOOL_MTR], flow->meter);
9839 return rte_flow_error_set(error,
9841 RTE_FLOW_ERROR_TYPE_ACTION,
9844 "or invalid parameters");
9846 dev_flow->dv.actions[actions_n++] =
9847 fm->mfts->meter_action;
9848 action_flags |= MLX5_FLOW_ACTION_METER;
9850 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9851 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9854 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9856 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9857 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9860 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9862 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9863 sample_act_pos = actions_n;
9864 ret = flow_dv_translate_action_sample(dev,
9874 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9875 /* put encap action into group if work with port id */
9876 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9877 (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9878 sample_act->action_flags |=
9879 MLX5_FLOW_ACTION_ENCAP;
9881 case RTE_FLOW_ACTION_TYPE_END:
9883 if (mhdr_res->actions_num) {
9884 /* create modify action if needed. */
9885 if (flow_dv_modify_hdr_resource_register
9886 (dev, mhdr_res, dev_flow, error))
9888 dev_flow->dv.actions[modify_action_position] =
9889 handle->dvh.modify_hdr->action;
9891 if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9893 flow_dv_translate_create_counter(dev,
9894 dev_flow, count, age);
9897 return rte_flow_error_set
9899 RTE_FLOW_ERROR_TYPE_ACTION,
9901 "cannot create counter"
9903 dev_flow->dv.actions[actions_n] =
9904 (flow_dv_counter_get_by_idx(dev,
9905 flow->counter, NULL))->action;
9908 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9909 ret = flow_dv_create_action_sample(dev,
9918 return rte_flow_error_set
9920 RTE_FLOW_ERROR_TYPE_ACTION,
9922 "cannot create sample action");
9923 if (num_of_dest > 1) {
9924 dev_flow->dv.actions[sample_act_pos] =
9925 dev_flow->dv.dest_array_res->action;
9927 dev_flow->dv.actions[sample_act_pos] =
9928 dev_flow->dv.sample_res->verbs_action;
9935 if (mhdr_res->actions_num &&
9936 modify_action_position == UINT32_MAX)
9937 modify_action_position = actions_n++;
9940 * For multiple destination (sample action with ratio=1), the encap
9941 * action and port id action will be combined into group action.
9942 * So need remove the original these actions in the flow and only
9943 * use the sample action instead of.
9945 if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9947 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9949 for (i = 0; i < actions_n; i++) {
9950 if ((sample_act->dr_encap_action &&
9951 sample_act->dr_encap_action ==
9952 dev_flow->dv.actions[i]) ||
9953 (sample_act->dr_port_id_action &&
9954 sample_act->dr_port_id_action ==
9955 dev_flow->dv.actions[i]))
9957 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9959 memcpy((void *)dev_flow->dv.actions,
9960 (void *)temp_actions,
9961 tmp_actions_n * sizeof(void *));
9962 actions_n = tmp_actions_n;
9964 dev_flow->dv.actions_n = actions_n;
9965 dev_flow->act_flags = action_flags;
9966 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9967 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9968 int item_type = items->type;
9970 if (!mlx5_flow_os_item_supported(item_type))
9971 return rte_flow_error_set(error, ENOTSUP,
9972 RTE_FLOW_ERROR_TYPE_ITEM,
9973 NULL, "item not supported");
9974 switch (item_type) {
9975 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9976 flow_dv_translate_item_port_id(dev, match_mask,
9977 match_value, items);
9978 last_item = MLX5_FLOW_ITEM_PORT_ID;
9980 case RTE_FLOW_ITEM_TYPE_ETH:
9981 flow_dv_translate_item_eth(match_mask, match_value,
9983 dev_flow->dv.group);
9984 matcher.priority = action_flags &
9985 MLX5_FLOW_ACTION_DEFAULT_MISS &&
9986 !dev_flow->external ?
9987 MLX5_PRIORITY_MAP_L3 :
9988 MLX5_PRIORITY_MAP_L2;
9989 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9990 MLX5_FLOW_LAYER_OUTER_L2;
9992 case RTE_FLOW_ITEM_TYPE_VLAN:
9993 flow_dv_translate_item_vlan(dev_flow,
9994 match_mask, match_value,
9996 dev_flow->dv.group);
9997 matcher.priority = MLX5_PRIORITY_MAP_L2;
9998 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9999 MLX5_FLOW_LAYER_INNER_VLAN) :
10000 (MLX5_FLOW_LAYER_OUTER_L2 |
10001 MLX5_FLOW_LAYER_OUTER_VLAN);
10003 case RTE_FLOW_ITEM_TYPE_IPV4:
10004 mlx5_flow_tunnel_ip_check(items, next_protocol,
10005 &item_flags, &tunnel);
10006 flow_dv_translate_item_ipv4(match_mask, match_value,
10008 dev_flow->dv.group);
10009 matcher.priority = MLX5_PRIORITY_MAP_L3;
10010 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10011 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10012 if (items->mask != NULL &&
10013 ((const struct rte_flow_item_ipv4 *)
10014 items->mask)->hdr.next_proto_id) {
10016 ((const struct rte_flow_item_ipv4 *)
10017 (items->spec))->hdr.next_proto_id;
10019 ((const struct rte_flow_item_ipv4 *)
10020 (items->mask))->hdr.next_proto_id;
10022 /* Reset for inner layer. */
10023 next_protocol = 0xff;
10026 case RTE_FLOW_ITEM_TYPE_IPV6:
10027 mlx5_flow_tunnel_ip_check(items, next_protocol,
10028 &item_flags, &tunnel);
10029 flow_dv_translate_item_ipv6(match_mask, match_value,
10031 dev_flow->dv.group);
10032 matcher.priority = MLX5_PRIORITY_MAP_L3;
10033 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10034 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10035 if (items->mask != NULL &&
10036 ((const struct rte_flow_item_ipv6 *)
10037 items->mask)->hdr.proto) {
10039 ((const struct rte_flow_item_ipv6 *)
10040 items->spec)->hdr.proto;
10042 ((const struct rte_flow_item_ipv6 *)
10043 items->mask)->hdr.proto;
10045 /* Reset for inner layer. */
10046 next_protocol = 0xff;
10049 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10050 flow_dv_translate_item_ipv6_frag_ext(match_mask,
10053 last_item = tunnel ?
10054 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10055 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10056 if (items->mask != NULL &&
10057 ((const struct rte_flow_item_ipv6_frag_ext *)
10058 items->mask)->hdr.next_header) {
10060 ((const struct rte_flow_item_ipv6_frag_ext *)
10061 items->spec)->hdr.next_header;
10063 ((const struct rte_flow_item_ipv6_frag_ext *)
10064 items->mask)->hdr.next_header;
10066 /* Reset for inner layer. */
10067 next_protocol = 0xff;
10070 case RTE_FLOW_ITEM_TYPE_TCP:
10071 flow_dv_translate_item_tcp(match_mask, match_value,
10073 matcher.priority = MLX5_PRIORITY_MAP_L4;
10074 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10075 MLX5_FLOW_LAYER_OUTER_L4_TCP;
10077 case RTE_FLOW_ITEM_TYPE_UDP:
10078 flow_dv_translate_item_udp(match_mask, match_value,
10080 matcher.priority = MLX5_PRIORITY_MAP_L4;
10081 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10082 MLX5_FLOW_LAYER_OUTER_L4_UDP;
10084 case RTE_FLOW_ITEM_TYPE_GRE:
10085 flow_dv_translate_item_gre(match_mask, match_value,
10087 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10088 last_item = MLX5_FLOW_LAYER_GRE;
10090 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10091 flow_dv_translate_item_gre_key(match_mask,
10092 match_value, items);
10093 last_item = MLX5_FLOW_LAYER_GRE_KEY;
10095 case RTE_FLOW_ITEM_TYPE_NVGRE:
10096 flow_dv_translate_item_nvgre(match_mask, match_value,
10098 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10099 last_item = MLX5_FLOW_LAYER_GRE;
10101 case RTE_FLOW_ITEM_TYPE_VXLAN:
10102 flow_dv_translate_item_vxlan(match_mask, match_value,
10104 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10105 last_item = MLX5_FLOW_LAYER_VXLAN;
10107 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10108 flow_dv_translate_item_vxlan_gpe(match_mask,
10109 match_value, items,
10111 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10112 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10114 case RTE_FLOW_ITEM_TYPE_GENEVE:
10115 flow_dv_translate_item_geneve(match_mask, match_value,
10117 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10118 last_item = MLX5_FLOW_LAYER_GENEVE;
10120 case RTE_FLOW_ITEM_TYPE_MPLS:
10121 flow_dv_translate_item_mpls(match_mask, match_value,
10122 items, last_item, tunnel);
10123 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10124 last_item = MLX5_FLOW_LAYER_MPLS;
10126 case RTE_FLOW_ITEM_TYPE_MARK:
10127 flow_dv_translate_item_mark(dev, match_mask,
10128 match_value, items);
10129 last_item = MLX5_FLOW_ITEM_MARK;
10131 case RTE_FLOW_ITEM_TYPE_META:
10132 flow_dv_translate_item_meta(dev, match_mask,
10133 match_value, attr, items);
10134 last_item = MLX5_FLOW_ITEM_METADATA;
10136 case RTE_FLOW_ITEM_TYPE_ICMP:
10137 flow_dv_translate_item_icmp(match_mask, match_value,
10139 last_item = MLX5_FLOW_LAYER_ICMP;
10141 case RTE_FLOW_ITEM_TYPE_ICMP6:
10142 flow_dv_translate_item_icmp6(match_mask, match_value,
10144 last_item = MLX5_FLOW_LAYER_ICMP6;
10146 case RTE_FLOW_ITEM_TYPE_TAG:
10147 flow_dv_translate_item_tag(dev, match_mask,
10148 match_value, items);
10149 last_item = MLX5_FLOW_ITEM_TAG;
10151 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10152 flow_dv_translate_mlx5_item_tag(dev, match_mask,
10153 match_value, items);
10154 last_item = MLX5_FLOW_ITEM_TAG;
10156 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10157 flow_dv_translate_item_tx_queue(dev, match_mask,
10160 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10162 case RTE_FLOW_ITEM_TYPE_GTP:
10163 flow_dv_translate_item_gtp(match_mask, match_value,
10165 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10166 last_item = MLX5_FLOW_LAYER_GTP;
10168 case RTE_FLOW_ITEM_TYPE_ECPRI:
10169 if (!mlx5_flex_parser_ecpri_exist(dev)) {
10170 /* Create it only the first time to be used. */
10171 ret = mlx5_flex_parser_ecpri_alloc(dev);
10173 return rte_flow_error_set
10175 RTE_FLOW_ERROR_TYPE_ITEM,
10177 "cannot create eCPRI parser");
10179 /* Adjust the length matcher and device flow value. */
10180 matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10181 dev_flow->dv.value.size =
10182 MLX5_ST_SZ_BYTES(fte_match_param);
10183 flow_dv_translate_item_ecpri(dev, match_mask,
10184 match_value, items);
10185 /* No other protocol should follow eCPRI layer. */
10186 last_item = MLX5_FLOW_LAYER_ECPRI;
10191 item_flags |= last_item;
10194 * When E-Switch mode is enabled, we have two cases where we need to
10195 * set the source port manually.
10196 * The first one, is in case of Nic steering rule, and the second is
10197 * E-Switch rule where no port_id item was found. In both cases
10198 * the source port is set according the current port in use.
10200 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10201 (priv->representor || priv->master)) {
10202 if (flow_dv_translate_item_port_id(dev, match_mask,
10203 match_value, NULL))
10206 #ifdef RTE_LIBRTE_MLX5_DEBUG
10207 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10208 dev_flow->dv.value.buf));
10211 * Layers may be already initialized from prefix flow if this dev_flow
10212 * is the suffix flow.
10214 handle->layers |= item_flags;
10215 if (action_flags & MLX5_FLOW_ACTION_RSS)
10216 flow_dv_hashfields_set(dev_flow, rss_desc);
10217 /* Register matcher. */
10218 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10219 matcher.mask.size);
10220 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10222 /* reserved field no needs to be set to 0 here. */
10223 tbl_key.domain = attr->transfer;
10224 tbl_key.direction = attr->egress;
10225 tbl_key.table_id = dev_flow->dv.group;
10226 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10232 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10235 * @param[in, out] action
10236 * Shred RSS action holding hash RX queue objects.
10237 * @param[in] hash_fields
10238 * Defines combination of packet fields to participate in RX hash.
10239 * @param[in] tunnel
10241 * @param[in] hrxq_idx
10242 * Hash RX queue index to set.
10245 * 0 on success, otherwise negative errno value.
10248 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10249 const uint64_t hash_fields,
10253 uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10255 switch (hash_fields & ~IBV_RX_HASH_INNER) {
10256 case MLX5_RSS_HASH_IPV4:
10257 hrxqs[0] = hrxq_idx;
10259 case MLX5_RSS_HASH_IPV4_TCP:
10260 hrxqs[1] = hrxq_idx;
10262 case MLX5_RSS_HASH_IPV4_UDP:
10263 hrxqs[2] = hrxq_idx;
10265 case MLX5_RSS_HASH_IPV6:
10266 hrxqs[3] = hrxq_idx;
10268 case MLX5_RSS_HASH_IPV6_TCP:
10269 hrxqs[4] = hrxq_idx;
10271 case MLX5_RSS_HASH_IPV6_UDP:
10272 hrxqs[5] = hrxq_idx;
10274 case MLX5_RSS_HASH_NONE:
10275 hrxqs[6] = hrxq_idx;
10283 * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10286 * @param[in] action
10287 * Shred RSS action holding hash RX queue objects.
10288 * @param[in] hash_fields
10289 * Defines combination of packet fields to participate in RX hash.
10290 * @param[in] tunnel
10294 * Valid hash RX queue index, otherwise 0.
10297 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10298 const uint64_t hash_fields,
10301 const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10303 switch (hash_fields & ~IBV_RX_HASH_INNER) {
10304 case MLX5_RSS_HASH_IPV4:
10306 case MLX5_RSS_HASH_IPV4_TCP:
10308 case MLX5_RSS_HASH_IPV4_UDP:
10310 case MLX5_RSS_HASH_IPV6:
10312 case MLX5_RSS_HASH_IPV6_TCP:
10314 case MLX5_RSS_HASH_IPV6_UDP:
10316 case MLX5_RSS_HASH_NONE:
10324 * Retrieves hash RX queue suitable for the *flow*.
10325 * If shared action configured for *flow* suitable hash RX queue will be
10326 * retrieved from attached shared action.
10329 * Shred RSS action holding hash RX queue objects.
10330 * @param[in] dev_flow
10331 * Pointer to the sub flow.
10333 * Pointer to retrieved hash RX queue object.
10336 * Valid hash RX queue index, otherwise 0 and rte_errno is set.
10339 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10340 struct mlx5_flow *dev_flow,
10341 struct mlx5_hrxq **hrxq)
10343 struct mlx5_priv *priv = dev->data->dev_private;
10344 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10347 if (flow->shared_rss) {
10348 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10349 (flow->shared_rss, dev_flow->hash_fields,
10350 !!(dev_flow->handle->layers &
10351 MLX5_FLOW_LAYER_TUNNEL));
10353 *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10355 __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10359 struct mlx5_flow_rss_desc *rss_desc =
10360 &wks->rss_desc[!!wks->flow_nested_idx];
10362 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10369 * Apply the flow to the NIC, lock free,
10370 * (mutex should be acquired by caller).
10373 * Pointer to the Ethernet device structure.
10374 * @param[in, out] flow
10375 * Pointer to flow structure.
10376 * @param[out] error
10377 * Pointer to error structure.
10380 * 0 on success, a negative errno value otherwise and rte_errno is set.
10383 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10384 struct rte_flow_error *error)
10386 struct mlx5_flow_dv_workspace *dv;
10387 struct mlx5_flow_handle *dh;
10388 struct mlx5_flow_handle_dv *dv_h;
10389 struct mlx5_flow *dev_flow;
10390 struct mlx5_priv *priv = dev->data->dev_private;
10391 uint32_t handle_idx;
10395 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10398 for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10399 dev_flow = &wks->flows[idx];
10400 dv = &dev_flow->dv;
10401 dh = dev_flow->handle;
10404 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10405 if (dv->transfer) {
10406 dv->actions[n++] = priv->sh->esw_drop_action;
10408 MLX5_ASSERT(priv->drop_queue.hrxq);
10410 priv->drop_queue.hrxq->action;
10412 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10413 !dv_h->rix_sample && !dv_h->rix_dest_array) {
10414 struct mlx5_hrxq *hrxq = NULL;
10415 uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10416 (dev, flow, dev_flow, &hrxq);
10420 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10421 "cannot get hash queue");
10424 dh->rix_hrxq = hrxq_idx;
10425 dv->actions[n++] = hrxq->action;
10426 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10427 if (!priv->sh->default_miss_action) {
10430 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10431 "default miss action not be created.");
10434 dv->actions[n++] = priv->sh->default_miss_action;
10436 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10437 (void *)&dv->value, n,
10438 dv->actions, &dh->drv_flow);
10440 rte_flow_error_set(error, errno,
10441 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10443 "hardware refuses to create flow");
10446 if (priv->vmwa_context &&
10447 dh->vf_vlan.tag && !dh->vf_vlan.created) {
10449 * The rule contains the VLAN pattern.
10450 * For VF we are going to create VLAN
10451 * interface to make hypervisor set correct
10452 * e-Switch vport context.
10454 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10459 err = rte_errno; /* Save rte_errno before cleanup. */
10460 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10461 handle_idx, dh, next) {
10462 /* hrxq is union, don't clear it if the flag is not set. */
10463 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10464 mlx5_hrxq_release(dev, dh->rix_hrxq);
10467 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10468 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10470 rte_errno = err; /* Restore rte_errno. */
10475 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10476 struct mlx5_cache_entry *entry)
10478 struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10481 claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10486 * Release the flow matcher.
10489 * Pointer to Ethernet device.
10491 * Pointer to mlx5_flow_handle.
10494 * 1 while a reference on it exists, 0 when freed.
10497 flow_dv_matcher_release(struct rte_eth_dev *dev,
10498 struct mlx5_flow_handle *handle)
10500 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10501 struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10502 typeof(*tbl), tbl);
10505 MLX5_ASSERT(matcher->matcher_object);
10506 ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10507 flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10512 * Release encap_decap resource.
10515 * Pointer to the hash list.
10517 * Pointer to exist resource entry object.
10520 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10521 struct mlx5_hlist_entry *entry)
10523 struct mlx5_dev_ctx_shared *sh = list->ctx;
10524 struct mlx5_flow_dv_encap_decap_resource *res =
10525 container_of(entry, typeof(*res), entry);
10527 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10528 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10532 * Release an encap/decap resource.
10535 * Pointer to Ethernet device.
10536 * @param encap_decap_idx
10537 * Index of encap decap resource.
10540 * 1 while a reference on it exists, 0 when freed.
10543 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10544 uint32_t encap_decap_idx)
10546 struct mlx5_priv *priv = dev->data->dev_private;
10547 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10549 cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10551 if (!cache_resource)
10553 MLX5_ASSERT(cache_resource->action);
10554 return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10555 &cache_resource->entry);
10559 * Release an jump to table action resource.
10562 * Pointer to Ethernet device.
10564 * Pointer to mlx5_flow_handle.
10567 * 1 while a reference on it exists, 0 when freed.
10570 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10571 struct mlx5_flow_handle *handle)
10573 struct mlx5_priv *priv = dev->data->dev_private;
10574 struct mlx5_flow_tbl_data_entry *tbl_data;
10576 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10580 return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10584 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10585 struct mlx5_hlist_entry *entry)
10587 struct mlx5_flow_dv_modify_hdr_resource *res =
10588 container_of(entry, typeof(*res), entry);
10590 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10595 * Release a modify-header resource.
10598 * Pointer to Ethernet device.
10600 * Pointer to mlx5_flow_handle.
10603 * 1 while a reference on it exists, 0 when freed.
10606 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10607 struct mlx5_flow_handle *handle)
10609 struct mlx5_priv *priv = dev->data->dev_private;
10610 struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10612 MLX5_ASSERT(entry->action);
10613 return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10617 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10618 struct mlx5_cache_entry *entry)
10620 struct mlx5_dev_ctx_shared *sh = list->ctx;
10621 struct mlx5_flow_dv_port_id_action_resource *cache =
10622 container_of(entry, typeof(*cache), entry);
10624 claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10625 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10629 * Release port ID action resource.
10632 * Pointer to Ethernet device.
10634 * Pointer to mlx5_flow_handle.
10637 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on a port-ID action resource identified by its ipool
 * index; actual HW destruction happens in flow_dv_port_id_remove_cb() when
 * the cache-list refcount hits zero.
 */
10640 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10643 	struct mlx5_priv *priv = dev->data->dev_private;
10644 	struct mlx5_flow_dv_port_id_action_resource *cache;
10646 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10649 	MLX5_ASSERT(cache->action);
10650 	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
/*
 * Cache-list removal callback for push-VLAN actions: destroy the HW action
 * and return the resource slot to the PUSH_VLAN indexed pool.
 */
10655 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10656 			  struct mlx5_cache_entry *entry)
10658 	struct mlx5_dev_ctx_shared *sh = list->ctx;
10659 	struct mlx5_flow_dv_push_vlan_action_resource *cache =
10660 			container_of(entry, typeof(*cache), entry);
10662 	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10663 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10667 * Release push vlan action resource.
10670 * Pointer to Ethernet device.
10672 * Pointer to mlx5_flow_handle.
10675 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the push-VLAN action resource referenced by
 * @handle (index handle->dvh.rix_push_vlan); the cache-list remove
 * callback performs the HW destroy on last release.
 */
10678 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10679 					  struct mlx5_flow_handle *handle)
10681 	struct mlx5_priv *priv = dev->data->dev_private;
10682 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
10683 	uint32_t idx = handle->dvh.rix_push_vlan;
10685 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10688 	MLX5_ASSERT(cache->action);
10689 	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10694 * Release the fate resource.
10697 * Pointer to Ethernet device.
10699 * Pointer to mlx5_flow_handle.
/*
 * Release the fate (terminal) resource of a flow handle and clear the
 * rix_fate reference. Dispatches on handle->fate_action: queue (hrxq),
 * jump table, or port-ID action; unknown values are only logged.
 */
10702 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10703 			       struct mlx5_flow_handle *handle)
10705 	if (!handle->rix_fate)
10707 	switch (handle->fate_action) {
10708 	case MLX5_FLOW_FATE_QUEUE:
10709 		mlx5_hrxq_release(dev, handle->rix_hrxq);
10711 	case MLX5_FLOW_FATE_JUMP:
10712 		flow_dv_jump_tbl_resource_release(dev, handle);
10714 	case MLX5_FLOW_FATE_PORT_ID:
10715 		flow_dv_port_id_action_resource_release(dev,
10716 					handle->rix_port_id_action);
/* Default case: unexpected fate value — log only, nothing to free. */
10719 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
/* rix_fate shares storage with the per-fate indices above, so it is
 * cleared once regardless of which branch ran. */
10722 	handle->rix_fate = 0;
/*
 * Cache-list removal callback for sample actions. Destroys the verbs
 * sample action, the FDB default-miss action (FDB tables only), releases
 * the normal-path table and the sub-actions, then frees the ipool slot.
 */
10726 flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
10727 			 struct mlx5_cache_entry *entry)
10729 	struct rte_eth_dev *dev = list->ctx;
10730 	struct mlx5_priv *priv = dev->data->dev_private;
10731 	struct mlx5_flow_dv_sample_resource *cache_resource =
10732 			container_of(entry, typeof(*cache_resource), entry);
10734 	if (cache_resource->verbs_action)
10735 		claim_zero(mlx5_glue->destroy_flow_action
10736 				(cache_resource->verbs_action));
/* default_miss exists only for FDB (E-Switch) sample resources. */
10737 	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10738 		if (cache_resource->default_miss)
10739 			claim_zero(mlx5_glue->destroy_flow_action
10740 				(cache_resource->default_miss));
10742 	if (cache_resource->normal_path_tbl)
10743 		flow_dv_tbl_resource_release(MLX5_SH(dev),
10744 			cache_resource->normal_path_tbl);
10745 	flow_dv_sample_sub_actions_release(dev,
10746 					&cache_resource->sample_idx);
10747 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10748 			cache_resource->idx);
10749 	DRV_LOG(DEBUG, "sample resource %p: removed",
10750 		(void *)cache_resource);
10754 * Release an sample resource.
10757 * Pointer to Ethernet device.
10759 * Pointer to mlx5_flow_handle.
10762 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the sample resource referenced by
 * handle->dvh.rix_sample; flow_dv_sample_remove_cb() tears it down when
 * the cache-list refcount reaches zero.
 */
10765 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10766 				     struct mlx5_flow_handle *handle)
10768 	struct mlx5_priv *priv = dev->data->dev_private;
10769 	struct mlx5_flow_dv_sample_resource *cache_resource;
10771 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10772 					 handle->dvh.rix_sample);
10773 	if (!cache_resource)
10775 	MLX5_ASSERT(cache_resource->verbs_action);
10776 	return mlx5_cache_unregister(&priv->sh->sample_action_list,
10777 				     &cache_resource->entry);
/*
 * Cache-list removal callback for destination-array actions. Destroys the
 * HW action, releases the sub-actions of every destination, then frees
 * the ipool slot.
 */
10781 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
10782 			     struct mlx5_cache_entry *entry)
10784 	struct rte_eth_dev *dev = list->ctx;
10785 	struct mlx5_priv *priv = dev->data->dev_private;
10786 	struct mlx5_flow_dv_dest_array_resource *cache_resource =
10787 			container_of(entry, typeof(*cache_resource), entry);
10790 	MLX5_ASSERT(cache_resource->action);
10791 	if (cache_resource->action)
10792 		claim_zero(mlx5_glue->destroy_flow_action
10793 					(cache_resource->action));
/* NOTE(review): loop index `i` is declared on a line not visible here —
 * confirm it starts at 0. */
10794 	for (; i < cache_resource->num_of_dest; i++)
10795 		flow_dv_sample_sub_actions_release(dev,
10796 				&cache_resource->sample_idx[i]);
10797 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10798 			cache_resource->idx);
10799 	DRV_LOG(DEBUG, "destination array resource %p: removed",
10800 		(void *)cache_resource);
10804 * Release an destination array resource.
10807 * Pointer to Ethernet device.
10809 * Pointer to mlx5_flow_handle.
10812 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the destination-array resource referenced by
 * handle->dvh.rix_dest_array; the cache-list remove callback performs the
 * actual teardown on last release.
 */
10815 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10816 				    struct mlx5_flow_handle *handle)
10818 	struct mlx5_priv *priv = dev->data->dev_private;
10819 	struct mlx5_flow_dv_dest_array_resource *cache;
10821 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10822 			       handle->dvh.rix_dest_array);
10825 	MLX5_ASSERT(cache->action);
10826 	return mlx5_cache_unregister(&priv->sh->dest_array_list,
10831 * Remove the flow from the NIC but keeps it in memory.
10832 * Lock free, (mutex should be acquired by caller).
10835 * Pointer to Ethernet device.
10836 * @param[in, out] flow
10837 * Pointer to flow structure.
/*
 * Detach the flow rules from HW while keeping the SW flow structure.
 * Walks the handle ipool chain: destroys each driver flow object, releases
 * queue fate resources, and undoes any VF VLAN workaround. Caller holds
 * the flow lock (lock-free by contract, see header comment).
 */
10840 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10842 	struct mlx5_flow_handle *dh;
10843 	uint32_t handle_idx;
10844 	struct mlx5_priv *priv = dev->data->dev_private;
10848 	handle_idx = flow->dev_handles;
10849 	while (handle_idx) {
10850 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10854 		if (dh->drv_flow) {
10855 			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10856 			dh->drv_flow = NULL;
/* Only QUEUE fate is released here; other fates are kept so the flow can
 * be re-applied, and are freed in __flow_dv_destroy(). */
10858 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10859 			flow_dv_fate_resource_release(dev, dh);
10860 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
10861 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10862 		handle_idx = dh->next.next;
10867 * Remove the flow from the NIC and the memory.
10868 * Lock free, (mutex should be acquired by caller).
10871 * Pointer to the Ethernet device structure.
10872 * @param[in, out] flow
10873 * Pointer to flow structure.
/*
 * Fully destroy a flow: remove it from HW (__flow_dv_remove), drop the
 * shared-RSS refcount it may hold, release its counter and meter, then
 * free every device handle along with all per-handle resources (matcher,
 * sample, dest-array, encap/decap, modify-header, push-VLAN, tag, fate).
 * Caller holds the flow lock (lock-free by contract).
 */
10876 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10878 	struct rte_flow_shared_action *shared;
10879 	struct mlx5_flow_handle *dev_handle;
10880 	struct mlx5_priv *priv = dev->data->dev_private;
10884 	__flow_dv_remove(dev, flow);
10885 	shared = mlx5_flow_get_shared_rss(flow);
10887 		__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10888 	if (flow->counter) {
10889 		flow_dv_counter_release(dev, flow->counter);
10893 		struct mlx5_flow_meter *fm;
10895 		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10898 			mlx5_flow_meter_detach(fm);
/* Pop handles off the list one by one and release each owned resource. */
10901 	while (flow->dev_handles) {
10902 		uint32_t tmp_idx = flow->dev_handles;
10904 		dev_handle = mlx5_ipool_get(priv->sh->ipool
10905 					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10908 		flow->dev_handles = dev_handle->next.next;
10909 		if (dev_handle->dvh.matcher)
10910 			flow_dv_matcher_release(dev, dev_handle);
10911 		if (dev_handle->dvh.rix_sample)
10912 			flow_dv_sample_resource_release(dev, dev_handle);
10913 		if (dev_handle->dvh.rix_dest_array)
10914 			flow_dv_dest_array_resource_release(dev, dev_handle);
10915 		if (dev_handle->dvh.rix_encap_decap)
10916 			flow_dv_encap_decap_resource_release(dev,
10917 				dev_handle->dvh.rix_encap_decap);
10918 		if (dev_handle->dvh.modify_hdr)
10919 			flow_dv_modify_hdr_resource_release(dev, dev_handle);
10920 		if (dev_handle->dvh.rix_push_vlan)
10921 			flow_dv_push_vlan_action_resource_release(dev,
10923 		if (dev_handle->dvh.rix_tag)
10924 			flow_dv_tag_release(dev,
10925 					    dev_handle->dvh.rix_tag);
10926 		flow_dv_fate_resource_release(dev, dev_handle);
10927 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10933 * Release array of hash RX queue objects.
10937 * Pointer to the Ethernet device structure.
10938 * @param[in, out] hrxqs
10939 * Array of hash RX queue objects.
10942 * Total number of references to hash RX queue objects in *hrxqs* array
10943 * after this operation.
/*
 * Release every hash RX queue object in @hrxqs and return the sum of the
 * remaining reference counts (0 means all fully freed).
 */
10946 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10947 			uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10952 	for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10953 		int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10963 * Release all hash RX queue objects representing shared RSS action.
10966 * Pointer to the Ethernet device structure.
10967 * @param[in, out] action
10968 * Shared RSS action to remove hash RX queue objects from.
10971 * Total number of references to hash RX queue objects stored in *action*
10972 * after this operation.
10973 * Expected to be 0 if no external references held.
/*
 * Release both hrxq sets (plain and tunnel) of a shared RSS action;
 * returns the total remaining references across both arrays.
 */
10976 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10977 				 struct mlx5_shared_action_rss *action)
10979 	return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10980 	__flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10984 * Setup shared RSS action.
10985 * Prepare set of hash RX queue objects sufficient to handle all valid
10986 * hash_fields combinations (see enum ibv_rx_hash_fields).
10989 * Pointer to the Ethernet device structure.
10990 * @param[in, out] action
10991 * Partially initialized shared RSS action.
10992 * @param[out] error
10993 * Perform verbose error reporting if not NULL. Initialized in case of
10997 * 0 on success, otherwise negative errno value.
/*
 * Pre-create the hash RX queues for a shared RSS action: one hrxq per
 * (hash_fields, tunnel) combination, using the action's stored key and
 * queue list. On any failure all already-created hrxqs are released.
 * Returns 0 on success, negative errno otherwise (reported via @error).
 */
11000 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11001 			   struct mlx5_shared_action_rss *action,
11002 			struct rte_flow_error *error)
11004 	struct mlx5_flow_rss_desc rss_desc = { 0 };
11008 	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11009 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11010 	rss_desc.const_q = action->origin.queue;
11011 	rss_desc.queue_num = action->origin.queue_num;
/* standalone=true: hrxqs are owned by the shared action, not by a flow. */
11012 	rss_desc.standalone = true;
11013 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11015 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
11018 		for (tunnel = 0; tunnel < 2; tunnel++) {
11019 			rss_desc.tunnel = tunnel;
11020 			rss_desc.hash_fields = hash_fields;
11021 			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11025 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11026 						  "cannot get hash queue");
11027 				goto error_hrxq_new;
11029 			err = __flow_dv_action_rss_hrxq_set
11030 				(action, hash_fields, tunnel, hrxq_idx);
/* Error path: undo all hrxq references taken so far. */
11037 	__flow_dv_action_rss_hrxqs_release(dev, action);
11043 * Create shared RSS action.
11046 * Pointer to the Ethernet device structure.
11048 * Shared action configuration.
11050 * RSS action specification used to create shared action.
11051 * @param[out] error
11052 * Perform verbose error reporting if not NULL. Initialized in case of
11056 * A valid shared action handle in case of success, NULL otherwise and
11057 * rte_errno is set.
/*
 * Allocate and initialize a shared RSS action: copies the RSS config
 * (key, types, queue list) into owned storage, pre-creates the hrxq set
 * via __flow_dv_action_rss_setup(), and tags the action type.
 * Both the action and the queue array come from mlx5_malloc() — release
 * paths must use mlx5_free(). Returns NULL and sets rte_errno on failure.
 */
static struct rte_flow_shared_action *
11060 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11061 			    const struct rte_flow_shared_action_conf *conf,
11062 			    const struct rte_flow_action_rss *rss,
11063 			    struct rte_flow_error *error)
11065 	struct rte_flow_shared_action *shared_action = NULL;
11066 	void *queue = NULL;
11067 	struct mlx5_shared_action_rss *shared_rss;
11068 	struct rte_flow_action_rss *origin;
11069 	const uint8_t *rss_key;
11070 	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11072 	RTE_SET_USED(conf);
11073 	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11075 	shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11077 	if (!shared_action || !queue) {
11078 		rte_flow_error_set(error, ENOMEM,
11079 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11080 				   "cannot allocate resource memory");
11081 		goto error_rss_init;
11083 	shared_rss = &shared_action->rss;
11084 	shared_rss->queue = queue;
11085 	origin = &shared_rss->origin;
11086 	origin->func = rss->func;
11087 	origin->level = rss->level;
11088 	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11089 	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11090 	/* NULL RSS key indicates default RSS key. */
11091 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
11092 	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
/* origin points into owned copies so the caller's rss struct may go away. */
11093 	origin->key = &shared_rss->key[0];
11094 	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11095 	memcpy(shared_rss->queue, rss->queue, queue_size);
11096 	origin->queue = shared_rss->queue;
11097 	origin->queue_num = rss->queue_num;
11098 	if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11099 		goto error_rss_init;
11100 	shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11101 	return shared_action;
11103 	mlx5_free(shared_action);
11109 * Destroy the shared RSS action.
11110 * Release related hash RX queue objects.
11113 * Pointer to the Ethernet device structure.
11114 * @param[in] shared_rss
11115 * The shared RSS action object to be removed.
11116 * @param[out] error
11117 * Perform verbose error reporting if not NULL. Initialized in case of
11121 * 0 on success, otherwise negative errno value.
11124 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11125 struct mlx5_shared_action_rss *shared_rss,
11126 struct rte_flow_error *error)
11128 struct rte_flow_shared_action *shared_action = NULL;
11129 uint32_t old_refcnt = 1;
11130 int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11133 return rte_flow_error_set(error, ETOOMANYREFS,
11134 RTE_FLOW_ERROR_TYPE_ACTION,
11136 "shared rss hrxq has references");
11138 shared_action = container_of(shared_rss,
11139 struct rte_flow_shared_action, rss);
11140 if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11142 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11143 return rte_flow_error_set(error, ETOOMANYREFS,
11144 RTE_FLOW_ERROR_TYPE_ACTION,
11146 "shared rss has references");
11148 rte_free(shared_rss->queue);
11153 * Create shared action, lock free,
11154 * (mutex should be acquired by caller).
11155 * Dispatcher for action type specific call.
11158 * Pointer to the Ethernet device structure.
11160 * Shared action configuration.
11161 * @param[in] action
11162 * Action specification used to create shared action.
11163 * @param[out] error
11164 * Perform verbose error reporting if not NULL. Initialized in case of
11168 * A valid shared action handle in case of success, NULL otherwise and
11169 * rte_errno is set.
/*
 * Type dispatcher for shared-action creation (lock-free; caller holds the
 * mutex). On success the new action gets an initial refcount bump and is
 * linked into priv->shared_actions. Only RSS is currently supported.
 */
static struct rte_flow_shared_action *
11172 __flow_dv_action_create(struct rte_eth_dev *dev,
11173 			const struct rte_flow_shared_action_conf *conf,
11174 			const struct rte_flow_action *action,
11175 			struct rte_flow_error *error)
11177 	struct rte_flow_shared_action *shared_action = NULL;
11178 	struct mlx5_priv *priv = dev->data->dev_private;
11180 	switch (action->type) {
11181 	case RTE_FLOW_ACTION_TYPE_RSS:
11182 		shared_action = __flow_dv_action_rss_create(dev, conf,
11187 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11188 				   NULL, "action type not supported");
11191 	if (shared_action) {
11192 		__atomic_add_fetch(&shared_action->refcnt, 1,
11194 		LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11196 	return shared_action;
11200 * Destroy the shared action.
11201 * Release action related resources on the NIC and the memory.
11202 * Lock free, (mutex should be acquired by caller).
11203 * Dispatcher for action type specific call.
11206 * Pointer to the Ethernet device structure.
11207 * @param[in] action
11208 * The shared action object to be removed.
11209 * @param[out] error
11210 * Perform verbose error reporting if not NULL. Initialized in case of
11214 * 0 on success, otherwise negative errno value.
/*
 * Type dispatcher for shared-action destruction (lock-free; caller holds
 * the mutex). Delegates to the per-type release; on success unlinks the
 * action from the device's shared-action list.
 */
11217 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11218 			 struct rte_flow_shared_action *action,
11219 			 struct rte_flow_error *error)
11223 	switch (action->type) {
11224 	case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11225 		ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11228 		return rte_flow_error_set(error, ENOTSUP,
11229 					  RTE_FLOW_ERROR_TYPE_ACTION,
11231 					  "action type not supported");
11235 	LIST_REMOVE(action, next);
11241 * Updates in place shared RSS action configuration.
11244 * Pointer to the Ethernet device structure.
11245 * @param[in] shared_rss
11246 * The shared RSS action object to be updated.
11247 * @param[in] action_conf
11248 * RSS action specification used to modify *shared_rss*.
11249 * @param[out] error
11250 * Perform verbose error reporting if not NULL. Initialized in case of
11254 * 0 on success, otherwise negative errno value.
11255 * @note: currently only support update of RSS queues.
/*
 * In-place update of a shared RSS action's queue list. Allocates a new
 * queue array, modifies every existing hrxq (all hash_fields x tunnel
 * combinations) to the new queues/key, then swaps the stored queue array.
 * Note: only queue updates are supported (see header comment above).
 */
11258 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11259 			    struct mlx5_shared_action_rss *shared_rss,
11260 			    const struct rte_flow_action_rss *action_conf,
11261 			    struct rte_flow_error *error)
11265 	void *queue = NULL;
11266 	const uint8_t *rss_key;
11267 	uint32_t rss_key_len;
11268 	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11270 	queue = mlx5_malloc(MLX5_MEM_ZERO,
11271 			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11274 		return rte_flow_error_set(error, ENOMEM,
11275 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11277 					  "cannot allocate resource memory");
/* NULL key keeps the driver default RSS key. */
11278 	if (action_conf->key) {
11279 		rss_key = action_conf->key;
11280 		rss_key_len = action_conf->key_len;
11282 		rss_key = rss_hash_default_key;
11283 		rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11285 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11287 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
11290 		for (tunnel = 0; tunnel < 2; tunnel++) {
11291 			hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11292 					(shared_rss, hash_fields, tunnel);
11293 			MLX5_ASSERT(hrxq_idx);
11294 			ret = mlx5_hrxq_modify
11296 				 rss_key, rss_key_len,
11298 				 action_conf->queue, action_conf->queue_num);
/* NOTE(review): on this error path the freshly allocated `queue` appears
 * to leak — the visible return does not free it; confirm against the
 * full source. */
11301 				return rte_flow_error_set
11303 					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11304 					 "cannot update hash queue");
/* All hrxqs updated: replace the owned queue array and fix origin refs. */
11308 	mlx5_free(shared_rss->queue);
11309 	shared_rss->queue = queue;
11310 	memcpy(shared_rss->queue, action_conf->queue, queue_size);
11311 	shared_rss->origin.queue = shared_rss->queue;
11312 	shared_rss->origin.queue_num = action_conf->queue_num;
11317 * Updates in place shared action configuration, lock free,
11318 * (mutex should be acquired by caller).
11321 * Pointer to the Ethernet device structure.
11322 * @param[in] action
11323 * The shared action object to be updated.
11324 * @param[in] action_conf
11325 * Action specification used to modify *action*.
11326 * *action_conf* should be of type correlating with type of the *action*,
11327 * otherwise considered as invalid.
11328 * @param[out] error
11329 * Perform verbose error reporting if not NULL. Initialized in case of
11333 * 0 on success, otherwise negative errno value.
/*
 * Type dispatcher for shared-action update (lock-free; caller holds the
 * mutex). Only shared RSS supports updates.
 */
11336 __flow_dv_action_update(struct rte_eth_dev *dev,
11337 			struct rte_flow_shared_action *action,
11338 			const void *action_conf,
11339 			struct rte_flow_error *error)
11341 	switch (action->type) {
11342 	case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11343 		return __flow_dv_action_rss_update(dev, &action->rss,
11344 						   action_conf, error);
11346 		return rte_flow_error_set(error, ENOTSUP,
11347 					  RTE_FLOW_ERROR_TYPE_ACTION,
11349 					  "action type not supported");
11353 * Query a dv flow rule for its statistics via devx.
11356 * Pointer to Ethernet device.
11358 * Pointer to the sub flow.
11360 * data retrieved by the query.
11361 * @param[out] error
11362 * Perform verbose error reporting if not NULL.
11365 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query flow counter statistics via DevX. Requires DevX support
 * (priv->config.devx) and an attached counter. Reports hits/bytes as
 * deltas against the counter's saved baseline and, if reset is
 * requested, rolls the baseline forward.
 */
11368 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11369 		    void *data, struct rte_flow_error *error)
11371 	struct mlx5_priv *priv = dev->data->dev_private;
11372 	struct rte_flow_query_count *qc = data;
11374 	if (!priv->config.devx)
11375 		return rte_flow_error_set(error, ENOTSUP,
11376 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11378 					  "counters are not supported");
11379 	if (flow->counter) {
11380 		uint64_t pkts, bytes;
11381 		struct mlx5_flow_counter *cnt;
11383 		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11385 		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11389 			return rte_flow_error_set(error, -err,
11390 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11391 					NULL, "cannot read counters");
/* Delta against baseline stored in the counter struct. */
11394 		qc->hits = pkts - cnt->hits;
11395 		qc->bytes = bytes - cnt->bytes;
11398 			cnt->bytes = bytes;
11402 	return rte_flow_error_set(error, EINVAL,
11403 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11405 				  "counters are not available");
11409 * Query a flow rule AGE action for aging information.
11412 * Pointer to Ethernet device.
11414 * Pointer to the sub flow.
11416 * data retrieved by the query.
11417 * @param[out] error
11418 * Perform verbose error reporting if not NULL.
11421 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Query AGE action state for a flow. Reads the age parameters attached
 * to the flow's counter: reports whether the flow aged out and, if not,
 * the seconds since it was last hit (loaded atomically — the aging
 * thread updates these fields concurrently).
 */
11424 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11425 		  void *data, struct rte_flow_error *error)
11427 	struct rte_flow_query_age *resp = data;
11429 	if (flow->counter) {
11430 		struct mlx5_age_param *age_param =
11431 				flow_dv_counter_idx_get_age(dev, flow->counter);
11433 		if (!age_param || !age_param->timeout)
11434 			return rte_flow_error_set
11436 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11437 					 NULL, "cannot read age data");
11438 		resp->aged = __atomic_load_n(&age_param->state,
11439 					     __ATOMIC_RELAXED) ==
11441 		resp->sec_since_last_hit_valid = !resp->aged;
11442 		if (resp->sec_since_last_hit_valid)
11443 			resp->sec_since_last_hit =
11444 				__atomic_load_n(&age_param->sec_since_last_hit,
11448 	return rte_flow_error_set(error, EINVAL,
11449 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11451 				  "age data not available");
11457 * @see rte_flow_query()
11458 * @see rte_flow_ops
/*
 * rte_flow query entry point for the DV driver: walks the action list
 * and dispatches COUNT and AGE queries; VOID is skipped, anything else
 * is rejected with ENOTSUP.
 *
 * NOTE(review): the __rte_unused attributes below are stale — all four
 * parameters are actually used in the loop; consider dropping them.
 */
11461 flow_dv_query(struct rte_eth_dev *dev,
11462 	      struct rte_flow *flow __rte_unused,
11463 	      const struct rte_flow_action *actions __rte_unused,
11464 	      void *data __rte_unused,
11465 	      struct rte_flow_error *error __rte_unused)
11469 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11470 		switch (actions->type) {
11471 		case RTE_FLOW_ACTION_TYPE_VOID:
11473 		case RTE_FLOW_ACTION_TYPE_COUNT:
11474 			ret = flow_dv_query_count(dev, flow, data, error);
11476 		case RTE_FLOW_ACTION_TYPE_AGE:
11477 			ret = flow_dv_query_age(dev, flow, data, error);
11480 			return rte_flow_error_set(error, ENOTSUP,
11481 						  RTE_FLOW_ERROR_TYPE_ACTION,
11483 						  "action not supported");
11490 * Destroy the meter table set.
11491 * Lock free, (mutex should be acquired by caller).
11494 * Pointer to Ethernet device.
11496 * Pointer to the meter table set.
/*
 * Destroy a meter table set (lock-free; caller holds the mutex).
 * Tears down, for each of the three domains (ingress/egress/transfer):
 * the DROPPED policer rule, the color and any matchers, the meter table
 * and its suffix table — then the shared drop action. All destroys are
 * guarded by NULL checks so a partially-built set can be passed in.
 */
11502 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11503 			struct mlx5_meter_domains_infos *tbl)
11505 	struct mlx5_priv *priv = dev->data->dev_private;
11506 	struct mlx5_meter_domains_infos *mtd =
11507 				(struct mlx5_meter_domains_infos *)tbl;
11509 	if (!mtd || !priv->config.dv_flow_en)
11511 	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11512 		claim_zero(mlx5_flow_os_destroy_flow
11513 			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11514 	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11515 		claim_zero(mlx5_flow_os_destroy_flow
11516 			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11517 	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11518 		claim_zero(mlx5_flow_os_destroy_flow
11519 			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11520 	if (mtd->egress.color_matcher)
11521 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11522 			   (mtd->egress.color_matcher));
11523 	if (mtd->egress.any_matcher)
11524 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11525 			   (mtd->egress.any_matcher));
11526 	if (mtd->egress.tbl)
11527 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11528 	if (mtd->egress.sfx_tbl)
11529 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11530 	if (mtd->ingress.color_matcher)
11531 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11532 			   (mtd->ingress.color_matcher));
11533 	if (mtd->ingress.any_matcher)
11534 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11535 			   (mtd->ingress.any_matcher));
11536 	if (mtd->ingress.tbl)
11537 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11538 	if (mtd->ingress.sfx_tbl)
11539 		flow_dv_tbl_resource_release(MLX5_SH(dev),
11540 					     mtd->ingress.sfx_tbl);
11541 	if (mtd->transfer.color_matcher)
11542 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11543 			   (mtd->transfer.color_matcher));
11544 	if (mtd->transfer.any_matcher)
11545 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11546 			   (mtd->transfer.any_matcher));
11547 	if (mtd->transfer.tbl)
11548 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11549 	if (mtd->transfer.sfx_tbl)
11550 		flow_dv_tbl_resource_release(MLX5_SH(dev),
11551 					     mtd->transfer.sfx_tbl);
11552 	if (mtd->drop_actn)
11553 		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11558 /* Number of meter flow actions, count and jump or count and drop. */
11559 #define METER_ACTIONS 2
11562 * Create specify domain meter table and suffix table.
11565 * Pointer to Ethernet device.
11566 * @param[in,out] mtb
11567 * Pointer to DV meter table set.
11568 * @param[in] egress
11570 * @param[in] transfer
11572 * @param[in] color_reg_c_idx
11573 * Reg C index for color match.
11576 * 0 on success, -1 otherwise and rte_errno is set.
/*
 * Build the meter tables for one domain (selected by egress/transfer):
 * create the METER-level table and SUFFIX-level table, an "any" matcher
 * (priority 3, lowest) and a color matcher (priority 0, highest) on the
 * color register, then install the default drop rule (optionally counted)
 * behind the any matcher. Returns 0 on success, -1 with rte_errno set.
 */
11579 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11580 			   struct mlx5_meter_domains_infos *mtb,
11581 			   uint8_t egress, uint8_t transfer,
11582 			   uint32_t color_reg_c_idx)
11584 	struct mlx5_priv *priv = dev->data->dev_private;
11585 	struct mlx5_dev_ctx_shared *sh = priv->sh;
11586 	struct mlx5_flow_dv_match_params mask = {
11587 		.size = sizeof(mask.buf),
11589 	struct mlx5_flow_dv_match_params value = {
11590 		.size = sizeof(value.buf),
11592 	struct mlx5dv_flow_matcher_attr dv_attr = {
11593 		.type = IBV_FLOW_ATTR_NORMAL,
11595 		.match_criteria_enable = 0,
11596 		.match_mask = (void *)&mask,
11598 	void *actions[METER_ACTIONS];
11599 	struct mlx5_meter_domain_info *dtb;
11600 	struct rte_flow_error error;
/* Select the per-domain sub-structure: transfer > egress > ingress. */
11605 		dtb = &mtb->transfer;
11607 		dtb = &mtb->egress;
11609 		dtb = &mtb->ingress;
11610 	/* Create the meter table with METER level. */
11611 	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11612 					    egress, transfer, false, NULL, 0,
11615 		DRV_LOG(ERR, "Failed to create meter policer table.");
11618 	/* Create the meter suffix table with SUFFIX level. */
11619 	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11620 					MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11621 					egress, transfer, false, NULL, 0,
11623 	if (!dtb->sfx_tbl) {
11624 		DRV_LOG(ERR, "Failed to create meter suffix table.");
11627 	/* Create matchers, Any and Color. */
11628 	dv_attr.priority = 3;
11629 	dv_attr.match_criteria_enable = 0;
11630 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11631 					       &dtb->any_matcher);
11633 		DRV_LOG(ERR, "Failed to create meter"
11634 			" policer default matcher.");
11637 	dv_attr.priority = 0;
11638 	dv_attr.match_criteria_enable =
11639 				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11640 	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11641 			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11642 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11643 					       &dtb->color_matcher);
11645 		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
/* Optional DROPPED counter action precedes the drop action. */
11648 	if (mtb->count_actns[RTE_MTR_DROPPED])
11649 		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11650 	actions[i++] = mtb->drop_actn;
11651 	/* Default rule: lowest priority, match any, actions: drop. */
11652 	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11654 				       &dtb->policer_rules[RTE_MTR_DROPPED]);
11656 		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11665 * Create the needed meter and suffix tables.
11666 * Lock free, (mutex should be acquired by caller).
11669 * Pointer to Ethernet device.
11671 * Pointer to the flow meter.
11674 * Pointer to table set on success, NULL otherwise and rte_errno is set.
/*
 * Allocate a meter table set and populate it: per-color counter actions,
 * a shared drop action, then egress, ingress and (when E-Switch is
 * enabled) FDB domain tables via flow_dv_prepare_mtr_tables(). On any
 * failure the partially built set is torn down with
 * flow_dv_destroy_mtr_tbl() and NULL is returned (rte_errno set).
 */
static struct mlx5_meter_domains_infos *
11677 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11678 		       const struct mlx5_flow_meter *fm)
11680 	struct mlx5_priv *priv = dev->data->dev_private;
11681 	struct mlx5_meter_domains_infos *mtb;
11685 	if (!priv->mtr_en) {
11686 		rte_errno = ENOTSUP;
11689 	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11691 		DRV_LOG(ERR, "Failed to allocate memory for meter.");
11694 	/* Create meter count actions */
11695 	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11696 		struct mlx5_flow_counter *cnt;
11697 		if (!fm->policer_stats.cnt[i])
11699 		cnt = flow_dv_counter_get_by_idx(dev,
11700 		      fm->policer_stats.cnt[i], NULL);
11701 		mtb->count_actns[i] = cnt->action;
11703 	/* Create drop action. */
11704 	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11706 		DRV_LOG(ERR, "Failed to create drop action.");
11709 	/* Egress meter table. */
11710 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11712 		DRV_LOG(ERR, "Failed to prepare egress meter table.");
11715 	/* Ingress meter table. */
11716 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11718 		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11721 	/* FDB meter table. */
11722 	if (priv->config.dv_esw_en) {
11723 		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11724 						 priv->mtr_color_reg);
11726 			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
/* Error path: release everything created so far. */
11732 	flow_dv_destroy_mtr_tbl(dev, mtb);
11737 * Destroy domain policer rule.
11740 * Pointer to domain table.
/*
 * Destroy one domain's per-color policer rules (colors below
 * RTE_MTR_DROPPED — the DROPPED default rule is owned by the table set)
 * and its jump action; pointers are NULLed after destroy so the function
 * is safe to call repeatedly.
 */
11743 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11747 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
11748 		if (dt->policer_rules[i]) {
11749 			claim_zero(mlx5_flow_os_destroy_flow
11750 				  (dt->policer_rules[i]));
11751 			dt->policer_rules[i] = NULL;
11754 	if (dt->jump_actn) {
11755 		claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11756 		dt->jump_actn = NULL;
11761 * Destroy policer rules.
11764 * Pointer to Ethernet device.
11766 * Pointer to flow meter structure.
11768 * Pointer to flow attributes.
/*
 * Destroy the policer rules of every domain selected by @attr
 * (egress/ingress/transfer). Safe to call with a NULL meter — the
 * table-set pointer is derived defensively from @fm.
 */
11774 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11775 			      const struct mlx5_flow_meter *fm,
11776 			      const struct rte_flow_attr *attr)
11778 	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11783 		flow_dv_destroy_domain_policer_rule(&mtb->egress);
11785 		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11786 	if (attr->transfer)
11787 		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11792 * Create specify domain meter policer rule.
11795 * Pointer to flow meter structure.
11797 * Pointer to DV meter table set.
11798 * @param[in] mtr_reg_c
11799 * Color match REG_C.
11802 * 0 on success, -1 otherwise.
/*
 * Install per-color policer rules for one domain: lazily create the
 * jump-to-suffix-table action, then for each color below RTE_MTR_DROPPED
 * match the color register and apply (optional counter +) either drop or
 * jump, depending on the meter's configured policer action.
 * Returns 0 on success, -1 otherwise.
 */
11805 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
11806 				    struct mlx5_meter_domain_info *dtb,
11809 	struct mlx5_flow_dv_match_params matcher = {
11810 		.size = sizeof(matcher.buf),
11812 	struct mlx5_flow_dv_match_params value = {
11813 		.size = sizeof(value.buf),
11815 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
11816 	void *actions[METER_ACTIONS];
11820 	/* Create jump action. */
11821 	if (!dtb->jump_actn)
11822 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11823 				(dtb->sfx_tbl->obj, &dtb->jump_actn);
11825 		DRV_LOG(ERR, "Failed to create policer jump action.");
11828 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
/* Match the flow color (RED/YELLOW/GREEN) written into the color REG_C. */
11831 		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
11832 				       rte_col_2_mlx5_col(i), UINT8_MAX);
11833 		if (mtb->count_actns[i])
11834 			actions[j++] = mtb->count_actns[i];
11835 		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
11836 			actions[j++] = mtb->drop_actn;
11838 			actions[j++] = dtb->jump_actn;
11839 		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
11840 					       (void *)&value, j, actions,
11841 					       &dtb->policer_rules[i]);
11843 			DRV_LOG(ERR, "Failed to create policer rule.");
11854 * Create policer rules.
11857 * Pointer to Ethernet device.
11859 * Pointer to flow meter structure.
11861 * Pointer to flow attributes.
11864 * 0 on success, -1 otherwise.
/*
 * Install policer rules for every domain selected by @attr by delegating
 * to flow_dv_create_policer_forward_rule() with the device's color
 * register. On failure the rules installed so far are destroyed.
 * Returns 0 on success, -1 otherwise.
 */
11867 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11868 			     struct mlx5_flow_meter *fm,
11869 			     const struct rte_flow_attr *attr)
11871 	struct mlx5_priv *priv = dev->data->dev_private;
11872 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
11875 	if (attr->egress) {
11876 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11877 						priv->mtr_color_reg);
11879 			DRV_LOG(ERR, "Failed to create egress policer.");
11883 	if (attr->ingress) {
11884 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11885 						priv->mtr_color_reg);
11887 			DRV_LOG(ERR, "Failed to create ingress policer.");
11891 	if (attr->transfer) {
11892 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11893 						priv->mtr_color_reg);
11895 			DRV_LOG(ERR, "Failed to create transfer policer.");
/* Error path: roll back the domains already programmed. */
11901 	flow_dv_destroy_policer_rules(dev, fm, attr);
11906 * Validate the batch counter support in root table.
11908 * Create a simple flow with invalid counter and drop action on root table to
11909 * validate if batch counter with offset on root table is supported or not.
11912 * Pointer to rte_eth_dev structure.
11915 * 0 on success, a negative errno value otherwise and rte_errno is set.
11918 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11920 struct mlx5_priv *priv = dev->data->dev_private;
11921 struct mlx5_dev_ctx_shared *sh = priv->sh;
11922 struct mlx5_flow_dv_match_params mask = {
11923 .size = sizeof(mask.buf),
11925 struct mlx5_flow_dv_match_params value = {
11926 .size = sizeof(value.buf),
11928 struct mlx5dv_flow_matcher_attr dv_attr = {
11929 .type = IBV_FLOW_ATTR_NORMAL,
11931 .match_criteria_enable = 0,
11932 .match_mask = (void *)&mask,
11934 void *actions[2] = { 0 };
11935 struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11936 struct mlx5_devx_obj *dcs = NULL;
11937 void *matcher = NULL;
/* Probe tables: root table (level 0) and a destination table (level 1). */
11941 tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11944 dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
/* Allocate a small counter batch via DevX for the probe. */
11948 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
/* Counter action with a deliberately invalid offset (UINT16_MAX). */
11951 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11955 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11956 (dest_tbl->obj, &actions[1]);
11959 dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11960 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11964 ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11968 * If batch counter with offset is not supported, the driver will not
11969 * validate the invalid offset value, so flow create should succeed.
11970 * In this case, it means batch counter is not supported in root table.
11972 * Otherwise, if flow create fails, counter offset is supported.
11975 DRV_LOG(INFO, "Batch counter is not supported in root "
11976 "table. Switch to fallback mode.");
11977 rte_errno = ENOTSUP;
11979 claim_zero(mlx5_flow_os_destroy_flow(flow));
11981 /* Check matcher to make sure validate fail at flow create. */
/* NOTE(review): the `(matcher && ...)` conjunct is redundant after `!matcher`. */
11982 if (!matcher || (matcher && errno != EINVAL))
11983 DRV_LOG(ERR, "Unexpected error in counter offset "
11984 "support detection");
/* Cleanup (labels elided): release probe actions, matcher, tables, counter. */
11987 for (i = 0; i < 2; i++) {
11989 claim_zero(mlx5_flow_os_destroy_flow_action
11993 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11995 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11997 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
11999 claim_zero(mlx5_devx_cmd_destroy(dcs));
12004 * Query a devx counter.
12007 * Pointer to the Ethernet device structure.
12009 * Index to the flow counter.
12011 * Set to clear the counter statistics.
12013 * The statistics value of packets.
12014 * @param[out] bytes
12015 * The statistics value of bytes.
12018 * 0 on success, otherwise return -1.
12021 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12022 uint64_t *pkts, uint64_t *bytes)
12024 struct mlx5_priv *priv = dev->data->dev_private;
12025 struct mlx5_flow_counter *cnt;
12026 uint64_t inn_pkts, inn_bytes;
/* Counter query needs DevX support; bail out otherwise (return elided). */
12029 if (!priv->config.devx)
12032 ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
/* Report deltas relative to the last cached snapshot. */
12035 cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12036 *pkts = inn_pkts - cnt->hits;
12037 *bytes = inn_bytes - cnt->bytes;
/*
 * Snapshot update — presumably guarded by `if (clear)` (guard line
 * elided from this listing); TODO confirm against full source.
 */
12039 cnt->hits = inn_pkts;
12040 cnt->bytes = inn_bytes;
12046 * Get aged-out flows.
12049 * Pointer to the Ethernet device structure.
12050 * @param[in] context
12051 * The address of an array of pointers to the aged-out flows contexts.
12052 * @param[in] nb_contexts
12053 * The length of context array pointers.
12054 * @param[out] error
12055 * Perform verbose error reporting if not NULL. Initialized in case of
12059 * how many contexts were retrieved on success, otherwise negative errno value.
12060 * if nb_contexts is 0, return the amount of all aged contexts.
12061 * if nb_contexts is not 0, return the amount of aged flows reported
12062 * in the context array.
12063 * @note: only stub for now
12066 flow_get_aged_flows(struct rte_eth_dev *dev,
12068 uint32_t nb_contexts,
12069 struct rte_flow_error *error)
12071 struct mlx5_priv *priv = dev->data->dev_private;
12072 struct mlx5_age_info *age_info;
12073 struct mlx5_age_param *age_param;
12074 struct mlx5_flow_counter *counter;
/* A non-zero request must come with a destination array. */
12077 if (nb_contexts && !context)
12078 return rte_flow_error_set(error, EINVAL,
12079 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12081 "Should assign at least one flow or"
12082 " context to get if nb_contexts != 0");
/* Walk the aged-counters list under its spinlock, copying out contexts. */
12083 age_info = GET_PORT_AGE_INFO(priv);
12084 rte_spinlock_lock(&age_info->aged_sl);
12085 TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12088 age_param = MLX5_CNT_TO_AGE(counter);
12089 context[nb_flows - 1] = age_param->context;
/* Stop once the caller-supplied array is full. */
12090 if (!(--nb_contexts))
12094 rte_spinlock_unlock(&age_info->aged_sl);
/* Re-arm aging event reporting for this port. */
12095 MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12100 * Mutex-protected thunk to lock-free __flow_dv_translate().
12103 flow_dv_translate(struct rte_eth_dev *dev,
12104 struct mlx5_flow *dev_flow,
12105 const struct rte_flow_attr *attr,
12106 const struct rte_flow_item items[],
12107 const struct rte_flow_action actions[],
12108 struct rte_flow_error *error)
/* Serialize against other DV control-path operations on this device. */
12112 flow_dv_shared_lock(dev);
12113 ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12114 flow_dv_shared_unlock(dev);
12119 * Mutex-protected thunk to lock-free __flow_dv_apply().
12122 flow_dv_apply(struct rte_eth_dev *dev,
12123 struct rte_flow *flow,
12124 struct rte_flow_error *error)
/* Serialize against other DV control-path operations on this device. */
12128 flow_dv_shared_lock(dev);
12129 ret = __flow_dv_apply(dev, flow, error);
12130 flow_dv_shared_unlock(dev);
12135 * Mutex-protected thunk to lock-free __flow_dv_remove().
12138 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Serialize against other DV control-path operations on this device. */
12140 flow_dv_shared_lock(dev);
12141 __flow_dv_remove(dev, flow);
12142 flow_dv_shared_unlock(dev);
12146 * Mutex-protected thunk to lock-free __flow_dv_destroy().
12149 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Serialize against other DV control-path operations on this device. */
12151 flow_dv_shared_lock(dev);
12152 __flow_dv_destroy(dev, flow);
12153 flow_dv_shared_unlock(dev);
12157 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12160 flow_dv_counter_allocate(struct rte_eth_dev *dev)
/* Allocate a non-aging counter (age parameter 0) under the shared lock. */
12164 flow_dv_shared_lock(dev);
12165 cnt = flow_dv_counter_alloc(dev, 0);
12166 flow_dv_shared_unlock(dev);
12171 * Mutex-protected thunk to lock-free flow_dv_counter_release().
12174 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
/* Serialize counter release against other DV control-path operations. */
12176 flow_dv_shared_lock(dev);
12177 flow_dv_counter_release(dev, cnt);
12178 flow_dv_shared_unlock(dev);
12182 * Validate shared action.
12183 * Dispatcher for action type specific validation.
12186 * Pointer to the Ethernet device structure.
12188 * Shared action configuration.
12189 * @param[in] action
12190 * The shared action object to validate.
12191 * @param[out] error
12192 * Perform verbose error reporting if not NULL. Initialized in case of
12196 * 0 on success, otherwise negative errno value.
12199 flow_dv_action_validate(struct rte_eth_dev *dev,
12200 const struct rte_flow_shared_action_conf *conf,
12201 const struct rte_flow_action *action,
12202 struct rte_flow_error *error)
/* The shared-action configuration is currently unused by validation. */
12204 RTE_SET_USED(conf);
12205 switch (action->type) {
/* RSS is the only shared action type supported here. */
12206 case RTE_FLOW_ACTION_TYPE_RSS:
12207 return mlx5_validate_action_rss(dev, action, error);
/* Any other type is rejected (default label elided in listing). */
12209 return rte_flow_error_set(error, ENOTSUP,
12210 RTE_FLOW_ERROR_TYPE_ACTION,
12212 "action type not supported");
12217 * Mutex-protected thunk to lock-free __flow_dv_action_create().
12219 static struct rte_flow_shared_action *
12220 flow_dv_action_create(struct rte_eth_dev *dev,
12221 const struct rte_flow_shared_action_conf *conf,
12222 const struct rte_flow_action *action,
12223 struct rte_flow_error *error)
12225 struct rte_flow_shared_action *shared_action = NULL;
/* Serialize shared-action creation against other DV control-path ops. */
12227 flow_dv_shared_lock(dev);
12228 shared_action = __flow_dv_action_create(dev, conf, action, error);
12229 flow_dv_shared_unlock(dev);
12230 return shared_action;
12234 * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
12237 flow_dv_action_destroy(struct rte_eth_dev *dev,
12238 struct rte_flow_shared_action *action,
12239 struct rte_flow_error *error)
/* Serialize shared-action destruction against other DV control-path ops. */
12243 flow_dv_shared_lock(dev);
12244 ret = __flow_dv_action_destroy(dev, action, error);
12245 flow_dv_shared_unlock(dev);
12250 * Mutex-protected thunk to lock-free __flow_dv_action_update().
12253 flow_dv_action_update(struct rte_eth_dev *dev,
12254 struct rte_flow_shared_action *action,
12255 const void *action_conf,
12256 struct rte_flow_error *error)
/* Serialize shared-action update against other DV control-path ops. */
12260 flow_dv_shared_lock(dev);
12261 ret = __flow_dv_action_update(dev, action, action_conf,
12263 flow_dv_shared_unlock(dev);
/*
 * Synchronize (flush) software steering rules to hardware for each
 * requested domain via mlx5dv dr_sync_domain; `domains` is a bitmask of
 * MLX5_DOMAIN_BIT_* values. (Error-return lines elided in listing.)
 */
12268 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12270 struct mlx5_priv *priv = dev->data->dev_private;
/* NIC Rx domain, if present. */
12273 if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12274 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
/* NIC Tx domain, if present. */
12279 if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12280 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
/* E-Switch (FDB) domain, if present. */
12284 if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12285 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
/*
 * Driver ops table registering the Direct-Verbs (DV) flow engine with the
 * generic mlx5 flow layer: validation/translation/apply path, meter table
 * and policer management, counters, flow aging and shared actions.
 */
12292 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12293 .validate = flow_dv_validate,
12294 .prepare = flow_dv_prepare,
12295 .translate = flow_dv_translate,
12296 .apply = flow_dv_apply,
12297 .remove = flow_dv_remove,
12298 .destroy = flow_dv_destroy,
12299 .query = flow_dv_query,
12300 .create_mtr_tbls = flow_dv_create_mtr_tbl,
12301 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12302 .create_policer_rules = flow_dv_create_policer_rules,
12303 .destroy_policer_rules = flow_dv_destroy_policer_rules,
12304 .counter_alloc = flow_dv_counter_allocate,
12305 .counter_free = flow_dv_counter_free,
12306 .counter_query = flow_dv_counter_query,
12307 .get_aged_flows = flow_get_aged_flows,
12308 .action_validate = flow_dv_action_validate,
12309 .action_create = flow_dv_action_create,
12310 .action_destroy = flow_dv_action_destroy,
12311 .action_update = flow_dv_action_update,
12312 .sync_domain = flow_dv_sync_domain,
12315 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */