1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
20 #include <rte_vxlan.h>
22 #include <rte_eal_paging.h>
25 #include <rte_mtr_driver.h>
26 #include <rte_tailq.h>
28 #include <mlx5_glue.h>
29 #include <mlx5_devx_cmds.h>
31 #include <mlx5_malloc.h>
33 #include "mlx5_defs.h"
35 #include "mlx5_common_os.h"
36 #include "mlx5_flow.h"
37 #include "mlx5_flow_os.h"
40 #include "rte_pmd_mlx5.h"
42 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
44 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
45 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
48 #ifndef HAVE_MLX5DV_DR_ESWITCH
49 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
50 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
54 #ifndef HAVE_MLX5DV_DR
55 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
58 /* VLAN header definitions */
59 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
60 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
61 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
62 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
63 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
78 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
79 struct mlx5_flow_tbl_resource *tbl);
82 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
83 uint32_t encap_decap_idx);
86 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
89 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
92 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
96 * Initialize flow attributes structure according to flow items' types.
98 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
99 * mode. For tunnel mode, the items to be modified are the outermost ones.
102 * Pointer to item specification.
104 * Pointer to flow attributes structure.
105 * @param[in] dev_flow
106 * Pointer to the sub flow.
107 * @param[in] tunnel_decap
108 * Whether action is after tunnel decapsulation.
111 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
112 struct mlx5_flow *dev_flow, bool tunnel_decap)
114 uint64_t layers = dev_flow->handle->layers;
117 * If layers is already initialized, it means this dev_flow is the
118 * suffix flow, the layers flags is set by the prefix flow. Need to
119 * use the layer flags from prefix flow as the suffix flow may not
120 * have the user defined items as the flow is split.
123 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
125 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
127 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
129 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
134 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
135 uint8_t next_protocol = 0xff;
136 switch (item->type) {
137 case RTE_FLOW_ITEM_TYPE_GRE:
138 case RTE_FLOW_ITEM_TYPE_NVGRE:
139 case RTE_FLOW_ITEM_TYPE_VXLAN:
140 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
141 case RTE_FLOW_ITEM_TYPE_GENEVE:
142 case RTE_FLOW_ITEM_TYPE_MPLS:
146 case RTE_FLOW_ITEM_TYPE_IPV4:
149 if (item->mask != NULL &&
150 ((const struct rte_flow_item_ipv4 *)
151 item->mask)->hdr.next_proto_id)
153 ((const struct rte_flow_item_ipv4 *)
154 (item->spec))->hdr.next_proto_id &
155 ((const struct rte_flow_item_ipv4 *)
156 (item->mask))->hdr.next_proto_id;
157 if ((next_protocol == IPPROTO_IPIP ||
158 next_protocol == IPPROTO_IPV6) && tunnel_decap)
161 case RTE_FLOW_ITEM_TYPE_IPV6:
164 if (item->mask != NULL &&
165 ((const struct rte_flow_item_ipv6 *)
166 item->mask)->hdr.proto)
168 ((const struct rte_flow_item_ipv6 *)
169 (item->spec))->hdr.proto &
170 ((const struct rte_flow_item_ipv6 *)
171 (item->mask))->hdr.proto;
172 if ((next_protocol == IPPROTO_IPIP ||
173 next_protocol == IPPROTO_IPV6) && tunnel_decap)
176 case RTE_FLOW_ITEM_TYPE_UDP:
180 case RTE_FLOW_ITEM_TYPE_TCP:
192 * Convert rte_mtr_color to mlx5 color.
201 rte_col_2_mlx5_col(enum rte_color rcol)
204 case RTE_COLOR_GREEN:
205 return MLX5_FLOW_COLOR_GREEN;
206 case RTE_COLOR_YELLOW:
207 return MLX5_FLOW_COLOR_YELLOW;
209 return MLX5_FLOW_COLOR_RED;
213 return MLX5_FLOW_COLOR_UNDEFINED;
216 struct field_modify_info {
217 uint32_t size; /* Size of field in protocol header, in bytes. */
218 uint32_t offset; /* Offset of field in protocol header, in bytes. */
219 enum mlx5_modification_field id;
222 struct field_modify_info modify_eth[] = {
223 {4, 0, MLX5_MODI_OUT_DMAC_47_16},
224 {2, 4, MLX5_MODI_OUT_DMAC_15_0},
225 {4, 6, MLX5_MODI_OUT_SMAC_47_16},
226 {2, 10, MLX5_MODI_OUT_SMAC_15_0},
230 struct field_modify_info modify_vlan_out_first_vid[] = {
231 /* Size in bits !!! */
232 {12, 0, MLX5_MODI_OUT_FIRST_VID},
236 struct field_modify_info modify_ipv4[] = {
237 {1, 1, MLX5_MODI_OUT_IP_DSCP},
238 {1, 8, MLX5_MODI_OUT_IPV4_TTL},
239 {4, 12, MLX5_MODI_OUT_SIPV4},
240 {4, 16, MLX5_MODI_OUT_DIPV4},
244 struct field_modify_info modify_ipv6[] = {
245 {1, 0, MLX5_MODI_OUT_IP_DSCP},
246 {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
247 {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
248 {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
249 {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
250 {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
251 {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
252 {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
253 {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
254 {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
258 struct field_modify_info modify_udp[] = {
259 {2, 0, MLX5_MODI_OUT_UDP_SPORT},
260 {2, 2, MLX5_MODI_OUT_UDP_DPORT},
264 struct field_modify_info modify_tcp[] = {
265 {2, 0, MLX5_MODI_OUT_TCP_SPORT},
266 {2, 2, MLX5_MODI_OUT_TCP_DPORT},
267 {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
268 {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
272 static const struct rte_flow_item *
273 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
275 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
276 switch (item->type) {
279 case RTE_FLOW_ITEM_TYPE_VXLAN:
280 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
281 case RTE_FLOW_ITEM_TYPE_GRE:
282 case RTE_FLOW_ITEM_TYPE_MPLS:
283 case RTE_FLOW_ITEM_TYPE_NVGRE:
284 case RTE_FLOW_ITEM_TYPE_GENEVE:
286 case RTE_FLOW_ITEM_TYPE_IPV4:
287 case RTE_FLOW_ITEM_TYPE_IPV6:
288 if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
289 item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
298 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
299 uint8_t next_protocol, uint64_t *item_flags,
302 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
303 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
304 if (next_protocol == IPPROTO_IPIP) {
305 *item_flags |= MLX5_FLOW_LAYER_IPIP;
308 if (next_protocol == IPPROTO_IPV6) {
309 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
314 static inline struct mlx5_hlist *
315 flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
316 const char *name, uint32_t size, bool direct_key,
317 bool lcores_share, void *ctx,
318 mlx5_list_create_cb cb_create,
319 mlx5_list_match_cb cb_match,
320 mlx5_list_remove_cb cb_remove,
321 mlx5_list_clone_cb cb_clone,
322 mlx5_list_clone_free_cb cb_clone_free)
324 struct mlx5_hlist *hl;
325 struct mlx5_hlist *expected = NULL;
326 char s[MLX5_NAME_SIZE];
328 hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
331 snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
332 hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
333 ctx, cb_create, cb_match, cb_remove, cb_clone,
336 DRV_LOG(ERR, "%s hash creation failed", name);
340 if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
343 mlx5_hlist_destroy(hl);
344 hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
349 /* Update VLAN's VID/PCP based on input rte_flow_action.
352 * Pointer to struct rte_flow_action.
354 * Pointer to struct rte_vlan_hdr.
357 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
358 struct rte_vlan_hdr *vlan)
361 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
363 ((const struct rte_flow_action_of_set_vlan_pcp *)
364 action->conf)->vlan_pcp;
365 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
366 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
367 vlan->vlan_tci |= vlan_tci;
368 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
369 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
370 vlan->vlan_tci |= rte_be_to_cpu_16
371 (((const struct rte_flow_action_of_set_vlan_vid *)
372 action->conf)->vlan_vid);
377 * Fetch 1, 2, 3 or 4 byte field from the byte array
378 * and return as unsigned integer in host-endian format.
381 * Pointer to data array.
383 * Size of field to extract.
386 * converted field in host endian format.
388 static inline uint32_t
389 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
398 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
401 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
402 ret = (ret << 8) | *(data + sizeof(uint16_t));
405 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
416 * Convert modify-header action to DV specification.
418 * Data length of each action is determined by provided field description
419 * and the item mask. Data bit offset and width of each action is determined
420 * by provided item mask.
423 * Pointer to item specification.
425 * Pointer to field modification information.
426 * For MLX5_MODIFICATION_TYPE_SET specifies destination field.
427 * For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
428 * For MLX5_MODIFICATION_TYPE_COPY specifies source field.
430 * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
431 * Negative offset value sets the same offset as source offset.
432 * size field is ignored, value is taken from source field.
433 * @param[in,out] resource
434 * Pointer to the modify-header resource.
436 * Type of modification.
438 * Pointer to the error structure.
441 * 0 on success, a negative errno value otherwise and rte_errno is set.
444 flow_dv_convert_modify_action(struct rte_flow_item *item,
445 struct field_modify_info *field,
446 struct field_modify_info *dcopy,
447 struct mlx5_flow_dv_modify_hdr_resource *resource,
448 uint32_t type, struct rte_flow_error *error)
450 uint32_t i = resource->actions_num;
451 struct mlx5_modification_cmd *actions = resource->actions;
452 uint32_t carry_b = 0;
455 * The item and mask are provided in big-endian format.
456 * The fields should be presented as in big-endian format either.
457 * Mask must be always present, it defines the actual field width.
459 MLX5_ASSERT(item->mask);
460 MLX5_ASSERT(field->size);
466 bool next_field = true;
467 bool next_dcopy = true;
469 if (i >= MLX5_MAX_MODIFY_NUM)
470 return rte_flow_error_set(error, EINVAL,
471 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
472 "too many items to modify");
473 /* Fetch variable byte size mask from the array. */
474 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
475 field->offset, field->size);
480 /* Deduce actual data width in bits from mask value. */
481 off_b = rte_bsf32(mask) + carry_b;
482 size_b = sizeof(uint32_t) * CHAR_BIT -
483 off_b - __builtin_clz(mask);
485 actions[i] = (struct mlx5_modification_cmd) {
489 .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
492 if (type == MLX5_MODIFICATION_TYPE_COPY) {
494 actions[i].dst_field = dcopy->id;
495 actions[i].dst_offset =
496 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
497 /* Convert entire record to big-endian format. */
498 actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
500 * Destination field overflow. Copy leftovers of
501 * a source field to the next destination field.
504 if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
507 dcopy->size * CHAR_BIT - dcopy->offset;
508 carry_b = actions[i].length;
512 * Not enough bits in a source filed to fill a
513 * destination field. Switch to the next source.
515 if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
516 (size_b == field->size * CHAR_BIT - off_b)) {
518 field->size * CHAR_BIT - off_b;
519 dcopy->offset += actions[i].length;
525 MLX5_ASSERT(item->spec);
526 data = flow_dv_fetch_field((const uint8_t *)item->spec +
527 field->offset, field->size);
528 /* Shift out the trailing masked bits from data. */
529 data = (data & mask) >> off_b;
530 actions[i].data1 = rte_cpu_to_be_32(data);
532 /* Convert entire record to expected big-endian format. */
533 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
537 } while (field->size);
538 if (resource->actions_num == i)
539 return rte_flow_error_set(error, EINVAL,
540 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
541 "invalid modification flow item");
542 resource->actions_num = i;
547 * Convert modify-header set IPv4 address action to DV specification.
549 * @param[in,out] resource
550 * Pointer to the modify-header resource.
552 * Pointer to action specification.
554 * Pointer to the error structure.
557 * 0 on success, a negative errno value otherwise and rte_errno is set.
560 flow_dv_convert_action_modify_ipv4
561 (struct mlx5_flow_dv_modify_hdr_resource *resource,
562 const struct rte_flow_action *action,
563 struct rte_flow_error *error)
565 const struct rte_flow_action_set_ipv4 *conf =
566 (const struct rte_flow_action_set_ipv4 *)(action->conf);
567 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
568 struct rte_flow_item_ipv4 ipv4;
569 struct rte_flow_item_ipv4 ipv4_mask;
571 memset(&ipv4, 0, sizeof(ipv4));
572 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
573 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
574 ipv4.hdr.src_addr = conf->ipv4_addr;
575 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
577 ipv4.hdr.dst_addr = conf->ipv4_addr;
578 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
581 item.mask = &ipv4_mask;
582 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
583 MLX5_MODIFICATION_TYPE_SET, error);
587 * Convert modify-header set IPv6 address action to DV specification.
589 * @param[in,out] resource
590 * Pointer to the modify-header resource.
592 * Pointer to action specification.
594 * Pointer to the error structure.
597 * 0 on success, a negative errno value otherwise and rte_errno is set.
600 flow_dv_convert_action_modify_ipv6
601 (struct mlx5_flow_dv_modify_hdr_resource *resource,
602 const struct rte_flow_action *action,
603 struct rte_flow_error *error)
605 const struct rte_flow_action_set_ipv6 *conf =
606 (const struct rte_flow_action_set_ipv6 *)(action->conf);
607 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
608 struct rte_flow_item_ipv6 ipv6;
609 struct rte_flow_item_ipv6 ipv6_mask;
611 memset(&ipv6, 0, sizeof(ipv6));
612 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
613 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
614 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
615 sizeof(ipv6.hdr.src_addr));
616 memcpy(&ipv6_mask.hdr.src_addr,
617 &rte_flow_item_ipv6_mask.hdr.src_addr,
618 sizeof(ipv6.hdr.src_addr));
620 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
621 sizeof(ipv6.hdr.dst_addr));
622 memcpy(&ipv6_mask.hdr.dst_addr,
623 &rte_flow_item_ipv6_mask.hdr.dst_addr,
624 sizeof(ipv6.hdr.dst_addr));
627 item.mask = &ipv6_mask;
628 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
629 MLX5_MODIFICATION_TYPE_SET, error);
633 * Convert modify-header set MAC address action to DV specification.
635 * @param[in,out] resource
636 * Pointer to the modify-header resource.
638 * Pointer to action specification.
640 * Pointer to the error structure.
643 * 0 on success, a negative errno value otherwise and rte_errno is set.
646 flow_dv_convert_action_modify_mac
647 (struct mlx5_flow_dv_modify_hdr_resource *resource,
648 const struct rte_flow_action *action,
649 struct rte_flow_error *error)
651 const struct rte_flow_action_set_mac *conf =
652 (const struct rte_flow_action_set_mac *)(action->conf);
653 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
654 struct rte_flow_item_eth eth;
655 struct rte_flow_item_eth eth_mask;
657 memset(ð, 0, sizeof(eth));
658 memset(ð_mask, 0, sizeof(eth_mask));
659 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
660 memcpy(ð.src.addr_bytes, &conf->mac_addr,
661 sizeof(eth.src.addr_bytes));
662 memcpy(ð_mask.src.addr_bytes,
663 &rte_flow_item_eth_mask.src.addr_bytes,
664 sizeof(eth_mask.src.addr_bytes));
666 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
667 sizeof(eth.dst.addr_bytes));
668 memcpy(ð_mask.dst.addr_bytes,
669 &rte_flow_item_eth_mask.dst.addr_bytes,
670 sizeof(eth_mask.dst.addr_bytes));
673 item.mask = ð_mask;
674 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
675 MLX5_MODIFICATION_TYPE_SET, error);
679 * Convert modify-header set VLAN VID action to DV specification.
681 * @param[in,out] resource
682 * Pointer to the modify-header resource.
684 * Pointer to action specification.
686 * Pointer to the error structure.
689 * 0 on success, a negative errno value otherwise and rte_errno is set.
692 flow_dv_convert_action_modify_vlan_vid
693 (struct mlx5_flow_dv_modify_hdr_resource *resource,
694 const struct rte_flow_action *action,
695 struct rte_flow_error *error)
697 const struct rte_flow_action_of_set_vlan_vid *conf =
698 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
699 int i = resource->actions_num;
700 struct mlx5_modification_cmd *actions = resource->actions;
701 struct field_modify_info *field = modify_vlan_out_first_vid;
703 if (i >= MLX5_MAX_MODIFY_NUM)
704 return rte_flow_error_set(error, EINVAL,
705 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
706 "too many items to modify");
707 actions[i] = (struct mlx5_modification_cmd) {
708 .action_type = MLX5_MODIFICATION_TYPE_SET,
710 .length = field->size,
711 .offset = field->offset,
713 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
714 actions[i].data1 = conf->vlan_vid;
715 actions[i].data1 = actions[i].data1 << 16;
716 resource->actions_num = ++i;
721 * Convert modify-header set TP action to DV specification.
723 * @param[in,out] resource
724 * Pointer to the modify-header resource.
726 * Pointer to action specification.
728 * Pointer to rte_flow_item objects list.
730 * Pointer to flow attributes structure.
731 * @param[in] dev_flow
732 * Pointer to the sub flow.
733 * @param[in] tunnel_decap
734 * Whether action is after tunnel decapsulation.
736 * Pointer to the error structure.
739 * 0 on success, a negative errno value otherwise and rte_errno is set.
742 flow_dv_convert_action_modify_tp
743 (struct mlx5_flow_dv_modify_hdr_resource *resource,
744 const struct rte_flow_action *action,
745 const struct rte_flow_item *items,
746 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
747 bool tunnel_decap, struct rte_flow_error *error)
749 const struct rte_flow_action_set_tp *conf =
750 (const struct rte_flow_action_set_tp *)(action->conf);
751 struct rte_flow_item item;
752 struct rte_flow_item_udp udp;
753 struct rte_flow_item_udp udp_mask;
754 struct rte_flow_item_tcp tcp;
755 struct rte_flow_item_tcp tcp_mask;
756 struct field_modify_info *field;
759 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
761 memset(&udp, 0, sizeof(udp));
762 memset(&udp_mask, 0, sizeof(udp_mask));
763 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
764 udp.hdr.src_port = conf->port;
765 udp_mask.hdr.src_port =
766 rte_flow_item_udp_mask.hdr.src_port;
768 udp.hdr.dst_port = conf->port;
769 udp_mask.hdr.dst_port =
770 rte_flow_item_udp_mask.hdr.dst_port;
772 item.type = RTE_FLOW_ITEM_TYPE_UDP;
774 item.mask = &udp_mask;
777 MLX5_ASSERT(attr->tcp);
778 memset(&tcp, 0, sizeof(tcp));
779 memset(&tcp_mask, 0, sizeof(tcp_mask));
780 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
781 tcp.hdr.src_port = conf->port;
782 tcp_mask.hdr.src_port =
783 rte_flow_item_tcp_mask.hdr.src_port;
785 tcp.hdr.dst_port = conf->port;
786 tcp_mask.hdr.dst_port =
787 rte_flow_item_tcp_mask.hdr.dst_port;
789 item.type = RTE_FLOW_ITEM_TYPE_TCP;
791 item.mask = &tcp_mask;
794 return flow_dv_convert_modify_action(&item, field, NULL, resource,
795 MLX5_MODIFICATION_TYPE_SET, error);
799 * Convert modify-header set TTL action to DV specification.
801 * @param[in,out] resource
802 * Pointer to the modify-header resource.
804 * Pointer to action specification.
806 * Pointer to rte_flow_item objects list.
808 * Pointer to flow attributes structure.
809 * @param[in] dev_flow
810 * Pointer to the sub flow.
811 * @param[in] tunnel_decap
812 * Whether action is after tunnel decapsulation.
814 * Pointer to the error structure.
817 * 0 on success, a negative errno value otherwise and rte_errno is set.
820 flow_dv_convert_action_modify_ttl
821 (struct mlx5_flow_dv_modify_hdr_resource *resource,
822 const struct rte_flow_action *action,
823 const struct rte_flow_item *items,
824 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
825 bool tunnel_decap, struct rte_flow_error *error)
827 const struct rte_flow_action_set_ttl *conf =
828 (const struct rte_flow_action_set_ttl *)(action->conf);
829 struct rte_flow_item item;
830 struct rte_flow_item_ipv4 ipv4;
831 struct rte_flow_item_ipv4 ipv4_mask;
832 struct rte_flow_item_ipv6 ipv6;
833 struct rte_flow_item_ipv6 ipv6_mask;
834 struct field_modify_info *field;
837 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
839 memset(&ipv4, 0, sizeof(ipv4));
840 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
841 ipv4.hdr.time_to_live = conf->ttl_value;
842 ipv4_mask.hdr.time_to_live = 0xFF;
843 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
845 item.mask = &ipv4_mask;
848 MLX5_ASSERT(attr->ipv6);
849 memset(&ipv6, 0, sizeof(ipv6));
850 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
851 ipv6.hdr.hop_limits = conf->ttl_value;
852 ipv6_mask.hdr.hop_limits = 0xFF;
853 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
855 item.mask = &ipv6_mask;
858 return flow_dv_convert_modify_action(&item, field, NULL, resource,
859 MLX5_MODIFICATION_TYPE_SET, error);
863 * Convert modify-header decrement TTL action to DV specification.
865 * @param[in,out] resource
866 * Pointer to the modify-header resource.
868 * Pointer to action specification.
870 * Pointer to rte_flow_item objects list.
872 * Pointer to flow attributes structure.
873 * @param[in] dev_flow
874 * Pointer to the sub flow.
875 * @param[in] tunnel_decap
876 * Whether action is after tunnel decapsulation.
878 * Pointer to the error structure.
881 * 0 on success, a negative errno value otherwise and rte_errno is set.
884 flow_dv_convert_action_modify_dec_ttl
885 (struct mlx5_flow_dv_modify_hdr_resource *resource,
886 const struct rte_flow_item *items,
887 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
888 bool tunnel_decap, struct rte_flow_error *error)
890 struct rte_flow_item item;
891 struct rte_flow_item_ipv4 ipv4;
892 struct rte_flow_item_ipv4 ipv4_mask;
893 struct rte_flow_item_ipv6 ipv6;
894 struct rte_flow_item_ipv6 ipv6_mask;
895 struct field_modify_info *field;
898 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
900 memset(&ipv4, 0, sizeof(ipv4));
901 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
902 ipv4.hdr.time_to_live = 0xFF;
903 ipv4_mask.hdr.time_to_live = 0xFF;
904 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
906 item.mask = &ipv4_mask;
909 MLX5_ASSERT(attr->ipv6);
910 memset(&ipv6, 0, sizeof(ipv6));
911 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
912 ipv6.hdr.hop_limits = 0xFF;
913 ipv6_mask.hdr.hop_limits = 0xFF;
914 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
916 item.mask = &ipv6_mask;
919 return flow_dv_convert_modify_action(&item, field, NULL, resource,
920 MLX5_MODIFICATION_TYPE_ADD, error);
924 * Convert modify-header increment/decrement TCP Sequence number
925 * to DV specification.
927 * @param[in,out] resource
928 * Pointer to the modify-header resource.
930 * Pointer to action specification.
932 * Pointer to the error structure.
935 * 0 on success, a negative errno value otherwise and rte_errno is set.
938 flow_dv_convert_action_modify_tcp_seq
939 (struct mlx5_flow_dv_modify_hdr_resource *resource,
940 const struct rte_flow_action *action,
941 struct rte_flow_error *error)
943 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
944 uint64_t value = rte_be_to_cpu_32(*conf);
945 struct rte_flow_item item;
946 struct rte_flow_item_tcp tcp;
947 struct rte_flow_item_tcp tcp_mask;
949 memset(&tcp, 0, sizeof(tcp));
950 memset(&tcp_mask, 0, sizeof(tcp_mask));
951 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
953 * The HW has no decrement operation, only increment operation.
954 * To simulate decrement X from Y using increment operation
955 * we need to add UINT32_MAX X times to Y.
956 * Each adding of UINT32_MAX decrements Y by 1.
959 tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
960 tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
961 item.type = RTE_FLOW_ITEM_TYPE_TCP;
963 item.mask = &tcp_mask;
964 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
965 MLX5_MODIFICATION_TYPE_ADD, error);
969 * Convert modify-header increment/decrement TCP Acknowledgment number
970 * to DV specification.
972 * @param[in,out] resource
973 * Pointer to the modify-header resource.
975 * Pointer to action specification.
977 * Pointer to the error structure.
980 * 0 on success, a negative errno value otherwise and rte_errno is set.
983 flow_dv_convert_action_modify_tcp_ack
984 (struct mlx5_flow_dv_modify_hdr_resource *resource,
985 const struct rte_flow_action *action,
986 struct rte_flow_error *error)
988 const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
989 uint64_t value = rte_be_to_cpu_32(*conf);
990 struct rte_flow_item item;
991 struct rte_flow_item_tcp tcp;
992 struct rte_flow_item_tcp tcp_mask;
994 memset(&tcp, 0, sizeof(tcp));
995 memset(&tcp_mask, 0, sizeof(tcp_mask));
996 if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
998 * The HW has no decrement operation, only increment operation.
999 * To simulate decrement X from Y using increment operation
1000 * we need to add UINT32_MAX X times to Y.
1001 * Each adding of UINT32_MAX decrements Y by 1.
1003 value *= UINT32_MAX;
1004 tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
1005 tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
1006 item.type = RTE_FLOW_ITEM_TYPE_TCP;
1008 item.mask = &tcp_mask;
1009 return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
1010 MLX5_MODIFICATION_TYPE_ADD, error);
1013 static enum mlx5_modification_field reg_to_field[] = {
1014 [REG_NON] = MLX5_MODI_OUT_NONE,
1015 [REG_A] = MLX5_MODI_META_DATA_REG_A,
1016 [REG_B] = MLX5_MODI_META_DATA_REG_B,
1017 [REG_C_0] = MLX5_MODI_META_REG_C_0,
1018 [REG_C_1] = MLX5_MODI_META_REG_C_1,
1019 [REG_C_2] = MLX5_MODI_META_REG_C_2,
1020 [REG_C_3] = MLX5_MODI_META_REG_C_3,
1021 [REG_C_4] = MLX5_MODI_META_REG_C_4,
1022 [REG_C_5] = MLX5_MODI_META_REG_C_5,
1023 [REG_C_6] = MLX5_MODI_META_REG_C_6,
1024 [REG_C_7] = MLX5_MODI_META_REG_C_7,
1028 * Convert register set to DV specification.
1030 * @param[in,out] resource
1031 * Pointer to the modify-header resource.
1033 * Pointer to action specification.
1035 * Pointer to the error structure.
1038 * 0 on success, a negative errno value otherwise and rte_errno is set.
1041 flow_dv_convert_action_set_reg
1042 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1043 const struct rte_flow_action *action,
1044 struct rte_flow_error *error)
1046 const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1047 struct mlx5_modification_cmd *actions = resource->actions;
1048 uint32_t i = resource->actions_num;
1050 if (i >= MLX5_MAX_MODIFY_NUM)
1051 return rte_flow_error_set(error, EINVAL,
1052 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1053 "too many items to modify");
1054 MLX5_ASSERT(conf->id != REG_NON);
1055 MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1056 actions[i] = (struct mlx5_modification_cmd) {
1057 .action_type = MLX5_MODIFICATION_TYPE_SET,
1058 .field = reg_to_field[conf->id],
1059 .offset = conf->offset,
1060 .length = conf->length,
1062 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1063 actions[i].data1 = rte_cpu_to_be_32(conf->data);
1065 resource->actions_num = i;
1070 * Convert SET_TAG action to DV specification.
1073 * Pointer to the rte_eth_dev structure.
1074 * @param[in,out] resource
1075 * Pointer to the modify-header resource.
1077 * Pointer to action specification.
1079 * Pointer to the error structure.
1082 * 0 on success, a negative errno value otherwise and rte_errno is set.
1085 flow_dv_convert_action_set_tag
1086 (struct rte_eth_dev *dev,
1087 struct mlx5_flow_dv_modify_hdr_resource *resource,
1088 const struct rte_flow_action_set_tag *conf,
1089 struct rte_flow_error *error)
1091 rte_be32_t data = rte_cpu_to_be_32(conf->data);
1092 rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1093 struct rte_flow_item item = {
1097 struct field_modify_info reg_c_x[] = {
1100 enum mlx5_modification_field reg_type;
1103 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1106 MLX5_ASSERT(ret != REG_NON);
1107 MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1108 reg_type = reg_to_field[ret];
1109 MLX5_ASSERT(reg_type > 0);
1110 reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1111 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1112 MLX5_MODIFICATION_TYPE_SET, error);
1116 * Convert internal COPY_REG action to DV specification.
1119 * Pointer to the rte_eth_dev structure.
1120 * @param[in,out] res
1121 * Pointer to the modify-header resource.
1123 * Pointer to action specification.
1125 * Pointer to the error structure.
1128 * 0 on success, a negative errno value otherwise and rte_errno is set.
1131 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1132 struct mlx5_flow_dv_modify_hdr_resource *res,
1133 const struct rte_flow_action *action,
1134 struct rte_flow_error *error)
1136 const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1137 rte_be32_t mask = RTE_BE32(UINT32_MAX);
1138 struct rte_flow_item item = {
1142 struct field_modify_info reg_src[] = {
1143 {4, 0, reg_to_field[conf->src]},
1146 struct field_modify_info reg_dst = {
1148 .id = reg_to_field[conf->dst],
1150 /* Adjust reg_c[0] usage according to reported mask. */
1151 if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1152 struct mlx5_priv *priv = dev->data->dev_private;
1153 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1155 MLX5_ASSERT(reg_c0);
1156 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1157 if (conf->dst == REG_C_0) {
1158 /* Copy to reg_c[0], within mask only. */
1159 reg_dst.offset = rte_bsf32(reg_c0);
1161 * Mask is ignoring the enianness, because
1162 * there is no conversion in datapath.
1164 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1165 /* Copy from destination lower bits to reg_c[0]. */
1166 mask = reg_c0 >> reg_dst.offset;
1168 /* Copy from destination upper bits to reg_c[0]. */
1169 mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1170 rte_fls_u32(reg_c0));
1173 mask = rte_cpu_to_be_32(reg_c0);
1174 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1175 /* Copy from reg_c[0] to destination lower bits. */
1178 /* Copy from reg_c[0] to destination upper bits. */
1179 reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1180 (rte_fls_u32(reg_c0) -
1185 return flow_dv_convert_modify_action(&item,
1186 reg_src, ®_dst, res,
1187 MLX5_MODIFICATION_TYPE_COPY,
1192 * Convert MARK action to DV specification. This routine is used
1193 * in extensive metadata only and requires metadata register to be
1194 * handled. In legacy mode hardware tag resource is engaged.
1197 * Pointer to the rte_eth_dev structure.
1199 * Pointer to MARK action specification.
1200 * @param[in,out] resource
1201 * Pointer to the modify-header resource.
1203 * Pointer to the error structure.
1206 * 0 on success, a negative errno value otherwise and rte_errno is set.
1209 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1210 const struct rte_flow_action_mark *conf,
1211 struct mlx5_flow_dv_modify_hdr_resource *resource,
1212 struct rte_flow_error *error)
1214 struct mlx5_priv *priv = dev->data->dev_private;
1215 rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1216 priv->sh->dv_mark_mask);
1217 rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1218 struct rte_flow_item item = {
1222 struct field_modify_info reg_c_x[] = {
1228 return rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1230 NULL, "zero mark action mask");
1231 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1234 MLX5_ASSERT(reg > 0);
1235 if (reg == REG_C_0) {
1236 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1237 uint32_t shl_c0 = rte_bsf32(msk_c0);
1239 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1240 mask = rte_cpu_to_be_32(mask) & msk_c0;
1241 mask = rte_cpu_to_be_32(mask << shl_c0);
1243 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1244 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1245 MLX5_MODIFICATION_TYPE_SET, error);
1249 * Get metadata register index for specified steering domain.
1252 * Pointer to the rte_eth_dev structure.
1254 * Attributes of flow to determine steering domain.
1256 * Pointer to the error structure.
1259 * positive index on success, a negative errno value otherwise
1260 * and rte_errno is set.
1262 static enum modify_reg
1263 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1264 const struct rte_flow_attr *attr,
1265 struct rte_flow_error *error)
1268 mlx5_flow_get_reg_id(dev, attr->transfer ?
1272 MLX5_METADATA_RX, 0, error);
1274 return rte_flow_error_set(error,
1275 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1276 NULL, "unavailable "
1277 "metadata register");
1282 * Convert SET_META action to DV specification.
1285 * Pointer to the rte_eth_dev structure.
1286 * @param[in,out] resource
1287 * Pointer to the modify-header resource.
1289 * Attributes of flow that includes this item.
1291 * Pointer to action specification.
1293 * Pointer to the error structure.
1296 * 0 on success, a negative errno value otherwise and rte_errno is set.
1299 flow_dv_convert_action_set_meta
1300 (struct rte_eth_dev *dev,
1301 struct mlx5_flow_dv_modify_hdr_resource *resource,
1302 const struct rte_flow_attr *attr,
1303 const struct rte_flow_action_set_meta *conf,
1304 struct rte_flow_error *error)
1306 uint32_t mask = rte_cpu_to_be_32(conf->mask);
1307 uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
1308 struct rte_flow_item item = {
1312 struct field_modify_info reg_c_x[] = {
1315 int reg = flow_dv_get_metadata_reg(dev, attr, error);
1319 MLX5_ASSERT(reg != REG_NON);
1320 if (reg == REG_C_0) {
1321 struct mlx5_priv *priv = dev->data->dev_private;
1322 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1323 uint32_t shl_c0 = rte_bsf32(msk_c0);
1325 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1326 mask = rte_cpu_to_be_32(mask) & msk_c0;
1327 mask = rte_cpu_to_be_32(mask << shl_c0);
1329 reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1330 /* The routine expects parameters in memory as big-endian ones. */
1331 return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1332 MLX5_MODIFICATION_TYPE_SET, error);
1336 * Convert modify-header set IPv4 DSCP action to DV specification.
1338 * @param[in,out] resource
1339 * Pointer to the modify-header resource.
1341 * Pointer to action specification.
1343 * Pointer to the error structure.
1346 * 0 on success, a negative errno value otherwise and rte_errno is set.
1349 flow_dv_convert_action_modify_ipv4_dscp
1350 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351 const struct rte_flow_action *action,
1352 struct rte_flow_error *error)
1354 const struct rte_flow_action_set_dscp *conf =
1355 (const struct rte_flow_action_set_dscp *)(action->conf);
1356 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1357 struct rte_flow_item_ipv4 ipv4;
1358 struct rte_flow_item_ipv4 ipv4_mask;
1360 memset(&ipv4, 0, sizeof(ipv4));
1361 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1362 ipv4.hdr.type_of_service = conf->dscp;
1363 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1365 item.mask = &ipv4_mask;
1366 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1367 MLX5_MODIFICATION_TYPE_SET, error);
1371 * Convert modify-header set IPv6 DSCP action to DV specification.
1373 * @param[in,out] resource
1374 * Pointer to the modify-header resource.
1376 * Pointer to action specification.
1378 * Pointer to the error structure.
1381 * 0 on success, a negative errno value otherwise and rte_errno is set.
1384 flow_dv_convert_action_modify_ipv6_dscp
1385 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1386 const struct rte_flow_action *action,
1387 struct rte_flow_error *error)
1389 const struct rte_flow_action_set_dscp *conf =
1390 (const struct rte_flow_action_set_dscp *)(action->conf);
1391 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1392 struct rte_flow_item_ipv6 ipv6;
1393 struct rte_flow_item_ipv6 ipv6_mask;
1395 memset(&ipv6, 0, sizeof(ipv6));
1396 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1398 * Even though the DSCP bits offset of IPv6 is not byte aligned,
1399 * rdma-core only accept the DSCP bits byte aligned start from
1400 * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1401 * bits in IPv6 case as rdma-core requires byte aligned value.
1403 ipv6.hdr.vtc_flow = conf->dscp;
1404 ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1406 item.mask = &ipv6_mask;
1407 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1408 MLX5_MODIFICATION_TYPE_SET, error);
1412 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1413 enum rte_flow_field_id field)
1416 case RTE_FLOW_FIELD_START:
1418 case RTE_FLOW_FIELD_MAC_DST:
1419 case RTE_FLOW_FIELD_MAC_SRC:
1421 case RTE_FLOW_FIELD_VLAN_TYPE:
1423 case RTE_FLOW_FIELD_VLAN_ID:
1425 case RTE_FLOW_FIELD_MAC_TYPE:
1427 case RTE_FLOW_FIELD_IPV4_DSCP:
1429 case RTE_FLOW_FIELD_IPV4_TTL:
1431 case RTE_FLOW_FIELD_IPV4_SRC:
1432 case RTE_FLOW_FIELD_IPV4_DST:
1434 case RTE_FLOW_FIELD_IPV6_DSCP:
1436 case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1438 case RTE_FLOW_FIELD_IPV6_SRC:
1439 case RTE_FLOW_FIELD_IPV6_DST:
1441 case RTE_FLOW_FIELD_TCP_PORT_SRC:
1442 case RTE_FLOW_FIELD_TCP_PORT_DST:
1444 case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1445 case RTE_FLOW_FIELD_TCP_ACK_NUM:
1447 case RTE_FLOW_FIELD_TCP_FLAGS:
1449 case RTE_FLOW_FIELD_UDP_PORT_SRC:
1450 case RTE_FLOW_FIELD_UDP_PORT_DST:
1452 case RTE_FLOW_FIELD_VXLAN_VNI:
1453 case RTE_FLOW_FIELD_GENEVE_VNI:
1455 case RTE_FLOW_FIELD_GTP_TEID:
1456 case RTE_FLOW_FIELD_TAG:
1458 case RTE_FLOW_FIELD_MARK:
1460 case RTE_FLOW_FIELD_META:
1461 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1463 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1467 case RTE_FLOW_FIELD_POINTER:
1468 case RTE_FLOW_FIELD_VALUE:
1477 mlx5_flow_field_id_to_modify_info
1478 (const struct rte_flow_action_modify_data *data,
1479 struct field_modify_info *info,
1480 uint32_t *mask, uint32_t *value,
1481 uint32_t width, uint32_t dst_width,
1482 struct rte_eth_dev *dev,
1483 const struct rte_flow_attr *attr,
1484 struct rte_flow_error *error)
1486 struct mlx5_priv *priv = dev->data->dev_private;
1487 struct mlx5_dev_config *config = &priv->config;
1491 switch (data->field) {
1492 case RTE_FLOW_FIELD_START:
1493 /* not supported yet */
1496 case RTE_FLOW_FIELD_MAC_DST:
1497 off = data->offset > 16 ? data->offset - 16 : 0;
1499 if (data->offset < 16) {
1500 info[idx] = (struct field_modify_info){2, 0,
1501 MLX5_MODI_OUT_DMAC_15_0};
1503 mask[idx] = rte_cpu_to_be_16(0xffff >>
1507 mask[idx] = RTE_BE16(0xffff);
1514 info[idx] = (struct field_modify_info){4, 4 * idx,
1515 MLX5_MODI_OUT_DMAC_47_16};
1516 mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1517 (32 - width)) << off);
1519 if (data->offset < 16)
1520 info[idx++] = (struct field_modify_info){2, 0,
1521 MLX5_MODI_OUT_DMAC_15_0};
1522 info[idx] = (struct field_modify_info){4, off,
1523 MLX5_MODI_OUT_DMAC_47_16};
1526 case RTE_FLOW_FIELD_MAC_SRC:
1527 off = data->offset > 16 ? data->offset - 16 : 0;
1529 if (data->offset < 16) {
1530 info[idx] = (struct field_modify_info){2, 0,
1531 MLX5_MODI_OUT_SMAC_15_0};
1533 mask[idx] = rte_cpu_to_be_16(0xffff >>
1537 mask[idx] = RTE_BE16(0xffff);
1544 info[idx] = (struct field_modify_info){4, 4 * idx,
1545 MLX5_MODI_OUT_SMAC_47_16};
1546 mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1547 (32 - width)) << off);
1549 if (data->offset < 16)
1550 info[idx++] = (struct field_modify_info){2, 0,
1551 MLX5_MODI_OUT_SMAC_15_0};
1552 info[idx] = (struct field_modify_info){4, off,
1553 MLX5_MODI_OUT_SMAC_47_16};
1556 case RTE_FLOW_FIELD_VLAN_TYPE:
1557 /* not supported yet */
1559 case RTE_FLOW_FIELD_VLAN_ID:
1560 info[idx] = (struct field_modify_info){2, 0,
1561 MLX5_MODI_OUT_FIRST_VID};
1563 mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1565 case RTE_FLOW_FIELD_MAC_TYPE:
1566 info[idx] = (struct field_modify_info){2, 0,
1567 MLX5_MODI_OUT_ETHERTYPE};
1569 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1571 case RTE_FLOW_FIELD_IPV4_DSCP:
1572 info[idx] = (struct field_modify_info){1, 0,
1573 MLX5_MODI_OUT_IP_DSCP};
1575 mask[idx] = 0x3f >> (6 - width);
1577 case RTE_FLOW_FIELD_IPV4_TTL:
1578 info[idx] = (struct field_modify_info){1, 0,
1579 MLX5_MODI_OUT_IPV4_TTL};
1581 mask[idx] = 0xff >> (8 - width);
1583 case RTE_FLOW_FIELD_IPV4_SRC:
1584 info[idx] = (struct field_modify_info){4, 0,
1585 MLX5_MODI_OUT_SIPV4};
1587 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1590 case RTE_FLOW_FIELD_IPV4_DST:
1591 info[idx] = (struct field_modify_info){4, 0,
1592 MLX5_MODI_OUT_DIPV4};
1594 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1597 case RTE_FLOW_FIELD_IPV6_DSCP:
1598 info[idx] = (struct field_modify_info){1, 0,
1599 MLX5_MODI_OUT_IP_DSCP};
1601 mask[idx] = 0x3f >> (6 - width);
1603 case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1604 info[idx] = (struct field_modify_info){1, 0,
1605 MLX5_MODI_OUT_IPV6_HOPLIMIT};
1607 mask[idx] = 0xff >> (8 - width);
1609 case RTE_FLOW_FIELD_IPV6_SRC:
1611 if (data->offset < 32) {
1612 info[idx] = (struct field_modify_info){4,
1614 MLX5_MODI_OUT_SIPV6_31_0};
1617 rte_cpu_to_be_32(0xffffffff >>
1621 mask[idx] = RTE_BE32(0xffffffff);
1628 if (data->offset < 64) {
1629 info[idx] = (struct field_modify_info){4,
1631 MLX5_MODI_OUT_SIPV6_63_32};
1634 rte_cpu_to_be_32(0xffffffff >>
1638 mask[idx] = RTE_BE32(0xffffffff);
1645 if (data->offset < 96) {
1646 info[idx] = (struct field_modify_info){4,
1648 MLX5_MODI_OUT_SIPV6_95_64};
1651 rte_cpu_to_be_32(0xffffffff >>
1655 mask[idx] = RTE_BE32(0xffffffff);
1662 info[idx] = (struct field_modify_info){4, 4 * idx,
1663 MLX5_MODI_OUT_SIPV6_127_96};
1664 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1667 if (data->offset < 32)
1668 info[idx++] = (struct field_modify_info){4, 0,
1669 MLX5_MODI_OUT_SIPV6_31_0};
1670 if (data->offset < 64)
1671 info[idx++] = (struct field_modify_info){4, 0,
1672 MLX5_MODI_OUT_SIPV6_63_32};
1673 if (data->offset < 96)
1674 info[idx++] = (struct field_modify_info){4, 0,
1675 MLX5_MODI_OUT_SIPV6_95_64};
1676 if (data->offset < 128)
1677 info[idx++] = (struct field_modify_info){4, 0,
1678 MLX5_MODI_OUT_SIPV6_127_96};
1681 case RTE_FLOW_FIELD_IPV6_DST:
1683 if (data->offset < 32) {
1684 info[idx] = (struct field_modify_info){4,
1686 MLX5_MODI_OUT_DIPV6_31_0};
1689 rte_cpu_to_be_32(0xffffffff >>
1693 mask[idx] = RTE_BE32(0xffffffff);
1700 if (data->offset < 64) {
1701 info[idx] = (struct field_modify_info){4,
1703 MLX5_MODI_OUT_DIPV6_63_32};
1706 rte_cpu_to_be_32(0xffffffff >>
1710 mask[idx] = RTE_BE32(0xffffffff);
1717 if (data->offset < 96) {
1718 info[idx] = (struct field_modify_info){4,
1720 MLX5_MODI_OUT_DIPV6_95_64};
1723 rte_cpu_to_be_32(0xffffffff >>
1727 mask[idx] = RTE_BE32(0xffffffff);
1734 info[idx] = (struct field_modify_info){4, 4 * idx,
1735 MLX5_MODI_OUT_DIPV6_127_96};
1736 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1739 if (data->offset < 32)
1740 info[idx++] = (struct field_modify_info){4, 0,
1741 MLX5_MODI_OUT_DIPV6_31_0};
1742 if (data->offset < 64)
1743 info[idx++] = (struct field_modify_info){4, 0,
1744 MLX5_MODI_OUT_DIPV6_63_32};
1745 if (data->offset < 96)
1746 info[idx++] = (struct field_modify_info){4, 0,
1747 MLX5_MODI_OUT_DIPV6_95_64};
1748 if (data->offset < 128)
1749 info[idx++] = (struct field_modify_info){4, 0,
1750 MLX5_MODI_OUT_DIPV6_127_96};
1753 case RTE_FLOW_FIELD_TCP_PORT_SRC:
1754 info[idx] = (struct field_modify_info){2, 0,
1755 MLX5_MODI_OUT_TCP_SPORT};
1757 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1759 case RTE_FLOW_FIELD_TCP_PORT_DST:
1760 info[idx] = (struct field_modify_info){2, 0,
1761 MLX5_MODI_OUT_TCP_DPORT};
1763 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1765 case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1766 info[idx] = (struct field_modify_info){4, 0,
1767 MLX5_MODI_OUT_TCP_SEQ_NUM};
1769 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1772 case RTE_FLOW_FIELD_TCP_ACK_NUM:
1773 info[idx] = (struct field_modify_info){4, 0,
1774 MLX5_MODI_OUT_TCP_ACK_NUM};
1776 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1779 case RTE_FLOW_FIELD_TCP_FLAGS:
1780 info[idx] = (struct field_modify_info){2, 0,
1781 MLX5_MODI_OUT_TCP_FLAGS};
1783 mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1785 case RTE_FLOW_FIELD_UDP_PORT_SRC:
1786 info[idx] = (struct field_modify_info){2, 0,
1787 MLX5_MODI_OUT_UDP_SPORT};
1789 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1791 case RTE_FLOW_FIELD_UDP_PORT_DST:
1792 info[idx] = (struct field_modify_info){2, 0,
1793 MLX5_MODI_OUT_UDP_DPORT};
1795 mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1797 case RTE_FLOW_FIELD_VXLAN_VNI:
1798 /* not supported yet */
1800 case RTE_FLOW_FIELD_GENEVE_VNI:
1801 /* not supported yet*/
1803 case RTE_FLOW_FIELD_GTP_TEID:
1804 info[idx] = (struct field_modify_info){4, 0,
1805 MLX5_MODI_GTP_TEID};
1807 mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1810 case RTE_FLOW_FIELD_TAG:
1812 int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1813 data->level, error);
1816 MLX5_ASSERT(reg != REG_NON);
1817 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1818 info[idx] = (struct field_modify_info){4, 0,
1822 rte_cpu_to_be_32(0xffffffff >>
1826 case RTE_FLOW_FIELD_MARK:
1828 int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1832 MLX5_ASSERT(reg != REG_NON);
1833 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1834 info[idx] = (struct field_modify_info){4, 0,
1838 rte_cpu_to_be_32(0xffffffff >>
1842 case RTE_FLOW_FIELD_META:
1844 unsigned int xmeta = config->dv_xmeta_en;
1845 int reg = flow_dv_get_metadata_reg(dev, attr, error);
1848 MLX5_ASSERT(reg != REG_NON);
1849 MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1850 if (xmeta == MLX5_XMETA_MODE_META16) {
1851 info[idx] = (struct field_modify_info){2, 0,
1854 mask[idx] = rte_cpu_to_be_16(0xffff >>
1856 } else if (xmeta == MLX5_XMETA_MODE_META32) {
1857 info[idx] = (struct field_modify_info){4, 0,
1861 rte_cpu_to_be_32(0xffffffff >>
1868 case RTE_FLOW_FIELD_POINTER:
1869 case RTE_FLOW_FIELD_VALUE:
1870 if (data->field == RTE_FLOW_FIELD_POINTER)
1871 memcpy(&val, (void *)(uintptr_t)data->value,
1875 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1877 if (dst_width == 48) {
1878 /*special case for MAC addresses */
1879 value[idx] = rte_cpu_to_be_16(val);
1882 } else if (dst_width > 16) {
1883 value[idx] = rte_cpu_to_be_32(val);
1885 } else if (dst_width > 8) {
1886 value[idx] = rte_cpu_to_be_16(val);
1889 value[idx] = (uint8_t)val;
1904 * Convert modify_field action to DV specification.
1907 * Pointer to the rte_eth_dev structure.
1908 * @param[in,out] resource
1909 * Pointer to the modify-header resource.
1911 * Pointer to action specification.
1913 * Attributes of flow that includes this item.
1915 * Pointer to the error structure.
1918 * 0 on success, a negative errno value otherwise and rte_errno is set.
1921 flow_dv_convert_action_modify_field
1922 (struct rte_eth_dev *dev,
1923 struct mlx5_flow_dv_modify_hdr_resource *resource,
1924 const struct rte_flow_action *action,
1925 const struct rte_flow_attr *attr,
1926 struct rte_flow_error *error)
1928 struct mlx5_priv *priv = dev->data->dev_private;
1929 struct mlx5_dev_config *config = &priv->config;
1930 const struct rte_flow_action_modify_field *conf =
1931 (const struct rte_flow_action_modify_field *)(action->conf);
1932 struct rte_flow_item item;
1933 struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1935 struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1937 uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1938 uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1940 uint32_t dst_width = mlx5_flow_item_field_width(config,
1943 if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1944 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1945 type = MLX5_MODIFICATION_TYPE_SET;
1946 /** For SET fill the destination field (field) first. */
1947 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1948 value, conf->width, dst_width, dev, attr, error);
1949 /** Then copy immediate value from source as per mask. */
1950 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1951 value, conf->width, dst_width, dev, attr, error);
1954 type = MLX5_MODIFICATION_TYPE_COPY;
1955 /** For COPY fill the destination field (dcopy) without mask. */
1956 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1957 value, conf->width, dst_width, dev, attr, error);
1958 /** Then construct the source field (field) with mask. */
1959 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1960 value, conf->width, dst_width, dev, attr, error);
1963 return flow_dv_convert_modify_action(&item,
1964 field, dcopy, resource, type, error);
1968 * Validate MARK item.
1971 * Pointer to the rte_eth_dev structure.
1973 * Item specification.
1975 * Attributes of flow that includes this item.
1977 * Pointer to error structure.
1980 * 0 on success, a negative errno value otherwise and rte_errno is set.
1983 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1984 const struct rte_flow_item *item,
1985 const struct rte_flow_attr *attr __rte_unused,
1986 struct rte_flow_error *error)
1988 struct mlx5_priv *priv = dev->data->dev_private;
1989 struct mlx5_dev_config *config = &priv->config;
1990 const struct rte_flow_item_mark *spec = item->spec;
1991 const struct rte_flow_item_mark *mask = item->mask;
1992 const struct rte_flow_item_mark nic_mask = {
1993 .id = priv->sh->dv_mark_mask,
1997 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1998 return rte_flow_error_set(error, ENOTSUP,
1999 RTE_FLOW_ERROR_TYPE_ITEM, item,
2000 "extended metadata feature"
2002 if (!mlx5_flow_ext_mreg_supported(dev))
2003 return rte_flow_error_set(error, ENOTSUP,
2004 RTE_FLOW_ERROR_TYPE_ITEM, item,
2005 "extended metadata register"
2006 " isn't supported");
2008 return rte_flow_error_set(error, ENOTSUP,
2009 RTE_FLOW_ERROR_TYPE_ITEM, item,
2010 "extended metadata register"
2011 " isn't available");
2012 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2016 return rte_flow_error_set(error, EINVAL,
2017 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2019 "data cannot be empty");
2020 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
2021 return rte_flow_error_set(error, EINVAL,
2022 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2024 "mark id exceeds the limit");
2028 return rte_flow_error_set(error, EINVAL,
2029 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2030 "mask cannot be zero");
2032 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2033 (const uint8_t *)&nic_mask,
2034 sizeof(struct rte_flow_item_mark),
2035 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2042 * Validate META item.
2045 * Pointer to the rte_eth_dev structure.
2047 * Item specification.
2049 * Attributes of flow that includes this item.
2051 * Pointer to error structure.
2054 * 0 on success, a negative errno value otherwise and rte_errno is set.
2057 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
2058 const struct rte_flow_item *item,
2059 const struct rte_flow_attr *attr,
2060 struct rte_flow_error *error)
2062 struct mlx5_priv *priv = dev->data->dev_private;
2063 struct mlx5_dev_config *config = &priv->config;
2064 const struct rte_flow_item_meta *spec = item->spec;
2065 const struct rte_flow_item_meta *mask = item->mask;
2066 struct rte_flow_item_meta nic_mask = {
2073 return rte_flow_error_set(error, EINVAL,
2074 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2076 "data cannot be empty");
2077 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2078 if (!mlx5_flow_ext_mreg_supported(dev))
2079 return rte_flow_error_set(error, ENOTSUP,
2080 RTE_FLOW_ERROR_TYPE_ITEM, item,
2081 "extended metadata register"
2082 " isn't supported");
2083 reg = flow_dv_get_metadata_reg(dev, attr, error);
2087 return rte_flow_error_set(error, ENOTSUP,
2088 RTE_FLOW_ERROR_TYPE_ITEM, item,
2089 "unavalable extended metadata register");
2091 return rte_flow_error_set(error, ENOTSUP,
2092 RTE_FLOW_ERROR_TYPE_ITEM, item,
2096 nic_mask.data = priv->sh->dv_meta_mask;
2099 return rte_flow_error_set(error, ENOTSUP,
2100 RTE_FLOW_ERROR_TYPE_ITEM, item,
2101 "extended metadata feature "
2102 "should be enabled when "
2103 "meta item is requested "
2104 "with e-switch mode ");
2106 return rte_flow_error_set(error, ENOTSUP,
2107 RTE_FLOW_ERROR_TYPE_ITEM, item,
2108 "match on metadata for ingress "
2109 "is not supported in legacy "
2113 mask = &rte_flow_item_meta_mask;
2115 return rte_flow_error_set(error, EINVAL,
2116 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2117 "mask cannot be zero");
2119 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2120 (const uint8_t *)&nic_mask,
2121 sizeof(struct rte_flow_item_meta),
2122 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2127 * Validate TAG item.
2130 * Pointer to the rte_eth_dev structure.
2132 * Item specification.
2134 * Attributes of flow that includes this item.
2136 * Pointer to error structure.
2139 * 0 on success, a negative errno value otherwise and rte_errno is set.
2142 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2143 const struct rte_flow_item *item,
2144 const struct rte_flow_attr *attr __rte_unused,
2145 struct rte_flow_error *error)
2147 const struct rte_flow_item_tag *spec = item->spec;
2148 const struct rte_flow_item_tag *mask = item->mask;
2149 const struct rte_flow_item_tag nic_mask = {
2150 .data = RTE_BE32(UINT32_MAX),
2155 if (!mlx5_flow_ext_mreg_supported(dev))
2156 return rte_flow_error_set(error, ENOTSUP,
2157 RTE_FLOW_ERROR_TYPE_ITEM, item,
2158 "extensive metadata register"
2159 " isn't supported");
2161 return rte_flow_error_set(error, EINVAL,
2162 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2164 "data cannot be empty");
2166 mask = &rte_flow_item_tag_mask;
2168 return rte_flow_error_set(error, EINVAL,
2169 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2170 "mask cannot be zero");
2172 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2173 (const uint8_t *)&nic_mask,
2174 sizeof(struct rte_flow_item_tag),
2175 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2178 if (mask->index != 0xff)
2179 return rte_flow_error_set(error, EINVAL,
2180 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2181 "partial mask for tag index"
2182 " is not supported");
2183 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2186 MLX5_ASSERT(ret != REG_NON);
2191 * Validate vport item.
2194 * Pointer to the rte_eth_dev structure.
2196 * Item specification.
2198 * Attributes of flow that includes this item.
2199 * @param[in] item_flags
2200 * Bit-fields that holds the items detected until now.
2202 * Pointer to error structure.
2205 * 0 on success, a negative errno value otherwise and rte_errno is set.
2208 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2209 const struct rte_flow_item *item,
2210 const struct rte_flow_attr *attr,
2211 uint64_t item_flags,
2212 struct rte_flow_error *error)
2214 const struct rte_flow_item_port_id *spec = item->spec;
2215 const struct rte_flow_item_port_id *mask = item->mask;
2216 const struct rte_flow_item_port_id switch_mask = {
2219 struct mlx5_priv *esw_priv;
2220 struct mlx5_priv *dev_priv;
2223 if (!attr->transfer)
2224 return rte_flow_error_set(error, EINVAL,
2225 RTE_FLOW_ERROR_TYPE_ITEM,
2227 "match on port id is valid only"
2228 " when transfer flag is enabled");
2229 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2230 return rte_flow_error_set(error, ENOTSUP,
2231 RTE_FLOW_ERROR_TYPE_ITEM, item,
2232 "multiple source ports are not"
2235 mask = &switch_mask;
2236 if (mask->id != 0xffffffff)
2237 return rte_flow_error_set(error, ENOTSUP,
2238 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2240 "no support for partial mask on"
2242 ret = mlx5_flow_item_acceptable
2243 (item, (const uint8_t *)mask,
2244 (const uint8_t *)&rte_flow_item_port_id_mask,
2245 sizeof(struct rte_flow_item_port_id),
2246 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2251 esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2253 return rte_flow_error_set(error, rte_errno,
2254 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2255 "failed to obtain E-Switch info for"
2257 dev_priv = mlx5_dev_to_eswitch_info(dev);
2259 return rte_flow_error_set(error, rte_errno,
2260 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2262 "failed to obtain E-Switch info");
2263 if (esw_priv->domain_id != dev_priv->domain_id)
2264 return rte_flow_error_set(error, EINVAL,
2265 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2266 "cannot match on a port from a"
2267 " different E-Switch");
2272 * Validate VLAN item.
2275 * Item specification.
2276 * @param[in] item_flags
2277 * Bit-fields that holds the items detected until now.
2279 * Ethernet device flow is being created on.
2281 * Pointer to error structure.
2284 * 0 on success, a negative errno value otherwise and rte_errno is set.
2287 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2288 uint64_t item_flags,
2289 struct rte_eth_dev *dev,
2290 struct rte_flow_error *error)
2292 const struct rte_flow_item_vlan *mask = item->mask;
2293 const struct rte_flow_item_vlan nic_mask = {
2294 .tci = RTE_BE16(UINT16_MAX),
2295 .inner_type = RTE_BE16(UINT16_MAX),
2298 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2300 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2301 MLX5_FLOW_LAYER_INNER_L4) :
2302 (MLX5_FLOW_LAYER_OUTER_L3 |
2303 MLX5_FLOW_LAYER_OUTER_L4);
2304 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2305 MLX5_FLOW_LAYER_OUTER_VLAN;
2307 if (item_flags & vlanm)
2308 return rte_flow_error_set(error, EINVAL,
2309 RTE_FLOW_ERROR_TYPE_ITEM, item,
2310 "multiple VLAN layers not supported");
2311 else if ((item_flags & l34m) != 0)
2312 return rte_flow_error_set(error, EINVAL,
2313 RTE_FLOW_ERROR_TYPE_ITEM, item,
2314 "VLAN cannot follow L3/L4 layer");
2316 mask = &rte_flow_item_vlan_mask;
2317 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2318 (const uint8_t *)&nic_mask,
2319 sizeof(struct rte_flow_item_vlan),
2320 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2323 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2324 struct mlx5_priv *priv = dev->data->dev_private;
2326 if (priv->vmwa_context) {
2328 * Non-NULL context means we have a virtual machine
2329 * and SR-IOV enabled, we have to create VLAN interface
2330 * to make hypervisor to setup E-Switch vport
2331 * context correctly. We avoid creating the multiple
2332 * VLAN interfaces, so we cannot support VLAN tag mask.
2334 return rte_flow_error_set(error, EINVAL,
2335 RTE_FLOW_ERROR_TYPE_ITEM,
2337 "VLAN tag mask is not"
2338 " supported in virtual"
2346 * GTP flags are contained in 1 byte of the format:
2347 * -------------------------------------------
2348 * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
2349 * |-----------------------------------------|
2350 * | value | Version | PT | Res | E | S | PN |
2351 * -------------------------------------------
2353 * Matching is supported only for GTP flags E, S, PN.
2355 #define MLX5_GTP_FLAGS_MASK 0x07
2358 * Validate GTP item.
2361 * Pointer to the rte_eth_dev structure.
2363 * Item specification.
2364 * @param[in] item_flags
2365 * Bit-fields that holds the items detected until now.
2367 * Pointer to error structure.
2370 * 0 on success, a negative errno value otherwise and rte_errno is set.
2373 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2374 const struct rte_flow_item *item,
2375 uint64_t item_flags,
2376 struct rte_flow_error *error)
2378 struct mlx5_priv *priv = dev->data->dev_private;
2379 const struct rte_flow_item_gtp *spec = item->spec;
2380 const struct rte_flow_item_gtp *mask = item->mask;
2381 const struct rte_flow_item_gtp nic_mask = {
2382 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2384 .teid = RTE_BE32(0xffffffff),
2387 if (!priv->config.hca_attr.tunnel_stateless_gtp)
2388 return rte_flow_error_set(error, ENOTSUP,
2389 RTE_FLOW_ERROR_TYPE_ITEM, item,
2390 "GTP support is not enabled");
2391 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2392 return rte_flow_error_set(error, ENOTSUP,
2393 RTE_FLOW_ERROR_TYPE_ITEM, item,
2394 "multiple tunnel layers not"
2396 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2397 return rte_flow_error_set(error, EINVAL,
2398 RTE_FLOW_ERROR_TYPE_ITEM, item,
2399 "no outer UDP layer found");
2401 mask = &rte_flow_item_gtp_mask;
2402 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2403 return rte_flow_error_set(error, ENOTSUP,
2404 RTE_FLOW_ERROR_TYPE_ITEM, item,
2405 "Match is supported for GTP"
2407 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2408 (const uint8_t *)&nic_mask,
2409 sizeof(struct rte_flow_item_gtp),
2410 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2414 * Validate GTP PSC item.
2417 * Item specification.
2418 * @param[in] last_item
2419 * Previous validated item in the pattern items.
2420 * @param[in] gtp_item
2421 * Previous GTP item specification.
2423 * Pointer to flow attributes.
2425 * Pointer to error structure.
2428 * 0 on success, a negative errno value otherwise and rte_errno is set.
2431 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2433 const struct rte_flow_item *gtp_item,
2434 const struct rte_flow_attr *attr,
2435 struct rte_flow_error *error)
2437 const struct rte_flow_item_gtp *gtp_spec;
2438 const struct rte_flow_item_gtp *gtp_mask;
2439 const struct rte_flow_item_gtp_psc *spec;
2440 const struct rte_flow_item_gtp_psc *mask;
2441 const struct rte_flow_item_gtp_psc nic_mask = {
2446 if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2447 return rte_flow_error_set
2448 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2449 "GTP PSC item must be preceded with GTP item");
2450 gtp_spec = gtp_item->spec;
2451 gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2452 /* GTP spec and E flag is requested to match zero. */
2454 (gtp_mask->v_pt_rsv_flags &
2455 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2456 return rte_flow_error_set
2457 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2458 "GTP E flag must be 1 to match GTP PSC");
2459 /* Check the flow is not created in group zero. */
2460 if (!attr->transfer && !attr->group)
2461 return rte_flow_error_set
2462 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2463 "GTP PSC is not supported for group 0");
2464 /* GTP spec is here and E flag is requested to match zero. */
2468 mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2469 if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2470 return rte_flow_error_set
2471 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2472 "PDU type should be smaller than 16");
2473 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2474 (const uint8_t *)&nic_mask,
2475 sizeof(struct rte_flow_item_gtp_psc),
2476 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2480 * Validate IPV4 item.
2481 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2482 * add specific validation of fragment_offset field,
2485 * Item specification.
2486 * @param[in] item_flags
2487 * Bit-fields that holds the items detected until now.
2489 * Pointer to error structure.
2492 * 0 on success, a negative errno value otherwise and rte_errno is set.
2495 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2496 uint64_t item_flags,
2498 uint16_t ether_type,
2499 struct rte_flow_error *error)
2502 const struct rte_flow_item_ipv4 *spec = item->spec;
2503 const struct rte_flow_item_ipv4 *last = item->last;
2504 const struct rte_flow_item_ipv4 *mask = item->mask;
2505 rte_be16_t fragment_offset_spec = 0;
2506 rte_be16_t fragment_offset_last = 0;
2507 const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2509 .src_addr = RTE_BE32(0xffffffff),
2510 .dst_addr = RTE_BE32(0xffffffff),
2511 .type_of_service = 0xff,
2512 .fragment_offset = RTE_BE16(0xffff),
2513 .next_proto_id = 0xff,
2514 .time_to_live = 0xff,
2518 ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2519 ether_type, &nic_ipv4_mask,
2520 MLX5_ITEM_RANGE_ACCEPTED, error);
2524 fragment_offset_spec = spec->hdr.fragment_offset &
2525 mask->hdr.fragment_offset;
2526 if (!fragment_offset_spec)
2529 * spec and mask are valid, enforce using full mask to make sure the
2530 * complete value is used correctly.
2532 if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2533 != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2534 return rte_flow_error_set(error, EINVAL,
2535 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2536 item, "must use full mask for"
2537 " fragment_offset");
2539 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2540 * indicating this is 1st fragment of fragmented packet.
2541 * This is not yet supported in MLX5, return appropriate error message.
2543 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2544 return rte_flow_error_set(error, ENOTSUP,
2545 RTE_FLOW_ERROR_TYPE_ITEM, item,
2546 "match on first fragment not "
2548 if (fragment_offset_spec && !last)
2549 return rte_flow_error_set(error, ENOTSUP,
2550 RTE_FLOW_ERROR_TYPE_ITEM, item,
2551 "specified value not supported");
2552 /* spec and last are valid, validate the specified range. */
2553 fragment_offset_last = last->hdr.fragment_offset &
2554 mask->hdr.fragment_offset;
2556 * Match on fragment_offset spec 0x2001 and last 0x3fff
2557 * means MF is 1 and frag-offset is > 0.
2558 * This packet is fragment 2nd and onward, excluding last.
2559 * This is not yet supported in MLX5, return appropriate
2562 if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2563 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2564 return rte_flow_error_set(error, ENOTSUP,
2565 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2566 last, "match on following "
2567 "fragments not supported");
2569 * Match on fragment_offset spec 0x0001 and last 0x1fff
2570 * means MF is 0 and frag-offset is > 0.
2571 * This packet is last fragment of fragmented packet.
2572 * This is not yet supported in MLX5, return appropriate
2575 if (fragment_offset_spec == RTE_BE16(1) &&
2576 fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2577 return rte_flow_error_set(error, ENOTSUP,
2578 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2579 last, "match on last "
2580 "fragment not supported");
2582 * Match on fragment_offset spec 0x0001 and last 0x3fff
2583 * means MF and/or frag-offset is not 0.
2584 * This is a fragmented packet.
2585 * Other range values are invalid and rejected.
2587 if (!(fragment_offset_spec == RTE_BE16(1) &&
2588 fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2589 return rte_flow_error_set(error, ENOTSUP,
2590 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2591 "specified range not supported");
2596 * Validate IPV6 fragment extension item.
2599 * Item specification.
2600 * @param[in] item_flags
2601 * Bit-fields that holds the items detected until now.
2603 * Pointer to error structure.
2606 * 0 on success, a negative errno value otherwise and rte_errno is set.
2609 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2610 uint64_t item_flags,
2611 struct rte_flow_error *error)
2613 const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2614 const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2615 const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2616 rte_be16_t frag_data_spec = 0;
2617 rte_be16_t frag_data_last = 0;
2618 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2619 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2620 MLX5_FLOW_LAYER_OUTER_L4;
2622 struct rte_flow_item_ipv6_frag_ext nic_mask = {
2624 .next_header = 0xff,
2625 .frag_data = RTE_BE16(0xffff),
2629 if (item_flags & l4m)
2630 return rte_flow_error_set(error, EINVAL,
2631 RTE_FLOW_ERROR_TYPE_ITEM, item,
2632 "ipv6 fragment extension item cannot "
2634 if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2635 (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2636 return rte_flow_error_set(error, EINVAL,
2637 RTE_FLOW_ERROR_TYPE_ITEM, item,
2638 "ipv6 fragment extension item must "
2639 "follow ipv6 item");
2641 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2642 if (!frag_data_spec)
2645 * spec and mask are valid, enforce using full mask to make sure the
2646 * complete value is used correctly.
2648 if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2649 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2650 return rte_flow_error_set(error, EINVAL,
2651 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2652 item, "must use full mask for"
2655 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
2656 * This is 1st fragment of fragmented packet.
2658 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2659 return rte_flow_error_set(error, ENOTSUP,
2660 RTE_FLOW_ERROR_TYPE_ITEM, item,
2661 "match on first fragment not "
2663 if (frag_data_spec && !last)
2664 return rte_flow_error_set(error, EINVAL,
2665 RTE_FLOW_ERROR_TYPE_ITEM, item,
2666 "specified value not supported");
2667 ret = mlx5_flow_item_acceptable
2668 (item, (const uint8_t *)mask,
2669 (const uint8_t *)&nic_mask,
2670 sizeof(struct rte_flow_item_ipv6_frag_ext),
2671 MLX5_ITEM_RANGE_ACCEPTED, error);
2674 /* spec and last are valid, validate the specified range. */
2675 frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2677 * Match on frag_data spec 0x0009 and last 0xfff9
2678 * means M is 1 and frag-offset is > 0.
2679 * This packet is fragment 2nd and onward, excluding last.
2680 * This is not yet supported in MLX5, return appropriate
2683 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2684 RTE_IPV6_EHDR_MF_MASK) &&
2685 frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2686 return rte_flow_error_set(error, ENOTSUP,
2687 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2688 last, "match on following "
2689 "fragments not supported");
2691 * Match on frag_data spec 0x0008 and last 0xfff8
2692 * means M is 0 and frag-offset is > 0.
2693 * This packet is last fragment of fragmented packet.
2694 * This is not yet supported in MLX5, return appropriate
2697 if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2698 frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2699 return rte_flow_error_set(error, ENOTSUP,
2700 RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2701 last, "match on last "
2702 "fragment not supported");
2703 /* Other range values are invalid and rejected. */
2704 return rte_flow_error_set(error, EINVAL,
2705 RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2706 "specified range not supported");
2710 * Validate ASO CT item.
2713 * Pointer to the rte_eth_dev structure.
2715 * Item specification.
2716 * @param[in] item_flags
2717 * Pointer to bit-fields that holds the items detected until now.
2719 * Pointer to error structure.
2722 * 0 on success, a negative errno value otherwise and rte_errno is set.
2725 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2726 const struct rte_flow_item *item,
2727 uint64_t *item_flags,
2728 struct rte_flow_error *error)
2730 const struct rte_flow_item_conntrack *spec = item->spec;
2731 const struct rte_flow_item_conntrack *mask = item->mask;
2735 if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2736 return rte_flow_error_set(error, EINVAL,
2737 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2738 "Only one CT is supported");
2740 mask = &rte_flow_item_conntrack_mask;
2741 flags = spec->flags & mask->flags;
2742 if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2743 ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2744 (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2745 (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2746 return rte_flow_error_set(error, EINVAL,
2747 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2748 "Conflict status bits");
2749 /* State change also needs to be considered. */
2750 *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2755 * Validate the pop VLAN action.
2758 * Pointer to the rte_eth_dev structure.
2759 * @param[in] action_flags
2760 * Holds the actions detected until now.
2762 * Pointer to the pop vlan action.
2763 * @param[in] item_flags
2764 * The items found in this flow rule.
2766 * Pointer to flow attributes.
2768 * Pointer to error structure.
2771 * 0 on success, a negative errno value otherwise and rte_errno is set.
2774 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2775 uint64_t action_flags,
2776 const struct rte_flow_action *action,
2777 uint64_t item_flags,
2778 const struct rte_flow_attr *attr,
2779 struct rte_flow_error *error)
2781 const struct mlx5_priv *priv = dev->data->dev_private;
2785 if (!priv->sh->pop_vlan_action)
2786 return rte_flow_error_set(error, ENOTSUP,
2787 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2789 "pop vlan action is not supported");
2791 return rte_flow_error_set(error, ENOTSUP,
2792 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2794 "pop vlan action not supported for "
2796 if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2797 return rte_flow_error_set(error, ENOTSUP,
2798 RTE_FLOW_ERROR_TYPE_ACTION, action,
2799 "no support for multiple VLAN "
2801 /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2802 if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2803 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2804 return rte_flow_error_set(error, ENOTSUP,
2805 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2807 "cannot pop vlan after decap without "
2808 "match on inner vlan in the flow");
2809 /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2810 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2811 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2812 return rte_flow_error_set(error, ENOTSUP,
2813 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2815 "cannot pop vlan without a "
2816 "match on (outer) vlan in the flow");
2817 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2818 return rte_flow_error_set(error, EINVAL,
2819 RTE_FLOW_ERROR_TYPE_ACTION, action,
2820 "wrong action order, port_id should "
2821 "be after pop VLAN action");
2822 if (!attr->transfer && priv->representor)
2823 return rte_flow_error_set(error, ENOTSUP,
2824 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2825 "pop vlan action for VF representor "
2826 "not supported on NIC table");
2831 * Get VLAN default info from vlan match info.
2834 * the list of item specifications.
2836 * pointer VLAN info to fill to.
2839 * 0 on success, a negative errno value otherwise and rte_errno is set.
2842 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2843 struct rte_vlan_hdr *vlan)
2845 const struct rte_flow_item_vlan nic_mask = {
2846 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2847 MLX5DV_FLOW_VLAN_VID_MASK),
2848 .inner_type = RTE_BE16(0xffff),
2853 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2854 int type = items->type;
2856 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2857 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2860 if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2861 const struct rte_flow_item_vlan *vlan_m = items->mask;
2862 const struct rte_flow_item_vlan *vlan_v = items->spec;
2864 /* If VLAN item in pattern doesn't contain data, return here. */
2869 /* Only full match values are accepted */
2870 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2871 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2872 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2874 rte_be_to_cpu_16(vlan_v->tci &
2875 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2877 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2878 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2879 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2881 rte_be_to_cpu_16(vlan_v->tci &
2882 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2884 if (vlan_m->inner_type == nic_mask.inner_type)
2885 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2886 vlan_m->inner_type);
2891 * Validate the push VLAN action.
2894 * Pointer to the rte_eth_dev structure.
2895 * @param[in] action_flags
2896 * Holds the actions detected until now.
2897 * @param[in] item_flags
2898 * The items found in this flow rule.
2900 * Pointer to the action structure.
2902 * Pointer to flow attributes
2904 * Pointer to error structure.
2907 * 0 on success, a negative errno value otherwise and rte_errno is set.
2910 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2911 uint64_t action_flags,
2912 const struct rte_flow_item_vlan *vlan_m,
2913 const struct rte_flow_action *action,
2914 const struct rte_flow_attr *attr,
2915 struct rte_flow_error *error)
2917 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2918 const struct mlx5_priv *priv = dev->data->dev_private;
2920 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2921 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2922 return rte_flow_error_set(error, EINVAL,
2923 RTE_FLOW_ERROR_TYPE_ACTION, action,
2924 "invalid vlan ethertype");
2925 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2926 return rte_flow_error_set(error, EINVAL,
2927 RTE_FLOW_ERROR_TYPE_ACTION, action,
2928 "wrong action order, port_id should "
2929 "be after push VLAN");
2930 if (!attr->transfer && priv->representor)
2931 return rte_flow_error_set(error, ENOTSUP,
2932 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2933 "push vlan action for VF representor "
2934 "not supported on NIC table");
2936 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2937 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2938 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2939 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2940 !(mlx5_flow_find_action
2941 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2942 return rte_flow_error_set(error, EINVAL,
2943 RTE_FLOW_ERROR_TYPE_ACTION, action,
2944 "not full match mask on VLAN PCP and "
2945 "there is no of_set_vlan_pcp action, "
2946 "push VLAN action cannot figure out "
2949 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2950 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2951 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2952 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2953 !(mlx5_flow_find_action
2954 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2955 return rte_flow_error_set(error, EINVAL,
2956 RTE_FLOW_ERROR_TYPE_ACTION, action,
2957 "not full match mask on VLAN VID and "
2958 "there is no of_set_vlan_vid action, "
2959 "push VLAN action cannot figure out "
2966 * Validate the set VLAN PCP.
2968 * @param[in] action_flags
2969 * Holds the actions detected until now.
2970 * @param[in] actions
2971 * Pointer to the list of actions remaining in the flow rule.
2973 * Pointer to error structure.
2976 * 0 on success, a negative errno value otherwise and rte_errno is set.
2979 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2980 const struct rte_flow_action actions[],
2981 struct rte_flow_error *error)
2983 const struct rte_flow_action *action = actions;
2984 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2986 if (conf->vlan_pcp > 7)
2987 return rte_flow_error_set(error, EINVAL,
2988 RTE_FLOW_ERROR_TYPE_ACTION, action,
2989 "VLAN PCP value is too big");
2990 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2991 return rte_flow_error_set(error, ENOTSUP,
2992 RTE_FLOW_ERROR_TYPE_ACTION, action,
2993 "set VLAN PCP action must follow "
2994 "the push VLAN action");
2995 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2996 return rte_flow_error_set(error, ENOTSUP,
2997 RTE_FLOW_ERROR_TYPE_ACTION, action,
2998 "Multiple VLAN PCP modification are "
3000 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3001 return rte_flow_error_set(error, EINVAL,
3002 RTE_FLOW_ERROR_TYPE_ACTION, action,
3003 "wrong action order, port_id should "
3004 "be after set VLAN PCP");
3009 * Validate the set VLAN VID.
3011 * @param[in] item_flags
3012 * Holds the items detected in this rule.
3013 * @param[in] action_flags
3014 * Holds the actions detected until now.
3015 * @param[in] actions
3016 * Pointer to the list of actions remaining in the flow rule.
3018 * Pointer to error structure.
3021 * 0 on success, a negative errno value otherwise and rte_errno is set.
3024 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
3025 uint64_t action_flags,
3026 const struct rte_flow_action actions[],
3027 struct rte_flow_error *error)
3029 const struct rte_flow_action *action = actions;
3030 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3032 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3033 return rte_flow_error_set(error, EINVAL,
3034 RTE_FLOW_ERROR_TYPE_ACTION, action,
3035 "VLAN VID value is too big");
3036 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3037 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3038 return rte_flow_error_set(error, ENOTSUP,
3039 RTE_FLOW_ERROR_TYPE_ACTION, action,
3040 "set VLAN VID action must follow push"
3041 " VLAN action or match on VLAN item");
3042 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3043 return rte_flow_error_set(error, ENOTSUP,
3044 RTE_FLOW_ERROR_TYPE_ACTION, action,
3045 "Multiple VLAN VID modifications are "
3047 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3048 return rte_flow_error_set(error, EINVAL,
3049 RTE_FLOW_ERROR_TYPE_ACTION, action,
3050 "wrong action order, port_id should "
3051 "be after set VLAN VID");
3056 * Validate the FLAG action.
3059 * Pointer to the rte_eth_dev structure.
3060 * @param[in] action_flags
3061 * Holds the actions detected until now.
3063 * Pointer to flow attributes
3065 * Pointer to error structure.
3068 * 0 on success, a negative errno value otherwise and rte_errno is set.
3071 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3072 uint64_t action_flags,
3073 const struct rte_flow_attr *attr,
3074 struct rte_flow_error *error)
3076 struct mlx5_priv *priv = dev->data->dev_private;
3077 struct mlx5_dev_config *config = &priv->config;
3080 /* Fall back if no extended metadata register support. */
3081 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3082 return mlx5_flow_validate_action_flag(action_flags, attr,
3084 /* Extensive metadata mode requires registers. */
3085 if (!mlx5_flow_ext_mreg_supported(dev))
3086 return rte_flow_error_set(error, ENOTSUP,
3087 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3088 "no metadata registers "
3089 "to support flag action");
3090 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3091 return rte_flow_error_set(error, ENOTSUP,
3092 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3093 "extended metadata register"
3094 " isn't available");
3095 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3098 MLX5_ASSERT(ret > 0);
3099 if (action_flags & MLX5_FLOW_ACTION_MARK)
3100 return rte_flow_error_set(error, EINVAL,
3101 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3102 "can't mark and flag in same flow");
3103 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3104 return rte_flow_error_set(error, EINVAL,
3105 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3107 " actions in same flow");
3112 * Validate MARK action.
3115 * Pointer to the rte_eth_dev structure.
3117 * Pointer to action.
3118 * @param[in] action_flags
3119 * Holds the actions detected until now.
3121 * Pointer to flow attributes
3123 * Pointer to error structure.
3126 * 0 on success, a negative errno value otherwise and rte_errno is set.
3129 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3130 const struct rte_flow_action *action,
3131 uint64_t action_flags,
3132 const struct rte_flow_attr *attr,
3133 struct rte_flow_error *error)
3135 struct mlx5_priv *priv = dev->data->dev_private;
3136 struct mlx5_dev_config *config = &priv->config;
3137 const struct rte_flow_action_mark *mark = action->conf;
3140 if (is_tunnel_offload_active(dev))
3141 return rte_flow_error_set(error, ENOTSUP,
3142 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3144 "if tunnel offload active");
3145 /* Fall back if no extended metadata register support. */
3146 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3147 return mlx5_flow_validate_action_mark(action, action_flags,
3149 /* Extensive metadata mode requires registers. */
3150 if (!mlx5_flow_ext_mreg_supported(dev))
3151 return rte_flow_error_set(error, ENOTSUP,
3152 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3153 "no metadata registers "
3154 "to support mark action");
3155 if (!priv->sh->dv_mark_mask)
3156 return rte_flow_error_set(error, ENOTSUP,
3157 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3158 "extended metadata register"
3159 " isn't available");
3160 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3163 MLX5_ASSERT(ret > 0);
3165 return rte_flow_error_set(error, EINVAL,
3166 RTE_FLOW_ERROR_TYPE_ACTION, action,
3167 "configuration cannot be null");
3168 if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3169 return rte_flow_error_set(error, EINVAL,
3170 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3172 "mark id exceeds the limit");
3173 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3174 return rte_flow_error_set(error, EINVAL,
3175 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3176 "can't flag and mark in same flow");
3177 if (action_flags & MLX5_FLOW_ACTION_MARK)
3178 return rte_flow_error_set(error, EINVAL,
3179 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3180 "can't have 2 mark actions in same"
3186 * Validate SET_META action.
3189 * Pointer to the rte_eth_dev structure.
3191 * Pointer to the action structure.
3192 * @param[in] action_flags
3193 * Holds the actions detected until now.
3195 * Pointer to flow attributes
3197 * Pointer to error structure.
3200 * 0 on success, a negative errno value otherwise and rte_errno is set.
3203 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3204 const struct rte_flow_action *action,
3205 uint64_t action_flags __rte_unused,
3206 const struct rte_flow_attr *attr,
3207 struct rte_flow_error *error)
3209 const struct rte_flow_action_set_meta *conf;
3210 uint32_t nic_mask = UINT32_MAX;
3213 if (!mlx5_flow_ext_mreg_supported(dev))
3214 return rte_flow_error_set(error, ENOTSUP,
3215 RTE_FLOW_ERROR_TYPE_ACTION, action,
3216 "extended metadata register"
3217 " isn't supported");
3218 reg = flow_dv_get_metadata_reg(dev, attr, error);
3222 return rte_flow_error_set(error, ENOTSUP,
3223 RTE_FLOW_ERROR_TYPE_ACTION, action,
3224 "unavalable extended metadata register");
3225 if (reg != REG_A && reg != REG_B) {
3226 struct mlx5_priv *priv = dev->data->dev_private;
3228 nic_mask = priv->sh->dv_meta_mask;
3230 if (!(action->conf))
3231 return rte_flow_error_set(error, EINVAL,
3232 RTE_FLOW_ERROR_TYPE_ACTION, action,
3233 "configuration cannot be null");
3234 conf = (const struct rte_flow_action_set_meta *)action->conf;
3236 return rte_flow_error_set(error, EINVAL,
3237 RTE_FLOW_ERROR_TYPE_ACTION, action,
3238 "zero mask doesn't have any effect");
3239 if (conf->mask & ~nic_mask)
3240 return rte_flow_error_set(error, EINVAL,
3241 RTE_FLOW_ERROR_TYPE_ACTION, action,
3242 "meta data must be within reg C0");
3247 * Validate SET_TAG action.
3250 * Pointer to the rte_eth_dev structure.
3252 * Pointer to the action structure.
3253 * @param[in] action_flags
3254 * Holds the actions detected until now.
3256 * Pointer to flow attributes
3258 * Pointer to error structure.
3261 * 0 on success, a negative errno value otherwise and rte_errno is set.
3264 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3265 const struct rte_flow_action *action,
3266 uint64_t action_flags,
3267 const struct rte_flow_attr *attr,
3268 struct rte_flow_error *error)
3270 const struct rte_flow_action_set_tag *conf;
3271 const uint64_t terminal_action_flags =
3272 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3273 MLX5_FLOW_ACTION_RSS;
3276 if (!mlx5_flow_ext_mreg_supported(dev))
3277 return rte_flow_error_set(error, ENOTSUP,
3278 RTE_FLOW_ERROR_TYPE_ACTION, action,
3279 "extensive metadata register"
3280 " isn't supported");
3281 if (!(action->conf))
3282 return rte_flow_error_set(error, EINVAL,
3283 RTE_FLOW_ERROR_TYPE_ACTION, action,
3284 "configuration cannot be null");
3285 conf = (const struct rte_flow_action_set_tag *)action->conf;
3287 return rte_flow_error_set(error, EINVAL,
3288 RTE_FLOW_ERROR_TYPE_ACTION, action,
3289 "zero mask doesn't have any effect");
3290 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3293 if (!attr->transfer && attr->ingress &&
3294 (action_flags & terminal_action_flags))
3295 return rte_flow_error_set(error, EINVAL,
3296 RTE_FLOW_ERROR_TYPE_ACTION, action,
3297 "set_tag has no effect"
3298 " with terminal actions");
3303 * Check if action counter is shared by either old or new mechanism.
3306 * Pointer to the action structure.
3309 * True when counter is shared, false otherwise.
3312 is_shared_action_count(const struct rte_flow_action *action)
3314 const struct rte_flow_action_count *count =
3315 (const struct rte_flow_action_count *)action->conf;
3317 if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3319 return !!(count && count->shared);
3323 * Validate count action.
3326 * Pointer to rte_eth_dev structure.
3328 * Indicator if action is shared.
3329 * @param[in] action_flags
3330 * Holds the actions detected until now.
3332 * Pointer to error structure.
3335 * 0 on success, a negative errno value otherwise and rte_errno is set.
3338 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3339 uint64_t action_flags,
3340 struct rte_flow_error *error)
3342 struct mlx5_priv *priv = dev->data->dev_private;
3344 if (!priv->config.devx)
3346 if (action_flags & MLX5_FLOW_ACTION_COUNT)
3347 return rte_flow_error_set(error, EINVAL,
3348 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3349 "duplicate count actions set");
3350 if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3351 !priv->sh->flow_hit_aso_en)
3352 return rte_flow_error_set(error, EINVAL,
3353 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3354 "old age and shared count combination is not supported");
3355 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3359 return rte_flow_error_set
3361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3363 "count action not supported");
3367 * Validate the L2 encap action.
3370 * Pointer to the rte_eth_dev structure.
3371 * @param[in] action_flags
3372 * Holds the actions detected until now.
3374 * Pointer to the action structure.
3376 * Pointer to flow attributes.
3378 * Pointer to error structure.
3381 * 0 on success, a negative errno value otherwise and rte_errno is set.
3384 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3385 uint64_t action_flags,
3386 const struct rte_flow_action *action,
3387 const struct rte_flow_attr *attr,
3388 struct rte_flow_error *error)
3390 const struct mlx5_priv *priv = dev->data->dev_private;
3392 if (!(action->conf))
3393 return rte_flow_error_set(error, EINVAL,
3394 RTE_FLOW_ERROR_TYPE_ACTION, action,
3395 "configuration cannot be null");
3396 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3397 return rte_flow_error_set(error, EINVAL,
3398 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3399 "can only have a single encap action "
3401 if (!attr->transfer && priv->representor)
3402 return rte_flow_error_set(error, ENOTSUP,
3403 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3404 "encap action for VF representor "
3405 "not supported on NIC table");
3410 * Validate a decap action.
3413 * Pointer to the rte_eth_dev structure.
3414 * @param[in] action_flags
3415 * Holds the actions detected until now.
3417 * Pointer to the action structure.
3418 * @param[in] item_flags
3419 * Holds the items detected.
3421 * Pointer to flow attributes
3423 * Pointer to error structure.
3426 * 0 on success, a negative errno value otherwise and rte_errno is set.
3429 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3430 uint64_t action_flags,
3431 const struct rte_flow_action *action,
3432 const uint64_t item_flags,
3433 const struct rte_flow_attr *attr,
3434 struct rte_flow_error *error)
3436 const struct mlx5_priv *priv = dev->data->dev_private;
3438 if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3439 !priv->config.decap_en)
3440 return rte_flow_error_set(error, ENOTSUP,
3441 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3442 "decap is not enabled");
3443 if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3444 return rte_flow_error_set(error, ENOTSUP,
3445 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3447 MLX5_FLOW_ACTION_DECAP ? "can only "
3448 "have a single decap action" : "decap "
3449 "after encap is not supported");
3450 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3451 return rte_flow_error_set(error, EINVAL,
3452 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3453 "can't have decap action after"
3456 return rte_flow_error_set(error, ENOTSUP,
3457 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3459 "decap action not supported for "
3461 if (!attr->transfer && priv->representor)
3462 return rte_flow_error_set(error, ENOTSUP,
3463 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3464 "decap action for VF representor "
3465 "not supported on NIC table");
3466 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3467 !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3468 return rte_flow_error_set(error, ENOTSUP,
3469 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3470 "VXLAN item should be present for VXLAN decap");
3474 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3477 * Validate the raw encap and decap actions.
3480 * Pointer to the rte_eth_dev structure.
3482 * Pointer to the decap action.
3484 * Pointer to the encap action.
3486 * Pointer to flow attributes
3487 * @param[in/out] action_flags
3488 * Holds the actions detected until now.
3489 * @param[out] actions_n
3490 * pointer to the number of actions counter.
3492 * Pointer to the action structure.
3493 * @param[in] item_flags
3494 * Holds the items detected.
3496 * Pointer to error structure.
3499 * 0 on success, a negative errno value otherwise and rte_errno is set.
3502 flow_dv_validate_action_raw_encap_decap
3503 (struct rte_eth_dev *dev,
3504 const struct rte_flow_action_raw_decap *decap,
3505 const struct rte_flow_action_raw_encap *encap,
3506 const struct rte_flow_attr *attr, uint64_t *action_flags,
3507 int *actions_n, const struct rte_flow_action *action,
3508 uint64_t item_flags, struct rte_flow_error *error)
3510 const struct mlx5_priv *priv = dev->data->dev_private;
3513 if (encap && (!encap->size || !encap->data))
3514 return rte_flow_error_set(error, EINVAL,
3515 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3516 "raw encap data cannot be empty");
3517 if (decap && encap) {
3518 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3519 encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3522 else if (encap->size <=
3523 MLX5_ENCAPSULATION_DECISION_SIZE &&
3525 MLX5_ENCAPSULATION_DECISION_SIZE)
3528 else if (encap->size >
3529 MLX5_ENCAPSULATION_DECISION_SIZE &&
3531 MLX5_ENCAPSULATION_DECISION_SIZE)
3532 /* 2 L2 actions: encap and decap. */
3535 return rte_flow_error_set(error,
3537 RTE_FLOW_ERROR_TYPE_ACTION,
3538 NULL, "unsupported too small "
3539 "raw decap and too small raw "
3540 "encap combination");
3543 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3544 item_flags, attr, error);
3547 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3551 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3552 return rte_flow_error_set(error, ENOTSUP,
3553 RTE_FLOW_ERROR_TYPE_ACTION,
3555 "small raw encap size");
3556 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3557 return rte_flow_error_set(error, EINVAL,
3558 RTE_FLOW_ERROR_TYPE_ACTION,
3560 "more than one encap action");
3561 if (!attr->transfer && priv->representor)
3562 return rte_flow_error_set
3564 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3565 "encap action for VF representor "
3566 "not supported on NIC table");
3567 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3574 * Validate the ASO CT action.
3577 * Pointer to the rte_eth_dev structure.
3578 * @param[in] action_flags
3579 * Holds the actions detected until now.
3580 * @param[in] item_flags
3581 * The items found in this flow rule.
3583 * Pointer to flow attributes.
3585 * Pointer to error structure.
3588 * 0 on success, a negative errno value otherwise and rte_errno is set.
3591 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3592 uint64_t action_flags,
3593 uint64_t item_flags,
3594 const struct rte_flow_attr *attr,
3595 struct rte_flow_error *error)
3599 if (attr->group == 0 && !attr->transfer)
3600 return rte_flow_error_set(error, ENOTSUP,
3601 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3603 "Only support non-root table");
3604 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3605 return rte_flow_error_set(error, ENOTSUP,
3606 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3607 "CT cannot follow a fate action");
3608 if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3609 (action_flags & MLX5_FLOW_ACTION_AGE))
3610 return rte_flow_error_set(error, EINVAL,
3611 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3612 "Only one ASO action is supported");
3613 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3614 return rte_flow_error_set(error, EINVAL,
3615 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3616 "Encap cannot exist before CT");
3617 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3618 return rte_flow_error_set(error, EINVAL,
3619 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3620 "Not a outer TCP packet");
3625 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3626 struct mlx5_list_entry *entry, void *cb_ctx)
3628 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3629 struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3630 struct mlx5_flow_dv_encap_decap_resource *resource;
3632 resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3634 if (resource->reformat_type == ctx_resource->reformat_type &&
3635 resource->ft_type == ctx_resource->ft_type &&
3636 resource->flags == ctx_resource->flags &&
3637 resource->size == ctx_resource->size &&
3638 !memcmp((const void *)resource->buf,
3639 (const void *)ctx_resource->buf,
3645 struct mlx5_list_entry *
3646 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3648 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3649 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3650 struct mlx5dv_dr_domain *domain;
3651 struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3652 struct mlx5_flow_dv_encap_decap_resource *resource;
3656 if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3657 domain = sh->fdb_domain;
3658 else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3659 domain = sh->rx_domain;
3661 domain = sh->tx_domain;
3662 /* Register new encap/decap resource. */
3663 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3665 rte_flow_error_set(ctx->error, ENOMEM,
3666 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3667 "cannot allocate resource memory");
3670 *resource = *ctx_resource;
3671 resource->idx = idx;
3672 ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
3676 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3677 rte_flow_error_set(ctx->error, ENOMEM,
3678 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3679 NULL, "cannot create action");
3683 return &resource->entry;
3686 struct mlx5_list_entry *
3687 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3690 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3691 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3692 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3695 cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3697 if (!cache_resource) {
3698 rte_flow_error_set(ctx->error, ENOMEM,
3699 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3700 "cannot allocate resource memory");
3703 memcpy(cache_resource, oentry, sizeof(*cache_resource));
3704 cache_resource->idx = idx;
3705 return &cache_resource->entry;
3709 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3711 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3712 struct mlx5_flow_dv_encap_decap_resource *res =
3713 container_of(entry, typeof(*res), entry);
3715 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3719 * Find existing encap/decap resource or create and register a new one.
3721 * @param[in, out] dev
3722 * Pointer to rte_eth_dev structure.
3723 * @param[in, out] resource
3724 * Pointer to encap/decap resource.
3725 * @parm[in, out] dev_flow
3726 * Pointer to the dev_flow.
3728 * pointer to error structure.
3731 * 0 on success otherwise -errno and errno is set.
3734 flow_dv_encap_decap_resource_register
3735 (struct rte_eth_dev *dev,
3736 struct mlx5_flow_dv_encap_decap_resource *resource,
3737 struct mlx5_flow *dev_flow,
3738 struct rte_flow_error *error)
3740 struct mlx5_priv *priv = dev->data->dev_private;
3741 struct mlx5_dev_ctx_shared *sh = priv->sh;
3742 struct mlx5_list_entry *entry;
3746 uint32_t refmt_type:8;
3748 * Header reformat actions can be shared between
3749 * non-root tables. One bit to indicate non-root
3753 uint32_t reserve:15;
3756 } encap_decap_key = {
3758 .ft_type = resource->ft_type,
3759 .refmt_type = resource->reformat_type,
3760 .is_root = !!dev_flow->dv.group,
3764 struct mlx5_flow_cb_ctx ctx = {
3768 struct mlx5_hlist *encaps_decaps;
3771 encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3773 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3775 flow_dv_encap_decap_create_cb,
3776 flow_dv_encap_decap_match_cb,
3777 flow_dv_encap_decap_remove_cb,
3778 flow_dv_encap_decap_clone_cb,
3779 flow_dv_encap_decap_clone_free_cb);
3780 if (unlikely(!encaps_decaps))
3782 resource->flags = dev_flow->dv.group ? 0 : 1;
3783 key64 = __rte_raw_cksum(&encap_decap_key.v32,
3784 sizeof(encap_decap_key.v32), 0);
3785 if (resource->reformat_type !=
3786 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3788 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3789 entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3792 resource = container_of(entry, typeof(*resource), entry);
3793 dev_flow->dv.encap_decap = resource;
3794 dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3799 * Find existing table jump resource or create and register a new one.
3801 * @param[in, out] dev
3802 * Pointer to rte_eth_dev structure.
3803 * @param[in, out] tbl
3804 * Pointer to flow table resource.
3805 * @parm[in, out] dev_flow
3806 * Pointer to the dev_flow.
3808 * pointer to error structure.
3811 * 0 on success otherwise -errno and errno is set.
3814 flow_dv_jump_tbl_resource_register
3815 (struct rte_eth_dev *dev __rte_unused,
3816 struct mlx5_flow_tbl_resource *tbl,
3817 struct mlx5_flow *dev_flow,
3818 struct rte_flow_error *error __rte_unused)
3820 struct mlx5_flow_tbl_data_entry *tbl_data =
3821 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3824 MLX5_ASSERT(tbl_data->jump.action);
3825 dev_flow->handle->rix_jump = tbl_data->idx;
3826 dev_flow->dv.jump = &tbl_data->jump;
3831 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3832 struct mlx5_list_entry *entry, void *cb_ctx)
3834 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3835 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3836 struct mlx5_flow_dv_port_id_action_resource *res =
3837 container_of(entry, typeof(*res), entry);
3839 return ref->port_id != res->port_id;
3842 struct mlx5_list_entry *
3843 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3845 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3846 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3847 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3848 struct mlx5_flow_dv_port_id_action_resource *resource;
3852 /* Register new port id action resource. */
3853 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3855 rte_flow_error_set(ctx->error, ENOMEM,
3856 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3857 "cannot allocate port_id action memory");
3861 ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3865 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3866 rte_flow_error_set(ctx->error, ENOMEM,
3867 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3868 "cannot create action");
3871 resource->idx = idx;
3872 return &resource->entry;
3875 struct mlx5_list_entry *
3876 flow_dv_port_id_clone_cb(void *tool_ctx,
3877 struct mlx5_list_entry *entry __rte_unused,
3880 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3881 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3882 struct mlx5_flow_dv_port_id_action_resource *resource;
3885 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3887 rte_flow_error_set(ctx->error, ENOMEM,
3888 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3889 "cannot allocate port_id action memory");
3892 memcpy(resource, entry, sizeof(*resource));
3893 resource->idx = idx;
3894 return &resource->entry;
3898 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3900 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3901 struct mlx5_flow_dv_port_id_action_resource *resource =
3902 container_of(entry, typeof(*resource), entry);
3904 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3908 * Find existing table port ID resource or create and register a new one.
3910 * @param[in, out] dev
3911 * Pointer to rte_eth_dev structure.
3912 * @param[in, out] ref
3913 * Pointer to port ID action resource reference.
3914 * @parm[in, out] dev_flow
3915 * Pointer to the dev_flow.
3917 * pointer to error structure.
3920 * 0 on success otherwise -errno and errno is set.
3923 flow_dv_port_id_action_resource_register
3924 (struct rte_eth_dev *dev,
3925 struct mlx5_flow_dv_port_id_action_resource *ref,
3926 struct mlx5_flow *dev_flow,
3927 struct rte_flow_error *error)
3929 struct mlx5_priv *priv = dev->data->dev_private;
3930 struct mlx5_list_entry *entry;
3931 struct mlx5_flow_dv_port_id_action_resource *resource;
3932 struct mlx5_flow_cb_ctx ctx = {
3937 entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3940 resource = container_of(entry, typeof(*resource), entry);
3941 dev_flow->dv.port_id_action = resource;
3942 dev_flow->handle->rix_port_id_action = resource->idx;
3947 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3948 struct mlx5_list_entry *entry, void *cb_ctx)
3950 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3951 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3952 struct mlx5_flow_dv_push_vlan_action_resource *res =
3953 container_of(entry, typeof(*res), entry);
3955 return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3958 struct mlx5_list_entry *
3959 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3961 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3962 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3963 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3964 struct mlx5_flow_dv_push_vlan_action_resource *resource;
3965 struct mlx5dv_dr_domain *domain;
3969 /* Register new port id action resource. */
3970 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3972 rte_flow_error_set(ctx->error, ENOMEM,
3973 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3974 "cannot allocate push_vlan action memory");
3978 if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3979 domain = sh->fdb_domain;
3980 else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3981 domain = sh->rx_domain;
3983 domain = sh->tx_domain;
3984 ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3987 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3988 rte_flow_error_set(ctx->error, ENOMEM,
3989 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3990 "cannot create push vlan action");
3993 resource->idx = idx;
3994 return &resource->entry;
3997 struct mlx5_list_entry *
3998 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3999 struct mlx5_list_entry *entry __rte_unused,
4002 struct mlx5_dev_ctx_shared *sh = tool_ctx;
4003 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4004 struct mlx5_flow_dv_push_vlan_action_resource *resource;
4007 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
4009 rte_flow_error_set(ctx->error, ENOMEM,
4010 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4011 "cannot allocate push_vlan action memory");
4014 memcpy(resource, entry, sizeof(*resource));
4015 resource->idx = idx;
4016 return &resource->entry;
4020 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
4022 struct mlx5_dev_ctx_shared *sh = tool_ctx;
4023 struct mlx5_flow_dv_push_vlan_action_resource *resource =
4024 container_of(entry, typeof(*resource), entry);
4026 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
4030 * Find existing push vlan resource or create and register a new one.
4032 * @param [in, out] dev
4033 * Pointer to rte_eth_dev structure.
4034 * @param[in, out] ref
4035 * Pointer to port ID action resource reference.
4036 * @parm[in, out] dev_flow
4037 * Pointer to the dev_flow.
4039 * pointer to error structure.
4042 * 0 on success otherwise -errno and errno is set.
4045 flow_dv_push_vlan_action_resource_register
4046 (struct rte_eth_dev *dev,
4047 struct mlx5_flow_dv_push_vlan_action_resource *ref,
4048 struct mlx5_flow *dev_flow,
4049 struct rte_flow_error *error)
4051 struct mlx5_priv *priv = dev->data->dev_private;
4052 struct mlx5_flow_dv_push_vlan_action_resource *resource;
4053 struct mlx5_list_entry *entry;
4054 struct mlx5_flow_cb_ctx ctx = {
4059 entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4062 resource = container_of(entry, typeof(*resource), entry);
4064 dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4065 dev_flow->dv.push_vlan_res = resource;
4070 * Get the size of specific rte_flow_item_type hdr size
4072 * @param[in] item_type
4073 * Tested rte_flow_item_type.
4076 * sizeof struct item_type, 0 if void or irrelevant.
4079 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4083 switch (item_type) {
4084 case RTE_FLOW_ITEM_TYPE_ETH:
4085 retval = sizeof(struct rte_ether_hdr);
4087 case RTE_FLOW_ITEM_TYPE_VLAN:
4088 retval = sizeof(struct rte_vlan_hdr);
4090 case RTE_FLOW_ITEM_TYPE_IPV4:
4091 retval = sizeof(struct rte_ipv4_hdr);
4093 case RTE_FLOW_ITEM_TYPE_IPV6:
4094 retval = sizeof(struct rte_ipv6_hdr);
4096 case RTE_FLOW_ITEM_TYPE_UDP:
4097 retval = sizeof(struct rte_udp_hdr);
4099 case RTE_FLOW_ITEM_TYPE_TCP:
4100 retval = sizeof(struct rte_tcp_hdr);
4102 case RTE_FLOW_ITEM_TYPE_VXLAN:
4103 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4104 retval = sizeof(struct rte_vxlan_hdr);
4106 case RTE_FLOW_ITEM_TYPE_GRE:
4107 case RTE_FLOW_ITEM_TYPE_NVGRE:
4108 retval = sizeof(struct rte_gre_hdr);
4110 case RTE_FLOW_ITEM_TYPE_MPLS:
4111 retval = sizeof(struct rte_mpls_hdr);
4113 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4121 #define MLX5_ENCAP_IPV4_VERSION 0x40
4122 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
4123 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
4124 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
4125 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
4126 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
4127 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
4130 * Convert the encap action data from list of rte_flow_item to raw buffer
4133 * Pointer to rte_flow_item objects list.
4135 * Pointer to the output buffer.
4137 * Pointer to the output buffer size.
4139 * Pointer to the error structure.
4142 * 0 on success, a negative errno value otherwise and rte_errno is set.
4145 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4146 size_t *size, struct rte_flow_error *error)
4148 struct rte_ether_hdr *eth = NULL;
4149 struct rte_vlan_hdr *vlan = NULL;
4150 struct rte_ipv4_hdr *ipv4 = NULL;
4151 struct rte_ipv6_hdr *ipv6 = NULL;
4152 struct rte_udp_hdr *udp = NULL;
4153 struct rte_vxlan_hdr *vxlan = NULL;
4154 struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4155 struct rte_gre_hdr *gre = NULL;
4157 size_t temp_size = 0;
4160 return rte_flow_error_set(error, EINVAL,
4161 RTE_FLOW_ERROR_TYPE_ACTION,
4162 NULL, "invalid empty data");
4163 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4164 len = flow_dv_get_item_hdr_len(items->type);
4165 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4166 return rte_flow_error_set(error, EINVAL,
4167 RTE_FLOW_ERROR_TYPE_ACTION,
4168 (void *)items->type,
4169 "items total size is too big"
4170 " for encap action");
4171 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4172 switch (items->type) {
4173 case RTE_FLOW_ITEM_TYPE_ETH:
4174 eth = (struct rte_ether_hdr *)&buf[temp_size];
4176 case RTE_FLOW_ITEM_TYPE_VLAN:
4177 vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4179 return rte_flow_error_set(error, EINVAL,
4180 RTE_FLOW_ERROR_TYPE_ACTION,
4181 (void *)items->type,
4182 "eth header not found");
4183 if (!eth->ether_type)
4184 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4186 case RTE_FLOW_ITEM_TYPE_IPV4:
4187 ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4189 return rte_flow_error_set(error, EINVAL,
4190 RTE_FLOW_ERROR_TYPE_ACTION,
4191 (void *)items->type,
4192 "neither eth nor vlan"
4194 if (vlan && !vlan->eth_proto)
4195 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4196 else if (eth && !eth->ether_type)
4197 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4198 if (!ipv4->version_ihl)
4199 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4200 MLX5_ENCAP_IPV4_IHL_MIN;
4201 if (!ipv4->time_to_live)
4202 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4204 case RTE_FLOW_ITEM_TYPE_IPV6:
4205 ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4207 return rte_flow_error_set(error, EINVAL,
4208 RTE_FLOW_ERROR_TYPE_ACTION,
4209 (void *)items->type,
4210 "neither eth nor vlan"
4212 if (vlan && !vlan->eth_proto)
4213 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4214 else if (eth && !eth->ether_type)
4215 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4216 if (!ipv6->vtc_flow)
4218 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4219 if (!ipv6->hop_limits)
4220 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4222 case RTE_FLOW_ITEM_TYPE_UDP:
4223 udp = (struct rte_udp_hdr *)&buf[temp_size];
4225 return rte_flow_error_set(error, EINVAL,
4226 RTE_FLOW_ERROR_TYPE_ACTION,
4227 (void *)items->type,
4228 "ip header not found");
4229 if (ipv4 && !ipv4->next_proto_id)
4230 ipv4->next_proto_id = IPPROTO_UDP;
4231 else if (ipv6 && !ipv6->proto)
4232 ipv6->proto = IPPROTO_UDP;
4234 case RTE_FLOW_ITEM_TYPE_VXLAN:
4235 vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4237 return rte_flow_error_set(error, EINVAL,
4238 RTE_FLOW_ERROR_TYPE_ACTION,
4239 (void *)items->type,
4240 "udp header not found");
4242 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4243 if (!vxlan->vx_flags)
4245 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4247 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4248 vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4250 return rte_flow_error_set(error, EINVAL,
4251 RTE_FLOW_ERROR_TYPE_ACTION,
4252 (void *)items->type,
4253 "udp header not found");
4254 if (!vxlan_gpe->proto)
4255 return rte_flow_error_set(error, EINVAL,
4256 RTE_FLOW_ERROR_TYPE_ACTION,
4257 (void *)items->type,
4258 "next protocol not found");
4261 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4262 if (!vxlan_gpe->vx_flags)
4263 vxlan_gpe->vx_flags =
4264 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4266 case RTE_FLOW_ITEM_TYPE_GRE:
4267 case RTE_FLOW_ITEM_TYPE_NVGRE:
4268 gre = (struct rte_gre_hdr *)&buf[temp_size];
4270 return rte_flow_error_set(error, EINVAL,
4271 RTE_FLOW_ERROR_TYPE_ACTION,
4272 (void *)items->type,
4273 "next protocol not found");
4275 return rte_flow_error_set(error, EINVAL,
4276 RTE_FLOW_ERROR_TYPE_ACTION,
4277 (void *)items->type,
4278 "ip header not found");
4279 if (ipv4 && !ipv4->next_proto_id)
4280 ipv4->next_proto_id = IPPROTO_GRE;
4281 else if (ipv6 && !ipv6->proto)
4282 ipv6->proto = IPPROTO_GRE;
4284 case RTE_FLOW_ITEM_TYPE_VOID:
4287 return rte_flow_error_set(error, EINVAL,
4288 RTE_FLOW_ERROR_TYPE_ACTION,
4289 (void *)items->type,
4290 "unsupported item type");
4300 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4302 struct rte_ether_hdr *eth = NULL;
4303 struct rte_vlan_hdr *vlan = NULL;
4304 struct rte_ipv6_hdr *ipv6 = NULL;
4305 struct rte_udp_hdr *udp = NULL;
4309 eth = (struct rte_ether_hdr *)data;
4310 next_hdr = (char *)(eth + 1);
4311 proto = RTE_BE16(eth->ether_type);
4314 while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4315 vlan = (struct rte_vlan_hdr *)next_hdr;
4316 proto = RTE_BE16(vlan->eth_proto);
4317 next_hdr += sizeof(struct rte_vlan_hdr);
4320 /* HW calculates IPv4 csum. no need to proceed */
4321 if (proto == RTE_ETHER_TYPE_IPV4)
4324 /* non IPv4/IPv6 header. not supported */
4325 if (proto != RTE_ETHER_TYPE_IPV6) {
4326 return rte_flow_error_set(error, ENOTSUP,
4327 RTE_FLOW_ERROR_TYPE_ACTION,
4328 NULL, "Cannot offload non IPv4/IPv6");
4331 ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4333 /* ignore non UDP */
4334 if (ipv6->proto != IPPROTO_UDP)
4337 udp = (struct rte_udp_hdr *)(ipv6 + 1);
4338 udp->dgram_cksum = 0;
4344 * Convert L2 encap action to DV specification.
4347 * Pointer to rte_eth_dev structure.
4349 * Pointer to action structure.
4350 * @param[in, out] dev_flow
4351 * Pointer to the mlx5_flow.
4352 * @param[in] transfer
4353 * Mark if the flow is E-Switch flow.
4355 * Pointer to the error structure.
4358 * 0 on success, a negative errno value otherwise and rte_errno is set.
4361 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4362 const struct rte_flow_action *action,
4363 struct mlx5_flow *dev_flow,
4365 struct rte_flow_error *error)
4367 const struct rte_flow_item *encap_data;
4368 const struct rte_flow_action_raw_encap *raw_encap_data;
4369 struct mlx5_flow_dv_encap_decap_resource res = {
4371 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4372 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4373 MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4376 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4378 (const struct rte_flow_action_raw_encap *)action->conf;
4379 res.size = raw_encap_data->size;
4380 memcpy(res.buf, raw_encap_data->data, res.size);
4382 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4384 ((const struct rte_flow_action_vxlan_encap *)
4385 action->conf)->definition;
4388 ((const struct rte_flow_action_nvgre_encap *)
4389 action->conf)->definition;
4390 if (flow_dv_convert_encap_data(encap_data, res.buf,
4394 if (flow_dv_zero_encap_udp_csum(res.buf, error))
4396 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4397 return rte_flow_error_set(error, EINVAL,
4398 RTE_FLOW_ERROR_TYPE_ACTION,
4399 NULL, "can't create L2 encap action");
4404 * Convert L2 decap action to DV specification.
4407 * Pointer to rte_eth_dev structure.
4408 * @param[in, out] dev_flow
4409 * Pointer to the mlx5_flow.
4410 * @param[in] transfer
4411 * Mark if the flow is E-Switch flow.
4413 * Pointer to the error structure.
4416 * 0 on success, a negative errno value otherwise and rte_errno is set.
4419 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4420 struct mlx5_flow *dev_flow,
4422 struct rte_flow_error *error)
4424 struct mlx5_flow_dv_encap_decap_resource res = {
4427 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4428 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4429 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4432 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4433 return rte_flow_error_set(error, EINVAL,
4434 RTE_FLOW_ERROR_TYPE_ACTION,
4435 NULL, "can't create L2 decap action");
4440 * Convert raw decap/encap (L3 tunnel) action to DV specification.
4443 * Pointer to rte_eth_dev structure.
4445 * Pointer to action structure.
4446 * @param[in, out] dev_flow
4447 * Pointer to the mlx5_flow.
4449 * Pointer to the flow attributes.
4451 * Pointer to the error structure.
4454 * 0 on success, a negative errno value otherwise and rte_errno is set.
4457 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4458 const struct rte_flow_action *action,
4459 struct mlx5_flow *dev_flow,
4460 const struct rte_flow_attr *attr,
4461 struct rte_flow_error *error)
4463 const struct rte_flow_action_raw_encap *encap_data;
4464 struct mlx5_flow_dv_encap_decap_resource res;
4466 memset(&res, 0, sizeof(res));
4467 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4468 res.size = encap_data->size;
4469 memcpy(res.buf, encap_data->data, res.size);
4470 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4471 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4472 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4474 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4476 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4477 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4478 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4479 return rte_flow_error_set(error, EINVAL,
4480 RTE_FLOW_ERROR_TYPE_ACTION,
4481 NULL, "can't create encap action");
4486 * Create action push VLAN.
4489 * Pointer to rte_eth_dev structure.
4491 * Pointer to the flow attributes.
4493 * Pointer to the vlan to push to the Ethernet header.
4494 * @param[in, out] dev_flow
4495 * Pointer to the mlx5_flow.
4497 * Pointer to the error structure.
4500 * 0 on success, a negative errno value otherwise and rte_errno is set.
4503 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4504 const struct rte_flow_attr *attr,
4505 const struct rte_vlan_hdr *vlan,
4506 struct mlx5_flow *dev_flow,
4507 struct rte_flow_error *error)
4509 struct mlx5_flow_dv_push_vlan_action_resource res;
4511 memset(&res, 0, sizeof(res));
4513 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4516 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4518 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4519 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4520 return flow_dv_push_vlan_action_resource_register
4521 (dev, &res, dev_flow, error);
4525 * Validate the modify-header actions.
4527 * @param[in] action_flags
4528 * Holds the actions detected until now.
4530 * Pointer to the modify action.
4532 * Pointer to error structure.
4535 * 0 on success, a negative errno value otherwise and rte_errno is set.
4538 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4539 const struct rte_flow_action *action,
4540 struct rte_flow_error *error)
4542 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4543 return rte_flow_error_set(error, EINVAL,
4544 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4545 NULL, "action configuration not set");
4546 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4547 return rte_flow_error_set(error, EINVAL,
4548 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4549 "can't have encap action before"
4555 * Validate the modify-header MAC address actions.
4557 * @param[in] action_flags
4558 * Holds the actions detected until now.
4560 * Pointer to the modify action.
4561 * @param[in] item_flags
4562 * Holds the items detected.
4564 * Pointer to error structure.
4567 * 0 on success, a negative errno value otherwise and rte_errno is set.
4570 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4571 const struct rte_flow_action *action,
4572 const uint64_t item_flags,
4573 struct rte_flow_error *error)
4577 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4579 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4580 return rte_flow_error_set(error, EINVAL,
4581 RTE_FLOW_ERROR_TYPE_ACTION,
4583 "no L2 item in pattern");
4589 * Validate the modify-header IPv4 address actions.
4591 * @param[in] action_flags
4592 * Holds the actions detected until now.
4594 * Pointer to the modify action.
4595 * @param[in] item_flags
4596 * Holds the items detected.
4598 * Pointer to error structure.
4601 * 0 on success, a negative errno value otherwise and rte_errno is set.
4604 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4605 const struct rte_flow_action *action,
4606 const uint64_t item_flags,
4607 struct rte_flow_error *error)
4612 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4614 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4615 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4616 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4617 if (!(item_flags & layer))
4618 return rte_flow_error_set(error, EINVAL,
4619 RTE_FLOW_ERROR_TYPE_ACTION,
4621 "no ipv4 item in pattern");
4627 * Validate the modify-header IPv6 address actions.
4629 * @param[in] action_flags
4630 * Holds the actions detected until now.
4632 * Pointer to the modify action.
4633 * @param[in] item_flags
4634 * Holds the items detected.
4636 * Pointer to error structure.
4639 * 0 on success, a negative errno value otherwise and rte_errno is set.
4642 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4643 const struct rte_flow_action *action,
4644 const uint64_t item_flags,
4645 struct rte_flow_error *error)
4650 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4652 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4653 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4654 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4655 if (!(item_flags & layer))
4656 return rte_flow_error_set(error, EINVAL,
4657 RTE_FLOW_ERROR_TYPE_ACTION,
4659 "no ipv6 item in pattern");
4665 * Validate the modify-header TP actions.
4667 * @param[in] action_flags
4668 * Holds the actions detected until now.
4670 * Pointer to the modify action.
4671 * @param[in] item_flags
4672 * Holds the items detected.
4674 * Pointer to error structure.
4677 * 0 on success, a negative errno value otherwise and rte_errno is set.
4680 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4681 const struct rte_flow_action *action,
4682 const uint64_t item_flags,
4683 struct rte_flow_error *error)
4688 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4690 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4691 MLX5_FLOW_LAYER_INNER_L4 :
4692 MLX5_FLOW_LAYER_OUTER_L4;
4693 if (!(item_flags & layer))
4694 return rte_flow_error_set(error, EINVAL,
4695 RTE_FLOW_ERROR_TYPE_ACTION,
4696 NULL, "no transport layer "
4703 * Validate the modify-header actions of increment/decrement
4704 * TCP Sequence-number.
4706 * @param[in] action_flags
4707 * Holds the actions detected until now.
4709 * Pointer to the modify action.
4710 * @param[in] item_flags
4711 * Holds the items detected.
4713 * Pointer to error structure.
4716 * 0 on success, a negative errno value otherwise and rte_errno is set.
4719 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4720 const struct rte_flow_action *action,
4721 const uint64_t item_flags,
4722 struct rte_flow_error *error)
4727 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4729 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4730 MLX5_FLOW_LAYER_INNER_L4_TCP :
4731 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4732 if (!(item_flags & layer))
4733 return rte_flow_error_set(error, EINVAL,
4734 RTE_FLOW_ERROR_TYPE_ACTION,
4735 NULL, "no TCP item in"
4737 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4738 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4739 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4740 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4741 return rte_flow_error_set(error, EINVAL,
4742 RTE_FLOW_ERROR_TYPE_ACTION,
4744 "cannot decrease and increase"
4745 " TCP sequence number"
4746 " at the same time");
4752 * Validate the modify-header actions of increment/decrement
4753 * TCP Acknowledgment number.
4755 * @param[in] action_flags
4756 * Holds the actions detected until now.
4758 * Pointer to the modify action.
4759 * @param[in] item_flags
4760 * Holds the items detected.
4762 * Pointer to error structure.
4765 * 0 on success, a negative errno value otherwise and rte_errno is set.
4768 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4769 const struct rte_flow_action *action,
4770 const uint64_t item_flags,
4771 struct rte_flow_error *error)
4776 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4778 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4779 MLX5_FLOW_LAYER_INNER_L4_TCP :
4780 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4781 if (!(item_flags & layer))
4782 return rte_flow_error_set(error, EINVAL,
4783 RTE_FLOW_ERROR_TYPE_ACTION,
4784 NULL, "no TCP item in"
4786 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4787 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4788 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4789 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4790 return rte_flow_error_set(error, EINVAL,
4791 RTE_FLOW_ERROR_TYPE_ACTION,
4793 "cannot decrease and increase"
4794 " TCP acknowledgment number"
4795 " at the same time");
4801 * Validate the modify-header TTL actions.
4803 * @param[in] action_flags
4804 * Holds the actions detected until now.
4806 * Pointer to the modify action.
4807 * @param[in] item_flags
4808 * Holds the items detected.
4810 * Pointer to error structure.
4813 * 0 on success, a negative errno value otherwise and rte_errno is set.
4816 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4817 const struct rte_flow_action *action,
4818 const uint64_t item_flags,
4819 struct rte_flow_error *error)
4824 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4826 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4827 MLX5_FLOW_LAYER_INNER_L3 :
4828 MLX5_FLOW_LAYER_OUTER_L3;
4829 if (!(item_flags & layer))
4830 return rte_flow_error_set(error, EINVAL,
4831 RTE_FLOW_ERROR_TYPE_ACTION,
4833 "no IP protocol in pattern");
4839 * Validate the generic modify field actions.
4841 * Pointer to the rte_eth_dev structure.
4842 * @param[in] action_flags
4843 * Holds the actions detected until now.
4845 * Pointer to the modify action.
4847 * Pointer to the flow attributes.
4849 * Pointer to error structure.
4852 * Number of header fields to modify (0 or more) on success,
4853 * a negative errno value otherwise and rte_errno is set.
4856 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4857 const uint64_t action_flags,
4858 const struct rte_flow_action *action,
4859 const struct rte_flow_attr *attr,
4860 struct rte_flow_error *error)
4863 struct mlx5_priv *priv = dev->data->dev_private;
4864 struct mlx5_dev_config *config = &priv->config;
4865 const struct rte_flow_action_modify_field *action_modify_field =
4867 uint32_t dst_width = mlx5_flow_item_field_width(config,
4868 action_modify_field->dst.field);
4869 uint32_t src_width = mlx5_flow_item_field_width(config,
4870 action_modify_field->src.field);
4872 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4876 if (action_modify_field->width == 0)
4877 return rte_flow_error_set(error, EINVAL,
4878 RTE_FLOW_ERROR_TYPE_ACTION, action,
4879 "no bits are requested to be modified");
4880 else if (action_modify_field->width > dst_width ||
4881 action_modify_field->width > src_width)
4882 return rte_flow_error_set(error, EINVAL,
4883 RTE_FLOW_ERROR_TYPE_ACTION, action,
4884 "cannot modify more bits than"
4885 " the width of a field");
4886 if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4887 action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4888 if ((action_modify_field->dst.offset +
4889 action_modify_field->width > dst_width) ||
4890 (action_modify_field->dst.offset % 32))
4891 return rte_flow_error_set(error, EINVAL,
4892 RTE_FLOW_ERROR_TYPE_ACTION, action,
4893 "destination offset is too big"
4894 " or not aligned to 4 bytes");
4895 if (action_modify_field->dst.level &&
4896 action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4897 return rte_flow_error_set(error, ENOTSUP,
4898 RTE_FLOW_ERROR_TYPE_ACTION, action,
4899 "inner header fields modification"
4900 " is not supported");
4902 if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4903 action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4904 if (!attr->transfer && !attr->group)
4905 return rte_flow_error_set(error, ENOTSUP,
4906 RTE_FLOW_ERROR_TYPE_ACTION, action,
4907 "modify field action is not"
4908 " supported for group 0");
4909 if ((action_modify_field->src.offset +
4910 action_modify_field->width > src_width) ||
4911 (action_modify_field->src.offset % 32))
4912 return rte_flow_error_set(error, EINVAL,
4913 RTE_FLOW_ERROR_TYPE_ACTION, action,
4914 "source offset is too big"
4915 " or not aligned to 4 bytes");
4916 if (action_modify_field->src.level &&
4917 action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4918 return rte_flow_error_set(error, ENOTSUP,
4919 RTE_FLOW_ERROR_TYPE_ACTION, action,
4920 "inner header fields modification"
4921 " is not supported");
4923 if ((action_modify_field->dst.field ==
4924 action_modify_field->src.field) &&
4925 (action_modify_field->dst.level ==
4926 action_modify_field->src.level))
4927 return rte_flow_error_set(error, EINVAL,
4928 RTE_FLOW_ERROR_TYPE_ACTION, action,
4929 "source and destination fields"
4930 " cannot be the same");
4931 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4932 action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4933 return rte_flow_error_set(error, EINVAL,
4934 RTE_FLOW_ERROR_TYPE_ACTION, action,
4935 "immediate value or a pointer to it"
4936 " cannot be used as a destination");
4937 if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4938 action_modify_field->src.field == RTE_FLOW_FIELD_START)
4939 return rte_flow_error_set(error, ENOTSUP,
4940 RTE_FLOW_ERROR_TYPE_ACTION, action,
4941 "modifications of an arbitrary"
4942 " place in a packet is not supported");
4943 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4944 action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4945 return rte_flow_error_set(error, ENOTSUP,
4946 RTE_FLOW_ERROR_TYPE_ACTION, action,
4947 "modifications of the 802.1Q Tag"
4948 " Identifier is not supported");
4949 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4950 action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4951 return rte_flow_error_set(error, ENOTSUP,
4952 RTE_FLOW_ERROR_TYPE_ACTION, action,
4953 "modifications of the VXLAN Network"
4954 " Identifier is not supported");
4955 if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4956 action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4957 return rte_flow_error_set(error, ENOTSUP,
4958 RTE_FLOW_ERROR_TYPE_ACTION, action,
4959 "modifications of the GENEVE Network"
4960 " Identifier is not supported");
4961 if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4962 action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4963 action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4964 action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4965 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4966 !mlx5_flow_ext_mreg_supported(dev))
4967 return rte_flow_error_set(error, ENOTSUP,
4968 RTE_FLOW_ERROR_TYPE_ACTION, action,
4969 "cannot modify mark or metadata without"
4970 " extended metadata register support");
4972 if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4973 return rte_flow_error_set(error, ENOTSUP,
4974 RTE_FLOW_ERROR_TYPE_ACTION, action,
4975 "add and sub operations"
4976 " are not supported");
4977 return (action_modify_field->width / 32) +
4978 !!(action_modify_field->width % 32);
4982 * Validate jump action.
4985 * Pointer to the jump action.
4986 * @param[in] action_flags
4987 * Holds the actions detected until now.
4988 * @param[in] attributes
4989 * Pointer to flow attributes
4990 * @param[in] external
4991 * Action belongs to flow rule created by request external to PMD.
4993 * Pointer to error structure.
4996 * 0 on success, a negative errno value otherwise and rte_errno is set.
4999 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
5000 const struct mlx5_flow_tunnel *tunnel,
5001 const struct rte_flow_action *action,
5002 uint64_t action_flags,
5003 const struct rte_flow_attr *attributes,
5004 bool external, struct rte_flow_error *error)
5006 uint32_t target_group, table;
5008 struct flow_grp_info grp_info = {
5009 .external = !!external,
5010 .transfer = !!attributes->transfer,
5014 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5015 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5016 return rte_flow_error_set(error, EINVAL,
5017 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5018 "can't have 2 fate actions in"
5021 return rte_flow_error_set(error, EINVAL,
5022 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5023 NULL, "action configuration not set");
5025 ((const struct rte_flow_action_jump *)action->conf)->group;
5026 ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
5030 if (attributes->group == target_group &&
5031 !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5032 MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5033 return rte_flow_error_set(error, EINVAL,
5034 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5035 "target group must be other than"
5036 " the current flow group");
5041 * Validate the port_id action.
5044 * Pointer to rte_eth_dev structure.
5045 * @param[in] action_flags
5046 * Bit-fields that holds the actions detected until now.
5048 * Port_id RTE action structure.
5050 * Attributes of flow that includes this action.
5052 * Pointer to error structure.
5055 * 0 on success, a negative errno value otherwise and rte_errno is set.
5058 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5059 uint64_t action_flags,
5060 const struct rte_flow_action *action,
5061 const struct rte_flow_attr *attr,
5062 struct rte_flow_error *error)
5064 const struct rte_flow_action_port_id *port_id;
5065 struct mlx5_priv *act_priv;
5066 struct mlx5_priv *dev_priv;
5069 if (!attr->transfer)
5070 return rte_flow_error_set(error, ENOTSUP,
5071 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5073 "port id action is valid in transfer"
5075 if (!action || !action->conf)
5076 return rte_flow_error_set(error, ENOTSUP,
5077 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5079 "port id action parameters must be"
5081 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5082 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5083 return rte_flow_error_set(error, EINVAL,
5084 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5085 "can have only one fate actions in"
5087 dev_priv = mlx5_dev_to_eswitch_info(dev);
5089 return rte_flow_error_set(error, rte_errno,
5090 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5092 "failed to obtain E-Switch info");
5093 port_id = action->conf;
5094 port = port_id->original ? dev->data->port_id : port_id->id;
5095 act_priv = mlx5_port_to_eswitch_info(port, false);
5097 return rte_flow_error_set
5099 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5100 "failed to obtain E-Switch port id for port");
5101 if (act_priv->domain_id != dev_priv->domain_id)
5102 return rte_flow_error_set
5104 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5105 "port does not belong to"
5106 " E-Switch being configured");
5111 * Get the maximum number of modify header actions.
5114 * Pointer to rte_eth_dev structure.
5116 * Whether action is on root table.
5119 * Max number of modify header actions device can support.
5121 static inline unsigned int
5122 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5126 * There's no way to directly query the max capacity from FW.
5127 * The maximal value on root table should be assumed to be supported.
5130 return MLX5_MAX_MODIFY_NUM;
5132 return MLX5_ROOT_TBL_MODIFY_NUM;
5136 * Validate the meter action.
5139 * Pointer to rte_eth_dev structure.
5140 * @param[in] action_flags
5141 * Bit-fields that holds the actions detected until now.
5143 * Pointer to the meter action.
5145 * Attributes of flow that includes this action.
5146 * @param[in] port_id_item
5147 * Pointer to item indicating port id.
5149 * Pointer to error structure.
5152 * 0 on success, a negative errno value otherwise and rte_ernno is set.
5155 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5156 uint64_t action_flags,
5157 const struct rte_flow_action *action,
5158 const struct rte_flow_attr *attr,
5159 const struct rte_flow_item *port_id_item,
5161 struct rte_flow_error *error)
5163 struct mlx5_priv *priv = dev->data->dev_private;
5164 const struct rte_flow_action_meter *am = action->conf;
5165 struct mlx5_flow_meter_info *fm;
5166 struct mlx5_flow_meter_policy *mtr_policy;
5167 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5170 return rte_flow_error_set(error, EINVAL,
5171 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5172 "meter action conf is NULL");
5174 if (action_flags & MLX5_FLOW_ACTION_METER)
5175 return rte_flow_error_set(error, ENOTSUP,
5176 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5177 "meter chaining not support");
5178 if (action_flags & MLX5_FLOW_ACTION_JUMP)
5179 return rte_flow_error_set(error, ENOTSUP,
5180 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5181 "meter with jump not support");
5183 return rte_flow_error_set(error, ENOTSUP,
5184 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5186 "meter action not supported");
5187 fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5189 return rte_flow_error_set(error, EINVAL,
5190 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5192 /* aso meter can always be shared by different domains */
5193 if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5194 !(fm->transfer == attr->transfer ||
5195 (!fm->ingress && !attr->ingress && attr->egress) ||
5196 (!fm->egress && !attr->egress && attr->ingress)))
5197 return rte_flow_error_set(error, EINVAL,
5198 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5199 "Flow attributes domain are either invalid "
5200 "or have a domain conflict with current "
5201 "meter attributes");
5202 if (fm->def_policy) {
5203 if (!((attr->transfer &&
5204 mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5206 mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5208 mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5209 return rte_flow_error_set(error, EINVAL,
5210 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5211 "Flow attributes domain "
5212 "have a conflict with current "
5213 "meter domain attributes");
5216 mtr_policy = mlx5_flow_meter_policy_find(dev,
5217 fm->policy_id, NULL);
5219 return rte_flow_error_set(error, EINVAL,
5220 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5221 "Invalid policy id for meter ");
5222 if (!((attr->transfer && mtr_policy->transfer) ||
5223 (attr->egress && mtr_policy->egress) ||
5224 (attr->ingress && mtr_policy->ingress)))
5225 return rte_flow_error_set(error, EINVAL,
5226 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5227 "Flow attributes domain "
5228 "have a conflict with current "
5229 "meter domain attributes");
5230 if (attr->transfer && mtr_policy->dev) {
5232 * When policy has fate action of port_id,
5233 * the flow should have the same src port as policy.
5235 struct mlx5_priv *policy_port_priv =
5236 mtr_policy->dev->data->dev_private;
5237 int32_t flow_src_port = priv->representor_id;
5240 const struct rte_flow_item_port_id *spec =
5242 struct mlx5_priv *port_priv =
5243 mlx5_port_to_eswitch_info(spec->id,
5246 return rte_flow_error_set(error,
5248 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5250 "Failed to get port info.");
5251 flow_src_port = port_priv->representor_id;
5253 if (flow_src_port != policy_port_priv->representor_id)
5254 return rte_flow_error_set(error,
5256 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5258 "Flow and meter policy "
5259 "have different src port.");
5261 *def_policy = false;
5267 * Validate the age action.
5269 * @param[in] action_flags
5270 * Holds the actions detected until now.
5272 * Pointer to the age action.
5274 * Pointer to the Ethernet device structure.
5276 * Pointer to error structure.
5279 * 0 on success, a negative errno value otherwise and rte_errno is set.
5282 flow_dv_validate_action_age(uint64_t action_flags,
5283 const struct rte_flow_action *action,
5284 struct rte_eth_dev *dev,
5285 struct rte_flow_error *error)
5287 struct mlx5_priv *priv = dev->data->dev_private;
5288 const struct rte_flow_action_age *age = action->conf;
5290 if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5291 !priv->sh->aso_age_mng))
5292 return rte_flow_error_set(error, ENOTSUP,
5293 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5295 "age action not supported");
5296 if (!(action->conf))
5297 return rte_flow_error_set(error, EINVAL,
5298 RTE_FLOW_ERROR_TYPE_ACTION, action,
5299 "configuration cannot be null");
5300 if (!(age->timeout))
5301 return rte_flow_error_set(error, EINVAL,
5302 RTE_FLOW_ERROR_TYPE_ACTION, action,
5303 "invalid timeout value 0");
5304 if (action_flags & MLX5_FLOW_ACTION_AGE)
5305 return rte_flow_error_set(error, EINVAL,
5306 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5307 "duplicate age actions set");
5312 * Validate the modify-header IPv4 DSCP actions.
5314 * @param[in] action_flags
5315 * Holds the actions detected until now.
5317 * Pointer to the modify action.
5318 * @param[in] item_flags
5319 * Holds the items detected.
5321 * Pointer to error structure.
5324 * 0 on success, a negative errno value otherwise and rte_errno is set.
5327 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5328 const struct rte_flow_action *action,
5329 const uint64_t item_flags,
5330 struct rte_flow_error *error)
5334 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5336 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5337 return rte_flow_error_set(error, EINVAL,
5338 RTE_FLOW_ERROR_TYPE_ACTION,
5340 "no ipv4 item in pattern");
5346 * Validate the modify-header IPv6 DSCP actions.
5348 * @param[in] action_flags
5349 * Holds the actions detected until now.
5351 * Pointer to the modify action.
5352 * @param[in] item_flags
5353 * Holds the items detected.
5355 * Pointer to error structure.
5358 * 0 on success, a negative errno value otherwise and rte_errno is set.
5361 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5362 const struct rte_flow_action *action,
5363 const uint64_t item_flags,
5364 struct rte_flow_error *error)
5368 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5370 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5371 return rte_flow_error_set(error, EINVAL,
5372 RTE_FLOW_ERROR_TYPE_ACTION,
5374 "no ipv6 item in pattern");
5380 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5381 struct mlx5_list_entry *entry, void *cb_ctx)
5383 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5384 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5385 struct mlx5_flow_dv_modify_hdr_resource *resource =
5386 container_of(entry, typeof(*resource), entry);
5387 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5389 key_len += ref->actions_num * sizeof(ref->actions[0]);
5390 return ref->actions_num != resource->actions_num ||
5391 memcmp(&ref->ft_type, &resource->ft_type, key_len);
5394 static struct mlx5_indexed_pool *
5395 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5397 struct mlx5_indexed_pool *ipool = __atomic_load_n
5398 (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5401 struct mlx5_indexed_pool *expected = NULL;
5402 struct mlx5_indexed_pool_config cfg =
5403 (struct mlx5_indexed_pool_config) {
5404 .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5406 sizeof(struct mlx5_modification_cmd),
5411 .release_mem_en = !!sh->reclaim_mode,
5412 .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5413 .malloc = mlx5_malloc,
5415 .type = "mlx5_modify_action_resource",
5418 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5419 ipool = mlx5_ipool_create(&cfg);
5422 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5423 &expected, ipool, false,
5425 __ATOMIC_SEQ_CST)) {
5426 mlx5_ipool_destroy(ipool);
5427 ipool = __atomic_load_n(&sh->mdh_ipools[index],
5434 struct mlx5_list_entry *
5435 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5437 struct mlx5_dev_ctx_shared *sh = tool_ctx;
5438 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5439 struct mlx5dv_dr_domain *ns;
5440 struct mlx5_flow_dv_modify_hdr_resource *entry;
5441 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5442 struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5443 ref->actions_num - 1);
5445 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5446 uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5449 if (unlikely(!ipool)) {
5450 rte_flow_error_set(ctx->error, ENOMEM,
5451 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5452 NULL, "cannot allocate modify ipool");
5455 entry = mlx5_ipool_zmalloc(ipool, &idx);
5457 rte_flow_error_set(ctx->error, ENOMEM,
5458 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5459 "cannot allocate resource memory");
5462 rte_memcpy(&entry->ft_type,
5463 RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5464 key_len + data_len);
5465 if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5466 ns = sh->fdb_domain;
5467 else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5471 ret = mlx5_flow_os_create_flow_action_modify_header
5472 (sh->ctx, ns, entry,
5473 data_len, &entry->action);
5475 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5476 rte_flow_error_set(ctx->error, ENOMEM,
5477 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5478 NULL, "cannot create modification action");
5482 return &entry->entry;
5485 struct mlx5_list_entry *
5486 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5489 struct mlx5_dev_ctx_shared *sh = tool_ctx;
5490 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5491 struct mlx5_flow_dv_modify_hdr_resource *entry;
5492 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5493 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5496 entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5499 rte_flow_error_set(ctx->error, ENOMEM,
5500 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5501 "cannot allocate resource memory");
5504 memcpy(entry, oentry, sizeof(*entry) + data_len);
5506 return &entry->entry;
5510 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5512 struct mlx5_dev_ctx_shared *sh = tool_ctx;
5513 struct mlx5_flow_dv_modify_hdr_resource *res =
5514 container_of(entry, typeof(*res), entry);
5516 mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5520 * Validate the sample action.
5522 * @param[in, out] action_flags
5523 * Holds the actions detected until now.
5525 * Pointer to the sample action.
5527 * Pointer to the Ethernet device structure.
5529 * Attributes of flow that includes this action.
5530 * @param[in] item_flags
5531 * Holds the items detected.
5533 * Pointer to the RSS action.
5534 * @param[out] sample_rss
5535 * Pointer to the RSS action in sample action list.
5537 * Pointer to the COUNT action in sample action list.
5538 * @param[out] fdb_mirror_limit
5539 * Pointer to the FDB mirror limitation flag.
5541 * Pointer to error structure.
5544 * 0 on success, a negative errno value otherwise and rte_errno is set.
5547 flow_dv_validate_action_sample(uint64_t *action_flags,
5548 const struct rte_flow_action *action,
5549 struct rte_eth_dev *dev,
5550 const struct rte_flow_attr *attr,
5551 uint64_t item_flags,
5552 const struct rte_flow_action_rss *rss,
5553 const struct rte_flow_action_rss **sample_rss,
5554 const struct rte_flow_action_count **count,
5555 int *fdb_mirror_limit,
5556 struct rte_flow_error *error)
5558 struct mlx5_priv *priv = dev->data->dev_private;
5559 struct mlx5_dev_config *dev_conf = &priv->config;
5560 const struct rte_flow_action_sample *sample = action->conf;
5561 const struct rte_flow_action *act;
5562 uint64_t sub_action_flags = 0;
5563 uint16_t queue_index = 0xFFFF;
5568 return rte_flow_error_set(error, EINVAL,
5569 RTE_FLOW_ERROR_TYPE_ACTION, action,
5570 "configuration cannot be NULL");
5571 if (sample->ratio == 0)
5572 return rte_flow_error_set(error, EINVAL,
5573 RTE_FLOW_ERROR_TYPE_ACTION, action,
5574 "ratio value starts from 1");
5575 if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5576 return rte_flow_error_set(error, ENOTSUP,
5577 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5579 "sample action not supported");
5580 if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5581 return rte_flow_error_set(error, EINVAL,
5582 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5583 "Multiple sample actions not "
5585 if (*action_flags & MLX5_FLOW_ACTION_METER)
5586 return rte_flow_error_set(error, EINVAL,
5587 RTE_FLOW_ERROR_TYPE_ACTION, action,
5588 "wrong action order, meter should "
5589 "be after sample action");
5590 if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5591 return rte_flow_error_set(error, EINVAL,
5592 RTE_FLOW_ERROR_TYPE_ACTION, action,
5593 "wrong action order, jump should "
5594 "be after sample action");
5595 act = sample->actions;
5596 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5597 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5598 return rte_flow_error_set(error, ENOTSUP,
5599 RTE_FLOW_ERROR_TYPE_ACTION,
5600 act, "too many actions");
5601 switch (act->type) {
5602 case RTE_FLOW_ACTION_TYPE_QUEUE:
5603 ret = mlx5_flow_validate_action_queue(act,
5609 queue_index = ((const struct rte_flow_action_queue *)
5610 (act->conf))->index;
5611 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5614 case RTE_FLOW_ACTION_TYPE_RSS:
5615 *sample_rss = act->conf;
5616 ret = mlx5_flow_validate_action_rss(act,
5623 if (rss && *sample_rss &&
5624 ((*sample_rss)->level != rss->level ||
5625 (*sample_rss)->types != rss->types))
5626 return rte_flow_error_set(error, ENOTSUP,
5627 RTE_FLOW_ERROR_TYPE_ACTION,
5629 "Can't use the different RSS types "
5630 "or level in the same flow");
5631 if (*sample_rss != NULL && (*sample_rss)->queue_num)
5632 queue_index = (*sample_rss)->queue[0];
5633 sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5636 case RTE_FLOW_ACTION_TYPE_MARK:
5637 ret = flow_dv_validate_action_mark(dev, act,
5642 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5643 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5644 MLX5_FLOW_ACTION_MARK_EXT;
5646 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5649 case RTE_FLOW_ACTION_TYPE_COUNT:
5650 ret = flow_dv_validate_action_count
5651 (dev, is_shared_action_count(act),
5652 *action_flags | sub_action_flags,
5657 sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5658 *action_flags |= MLX5_FLOW_ACTION_COUNT;
5661 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5662 ret = flow_dv_validate_action_port_id(dev,
5669 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5672 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5673 ret = flow_dv_validate_action_raw_encap_decap
5674 (dev, NULL, act->conf, attr, &sub_action_flags,
5675 &actions_n, action, item_flags, error);
5680 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5681 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5682 ret = flow_dv_validate_action_l2_encap(dev,
5688 sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5692 return rte_flow_error_set(error, ENOTSUP,
5693 RTE_FLOW_ERROR_TYPE_ACTION,
5695 "Doesn't support optional "
5699 if (attr->ingress && !attr->transfer) {
5700 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5701 MLX5_FLOW_ACTION_RSS)))
5702 return rte_flow_error_set(error, EINVAL,
5703 RTE_FLOW_ERROR_TYPE_ACTION,
5705 "Ingress must has a dest "
5706 "QUEUE for Sample");
5707 } else if (attr->egress && !attr->transfer) {
5708 return rte_flow_error_set(error, ENOTSUP,
5709 RTE_FLOW_ERROR_TYPE_ACTION,
5711 "Sample Only support Ingress "
5713 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5714 MLX5_ASSERT(attr->transfer);
5715 if (sample->ratio > 1)
5716 return rte_flow_error_set(error, ENOTSUP,
5717 RTE_FLOW_ERROR_TYPE_ACTION,
5719 "E-Switch doesn't support "
5720 "any optional action "
5722 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5723 return rte_flow_error_set(error, ENOTSUP,
5724 RTE_FLOW_ERROR_TYPE_ACTION,
5726 "unsupported action QUEUE");
5727 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5728 return rte_flow_error_set(error, ENOTSUP,
5729 RTE_FLOW_ERROR_TYPE_ACTION,
5731 "unsupported action QUEUE");
5732 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5733 return rte_flow_error_set(error, EINVAL,
5734 RTE_FLOW_ERROR_TYPE_ACTION,
5736 "E-Switch must has a dest "
5737 "port for mirroring");
5738 if (!priv->config.hca_attr.reg_c_preserve &&
5739 priv->representor_id != UINT16_MAX)
5740 *fdb_mirror_limit = 1;
5742 /* Continue validation for Xcap actions.*/
5743 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5744 (queue_index == 0xFFFF ||
5745 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5746 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5747 MLX5_FLOW_XCAP_ACTIONS)
5748 return rte_flow_error_set(error, ENOTSUP,
5749 RTE_FLOW_ERROR_TYPE_ACTION,
5750 NULL, "encap and decap "
5751 "combination aren't "
5753 if (!attr->transfer && attr->ingress && (sub_action_flags &
5754 MLX5_FLOW_ACTION_ENCAP))
5755 return rte_flow_error_set(error, ENOTSUP,
5756 RTE_FLOW_ERROR_TYPE_ACTION,
5757 NULL, "encap is not supported"
5758 " for ingress traffic");
5764 * Find existing modify-header resource or create and register a new one.
5766 * @param dev[in, out]
5767 * Pointer to rte_eth_dev structure.
5768 * @param[in, out] resource
5769 * Pointer to modify-header resource.
5770 * @parm[in, out] dev_flow
5771 * Pointer to the dev_flow.
5773 * pointer to error structure.
5776 * 0 on success otherwise -errno and errno is set.
5779 flow_dv_modify_hdr_resource_register
5780 (struct rte_eth_dev *dev,
5781 struct mlx5_flow_dv_modify_hdr_resource *resource,
5782 struct mlx5_flow *dev_flow,
5783 struct rte_flow_error *error)
5785 struct mlx5_priv *priv = dev->data->dev_private;
5786 struct mlx5_dev_ctx_shared *sh = priv->sh;
5787 uint32_t key_len = sizeof(*resource) -
5788 offsetof(typeof(*resource), ft_type) +
5789 resource->actions_num * sizeof(resource->actions[0]);
5790 struct mlx5_list_entry *entry;
5791 struct mlx5_flow_cb_ctx ctx = {
5795 struct mlx5_hlist *modify_cmds;
5798 modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5800 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5802 flow_dv_modify_create_cb,
5803 flow_dv_modify_match_cb,
5804 flow_dv_modify_remove_cb,
5805 flow_dv_modify_clone_cb,
5806 flow_dv_modify_clone_free_cb);
5807 if (unlikely(!modify_cmds))
5809 resource->root = !dev_flow->dv.group;
5810 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5812 return rte_flow_error_set(error, EOVERFLOW,
5813 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5814 "too many modify header items");
5815 key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5816 entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5819 resource = container_of(entry, typeof(*resource), entry);
5820 dev_flow->handle->dvh.modify_hdr = resource;
5825 * Get DV flow counter by index.
5828 * Pointer to the Ethernet device structure.
5830 * mlx5 flow counter index in the container.
5832 * mlx5 flow counter pool in the container.
5835 * Pointer to the counter, NULL otherwise.
5837 static struct mlx5_flow_counter *
5838 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5840 struct mlx5_flow_counter_pool **ppool)
5842 struct mlx5_priv *priv = dev->data->dev_private;
5843 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5844 struct mlx5_flow_counter_pool *pool;
5846 /* Decrease to original index and clear shared bit. */
5847 idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5848 MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5849 pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5853 return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5857 * Check the devx counter belongs to the pool.
5860 * Pointer to the counter pool.
5862 * The counter devx ID.
5865 * True if counter belongs to the pool, false otherwise.
5868 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5870 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5871 MLX5_COUNTERS_PER_POOL;
5873 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5879 * Get a pool by devx counter ID.
5882 * Pointer to the counter management.
5884 * The counter devx ID.
5887 * The counter pool pointer if exists, NULL otherwise,
5889 static struct mlx5_flow_counter_pool *
5890 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5893 struct mlx5_flow_counter_pool *pool = NULL;
5895 rte_spinlock_lock(&cmng->pool_update_sl);
5896 /* Check last used pool. */
5897 if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5898 flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5899 pool = cmng->pools[cmng->last_pool_idx];
5902 /* ID out of range means no suitable pool in the container. */
5903 if (id > cmng->max_id || id < cmng->min_id)
5906 * Find the pool from the end of the container, since mostly counter
5907 * ID is sequence increasing, and the last pool should be the needed
5912 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5914 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5920 rte_spinlock_unlock(&cmng->pool_update_sl);
5925 * Resize a counter container.
5928 * Pointer to the Ethernet device structure.
5931 * 0 on success, otherwise negative errno value and rte_errno is set.
5934 flow_dv_container_resize(struct rte_eth_dev *dev)
5936 struct mlx5_priv *priv = dev->data->dev_private;
5937 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5938 void *old_pools = cmng->pools;
5939 uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5940 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5941 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5948 memcpy(pools, old_pools, cmng->n *
5949 sizeof(struct mlx5_flow_counter_pool *));
5951 cmng->pools = pools;
5953 mlx5_free(old_pools);
5958 * Query a devx flow counter.
5961 * Pointer to the Ethernet device structure.
5962 * @param[in] counter
5963 * Index to the flow counter.
5965 * The statistics value of packets.
5967 * The statistics value of bytes.
5970 * 0 on success, otherwise a negative errno value and rte_errno is set.
5973 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5976 struct mlx5_priv *priv = dev->data->dev_private;
5977 struct mlx5_flow_counter_pool *pool = NULL;
5978 struct mlx5_flow_counter *cnt;
5981 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5983 if (priv->sh->cmng.counter_fallback)
5984 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5985 0, pkts, bytes, 0, NULL, NULL, 0);
5986 rte_spinlock_lock(&pool->sl);
5991 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5992 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5993 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5995 rte_spinlock_unlock(&pool->sl);
6000 * Create and initialize a new counter pool.
6003 * Pointer to the Ethernet device structure.
6005 * The devX counter handle.
6007 * Whether the pool is for counter that was allocated for aging.
6008 * @param[in/out] cont_cur
6009 * Pointer to the container pointer, it will be update in pool resize.
6012 * The pool container pointer on success, NULL otherwise and rte_errno is set.
6014 static struct mlx5_flow_counter_pool *
6015 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6018 struct mlx5_priv *priv = dev->data->dev_private;
6019 struct mlx5_flow_counter_pool *pool;
6020 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6021 bool fallback = priv->sh->cmng.counter_fallback;
6022 uint32_t size = sizeof(*pool);
6024 size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6025 size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6026 pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6032 pool->is_aged = !!age;
6033 pool->query_gen = 0;
6034 pool->min_dcs = dcs;
6035 rte_spinlock_init(&pool->sl);
6036 rte_spinlock_init(&pool->csl);
6037 TAILQ_INIT(&pool->counters[0]);
6038 TAILQ_INIT(&pool->counters[1]);
6039 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6040 rte_spinlock_lock(&cmng->pool_update_sl);
6041 pool->index = cmng->n_valid;
6042 if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6044 rte_spinlock_unlock(&cmng->pool_update_sl);
6047 cmng->pools[pool->index] = pool;
6049 if (unlikely(fallback)) {
6050 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6052 if (base < cmng->min_id)
6053 cmng->min_id = base;
6054 if (base > cmng->max_id)
6055 cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6056 cmng->last_pool_idx = pool->index;
6058 rte_spinlock_unlock(&cmng->pool_update_sl);
6063 * Prepare a new counter and/or a new counter pool.
6066 * Pointer to the Ethernet device structure.
6067 * @param[out] cnt_free
6068 * Where to put the pointer of a new counter.
6070 * Whether the pool is for counter that was allocated for aging.
6073 * The counter pool pointer and @p cnt_free is set on success,
6074 * NULL otherwise and rte_errno is set.
6076 static struct mlx5_flow_counter_pool *
6077 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6078 struct mlx5_flow_counter **cnt_free,
6081 struct mlx5_priv *priv = dev->data->dev_private;
6082 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6083 struct mlx5_flow_counter_pool *pool;
6084 struct mlx5_counters tmp_tq;
6085 struct mlx5_devx_obj *dcs = NULL;
6086 struct mlx5_flow_counter *cnt;
6087 enum mlx5_counter_type cnt_type =
6088 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6089 bool fallback = priv->sh->cmng.counter_fallback;
6093 /* bulk_bitmap must be 0 for single counter allocation. */
6094 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
6097 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6099 pool = flow_dv_pool_create(dev, dcs, age);
6101 mlx5_devx_cmd_destroy(dcs);
6105 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6106 cnt = MLX5_POOL_GET_CNT(pool, i);
6108 cnt->dcs_when_free = dcs;
6112 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
6114 rte_errno = ENODATA;
6117 pool = flow_dv_pool_create(dev, dcs, age);
6119 mlx5_devx_cmd_destroy(dcs);
6122 TAILQ_INIT(&tmp_tq);
6123 for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6124 cnt = MLX5_POOL_GET_CNT(pool, i);
6126 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6128 rte_spinlock_lock(&cmng->csl[cnt_type]);
6129 TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6130 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6131 *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6132 (*cnt_free)->pool = pool;
6137 * Allocate a flow counter.
6140 * Pointer to the Ethernet device structure.
6142 * Whether the counter was allocated for aging.
6145 * Index to flow counter on success, 0 otherwise and rte_errno is set.
6148 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6150 struct mlx5_priv *priv = dev->data->dev_private;
6151 struct mlx5_flow_counter_pool *pool = NULL;
6152 struct mlx5_flow_counter *cnt_free = NULL;
6153 bool fallback = priv->sh->cmng.counter_fallback;
6154 struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6155 enum mlx5_counter_type cnt_type =
6156 age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6159 if (!priv->config.devx) {
6160 rte_errno = ENOTSUP;
6163 /* Get free counters from container. */
6164 rte_spinlock_lock(&cmng->csl[cnt_type]);
6165 cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6167 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6168 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6169 if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6171 pool = cnt_free->pool;
6173 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6174 /* Create a DV counter action only in the first time usage. */
6175 if (!cnt_free->action) {
6177 struct mlx5_devx_obj *dcs;
6181 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6182 dcs = pool->min_dcs;
6185 dcs = cnt_free->dcs_when_free;
6187 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6194 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6195 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6196 /* Update the counter reset values. */
6197 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6200 if (!fallback && !priv->sh->cmng.query_thread_on)
6201 /* Start the asynchronous batch query by the host thread. */
6202 mlx5_set_query_alarm(priv->sh);
6204 * When the count action isn't shared (by ID), shared_info field is
6205 * used for indirect action API's refcnt.
6206 * When the counter action is not shared neither by ID nor by indirect
6207 * action API, shared info must be 1.
6209 cnt_free->shared_info.refcnt = 1;
6213 cnt_free->pool = pool;
6215 cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6216 rte_spinlock_lock(&cmng->csl[cnt_type]);
6217 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6218 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6224 * Allocate a shared flow counter.
6227 * Pointer to the shared counter configuration.
6229 * Pointer to save the allocated counter index.
6232 * Index to flow counter on success, 0 otherwise and rte_errno is set.
6236 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6238 struct mlx5_shared_counter_conf *conf = ctx;
6239 struct rte_eth_dev *dev = conf->dev;
6240 struct mlx5_flow_counter *cnt;
6242 data->dword = flow_dv_counter_alloc(dev, 0);
6243 data->dword |= MLX5_CNT_SHARED_OFFSET;
6244 cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6245 cnt->shared_info.id = conf->id;
6250 * Get a shared flow counter.
6253 * Pointer to the Ethernet device structure.
6255 * Counter identifier.
6258 * Index to flow counter on success, 0 otherwise and rte_errno is set.
6261 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6263 struct mlx5_priv *priv = dev->data->dev_private;
6264 struct mlx5_shared_counter_conf conf = {
6268 union mlx5_l3t_data data = {
6272 mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6273 flow_dv_counter_alloc_shared_cb, &conf);
6278 * Get age param from counter index.
6281 * Pointer to the Ethernet device structure.
6282 * @param[in] counter
6283 * Index to the counter handler.
6286 * The aging parameter specified for the counter index.
6288 static struct mlx5_age_param*
6289 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6292 struct mlx5_flow_counter *cnt;
6293 struct mlx5_flow_counter_pool *pool = NULL;
6295 flow_dv_counter_get_by_idx(dev, counter, &pool);
6296 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6297 cnt = MLX5_POOL_GET_CNT(pool, counter);
6298 return MLX5_CNT_TO_AGE(cnt);
6302 * Remove a flow counter from aged counter list.
6305 * Pointer to the Ethernet device structure.
6306 * @param[in] counter
6307 * Index to the counter handler.
6309 * Pointer to the counter handler.
6312 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6313 uint32_t counter, struct mlx5_flow_counter *cnt)
6315 struct mlx5_age_info *age_info;
6316 struct mlx5_age_param *age_param;
6317 struct mlx5_priv *priv = dev->data->dev_private;
6318 uint16_t expected = AGE_CANDIDATE;
6320 age_info = GET_PORT_AGE_INFO(priv);
6321 age_param = flow_dv_counter_idx_get_age(dev, counter);
6322 if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6323 AGE_FREE, false, __ATOMIC_RELAXED,
6324 __ATOMIC_RELAXED)) {
6326 * We need the lock even it is age timeout,
6327 * since counter may still in process.
6329 rte_spinlock_lock(&age_info->aged_sl);
6330 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6331 rte_spinlock_unlock(&age_info->aged_sl);
6332 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6337 * Release a flow counter.
6340 * Pointer to the Ethernet device structure.
6341 * @param[in] counter
6342 * Index to the counter handler.
6345 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6347 struct mlx5_priv *priv = dev->data->dev_private;
6348 struct mlx5_flow_counter_pool *pool = NULL;
6349 struct mlx5_flow_counter *cnt;
6350 enum mlx5_counter_type cnt_type;
6354 cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6356 if (pool->is_aged) {
6357 flow_dv_counter_remove_from_age(dev, counter, cnt);
6360 * If the counter action is shared by ID, the l3t_clear_entry
6361 * function reduces its references counter. If after the
6362 * reduction the action is still referenced, the function
6363 * returns here and does not release it.
6365 if (IS_LEGACY_SHARED_CNT(counter) &&
6366 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6367 cnt->shared_info.id))
6370 * If the counter action is shared by indirect action API,
6371 * the atomic function reduces its references counter.
6372 * If after the reduction the action is still referenced, the
6373 * function returns here and does not release it.
6374 * When the counter action is not shared neither by ID nor by
6375 * indirect action API, shared info is 1 before the reduction,
6376 * so this condition is failed and function doesn't return here.
6378 if (!IS_LEGACY_SHARED_CNT(counter) &&
6379 __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6385 * Put the counter back to list to be updated in none fallback mode.
6386 * Currently, we are using two list alternately, while one is in query,
6387 * add the freed counter to the other list based on the pool query_gen
6388 * value. After query finishes, add counter the list to the global
6389 * container counter list. The list changes while query starts. In
6390 * this case, lock will not be needed as query callback and release
6391 * function both operate with the different list.
6393 if (!priv->sh->cmng.counter_fallback) {
6394 rte_spinlock_lock(&pool->csl);
6395 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6396 rte_spinlock_unlock(&pool->csl);
6398 cnt->dcs_when_free = cnt->dcs_when_active;
6399 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6400 MLX5_COUNTER_TYPE_ORIGIN;
6401 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6402 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6404 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6409 * Resize a meter id container.
6412 * Pointer to the Ethernet device structure.
6415 * 0 on success, otherwise negative errno value and rte_errno is set.
6418 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6420 struct mlx5_priv *priv = dev->data->dev_private;
6421 struct mlx5_aso_mtr_pools_mng *pools_mng =
6422 &priv->sh->mtrmng->pools_mng;
6423 void *old_pools = pools_mng->pools;
6424 uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6425 uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6426 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6433 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6438 memcpy(pools, old_pools, pools_mng->n *
6439 sizeof(struct mlx5_aso_mtr_pool *));
6440 pools_mng->n = resize;
6441 pools_mng->pools = pools;
6443 mlx5_free(old_pools);
6448 * Prepare a new meter and/or a new meter pool.
6451 * Pointer to the Ethernet device structure.
6452 * @param[out] mtr_free
6453 * Where to put the pointer of a new meter.g.
6456 * The meter pool pointer and @mtr_free is set on success,
6457 * NULL otherwise and rte_errno is set.
6459 static struct mlx5_aso_mtr_pool *
6460 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6461 struct mlx5_aso_mtr **mtr_free)
6463 struct mlx5_priv *priv = dev->data->dev_private;
6464 struct mlx5_aso_mtr_pools_mng *pools_mng =
6465 &priv->sh->mtrmng->pools_mng;
6466 struct mlx5_aso_mtr_pool *pool = NULL;
6467 struct mlx5_devx_obj *dcs = NULL;
6469 uint32_t log_obj_size;
6471 log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6472 dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6473 priv->sh->pdn, log_obj_size);
6475 rte_errno = ENODATA;
6478 pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6481 claim_zero(mlx5_devx_cmd_destroy(dcs));
6484 pool->devx_obj = dcs;
6485 pool->index = pools_mng->n_valid;
6486 if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6488 claim_zero(mlx5_devx_cmd_destroy(dcs));
6491 pools_mng->pools[pool->index] = pool;
6492 pools_mng->n_valid++;
6493 for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6494 pool->mtrs[i].offset = i;
6495 LIST_INSERT_HEAD(&pools_mng->meters,
6496 &pool->mtrs[i], next);
6498 pool->mtrs[0].offset = 0;
6499 *mtr_free = &pool->mtrs[0];
6504 * Release a flow meter into pool.
6507 * Pointer to the Ethernet device structure.
6508 * @param[in] mtr_idx
6509 * Index to aso flow meter.
6512 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6514 struct mlx5_priv *priv = dev->data->dev_private;
6515 struct mlx5_aso_mtr_pools_mng *pools_mng =
6516 &priv->sh->mtrmng->pools_mng;
6517 struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6519 MLX5_ASSERT(aso_mtr);
6520 rte_spinlock_lock(&pools_mng->mtrsl);
6521 memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6522 aso_mtr->state = ASO_METER_FREE;
6523 LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6524 rte_spinlock_unlock(&pools_mng->mtrsl);
6528 * Allocate a aso flow meter.
6531 * Pointer to the Ethernet device structure.
6534 * Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6537 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6539 struct mlx5_priv *priv = dev->data->dev_private;
6540 struct mlx5_aso_mtr *mtr_free = NULL;
6541 struct mlx5_aso_mtr_pools_mng *pools_mng =
6542 &priv->sh->mtrmng->pools_mng;
6543 struct mlx5_aso_mtr_pool *pool;
6544 uint32_t mtr_idx = 0;
6546 if (!priv->config.devx) {
6547 rte_errno = ENOTSUP;
6550 /* Allocate the flow meter memory. */
6551 /* Get free meters from management. */
6552 rte_spinlock_lock(&pools_mng->mtrsl);
6553 mtr_free = LIST_FIRST(&pools_mng->meters);
6555 LIST_REMOVE(mtr_free, next);
6556 if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6557 rte_spinlock_unlock(&pools_mng->mtrsl);
6560 mtr_free->state = ASO_METER_WAIT;
6561 rte_spinlock_unlock(&pools_mng->mtrsl);
6562 pool = container_of(mtr_free,
6563 struct mlx5_aso_mtr_pool,
6564 mtrs[mtr_free->offset]);
6565 mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6566 if (!mtr_free->fm.meter_action) {
6567 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6568 struct rte_flow_error error;
6571 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6572 mtr_free->fm.meter_action =
6573 mlx5_glue->dv_create_flow_action_aso
6574 (priv->sh->rx_domain,
6575 pool->devx_obj->obj,
6577 (1 << MLX5_FLOW_COLOR_GREEN),
6579 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6580 if (!mtr_free->fm.meter_action) {
6581 flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6589 * Verify the @p attributes will be correctly understood by the NIC and store
6590 * them in the @p flow if everything is correct.
6593 * Pointer to dev struct.
6594 * @param[in] attributes
6595 * Pointer to flow attributes
6596 * @param[in] external
6597 * This flow rule is created by request external to PMD.
6599 * Pointer to error structure.
6602 * - 0 on success and non root table.
6603 * - 1 on success and root table.
6604 * - a negative errno value otherwise and rte_errno is set.
6607 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6608 const struct mlx5_flow_tunnel *tunnel,
6609 const struct rte_flow_attr *attributes,
6610 const struct flow_grp_info *grp_info,
6611 struct rte_flow_error *error)
6613 struct mlx5_priv *priv = dev->data->dev_private;
6614 uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6617 #ifndef HAVE_MLX5DV_DR
6618 RTE_SET_USED(tunnel);
6619 RTE_SET_USED(grp_info);
6620 if (attributes->group)
6621 return rte_flow_error_set(error, ENOTSUP,
6622 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6624 "groups are not supported");
6628 ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6633 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6635 if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6636 attributes->priority > lowest_priority)
6637 return rte_flow_error_set(error, ENOTSUP,
6638 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6640 "priority out of range");
6641 if (attributes->transfer) {
6642 if (!priv->config.dv_esw_en)
6643 return rte_flow_error_set
6645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6646 "E-Switch dr is not supported");
6647 if (!(priv->representor || priv->master))
6648 return rte_flow_error_set
6649 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6650 NULL, "E-Switch configuration can only be"
6651 " done by a master or a representor device");
6652 if (attributes->egress)
6653 return rte_flow_error_set
6655 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6656 "egress is not supported");
6658 if (!(attributes->egress ^ attributes->ingress))
6659 return rte_flow_error_set(error, ENOTSUP,
6660 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6661 "must specify exactly one of "
6662 "ingress or egress");
6667 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6668 const struct rte_flow_item *end)
6670 const struct rte_flow_item *item = *head;
6671 uint16_t l3_protocol;
6673 for (; item != end; item++) {
6674 switch (item->type) {
6677 case RTE_FLOW_ITEM_TYPE_IPV4:
6678 l3_protocol = RTE_ETHER_TYPE_IPV4;
6680 case RTE_FLOW_ITEM_TYPE_IPV6:
6681 l3_protocol = RTE_ETHER_TYPE_IPV6;
6683 case RTE_FLOW_ITEM_TYPE_ETH:
6684 if (item->mask && item->spec) {
6685 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6688 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6689 l3_protocol == RTE_ETHER_TYPE_IPV6)
6693 case RTE_FLOW_ITEM_TYPE_VLAN:
6694 if (item->mask && item->spec) {
6695 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6698 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6699 l3_protocol == RTE_ETHER_TYPE_IPV6)
6712 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6713 const struct rte_flow_item *end)
6715 const struct rte_flow_item *item = *head;
6716 uint8_t l4_protocol;
6718 for (; item != end; item++) {
6719 switch (item->type) {
6722 case RTE_FLOW_ITEM_TYPE_TCP:
6723 l4_protocol = IPPROTO_TCP;
6725 case RTE_FLOW_ITEM_TYPE_UDP:
6726 l4_protocol = IPPROTO_UDP;
6728 case RTE_FLOW_ITEM_TYPE_IPV4:
6729 if (item->mask && item->spec) {
6730 const struct rte_flow_item_ipv4 *mask, *spec;
6732 mask = (typeof(mask))item->mask;
6733 spec = (typeof(spec))item->spec;
6734 l4_protocol = mask->hdr.next_proto_id &
6735 spec->hdr.next_proto_id;
6736 if (l4_protocol == IPPROTO_TCP ||
6737 l4_protocol == IPPROTO_UDP)
6741 case RTE_FLOW_ITEM_TYPE_IPV6:
6742 if (item->mask && item->spec) {
6743 const struct rte_flow_item_ipv6 *mask, *spec;
6744 mask = (typeof(mask))item->mask;
6745 spec = (typeof(spec))item->spec;
6746 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6747 if (l4_protocol == IPPROTO_TCP ||
6748 l4_protocol == IPPROTO_UDP)
6761 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6762 const struct rte_flow_item *rule_items,
6763 const struct rte_flow_item *integrity_item,
6764 struct rte_flow_error *error)
6766 struct mlx5_priv *priv = dev->data->dev_private;
6767 const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6768 const struct rte_flow_item_integrity *mask = (typeof(mask))
6769 integrity_item->mask;
6770 const struct rte_flow_item_integrity *spec = (typeof(spec))
6771 integrity_item->spec;
6774 if (!priv->config.hca_attr.pkt_integrity_match)
6775 return rte_flow_error_set(error, ENOTSUP,
6776 RTE_FLOW_ERROR_TYPE_ITEM,
6778 "packet integrity integrity_item not supported");
6780 mask = &rte_flow_item_integrity_mask;
6781 if (!mlx5_validate_integrity_item(mask))
6782 return rte_flow_error_set(error, ENOTSUP,
6783 RTE_FLOW_ERROR_TYPE_ITEM,
6785 "unsupported integrity filter");
6786 tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6787 if (spec->level > 1) {
6789 return rte_flow_error_set(error, ENOTSUP,
6790 RTE_FLOW_ERROR_TYPE_ITEM,
6792 "missing tunnel item");
6794 end_item = mlx5_find_end_item(tunnel_item);
6796 end_item = tunnel_item ? tunnel_item :
6797 mlx5_find_end_item(integrity_item);
6799 if (mask->l3_ok || mask->ipv4_csum_ok) {
6800 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6802 return rte_flow_error_set(error, EINVAL,
6803 RTE_FLOW_ERROR_TYPE_ITEM,
6805 "missing L3 protocol");
6807 if (mask->l4_ok || mask->l4_csum_ok) {
6808 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6810 return rte_flow_error_set(error, EINVAL,
6811 RTE_FLOW_ERROR_TYPE_ITEM,
6813 "missing L4 protocol");
6819 * Internal validation function. For validating both actions and items.
6822 * Pointer to the rte_eth_dev structure.
6824 * Pointer to the flow attributes.
6826 * Pointer to the list of items.
6827 * @param[in] actions
6828 * Pointer to the list of actions.
6829 * @param[in] external
6830 * This flow rule is created by request external to PMD.
6831 * @param[in] hairpin
6832 * Number of hairpin TX actions, 0 means classic flow.
6834 * Pointer to the error structure.
6837 * 0 on success, a negative errno value otherwise and rte_errno is set.
6840 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6841 const struct rte_flow_item items[],
6842 const struct rte_flow_action actions[],
6843 bool external, int hairpin, struct rte_flow_error *error)
6846 uint64_t action_flags = 0;
6847 uint64_t item_flags = 0;
6848 uint64_t last_item = 0;
6849 uint8_t next_protocol = 0xff;
6850 uint16_t ether_type = 0;
6852 uint8_t item_ipv6_proto = 0;
6853 int fdb_mirror_limit = 0;
6854 int modify_after_mirror = 0;
6855 const struct rte_flow_item *geneve_item = NULL;
6856 const struct rte_flow_item *gre_item = NULL;
6857 const struct rte_flow_item *gtp_item = NULL;
6858 const struct rte_flow_action_raw_decap *decap;
6859 const struct rte_flow_action_raw_encap *encap;
6860 const struct rte_flow_action_rss *rss = NULL;
6861 const struct rte_flow_action_rss *sample_rss = NULL;
6862 const struct rte_flow_action_count *sample_count = NULL;
6863 const struct rte_flow_item_tcp nic_tcp_mask = {
6866 .src_port = RTE_BE16(UINT16_MAX),
6867 .dst_port = RTE_BE16(UINT16_MAX),
6870 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6873 "\xff\xff\xff\xff\xff\xff\xff\xff"
6874 "\xff\xff\xff\xff\xff\xff\xff\xff",
6876 "\xff\xff\xff\xff\xff\xff\xff\xff"
6877 "\xff\xff\xff\xff\xff\xff\xff\xff",
6878 .vtc_flow = RTE_BE32(0xffffffff),
6884 const struct rte_flow_item_ecpri nic_ecpri_mask = {
6888 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6892 .dummy[0] = 0xffffffff,
6895 struct mlx5_priv *priv = dev->data->dev_private;
6896 struct mlx5_dev_config *dev_conf = &priv->config;
6897 uint16_t queue_index = 0xFFFF;
6898 const struct rte_flow_item_vlan *vlan_m = NULL;
6899 uint32_t rw_act_num = 0;
6901 const struct mlx5_flow_tunnel *tunnel;
6902 enum mlx5_tof_rule_type tof_rule_type;
6903 struct flow_grp_info grp_info = {
6904 .external = !!external,
6905 .transfer = !!attr->transfer,
6906 .fdb_def_rule = !!priv->fdb_def_rule,
6907 .std_tbl_fix = true,
6909 const struct rte_eth_hairpin_conf *conf;
6910 const struct rte_flow_item *rule_items = items;
6911 const struct rte_flow_item *port_id_item = NULL;
6912 bool def_policy = false;
6916 tunnel = is_tunnel_offload_active(dev) ?
6917 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6919 if (priv->representor)
6920 return rte_flow_error_set
6922 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6923 NULL, "decap not supported for VF representor");
6924 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6925 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6926 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6927 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6928 MLX5_FLOW_ACTION_DECAP;
6929 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6930 (dev, attr, tunnel, tof_rule_type);
6932 ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6935 is_root = (uint64_t)ret;
6936 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6937 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6938 int type = items->type;
6940 if (!mlx5_flow_os_item_supported(type))
6941 return rte_flow_error_set(error, ENOTSUP,
6942 RTE_FLOW_ERROR_TYPE_ITEM,
6943 NULL, "item not supported");
6945 case RTE_FLOW_ITEM_TYPE_VOID:
6947 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6948 ret = flow_dv_validate_item_port_id
6949 (dev, items, attr, item_flags, error);
6952 last_item = MLX5_FLOW_ITEM_PORT_ID;
6953 port_id_item = items;
6955 case RTE_FLOW_ITEM_TYPE_ETH:
6956 ret = mlx5_flow_validate_item_eth(items, item_flags,
6960 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6961 MLX5_FLOW_LAYER_OUTER_L2;
6962 if (items->mask != NULL && items->spec != NULL) {
6964 ((const struct rte_flow_item_eth *)
6967 ((const struct rte_flow_item_eth *)
6969 ether_type = rte_be_to_cpu_16(ether_type);
6974 case RTE_FLOW_ITEM_TYPE_VLAN:
6975 ret = flow_dv_validate_item_vlan(items, item_flags,
6979 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6980 MLX5_FLOW_LAYER_OUTER_VLAN;
6981 if (items->mask != NULL && items->spec != NULL) {
6983 ((const struct rte_flow_item_vlan *)
6984 items->spec)->inner_type;
6986 ((const struct rte_flow_item_vlan *)
6987 items->mask)->inner_type;
6988 ether_type = rte_be_to_cpu_16(ether_type);
6992 /* Store outer VLAN mask for of_push_vlan action. */
6994 vlan_m = items->mask;
6996 case RTE_FLOW_ITEM_TYPE_IPV4:
6997 mlx5_flow_tunnel_ip_check(items, next_protocol,
6998 &item_flags, &tunnel);
6999 ret = flow_dv_validate_item_ipv4(items, item_flags,
7000 last_item, ether_type,
7004 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7005 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7006 if (items->mask != NULL &&
7007 ((const struct rte_flow_item_ipv4 *)
7008 items->mask)->hdr.next_proto_id) {
7010 ((const struct rte_flow_item_ipv4 *)
7011 (items->spec))->hdr.next_proto_id;
7013 ((const struct rte_flow_item_ipv4 *)
7014 (items->mask))->hdr.next_proto_id;
7016 /* Reset for inner layer. */
7017 next_protocol = 0xff;
7020 case RTE_FLOW_ITEM_TYPE_IPV6:
7021 mlx5_flow_tunnel_ip_check(items, next_protocol,
7022 &item_flags, &tunnel);
7023 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7030 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7031 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7032 if (items->mask != NULL &&
7033 ((const struct rte_flow_item_ipv6 *)
7034 items->mask)->hdr.proto) {
7036 ((const struct rte_flow_item_ipv6 *)
7037 items->spec)->hdr.proto;
7039 ((const struct rte_flow_item_ipv6 *)
7040 items->spec)->hdr.proto;
7042 ((const struct rte_flow_item_ipv6 *)
7043 items->mask)->hdr.proto;
7045 /* Reset for inner layer. */
7046 next_protocol = 0xff;
7049 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7050 ret = flow_dv_validate_item_ipv6_frag_ext(items,
7055 last_item = tunnel ?
7056 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7057 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7058 if (items->mask != NULL &&
7059 ((const struct rte_flow_item_ipv6_frag_ext *)
7060 items->mask)->hdr.next_header) {
7062 ((const struct rte_flow_item_ipv6_frag_ext *)
7063 items->spec)->hdr.next_header;
7065 ((const struct rte_flow_item_ipv6_frag_ext *)
7066 items->mask)->hdr.next_header;
7068 /* Reset for inner layer. */
7069 next_protocol = 0xff;
7072 case RTE_FLOW_ITEM_TYPE_TCP:
7073 ret = mlx5_flow_validate_item_tcp
7080 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7081 MLX5_FLOW_LAYER_OUTER_L4_TCP;
7083 case RTE_FLOW_ITEM_TYPE_UDP:
7084 ret = mlx5_flow_validate_item_udp(items, item_flags,
7089 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7090 MLX5_FLOW_LAYER_OUTER_L4_UDP;
7092 case RTE_FLOW_ITEM_TYPE_GRE:
7093 ret = mlx5_flow_validate_item_gre(items, item_flags,
7094 next_protocol, error);
7098 last_item = MLX5_FLOW_LAYER_GRE;
7100 case RTE_FLOW_ITEM_TYPE_NVGRE:
7101 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7106 last_item = MLX5_FLOW_LAYER_NVGRE;
7108 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7109 ret = mlx5_flow_validate_item_gre_key
7110 (items, item_flags, gre_item, error);
7113 last_item = MLX5_FLOW_LAYER_GRE_KEY;
7115 case RTE_FLOW_ITEM_TYPE_VXLAN:
7116 ret = mlx5_flow_validate_item_vxlan(dev, items,
7121 last_item = MLX5_FLOW_LAYER_VXLAN;
7123 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7124 ret = mlx5_flow_validate_item_vxlan_gpe(items,
7129 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7131 case RTE_FLOW_ITEM_TYPE_GENEVE:
7132 ret = mlx5_flow_validate_item_geneve(items,
7137 geneve_item = items;
7138 last_item = MLX5_FLOW_LAYER_GENEVE;
7140 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7141 ret = mlx5_flow_validate_item_geneve_opt(items,
7148 last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7150 case RTE_FLOW_ITEM_TYPE_MPLS:
7151 ret = mlx5_flow_validate_item_mpls(dev, items,
7156 last_item = MLX5_FLOW_LAYER_MPLS;
7159 case RTE_FLOW_ITEM_TYPE_MARK:
7160 ret = flow_dv_validate_item_mark(dev, items, attr,
7164 last_item = MLX5_FLOW_ITEM_MARK;
7166 case RTE_FLOW_ITEM_TYPE_META:
7167 ret = flow_dv_validate_item_meta(dev, items, attr,
7171 last_item = MLX5_FLOW_ITEM_METADATA;
7173 case RTE_FLOW_ITEM_TYPE_ICMP:
7174 ret = mlx5_flow_validate_item_icmp(items, item_flags,
7179 last_item = MLX5_FLOW_LAYER_ICMP;
7181 case RTE_FLOW_ITEM_TYPE_ICMP6:
7182 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7187 item_ipv6_proto = IPPROTO_ICMPV6;
7188 last_item = MLX5_FLOW_LAYER_ICMP6;
7190 case RTE_FLOW_ITEM_TYPE_TAG:
7191 ret = flow_dv_validate_item_tag(dev, items,
7195 last_item = MLX5_FLOW_ITEM_TAG;
7197 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7198 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7200 case RTE_FLOW_ITEM_TYPE_GTP:
7201 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7206 last_item = MLX5_FLOW_LAYER_GTP;
7208 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7209 ret = flow_dv_validate_item_gtp_psc(items, last_item,
7214 last_item = MLX5_FLOW_LAYER_GTP_PSC;
7216 case RTE_FLOW_ITEM_TYPE_ECPRI:
7217 /* Capacity will be checked in the translate stage. */
7218 ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7225 last_item = MLX5_FLOW_LAYER_ECPRI;
7227 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7228 if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7229 return rte_flow_error_set
7231 RTE_FLOW_ERROR_TYPE_ITEM,
7232 NULL, "multiple integrity items not supported");
7233 ret = flow_dv_validate_item_integrity(dev, rule_items,
7237 last_item = MLX5_FLOW_ITEM_INTEGRITY;
7239 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7240 ret = flow_dv_validate_item_aso_ct(dev, items,
7241 &item_flags, error);
7245 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7246 /* tunnel offload item was processed before
7247 * list it here as a supported type
7251 return rte_flow_error_set(error, ENOTSUP,
7252 RTE_FLOW_ERROR_TYPE_ITEM,
7253 NULL, "item not supported");
7255 item_flags |= last_item;
7257 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7258 int type = actions->type;
7259 bool shared_count = false;
7261 if (!mlx5_flow_os_action_supported(type))
7262 return rte_flow_error_set(error, ENOTSUP,
7263 RTE_FLOW_ERROR_TYPE_ACTION,
7265 "action not supported");
7266 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7267 return rte_flow_error_set(error, ENOTSUP,
7268 RTE_FLOW_ERROR_TYPE_ACTION,
7269 actions, "too many actions");
7271 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7272 return rte_flow_error_set(error, ENOTSUP,
7273 RTE_FLOW_ERROR_TYPE_ACTION,
7274 NULL, "meter action with policy "
7275 "must be the last action");
7277 case RTE_FLOW_ACTION_TYPE_VOID:
7279 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7280 ret = flow_dv_validate_action_port_id(dev,
7287 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7290 case RTE_FLOW_ACTION_TYPE_FLAG:
7291 ret = flow_dv_validate_action_flag(dev, action_flags,
7295 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7296 /* Count all modify-header actions as one. */
7297 if (!(action_flags &
7298 MLX5_FLOW_MODIFY_HDR_ACTIONS))
7300 action_flags |= MLX5_FLOW_ACTION_FLAG |
7301 MLX5_FLOW_ACTION_MARK_EXT;
7302 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7303 modify_after_mirror = 1;
7306 action_flags |= MLX5_FLOW_ACTION_FLAG;
7309 rw_act_num += MLX5_ACT_NUM_SET_MARK;
7311 case RTE_FLOW_ACTION_TYPE_MARK:
7312 ret = flow_dv_validate_action_mark(dev, actions,
7317 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7318 /* Count all modify-header actions as one. */
7319 if (!(action_flags &
7320 MLX5_FLOW_MODIFY_HDR_ACTIONS))
7322 action_flags |= MLX5_FLOW_ACTION_MARK |
7323 MLX5_FLOW_ACTION_MARK_EXT;
7324 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7325 modify_after_mirror = 1;
7327 action_flags |= MLX5_FLOW_ACTION_MARK;
7330 rw_act_num += MLX5_ACT_NUM_SET_MARK;
7332 case RTE_FLOW_ACTION_TYPE_SET_META:
7333 ret = flow_dv_validate_action_set_meta(dev, actions,
7338 /* Count all modify-header actions as one action. */
7339 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7341 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7342 modify_after_mirror = 1;
7343 action_flags |= MLX5_FLOW_ACTION_SET_META;
7344 rw_act_num += MLX5_ACT_NUM_SET_META;
7346 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7347 ret = flow_dv_validate_action_set_tag(dev, actions,
7352 /* Count all modify-header actions as one action. */
7353 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7355 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7356 modify_after_mirror = 1;
7357 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7358 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7360 case RTE_FLOW_ACTION_TYPE_DROP:
7361 ret = mlx5_flow_validate_action_drop(action_flags,
7365 action_flags |= MLX5_FLOW_ACTION_DROP;
7368 case RTE_FLOW_ACTION_TYPE_QUEUE:
7369 ret = mlx5_flow_validate_action_queue(actions,
7374 queue_index = ((const struct rte_flow_action_queue *)
7375 (actions->conf))->index;
7376 action_flags |= MLX5_FLOW_ACTION_QUEUE;
7379 case RTE_FLOW_ACTION_TYPE_RSS:
7380 rss = actions->conf;
7381 ret = mlx5_flow_validate_action_rss(actions,
7387 if (rss && sample_rss &&
7388 (sample_rss->level != rss->level ||
7389 sample_rss->types != rss->types))
7390 return rte_flow_error_set(error, ENOTSUP,
7391 RTE_FLOW_ERROR_TYPE_ACTION,
7393 "Can't use the different RSS types "
7394 "or level in the same flow");
7395 if (rss != NULL && rss->queue_num)
7396 queue_index = rss->queue[0];
7397 action_flags |= MLX5_FLOW_ACTION_RSS;
7400 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7402 mlx5_flow_validate_action_default_miss(action_flags,
7406 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7409 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7410 case RTE_FLOW_ACTION_TYPE_COUNT:
7411 shared_count = is_shared_action_count(actions);
7412 ret = flow_dv_validate_action_count(dev, shared_count,
7417 action_flags |= MLX5_FLOW_ACTION_COUNT;
7420 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7421 if (flow_dv_validate_action_pop_vlan(dev,
7427 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7428 modify_after_mirror = 1;
7429 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7432 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7433 ret = flow_dv_validate_action_push_vlan(dev,
7440 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7441 modify_after_mirror = 1;
7442 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7445 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7446 ret = flow_dv_validate_action_set_vlan_pcp
7447 (action_flags, actions, error);
7450 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7451 modify_after_mirror = 1;
7452 /* Count PCP with push_vlan command. */
7453 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7455 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7456 ret = flow_dv_validate_action_set_vlan_vid
7457 (item_flags, action_flags,
7461 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7462 modify_after_mirror = 1;
7463 /* Count VID with push_vlan command. */
7464 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7465 rw_act_num += MLX5_ACT_NUM_MDF_VID;
7467 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7468 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7469 ret = flow_dv_validate_action_l2_encap(dev,
7475 action_flags |= MLX5_FLOW_ACTION_ENCAP;
7478 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7479 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7480 ret = flow_dv_validate_action_decap(dev, action_flags,
7481 actions, item_flags,
7485 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7486 modify_after_mirror = 1;
7487 action_flags |= MLX5_FLOW_ACTION_DECAP;
7490 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7491 ret = flow_dv_validate_action_raw_encap_decap
7492 (dev, NULL, actions->conf, attr, &action_flags,
7493 &actions_n, actions, item_flags, error);
7497 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7498 decap = actions->conf;
7499 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7501 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7505 encap = actions->conf;
7507 ret = flow_dv_validate_action_raw_encap_decap
7509 decap ? decap : &empty_decap, encap,
7510 attr, &action_flags, &actions_n,
7511 actions, item_flags, error);
7514 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7515 (action_flags & MLX5_FLOW_ACTION_DECAP))
7516 modify_after_mirror = 1;
7518 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7519 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7520 ret = flow_dv_validate_action_modify_mac(action_flags,
7526 /* Count all modify-header actions as one action. */
7527 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7529 action_flags |= actions->type ==
7530 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7531 MLX5_FLOW_ACTION_SET_MAC_SRC :
7532 MLX5_FLOW_ACTION_SET_MAC_DST;
7533 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7534 modify_after_mirror = 1;
7536 * Even if the source and destination MAC addresses have
7537 * overlap in the header with 4B alignment, the convert
7538 * function will handle them separately and 4 SW actions
7539 * will be created. And 2 actions will be added each
7540 * time no matter how many bytes of address will be set.
7542 rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7544 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7545 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7546 ret = flow_dv_validate_action_modify_ipv4(action_flags,
7552 /* Count all modify-header actions as one action. */
7553 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7555 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7556 modify_after_mirror = 1;
7557 action_flags |= actions->type ==
7558 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7559 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7560 MLX5_FLOW_ACTION_SET_IPV4_DST;
7561 rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7563 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7564 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7565 ret = flow_dv_validate_action_modify_ipv6(action_flags,
7571 if (item_ipv6_proto == IPPROTO_ICMPV6)
7572 return rte_flow_error_set(error, ENOTSUP,
7573 RTE_FLOW_ERROR_TYPE_ACTION,
7575 "Can't change header "
7576 "with ICMPv6 proto");
7577 /* Count all modify-header actions as one action. */
7578 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7580 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7581 modify_after_mirror = 1;
7582 action_flags |= actions->type ==
7583 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7584 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7585 MLX5_FLOW_ACTION_SET_IPV6_DST;
7586 rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7588 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7589 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7590 ret = flow_dv_validate_action_modify_tp(action_flags,
7596 /* Count all modify-header actions as one action. */
7597 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7599 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7600 modify_after_mirror = 1;
7601 action_flags |= actions->type ==
7602 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7603 MLX5_FLOW_ACTION_SET_TP_SRC :
7604 MLX5_FLOW_ACTION_SET_TP_DST;
7605 rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7607 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7608 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7609 ret = flow_dv_validate_action_modify_ttl(action_flags,
7615 /* Count all modify-header actions as one action. */
7616 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7618 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7619 modify_after_mirror = 1;
7620 action_flags |= actions->type ==
7621 RTE_FLOW_ACTION_TYPE_SET_TTL ?
7622 MLX5_FLOW_ACTION_SET_TTL :
7623 MLX5_FLOW_ACTION_DEC_TTL;
7624 rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7626 case RTE_FLOW_ACTION_TYPE_JUMP:
7627 ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7633 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7635 return rte_flow_error_set(error, EINVAL,
7636 RTE_FLOW_ERROR_TYPE_ACTION,
7638 "sample and jump action combination is not supported");
7640 action_flags |= MLX5_FLOW_ACTION_JUMP;
7642 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7643 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7644 ret = flow_dv_validate_action_modify_tcp_seq
7651 /* Count all modify-header actions as one action. */
7652 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7654 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7655 modify_after_mirror = 1;
7656 action_flags |= actions->type ==
7657 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7658 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7659 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7660 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7662 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7663 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7664 ret = flow_dv_validate_action_modify_tcp_ack
7671 /* Count all modify-header actions as one action. */
7672 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7674 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7675 modify_after_mirror = 1;
7676 action_flags |= actions->type ==
7677 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7678 MLX5_FLOW_ACTION_INC_TCP_ACK :
7679 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7680 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7682 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7684 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7685 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7686 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7688 case RTE_FLOW_ACTION_TYPE_METER:
7689 ret = mlx5_flow_validate_action_meter(dev,
7697 action_flags |= MLX5_FLOW_ACTION_METER;
7700 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7702 /* Meter action will add one more TAG action. */
7703 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7705 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7706 if (!attr->transfer && !attr->group)
7707 return rte_flow_error_set(error, ENOTSUP,
7708 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7710 "Shared ASO age action is not supported for group 0");
7711 if (action_flags & MLX5_FLOW_ACTION_AGE)
7712 return rte_flow_error_set
7714 RTE_FLOW_ERROR_TYPE_ACTION,
7716 "duplicate age actions set");
7717 action_flags |= MLX5_FLOW_ACTION_AGE;
7720 case RTE_FLOW_ACTION_TYPE_AGE:
7721 ret = flow_dv_validate_action_age(action_flags,
7727 * Validate the regular AGE action (using counter)
7728 * mutual exclusion with share counter actions.
7730 if (!priv->sh->flow_hit_aso_en) {
7732 return rte_flow_error_set
7734 RTE_FLOW_ERROR_TYPE_ACTION,
7736 "old age and shared count combination is not supported");
7738 return rte_flow_error_set
7740 RTE_FLOW_ERROR_TYPE_ACTION,
7742 "old age action and count must be in the same sub flow");
7744 action_flags |= MLX5_FLOW_ACTION_AGE;
7747 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7748 ret = flow_dv_validate_action_modify_ipv4_dscp
7755 /* Count all modify-header actions as one action. */
7756 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7758 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7759 modify_after_mirror = 1;
7760 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7761 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7763 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7764 ret = flow_dv_validate_action_modify_ipv6_dscp
7771 /* Count all modify-header actions as one action. */
7772 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7774 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7775 modify_after_mirror = 1;
7776 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7777 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7779 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7780 ret = flow_dv_validate_action_sample(&action_flags,
7789 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7792 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7793 ret = flow_dv_validate_action_modify_field(dev,
7800 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7801 modify_after_mirror = 1;
7802 /* Count all modify-header actions as one action. */
7803 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7805 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7808 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7809 ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7814 action_flags |= MLX5_FLOW_ACTION_CT;
7816 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7817 /* tunnel offload action was processed before
7818 * list it here as a supported type
7822 return rte_flow_error_set(error, ENOTSUP,
7823 RTE_FLOW_ERROR_TYPE_ACTION,
7825 "action not supported");
7829 * Validate actions in flow rules
7830 * - Explicit decap action is prohibited by the tunnel offload API.
7831 * - Drop action in tunnel steer rule is prohibited by the API.
7832 * - Application cannot use MARK action because it's value can mask
7833 * tunnel default miss nitification.
7834 * - JUMP in tunnel match rule has no support in current PMD
7836 * - TAG & META are reserved for future uses.
7838 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7839 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
7840 MLX5_FLOW_ACTION_MARK |
7841 MLX5_FLOW_ACTION_SET_TAG |
7842 MLX5_FLOW_ACTION_SET_META |
7843 MLX5_FLOW_ACTION_DROP;
7845 if (action_flags & bad_actions_mask)
7846 return rte_flow_error_set
7848 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7849 "Invalid RTE action in tunnel "
7851 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7852 return rte_flow_error_set
7854 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7855 "tunnel set decap rule must terminate "
7858 return rte_flow_error_set
7860 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7861 "tunnel flows for ingress traffic only");
7863 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7864 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
7865 MLX5_FLOW_ACTION_MARK |
7866 MLX5_FLOW_ACTION_SET_TAG |
7867 MLX5_FLOW_ACTION_SET_META;
7869 if (action_flags & bad_actions_mask)
7870 return rte_flow_error_set
7872 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7873 "Invalid RTE action in tunnel "
7877 * Validate the drop action mutual exclusion with other actions.
7878 * Drop action is mutually-exclusive with any other action, except for
7880 * Drop action compatibility with tunnel offload was already validated.
7882 if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7883 MLX5_FLOW_ACTION_TUNNEL_MATCH));
7884 else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7885 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7886 return rte_flow_error_set(error, EINVAL,
7887 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7888 "Drop action is mutually-exclusive "
7889 "with any other action, except for "
7891 /* Eswitch has few restrictions on using items and actions */
7892 if (attr->transfer) {
7893 if (!mlx5_flow_ext_mreg_supported(dev) &&
7894 action_flags & MLX5_FLOW_ACTION_FLAG)
7895 return rte_flow_error_set(error, ENOTSUP,
7896 RTE_FLOW_ERROR_TYPE_ACTION,
7898 "unsupported action FLAG");
7899 if (!mlx5_flow_ext_mreg_supported(dev) &&
7900 action_flags & MLX5_FLOW_ACTION_MARK)
7901 return rte_flow_error_set(error, ENOTSUP,
7902 RTE_FLOW_ERROR_TYPE_ACTION,
7904 "unsupported action MARK");
7905 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7906 return rte_flow_error_set(error, ENOTSUP,
7907 RTE_FLOW_ERROR_TYPE_ACTION,
7909 "unsupported action QUEUE");
7910 if (action_flags & MLX5_FLOW_ACTION_RSS)
7911 return rte_flow_error_set(error, ENOTSUP,
7912 RTE_FLOW_ERROR_TYPE_ACTION,
7914 "unsupported action RSS");
7915 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7916 return rte_flow_error_set(error, EINVAL,
7917 RTE_FLOW_ERROR_TYPE_ACTION,
7919 "no fate action is found");
7921 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7922 return rte_flow_error_set(error, EINVAL,
7923 RTE_FLOW_ERROR_TYPE_ACTION,
7925 "no fate action is found");
7928 * Continue validation for Xcap and VLAN actions.
7929 * If hairpin is working in explicit TX rule mode, there is no actions
7930 * splitting and the validation of hairpin ingress flow should be the
7931 * same as other standard flows.
7933 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7934 MLX5_FLOW_VLAN_ACTIONS)) &&
7935 (queue_index == 0xFFFF ||
7936 mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7937 ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7938 conf->tx_explicit != 0))) {
7939 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7940 MLX5_FLOW_XCAP_ACTIONS)
7941 return rte_flow_error_set(error, ENOTSUP,
7942 RTE_FLOW_ERROR_TYPE_ACTION,
7943 NULL, "encap and decap "
7944 "combination aren't supported");
7945 if (!attr->transfer && attr->ingress) {
7946 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7947 return rte_flow_error_set
7949 RTE_FLOW_ERROR_TYPE_ACTION,
7950 NULL, "encap is not supported"
7951 " for ingress traffic");
7952 else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7953 return rte_flow_error_set
7955 RTE_FLOW_ERROR_TYPE_ACTION,
7956 NULL, "push VLAN action not "
7957 "supported for ingress");
7958 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7959 MLX5_FLOW_VLAN_ACTIONS)
7960 return rte_flow_error_set
7962 RTE_FLOW_ERROR_TYPE_ACTION,
7963 NULL, "no support for "
7964 "multiple VLAN actions");
7967 if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7968 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7969 ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7971 return rte_flow_error_set
7973 RTE_FLOW_ERROR_TYPE_ACTION,
7974 NULL, "fate action not supported for "
7975 "meter with policy");
7977 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7978 return rte_flow_error_set
7980 RTE_FLOW_ERROR_TYPE_ACTION,
7981 NULL, "modify header action in egress "
7982 "cannot be done before meter action");
7983 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7984 return rte_flow_error_set
7986 RTE_FLOW_ERROR_TYPE_ACTION,
7987 NULL, "encap action in egress "
7988 "cannot be done before meter action");
7989 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7990 return rte_flow_error_set
7992 RTE_FLOW_ERROR_TYPE_ACTION,
7993 NULL, "push vlan action in egress "
7994 "cannot be done before meter action");
7998 * Hairpin flow will add one more TAG action in TX implicit mode.
7999 * In TX explicit mode, there will be no hairpin flow ID.
8002 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8003 /* extra metadata enabled: one more TAG action will be add. */
8004 if (dev_conf->dv_flow_en &&
8005 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8006 mlx5_flow_ext_mreg_supported(dev))
8007 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8009 flow_dv_modify_hdr_action_max(dev, is_root)) {
8010 return rte_flow_error_set(error, ENOTSUP,
8011 RTE_FLOW_ERROR_TYPE_ACTION,
8012 NULL, "too many header modify"
8013 " actions to support");
8015 /* Eswitch egress mirror and modify flow has limitation on CX5 */
8016 if (fdb_mirror_limit && modify_after_mirror)
8017 return rte_flow_error_set(error, EINVAL,
8018 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8019 "sample before modify action is not supported");
8024 * Internal preparation function. Allocates the DV flow size,
8025 * this size is constant.
8028 * Pointer to the rte_eth_dev structure.
8030 * Pointer to the flow attributes.
8032 * Pointer to the list of items.
8033 * @param[in] actions
8034 * Pointer to the list of actions.
8036 * Pointer to the error structure.
8039 * Pointer to mlx5_flow object on success,
8040 * otherwise NULL and rte_errno is set.
8042 static struct mlx5_flow *
8043 flow_dv_prepare(struct rte_eth_dev *dev,
8044 const struct rte_flow_attr *attr __rte_unused,
8045 const struct rte_flow_item items[] __rte_unused,
8046 const struct rte_flow_action actions[] __rte_unused,
8047 struct rte_flow_error *error)
8049 uint32_t handle_idx = 0;
8050 struct mlx5_flow *dev_flow;
8051 struct mlx5_flow_handle *dev_handle;
8052 struct mlx5_priv *priv = dev->data->dev_private;
8053 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8056 wks->skip_matcher_reg = 0;
8058 wks->final_policy = NULL;
8059 /* In case of corrupting the memory. */
8060 if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8061 rte_flow_error_set(error, ENOSPC,
8062 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8063 "not free temporary device flow");
8066 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8069 rte_flow_error_set(error, ENOMEM,
8070 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8071 "not enough memory to create flow handle");
8074 MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8075 dev_flow = &wks->flows[wks->flow_idx++];
8076 memset(dev_flow, 0, sizeof(*dev_flow));
8077 dev_flow->handle = dev_handle;
8078 dev_flow->handle_idx = handle_idx;
8079 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8080 dev_flow->ingress = attr->ingress;
8081 dev_flow->dv.transfer = attr->transfer;
8085 #ifdef RTE_LIBRTE_MLX5_DEBUG
8087 * Sanity check for match mask and value. Similar to check_valid_spec() in
8088 * kernel driver. If unmasked bit is present in value, it returns failure.
8091 * pointer to match mask buffer.
8092 * @param match_value
8093 * pointer to match value buffer.
8096 * 0 if valid, -EINVAL otherwise.
8099 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8101 uint8_t *m = match_mask;
8102 uint8_t *v = match_value;
8105 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8108 "match_value differs from match_criteria"
8109 " %p[%u] != %p[%u]",
8110 match_value, i, match_mask, i);
8119 * Add match of ip_version.
8123 * @param[in] headers_v
8124 * Values header pointer.
8125 * @param[in] headers_m
8126 * Masks header pointer.
8127 * @param[in] ip_version
8128 * The IP version to set.
8131 flow_dv_set_match_ip_version(uint32_t group,
8137 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8139 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8141 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8142 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8143 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8147 * Add Ethernet item to matcher and to the value.
8149 * @param[in, out] matcher
8151 * @param[in, out] key
8152 * Flow matcher value.
8154 * Flow pattern to translate.
8156 * Item is inner pattern.
8159 flow_dv_translate_item_eth(void *matcher, void *key,
8160 const struct rte_flow_item *item, int inner,
8163 const struct rte_flow_item_eth *eth_m = item->mask;
8164 const struct rte_flow_item_eth *eth_v = item->spec;
8165 const struct rte_flow_item_eth nic_mask = {
8166 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8167 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8168 .type = RTE_BE16(0xffff),
8181 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8183 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8185 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8187 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8189 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8190 ð_m->dst, sizeof(eth_m->dst));
8191 /* The value must be in the range of the mask. */
8192 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8193 for (i = 0; i < sizeof(eth_m->dst); ++i)
8194 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8195 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8196 ð_m->src, sizeof(eth_m->src));
8197 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8198 /* The value must be in the range of the mask. */
8199 for (i = 0; i < sizeof(eth_m->dst); ++i)
8200 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8202 * HW supports match on one Ethertype, the Ethertype following the last
8203 * VLAN tag of the packet (see PRM).
8204 * Set match on ethertype only if ETH header is not followed by VLAN.
8205 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8206 * ethertype, and use ip_version field instead.
8207 * eCPRI over Ether layer will use type value 0xAEFE.
8209 if (eth_m->type == 0xFFFF) {
8210 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
8211 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8212 switch (eth_v->type) {
8213 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8214 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8216 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8217 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8218 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8220 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8221 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8223 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8224 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8230 if (eth_m->has_vlan) {
8231 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8232 if (eth_v->has_vlan) {
8234 * Here, when also has_more_vlan field in VLAN item is
8235 * not set, only single-tagged packets will be matched.
8237 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8241 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8242 rte_be_to_cpu_16(eth_m->type));
8243 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8244 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8248 * Add VLAN item to matcher and to the value.
8250 * @param[in, out] dev_flow
8252 * @param[in, out] matcher
8254 * @param[in, out] key
8255 * Flow matcher value.
8257 * Flow pattern to translate.
8259 * Item is inner pattern.
8262 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8263 void *matcher, void *key,
8264 const struct rte_flow_item *item,
8265 int inner, uint32_t group)
8267 const struct rte_flow_item_vlan *vlan_m = item->mask;
8268 const struct rte_flow_item_vlan *vlan_v = item->spec;
8275 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8277 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8279 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8281 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8283 * This is workaround, masks are not supported,
8284 * and pre-validated.
8287 dev_flow->handle->vf_vlan.tag =
8288 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8291 * When VLAN item exists in flow, mark packet as tagged,
8292 * even if TCI is not specified.
8294 if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8295 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8296 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8301 vlan_m = &rte_flow_item_vlan_mask;
8302 tci_m = rte_be_to_cpu_16(vlan_m->tci);
8303 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8304 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8305 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8306 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8307 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8308 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8309 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8311 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8312 * ethertype, and use ip_version field instead.
8314 if (vlan_m->inner_type == 0xFFFF) {
8315 switch (vlan_v->inner_type) {
8316 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8317 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8318 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8319 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8321 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8322 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8324 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8325 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8331 if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8332 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8333 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8334 /* Only one vlan_tag bit can be set. */
8335 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8338 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8339 rte_be_to_cpu_16(vlan_m->inner_type));
8340 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8341 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8345 * Add IPV4 item to matcher and to the value.
8347 * @param[in, out] matcher
8349 * @param[in, out] key
8350 * Flow matcher value.
8352 * Flow pattern to translate.
8354 * Item is inner pattern.
8356 * The group to insert the rule.
8359 flow_dv_translate_item_ipv4(void *matcher, void *key,
8360 const struct rte_flow_item *item,
8361 int inner, uint32_t group)
8363 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8364 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8365 const struct rte_flow_item_ipv4 nic_mask = {
8367 .src_addr = RTE_BE32(0xffffffff),
8368 .dst_addr = RTE_BE32(0xffffffff),
8369 .type_of_service = 0xff,
8370 .next_proto_id = 0xff,
8371 .time_to_live = 0xff,
8381 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8383 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8385 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8387 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8389 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8394 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8395 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8396 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8397 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8398 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8399 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8400 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8401 src_ipv4_src_ipv6.ipv4_layout.ipv4);
8402 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8403 src_ipv4_src_ipv6.ipv4_layout.ipv4);
8404 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8405 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8406 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8407 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8408 ipv4_m->hdr.type_of_service);
8409 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8410 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8411 ipv4_m->hdr.type_of_service >> 2);
8412 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8413 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8414 ipv4_m->hdr.next_proto_id);
8415 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8416 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8417 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8418 ipv4_m->hdr.time_to_live);
8419 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8420 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8421 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8422 !!(ipv4_m->hdr.fragment_offset));
8423 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8424 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8428 * Add IPV6 item to matcher and to the value.
8430 * @param[in, out] matcher
8432 * @param[in, out] key
8433 * Flow matcher value.
8435 * Flow pattern to translate.
8437 * Item is inner pattern.
8439 * The group to insert the rule.
8442 flow_dv_translate_item_ipv6(void *matcher, void *key,
8443 const struct rte_flow_item *item,
8444 int inner, uint32_t group)
8446 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8447 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8448 const struct rte_flow_item_ipv6 nic_mask = {
8451 "\xff\xff\xff\xff\xff\xff\xff\xff"
8452 "\xff\xff\xff\xff\xff\xff\xff\xff",
8454 "\xff\xff\xff\xff\xff\xff\xff\xff"
8455 "\xff\xff\xff\xff\xff\xff\xff\xff",
8456 .vtc_flow = RTE_BE32(0xffffffff),
8463 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8464 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8473 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8475 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8477 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8479 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8481 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8486 size = sizeof(ipv6_m->hdr.dst_addr);
8487 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8488 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8489 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8490 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8491 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8492 for (i = 0; i < size; ++i)
8493 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8494 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8495 src_ipv4_src_ipv6.ipv6_layout.ipv6);
8496 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8497 src_ipv4_src_ipv6.ipv6_layout.ipv6);
8498 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8499 for (i = 0; i < size; ++i)
8500 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8502 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8503 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8504 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8505 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8506 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8507 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8510 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8512 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8515 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8517 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8521 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8523 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8524 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8526 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8527 ipv6_m->hdr.hop_limits);
8528 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8529 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8530 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8531 !!(ipv6_m->has_frag_ext));
8532 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8533 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8537 * Add IPV6 fragment extension item to matcher and to the value.
8539 * @param[in, out] matcher
8541 * @param[in, out] key
8542 * Flow matcher value.
8544 * Flow pattern to translate.
8546 * Item is inner pattern.
8549 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8550 const struct rte_flow_item *item,
8553 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8554 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8555 const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8557 .next_header = 0xff,
8558 .frag_data = RTE_BE16(0xffff),
8565 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8567 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8569 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8571 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8573 /* IPv6 fragment extension item exists, so packet is IP fragment. */
8574 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8575 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8576 if (!ipv6_frag_ext_v)
8578 if (!ipv6_frag_ext_m)
8579 ipv6_frag_ext_m = &nic_mask;
8580 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8581 ipv6_frag_ext_m->hdr.next_header);
8582 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8583 ipv6_frag_ext_v->hdr.next_header &
8584 ipv6_frag_ext_m->hdr.next_header);
8588 * Add TCP item to matcher and to the value.
8590 * @param[in, out] matcher
8592 * @param[in, out] key
8593 * Flow matcher value.
8595 * Flow pattern to translate.
8597 * Item is inner pattern.
8600 flow_dv_translate_item_tcp(void *matcher, void *key,
8601 const struct rte_flow_item *item,
8604 const struct rte_flow_item_tcp *tcp_m = item->mask;
8605 const struct rte_flow_item_tcp *tcp_v = item->spec;
8610 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8612 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8614 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8616 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8618 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8619 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8623 tcp_m = &rte_flow_item_tcp_mask;
8624 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8625 rte_be_to_cpu_16(tcp_m->hdr.src_port));
8626 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8627 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8628 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8629 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8630 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8631 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8632 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8633 tcp_m->hdr.tcp_flags);
8634 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8635 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8639 * Add UDP item to matcher and to the value.
8641 * @param[in, out] matcher
8643 * @param[in, out] key
8644 * Flow matcher value.
8646 * Flow pattern to translate.
8648 * Item is inner pattern.
8651 flow_dv_translate_item_udp(void *matcher, void *key,
8652 const struct rte_flow_item *item,
8655 const struct rte_flow_item_udp *udp_m = item->mask;
8656 const struct rte_flow_item_udp *udp_v = item->spec;
8661 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8663 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8665 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8667 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8669 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8670 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8674 udp_m = &rte_flow_item_udp_mask;
8675 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8676 rte_be_to_cpu_16(udp_m->hdr.src_port));
8677 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8678 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8679 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8680 rte_be_to_cpu_16(udp_m->hdr.dst_port));
8681 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8682 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8686 * Add GRE optional Key item to matcher and to the value.
8688 * @param[in, out] matcher
8690 * @param[in, out] key
8691 * Flow matcher value.
8693 * Flow pattern to translate.
8695 * Item is inner pattern.
8698 flow_dv_translate_item_gre_key(void *matcher, void *key,
8699 const struct rte_flow_item *item)
8701 const rte_be32_t *key_m = item->mask;
8702 const rte_be32_t *key_v = item->spec;
8703 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8704 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8705 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8707 /* GRE K bit must be on and should already be validated */
8708 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8709 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8713 key_m = &gre_key_default_mask;
8714 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8715 rte_be_to_cpu_32(*key_m) >> 8);
8716 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8717 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8718 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8719 rte_be_to_cpu_32(*key_m) & 0xFF);
8720 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8721 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8725 * Add GRE item to matcher and to the value.
8727 * @param[in, out] matcher
8729 * @param[in, out] key
8730 * Flow matcher value.
8732 * Flow pattern to translate.
8734 * Item is inner pattern.
8737 flow_dv_translate_item_gre(void *matcher, void *key,
8738 const struct rte_flow_item *item,
8741 const struct rte_flow_item_gre *gre_m = item->mask;
8742 const struct rte_flow_item_gre *gre_v = item->spec;
8745 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8746 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8753 uint16_t s_present:1;
8754 uint16_t k_present:1;
8755 uint16_t rsvd_bit1:1;
8756 uint16_t c_present:1;
8760 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8763 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8765 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8767 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8769 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8771 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8772 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8776 gre_m = &rte_flow_item_gre_mask;
8777 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8778 rte_be_to_cpu_16(gre_m->protocol));
8779 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8780 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8781 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8782 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8783 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8784 gre_crks_rsvd0_ver_m.c_present);
8785 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8786 gre_crks_rsvd0_ver_v.c_present &
8787 gre_crks_rsvd0_ver_m.c_present);
8788 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8789 gre_crks_rsvd0_ver_m.k_present);
8790 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8791 gre_crks_rsvd0_ver_v.k_present &
8792 gre_crks_rsvd0_ver_m.k_present);
8793 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8794 gre_crks_rsvd0_ver_m.s_present);
8795 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8796 gre_crks_rsvd0_ver_v.s_present &
8797 gre_crks_rsvd0_ver_m.s_present);
8801 * Add NVGRE item to matcher and to the value.
8803 * @param[in, out] matcher
8805 * @param[in, out] key
8806 * Flow matcher value.
8808 * Flow pattern to translate.
8810 * Item is inner pattern.
8813 flow_dv_translate_item_nvgre(void *matcher, void *key,
8814 const struct rte_flow_item *item,
8817 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8818 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8819 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8820 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8821 const char *tni_flow_id_m;
8822 const char *tni_flow_id_v;
8828 /* For NVGRE, GRE header fields must be set with defined values. */
8829 const struct rte_flow_item_gre gre_spec = {
8830 .c_rsvd0_ver = RTE_BE16(0x2000),
8831 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8833 const struct rte_flow_item_gre gre_mask = {
8834 .c_rsvd0_ver = RTE_BE16(0xB000),
8835 .protocol = RTE_BE16(UINT16_MAX),
8837 const struct rte_flow_item gre_item = {
8842 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8846 nvgre_m = &rte_flow_item_nvgre_mask;
8847 tni_flow_id_m = (const char *)nvgre_m->tni;
8848 tni_flow_id_v = (const char *)nvgre_v->tni;
8849 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8850 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8851 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8852 memcpy(gre_key_m, tni_flow_id_m, size);
8853 for (i = 0; i < size; ++i)
8854 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8858 * Add VXLAN item to matcher and to the value.
8861 * Pointer to the Ethernet device structure.
8863 * Flow rule attributes.
8864 * @param[in, out] matcher
8866 * @param[in, out] key
8867 * Flow matcher value.
8869 * Flow pattern to translate.
8871 * Item is inner pattern.
8874 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8875 const struct rte_flow_attr *attr,
8876 void *matcher, void *key,
8877 const struct rte_flow_item *item,
8880 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8881 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8886 uint32_t *tunnel_header_v;
8887 uint32_t *tunnel_header_m;
8889 struct mlx5_priv *priv = dev->data->dev_private;
8890 const struct rte_flow_item_vxlan nic_mask = {
8891 .vni = "\xff\xff\xff",
8896 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8898 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8900 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8902 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8904 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8905 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8906 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8907 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8908 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8913 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8914 (attr->group && !priv->sh->misc5_cap))
8915 vxlan_m = &rte_flow_item_vxlan_mask;
8917 vxlan_m = &nic_mask;
8919 if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8920 ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8927 misc_m = MLX5_ADDR_OF(fte_match_param,
8928 matcher, misc_parameters);
8929 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8930 size = sizeof(vxlan_m->vni);
8931 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8932 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8933 memcpy(vni_m, vxlan_m->vni, size);
8934 for (i = 0; i < size; ++i)
8935 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8938 misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8939 misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8940 tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8943 tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8946 *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8947 (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8948 (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8949 if (*tunnel_header_v)
8950 *tunnel_header_m = vxlan_m->vni[0] |
8951 vxlan_m->vni[1] << 8 |
8952 vxlan_m->vni[2] << 16;
8954 *tunnel_header_m = 0x0;
8955 *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8956 if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8957 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8961 * Add VXLAN-GPE item to matcher and to the value.
8963 * @param[in, out] matcher
8965 * @param[in, out] key
8966 * Flow matcher value.
8968 * Flow pattern to translate.
8970 * Item is inner pattern.
8974 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8975 const struct rte_flow_item *item, int inner)
8977 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8978 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8982 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8984 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8990 uint8_t flags_m = 0xff;
8991 uint8_t flags_v = 0xc;
8994 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8996 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8998 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9000 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9002 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
9003 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
9004 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9005 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9006 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9011 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9012 size = sizeof(vxlan_m->vni);
9013 vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9014 vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9015 memcpy(vni_m, vxlan_m->vni, size);
9016 for (i = 0; i < size; ++i)
9017 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9018 if (vxlan_m->flags) {
9019 flags_m = vxlan_m->flags;
9020 flags_v = vxlan_v->flags;
9022 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9023 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9024 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
9026 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
9031 * Add Geneve item to matcher and to the value.
9033 * @param[in, out] matcher
9035 * @param[in, out] key
9036 * Flow matcher value.
9038 * Flow pattern to translate.
9040 * Item is inner pattern.
9044 flow_dv_translate_item_geneve(void *matcher, void *key,
9045 const struct rte_flow_item *item, int inner)
9047 const struct rte_flow_item_geneve *geneve_m = item->mask;
9048 const struct rte_flow_item_geneve *geneve_v = item->spec;
9051 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9052 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9061 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9063 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9065 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9067 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9069 dport = MLX5_UDP_PORT_GENEVE;
9070 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9071 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9072 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9077 geneve_m = &rte_flow_item_geneve_mask;
9078 size = sizeof(geneve_m->vni);
9079 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9080 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9081 memcpy(vni_m, geneve_m->vni, size);
9082 for (i = 0; i < size; ++i)
9083 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9084 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9085 rte_be_to_cpu_16(geneve_m->protocol));
9086 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9087 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9088 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9089 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9090 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9091 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9092 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9093 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9094 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9095 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9096 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9097 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9098 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9102 * Create Geneve TLV option resource.
9104 * @param dev[in, out]
9105 * Pointer to rte_eth_dev structure.
9106 * @param[in, out] tag_be24
9107 * Tag value in big endian then R-shift 8.
9108 * @parm[in, out] dev_flow
9109 * Pointer to the dev_flow.
9111 * pointer to error structure.
9114 * 0 on success otherwise -errno and errno is set.
9118 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9119 const struct rte_flow_item *item,
9120 struct rte_flow_error *error)
9122 struct mlx5_priv *priv = dev->data->dev_private;
9123 struct mlx5_dev_ctx_shared *sh = priv->sh;
9124 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9125 sh->geneve_tlv_option_resource;
9126 struct mlx5_devx_obj *obj;
9127 const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9132 rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9133 if (geneve_opt_resource != NULL) {
9134 if (geneve_opt_resource->option_class ==
9135 geneve_opt_v->option_class &&
9136 geneve_opt_resource->option_type ==
9137 geneve_opt_v->option_type &&
9138 geneve_opt_resource->length ==
9139 geneve_opt_v->option_len) {
9140 /* We already have GENVE TLV option obj allocated. */
9141 __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9144 ret = rte_flow_error_set(error, ENOMEM,
9145 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9146 "Only one GENEVE TLV option supported");
9150 /* Create a GENEVE TLV object and resource. */
9151 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
9152 geneve_opt_v->option_class,
9153 geneve_opt_v->option_type,
9154 geneve_opt_v->option_len);
9156 ret = rte_flow_error_set(error, ENODATA,
9157 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9158 "Failed to create GENEVE TLV Devx object");
9161 sh->geneve_tlv_option_resource =
9162 mlx5_malloc(MLX5_MEM_ZERO,
9163 sizeof(*geneve_opt_resource),
9165 if (!sh->geneve_tlv_option_resource) {
9166 claim_zero(mlx5_devx_cmd_destroy(obj));
9167 ret = rte_flow_error_set(error, ENOMEM,
9168 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9169 "GENEVE TLV object memory allocation failed");
9172 geneve_opt_resource = sh->geneve_tlv_option_resource;
9173 geneve_opt_resource->obj = obj;
9174 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9175 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9176 geneve_opt_resource->length = geneve_opt_v->option_len;
9177 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9181 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9186 * Add Geneve TLV option item to matcher.
9188 * @param[in, out] dev
9189 * Pointer to rte_eth_dev structure.
9190 * @param[in, out] matcher
9192 * @param[in, out] key
9193 * Flow matcher value.
9195 * Flow pattern to translate.
9197 * Pointer to error structure.
9200 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9201 void *key, const struct rte_flow_item *item,
9202 struct rte_flow_error *error)
9204 const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9205 const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9206 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9207 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9208 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9210 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9211 rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9217 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9218 ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9221 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9225 * Set the option length in GENEVE header if not requested.
9226 * The GENEVE TLV option length is expressed by the option length field
9227 * in the GENEVE header.
9228 * If the option length was not requested but the GENEVE TLV option item
9229 * is present we set the option length field implicitly.
9231 if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9232 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9233 MLX5_GENEVE_OPTLEN_MASK);
9234 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9235 geneve_opt_v->option_len + 1);
9238 if (geneve_opt_v->data) {
9239 memcpy(&opt_data_key, geneve_opt_v->data,
9240 RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9241 sizeof(opt_data_key)));
9242 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9243 sizeof(opt_data_key));
9244 memcpy(&opt_data_mask, geneve_opt_m->data,
9245 RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9246 sizeof(opt_data_mask)));
9247 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9248 sizeof(opt_data_mask));
9249 MLX5_SET(fte_match_set_misc3, misc3_m,
9250 geneve_tlv_option_0_data,
9251 rte_be_to_cpu_32(opt_data_mask));
9252 MLX5_SET(fte_match_set_misc3, misc3_v,
9253 geneve_tlv_option_0_data,
9254 rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9260 * Add MPLS item to matcher and to the value.
9262 * @param[in, out] matcher
9264 * @param[in, out] key
9265 * Flow matcher value.
9267 * Flow pattern to translate.
9268 * @param[in] prev_layer
9269 * The protocol layer indicated in previous item.
9271 * Item is inner pattern.
9274 flow_dv_translate_item_mpls(void *matcher, void *key,
9275 const struct rte_flow_item *item,
9276 uint64_t prev_layer,
9279 const uint32_t *in_mpls_m = item->mask;
9280 const uint32_t *in_mpls_v = item->spec;
9281 uint32_t *out_mpls_m = 0;
9282 uint32_t *out_mpls_v = 0;
9283 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9284 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9285 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9287 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9288 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9289 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9291 switch (prev_layer) {
9292 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9293 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9294 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9295 MLX5_UDP_PORT_MPLS);
9297 case MLX5_FLOW_LAYER_GRE:
9299 case MLX5_FLOW_LAYER_GRE_KEY:
9300 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9301 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9302 RTE_ETHER_TYPE_MPLS);
9310 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9311 switch (prev_layer) {
9312 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9314 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9315 outer_first_mpls_over_udp);
9317 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9318 outer_first_mpls_over_udp);
9320 case MLX5_FLOW_LAYER_GRE:
9322 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9323 outer_first_mpls_over_gre);
9325 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9326 outer_first_mpls_over_gre);
9329 /* Inner MPLS not over GRE is not supported. */
9332 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9336 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9342 if (out_mpls_m && out_mpls_v) {
9343 *out_mpls_m = *in_mpls_m;
9344 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9349 * Add metadata register item to matcher
9351 * @param[in, out] matcher
9353 * @param[in, out] key
9354 * Flow matcher value.
9355 * @param[in] reg_type
9356 * Type of device metadata register
9363 flow_dv_match_meta_reg(void *matcher, void *key,
9364 enum modify_reg reg_type,
9365 uint32_t data, uint32_t mask)
9368 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9370 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9376 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9377 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9380 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9381 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9385 * The metadata register C0 field might be divided into
9386 * source vport index and META item value, we should set
9387 * this field according to specified mask, not as whole one.
9389 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9391 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9392 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9395 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9398 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9399 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9402 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9403 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9406 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9407 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9410 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9411 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9414 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9415 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9418 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9419 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9422 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9423 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9432 * Add MARK item to matcher
9435 * The device to configure through.
9436 * @param[in, out] matcher
9438 * @param[in, out] key
9439 * Flow matcher value.
9441 * Flow pattern to translate.
9444 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9445 void *matcher, void *key,
9446 const struct rte_flow_item *item)
9448 struct mlx5_priv *priv = dev->data->dev_private;
9449 const struct rte_flow_item_mark *mark;
9453 mark = item->mask ? (const void *)item->mask :
9454 &rte_flow_item_mark_mask;
9455 mask = mark->id & priv->sh->dv_mark_mask;
9456 mark = (const void *)item->spec;
9458 value = mark->id & priv->sh->dv_mark_mask & mask;
9460 enum modify_reg reg;
9462 /* Get the metadata register index for the mark. */
9463 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9464 MLX5_ASSERT(reg > 0);
9465 if (reg == REG_C_0) {
9466 struct mlx5_priv *priv = dev->data->dev_private;
9467 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9468 uint32_t shl_c0 = rte_bsf32(msk_c0);
9474 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9479 * Add META item to matcher
9482 * The devich to configure through.
9483 * @param[in, out] matcher
9485 * @param[in, out] key
9486 * Flow matcher value.
9488 * Attributes of flow that includes this item.
9490 * Flow pattern to translate.
9493 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9494 void *matcher, void *key,
9495 const struct rte_flow_attr *attr,
9496 const struct rte_flow_item *item)
9498 const struct rte_flow_item_meta *meta_m;
9499 const struct rte_flow_item_meta *meta_v;
9501 meta_m = (const void *)item->mask;
9503 meta_m = &rte_flow_item_meta_mask;
9504 meta_v = (const void *)item->spec;
9507 uint32_t value = meta_v->data;
9508 uint32_t mask = meta_m->data;
9510 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9513 MLX5_ASSERT(reg != REG_NON);
9514 if (reg == REG_C_0) {
9515 struct mlx5_priv *priv = dev->data->dev_private;
9516 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9517 uint32_t shl_c0 = rte_bsf32(msk_c0);
9523 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9528 * Add vport metadata Reg C0 item to matcher
9530 * @param[in, out] matcher
9532 * @param[in, out] key
9533 * Flow matcher value.
9535 * Flow pattern to translate.
9538 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9539 uint32_t value, uint32_t mask)
9541 flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9545 * Add tag item to matcher
9548 * The devich to configure through.
9549 * @param[in, out] matcher
9551 * @param[in, out] key
9552 * Flow matcher value.
9554 * Flow pattern to translate.
9557 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9558 void *matcher, void *key,
9559 const struct rte_flow_item *item)
9561 const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9562 const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9563 uint32_t mask, value;
9566 value = tag_v->data;
9567 mask = tag_m ? tag_m->data : UINT32_MAX;
9568 if (tag_v->id == REG_C_0) {
9569 struct mlx5_priv *priv = dev->data->dev_private;
9570 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9571 uint32_t shl_c0 = rte_bsf32(msk_c0);
9577 flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9581 * Add TAG item to matcher
9584 * The devich to configure through.
9585 * @param[in, out] matcher
9587 * @param[in, out] key
9588 * Flow matcher value.
9590 * Flow pattern to translate.
9593 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9594 void *matcher, void *key,
9595 const struct rte_flow_item *item)
9597 const struct rte_flow_item_tag *tag_v = item->spec;
9598 const struct rte_flow_item_tag *tag_m = item->mask;
9599 enum modify_reg reg;
9602 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9603 /* Get the metadata register index for the tag. */
9604 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9605 MLX5_ASSERT(reg > 0);
9606 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9610 * Add source vport match to the specified matcher.
9612 * @param[in, out] matcher
9614 * @param[in, out] key
9615 * Flow matcher value.
9617 * Source vport value to match
9622 flow_dv_translate_item_source_vport(void *matcher, void *key,
9623 int16_t port, uint16_t mask)
9625 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9626 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9628 MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9629 MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9633 * Translate port-id item to eswitch match on port-id.
9636 * The devich to configure through.
9637 * @param[in, out] matcher
9639 * @param[in, out] key
9640 * Flow matcher value.
9642 * Flow pattern to translate.
9647 * 0 on success, a negative errno value otherwise.
9650 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9651 void *key, const struct rte_flow_item *item,
9652 const struct rte_flow_attr *attr)
9654 const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9655 const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9656 struct mlx5_priv *priv;
9659 mask = pid_m ? pid_m->id : 0xffff;
9660 id = pid_v ? pid_v->id : dev->data->port_id;
9661 priv = mlx5_port_to_eswitch_info(id, item == NULL);
9665 * Translate to vport field or to metadata, depending on mode.
9666 * Kernel can use either misc.source_port or half of C0 metadata
9669 if (priv->vport_meta_mask) {
9671 * Provide the hint for SW steering library
9672 * to insert the flow into ingress domain and
9673 * save the extra vport match.
9675 if (mask == 0xffff && priv->vport_id == 0xffff &&
9676 priv->pf_bond < 0 && attr->transfer)
9677 flow_dv_translate_item_source_vport
9678 (matcher, key, priv->vport_id, mask);
9680 * We should always set the vport metadata register,
9681 * otherwise the SW steering library can drop
9682 * the rule if wire vport metadata value is not zero,
9683 * it depends on kernel configuration.
9685 flow_dv_translate_item_meta_vport(matcher, key,
9686 priv->vport_meta_tag,
9687 priv->vport_meta_mask);
9689 flow_dv_translate_item_source_vport(matcher, key,
9690 priv->vport_id, mask);
9696 * Add ICMP6 item to matcher and to the value.
9698 * @param[in, out] matcher
9700 * @param[in, out] key
9701 * Flow matcher value.
9703 * Flow pattern to translate.
9705 * Item is inner pattern.
9708 flow_dv_translate_item_icmp6(void *matcher, void *key,
9709 const struct rte_flow_item *item,
9712 const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9713 const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9716 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9718 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9720 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9722 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9724 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9726 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9728 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9729 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9733 icmp6_m = &rte_flow_item_icmp6_mask;
9734 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9735 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9736 icmp6_v->type & icmp6_m->type);
9737 MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9738 MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9739 icmp6_v->code & icmp6_m->code);
9743 * Add ICMP item to matcher and to the value.
9745 * @param[in, out] matcher
9747 * @param[in, out] key
9748 * Flow matcher value.
9750 * Flow pattern to translate.
9752 * Item is inner pattern.
9755 flow_dv_translate_item_icmp(void *matcher, void *key,
9756 const struct rte_flow_item *item,
9759 const struct rte_flow_item_icmp *icmp_m = item->mask;
9760 const struct rte_flow_item_icmp *icmp_v = item->spec;
9761 uint32_t icmp_header_data_m = 0;
9762 uint32_t icmp_header_data_v = 0;
9765 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9767 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9769 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9771 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9773 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9775 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9777 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9778 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9782 icmp_m = &rte_flow_item_icmp_mask;
9783 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9784 icmp_m->hdr.icmp_type);
9785 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9786 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9787 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9788 icmp_m->hdr.icmp_code);
9789 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9790 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9791 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9792 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9793 if (icmp_header_data_m) {
9794 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9795 icmp_header_data_v |=
9796 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9797 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9798 icmp_header_data_m);
9799 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9800 icmp_header_data_v & icmp_header_data_m);
9805 * Add GTP item to matcher and to the value.
9807 * @param[in, out] matcher
9809 * @param[in, out] key
9810 * Flow matcher value.
9812 * Flow pattern to translate.
9814 * Item is inner pattern.
9817 flow_dv_translate_item_gtp(void *matcher, void *key,
9818 const struct rte_flow_item *item, int inner)
9820 const struct rte_flow_item_gtp *gtp_m = item->mask;
9821 const struct rte_flow_item_gtp *gtp_v = item->spec;
9824 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9826 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9827 uint16_t dport = RTE_GTPU_UDP_PORT;
9830 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9832 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9834 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9836 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9838 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9839 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9840 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9845 gtp_m = &rte_flow_item_gtp_mask;
9846 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9847 gtp_m->v_pt_rsv_flags);
9848 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9849 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9850 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9851 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9852 gtp_v->msg_type & gtp_m->msg_type);
9853 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9854 rte_be_to_cpu_32(gtp_m->teid));
9855 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9856 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9860 * Add GTP PSC item to matcher.
9862 * @param[in, out] matcher
9864 * @param[in, out] key
9865 * Flow matcher value.
9867 * Flow pattern to translate.
9870 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9871 const struct rte_flow_item *item)
9873 const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9874 const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9875 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9877 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9883 uint8_t next_ext_header_type;
9888 /* Always set E-flag match on one, regardless of GTP item settings. */
9889 gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9890 gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9891 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9892 gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9893 gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9894 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9895 /*Set next extension header type. */
9898 dw_2.next_ext_header_type = 0xff;
9899 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9900 rte_cpu_to_be_32(dw_2.w32));
9903 dw_2.next_ext_header_type = 0x85;
9904 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9905 rte_cpu_to_be_32(dw_2.w32));
9917 /*Set extension header PDU type and Qos. */
9919 gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9921 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9922 dw_0.qfi = gtp_psc_m->qfi;
9923 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9924 rte_cpu_to_be_32(dw_0.w32));
9926 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9927 gtp_psc_m->pdu_type);
9928 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9929 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9930 rte_cpu_to_be_32(dw_0.w32));
9936 * Add eCPRI item to matcher and to the value.
9939 * The devich to configure through.
9940 * @param[in, out] matcher
9942 * @param[in, out] key
9943 * Flow matcher value.
9945 * Flow pattern to translate.
9946 * @param[in] samples
9947 * Sample IDs to be used in the matching.
9950 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9951 void *key, const struct rte_flow_item *item)
9953 struct mlx5_priv *priv = dev->data->dev_private;
9954 const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9955 const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9956 struct rte_ecpri_common_hdr common;
9957 void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9959 void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9967 ecpri_m = &rte_flow_item_ecpri_mask;
9969 * Maximal four DW samples are supported in a single matching now.
9970 * Two are used now for a eCPRI matching:
9971 * 1. Type: one byte, mask should be 0x00ff0000 in network order
9972 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9975 if (!ecpri_m->hdr.common.u32)
9977 samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9978 /* Need to take the whole DW as the mask to fill the entry. */
9979 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9980 prog_sample_field_value_0);
9981 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9982 prog_sample_field_value_0);
9983 /* Already big endian (network order) in the header. */
9984 *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9985 *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9986 /* Sample#0, used for matching type, offset 0. */
9987 MLX5_SET(fte_match_set_misc4, misc4_m,
9988 prog_sample_field_id_0, samples[0]);
9989 /* It makes no sense to set the sample ID in the mask field. */
9990 MLX5_SET(fte_match_set_misc4, misc4_v,
9991 prog_sample_field_id_0, samples[0]);
9993 * Checking if message body part needs to be matched.
9994 * Some wildcard rules only matching type field should be supported.
9996 if (ecpri_m->hdr.dummy[0]) {
9997 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9998 switch (common.type) {
9999 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10000 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10001 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10002 dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10003 prog_sample_field_value_1);
10004 dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10005 prog_sample_field_value_1);
10006 *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10007 *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10008 ecpri_m->hdr.dummy[0];
10009 /* Sample#1, to match message body, offset 4. */
10010 MLX5_SET(fte_match_set_misc4, misc4_m,
10011 prog_sample_field_id_1, samples[1]);
10012 MLX5_SET(fte_match_set_misc4, misc4_v,
10013 prog_sample_field_id_1, samples[1]);
10016 /* Others, do not match any sample ID. */
10023 * Add connection tracking status item to matcher
10026 * The devich to configure through.
10027 * @param[in, out] matcher
10029 * @param[in, out] key
10030 * Flow matcher value.
10032 * Flow pattern to translate.
10035 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10036 void *matcher, void *key,
10037 const struct rte_flow_item *item)
10039 uint32_t reg_value = 0;
10041 /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
10042 uint32_t reg_mask = 0;
10043 const struct rte_flow_item_conntrack *spec = item->spec;
10044 const struct rte_flow_item_conntrack *mask = item->mask;
10046 struct rte_flow_error error;
10049 mask = &rte_flow_item_conntrack_mask;
10050 if (!spec || !mask->flags)
10052 flags = spec->flags & mask->flags;
10053 /* The conflict should be checked in the validation. */
10054 if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10055 reg_value |= MLX5_CT_SYNDROME_VALID;
10056 if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10057 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10058 if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10059 reg_value |= MLX5_CT_SYNDROME_INVALID;
10060 if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10061 reg_value |= MLX5_CT_SYNDROME_TRAP;
10062 if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10063 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10064 if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10065 RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10066 RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10068 if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10069 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10070 if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10071 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10072 /* The REG_C_x value could be saved during startup. */
10073 reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10074 if (reg_id == REG_NON)
10076 flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10077 reg_value, reg_mask);
10080 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10082 #define HEADER_IS_ZERO(match_criteria, headers) \
10083 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
10084 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10087 * Calculate flow matcher enable bitmap.
10089 * @param match_criteria
10090 * Pointer to flow matcher criteria.
10093 * Bitmap of enabled fields.
10096 flow_dv_matcher_enable(uint32_t *match_criteria)
10098 uint8_t match_criteria_enable;
10100 match_criteria_enable =
10101 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10102 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10103 match_criteria_enable |=
10104 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10105 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10106 match_criteria_enable |=
10107 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10108 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10109 match_criteria_enable |=
10110 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10111 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10112 match_criteria_enable |=
10113 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10114 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10115 match_criteria_enable |=
10116 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10117 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10118 match_criteria_enable |=
10119 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10120 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10121 return match_criteria_enable;
10125 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10128 * Check flow matching criteria first, subtract misc5/4 length if flow
10129 * doesn't own misc5/4 parameters. In some old rdma-core releases,
10130 * misc5/4 are not supported, and matcher creation failure is expected
10131 * w/o subtration. If misc5 is provided, misc4 must be counted in since
10132 * misc5 is right after misc4.
10134 if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10135 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10136 MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10137 if (!(match_criteria & (1 <<
10138 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10139 *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10144 static struct mlx5_list_entry *
10145 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10146 struct mlx5_list_entry *entry, void *cb_ctx)
10148 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10149 struct mlx5_flow_dv_matcher *ref = ctx->data;
10150 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10151 typeof(*tbl), tbl);
10152 struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10157 rte_flow_error_set(ctx->error, ENOMEM,
10158 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10159 "cannot create matcher");
10162 memcpy(resource, entry, sizeof(*resource));
10163 resource->tbl = &tbl->tbl;
10164 return &resource->entry;
10168 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10169 struct mlx5_list_entry *entry)
10174 struct mlx5_list_entry *
10175 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10177 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10178 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10179 struct rte_eth_dev *dev = ctx->dev;
10180 struct mlx5_flow_tbl_data_entry *tbl_data;
10181 struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10182 struct rte_flow_error *error = ctx->error;
10183 union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10184 struct mlx5_flow_tbl_resource *tbl;
10189 tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10191 rte_flow_error_set(error, ENOMEM,
10192 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10194 "cannot allocate flow table data entry");
10197 tbl_data->idx = idx;
10198 tbl_data->tunnel = tt_prm->tunnel;
10199 tbl_data->group_id = tt_prm->group_id;
10200 tbl_data->external = !!tt_prm->external;
10201 tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10202 tbl_data->is_egress = !!key.is_egress;
10203 tbl_data->is_transfer = !!key.is_fdb;
10204 tbl_data->dummy = !!key.dummy;
10205 tbl_data->level = key.level;
10206 tbl_data->id = key.id;
10207 tbl = &tbl_data->tbl;
10209 return &tbl_data->entry;
10211 domain = sh->fdb_domain;
10212 else if (key.is_egress)
10213 domain = sh->tx_domain;
10215 domain = sh->rx_domain;
10216 ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10218 rte_flow_error_set(error, ENOMEM,
10219 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10220 NULL, "cannot create flow table object");
10221 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10224 if (key.level != 0) {
10225 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10226 (tbl->obj, &tbl_data->jump.action);
10228 rte_flow_error_set(error, ENOMEM,
10229 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10231 "cannot create flow jump action");
10232 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10233 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10237 MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10238 key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10239 key.level, key.id);
10240 tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10241 flow_dv_matcher_create_cb,
10242 flow_dv_matcher_match_cb,
10243 flow_dv_matcher_remove_cb,
10244 flow_dv_matcher_clone_cb,
10245 flow_dv_matcher_clone_free_cb);
10246 if (!tbl_data->matchers) {
10247 rte_flow_error_set(error, ENOMEM,
10248 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10250 "cannot create tbl matcher list");
10251 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10252 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10253 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10256 return &tbl_data->entry;
10260 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10263 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10264 struct mlx5_flow_tbl_data_entry *tbl_data =
10265 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10266 union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10268 return tbl_data->level != key.level ||
10269 tbl_data->id != key.id ||
10270 tbl_data->dummy != key.dummy ||
10271 tbl_data->is_transfer != !!key.is_fdb ||
10272 tbl_data->is_egress != !!key.is_egress;
10275 struct mlx5_list_entry *
10276 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10279 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10280 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10281 struct mlx5_flow_tbl_data_entry *tbl_data;
10282 struct rte_flow_error *error = ctx->error;
10285 tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10287 rte_flow_error_set(error, ENOMEM,
10288 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10290 "cannot allocate flow table data entry");
10293 memcpy(tbl_data, oentry, sizeof(*tbl_data));
10294 tbl_data->idx = idx;
10295 return &tbl_data->entry;
10299 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10301 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10302 struct mlx5_flow_tbl_data_entry *tbl_data =
10303 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10305 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10309 * Get a flow table.
10311 * @param[in, out] dev
10312 * Pointer to rte_eth_dev structure.
10313 * @param[in] table_level
10314 * Table level to use.
10315 * @param[in] egress
10316 * Direction of the table.
10317 * @param[in] transfer
10318 * E-Switch or NIC flow.
10320 * Dummy entry for dv API.
10321 * @param[in] table_id
10323 * @param[out] error
10324 * pointer to error structure.
10327 * Returns tables resource based on the index, NULL in case of failed.
10329 struct mlx5_flow_tbl_resource *
10330 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10331 uint32_t table_level, uint8_t egress,
10334 const struct mlx5_flow_tunnel *tunnel,
10335 uint32_t group_id, uint8_t dummy,
10337 struct rte_flow_error *error)
10339 struct mlx5_priv *priv = dev->data->dev_private;
10340 union mlx5_flow_tbl_key table_key = {
10342 .level = table_level,
10346 .is_fdb = !!transfer,
10347 .is_egress = !!egress,
10350 struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10352 .group_id = group_id,
10353 .external = external,
10355 struct mlx5_flow_cb_ctx ctx = {
10358 .data = &table_key.v64,
10361 struct mlx5_list_entry *entry;
10362 struct mlx5_flow_tbl_data_entry *tbl_data;
10364 entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10366 rte_flow_error_set(error, ENOMEM,
10367 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10368 "cannot get table");
10371 DRV_LOG(DEBUG, "table_level %u table_id %u "
10372 "tunnel %u group %u registered.",
10373 table_level, table_id,
10374 tunnel ? tunnel->tunnel_id : 0, group_id);
10375 tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10376 return &tbl_data->tbl;
10380 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10382 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10383 struct mlx5_flow_tbl_data_entry *tbl_data =
10384 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10386 MLX5_ASSERT(entry && sh);
10387 if (tbl_data->jump.action)
10388 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10389 if (tbl_data->tbl.obj)
10390 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10391 if (tbl_data->tunnel_offload && tbl_data->external) {
10392 struct mlx5_list_entry *he;
10393 struct mlx5_hlist *tunnel_grp_hash;
10394 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10395 union tunnel_tbl_key tunnel_key = {
10396 .tunnel_id = tbl_data->tunnel ?
10397 tbl_data->tunnel->tunnel_id : 0,
10398 .group = tbl_data->group_id
10400 uint32_t table_level = tbl_data->level;
10401 struct mlx5_flow_cb_ctx ctx = {
10402 .data = (void *)&tunnel_key.val,
10405 tunnel_grp_hash = tbl_data->tunnel ?
10406 tbl_data->tunnel->groups :
10408 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10410 mlx5_hlist_unregister(tunnel_grp_hash, he);
10412 "table_level %u id %u tunnel %u group %u released.",
10416 tbl_data->tunnel->tunnel_id : 0,
10417 tbl_data->group_id);
10419 mlx5_list_destroy(tbl_data->matchers);
10420 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10424 * Release a flow table.
10427 * Pointer to device shared structure.
10429 * Table resource to be released.
10432 * Returns 0 if table was released, else return 1;
10435 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10436 struct mlx5_flow_tbl_resource *tbl)
10438 struct mlx5_flow_tbl_data_entry *tbl_data =
10439 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10443 return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10447 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10448 struct mlx5_list_entry *entry, void *cb_ctx)
10450 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10451 struct mlx5_flow_dv_matcher *ref = ctx->data;
10452 struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10455 return cur->crc != ref->crc ||
10456 cur->priority != ref->priority ||
10457 memcmp((const void *)cur->mask.buf,
10458 (const void *)ref->mask.buf, ref->mask.size);
10461 struct mlx5_list_entry *
10462 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10464 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10465 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10466 struct mlx5_flow_dv_matcher *ref = ctx->data;
10467 struct mlx5_flow_dv_matcher *resource;
10468 struct mlx5dv_flow_matcher_attr dv_attr = {
10469 .type = IBV_FLOW_ATTR_NORMAL,
10470 .match_mask = (void *)&ref->mask,
10472 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10473 typeof(*tbl), tbl);
10476 resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10479 rte_flow_error_set(ctx->error, ENOMEM,
10480 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10481 "cannot create matcher");
10485 dv_attr.match_criteria_enable =
10486 flow_dv_matcher_enable(resource->mask.buf);
10487 __flow_dv_adjust_buf_size(&ref->mask.size,
10488 dv_attr.match_criteria_enable);
10489 dv_attr.priority = ref->priority;
10490 if (tbl->is_egress)
10491 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10492 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10493 &resource->matcher_object);
10495 mlx5_free(resource);
10496 rte_flow_error_set(ctx->error, ENOMEM,
10497 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10498 "cannot create matcher");
10501 return &resource->entry;
10505 * Register the flow matcher.
10507 * @param[in, out] dev
10508 * Pointer to rte_eth_dev structure.
10509 * @param[in, out] matcher
10510 * Pointer to flow matcher.
10511 * @param[in, out] key
10512 * Pointer to flow table key.
10513 * @parm[in, out] dev_flow
10514 * Pointer to the dev_flow.
10515 * @param[out] error
10516 * pointer to error structure.
10519 * 0 on success otherwise -errno and errno is set.
10522 flow_dv_matcher_register(struct rte_eth_dev *dev,
10523 struct mlx5_flow_dv_matcher *ref,
10524 union mlx5_flow_tbl_key *key,
10525 struct mlx5_flow *dev_flow,
10526 const struct mlx5_flow_tunnel *tunnel,
10528 struct rte_flow_error *error)
10530 struct mlx5_list_entry *entry;
10531 struct mlx5_flow_dv_matcher *resource;
10532 struct mlx5_flow_tbl_resource *tbl;
10533 struct mlx5_flow_tbl_data_entry *tbl_data;
10534 struct mlx5_flow_cb_ctx ctx = {
10539 * tunnel offload API requires this registration for cases when
10540 * tunnel match rule was inserted before tunnel set rule.
10542 tbl = flow_dv_tbl_resource_get(dev, key->level,
10543 key->is_egress, key->is_fdb,
10544 dev_flow->external, tunnel,
10545 group_id, 0, key->id, error);
10547 return -rte_errno; /* No need to refill the error info */
10548 tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10550 entry = mlx5_list_register(tbl_data->matchers, &ctx);
10552 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10553 return rte_flow_error_set(error, ENOMEM,
10554 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10555 "cannot allocate ref memory");
10557 resource = container_of(entry, typeof(*resource), entry);
10558 dev_flow->handle->dvh.matcher = resource;
10562 struct mlx5_list_entry *
10563 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10565 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10566 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10567 struct mlx5_flow_dv_tag_resource *entry;
10571 entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10573 rte_flow_error_set(ctx->error, ENOMEM,
10574 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10575 "cannot allocate resource memory");
10579 entry->tag_id = *(uint32_t *)(ctx->data);
10580 ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10583 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10584 rte_flow_error_set(ctx->error, ENOMEM,
10585 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10586 NULL, "cannot create action");
10589 return &entry->entry;
10593 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10596 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10597 struct mlx5_flow_dv_tag_resource *tag =
10598 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10600 return *(uint32_t *)(ctx->data) != tag->tag_id;
10603 struct mlx5_list_entry *
10604 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10607 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10608 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10609 struct mlx5_flow_dv_tag_resource *entry;
10612 entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10614 rte_flow_error_set(ctx->error, ENOMEM,
10615 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10616 "cannot allocate tag resource memory");
10619 memcpy(entry, oentry, sizeof(*entry));
10621 return &entry->entry;
10625 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10627 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10628 struct mlx5_flow_dv_tag_resource *tag =
10629 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10631 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10635 * Find existing tag resource or create and register a new one.
10637 * @param dev[in, out]
10638 * Pointer to rte_eth_dev structure.
10639 * @param[in, out] tag_be24
10640 * Tag value in big endian then R-shift 8.
10641 * @parm[in, out] dev_flow
10642 * Pointer to the dev_flow.
10643 * @param[out] error
10644 * pointer to error structure.
10647 * 0 on success otherwise -errno and errno is set.
10650 flow_dv_tag_resource_register
10651 (struct rte_eth_dev *dev,
10653 struct mlx5_flow *dev_flow,
10654 struct rte_flow_error *error)
10656 struct mlx5_priv *priv = dev->data->dev_private;
10657 struct mlx5_flow_dv_tag_resource *resource;
10658 struct mlx5_list_entry *entry;
10659 struct mlx5_flow_cb_ctx ctx = {
10663 struct mlx5_hlist *tag_table;
10665 tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10667 MLX5_TAGS_HLIST_ARRAY_SIZE,
10668 false, false, priv->sh,
10669 flow_dv_tag_create_cb,
10670 flow_dv_tag_match_cb,
10671 flow_dv_tag_remove_cb,
10672 flow_dv_tag_clone_cb,
10673 flow_dv_tag_clone_free_cb);
10674 if (unlikely(!tag_table))
10676 entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10678 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10680 dev_flow->handle->dvh.rix_tag = resource->idx;
10681 dev_flow->dv.tag_resource = resource;
10688 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10690 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10691 struct mlx5_flow_dv_tag_resource *tag =
10692 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10694 MLX5_ASSERT(tag && sh && tag->action);
10695 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10696 DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10697 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10704 * Pointer to Ethernet device.
10709 * 1 while a reference on it exists, 0 when freed.
10712 flow_dv_tag_release(struct rte_eth_dev *dev,
10715 struct mlx5_priv *priv = dev->data->dev_private;
10716 struct mlx5_flow_dv_tag_resource *tag;
10718 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10721 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10722 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10723 return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10727 * Translate port ID action to vport.
10730 * Pointer to rte_eth_dev structure.
10731 * @param[in] action
10732 * Pointer to the port ID action.
10733 * @param[out] dst_port_id
10734 * The target port ID.
10735 * @param[out] error
10736 * Pointer to the error structure.
10739 * 0 on success, a negative errno value otherwise and rte_errno is set.
10742 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10743 const struct rte_flow_action *action,
10744 uint32_t *dst_port_id,
10745 struct rte_flow_error *error)
10748 struct mlx5_priv *priv;
10749 const struct rte_flow_action_port_id *conf =
10750 (const struct rte_flow_action_port_id *)action->conf;
10752 port = conf->original ? dev->data->port_id : conf->id;
10753 priv = mlx5_port_to_eswitch_info(port, false);
10755 return rte_flow_error_set(error, -rte_errno,
10756 RTE_FLOW_ERROR_TYPE_ACTION,
10758 "No eswitch info was found for port");
10759 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10761 * This parameter is transferred to
10762 * mlx5dv_dr_action_create_dest_ib_port().
10764 *dst_port_id = priv->dev_port;
10767 * Legacy mode, no LAG configurations is supported.
10768 * This parameter is transferred to
10769 * mlx5dv_dr_action_create_dest_vport().
10771 *dst_port_id = priv->vport_id;
10777 * Create a counter with aging configuration.
10780 * Pointer to rte_eth_dev structure.
10781 * @param[in] dev_flow
10782 * Pointer to the mlx5_flow.
10783 * @param[out] count
10784 * Pointer to the counter action configuration.
10786 * Pointer to the aging action configuration.
10789 * Index to flow counter on success, 0 otherwise.
10792 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10793 struct mlx5_flow *dev_flow,
10794 const struct rte_flow_action_count *count,
10795 const struct rte_flow_action_age *age)
10798 struct mlx5_age_param *age_param;
10800 if (count && count->shared)
10801 counter = flow_dv_counter_get_shared(dev, count->id);
10803 counter = flow_dv_counter_alloc(dev, !!age);
10804 if (!counter || age == NULL)
10806 age_param = flow_dv_counter_idx_get_age(dev, counter);
10807 age_param->context = age->context ? age->context :
10808 (void *)(uintptr_t)(dev_flow->flow_idx);
10809 age_param->timeout = age->timeout;
10810 age_param->port_id = dev->data->port_id;
10811 __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10812 __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10817 * Add Tx queue matcher
10820 * Pointer to the dev struct.
10821 * @param[in, out] matcher
10823 * @param[in, out] key
10824 * Flow matcher value.
10826 * Flow pattern to translate.
10828 * Item is inner pattern.
10831 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10832 void *matcher, void *key,
10833 const struct rte_flow_item *item)
10835 const struct mlx5_rte_flow_item_tx_queue *queue_m;
10836 const struct mlx5_rte_flow_item_tx_queue *queue_v;
10838 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10840 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10841 struct mlx5_txq_ctrl *txq;
10845 queue_m = (const void *)item->mask;
10848 queue_v = (const void *)item->spec;
10851 txq = mlx5_txq_get(dev, queue_v->queue);
10854 queue = txq->obj->sq->id;
10855 MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10856 MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10857 queue & queue_m->queue);
10858 mlx5_txq_release(dev, queue_v->queue);
10862 * Set the hash fields according to the @p flow information.
10864 * @param[in] dev_flow
10865 * Pointer to the mlx5_flow.
10866 * @param[in] rss_desc
10867 * Pointer to the mlx5_flow_rss_desc.
10870 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10871 struct mlx5_flow_rss_desc *rss_desc)
10873 uint64_t items = dev_flow->handle->layers;
10875 uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10877 dev_flow->hash_fields = 0;
10878 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10879 if (rss_desc->level >= 2) {
10880 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10884 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10885 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10886 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10887 if (rss_types & ETH_RSS_L3_SRC_ONLY)
10888 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10889 else if (rss_types & ETH_RSS_L3_DST_ONLY)
10890 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10892 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10894 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10895 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10896 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10897 if (rss_types & ETH_RSS_L3_SRC_ONLY)
10898 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10899 else if (rss_types & ETH_RSS_L3_DST_ONLY)
10900 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10902 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10905 if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10906 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10907 if (rss_types & ETH_RSS_UDP) {
10908 if (rss_types & ETH_RSS_L4_SRC_ONLY)
10909 dev_flow->hash_fields |=
10910 IBV_RX_HASH_SRC_PORT_UDP;
10911 else if (rss_types & ETH_RSS_L4_DST_ONLY)
10912 dev_flow->hash_fields |=
10913 IBV_RX_HASH_DST_PORT_UDP;
10915 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10917 } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10918 (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10919 if (rss_types & ETH_RSS_TCP) {
10920 if (rss_types & ETH_RSS_L4_SRC_ONLY)
10921 dev_flow->hash_fields |=
10922 IBV_RX_HASH_SRC_PORT_TCP;
10923 else if (rss_types & ETH_RSS_L4_DST_ONLY)
10924 dev_flow->hash_fields |=
10925 IBV_RX_HASH_DST_PORT_TCP;
10927 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10933 * Prepare an Rx Hash queue.
10936 * Pointer to Ethernet device.
10937 * @param[in] dev_flow
10938 * Pointer to the mlx5_flow.
10939 * @param[in] rss_desc
10940 * Pointer to the mlx5_flow_rss_desc.
10941 * @param[out] hrxq_idx
10942 * Hash Rx queue index.
10945 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10947 static struct mlx5_hrxq *
10948 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10949 struct mlx5_flow *dev_flow,
10950 struct mlx5_flow_rss_desc *rss_desc,
10951 uint32_t *hrxq_idx)
10953 struct mlx5_priv *priv = dev->data->dev_private;
10954 struct mlx5_flow_handle *dh = dev_flow->handle;
10955 struct mlx5_hrxq *hrxq;
10957 MLX5_ASSERT(rss_desc->queue_num);
10958 rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10959 rss_desc->hash_fields = dev_flow->hash_fields;
10960 rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10961 rss_desc->shared_rss = 0;
10962 *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10965 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10971 * Release sample sub action resource.
10973 * @param[in, out] dev
10974 * Pointer to rte_eth_dev structure.
10975 * @param[in] act_res
10976 * Pointer to sample sub action resource.
10979 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10980 struct mlx5_flow_sub_actions_idx *act_res)
10982 if (act_res->rix_hrxq) {
10983 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10984 act_res->rix_hrxq = 0;
10986 if (act_res->rix_encap_decap) {
10987 flow_dv_encap_decap_resource_release(dev,
10988 act_res->rix_encap_decap);
10989 act_res->rix_encap_decap = 0;
10991 if (act_res->rix_port_id_action) {
10992 flow_dv_port_id_action_resource_release(dev,
10993 act_res->rix_port_id_action);
10994 act_res->rix_port_id_action = 0;
10996 if (act_res->rix_tag) {
10997 flow_dv_tag_release(dev, act_res->rix_tag);
10998 act_res->rix_tag = 0;
11000 if (act_res->rix_jump) {
11001 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11002 act_res->rix_jump = 0;
11007 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11008 struct mlx5_list_entry *entry, void *cb_ctx)
11010 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11011 struct rte_eth_dev *dev = ctx->dev;
11012 struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11013 struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11017 if (ctx_resource->ratio == resource->ratio &&
11018 ctx_resource->ft_type == resource->ft_type &&
11019 ctx_resource->ft_id == resource->ft_id &&
11020 ctx_resource->set_action == resource->set_action &&
11021 !memcmp((void *)&ctx_resource->sample_act,
11022 (void *)&resource->sample_act,
11023 sizeof(struct mlx5_flow_sub_actions_list))) {
11025 * Existing sample action should release the prepared
11026 * sub-actions reference counter.
11028 flow_dv_sample_sub_actions_release(dev,
11029 &ctx_resource->sample_idx);
11035 struct mlx5_list_entry *
11036 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11038 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11039 struct rte_eth_dev *dev = ctx->dev;
11040 struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11041 void **sample_dv_actions = ctx_resource->sub_actions;
11042 struct mlx5_flow_dv_sample_resource *resource;
11043 struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11044 struct mlx5_priv *priv = dev->data->dev_private;
11045 struct mlx5_dev_ctx_shared *sh = priv->sh;
11046 struct mlx5_flow_tbl_resource *tbl;
11048 const uint32_t next_ft_step = 1;
11049 uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11050 uint8_t is_egress = 0;
11051 uint8_t is_transfer = 0;
11052 struct rte_flow_error *error = ctx->error;
11054 /* Register new sample resource. */
11055 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11057 rte_flow_error_set(error, ENOMEM,
11058 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11060 "cannot allocate resource memory");
11063 *resource = *ctx_resource;
11064 /* Create normal path table level */
11065 if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11067 else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11069 tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11070 is_egress, is_transfer,
11071 true, NULL, 0, 0, 0, error);
11073 rte_flow_error_set(error, ENOMEM,
11074 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11076 "fail to create normal path table "
11080 resource->normal_path_tbl = tbl;
11081 if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11082 if (!sh->default_miss_action) {
11083 rte_flow_error_set(error, ENOMEM,
11084 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11086 "default miss action was not "
11090 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11091 sh->default_miss_action;
11093 /* Create a DR sample action */
11094 sampler_attr.sample_ratio = resource->ratio;
11095 sampler_attr.default_next_table = tbl->obj;
11096 sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11097 sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11098 &sample_dv_actions[0];
11099 sampler_attr.action = resource->set_action;
11100 if (mlx5_os_flow_dr_create_flow_action_sampler
11101 (&sampler_attr, &resource->verbs_action)) {
11102 rte_flow_error_set(error, ENOMEM,
11103 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11104 NULL, "cannot create sample action");
11107 resource->idx = idx;
11108 resource->dev = dev;
11109 return &resource->entry;
11111 if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11112 flow_dv_sample_sub_actions_release(dev,
11113 &resource->sample_idx);
11114 if (resource->normal_path_tbl)
11115 flow_dv_tbl_resource_release(MLX5_SH(dev),
11116 resource->normal_path_tbl);
11117 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11122 struct mlx5_list_entry *
11123 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11124 struct mlx5_list_entry *entry __rte_unused,
11127 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11128 struct rte_eth_dev *dev = ctx->dev;
11129 struct mlx5_flow_dv_sample_resource *resource;
11130 struct mlx5_priv *priv = dev->data->dev_private;
11131 struct mlx5_dev_ctx_shared *sh = priv->sh;
11134 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11136 rte_flow_error_set(ctx->error, ENOMEM,
11137 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11139 "cannot allocate resource memory");
11142 memcpy(resource, entry, sizeof(*resource));
11143 resource->idx = idx;
11144 resource->dev = dev;
11145 return &resource->entry;
11149 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11150 struct mlx5_list_entry *entry)
11152 struct mlx5_flow_dv_sample_resource *resource =
11153 container_of(entry, typeof(*resource), entry);
11154 struct rte_eth_dev *dev = resource->dev;
11155 struct mlx5_priv *priv = dev->data->dev_private;
11157 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11161 * Find existing sample resource or create and register a new one.
11163 * @param[in, out] dev
11164 * Pointer to rte_eth_dev structure.
11166 * Pointer to sample resource reference.
11167 * @parm[in, out] dev_flow
11168 * Pointer to the dev_flow.
11169 * @param[out] error
11170 * pointer to error structure.
11173 * 0 on success otherwise -errno and errno is set.
11176 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11177 struct mlx5_flow_dv_sample_resource *ref,
11178 struct mlx5_flow *dev_flow,
11179 struct rte_flow_error *error)
11181 struct mlx5_flow_dv_sample_resource *resource;
11182 struct mlx5_list_entry *entry;
11183 struct mlx5_priv *priv = dev->data->dev_private;
11184 struct mlx5_flow_cb_ctx ctx = {
11190 entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11193 resource = container_of(entry, typeof(*resource), entry);
11194 dev_flow->handle->dvh.rix_sample = resource->idx;
11195 dev_flow->dv.sample_res = resource;
11200 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11201 struct mlx5_list_entry *entry, void *cb_ctx)
11203 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11204 struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11205 struct rte_eth_dev *dev = ctx->dev;
11206 struct mlx5_flow_dv_dest_array_resource *resource =
11207 container_of(entry, typeof(*resource), entry);
11210 if (ctx_resource->num_of_dest == resource->num_of_dest &&
11211 ctx_resource->ft_type == resource->ft_type &&
11212 !memcmp((void *)resource->sample_act,
11213 (void *)ctx_resource->sample_act,
11214 (ctx_resource->num_of_dest *
11215 sizeof(struct mlx5_flow_sub_actions_list)))) {
11217 * Existing sample action should release the prepared
11218 * sub-actions reference counter.
11220 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11221 flow_dv_sample_sub_actions_release(dev,
11222 &ctx_resource->sample_idx[idx]);
11228 struct mlx5_list_entry *
11229 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11231 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11232 struct rte_eth_dev *dev = ctx->dev;
11233 struct mlx5_flow_dv_dest_array_resource *resource;
11234 struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11235 struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11236 struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11237 struct mlx5_priv *priv = dev->data->dev_private;
11238 struct mlx5_dev_ctx_shared *sh = priv->sh;
11239 struct mlx5_flow_sub_actions_list *sample_act;
11240 struct mlx5dv_dr_domain *domain;
11241 uint32_t idx = 0, res_idx = 0;
11242 struct rte_flow_error *error = ctx->error;
11243 uint64_t action_flags;
11246 /* Register new destination array resource. */
11247 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11250 rte_flow_error_set(error, ENOMEM,
11251 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11253 "cannot allocate resource memory");
11256 *resource = *ctx_resource;
11257 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11258 domain = sh->fdb_domain;
11259 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11260 domain = sh->rx_domain;
11262 domain = sh->tx_domain;
11263 for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11264 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11265 mlx5_malloc(MLX5_MEM_ZERO,
11266 sizeof(struct mlx5dv_dr_action_dest_attr),
11268 if (!dest_attr[idx]) {
11269 rte_flow_error_set(error, ENOMEM,
11270 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11272 "cannot allocate resource memory");
11275 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11276 sample_act = &ctx_resource->sample_act[idx];
11277 action_flags = sample_act->action_flags;
11278 switch (action_flags) {
11279 case MLX5_FLOW_ACTION_QUEUE:
11280 dest_attr[idx]->dest = sample_act->dr_queue_action;
11282 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11283 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11284 dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11285 dest_attr[idx]->dest_reformat->reformat =
11286 sample_act->dr_encap_action;
11287 dest_attr[idx]->dest_reformat->dest =
11288 sample_act->dr_port_id_action;
11290 case MLX5_FLOW_ACTION_PORT_ID:
11291 dest_attr[idx]->dest = sample_act->dr_port_id_action;
11293 case MLX5_FLOW_ACTION_JUMP:
11294 dest_attr[idx]->dest = sample_act->dr_jump_action;
11297 rte_flow_error_set(error, EINVAL,
11298 RTE_FLOW_ERROR_TYPE_ACTION,
11300 "unsupported actions type");
11304 /* create a dest array actioin */
11305 ret = mlx5_os_flow_dr_create_flow_action_dest_array
11307 resource->num_of_dest,
11309 &resource->action);
11311 rte_flow_error_set(error, ENOMEM,
11312 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11314 "cannot create destination array action");
11317 resource->idx = res_idx;
11318 resource->dev = dev;
11319 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11320 mlx5_free(dest_attr[idx]);
11321 return &resource->entry;
11323 for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11324 flow_dv_sample_sub_actions_release(dev,
11325 &resource->sample_idx[idx]);
11326 if (dest_attr[idx])
11327 mlx5_free(dest_attr[idx]);
11329 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11333 struct mlx5_list_entry *
11334 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11335 struct mlx5_list_entry *entry __rte_unused,
11338 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11339 struct rte_eth_dev *dev = ctx->dev;
11340 struct mlx5_flow_dv_dest_array_resource *resource;
11341 struct mlx5_priv *priv = dev->data->dev_private;
11342 struct mlx5_dev_ctx_shared *sh = priv->sh;
11343 uint32_t res_idx = 0;
11344 struct rte_flow_error *error = ctx->error;
11346 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11349 rte_flow_error_set(error, ENOMEM,
11350 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11352 "cannot allocate dest-array memory");
11355 memcpy(resource, entry, sizeof(*resource));
11356 resource->idx = res_idx;
11357 resource->dev = dev;
11358 return &resource->entry;
11362 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11363 struct mlx5_list_entry *entry)
11365 struct mlx5_flow_dv_dest_array_resource *resource =
11366 container_of(entry, typeof(*resource), entry);
11367 struct rte_eth_dev *dev = resource->dev;
11368 struct mlx5_priv *priv = dev->data->dev_private;
11370 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11374 * Find existing destination array resource or create and register a new one.
11376 * @param[in, out] dev
11377 * Pointer to rte_eth_dev structure.
11379 * Pointer to destination array resource reference.
11380 * @parm[in, out] dev_flow
11381 * Pointer to the dev_flow.
11382 * @param[out] error
11383 * pointer to error structure.
11386 * 0 on success otherwise -errno and errno is set.
11389 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11390 struct mlx5_flow_dv_dest_array_resource *ref,
11391 struct mlx5_flow *dev_flow,
11392 struct rte_flow_error *error)
11394 struct mlx5_flow_dv_dest_array_resource *resource;
11395 struct mlx5_priv *priv = dev->data->dev_private;
11396 struct mlx5_list_entry *entry;
11397 struct mlx5_flow_cb_ctx ctx = {
11403 entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11406 resource = container_of(entry, typeof(*resource), entry);
11407 dev_flow->handle->dvh.rix_dest_array = resource->idx;
11408 dev_flow->dv.dest_array_res = resource;
11413 * Convert Sample action to DV specification.
11416 * Pointer to rte_eth_dev structure.
11417 * @param[in] action
11418 * Pointer to sample action structure.
11419 * @param[in, out] dev_flow
11420 * Pointer to the mlx5_flow.
11422 * Pointer to the flow attributes.
11423 * @param[in, out] num_of_dest
11424 * Pointer to the num of destination.
11425 * @param[in, out] sample_actions
11426 * Pointer to sample actions list.
11427 * @param[in, out] res
11428 * Pointer to sample resource.
11429 * @param[out] error
11430 * Pointer to the error structure.
11433 * 0 on success, a negative errno value otherwise and rte_errno is set.
11436 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11437 const struct rte_flow_action_sample *action,
11438 struct mlx5_flow *dev_flow,
11439 const struct rte_flow_attr *attr,
11440 uint32_t *num_of_dest,
11441 void **sample_actions,
11442 struct mlx5_flow_dv_sample_resource *res,
11443 struct rte_flow_error *error)
11445 struct mlx5_priv *priv = dev->data->dev_private;
11446 const struct rte_flow_action *sub_actions;
11447 struct mlx5_flow_sub_actions_list *sample_act;
11448 struct mlx5_flow_sub_actions_idx *sample_idx;
11449 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11450 struct rte_flow *flow = dev_flow->flow;
11451 struct mlx5_flow_rss_desc *rss_desc;
11452 uint64_t action_flags = 0;
11455 rss_desc = &wks->rss_desc;
11456 sample_act = &res->sample_act;
11457 sample_idx = &res->sample_idx;
11458 res->ratio = action->ratio;
11459 sub_actions = action->actions;
11460 for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11461 int type = sub_actions->type;
11462 uint32_t pre_rix = 0;
11465 case RTE_FLOW_ACTION_TYPE_QUEUE:
11467 const struct rte_flow_action_queue *queue;
11468 struct mlx5_hrxq *hrxq;
11471 queue = sub_actions->conf;
11472 rss_desc->queue_num = 1;
11473 rss_desc->queue[0] = queue->index;
11474 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11475 rss_desc, &hrxq_idx);
11477 return rte_flow_error_set
11479 RTE_FLOW_ERROR_TYPE_ACTION,
11481 "cannot create fate queue");
11482 sample_act->dr_queue_action = hrxq->action;
11483 sample_idx->rix_hrxq = hrxq_idx;
11484 sample_actions[sample_act->actions_num++] =
11487 action_flags |= MLX5_FLOW_ACTION_QUEUE;
11488 if (action_flags & MLX5_FLOW_ACTION_MARK)
11489 dev_flow->handle->rix_hrxq = hrxq_idx;
11490 dev_flow->handle->fate_action =
11491 MLX5_FLOW_FATE_QUEUE;
11494 case RTE_FLOW_ACTION_TYPE_RSS:
11496 struct mlx5_hrxq *hrxq;
11498 const struct rte_flow_action_rss *rss;
11499 const uint8_t *rss_key;
11501 rss = sub_actions->conf;
11502 memcpy(rss_desc->queue, rss->queue,
11503 rss->queue_num * sizeof(uint16_t));
11504 rss_desc->queue_num = rss->queue_num;
11505 /* NULL RSS key indicates default RSS key. */
11506 rss_key = !rss->key ? rss_hash_default_key : rss->key;
11507 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11509 * rss->level and rss.types should be set in advance
11510 * when expanding items for RSS.
11512 flow_dv_hashfields_set(dev_flow, rss_desc);
11513 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11514 rss_desc, &hrxq_idx);
11516 return rte_flow_error_set
11518 RTE_FLOW_ERROR_TYPE_ACTION,
11520 "cannot create fate queue");
11521 sample_act->dr_queue_action = hrxq->action;
11522 sample_idx->rix_hrxq = hrxq_idx;
11523 sample_actions[sample_act->actions_num++] =
11526 action_flags |= MLX5_FLOW_ACTION_RSS;
11527 if (action_flags & MLX5_FLOW_ACTION_MARK)
11528 dev_flow->handle->rix_hrxq = hrxq_idx;
11529 dev_flow->handle->fate_action =
11530 MLX5_FLOW_FATE_QUEUE;
11533 case RTE_FLOW_ACTION_TYPE_MARK:
11535 uint32_t tag_be = mlx5_flow_mark_set
11536 (((const struct rte_flow_action_mark *)
11537 (sub_actions->conf))->id);
11539 dev_flow->handle->mark = 1;
11540 pre_rix = dev_flow->handle->dvh.rix_tag;
11541 /* Save the mark resource before sample */
11542 pre_r = dev_flow->dv.tag_resource;
11543 if (flow_dv_tag_resource_register(dev, tag_be,
11546 MLX5_ASSERT(dev_flow->dv.tag_resource);
11547 sample_act->dr_tag_action =
11548 dev_flow->dv.tag_resource->action;
11549 sample_idx->rix_tag =
11550 dev_flow->handle->dvh.rix_tag;
11551 sample_actions[sample_act->actions_num++] =
11552 sample_act->dr_tag_action;
11553 /* Recover the mark resource after sample */
11554 dev_flow->dv.tag_resource = pre_r;
11555 dev_flow->handle->dvh.rix_tag = pre_rix;
11556 action_flags |= MLX5_FLOW_ACTION_MARK;
11559 case RTE_FLOW_ACTION_TYPE_COUNT:
11561 if (!flow->counter) {
11563 flow_dv_translate_create_counter(dev,
11564 dev_flow, sub_actions->conf,
11566 if (!flow->counter)
11567 return rte_flow_error_set
11569 RTE_FLOW_ERROR_TYPE_ACTION,
11571 "cannot create counter"
11574 sample_act->dr_cnt_action =
11575 (flow_dv_counter_get_by_idx(dev,
11576 flow->counter, NULL))->action;
11577 sample_actions[sample_act->actions_num++] =
11578 sample_act->dr_cnt_action;
11579 action_flags |= MLX5_FLOW_ACTION_COUNT;
11582 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11584 struct mlx5_flow_dv_port_id_action_resource
11586 uint32_t port_id = 0;
11588 memset(&port_id_resource, 0, sizeof(port_id_resource));
11589 /* Save the port id resource before sample */
11590 pre_rix = dev_flow->handle->rix_port_id_action;
11591 pre_r = dev_flow->dv.port_id_action;
11592 if (flow_dv_translate_action_port_id(dev, sub_actions,
11595 port_id_resource.port_id = port_id;
11596 if (flow_dv_port_id_action_resource_register
11597 (dev, &port_id_resource, dev_flow, error))
11599 sample_act->dr_port_id_action =
11600 dev_flow->dv.port_id_action->action;
11601 sample_idx->rix_port_id_action =
11602 dev_flow->handle->rix_port_id_action;
11603 sample_actions[sample_act->actions_num++] =
11604 sample_act->dr_port_id_action;
11605 /* Recover the port id resource after sample */
11606 dev_flow->dv.port_id_action = pre_r;
11607 dev_flow->handle->rix_port_id_action = pre_rix;
11609 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11612 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11613 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11614 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11615 /* Save the encap resource before sample */
11616 pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11617 pre_r = dev_flow->dv.encap_decap;
11618 if (flow_dv_create_action_l2_encap(dev, sub_actions,
11623 sample_act->dr_encap_action =
11624 dev_flow->dv.encap_decap->action;
11625 sample_idx->rix_encap_decap =
11626 dev_flow->handle->dvh.rix_encap_decap;
11627 sample_actions[sample_act->actions_num++] =
11628 sample_act->dr_encap_action;
11629 /* Recover the encap resource after sample */
11630 dev_flow->dv.encap_decap = pre_r;
11631 dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11632 action_flags |= MLX5_FLOW_ACTION_ENCAP;
11635 return rte_flow_error_set(error, EINVAL,
11636 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11638 "Not support for sampler action");
11641 sample_act->action_flags = action_flags;
11642 res->ft_id = dev_flow->dv.group;
11643 if (attr->transfer) {
11645 uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11646 uint64_t set_action;
11647 } action_ctx = { .set_action = 0 };
11649 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11650 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11651 MLX5_MODIFICATION_TYPE_SET);
11652 MLX5_SET(set_action_in, action_ctx.action_in, field,
11653 MLX5_MODI_META_REG_C_0);
11654 MLX5_SET(set_action_in, action_ctx.action_in, data,
11655 priv->vport_meta_tag);
11656 res->set_action = action_ctx.set_action;
11657 } else if (attr->ingress) {
11658 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11660 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11666 * Convert Sample action to DV specification.
11669 * Pointer to rte_eth_dev structure.
11670 * @param[in, out] dev_flow
11671 * Pointer to the mlx5_flow.
11672 * @param[in] num_of_dest
11673 * The num of destination.
11674 * @param[in, out] res
11675 * Pointer to sample resource.
11676 * @param[in, out] mdest_res
11677 * Pointer to destination array resource.
11678 * @param[in] sample_actions
11679 * Pointer to sample path actions list.
11680 * @param[in] action_flags
11681 * Holds the actions detected until now.
11682 * @param[out] error
11683 * Pointer to the error structure.
11686 * 0 on success, a negative errno value otherwise and rte_errno is set.
11689 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11690 struct mlx5_flow *dev_flow,
11691 uint32_t num_of_dest,
11692 struct mlx5_flow_dv_sample_resource *res,
11693 struct mlx5_flow_dv_dest_array_resource *mdest_res,
11694 void **sample_actions,
11695 uint64_t action_flags,
11696 struct rte_flow_error *error)
11698 /* update normal path action resource into last index of array */
11699 uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11700 struct mlx5_flow_sub_actions_list *sample_act =
11701 &mdest_res->sample_act[dest_index];
11702 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11703 struct mlx5_flow_rss_desc *rss_desc;
11704 uint32_t normal_idx = 0;
11705 struct mlx5_hrxq *hrxq;
11709 rss_desc = &wks->rss_desc;
11710 if (num_of_dest > 1) {
11711 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11712 /* Handle QP action for mirroring */
11713 hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11714 rss_desc, &hrxq_idx);
11716 return rte_flow_error_set
11718 RTE_FLOW_ERROR_TYPE_ACTION,
11720 "cannot create rx queue");
11722 mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11723 sample_act->dr_queue_action = hrxq->action;
11724 if (action_flags & MLX5_FLOW_ACTION_MARK)
11725 dev_flow->handle->rix_hrxq = hrxq_idx;
11726 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11728 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11730 mdest_res->sample_idx[dest_index].rix_encap_decap =
11731 dev_flow->handle->dvh.rix_encap_decap;
11732 sample_act->dr_encap_action =
11733 dev_flow->dv.encap_decap->action;
11734 dev_flow->handle->dvh.rix_encap_decap = 0;
11736 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11738 mdest_res->sample_idx[dest_index].rix_port_id_action =
11739 dev_flow->handle->rix_port_id_action;
11740 sample_act->dr_port_id_action =
11741 dev_flow->dv.port_id_action->action;
11742 dev_flow->handle->rix_port_id_action = 0;
11744 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11746 mdest_res->sample_idx[dest_index].rix_jump =
11747 dev_flow->handle->rix_jump;
11748 sample_act->dr_jump_action =
11749 dev_flow->dv.jump->action;
11750 dev_flow->handle->rix_jump = 0;
11752 sample_act->actions_num = normal_idx;
11753 /* update sample action resource into first index of array */
11754 mdest_res->ft_type = res->ft_type;
11755 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11756 sizeof(struct mlx5_flow_sub_actions_idx));
11757 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11758 sizeof(struct mlx5_flow_sub_actions_list));
11759 mdest_res->num_of_dest = num_of_dest;
11760 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11762 return rte_flow_error_set(error, EINVAL,
11763 RTE_FLOW_ERROR_TYPE_ACTION,
11764 NULL, "can't create sample "
11767 res->sub_actions = sample_actions;
11768 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11769 return rte_flow_error_set(error, EINVAL,
11770 RTE_FLOW_ERROR_TYPE_ACTION,
11772 "can't create sample action");
11778 * Remove an ASO age action from age actions list.
11781 * Pointer to the Ethernet device structure.
11783 * Pointer to the aso age action handler.
11786 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11787 struct mlx5_aso_age_action *age)
11789 struct mlx5_age_info *age_info;
11790 struct mlx5_age_param *age_param = &age->age_params;
11791 struct mlx5_priv *priv = dev->data->dev_private;
11792 uint16_t expected = AGE_CANDIDATE;
11794 age_info = GET_PORT_AGE_INFO(priv);
11795 if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11796 AGE_FREE, false, __ATOMIC_RELAXED,
11797 __ATOMIC_RELAXED)) {
11799 * We need the lock even it is age timeout,
11800 * since age action may still in process.
11802 rte_spinlock_lock(&age_info->aged_sl);
11803 LIST_REMOVE(age, next);
11804 rte_spinlock_unlock(&age_info->aged_sl);
11805 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11810 * Release an ASO age action.
11813 * Pointer to the Ethernet device structure.
11814 * @param[in] age_idx
11815 * Index of ASO age action to release.
11817 * True if the release operation is during flow destroy operation.
11818 * False if the release operation is during action destroy operation.
11821 * 0 when age action was removed, otherwise the number of references.
11824 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11826 struct mlx5_priv *priv = dev->data->dev_private;
11827 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11828 struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11829 uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11832 flow_dv_aso_age_remove_from_age(dev, age);
11833 rte_spinlock_lock(&mng->free_sl);
11834 LIST_INSERT_HEAD(&mng->free, age, next);
11835 rte_spinlock_unlock(&mng->free_sl);
11841 * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11844 * Pointer to the Ethernet device structure.
11847 * 0 on success, otherwise negative errno value and rte_errno is set.
11850 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11852 struct mlx5_priv *priv = dev->data->dev_private;
11853 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11854 void *old_pools = mng->pools;
11855 uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11856 uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11857 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11860 rte_errno = ENOMEM;
11864 memcpy(pools, old_pools,
11865 mng->n * sizeof(struct mlx5_flow_counter_pool *));
11866 mlx5_free(old_pools);
11868 /* First ASO flow hit allocation - starting ASO data-path. */
11869 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11877 mng->pools = pools;
11882 * Create and initialize a new ASO aging pool.
11885 * Pointer to the Ethernet device structure.
11886 * @param[out] age_free
11887 * Where to put the pointer of a new age action.
11890 * The age actions pool pointer and @p age_free is set on success,
11891 * NULL otherwise and rte_errno is set.
11893 static struct mlx5_aso_age_pool *
11894 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11895 struct mlx5_aso_age_action **age_free)
11897 struct mlx5_priv *priv = dev->data->dev_private;
11898 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11899 struct mlx5_aso_age_pool *pool = NULL;
11900 struct mlx5_devx_obj *obj = NULL;
11903 obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11906 rte_errno = ENODATA;
11907 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11910 pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11912 claim_zero(mlx5_devx_cmd_destroy(obj));
11913 rte_errno = ENOMEM;
11916 pool->flow_hit_aso_obj = obj;
11917 pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11918 rte_spinlock_lock(&mng->resize_sl);
11919 pool->index = mng->next;
11920 /* Resize pools array if there is no room for the new pool in it. */
11921 if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11922 claim_zero(mlx5_devx_cmd_destroy(obj));
11924 rte_spinlock_unlock(&mng->resize_sl);
11927 mng->pools[pool->index] = pool;
11929 rte_spinlock_unlock(&mng->resize_sl);
11930 /* Assign the first action in the new pool, the rest go to free list. */
11931 *age_free = &pool->actions[0];
11932 for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11933 pool->actions[i].offset = i;
11934 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11940 * Allocate a ASO aging bit.
11943 * Pointer to the Ethernet device structure.
11944 * @param[out] error
11945 * Pointer to the error structure.
11948 * Index to ASO age action on success, 0 otherwise and rte_errno is set.
11951 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11953 struct mlx5_priv *priv = dev->data->dev_private;
11954 const struct mlx5_aso_age_pool *pool;
11955 struct mlx5_aso_age_action *age_free = NULL;
11956 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11959 /* Try to get the next free age action bit. */
11960 rte_spinlock_lock(&mng->free_sl);
11961 age_free = LIST_FIRST(&mng->free);
11963 LIST_REMOVE(age_free, next);
11964 } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11965 rte_spinlock_unlock(&mng->free_sl);
11966 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11967 NULL, "failed to create ASO age pool");
11968 return 0; /* 0 is an error. */
11970 rte_spinlock_unlock(&mng->free_sl);
11971 pool = container_of
11972 ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11973 (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11975 if (!age_free->dr_action) {
11976 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11980 rte_flow_error_set(error, rte_errno,
11981 RTE_FLOW_ERROR_TYPE_ACTION,
11982 NULL, "failed to get reg_c "
11983 "for ASO flow hit");
11984 return 0; /* 0 is an error. */
11986 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11987 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11988 (priv->sh->rx_domain,
11989 pool->flow_hit_aso_obj->obj, age_free->offset,
11990 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11991 (reg_c - REG_C_0));
11992 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11993 if (!age_free->dr_action) {
11995 rte_spinlock_lock(&mng->free_sl);
11996 LIST_INSERT_HEAD(&mng->free, age_free, next);
11997 rte_spinlock_unlock(&mng->free_sl);
11998 rte_flow_error_set(error, rte_errno,
11999 RTE_FLOW_ERROR_TYPE_ACTION,
12000 NULL, "failed to create ASO "
12001 "flow hit action");
12002 return 0; /* 0 is an error. */
12005 __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12006 return pool->index | ((age_free->offset + 1) << 16);
12010 * Initialize flow ASO age parameters.
12013 * Pointer to rte_eth_dev structure.
12014 * @param[in] age_idx
12015 * Index of ASO age action.
12016 * @param[in] context
12017 * Pointer to flow counter age context.
12018 * @param[in] timeout
12019 * Aging timeout in seconds.
12023 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12028 struct mlx5_aso_age_action *aso_age;
12030 aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12031 MLX5_ASSERT(aso_age);
12032 aso_age->age_params.context = context;
12033 aso_age->age_params.timeout = timeout;
12034 aso_age->age_params.port_id = dev->data->port_id;
12035 __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12037 __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12042 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12043 const struct rte_flow_item_integrity *value,
12044 void *headers_m, void *headers_v)
12047 /* application l4_ok filter aggregates all hardware l4 filters
12048 * therefore hw l4_checksum_ok must be implicitly added here.
12050 struct rte_flow_item_integrity local_item;
12052 local_item.l4_csum_ok = 1;
12053 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12054 local_item.l4_csum_ok);
12055 if (value->l4_ok) {
12056 /* application l4_ok = 1 matches sets both hw flags
12057 * l4_ok and l4_checksum_ok flags to 1.
12059 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12060 l4_checksum_ok, local_item.l4_csum_ok);
12061 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
12063 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
12066 /* application l4_ok = 0 matches on hw flag
12067 * l4_checksum_ok = 0 only.
12069 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12070 l4_checksum_ok, 0);
12072 } else if (mask->l4_csum_ok) {
12073 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12075 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12076 value->l4_csum_ok);
12081 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12082 const struct rte_flow_item_integrity *value,
12083 void *headers_m, void *headers_v,
12087 /* application l3_ok filter aggregates all hardware l3 filters
12088 * therefore hw ipv4_checksum_ok must be implicitly added here.
12090 struct rte_flow_item_integrity local_item;
12092 local_item.ipv4_csum_ok = !!is_ipv4;
12093 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12094 local_item.ipv4_csum_ok);
12095 if (value->l3_ok) {
12096 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12097 ipv4_checksum_ok, local_item.ipv4_csum_ok);
12098 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12100 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12103 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12104 ipv4_checksum_ok, 0);
12106 } else if (mask->ipv4_csum_ok) {
12107 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12108 mask->ipv4_csum_ok);
12109 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12110 value->ipv4_csum_ok);
12115 flow_dv_translate_item_integrity(void *matcher, void *key,
12116 const struct rte_flow_item *head_item,
12117 const struct rte_flow_item *integrity_item)
12119 const struct rte_flow_item_integrity *mask = integrity_item->mask;
12120 const struct rte_flow_item_integrity *value = integrity_item->spec;
12121 const struct rte_flow_item *tunnel_item, *end_item, *item;
12124 uint32_t l3_protocol;
12129 mask = &rte_flow_item_integrity_mask;
12130 if (value->level > 1) {
12131 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12133 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12135 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12137 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12139 tunnel_item = mlx5_flow_find_tunnel_item(head_item);
12140 if (value->level > 1) {
12141 /* tunnel item was verified during the item validation */
12142 item = tunnel_item;
12143 end_item = mlx5_find_end_item(tunnel_item);
12146 end_item = tunnel_item ? tunnel_item :
12147 mlx5_find_end_item(integrity_item);
12149 l3_protocol = mask->l3_ok ?
12150 mlx5_flow_locate_proto_l3(&item, end_item) : 0;
12151 flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
12152 l3_protocol == RTE_ETHER_TYPE_IPV4);
12153 flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
12157 * Prepares DV flow counter with aging configuration.
12158 * Gets it by index when exists, creates a new one when doesn't.
12161 * Pointer to rte_eth_dev structure.
12162 * @param[in] dev_flow
12163 * Pointer to the mlx5_flow.
12164 * @param[in, out] flow
12165 * Pointer to the sub flow.
12167 * Pointer to the counter action configuration.
12169 * Pointer to the aging action configuration.
12170 * @param[out] error
12171 * Pointer to the error structure.
12174 * Pointer to the counter, NULL otherwise.
12176 static struct mlx5_flow_counter *
12177 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12178 struct mlx5_flow *dev_flow,
12179 struct rte_flow *flow,
12180 const struct rte_flow_action_count *count,
12181 const struct rte_flow_action_age *age,
12182 struct rte_flow_error *error)
12184 if (!flow->counter) {
12185 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12187 if (!flow->counter) {
12188 rte_flow_error_set(error, rte_errno,
12189 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12190 "cannot create counter object.");
12194 return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12198 * Release an ASO CT action by its own device.
12201 * Pointer to the Ethernet device structure.
12203 * Index of ASO CT action to release.
12206 * 0 when CT action was removed, otherwise the number of references.
12209 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12211 struct mlx5_priv *priv = dev->data->dev_private;
12212 struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12214 struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12215 enum mlx5_aso_ct_state state =
12216 __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12218 /* Cannot release when CT is in the ASO SQ. */
12219 if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12221 ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12223 if (ct->dr_action_orig) {
12224 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12225 claim_zero(mlx5_glue->destroy_flow_action
12226 (ct->dr_action_orig));
12228 ct->dr_action_orig = NULL;
12230 if (ct->dr_action_rply) {
12231 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12232 claim_zero(mlx5_glue->destroy_flow_action
12233 (ct->dr_action_rply));
12235 ct->dr_action_rply = NULL;
12237 /* Clear the state to free, no need in 1st allocation. */
12238 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12239 rte_spinlock_lock(&mng->ct_sl);
12240 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12241 rte_spinlock_unlock(&mng->ct_sl);
12247 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12249 uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12250 uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12251 struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12254 MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12255 if (dev->data->dev_started != 1)
12257 return flow_dv_aso_ct_dev_release(owndev, idx);
12261 * Resize the ASO CT pools array by 64 pools.
12264 * Pointer to the Ethernet device structure.
12267 * 0 on success, otherwise negative errno value and rte_errno is set.
12270 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12272 struct mlx5_priv *priv = dev->data->dev_private;
12273 struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12274 void *old_pools = mng->pools;
12275 /* Magic number now, need a macro. */
12276 uint32_t resize = mng->n + 64;
12277 uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12278 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12281 rte_errno = ENOMEM;
12284 rte_rwlock_write_lock(&mng->resize_rwl);
12285 /* ASO SQ/QP was already initialized in the startup. */
12287 /* Realloc could be an alternative choice. */
12288 rte_memcpy(pools, old_pools,
12289 mng->n * sizeof(struct mlx5_aso_ct_pool *));
12290 mlx5_free(old_pools);
12293 mng->pools = pools;
12294 rte_rwlock_write_unlock(&mng->resize_rwl);
12299 * Create and initialize a new ASO CT pool.
12302 * Pointer to the Ethernet device structure.
12303 * @param[out] ct_free
12304 * Where to put the pointer of a new CT action.
12307 * The CT actions pool pointer and @p ct_free is set on success,
12308 * NULL otherwise and rte_errno is set.
12310 static struct mlx5_aso_ct_pool *
12311 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12312 struct mlx5_aso_ct_action **ct_free)
12314 struct mlx5_priv *priv = dev->data->dev_private;
12315 struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12316 struct mlx5_aso_ct_pool *pool = NULL;
12317 struct mlx5_devx_obj *obj = NULL;
12319 uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12321 obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
12322 priv->sh->pdn, log_obj_size);
12324 rte_errno = ENODATA;
12325 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12328 pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12330 rte_errno = ENOMEM;
12331 claim_zero(mlx5_devx_cmd_destroy(obj));
12334 pool->devx_obj = obj;
12335 pool->index = mng->next;
12336 /* Resize pools array if there is no room for the new pool in it. */
12337 if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12338 claim_zero(mlx5_devx_cmd_destroy(obj));
12342 mng->pools[pool->index] = pool;
12344 /* Assign the first action in the new pool, the rest go to free list. */
12345 *ct_free = &pool->actions[0];
12346 /* Lock outside, the list operation is safe here. */
12347 for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12348 /* refcnt is 0 when allocating the memory. */
12349 pool->actions[i].offset = i;
12350 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12356 * Allocate a ASO CT action from free list.
12359 * Pointer to the Ethernet device structure.
12360 * @param[out] error
12361 * Pointer to the error structure.
12364 * Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12367 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12369 struct mlx5_priv *priv = dev->data->dev_private;
12370 struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12371 struct mlx5_aso_ct_action *ct = NULL;
12372 struct mlx5_aso_ct_pool *pool;
12377 if (!priv->config.devx) {
12378 rte_errno = ENOTSUP;
12381 /* Get a free CT action, if no, a new pool will be created. */
12382 rte_spinlock_lock(&mng->ct_sl);
12383 ct = LIST_FIRST(&mng->free_cts);
12385 LIST_REMOVE(ct, next);
12386 } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12387 rte_spinlock_unlock(&mng->ct_sl);
12388 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12389 NULL, "failed to create ASO CT pool");
12392 rte_spinlock_unlock(&mng->ct_sl);
12393 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12394 ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12395 /* 0: inactive, 1: created, 2+: used by flows. */
12396 __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12397 reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12398 if (!ct->dr_action_orig) {
12399 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12400 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12401 (priv->sh->rx_domain, pool->devx_obj->obj,
12403 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12406 RTE_SET_USED(reg_c);
12408 if (!ct->dr_action_orig) {
12409 flow_dv_aso_ct_dev_release(dev, ct_idx);
12410 rte_flow_error_set(error, rte_errno,
12411 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12412 "failed to create ASO CT action");
12416 if (!ct->dr_action_rply) {
12417 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12418 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12419 (priv->sh->rx_domain, pool->devx_obj->obj,
12421 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12424 if (!ct->dr_action_rply) {
12425 flow_dv_aso_ct_dev_release(dev, ct_idx);
12426 rte_flow_error_set(error, rte_errno,
12427 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12428 "failed to create ASO CT action");
12436 * Create a conntrack object with context and actions by using ASO mechanism.
12439 * Pointer to rte_eth_dev structure.
12441 * Pointer to conntrack information profile.
12442 * @param[out] error
12443 * Pointer to the error structure.
12446 * Index to conntrack object on success, 0 otherwise.
12449 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12450 const struct rte_flow_action_conntrack *pro,
12451 struct rte_flow_error *error)
12453 struct mlx5_priv *priv = dev->data->dev_private;
12454 struct mlx5_dev_ctx_shared *sh = priv->sh;
12455 struct mlx5_aso_ct_action *ct;
12458 if (!sh->ct_aso_en)
12459 return rte_flow_error_set(error, ENOTSUP,
12460 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12461 "Connection is not supported");
12462 idx = flow_dv_aso_ct_alloc(dev, error);
12464 return rte_flow_error_set(error, rte_errno,
12465 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12466 "Failed to allocate CT object");
12467 ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12468 if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12469 return rte_flow_error_set(error, EBUSY,
12470 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12471 "Failed to update CT");
12472 ct->is_original = !!pro->is_original_dir;
12473 ct->peer = pro->peer_port;
12478 * Fill the flow with DV spec, lock free
12479 * (mutex should be acquired by caller).
12482 * Pointer to rte_eth_dev structure.
12483 * @param[in, out] dev_flow
12484 * Pointer to the sub flow.
12486 * Pointer to the flow attributes.
12488 * Pointer to the list of items.
12489 * @param[in] actions
12490 * Pointer to the list of actions.
12491 * @param[out] error
12492 * Pointer to the error structure.
12495 * 0 on success, a negative errno value otherwise and rte_errno is set.
12498 flow_dv_translate(struct rte_eth_dev *dev,
12499 struct mlx5_flow *dev_flow,
12500 const struct rte_flow_attr *attr,
12501 const struct rte_flow_item items[],
12502 const struct rte_flow_action actions[],
12503 struct rte_flow_error *error)
12505 struct mlx5_priv *priv = dev->data->dev_private;
12506 struct mlx5_dev_config *dev_conf = &priv->config;
12507 struct rte_flow *flow = dev_flow->flow;
12508 struct mlx5_flow_handle *handle = dev_flow->handle;
12509 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12510 struct mlx5_flow_rss_desc *rss_desc;
12511 uint64_t item_flags = 0;
12512 uint64_t last_item = 0;
12513 uint64_t action_flags = 0;
12514 struct mlx5_flow_dv_matcher matcher = {
12516 .size = sizeof(matcher.mask.buf),
12520 bool actions_end = false;
12522 struct mlx5_flow_dv_modify_hdr_resource res;
12523 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12524 sizeof(struct mlx5_modification_cmd) *
12525 (MLX5_MAX_MODIFY_NUM + 1)];
12527 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12528 const struct rte_flow_action_count *count = NULL;
12529 const struct rte_flow_action_age *non_shared_age = NULL;
12530 union flow_dv_attr flow_attr = { .attr = 0 };
12532 union mlx5_flow_tbl_key tbl_key;
12533 uint32_t modify_action_position = UINT32_MAX;
12534 void *match_mask = matcher.mask.buf;
12535 void *match_value = dev_flow->dv.value.buf;
12536 uint8_t next_protocol = 0xff;
12537 struct rte_vlan_hdr vlan = { 0 };
12538 struct mlx5_flow_dv_dest_array_resource mdest_res;
12539 struct mlx5_flow_dv_sample_resource sample_res;
12540 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12541 const struct rte_flow_action_sample *sample = NULL;
12542 struct mlx5_flow_sub_actions_list *sample_act;
12543 uint32_t sample_act_pos = UINT32_MAX;
12544 uint32_t age_act_pos = UINT32_MAX;
12545 uint32_t num_of_dest = 0;
12546 int tmp_actions_n = 0;
12549 const struct mlx5_flow_tunnel *tunnel = NULL;
12550 struct flow_grp_info grp_info = {
12551 .external = !!dev_flow->external,
12552 .transfer = !!attr->transfer,
12553 .fdb_def_rule = !!priv->fdb_def_rule,
12554 .skip_scale = dev_flow->skip_scale &
12555 (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12556 .std_tbl_fix = true,
12558 const struct rte_flow_item *head_item = items;
12561 return rte_flow_error_set(error, ENOMEM,
12562 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12564 "failed to push flow workspace");
12565 rss_desc = &wks->rss_desc;
12566 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12567 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12568 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12569 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12570 /* update normal path action resource into last index of array */
12571 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12572 if (is_tunnel_offload_active(dev)) {
12573 if (dev_flow->tunnel) {
12574 RTE_VERIFY(dev_flow->tof_type ==
12575 MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12576 tunnel = dev_flow->tunnel;
12578 tunnel = mlx5_get_tof(items, actions,
12579 &dev_flow->tof_type);
12580 dev_flow->tunnel = tunnel;
12582 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12583 (dev, attr, tunnel, dev_flow->tof_type);
12585 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12586 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12587 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12591 dev_flow->dv.group = table;
12592 if (attr->transfer)
12593 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12594 /* number of actions must be set to 0 in case of dirty stack. */
12595 mhdr_res->actions_num = 0;
12596 if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12598 * do not add decap action if match rule drops packet
12599 * HW rejects rules with decap & drop
12601 * if tunnel match rule was inserted before matching tunnel set
12602 * rule flow table used in the match rule must be registered.
12603 * current implementation handles that in the
12604 * flow_dv_match_register() at the function end.
12606 bool add_decap = true;
12607 const struct rte_flow_action *ptr = actions;
12609 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12610 if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12616 if (flow_dv_create_action_l2_decap(dev, dev_flow,
12620 dev_flow->dv.actions[actions_n++] =
12621 dev_flow->dv.encap_decap->action;
12622 action_flags |= MLX5_FLOW_ACTION_DECAP;
12625 for (; !actions_end ; actions++) {
12626 const struct rte_flow_action_queue *queue;
12627 const struct rte_flow_action_rss *rss;
12628 const struct rte_flow_action *action = actions;
12629 const uint8_t *rss_key;
12630 struct mlx5_flow_tbl_resource *tbl;
12631 struct mlx5_aso_age_action *age_act;
12632 struct mlx5_flow_counter *cnt_act;
12633 uint32_t port_id = 0;
12634 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12635 int action_type = actions->type;
12636 const struct rte_flow_action *found_action = NULL;
12637 uint32_t jump_group = 0;
12638 uint32_t owner_idx;
12639 struct mlx5_aso_ct_action *ct;
12641 if (!mlx5_flow_os_action_supported(action_type))
12642 return rte_flow_error_set(error, ENOTSUP,
12643 RTE_FLOW_ERROR_TYPE_ACTION,
12645 "action not supported");
12646 switch (action_type) {
12647 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12648 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12650 case RTE_FLOW_ACTION_TYPE_VOID:
12652 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12653 if (flow_dv_translate_action_port_id(dev, action,
12656 port_id_resource.port_id = port_id;
12657 MLX5_ASSERT(!handle->rix_port_id_action);
12658 if (flow_dv_port_id_action_resource_register
12659 (dev, &port_id_resource, dev_flow, error))
12661 dev_flow->dv.actions[actions_n++] =
12662 dev_flow->dv.port_id_action->action;
12663 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12664 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12665 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12668 case RTE_FLOW_ACTION_TYPE_FLAG:
12669 action_flags |= MLX5_FLOW_ACTION_FLAG;
12670 dev_flow->handle->mark = 1;
12671 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12672 struct rte_flow_action_mark mark = {
12673 .id = MLX5_FLOW_MARK_DEFAULT,
12676 if (flow_dv_convert_action_mark(dev, &mark,
12680 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12683 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12685 * Only one FLAG or MARK is supported per device flow
12686 * right now. So the pointer to the tag resource must be
12687 * zero before the register process.
12689 MLX5_ASSERT(!handle->dvh.rix_tag);
12690 if (flow_dv_tag_resource_register(dev, tag_be,
12693 MLX5_ASSERT(dev_flow->dv.tag_resource);
12694 dev_flow->dv.actions[actions_n++] =
12695 dev_flow->dv.tag_resource->action;
12697 case RTE_FLOW_ACTION_TYPE_MARK:
12698 action_flags |= MLX5_FLOW_ACTION_MARK;
12699 dev_flow->handle->mark = 1;
12700 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12701 const struct rte_flow_action_mark *mark =
12702 (const struct rte_flow_action_mark *)
12705 if (flow_dv_convert_action_mark(dev, mark,
12709 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12713 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12714 /* Legacy (non-extensive) MARK action. */
12715 tag_be = mlx5_flow_mark_set
12716 (((const struct rte_flow_action_mark *)
12717 (actions->conf))->id);
12718 MLX5_ASSERT(!handle->dvh.rix_tag);
12719 if (flow_dv_tag_resource_register(dev, tag_be,
12722 MLX5_ASSERT(dev_flow->dv.tag_resource);
12723 dev_flow->dv.actions[actions_n++] =
12724 dev_flow->dv.tag_resource->action;
12726 case RTE_FLOW_ACTION_TYPE_SET_META:
12727 if (flow_dv_convert_action_set_meta
12728 (dev, mhdr_res, attr,
12729 (const struct rte_flow_action_set_meta *)
12730 actions->conf, error))
12732 action_flags |= MLX5_FLOW_ACTION_SET_META;
12734 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12735 if (flow_dv_convert_action_set_tag
12737 (const struct rte_flow_action_set_tag *)
12738 actions->conf, error))
12740 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12742 case RTE_FLOW_ACTION_TYPE_DROP:
12743 action_flags |= MLX5_FLOW_ACTION_DROP;
12744 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12746 case RTE_FLOW_ACTION_TYPE_QUEUE:
12747 queue = actions->conf;
12748 rss_desc->queue_num = 1;
12749 rss_desc->queue[0] = queue->index;
12750 action_flags |= MLX5_FLOW_ACTION_QUEUE;
12751 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12752 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12755 case RTE_FLOW_ACTION_TYPE_RSS:
12756 rss = actions->conf;
12757 memcpy(rss_desc->queue, rss->queue,
12758 rss->queue_num * sizeof(uint16_t));
12759 rss_desc->queue_num = rss->queue_num;
12760 /* NULL RSS key indicates default RSS key. */
12761 rss_key = !rss->key ? rss_hash_default_key : rss->key;
12762 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12764 * rss->level and rss.types should be set in advance
12765 * when expanding items for RSS.
12767 action_flags |= MLX5_FLOW_ACTION_RSS;
12768 dev_flow->handle->fate_action = rss_desc->shared_rss ?
12769 MLX5_FLOW_FATE_SHARED_RSS :
12770 MLX5_FLOW_FATE_QUEUE;
12772 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12773 flow->age = (uint32_t)(uintptr_t)(action->conf);
12774 age_act = flow_aso_age_get_by_idx(dev, flow->age);
12775 __atomic_fetch_add(&age_act->refcnt, 1,
12777 age_act_pos = actions_n++;
12778 action_flags |= MLX5_FLOW_ACTION_AGE;
12780 case RTE_FLOW_ACTION_TYPE_AGE:
12781 non_shared_age = action->conf;
12782 age_act_pos = actions_n++;
12783 action_flags |= MLX5_FLOW_ACTION_AGE;
12785 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12786 flow->counter = (uint32_t)(uintptr_t)(action->conf);
12787 cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12789 __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12791 /* Save information first, will apply later. */
12792 action_flags |= MLX5_FLOW_ACTION_COUNT;
12794 case RTE_FLOW_ACTION_TYPE_COUNT:
12795 if (!dev_conf->devx) {
12796 return rte_flow_error_set
12798 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12800 "count action not supported");
12802 /* Save information first, will apply later. */
12803 count = action->conf;
12804 action_flags |= MLX5_FLOW_ACTION_COUNT;
12806 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12807 dev_flow->dv.actions[actions_n++] =
12808 priv->sh->pop_vlan_action;
12809 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12811 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12812 if (!(action_flags &
12813 MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12814 flow_dev_get_vlan_info_from_items(items, &vlan);
12815 vlan.eth_proto = rte_be_to_cpu_16
12816 ((((const struct rte_flow_action_of_push_vlan *)
12817 actions->conf)->ethertype));
12818 found_action = mlx5_flow_find_action
12820 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12822 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12823 found_action = mlx5_flow_find_action
12825 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12827 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12828 if (flow_dv_create_action_push_vlan
12829 (dev, attr, &vlan, dev_flow, error))
12831 dev_flow->dv.actions[actions_n++] =
12832 dev_flow->dv.push_vlan_res->action;
12833 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12835 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12836 /* of_vlan_push action handled this action */
12837 MLX5_ASSERT(action_flags &
12838 MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12840 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12841 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12843 flow_dev_get_vlan_info_from_items(items, &vlan);
12844 mlx5_update_vlan_vid_pcp(actions, &vlan);
12845 /* If no VLAN push - this is a modify header action */
12846 if (flow_dv_convert_action_modify_vlan_vid
12847 (mhdr_res, actions, error))
12849 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12851 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12852 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12853 if (flow_dv_create_action_l2_encap(dev, actions,
12858 dev_flow->dv.actions[actions_n++] =
12859 dev_flow->dv.encap_decap->action;
12860 action_flags |= MLX5_FLOW_ACTION_ENCAP;
12861 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12862 sample_act->action_flags |=
12863 MLX5_FLOW_ACTION_ENCAP;
12865 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12866 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12867 if (flow_dv_create_action_l2_decap(dev, dev_flow,
12871 dev_flow->dv.actions[actions_n++] =
12872 dev_flow->dv.encap_decap->action;
12873 action_flags |= MLX5_FLOW_ACTION_DECAP;
12875 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12876 /* Handle encap with preceding decap. */
12877 if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12878 if (flow_dv_create_action_raw_encap
12879 (dev, actions, dev_flow, attr, error))
12881 dev_flow->dv.actions[actions_n++] =
12882 dev_flow->dv.encap_decap->action;
12884 /* Handle encap without preceding decap. */
12885 if (flow_dv_create_action_l2_encap
12886 (dev, actions, dev_flow, attr->transfer,
12889 dev_flow->dv.actions[actions_n++] =
12890 dev_flow->dv.encap_decap->action;
12892 action_flags |= MLX5_FLOW_ACTION_ENCAP;
12893 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12894 sample_act->action_flags |=
12895 MLX5_FLOW_ACTION_ENCAP;
12897 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12898 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12900 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12901 if (flow_dv_create_action_l2_decap
12902 (dev, dev_flow, attr->transfer, error))
12904 dev_flow->dv.actions[actions_n++] =
12905 dev_flow->dv.encap_decap->action;
12907 /* If decap is followed by encap, handle it at encap. */
12908 action_flags |= MLX5_FLOW_ACTION_DECAP;
12910 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12911 dev_flow->dv.actions[actions_n++] =
12912 (void *)(uintptr_t)action->conf;
12913 action_flags |= MLX5_FLOW_ACTION_JUMP;
12915 case RTE_FLOW_ACTION_TYPE_JUMP:
12916 jump_group = ((const struct rte_flow_action_jump *)
12917 action->conf)->group;
12918 grp_info.std_tbl_fix = 0;
12919 if (dev_flow->skip_scale &
12920 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12921 grp_info.skip_scale = 1;
12923 grp_info.skip_scale = 0;
12924 ret = mlx5_flow_group_to_table(dev, tunnel,
12930 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12932 !!dev_flow->external,
12933 tunnel, jump_group, 0,
12936 return rte_flow_error_set
12938 RTE_FLOW_ERROR_TYPE_ACTION,
12940 "cannot create jump action.");
12941 if (flow_dv_jump_tbl_resource_register
12942 (dev, tbl, dev_flow, error)) {
12943 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12944 return rte_flow_error_set
12946 RTE_FLOW_ERROR_TYPE_ACTION,
12948 "cannot create jump action.");
12950 dev_flow->dv.actions[actions_n++] =
12951 dev_flow->dv.jump->action;
12952 action_flags |= MLX5_FLOW_ACTION_JUMP;
12953 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12954 sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12957 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12958 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12959 if (flow_dv_convert_action_modify_mac
12960 (mhdr_res, actions, error))
12962 action_flags |= actions->type ==
12963 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12964 MLX5_FLOW_ACTION_SET_MAC_SRC :
12965 MLX5_FLOW_ACTION_SET_MAC_DST;
12967 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12968 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12969 if (flow_dv_convert_action_modify_ipv4
12970 (mhdr_res, actions, error))
12972 action_flags |= actions->type ==
12973 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12974 MLX5_FLOW_ACTION_SET_IPV4_SRC :
12975 MLX5_FLOW_ACTION_SET_IPV4_DST;
12977 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12978 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12979 if (flow_dv_convert_action_modify_ipv6
12980 (mhdr_res, actions, error))
12982 action_flags |= actions->type ==
12983 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12984 MLX5_FLOW_ACTION_SET_IPV6_SRC :
12985 MLX5_FLOW_ACTION_SET_IPV6_DST;
12987 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12988 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12989 if (flow_dv_convert_action_modify_tp
12990 (mhdr_res, actions, items,
12991 &flow_attr, dev_flow, !!(action_flags &
12992 MLX5_FLOW_ACTION_DECAP), error))
12994 action_flags |= actions->type ==
12995 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12996 MLX5_FLOW_ACTION_SET_TP_SRC :
12997 MLX5_FLOW_ACTION_SET_TP_DST;
12999 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13000 if (flow_dv_convert_action_modify_dec_ttl
13001 (mhdr_res, items, &flow_attr, dev_flow,
13003 MLX5_FLOW_ACTION_DECAP), error))
13005 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13007 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13008 if (flow_dv_convert_action_modify_ttl
13009 (mhdr_res, actions, items, &flow_attr,
13010 dev_flow, !!(action_flags &
13011 MLX5_FLOW_ACTION_DECAP), error))
13013 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13015 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13016 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13017 if (flow_dv_convert_action_modify_tcp_seq
13018 (mhdr_res, actions, error))
13020 action_flags |= actions->type ==
13021 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13022 MLX5_FLOW_ACTION_INC_TCP_SEQ :
13023 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13026 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13027 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13028 if (flow_dv_convert_action_modify_tcp_ack
13029 (mhdr_res, actions, error))
13031 action_flags |= actions->type ==
13032 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13033 MLX5_FLOW_ACTION_INC_TCP_ACK :
13034 MLX5_FLOW_ACTION_DEC_TCP_ACK;
13036 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13037 if (flow_dv_convert_action_set_reg
13038 (mhdr_res, actions, error))
13040 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13042 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13043 if (flow_dv_convert_action_copy_mreg
13044 (dev, mhdr_res, actions, error))
13046 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13048 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13049 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13050 dev_flow->handle->fate_action =
13051 MLX5_FLOW_FATE_DEFAULT_MISS;
13053 case RTE_FLOW_ACTION_TYPE_METER:
13055 return rte_flow_error_set(error, rte_errno,
13056 RTE_FLOW_ERROR_TYPE_ACTION,
13057 NULL, "Failed to get meter in flow.");
13058 /* Set the meter action. */
13059 dev_flow->dv.actions[actions_n++] =
13060 wks->fm->meter_action;
13061 action_flags |= MLX5_FLOW_ACTION_METER;
13063 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13064 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13067 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13069 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13070 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13073 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13075 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13076 sample_act_pos = actions_n;
13077 sample = (const struct rte_flow_action_sample *)
13080 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13081 /* put encap action into group if work with port id */
13082 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13083 (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13084 sample_act->action_flags |=
13085 MLX5_FLOW_ACTION_ENCAP;
13087 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13088 if (flow_dv_convert_action_modify_field
13089 (dev, mhdr_res, actions, attr, error))
13091 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13093 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13094 owner_idx = (uint32_t)(uintptr_t)action->conf;
13095 ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13097 return rte_flow_error_set(error, EINVAL,
13098 RTE_FLOW_ERROR_TYPE_ACTION,
13100 "Failed to get CT object.");
13101 if (mlx5_aso_ct_available(priv->sh, ct))
13102 return rte_flow_error_set(error, rte_errno,
13103 RTE_FLOW_ERROR_TYPE_ACTION,
13105 "CT is unavailable.");
13106 if (ct->is_original)
13107 dev_flow->dv.actions[actions_n] =
13108 ct->dr_action_orig;
13110 dev_flow->dv.actions[actions_n] =
13111 ct->dr_action_rply;
13112 flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
13113 flow->ct = owner_idx;
13114 __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
13116 action_flags |= MLX5_FLOW_ACTION_CT;
13118 case RTE_FLOW_ACTION_TYPE_END:
13119 actions_end = true;
13120 if (mhdr_res->actions_num) {
13121 /* create modify action if needed. */
13122 if (flow_dv_modify_hdr_resource_register
13123 (dev, mhdr_res, dev_flow, error))
13125 dev_flow->dv.actions[modify_action_position] =
13126 handle->dvh.modify_hdr->action;
13129 * Handle AGE and COUNT action by single HW counter
13130 * when they are not shared.
13132 if (action_flags & MLX5_FLOW_ACTION_AGE) {
13133 if ((non_shared_age &&
13134 count && !count->shared) ||
13135 !(priv->sh->flow_hit_aso_en &&
13136 (attr->group || attr->transfer))) {
13137 /* Creates age by counters. */
13138 cnt_act = flow_dv_prepare_counter
13145 dev_flow->dv.actions[age_act_pos] =
13149 if (!flow->age && non_shared_age) {
13150 flow->age = flow_dv_aso_age_alloc
13154 flow_dv_aso_age_params_init
13156 non_shared_age->context ?
13157 non_shared_age->context :
13158 (void *)(uintptr_t)
13159 (dev_flow->flow_idx),
13160 non_shared_age->timeout);
13162 age_act = flow_aso_age_get_by_idx(dev,
13164 dev_flow->dv.actions[age_act_pos] =
13165 age_act->dr_action;
13167 if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13169 * Create one count action, to be used
13170 * by all sub-flows.
13172 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13177 dev_flow->dv.actions[actions_n++] =
13183 if (mhdr_res->actions_num &&
13184 modify_action_position == UINT32_MAX)
13185 modify_action_position = actions_n++;
13187 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13188 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13189 int item_type = items->type;
13191 if (!mlx5_flow_os_item_supported(item_type))
13192 return rte_flow_error_set(error, ENOTSUP,
13193 RTE_FLOW_ERROR_TYPE_ITEM,
13194 NULL, "item not supported");
13195 switch (item_type) {
13196 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13197 flow_dv_translate_item_port_id
13198 (dev, match_mask, match_value, items, attr);
13199 last_item = MLX5_FLOW_ITEM_PORT_ID;
13201 case RTE_FLOW_ITEM_TYPE_ETH:
13202 flow_dv_translate_item_eth(match_mask, match_value,
13204 dev_flow->dv.group);
13205 matcher.priority = action_flags &
13206 MLX5_FLOW_ACTION_DEFAULT_MISS &&
13207 !dev_flow->external ?
13208 MLX5_PRIORITY_MAP_L3 :
13209 MLX5_PRIORITY_MAP_L2;
13210 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13211 MLX5_FLOW_LAYER_OUTER_L2;
13213 case RTE_FLOW_ITEM_TYPE_VLAN:
13214 flow_dv_translate_item_vlan(dev_flow,
13215 match_mask, match_value,
13217 dev_flow->dv.group);
13218 matcher.priority = MLX5_PRIORITY_MAP_L2;
13219 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13220 MLX5_FLOW_LAYER_INNER_VLAN) :
13221 (MLX5_FLOW_LAYER_OUTER_L2 |
13222 MLX5_FLOW_LAYER_OUTER_VLAN);
13224 case RTE_FLOW_ITEM_TYPE_IPV4:
13225 mlx5_flow_tunnel_ip_check(items, next_protocol,
13226 &item_flags, &tunnel);
13227 flow_dv_translate_item_ipv4(match_mask, match_value,
13229 dev_flow->dv.group);
13230 matcher.priority = MLX5_PRIORITY_MAP_L3;
13231 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13232 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13233 if (items->mask != NULL &&
13234 ((const struct rte_flow_item_ipv4 *)
13235 items->mask)->hdr.next_proto_id) {
13237 ((const struct rte_flow_item_ipv4 *)
13238 (items->spec))->hdr.next_proto_id;
13240 ((const struct rte_flow_item_ipv4 *)
13241 (items->mask))->hdr.next_proto_id;
13243 /* Reset for inner layer. */
13244 next_protocol = 0xff;
13247 case RTE_FLOW_ITEM_TYPE_IPV6:
13248 mlx5_flow_tunnel_ip_check(items, next_protocol,
13249 &item_flags, &tunnel);
13250 flow_dv_translate_item_ipv6(match_mask, match_value,
13252 dev_flow->dv.group);
13253 matcher.priority = MLX5_PRIORITY_MAP_L3;
13254 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13255 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13256 if (items->mask != NULL &&
13257 ((const struct rte_flow_item_ipv6 *)
13258 items->mask)->hdr.proto) {
13260 ((const struct rte_flow_item_ipv6 *)
13261 items->spec)->hdr.proto;
13263 ((const struct rte_flow_item_ipv6 *)
13264 items->mask)->hdr.proto;
13266 /* Reset for inner layer. */
13267 next_protocol = 0xff;
13270 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13271 flow_dv_translate_item_ipv6_frag_ext(match_mask,
13274 last_item = tunnel ?
13275 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13276 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13277 if (items->mask != NULL &&
13278 ((const struct rte_flow_item_ipv6_frag_ext *)
13279 items->mask)->hdr.next_header) {
13281 ((const struct rte_flow_item_ipv6_frag_ext *)
13282 items->spec)->hdr.next_header;
13284 ((const struct rte_flow_item_ipv6_frag_ext *)
13285 items->mask)->hdr.next_header;
13287 /* Reset for inner layer. */
13288 next_protocol = 0xff;
13291 case RTE_FLOW_ITEM_TYPE_TCP:
13292 flow_dv_translate_item_tcp(match_mask, match_value,
13294 matcher.priority = MLX5_PRIORITY_MAP_L4;
13295 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13296 MLX5_FLOW_LAYER_OUTER_L4_TCP;
13298 case RTE_FLOW_ITEM_TYPE_UDP:
13299 flow_dv_translate_item_udp(match_mask, match_value,
13301 matcher.priority = MLX5_PRIORITY_MAP_L4;
13302 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13303 MLX5_FLOW_LAYER_OUTER_L4_UDP;
13305 case RTE_FLOW_ITEM_TYPE_GRE:
13306 flow_dv_translate_item_gre(match_mask, match_value,
13308 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13309 last_item = MLX5_FLOW_LAYER_GRE;
13311 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13312 flow_dv_translate_item_gre_key(match_mask,
13313 match_value, items);
13314 last_item = MLX5_FLOW_LAYER_GRE_KEY;
13316 case RTE_FLOW_ITEM_TYPE_NVGRE:
13317 flow_dv_translate_item_nvgre(match_mask, match_value,
13319 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13320 last_item = MLX5_FLOW_LAYER_GRE;
13322 case RTE_FLOW_ITEM_TYPE_VXLAN:
13323 flow_dv_translate_item_vxlan(dev, attr,
13324 match_mask, match_value,
13326 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13327 last_item = MLX5_FLOW_LAYER_VXLAN;
13329 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13330 flow_dv_translate_item_vxlan_gpe(match_mask,
13331 match_value, items,
13333 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13334 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13336 case RTE_FLOW_ITEM_TYPE_GENEVE:
13337 flow_dv_translate_item_geneve(match_mask, match_value,
13339 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13340 last_item = MLX5_FLOW_LAYER_GENEVE;
13342 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13343 ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13347 return rte_flow_error_set(error, -ret,
13348 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13349 "cannot create GENEVE TLV option");
13350 flow->geneve_tlv_option = 1;
13351 last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13353 case RTE_FLOW_ITEM_TYPE_MPLS:
13354 flow_dv_translate_item_mpls(match_mask, match_value,
13355 items, last_item, tunnel);
13356 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13357 last_item = MLX5_FLOW_LAYER_MPLS;
13359 case RTE_FLOW_ITEM_TYPE_MARK:
13360 flow_dv_translate_item_mark(dev, match_mask,
13361 match_value, items);
13362 last_item = MLX5_FLOW_ITEM_MARK;
13364 case RTE_FLOW_ITEM_TYPE_META:
13365 flow_dv_translate_item_meta(dev, match_mask,
13366 match_value, attr, items);
13367 last_item = MLX5_FLOW_ITEM_METADATA;
13369 case RTE_FLOW_ITEM_TYPE_ICMP:
13370 flow_dv_translate_item_icmp(match_mask, match_value,
13372 last_item = MLX5_FLOW_LAYER_ICMP;
13374 case RTE_FLOW_ITEM_TYPE_ICMP6:
13375 flow_dv_translate_item_icmp6(match_mask, match_value,
13377 last_item = MLX5_FLOW_LAYER_ICMP6;
13379 case RTE_FLOW_ITEM_TYPE_TAG:
13380 flow_dv_translate_item_tag(dev, match_mask,
13381 match_value, items);
13382 last_item = MLX5_FLOW_ITEM_TAG;
13384 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13385 flow_dv_translate_mlx5_item_tag(dev, match_mask,
13386 match_value, items);
13387 last_item = MLX5_FLOW_ITEM_TAG;
13389 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13390 flow_dv_translate_item_tx_queue(dev, match_mask,
13393 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13395 case RTE_FLOW_ITEM_TYPE_GTP:
13396 flow_dv_translate_item_gtp(match_mask, match_value,
13398 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13399 last_item = MLX5_FLOW_LAYER_GTP;
13401 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13402 ret = flow_dv_translate_item_gtp_psc(match_mask,
13406 return rte_flow_error_set(error, -ret,
13407 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13408 "cannot create GTP PSC item");
13409 last_item = MLX5_FLOW_LAYER_GTP_PSC;
13411 case RTE_FLOW_ITEM_TYPE_ECPRI:
13412 if (!mlx5_flex_parser_ecpri_exist(dev)) {
13413 /* Create it only the first time to be used. */
13414 ret = mlx5_flex_parser_ecpri_alloc(dev);
13416 return rte_flow_error_set
13418 RTE_FLOW_ERROR_TYPE_ITEM,
13420 "cannot create eCPRI parser");
13422 flow_dv_translate_item_ecpri(dev, match_mask,
13423 match_value, items);
13424 /* No other protocol should follow eCPRI layer. */
13425 last_item = MLX5_FLOW_LAYER_ECPRI;
13427 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13428 flow_dv_translate_item_integrity(match_mask,
13432 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13433 flow_dv_translate_item_aso_ct(dev, match_mask,
13434 match_value, items);
13439 item_flags |= last_item;
13442 * When E-Switch mode is enabled, we have two cases where we need to
13443 * set the source port manually.
13444 * The first one, is in case of Nic steering rule, and the second is
13445 * E-Switch rule where no port_id item was found. In both cases
13446 * the source port is set according the current port in use.
13448 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13449 (priv->representor || priv->master)) {
13450 if (flow_dv_translate_item_port_id(dev, match_mask,
13451 match_value, NULL, attr))
13454 #ifdef RTE_LIBRTE_MLX5_DEBUG
13455 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13456 dev_flow->dv.value.buf));
13459 * Layers may be already initialized from prefix flow if this dev_flow
13460 * is the suffix flow.
13462 handle->layers |= item_flags;
13463 if (action_flags & MLX5_FLOW_ACTION_RSS)
13464 flow_dv_hashfields_set(dev_flow, rss_desc);
13465 /* If has RSS action in the sample action, the Sample/Mirror resource
13466 * should be registered after the hash filed be update.
13468 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13469 ret = flow_dv_translate_action_sample(dev,
13478 ret = flow_dv_create_action_sample(dev,
13487 return rte_flow_error_set
13489 RTE_FLOW_ERROR_TYPE_ACTION,
13491 "cannot create sample action");
13492 if (num_of_dest > 1) {
13493 dev_flow->dv.actions[sample_act_pos] =
13494 dev_flow->dv.dest_array_res->action;
13496 dev_flow->dv.actions[sample_act_pos] =
13497 dev_flow->dv.sample_res->verbs_action;
13501 * For multiple destination (sample action with ratio=1), the encap
13502 * action and port id action will be combined into group action.
13503 * So need remove the original these actions in the flow and only
13504 * use the sample action instead of.
13506 if (num_of_dest > 1 &&
13507 (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13509 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13511 for (i = 0; i < actions_n; i++) {
13512 if ((sample_act->dr_encap_action &&
13513 sample_act->dr_encap_action ==
13514 dev_flow->dv.actions[i]) ||
13515 (sample_act->dr_port_id_action &&
13516 sample_act->dr_port_id_action ==
13517 dev_flow->dv.actions[i]) ||
13518 (sample_act->dr_jump_action &&
13519 sample_act->dr_jump_action ==
13520 dev_flow->dv.actions[i]))
13522 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13524 memcpy((void *)dev_flow->dv.actions,
13525 (void *)temp_actions,
13526 tmp_actions_n * sizeof(void *));
13527 actions_n = tmp_actions_n;
13529 dev_flow->dv.actions_n = actions_n;
13530 dev_flow->act_flags = action_flags;
13531 if (wks->skip_matcher_reg)
13533 /* Register matcher. */
13534 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13535 matcher.mask.size);
13536 matcher.priority = mlx5_get_matcher_priority(dev, attr,
13539 * When creating meter drop flow in drop table, using original
13540 * 5-tuple match, the matcher priority should be lower than
13543 if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13544 dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13545 matcher.priority <= MLX5_REG_BITS)
13546 matcher.priority += MLX5_REG_BITS;
13547 /* reserved field no needs to be set to 0 here. */
13548 tbl_key.is_fdb = attr->transfer;
13549 tbl_key.is_egress = attr->egress;
13550 tbl_key.level = dev_flow->dv.group;
13551 tbl_key.id = dev_flow->dv.table_id;
13552 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13553 tunnel, attr->group, error))
13559 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13562 * @param[in, out] action
13563 * Shred RSS action holding hash RX queue objects.
13564 * @param[in] hash_fields
13565 * Defines combination of packet fields to participate in RX hash.
13566 * @param[in] tunnel
13568 * @param[in] hrxq_idx
13569 * Hash RX queue index to set.
13572 * 0 on success, otherwise negative errno value.
13575 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13576 const uint64_t hash_fields,
13579 uint32_t *hrxqs = action->hrxq;
13581 switch (hash_fields & ~IBV_RX_HASH_INNER) {
13582 case MLX5_RSS_HASH_IPV4:
13583 /* fall-through. */
13584 case MLX5_RSS_HASH_IPV4_DST_ONLY:
13585 /* fall-through. */
13586 case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13587 hrxqs[0] = hrxq_idx;
13589 case MLX5_RSS_HASH_IPV4_TCP:
13590 /* fall-through. */
13591 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13592 /* fall-through. */
13593 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13594 hrxqs[1] = hrxq_idx;
13596 case MLX5_RSS_HASH_IPV4_UDP:
13597 /* fall-through. */
13598 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13599 /* fall-through. */
13600 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13601 hrxqs[2] = hrxq_idx;
13603 case MLX5_RSS_HASH_IPV6:
13604 /* fall-through. */
13605 case MLX5_RSS_HASH_IPV6_DST_ONLY:
13606 /* fall-through. */
13607 case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13608 hrxqs[3] = hrxq_idx;
13610 case MLX5_RSS_HASH_IPV6_TCP:
13611 /* fall-through. */
13612 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13613 /* fall-through. */
13614 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13615 hrxqs[4] = hrxq_idx;
13617 case MLX5_RSS_HASH_IPV6_UDP:
13618 /* fall-through. */
13619 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13620 /* fall-through. */
13621 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13622 hrxqs[5] = hrxq_idx;
13624 case MLX5_RSS_HASH_NONE:
13625 hrxqs[6] = hrxq_idx;
13633 * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13637 * Pointer to the Ethernet device structure.
13639 * Shared RSS action ID holding hash RX queue objects.
13640 * @param[in] hash_fields
13641 * Defines combination of packet fields to participate in RX hash.
13642 * @param[in] tunnel
13646 * Valid hash RX queue index, otherwise 0.
13649 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13650 const uint64_t hash_fields)
13652 struct mlx5_priv *priv = dev->data->dev_private;
13653 struct mlx5_shared_action_rss *shared_rss =
13654 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13655 const uint32_t *hrxqs = shared_rss->hrxq;
13657 switch (hash_fields & ~IBV_RX_HASH_INNER) {
13658 case MLX5_RSS_HASH_IPV4:
13659 /* fall-through. */
13660 case MLX5_RSS_HASH_IPV4_DST_ONLY:
13661 /* fall-through. */
13662 case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13664 case MLX5_RSS_HASH_IPV4_TCP:
13665 /* fall-through. */
13666 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13667 /* fall-through. */
13668 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13670 case MLX5_RSS_HASH_IPV4_UDP:
13671 /* fall-through. */
13672 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13673 /* fall-through. */
13674 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13676 case MLX5_RSS_HASH_IPV6:
13677 /* fall-through. */
13678 case MLX5_RSS_HASH_IPV6_DST_ONLY:
13679 /* fall-through. */
13680 case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13682 case MLX5_RSS_HASH_IPV6_TCP:
13683 /* fall-through. */
13684 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13685 /* fall-through. */
13686 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13688 case MLX5_RSS_HASH_IPV6_UDP:
13689 /* fall-through. */
13690 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13691 /* fall-through. */
13692 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13694 case MLX5_RSS_HASH_NONE:
13703 * Apply the flow to the NIC, lock free,
13704 * (mutex should be acquired by caller).
13707 * Pointer to the Ethernet device structure.
13708 * @param[in, out] flow
13709 * Pointer to flow structure.
13710 * @param[out] error
13711 * Pointer to error structure.
13714 * 0 on success, a negative errno value otherwise and rte_errno is set.
13717 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13718 struct rte_flow_error *error)
13720 struct mlx5_flow_dv_workspace *dv;
13721 struct mlx5_flow_handle *dh;
13722 struct mlx5_flow_handle_dv *dv_h;
13723 struct mlx5_flow *dev_flow;
13724 struct mlx5_priv *priv = dev->data->dev_private;
13725 uint32_t handle_idx;
13729 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13730 struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13734 for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13735 dev_flow = &wks->flows[idx];
13736 dv = &dev_flow->dv;
13737 dh = dev_flow->handle;
13740 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13741 if (dv->transfer) {
13742 MLX5_ASSERT(priv->sh->dr_drop_action);
13743 dv->actions[n++] = priv->sh->dr_drop_action;
13745 #ifdef HAVE_MLX5DV_DR
13746 /* DR supports drop action placeholder. */
13747 MLX5_ASSERT(priv->sh->dr_drop_action);
13748 dv->actions[n++] = priv->sh->dr_drop_action;
13750 /* For DV we use the explicit drop queue. */
13751 MLX5_ASSERT(priv->drop_queue.hrxq);
13753 priv->drop_queue.hrxq->action;
13756 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13757 !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13758 struct mlx5_hrxq *hrxq;
13761 hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13766 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13767 "cannot get hash queue");
13770 dh->rix_hrxq = hrxq_idx;
13771 dv->actions[n++] = hrxq->action;
13772 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13773 struct mlx5_hrxq *hrxq = NULL;
13776 hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13777 rss_desc->shared_rss,
13778 dev_flow->hash_fields);
13780 hrxq = mlx5_ipool_get
13781 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13786 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13787 "cannot get hash queue");
13790 dh->rix_srss = rss_desc->shared_rss;
13791 dv->actions[n++] = hrxq->action;
13792 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13793 if (!priv->sh->default_miss_action) {
13796 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13797 "default miss action not be created.");
13800 dv->actions[n++] = priv->sh->default_miss_action;
13802 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13803 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13804 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13805 (void *)&dv->value, n,
13806 dv->actions, &dh->drv_flow);
13810 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13812 (!priv->config.allow_duplicate_pattern &&
13814 "duplicating pattern is not allowed" :
13815 "hardware refuses to create flow");
13818 if (priv->vmwa_context &&
13819 dh->vf_vlan.tag && !dh->vf_vlan.created) {
13821 * The rule contains the VLAN pattern.
13822 * For VF we are going to create VLAN
13823 * interface to make hypervisor set correct
13824 * e-Switch vport context.
13826 mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13831 err = rte_errno; /* Save rte_errno before cleanup. */
13832 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13833 handle_idx, dh, next) {
13834 /* hrxq is union, don't clear it if the flag is not set. */
13835 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13836 mlx5_hrxq_release(dev, dh->rix_hrxq);
13838 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13841 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13842 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13844 rte_errno = err; /* Restore rte_errno. */
13849 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13850 struct mlx5_list_entry *entry)
13852 struct mlx5_flow_dv_matcher *resource = container_of(entry,
13856 claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13857 mlx5_free(resource);
13861 * Release the flow matcher.
13864 * Pointer to Ethernet device.
13866 * Index to port ID action resource.
13869 * 1 while a reference on it exists, 0 when freed.
13872 flow_dv_matcher_release(struct rte_eth_dev *dev,
13873 struct mlx5_flow_handle *handle)
13875 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13876 struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13877 typeof(*tbl), tbl);
13880 MLX5_ASSERT(matcher->matcher_object);
13881 ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
13882 flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13887 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13889 struct mlx5_dev_ctx_shared *sh = tool_ctx;
13890 struct mlx5_flow_dv_encap_decap_resource *res =
13891 container_of(entry, typeof(*res), entry);
13893 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13894 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13898 * Release an encap/decap resource.
13901 * Pointer to Ethernet device.
13902 * @param encap_decap_idx
13903 * Index of encap decap resource.
13906 * 1 while a reference on it exists, 0 when freed.
13909 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13910 uint32_t encap_decap_idx)
13912 struct mlx5_priv *priv = dev->data->dev_private;
13913 struct mlx5_flow_dv_encap_decap_resource *resource;
13915 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13919 MLX5_ASSERT(resource->action);
13920 return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13924 * Release an jump to table action resource.
13927 * Pointer to Ethernet device.
13929 * Index to the jump action resource.
13932 * 1 while a reference on it exists, 0 when freed.
13935 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13938 struct mlx5_priv *priv = dev->data->dev_private;
13939 struct mlx5_flow_tbl_data_entry *tbl_data;
13941 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13945 return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13949 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13951 struct mlx5_flow_dv_modify_hdr_resource *res =
13952 container_of(entry, typeof(*res), entry);
13953 struct mlx5_dev_ctx_shared *sh = tool_ctx;
13955 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13956 mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
13960 * Release a modify-header resource.
13963 * Pointer to Ethernet device.
13965 * Pointer to mlx5_flow_handle.
13968 * 1 while a reference on it exists, 0 when freed.
13971 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13972 struct mlx5_flow_handle *handle)
13974 struct mlx5_priv *priv = dev->data->dev_private;
13975 struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13977 MLX5_ASSERT(entry->action);
13978 return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13982 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13984 struct mlx5_dev_ctx_shared *sh = tool_ctx;
13985 struct mlx5_flow_dv_port_id_action_resource *resource =
13986 container_of(entry, typeof(*resource), entry);
13988 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13989 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13993 * Release port ID action resource.
13996 * Pointer to Ethernet device.
13998 * Pointer to mlx5_flow_handle.
14001 * 1 while a reference on it exists, 0 when freed.
14004 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14007 struct mlx5_priv *priv = dev->data->dev_private;
14008 struct mlx5_flow_dv_port_id_action_resource *resource;
14010 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14013 MLX5_ASSERT(resource->action);
14014 return mlx5_list_unregister(priv->sh->port_id_action_list,
14019 * Release shared RSS action resource.
14022 * Pointer to Ethernet device.
14024 * Shared RSS action index.
14027 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14029 struct mlx5_priv *priv = dev->data->dev_private;
14030 struct mlx5_shared_action_rss *shared_rss;
14032 shared_rss = mlx5_ipool_get
14033 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14034 __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14038 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14040 struct mlx5_dev_ctx_shared *sh = tool_ctx;
14041 struct mlx5_flow_dv_push_vlan_action_resource *resource =
14042 container_of(entry, typeof(*resource), entry);
14044 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14045 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14049 * Release push vlan action resource.
14052 * Pointer to Ethernet device.
14054 * Pointer to mlx5_flow_handle.
14057 * 1 while a reference on it exists, 0 when freed.
14060 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14061 struct mlx5_flow_handle *handle)
14063 struct mlx5_priv *priv = dev->data->dev_private;
14064 struct mlx5_flow_dv_push_vlan_action_resource *resource;
14065 uint32_t idx = handle->dvh.rix_push_vlan;
14067 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14070 MLX5_ASSERT(resource->action);
14071 return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14076 * Release the fate resource.
14079 * Pointer to Ethernet device.
14081 * Pointer to mlx5_flow_handle.
14084 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14085 struct mlx5_flow_handle *handle)
14087 if (!handle->rix_fate)
14089 switch (handle->fate_action) {
14090 case MLX5_FLOW_FATE_QUEUE:
14091 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14092 mlx5_hrxq_release(dev, handle->rix_hrxq);
14094 case MLX5_FLOW_FATE_JUMP:
14095 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14097 case MLX5_FLOW_FATE_PORT_ID:
14098 flow_dv_port_id_action_resource_release(dev,
14099 handle->rix_port_id_action);
14102 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14105 handle->rix_fate = 0;
14109 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14110 struct mlx5_list_entry *entry)
14112 struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14115 struct rte_eth_dev *dev = resource->dev;
14116 struct mlx5_priv *priv = dev->data->dev_private;
14118 if (resource->verbs_action)
14119 claim_zero(mlx5_flow_os_destroy_flow_action
14120 (resource->verbs_action));
14121 if (resource->normal_path_tbl)
14122 flow_dv_tbl_resource_release(MLX5_SH(dev),
14123 resource->normal_path_tbl);
14124 flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14125 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14126 DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14130 * Release an sample resource.
14133 * Pointer to Ethernet device.
14135 * Pointer to mlx5_flow_handle.
14138 * 1 while a reference on it exists, 0 when freed.
14141 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14142 struct mlx5_flow_handle *handle)
14144 struct mlx5_priv *priv = dev->data->dev_private;
14145 struct mlx5_flow_dv_sample_resource *resource;
14147 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14148 handle->dvh.rix_sample);
14151 MLX5_ASSERT(resource->verbs_action);
14152 return mlx5_list_unregister(priv->sh->sample_action_list,
14157 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14158 struct mlx5_list_entry *entry)
14160 struct mlx5_flow_dv_dest_array_resource *resource =
14161 container_of(entry, typeof(*resource), entry);
14162 struct rte_eth_dev *dev = resource->dev;
14163 struct mlx5_priv *priv = dev->data->dev_private;
14166 MLX5_ASSERT(resource->action);
14167 if (resource->action)
14168 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14169 for (; i < resource->num_of_dest; i++)
14170 flow_dv_sample_sub_actions_release(dev,
14171 &resource->sample_idx[i]);
14172 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14173 DRV_LOG(DEBUG, "destination array resource %p: removed",
14178 * Release an destination array resource.
14181 * Pointer to Ethernet device.
14183 * Pointer to mlx5_flow_handle.
14186 * 1 while a reference on it exists, 0 when freed.
14189 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14190 struct mlx5_flow_handle *handle)
14192 struct mlx5_priv *priv = dev->data->dev_private;
14193 struct mlx5_flow_dv_dest_array_resource *resource;
14195 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14196 handle->dvh.rix_dest_array);
14199 MLX5_ASSERT(resource->action);
14200 return mlx5_list_unregister(priv->sh->dest_array_list,
14205 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14207 struct mlx5_priv *priv = dev->data->dev_private;
14208 struct mlx5_dev_ctx_shared *sh = priv->sh;
14209 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14210 sh->geneve_tlv_option_resource;
14211 rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14212 if (geneve_opt_resource) {
14213 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14214 __ATOMIC_RELAXED))) {
14215 claim_zero(mlx5_devx_cmd_destroy
14216 (geneve_opt_resource->obj));
14217 mlx5_free(sh->geneve_tlv_option_resource);
14218 sh->geneve_tlv_option_resource = NULL;
14221 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14225 * Remove the flow from the NIC but keeps it in memory.
14226 * Lock free, (mutex should be acquired by caller).
14229 * Pointer to Ethernet device.
14230 * @param[in, out] flow
14231 * Pointer to flow structure.
14234 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14236 struct mlx5_flow_handle *dh;
14237 uint32_t handle_idx;
14238 struct mlx5_priv *priv = dev->data->dev_private;
14242 handle_idx = flow->dev_handles;
14243 while (handle_idx) {
14244 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14248 if (dh->drv_flow) {
14249 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14250 dh->drv_flow = NULL;
14252 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14253 flow_dv_fate_resource_release(dev, dh);
14254 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14255 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14256 handle_idx = dh->next.next;
14261 * Remove the flow from the NIC and the memory.
14262 * Lock free, (mutex should be acquired by caller).
14265 * Pointer to the Ethernet device structure.
14266 * @param[in, out] flow
14267 * Pointer to flow structure.
14270 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14272 struct mlx5_flow_handle *dev_handle;
14273 struct mlx5_priv *priv = dev->data->dev_private;
14274 struct mlx5_flow_meter_info *fm = NULL;
14279 flow_dv_remove(dev, flow);
14280 if (flow->counter) {
14281 flow_dv_counter_free(dev, flow->counter);
14285 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14287 mlx5_flow_meter_detach(priv, fm);
14290 /* Keep the current age handling by default. */
14291 if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14292 flow_dv_aso_ct_release(dev, flow->ct);
14293 else if (flow->age)
14294 flow_dv_aso_age_release(dev, flow->age);
14295 if (flow->geneve_tlv_option) {
14296 flow_dv_geneve_tlv_option_resource_release(dev);
14297 flow->geneve_tlv_option = 0;
14299 while (flow->dev_handles) {
14300 uint32_t tmp_idx = flow->dev_handles;
14302 dev_handle = mlx5_ipool_get(priv->sh->ipool
14303 [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14306 flow->dev_handles = dev_handle->next.next;
14307 if (dev_handle->dvh.matcher)
14308 flow_dv_matcher_release(dev, dev_handle);
14309 if (dev_handle->dvh.rix_sample)
14310 flow_dv_sample_resource_release(dev, dev_handle);
14311 if (dev_handle->dvh.rix_dest_array)
14312 flow_dv_dest_array_resource_release(dev, dev_handle);
14313 if (dev_handle->dvh.rix_encap_decap)
14314 flow_dv_encap_decap_resource_release(dev,
14315 dev_handle->dvh.rix_encap_decap);
14316 if (dev_handle->dvh.modify_hdr)
14317 flow_dv_modify_hdr_resource_release(dev, dev_handle);
14318 if (dev_handle->dvh.rix_push_vlan)
14319 flow_dv_push_vlan_action_resource_release(dev,
14321 if (dev_handle->dvh.rix_tag)
14322 flow_dv_tag_release(dev,
14323 dev_handle->dvh.rix_tag);
14324 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14325 flow_dv_fate_resource_release(dev, dev_handle);
14327 srss = dev_handle->rix_srss;
14328 if (fm && dev_handle->is_meter_flow_id &&
14329 dev_handle->split_flow_id)
14330 mlx5_ipool_free(fm->flow_ipool,
14331 dev_handle->split_flow_id);
14332 else if (dev_handle->split_flow_id &&
14333 !dev_handle->is_meter_flow_id)
14334 mlx5_ipool_free(priv->sh->ipool
14335 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14336 dev_handle->split_flow_id);
14337 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14341 flow_dv_shared_rss_action_release(dev, srss);
14345 * Release array of hash RX queue objects.
14349 * Pointer to the Ethernet device structure.
14350 * @param[in, out] hrxqs
14351 * Array of hash RX queue objects.
14354 * Total number of references to hash RX queue objects in *hrxqs* array
14355 * after this operation.
14358 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14359 uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14364 for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14365 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14375 * Release all hash RX queue objects representing shared RSS action.
14378 * Pointer to the Ethernet device structure.
14379 * @param[in, out] action
14380 * Shared RSS action to remove hash RX queue objects from.
14383 * Total number of references to hash RX queue objects stored in *action*
14384 * after this operation.
14385 * Expected to be 0 if no external references held.
14388 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14389 struct mlx5_shared_action_rss *shared_rss)
14391 return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14395 * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14398 * Only one hash value is available for one L3+L4 combination:
14400 * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14401 * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
14402 * same slot in mlx5_rss_hash_fields.
14405 * Pointer to the shared action RSS conf.
14406 * @param[in, out] hash_field
14407 * hash_field variable needed to be adjusted.
14413 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14414 uint64_t *hash_field)
14416 uint64_t rss_types = rss->origin.types;
14418 switch (*hash_field & ~IBV_RX_HASH_INNER) {
14419 case MLX5_RSS_HASH_IPV4:
14420 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14421 *hash_field &= ~MLX5_RSS_HASH_IPV4;
14422 if (rss_types & ETH_RSS_L3_DST_ONLY)
14423 *hash_field |= IBV_RX_HASH_DST_IPV4;
14424 else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14425 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14427 *hash_field |= MLX5_RSS_HASH_IPV4;
14430 case MLX5_RSS_HASH_IPV6:
14431 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14432 *hash_field &= ~MLX5_RSS_HASH_IPV6;
14433 if (rss_types & ETH_RSS_L3_DST_ONLY)
14434 *hash_field |= IBV_RX_HASH_DST_IPV6;
14435 else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14436 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14438 *hash_field |= MLX5_RSS_HASH_IPV6;
14441 case MLX5_RSS_HASH_IPV4_UDP:
14442 /* fall-through. */
14443 case MLX5_RSS_HASH_IPV6_UDP:
14444 if (rss_types & ETH_RSS_UDP) {
14445 *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14446 if (rss_types & ETH_RSS_L4_DST_ONLY)
14447 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14448 else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14449 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14451 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14454 case MLX5_RSS_HASH_IPV4_TCP:
14455 /* fall-through. */
14456 case MLX5_RSS_HASH_IPV6_TCP:
14457 if (rss_types & ETH_RSS_TCP) {
14458 *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14459 if (rss_types & ETH_RSS_L4_DST_ONLY)
14460 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14461 else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14462 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14464 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14473 * Setup shared RSS action.
14474 * Prepare set of hash RX queue objects sufficient to handle all valid
14475 * hash_fields combinations (see enum ibv_rx_hash_fields).
14478 * Pointer to the Ethernet device structure.
14479 * @param[in] action_idx
14480 * Shared RSS action ipool index.
14481 * @param[in, out] action
14482 * Partially initialized shared RSS action.
14483 * @param[out] error
14484 * Perform verbose error reporting if not NULL. Initialized in case of
14488 * 0 on success, otherwise negative errno value.
14491 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14492 uint32_t action_idx,
14493 struct mlx5_shared_action_rss *shared_rss,
14494 struct rte_flow_error *error)
14496 struct mlx5_flow_rss_desc rss_desc = { 0 };
14500 if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14501 return rte_flow_error_set(error, rte_errno,
14502 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14503 "cannot setup indirection table");
14505 memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14506 rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14507 rss_desc.const_q = shared_rss->origin.queue;
14508 rss_desc.queue_num = shared_rss->origin.queue_num;
14509 /* Set non-zero value to indicate a shared RSS. */
14510 rss_desc.shared_rss = action_idx;
14511 rss_desc.ind_tbl = shared_rss->ind_tbl;
14512 for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14514 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14517 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14518 if (shared_rss->origin.level > 1) {
14519 hash_fields |= IBV_RX_HASH_INNER;
14522 rss_desc.tunnel = tunnel;
14523 rss_desc.hash_fields = hash_fields;
14524 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14528 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14529 "cannot get hash queue");
14530 goto error_hrxq_new;
14532 err = __flow_dv_action_rss_hrxq_set
14533 (shared_rss, hash_fields, hrxq_idx);
14539 __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14540 if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14541 shared_rss->ind_tbl = NULL;
14547 * Create shared RSS action.
14550 * Pointer to the Ethernet device structure.
14552 * Shared action configuration.
14554 * RSS action specification used to create shared action.
14555 * @param[out] error
14556 * Perform verbose error reporting if not NULL. Initialized in case of
14560 * A valid shared action ID in case of success, 0 otherwise and
14561 * rte_errno is set.
14564 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14565 const struct rte_flow_indir_action_conf *conf,
14566 const struct rte_flow_action_rss *rss,
14567 struct rte_flow_error *error)
14569 struct mlx5_priv *priv = dev->data->dev_private;
14570 struct mlx5_shared_action_rss *shared_rss = NULL;
14571 void *queue = NULL;
14572 struct rte_flow_action_rss *origin;
14573 const uint8_t *rss_key;
14574 uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14577 RTE_SET_USED(conf);
14578 queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14580 shared_rss = mlx5_ipool_zmalloc
14581 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14582 if (!shared_rss || !queue) {
14583 rte_flow_error_set(error, ENOMEM,
14584 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14585 "cannot allocate resource memory");
14586 goto error_rss_init;
14588 if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14589 rte_flow_error_set(error, E2BIG,
14590 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14591 "rss action number out of range");
14592 goto error_rss_init;
14594 shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14595 sizeof(*shared_rss->ind_tbl),
14597 if (!shared_rss->ind_tbl) {
14598 rte_flow_error_set(error, ENOMEM,
14599 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14600 "cannot allocate resource memory");
14601 goto error_rss_init;
14603 memcpy(queue, rss->queue, queue_size);
14604 shared_rss->ind_tbl->queues = queue;
14605 shared_rss->ind_tbl->queues_n = rss->queue_num;
14606 origin = &shared_rss->origin;
14607 origin->func = rss->func;
14608 origin->level = rss->level;
14609 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14610 origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14611 /* NULL RSS key indicates default RSS key. */
14612 rss_key = !rss->key ? rss_hash_default_key : rss->key;
14613 memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14614 origin->key = &shared_rss->key[0];
14615 origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14616 origin->queue = queue;
14617 origin->queue_num = rss->queue_num;
14618 if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14619 goto error_rss_init;
14620 rte_spinlock_init(&shared_rss->action_rss_sl);
14621 __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14622 rte_spinlock_lock(&priv->shared_act_sl);
14623 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14624 &priv->rss_shared_actions, idx, shared_rss, next);
14625 rte_spinlock_unlock(&priv->shared_act_sl);
14629 if (shared_rss->ind_tbl)
14630 mlx5_free(shared_rss->ind_tbl);
14631 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14640 * Destroy the shared RSS action.
14641 * Release related hash RX queue objects.
14644 * Pointer to the Ethernet device structure.
14646 * The shared RSS action object ID to be removed.
14647 * @param[out] error
14648 * Perform verbose error reporting if not NULL. Initialized in case of
14652 * 0 on success, otherwise negative errno value.
14655 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14656 struct rte_flow_error *error)
14658 struct mlx5_priv *priv = dev->data->dev_private;
14659 struct mlx5_shared_action_rss *shared_rss =
14660 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14661 uint32_t old_refcnt = 1;
14663 uint16_t *queue = NULL;
14666 return rte_flow_error_set(error, EINVAL,
14667 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14668 "invalid shared action");
14669 remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14671 return rte_flow_error_set(error, EBUSY,
14672 RTE_FLOW_ERROR_TYPE_ACTION,
14674 "shared rss hrxq has references");
14675 if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14676 0, 0, __ATOMIC_ACQUIRE,
14678 return rte_flow_error_set(error, EBUSY,
14679 RTE_FLOW_ERROR_TYPE_ACTION,
14681 "shared rss has references");
14682 queue = shared_rss->ind_tbl->queues;
14683 remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14685 return rte_flow_error_set(error, EBUSY,
14686 RTE_FLOW_ERROR_TYPE_ACTION,
14688 "shared rss indirection table has"
14691 rte_spinlock_lock(&priv->shared_act_sl);
14692 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14693 &priv->rss_shared_actions, idx, shared_rss, next);
14694 rte_spinlock_unlock(&priv->shared_act_sl);
14695 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14701 * Create indirect action, lock free,
14702 * (mutex should be acquired by caller).
14703 * Dispatcher for action type specific call.
14706 * Pointer to the Ethernet device structure.
14708 * Shared action configuration.
14709 * @param[in] action
14710 * Action specification used to create indirect action.
14711 * @param[out] error
14712 * Perform verbose error reporting if not NULL. Initialized in case of
14716 * A valid shared action handle in case of success, NULL otherwise and
14717 * rte_errno is set.
14719 static struct rte_flow_action_handle *
14720 flow_dv_action_create(struct rte_eth_dev *dev,
14721 const struct rte_flow_indir_action_conf *conf,
14722 const struct rte_flow_action *action,
14723 struct rte_flow_error *err)
14725 struct mlx5_priv *priv = dev->data->dev_private;
14726 uint32_t age_idx = 0;
14730 switch (action->type) {
14731 case RTE_FLOW_ACTION_TYPE_RSS:
14732 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14733 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14734 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14736 case RTE_FLOW_ACTION_TYPE_AGE:
14737 age_idx = flow_dv_aso_age_alloc(dev, err);
14742 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14743 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14744 flow_dv_aso_age_params_init(dev, age_idx,
14745 ((const struct rte_flow_action_age *)
14746 action->conf)->context ?
14747 ((const struct rte_flow_action_age *)
14748 action->conf)->context :
14749 (void *)(uintptr_t)idx,
14750 ((const struct rte_flow_action_age *)
14751 action->conf)->timeout);
14754 case RTE_FLOW_ACTION_TYPE_COUNT:
14755 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14756 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14757 MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14759 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14760 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14762 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14765 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14766 NULL, "action type not supported");
14769 return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14773 * Destroy the indirect action.
14774 * Release action related resources on the NIC and the memory.
14775 * Lock free, (mutex should be acquired by caller).
14776 * Dispatcher for action type specific call.
14779 * Pointer to the Ethernet device structure.
14780 * @param[in] handle
14781 * The indirect action object handle to be removed.
14782 * @param[out] error
14783 * Perform verbose error reporting if not NULL. Initialized in case of
14787 * 0 on success, otherwise negative errno value.
14790 flow_dv_action_destroy(struct rte_eth_dev *dev,
14791 struct rte_flow_action_handle *handle,
14792 struct rte_flow_error *error)
14794 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14795 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14796 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14797 struct mlx5_flow_counter *cnt;
14798 uint32_t no_flow_refcnt = 1;
14802 case MLX5_INDIRECT_ACTION_TYPE_RSS:
14803 return __flow_dv_action_rss_release(dev, idx, error);
14804 case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14805 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14806 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14807 &no_flow_refcnt, 1, false,
14810 return rte_flow_error_set(error, EBUSY,
14811 RTE_FLOW_ERROR_TYPE_ACTION,
14813 "Indirect count action has references");
14814 flow_dv_counter_free(dev, idx);
14816 case MLX5_INDIRECT_ACTION_TYPE_AGE:
14817 ret = flow_dv_aso_age_release(dev, idx);
14820 * In this case, the last flow has a reference will
14821 * actually release the age action.
14823 DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14824 " released with references %d.", idx, ret);
14826 case MLX5_INDIRECT_ACTION_TYPE_CT:
14827 ret = flow_dv_aso_ct_release(dev, idx);
14831 DRV_LOG(DEBUG, "Connection tracking object %u still "
14832 "has references %d.", idx, ret);
14835 return rte_flow_error_set(error, ENOTSUP,
14836 RTE_FLOW_ERROR_TYPE_ACTION,
14838 "action type not supported");
14843 * Updates in place shared RSS action configuration.
14846 * Pointer to the Ethernet device structure.
14848 * The shared RSS action object ID to be updated.
14849 * @param[in] action_conf
14850 * RSS action specification used to modify *shared_rss*.
14851 * @param[out] error
14852 * Perform verbose error reporting if not NULL. Initialized in case of
14856 * 0 on success, otherwise negative errno value.
14857 * @note: currently only support update of RSS queues.
14860 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14861 const struct rte_flow_action_rss *action_conf,
14862 struct rte_flow_error *error)
14864 struct mlx5_priv *priv = dev->data->dev_private;
14865 struct mlx5_shared_action_rss *shared_rss =
14866 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14868 void *queue = NULL;
14869 uint16_t *queue_old = NULL;
14870 uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14873 return rte_flow_error_set(error, EINVAL,
14874 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14875 "invalid shared action to update");
14876 if (priv->obj_ops.ind_table_modify == NULL)
14877 return rte_flow_error_set(error, ENOTSUP,
14878 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14879 "cannot modify indirection table");
14880 queue = mlx5_malloc(MLX5_MEM_ZERO,
14881 RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14884 return rte_flow_error_set(error, ENOMEM,
14885 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14887 "cannot allocate resource memory");
14888 memcpy(queue, action_conf->queue, queue_size);
14889 MLX5_ASSERT(shared_rss->ind_tbl);
14890 rte_spinlock_lock(&shared_rss->action_rss_sl);
14891 queue_old = shared_rss->ind_tbl->queues;
14892 ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14893 queue, action_conf->queue_num, true);
14896 ret = rte_flow_error_set(error, rte_errno,
14897 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14898 "cannot update indirection table");
14900 mlx5_free(queue_old);
14901 shared_rss->origin.queue = queue;
14902 shared_rss->origin.queue_num = action_conf->queue_num;
14904 rte_spinlock_unlock(&shared_rss->action_rss_sl);
14909 * Updates in place conntrack context or direction.
14910 * Context update should be synchronized.
14913 * Pointer to the Ethernet device structure.
14915 * The conntrack object ID to be updated.
14916 * @param[in] update
14917 * Pointer to the structure of information to update.
14918 * @param[out] error
14919 * Perform verbose error reporting if not NULL. Initialized in case of
14923 * 0 on success, otherwise negative errno value.
14926 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14927 const struct rte_flow_modify_conntrack *update,
14928 struct rte_flow_error *error)
14930 struct mlx5_priv *priv = dev->data->dev_private;
14931 struct mlx5_aso_ct_action *ct;
14932 const struct rte_flow_action_conntrack *new_prf;
14934 uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14937 if (PORT_ID(priv) != owner)
14938 return rte_flow_error_set(error, EACCES,
14939 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14941 "CT object owned by another port");
14942 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14943 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14945 return rte_flow_error_set(error, ENOMEM,
14946 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14948 "CT object is inactive");
14949 new_prf = &update->new_ct;
14950 if (update->direction)
14951 ct->is_original = !!new_prf->is_original_dir;
14952 if (update->state) {
14953 /* Only validate the profile when it needs to be updated. */
14954 ret = mlx5_validate_action_ct(dev, new_prf, error);
14957 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14959 return rte_flow_error_set(error, EIO,
14960 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14962 "Failed to send CT context update WQE");
14963 /* Block until ready or a failure. */
14964 ret = mlx5_aso_ct_available(priv->sh, ct);
14966 rte_flow_error_set(error, rte_errno,
14967 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14969 "Timeout to get the CT update");
14975 * Updates in place shared action configuration, lock free,
14976 * (mutex should be acquired by caller).
14979 * Pointer to the Ethernet device structure.
14980 * @param[in] handle
14981 * The indirect action object handle to be updated.
14982 * @param[in] update
14983 * Action specification used to modify the action pointed by *handle*.
14984 * *update* could be of same type with the action pointed by the *handle*
14985 * handle argument, or some other structures like a wrapper, depending on
14986 * the indirect action type.
14987 * @param[out] error
14988 * Perform verbose error reporting if not NULL. Initialized in case of
14992 * 0 on success, otherwise negative errno value.
14995 flow_dv_action_update(struct rte_eth_dev *dev,
14996 struct rte_flow_action_handle *handle,
14997 const void *update,
14998 struct rte_flow_error *err)
15000 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15001 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15002 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15003 const void *action_conf;
15006 case MLX5_INDIRECT_ACTION_TYPE_RSS:
15007 action_conf = ((const struct rte_flow_action *)update)->conf;
15008 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15009 case MLX5_INDIRECT_ACTION_TYPE_CT:
15010 return __flow_dv_action_ct_update(dev, idx, update, err);
15012 return rte_flow_error_set(err, ENOTSUP,
15013 RTE_FLOW_ERROR_TYPE_ACTION,
15015 "action type update not supported");
15020 * Destroy the meter sub policy table rules.
15021 * Lock free, (mutex should be acquired by caller).
15024 * Pointer to Ethernet device.
15025 * @param[in] sub_policy
15026 * Pointer to meter sub policy table.
15029 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15030 struct mlx5_flow_meter_sub_policy *sub_policy)
15032 struct mlx5_priv *priv = dev->data->dev_private;
15033 struct mlx5_flow_tbl_data_entry *tbl;
15034 struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15035 struct mlx5_flow_meter_info *next_fm;
15036 struct mlx5_sub_policy_color_rule *color_rule;
15040 for (i = 0; i < RTE_COLORS; i++) {
15042 if (i == RTE_COLOR_GREEN && policy &&
15043 policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15044 next_fm = mlx5_flow_meter_find(priv,
15045 policy->act_cnt[i].next_mtr_id, NULL);
15046 TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15048 claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15049 tbl = container_of(color_rule->matcher->tbl,
15050 typeof(*tbl), tbl);
15051 mlx5_list_unregister(tbl->matchers,
15052 &color_rule->matcher->entry);
15053 TAILQ_REMOVE(&sub_policy->color_rules[i],
15054 color_rule, next_port);
15055 mlx5_free(color_rule);
15057 mlx5_flow_meter_detach(priv, next_fm);
15060 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15061 if (sub_policy->rix_hrxq[i]) {
15062 if (policy && !policy->is_hierarchy)
15063 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15064 sub_policy->rix_hrxq[i] = 0;
15066 if (sub_policy->jump_tbl[i]) {
15067 flow_dv_tbl_resource_release(MLX5_SH(dev),
15068 sub_policy->jump_tbl[i]);
15069 sub_policy->jump_tbl[i] = NULL;
15072 if (sub_policy->tbl_rsc) {
15073 flow_dv_tbl_resource_release(MLX5_SH(dev),
15074 sub_policy->tbl_rsc);
15075 sub_policy->tbl_rsc = NULL;
15080 * Destroy policy rules, lock free,
15081 * (mutex should be acquired by caller).
15082 * Dispatcher for action type specific call.
15085 * Pointer to the Ethernet device structure.
15086 * @param[in] mtr_policy
15087 * Meter policy struct.
15090 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15091 struct mlx5_flow_meter_policy *mtr_policy)
15094 struct mlx5_flow_meter_sub_policy *sub_policy;
15095 uint16_t sub_policy_num;
15097 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15098 sub_policy_num = (mtr_policy->sub_policy_num >>
15099 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15100 MLX5_MTR_SUB_POLICY_NUM_MASK;
15101 for (j = 0; j < sub_policy_num; j++) {
15102 sub_policy = mtr_policy->sub_policys[i][j];
15104 __flow_dv_destroy_sub_policy_rules
15111 * Destroy policy action, lock free,
15112 * (mutex should be acquired by caller).
15113 * Dispatcher for action type specific call.
15116 * Pointer to the Ethernet device structure.
15117 * @param[in] mtr_policy
15118 * Meter policy struct.
15121 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15122 struct mlx5_flow_meter_policy *mtr_policy)
15124 struct rte_flow_action *rss_action;
15125 struct mlx5_flow_handle dev_handle;
15128 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15129 if (mtr_policy->act_cnt[i].rix_mark) {
15130 flow_dv_tag_release(dev,
15131 mtr_policy->act_cnt[i].rix_mark);
15132 mtr_policy->act_cnt[i].rix_mark = 0;
15134 if (mtr_policy->act_cnt[i].modify_hdr) {
15135 dev_handle.dvh.modify_hdr =
15136 mtr_policy->act_cnt[i].modify_hdr;
15137 flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15139 switch (mtr_policy->act_cnt[i].fate_action) {
15140 case MLX5_FLOW_FATE_SHARED_RSS:
15141 rss_action = mtr_policy->act_cnt[i].rss;
15142 mlx5_free(rss_action);
15144 case MLX5_FLOW_FATE_PORT_ID:
15145 if (mtr_policy->act_cnt[i].rix_port_id_action) {
15146 flow_dv_port_id_action_resource_release(dev,
15147 mtr_policy->act_cnt[i].rix_port_id_action);
15148 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15151 case MLX5_FLOW_FATE_DROP:
15152 case MLX5_FLOW_FATE_JUMP:
15153 for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15154 mtr_policy->act_cnt[i].dr_jump_action[j] =
15158 /*Queue action do nothing*/
15162 for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15163 mtr_policy->dr_drop_action[j] = NULL;
15167 * Create policy action per domain, lock free,
15168 * (mutex should be acquired by caller).
15169 * Dispatcher for action type specific call.
15172 * Pointer to the Ethernet device structure.
15173 * @param[in] mtr_policy
15174 * Meter policy struct.
15175 * @param[in] action
15176 * Action specification used to create meter actions.
15177 * @param[out] error
15178 * Perform verbose error reporting if not NULL. Initialized in case of
15182 * 0 on success, otherwise negative errno value.
15185 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15186 struct mlx5_flow_meter_policy *mtr_policy,
15187 const struct rte_flow_action *actions[RTE_COLORS],
15188 enum mlx5_meter_domain domain,
15189 struct rte_mtr_error *error)
15191 struct mlx5_priv *priv = dev->data->dev_private;
15192 struct rte_flow_error flow_err;
15193 const struct rte_flow_action *act;
15194 uint64_t action_flags = 0;
15195 struct mlx5_flow_handle dh;
15196 struct mlx5_flow dev_flow;
15197 struct mlx5_flow_dv_port_id_action_resource port_id_action;
15199 uint8_t egress, transfer;
15200 struct mlx5_meter_policy_action_container *act_cnt = NULL;
15202 struct mlx5_flow_dv_modify_hdr_resource res;
15203 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15204 sizeof(struct mlx5_modification_cmd) *
15205 (MLX5_MAX_MODIFY_NUM + 1)];
15207 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15209 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15210 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15211 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15212 memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15213 memset(&port_id_action, 0,
15214 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15215 memset(mhdr_res, 0, sizeof(*mhdr_res));
15216 mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15218 MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15219 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
15220 dev_flow.handle = &dh;
15221 dev_flow.dv.port_id_action = &port_id_action;
15222 dev_flow.external = true;
15223 for (i = 0; i < RTE_COLORS; i++) {
15224 if (i < MLX5_MTR_RTE_COLORS)
15225 act_cnt = &mtr_policy->act_cnt[i];
15226 for (act = actions[i];
15227 act && act->type != RTE_FLOW_ACTION_TYPE_END;
15229 switch (act->type) {
15230 case RTE_FLOW_ACTION_TYPE_MARK:
15232 uint32_t tag_be = mlx5_flow_mark_set
15233 (((const struct rte_flow_action_mark *)
15236 if (i >= MLX5_MTR_RTE_COLORS)
15237 return -rte_mtr_error_set(error,
15239 RTE_MTR_ERROR_TYPE_METER_POLICY,
15241 "cannot create policy "
15242 "mark action for this color");
15243 dev_flow.handle->mark = 1;
15244 if (flow_dv_tag_resource_register(dev, tag_be,
15245 &dev_flow, &flow_err))
15246 return -rte_mtr_error_set(error,
15248 RTE_MTR_ERROR_TYPE_METER_POLICY,
15250 "cannot setup policy mark action");
15251 MLX5_ASSERT(dev_flow.dv.tag_resource);
15252 act_cnt->rix_mark =
15253 dev_flow.handle->dvh.rix_tag;
15254 action_flags |= MLX5_FLOW_ACTION_MARK;
15257 case RTE_FLOW_ACTION_TYPE_SET_TAG:
15258 if (i >= MLX5_MTR_RTE_COLORS)
15259 return -rte_mtr_error_set(error,
15261 RTE_MTR_ERROR_TYPE_METER_POLICY,
15263 "cannot create policy "
15264 "set tag action for this color");
15265 if (flow_dv_convert_action_set_tag
15267 (const struct rte_flow_action_set_tag *)
15268 act->conf, &flow_err))
15269 return -rte_mtr_error_set(error,
15271 RTE_MTR_ERROR_TYPE_METER_POLICY,
15272 NULL, "cannot convert policy "
15274 if (!mhdr_res->actions_num)
15275 return -rte_mtr_error_set(error,
15277 RTE_MTR_ERROR_TYPE_METER_POLICY,
15278 NULL, "cannot find policy "
15280 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15282 case RTE_FLOW_ACTION_TYPE_DROP:
15284 struct mlx5_flow_mtr_mng *mtrmng =
15286 struct mlx5_flow_tbl_data_entry *tbl_data;
15289 * Create the drop table with
15290 * METER DROP level.
15292 if (!mtrmng->drop_tbl[domain]) {
15293 mtrmng->drop_tbl[domain] =
15294 flow_dv_tbl_resource_get(dev,
15295 MLX5_FLOW_TABLE_LEVEL_METER,
15296 egress, transfer, false, NULL, 0,
15297 0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15298 if (!mtrmng->drop_tbl[domain])
15299 return -rte_mtr_error_set
15301 RTE_MTR_ERROR_TYPE_METER_POLICY,
15303 "Failed to create meter drop table");
15305 tbl_data = container_of
15306 (mtrmng->drop_tbl[domain],
15307 struct mlx5_flow_tbl_data_entry, tbl);
15308 if (i < MLX5_MTR_RTE_COLORS) {
15309 act_cnt->dr_jump_action[domain] =
15310 tbl_data->jump.action;
15311 act_cnt->fate_action =
15312 MLX5_FLOW_FATE_DROP;
15314 if (i == RTE_COLOR_RED)
15315 mtr_policy->dr_drop_action[domain] =
15316 tbl_data->jump.action;
15317 action_flags |= MLX5_FLOW_ACTION_DROP;
15320 case RTE_FLOW_ACTION_TYPE_QUEUE:
15322 if (i >= MLX5_MTR_RTE_COLORS)
15323 return -rte_mtr_error_set(error,
15325 RTE_MTR_ERROR_TYPE_METER_POLICY,
15326 NULL, "cannot create policy "
15327 "fate queue for this color");
15329 ((const struct rte_flow_action_queue *)
15330 (act->conf))->index;
15331 act_cnt->fate_action =
15332 MLX5_FLOW_FATE_QUEUE;
15333 dev_flow.handle->fate_action =
15334 MLX5_FLOW_FATE_QUEUE;
15335 mtr_policy->is_queue = 1;
15336 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15339 case RTE_FLOW_ACTION_TYPE_RSS:
15343 if (i >= MLX5_MTR_RTE_COLORS)
15344 return -rte_mtr_error_set(error,
15346 RTE_MTR_ERROR_TYPE_METER_POLICY,
15348 "cannot create policy "
15349 "rss action for this color");
15351 * Save RSS conf into policy struct
15352 * for translate stage.
15354 rss_size = (int)rte_flow_conv
15355 (RTE_FLOW_CONV_OP_ACTION,
15356 NULL, 0, act, &flow_err);
15358 return -rte_mtr_error_set(error,
15360 RTE_MTR_ERROR_TYPE_METER_POLICY,
15361 NULL, "Get the wrong "
15362 "rss action struct size");
15363 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15364 rss_size, 0, SOCKET_ID_ANY);
15366 return -rte_mtr_error_set(error,
15368 RTE_MTR_ERROR_TYPE_METER_POLICY,
15370 "Fail to malloc rss action memory");
15371 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15372 act_cnt->rss, rss_size,
15375 return -rte_mtr_error_set(error,
15377 RTE_MTR_ERROR_TYPE_METER_POLICY,
15378 NULL, "Fail to save "
15379 "rss action into policy struct");
15380 act_cnt->fate_action =
15381 MLX5_FLOW_FATE_SHARED_RSS;
15382 action_flags |= MLX5_FLOW_ACTION_RSS;
15385 case RTE_FLOW_ACTION_TYPE_PORT_ID:
15387 struct mlx5_flow_dv_port_id_action_resource
15389 uint32_t port_id = 0;
15391 if (i >= MLX5_MTR_RTE_COLORS)
15392 return -rte_mtr_error_set(error,
15394 RTE_MTR_ERROR_TYPE_METER_POLICY,
15395 NULL, "cannot create policy "
15396 "port action for this color");
15397 memset(&port_id_resource, 0,
15398 sizeof(port_id_resource));
15399 if (flow_dv_translate_action_port_id(dev, act,
15400 &port_id, &flow_err))
15401 return -rte_mtr_error_set(error,
15403 RTE_MTR_ERROR_TYPE_METER_POLICY,
15404 NULL, "cannot translate "
15405 "policy port action");
15406 port_id_resource.port_id = port_id;
15407 if (flow_dv_port_id_action_resource_register
15408 (dev, &port_id_resource,
15409 &dev_flow, &flow_err))
15410 return -rte_mtr_error_set(error,
15412 RTE_MTR_ERROR_TYPE_METER_POLICY,
15413 NULL, "cannot setup "
15414 "policy port action");
15415 act_cnt->rix_port_id_action =
15416 dev_flow.handle->rix_port_id_action;
15417 act_cnt->fate_action =
15418 MLX5_FLOW_FATE_PORT_ID;
15419 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15422 case RTE_FLOW_ACTION_TYPE_JUMP:
15424 uint32_t jump_group = 0;
15425 uint32_t table = 0;
15426 struct mlx5_flow_tbl_data_entry *tbl_data;
15427 struct flow_grp_info grp_info = {
15428 .external = !!dev_flow.external,
15429 .transfer = !!transfer,
15430 .fdb_def_rule = !!priv->fdb_def_rule,
15432 .skip_scale = dev_flow.skip_scale &
15433 (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15435 struct mlx5_flow_meter_sub_policy *sub_policy =
15436 mtr_policy->sub_policys[domain][0];
15438 if (i >= MLX5_MTR_RTE_COLORS)
15439 return -rte_mtr_error_set(error,
15441 RTE_MTR_ERROR_TYPE_METER_POLICY,
15443 "cannot create policy "
15444 "jump action for this color");
15446 ((const struct rte_flow_action_jump *)
15448 if (mlx5_flow_group_to_table(dev, NULL,
15451 &grp_info, &flow_err))
15452 return -rte_mtr_error_set(error,
15454 RTE_MTR_ERROR_TYPE_METER_POLICY,
15455 NULL, "cannot setup "
15456 "policy jump action");
15457 sub_policy->jump_tbl[i] =
15458 flow_dv_tbl_resource_get(dev,
15461 !!dev_flow.external,
15462 NULL, jump_group, 0,
15465 (!sub_policy->jump_tbl[i])
15466 return -rte_mtr_error_set(error,
15468 RTE_MTR_ERROR_TYPE_METER_POLICY,
15469 NULL, "cannot create jump action.");
15470 tbl_data = container_of
15471 (sub_policy->jump_tbl[i],
15472 struct mlx5_flow_tbl_data_entry, tbl);
15473 act_cnt->dr_jump_action[domain] =
15474 tbl_data->jump.action;
15475 act_cnt->fate_action =
15476 MLX5_FLOW_FATE_JUMP;
15477 action_flags |= MLX5_FLOW_ACTION_JUMP;
15480 case RTE_FLOW_ACTION_TYPE_METER:
15482 const struct rte_flow_action_meter *mtr;
15483 struct mlx5_flow_meter_info *next_fm;
15484 struct mlx5_flow_meter_policy *next_policy;
15485 struct rte_flow_action tag_action;
15486 struct mlx5_rte_flow_action_set_tag set_tag;
15487 uint32_t next_mtr_idx = 0;
15490 next_fm = mlx5_flow_meter_find(priv,
15494 return -rte_mtr_error_set(error, EINVAL,
15495 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15496 "Fail to find next meter.");
15497 if (next_fm->def_policy)
15498 return -rte_mtr_error_set(error, EINVAL,
15499 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15500 "Hierarchy only supports termination meter.");
15501 next_policy = mlx5_flow_meter_policy_find(dev,
15502 next_fm->policy_id, NULL);
15503 MLX5_ASSERT(next_policy);
15504 if (next_fm->drop_cnt) {
15507 mlx5_flow_get_reg_id(dev,
15510 (struct rte_flow_error *)error);
15511 set_tag.offset = (priv->mtr_reg_share ?
15512 MLX5_MTR_COLOR_BITS : 0);
15513 set_tag.length = (priv->mtr_reg_share ?
15514 MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15516 set_tag.data = next_mtr_idx;
15518 (enum rte_flow_action_type)
15519 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15520 tag_action.conf = &set_tag;
15521 if (flow_dv_convert_action_set_reg
15522 (mhdr_res, &tag_action,
15523 (struct rte_flow_error *)error))
15526 MLX5_FLOW_ACTION_SET_TAG;
15528 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15529 act_cnt->next_mtr_id = next_fm->meter_id;
15530 act_cnt->next_sub_policy = NULL;
15531 mtr_policy->is_hierarchy = 1;
15532 mtr_policy->dev = next_policy->dev;
15534 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15538 return -rte_mtr_error_set(error, ENOTSUP,
15539 RTE_MTR_ERROR_TYPE_METER_POLICY,
15540 NULL, "action type not supported");
15542 if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15543 /* create modify action if needed. */
15544 dev_flow.dv.group = 1;
15545 if (flow_dv_modify_hdr_resource_register
15546 (dev, mhdr_res, &dev_flow, &flow_err))
15547 return -rte_mtr_error_set(error,
15549 RTE_MTR_ERROR_TYPE_METER_POLICY,
15550 NULL, "cannot register policy "
15552 act_cnt->modify_hdr =
15553 dev_flow.handle->dvh.modify_hdr;
15561 * Create policy action per domain, lock free,
15562 * (mutex should be acquired by caller).
15563 * Dispatcher for action type specific call.
15566 * Pointer to the Ethernet device structure.
15567 * @param[in] mtr_policy
15568 * Meter policy struct.
15569 * @param[in] action
15570 * Action specification used to create meter actions.
15571 * @param[out] error
15572 * Perform verbose error reporting if not NULL. Initialized in case of
15576 * 0 on success, otherwise negative errno value.
15579 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15580 struct mlx5_flow_meter_policy *mtr_policy,
15581 const struct rte_flow_action *actions[RTE_COLORS],
15582 struct rte_mtr_error *error)
15585 uint16_t sub_policy_num;
15587 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15588 sub_policy_num = (mtr_policy->sub_policy_num >>
15589 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15590 MLX5_MTR_SUB_POLICY_NUM_MASK;
15591 if (sub_policy_num) {
15592 ret = __flow_dv_create_domain_policy_acts(dev,
15593 mtr_policy, actions,
15594 (enum mlx5_meter_domain)i, error);
15603 * Query a DV flow rule for its statistics via DevX.
15606 * Pointer to Ethernet device.
15607 * @param[in] cnt_idx
15608 * Index to the flow counter.
15610 * Data retrieved by the query.
15611 * @param[out] error
15612 * Perform verbose error reporting if not NULL.
15615 * 0 on success, a negative errno value otherwise and rte_errno is set.
15618 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15619 struct rte_flow_error *error)
15621 struct mlx5_priv *priv = dev->data->dev_private;
15622 struct rte_flow_query_count *qc = data;
15624 if (!priv->config.devx)
15625 return rte_flow_error_set(error, ENOTSUP,
15626 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15628 "counters are not supported");
15630 uint64_t pkts, bytes;
15631 struct mlx5_flow_counter *cnt;
15632 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15635 return rte_flow_error_set(error, -err,
15636 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15637 NULL, "cannot read counters");
15638 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15641 qc->hits = pkts - cnt->hits;
15642 qc->bytes = bytes - cnt->bytes;
15645 cnt->bytes = bytes;
15649 return rte_flow_error_set(error, EINVAL,
15650 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15652 "counters are not available");
15656 flow_dv_action_query(struct rte_eth_dev *dev,
15657 const struct rte_flow_action_handle *handle, void *data,
15658 struct rte_flow_error *error)
15660 struct mlx5_age_param *age_param;
15661 struct rte_flow_query_age *resp;
15662 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15663 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15664 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15665 struct mlx5_priv *priv = dev->data->dev_private;
15666 struct mlx5_aso_ct_action *ct;
15671 case MLX5_INDIRECT_ACTION_TYPE_AGE:
15672 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15674 resp->aged = __atomic_load_n(&age_param->state,
15675 __ATOMIC_RELAXED) == AGE_TMOUT ?
15677 resp->sec_since_last_hit_valid = !resp->aged;
15678 if (resp->sec_since_last_hit_valid)
15679 resp->sec_since_last_hit = __atomic_load_n
15680 (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15682 case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15683 return flow_dv_query_count(dev, idx, data, error);
15684 case MLX5_INDIRECT_ACTION_TYPE_CT:
15685 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15686 if (owner != PORT_ID(priv))
15687 return rte_flow_error_set(error, EACCES,
15688 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15690 "CT object owned by another port");
15691 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15692 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15695 return rte_flow_error_set(error, EFAULT,
15696 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15698 "CT object is inactive");
15699 ((struct rte_flow_action_conntrack *)data)->peer_port =
15701 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15703 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15704 return rte_flow_error_set(error, EIO,
15705 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15707 "Failed to query CT context");
15710 return rte_flow_error_set(error, ENOTSUP,
15711 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15712 "action type query not supported");
15717 * Query a flow rule AGE action for aging information.
15720 * Pointer to Ethernet device.
15722 * Pointer to the sub flow.
15724 * data retrieved by the query.
15725 * @param[out] error
15726 * Perform verbose error reporting if not NULL.
15729 * 0 on success, a negative errno value otherwise and rte_errno is set.
15732 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15733 void *data, struct rte_flow_error *error)
15735 struct rte_flow_query_age *resp = data;
15736 struct mlx5_age_param *age_param;
15739 struct mlx5_aso_age_action *act =
15740 flow_aso_age_get_by_idx(dev, flow->age);
15742 age_param = &act->age_params;
15743 } else if (flow->counter) {
15744 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15746 if (!age_param || !age_param->timeout)
15747 return rte_flow_error_set
15749 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15750 NULL, "cannot read age data");
15752 return rte_flow_error_set(error, EINVAL,
15753 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15754 NULL, "age data not available");
15756 resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15758 resp->sec_since_last_hit_valid = !resp->aged;
15759 if (resp->sec_since_last_hit_valid)
15760 resp->sec_since_last_hit = __atomic_load_n
15761 (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15768 * @see rte_flow_query()
15769 * @see rte_flow_ops
15772 flow_dv_query(struct rte_eth_dev *dev,
15773 struct rte_flow *flow __rte_unused,
15774 const struct rte_flow_action *actions __rte_unused,
15775 void *data __rte_unused,
15776 struct rte_flow_error *error __rte_unused)
15780 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15781 switch (actions->type) {
15782 case RTE_FLOW_ACTION_TYPE_VOID:
15784 case RTE_FLOW_ACTION_TYPE_COUNT:
15785 ret = flow_dv_query_count(dev, flow->counter, data,
15788 case RTE_FLOW_ACTION_TYPE_AGE:
15789 ret = flow_dv_query_age(dev, flow, data, error);
15792 return rte_flow_error_set(error, ENOTSUP,
15793 RTE_FLOW_ERROR_TYPE_ACTION,
15795 "action not supported");
15802 * Destroy the meter table set.
15803 * Lock free, (mutex should be acquired by caller).
15806 * Pointer to Ethernet device.
15808 * Meter information table.
15811 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15812 struct mlx5_flow_meter_info *fm)
15814 struct mlx5_priv *priv = dev->data->dev_private;
15817 if (!fm || !priv->config.dv_flow_en)
15819 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15820 if (fm->drop_rule[i]) {
15821 claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15822 fm->drop_rule[i] = NULL;
15828 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15830 struct mlx5_priv *priv = dev->data->dev_private;
15831 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15832 struct mlx5_flow_tbl_data_entry *tbl;
15835 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15836 if (mtrmng->def_rule[i]) {
15837 claim_zero(mlx5_flow_os_destroy_flow
15838 (mtrmng->def_rule[i]));
15839 mtrmng->def_rule[i] = NULL;
15841 if (mtrmng->def_matcher[i]) {
15842 tbl = container_of(mtrmng->def_matcher[i]->tbl,
15843 struct mlx5_flow_tbl_data_entry, tbl);
15844 mlx5_list_unregister(tbl->matchers,
15845 &mtrmng->def_matcher[i]->entry);
15846 mtrmng->def_matcher[i] = NULL;
15848 for (j = 0; j < MLX5_REG_BITS; j++) {
15849 if (mtrmng->drop_matcher[i][j]) {
15851 container_of(mtrmng->drop_matcher[i][j]->tbl,
15852 struct mlx5_flow_tbl_data_entry,
15854 mlx5_list_unregister(tbl->matchers,
15855 &mtrmng->drop_matcher[i][j]->entry);
15856 mtrmng->drop_matcher[i][j] = NULL;
15859 if (mtrmng->drop_tbl[i]) {
15860 flow_dv_tbl_resource_release(MLX5_SH(dev),
15861 mtrmng->drop_tbl[i]);
15862 mtrmng->drop_tbl[i] = NULL;
15867 /* Number of meter flow actions, count and jump or count and drop. */
15868 #define METER_ACTIONS 2
15871 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15872 enum mlx5_meter_domain domain)
15874 struct mlx5_priv *priv = dev->data->dev_private;
15875 struct mlx5_flow_meter_def_policy *def_policy =
15876 priv->sh->mtrmng->def_policy[domain];
15878 __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15879 mlx5_free(def_policy);
15880 priv->sh->mtrmng->def_policy[domain] = NULL;
15884 * Destroy the default policy table set.
15887 * Pointer to Ethernet device.
15890 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15892 struct mlx5_priv *priv = dev->data->dev_private;
15895 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15896 if (priv->sh->mtrmng->def_policy[i])
15897 __flow_dv_destroy_domain_def_policy(dev,
15898 (enum mlx5_meter_domain)i);
15899 priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15903 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15904 uint32_t color_reg_c_idx,
15905 enum rte_color color, void *matcher_object,
15906 int actions_n, void *actions,
15907 bool match_src_port, const struct rte_flow_item *item,
15908 void **rule, const struct rte_flow_attr *attr)
15911 struct mlx5_flow_dv_match_params value = {
15912 .size = sizeof(value.buf),
15914 struct mlx5_flow_dv_match_params matcher = {
15915 .size = sizeof(matcher.buf),
15917 struct mlx5_priv *priv = dev->data->dev_private;
15920 if (match_src_port && (priv->representor || priv->master)) {
15921 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15922 value.buf, item, attr)) {
15924 "Failed to create meter policy flow with port.");
15928 flow_dv_match_meta_reg(matcher.buf, value.buf,
15929 (enum modify_reg)color_reg_c_idx,
15930 rte_col_2_mlx5_col(color),
15932 misc_mask = flow_dv_matcher_enable(value.buf);
15933 __flow_dv_adjust_buf_size(&value.size, misc_mask);
15934 ret = mlx5_flow_os_create_flow(matcher_object,
15935 (void *)&value, actions_n, actions, rule);
15937 DRV_LOG(ERR, "Failed to create meter policy flow.");
15944 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15945 uint32_t color_reg_c_idx,
15947 struct mlx5_flow_meter_sub_policy *sub_policy,
15948 const struct rte_flow_attr *attr,
15949 bool match_src_port,
15950 const struct rte_flow_item *item,
15951 struct mlx5_flow_dv_matcher **policy_matcher,
15952 struct rte_flow_error *error)
15954 struct mlx5_list_entry *entry;
15955 struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15956 struct mlx5_flow_dv_matcher matcher = {
15958 .size = sizeof(matcher.mask.buf),
15962 struct mlx5_flow_dv_match_params value = {
15963 .size = sizeof(value.buf),
15965 struct mlx5_flow_cb_ctx ctx = {
15969 struct mlx5_flow_tbl_data_entry *tbl_data;
15970 struct mlx5_priv *priv = dev->data->dev_private;
15971 uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15973 if (match_src_port && (priv->representor || priv->master)) {
15974 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15975 value.buf, item, attr)) {
15977 "Failed to register meter drop matcher with port.");
15981 tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
15982 if (priority < RTE_COLOR_RED)
15983 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15984 (enum modify_reg)color_reg_c_idx, 0, color_mask);
15985 matcher.priority = priority;
15986 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15987 matcher.mask.size);
15988 entry = mlx5_list_register(tbl_data->matchers, &ctx);
15990 DRV_LOG(ERR, "Failed to register meter drop matcher.");
15994 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15999 * Create the policy rules per domain.
16002 * Pointer to Ethernet device.
16003 * @param[in] sub_policy
16004 * Pointer to sub policy table..
16005 * @param[in] egress
16006 * Direction of the table.
16007 * @param[in] transfer
16008 * E-Switch or NIC flow.
16010 * Pointer to policy action list per color.
16013 * 0 on success, -1 otherwise.
16016 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16017 struct mlx5_flow_meter_sub_policy *sub_policy,
16018 uint8_t egress, uint8_t transfer, bool match_src_port,
16019 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16021 struct mlx5_priv *priv = dev->data->dev_private;
16022 struct rte_flow_error flow_err;
16023 uint32_t color_reg_c_idx;
16024 struct rte_flow_attr attr = {
16025 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16028 .egress = !!egress,
16029 .transfer = !!transfer,
16033 int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16034 struct mlx5_sub_policy_color_rule *color_rule;
16038 /* Create policy table with POLICY level. */
16039 if (!sub_policy->tbl_rsc)
16040 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16041 MLX5_FLOW_TABLE_LEVEL_POLICY,
16042 egress, transfer, false, NULL, 0, 0,
16043 sub_policy->idx, &flow_err);
16044 if (!sub_policy->tbl_rsc) {
16046 "Failed to create meter sub policy table.");
16049 /* Prepare matchers. */
16050 color_reg_c_idx = ret;
16051 for (i = 0; i < RTE_COLORS; i++) {
16052 TAILQ_INIT(&sub_policy->color_rules[i]);
16053 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
16055 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16056 sizeof(struct mlx5_sub_policy_color_rule),
16059 DRV_LOG(ERR, "No memory to create color rule.");
16062 color_rule->src_port = priv->representor_id;
16064 /* Create matchers for Color. */
16065 if (__flow_dv_create_policy_matcher(dev,
16066 color_reg_c_idx, i, sub_policy, &attr,
16067 (i != RTE_COLOR_RED ? match_src_port : false),
16068 NULL, &color_rule->matcher, &flow_err)) {
16069 DRV_LOG(ERR, "Failed to create color matcher.");
16072 /* Create flow, matching color. */
16073 if (__flow_dv_create_policy_flow(dev,
16074 color_reg_c_idx, (enum rte_color)i,
16075 color_rule->matcher->matcher_object,
16077 acts[i].dv_actions,
16078 (i != RTE_COLOR_RED ? match_src_port : false),
16079 NULL, &color_rule->rule,
16081 DRV_LOG(ERR, "Failed to create color rule.");
16084 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16085 color_rule, next_port);
16090 if (color_rule->rule)
16091 mlx5_flow_os_destroy_flow(color_rule->rule);
16092 if (color_rule->matcher) {
16093 struct mlx5_flow_tbl_data_entry *tbl =
16094 container_of(color_rule->matcher->tbl,
16095 typeof(*tbl), tbl);
16096 mlx5_list_unregister(tbl->matchers,
16097 &color_rule->matcher->entry);
16099 mlx5_free(color_rule);
16105 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16106 struct mlx5_flow_meter_policy *mtr_policy,
16107 struct mlx5_flow_meter_sub_policy *sub_policy,
16110 struct mlx5_priv *priv = dev->data->dev_private;
16111 struct mlx5_meter_policy_acts acts[RTE_COLORS];
16112 struct mlx5_flow_dv_tag_resource *tag;
16113 struct mlx5_flow_dv_port_id_action_resource *port_action;
16114 struct mlx5_hrxq *hrxq;
16115 struct mlx5_flow_meter_info *next_fm = NULL;
16116 struct mlx5_flow_meter_policy *next_policy;
16117 struct mlx5_flow_meter_sub_policy *next_sub_policy;
16118 struct mlx5_flow_tbl_data_entry *tbl_data;
16119 struct rte_flow_error error;
16120 uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16121 uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16122 bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16123 bool match_src_port = false;
16126 for (i = 0; i < RTE_COLORS; i++) {
16127 acts[i].actions_n = 0;
16128 if (i == RTE_COLOR_YELLOW)
16130 if (i == RTE_COLOR_RED) {
16131 /* Only support drop on red. */
16132 acts[i].dv_actions[0] =
16133 mtr_policy->dr_drop_action[domain];
16134 acts[i].actions_n = 1;
16137 if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16138 struct rte_flow_attr attr = {
16139 .transfer = transfer
16142 next_fm = mlx5_flow_meter_find(priv,
16143 mtr_policy->act_cnt[i].next_mtr_id,
16147 "Failed to get next hierarchy meter.");
16150 if (mlx5_flow_meter_attach(priv, next_fm,
16152 DRV_LOG(ERR, "%s", error.message);
16156 /* Meter action must be the first for TX. */
16158 acts[i].dv_actions[acts[i].actions_n] =
16159 next_fm->meter_action;
16160 acts[i].actions_n++;
16163 if (mtr_policy->act_cnt[i].rix_mark) {
16164 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16165 mtr_policy->act_cnt[i].rix_mark);
16167 DRV_LOG(ERR, "Failed to find "
16168 "mark action for policy.");
16171 acts[i].dv_actions[acts[i].actions_n] =
16173 acts[i].actions_n++;
16175 if (mtr_policy->act_cnt[i].modify_hdr) {
16176 acts[i].dv_actions[acts[i].actions_n] =
16177 mtr_policy->act_cnt[i].modify_hdr->action;
16178 acts[i].actions_n++;
16180 if (mtr_policy->act_cnt[i].fate_action) {
16181 switch (mtr_policy->act_cnt[i].fate_action) {
16182 case MLX5_FLOW_FATE_PORT_ID:
16183 port_action = mlx5_ipool_get
16184 (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16185 mtr_policy->act_cnt[i].rix_port_id_action);
16186 if (!port_action) {
16187 DRV_LOG(ERR, "Failed to find "
16188 "port action for policy.");
16191 acts[i].dv_actions[acts[i].actions_n] =
16192 port_action->action;
16193 acts[i].actions_n++;
16194 mtr_policy->dev = dev;
16195 match_src_port = true;
16197 case MLX5_FLOW_FATE_DROP:
16198 case MLX5_FLOW_FATE_JUMP:
16199 acts[i].dv_actions[acts[i].actions_n] =
16200 mtr_policy->act_cnt[i].dr_jump_action[domain];
16201 acts[i].actions_n++;
16203 case MLX5_FLOW_FATE_SHARED_RSS:
16204 case MLX5_FLOW_FATE_QUEUE:
16205 hrxq = mlx5_ipool_get
16206 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16207 sub_policy->rix_hrxq[i]);
16209 DRV_LOG(ERR, "Failed to find "
16210 "queue action for policy.");
16213 acts[i].dv_actions[acts[i].actions_n] =
16215 acts[i].actions_n++;
16217 case MLX5_FLOW_FATE_MTR:
16220 "No next hierarchy meter.");
16224 acts[i].dv_actions[acts[i].actions_n] =
16225 next_fm->meter_action;
16226 acts[i].actions_n++;
16228 if (mtr_policy->act_cnt[i].next_sub_policy) {
16230 mtr_policy->act_cnt[i].next_sub_policy;
16233 mlx5_flow_meter_policy_find(dev,
16234 next_fm->policy_id, NULL);
16235 MLX5_ASSERT(next_policy);
16237 next_policy->sub_policys[domain][0];
16240 container_of(next_sub_policy->tbl_rsc,
16241 struct mlx5_flow_tbl_data_entry, tbl);
16242 acts[i].dv_actions[acts[i].actions_n++] =
16243 tbl_data->jump.action;
16244 if (mtr_policy->act_cnt[i].modify_hdr)
16245 match_src_port = !!transfer;
16248 /*Queue action do nothing*/
16253 if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16254 egress, transfer, match_src_port, acts)) {
16256 "Failed to create policy rules per domain.");
16262 mlx5_flow_meter_detach(priv, next_fm);
16267 * Create the policy rules.
16270 * Pointer to Ethernet device.
16271 * @param[in,out] mtr_policy
16272 * Pointer to meter policy table.
16275 * 0 on success, -1 otherwise.
16278 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16279 struct mlx5_flow_meter_policy *mtr_policy)
16282 uint16_t sub_policy_num;
16284 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16285 sub_policy_num = (mtr_policy->sub_policy_num >>
16286 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16287 MLX5_MTR_SUB_POLICY_NUM_MASK;
16288 if (!sub_policy_num)
16290 /* Prepare actions list and create policy rules. */
16291 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16292 mtr_policy->sub_policys[i][0], i)) {
16294 "Failed to create policy action list per domain.");
16302 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16304 struct mlx5_priv *priv = dev->data->dev_private;
16305 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16306 struct mlx5_flow_meter_def_policy *def_policy;
16307 struct mlx5_flow_tbl_resource *jump_tbl;
16308 struct mlx5_flow_tbl_data_entry *tbl_data;
16309 uint8_t egress, transfer;
16310 struct rte_flow_error error;
16311 struct mlx5_meter_policy_acts acts[RTE_COLORS];
16314 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16315 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16316 def_policy = mtrmng->def_policy[domain];
16318 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16319 sizeof(struct mlx5_flow_meter_def_policy),
16320 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16322 DRV_LOG(ERR, "Failed to alloc "
16323 "default policy table.");
16324 goto def_policy_error;
16326 mtrmng->def_policy[domain] = def_policy;
16327 /* Create the meter suffix table with SUFFIX level. */
16328 jump_tbl = flow_dv_tbl_resource_get(dev,
16329 MLX5_FLOW_TABLE_LEVEL_METER,
16330 egress, transfer, false, NULL, 0,
16331 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16334 "Failed to create meter suffix table.");
16335 goto def_policy_error;
16337 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16338 tbl_data = container_of(jump_tbl,
16339 struct mlx5_flow_tbl_data_entry, tbl);
16340 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16341 tbl_data->jump.action;
16342 acts[RTE_COLOR_GREEN].dv_actions[0] =
16343 tbl_data->jump.action;
16344 acts[RTE_COLOR_GREEN].actions_n = 1;
16345 /* Create jump action to the drop table. */
16346 if (!mtrmng->drop_tbl[domain]) {
16347 mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16348 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16349 egress, transfer, false, NULL, 0,
16350 0, MLX5_MTR_TABLE_ID_DROP, &error);
16351 if (!mtrmng->drop_tbl[domain]) {
16352 DRV_LOG(ERR, "Failed to create "
16353 "meter drop table for default policy.");
16354 goto def_policy_error;
16357 tbl_data = container_of(mtrmng->drop_tbl[domain],
16358 struct mlx5_flow_tbl_data_entry, tbl);
16359 def_policy->dr_jump_action[RTE_COLOR_RED] =
16360 tbl_data->jump.action;
16361 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16362 acts[RTE_COLOR_RED].actions_n = 1;
16363 /* Create default policy rules. */
16364 ret = __flow_dv_create_domain_policy_rules(dev,
16365 &def_policy->sub_policy,
16366 egress, transfer, false, acts);
16368 DRV_LOG(ERR, "Failed to create "
16369 "default policy rules.");
16370 goto def_policy_error;
16375 __flow_dv_destroy_domain_def_policy(dev,
16376 (enum mlx5_meter_domain)domain);
16381 * Create the default policy table set.
16384 * Pointer to Ethernet device.
16386 * 0 on success, -1 otherwise.
16389 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16391 struct mlx5_priv *priv = dev->data->dev_private;
16394 /* Non-termination policy table. */
16395 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16396 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16398 if (__flow_dv_create_domain_def_policy(dev, i)) {
16400 "Failed to create default policy");
16408 * Create the needed meter tables.
16409 * Lock free, (mutex should be acquired by caller).
16412 * Pointer to Ethernet device.
16414 * Meter information table.
16415 * @param[in] mtr_idx
16417 * @param[in] domain_bitmap
16420 * 0 on success, -1 otherwise.
16423 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16424 struct mlx5_flow_meter_info *fm,
16426 uint8_t domain_bitmap)
16428 struct mlx5_priv *priv = dev->data->dev_private;
16429 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16430 struct rte_flow_error error;
16431 struct mlx5_flow_tbl_data_entry *tbl_data;
16432 uint8_t egress, transfer;
16433 void *actions[METER_ACTIONS];
16434 int domain, ret, i;
16435 struct mlx5_flow_counter *cnt;
16436 struct mlx5_flow_dv_match_params value = {
16437 .size = sizeof(value.buf),
16439 struct mlx5_flow_dv_match_params matcher_para = {
16440 .size = sizeof(matcher_para.buf),
16442 int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16444 uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16445 uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16446 struct mlx5_list_entry *entry;
16447 struct mlx5_flow_dv_matcher matcher = {
16449 .size = sizeof(matcher.mask.buf),
16452 struct mlx5_flow_dv_matcher *drop_matcher;
16453 struct mlx5_flow_cb_ctx ctx = {
16459 if (!priv->mtr_en || mtr_id_reg_c < 0) {
16460 rte_errno = ENOTSUP;
16463 for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16464 if (!(domain_bitmap & (1 << domain)) ||
16465 (mtrmng->def_rule[domain] && !fm->drop_cnt))
16467 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16468 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16469 /* Create the drop table with METER DROP level. */
16470 if (!mtrmng->drop_tbl[domain]) {
16471 mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16472 MLX5_FLOW_TABLE_LEVEL_METER,
16473 egress, transfer, false, NULL, 0,
16474 0, MLX5_MTR_TABLE_ID_DROP, &error);
16475 if (!mtrmng->drop_tbl[domain]) {
16476 DRV_LOG(ERR, "Failed to create meter drop table.");
16480 /* Create default matcher in drop table. */
16481 matcher.tbl = mtrmng->drop_tbl[domain],
16482 tbl_data = container_of(mtrmng->drop_tbl[domain],
16483 struct mlx5_flow_tbl_data_entry, tbl);
16484 if (!mtrmng->def_matcher[domain]) {
16485 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16486 (enum modify_reg)mtr_id_reg_c,
16488 matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16489 matcher.crc = rte_raw_cksum
16490 ((const void *)matcher.mask.buf,
16491 matcher.mask.size);
16492 entry = mlx5_list_register(tbl_data->matchers, &ctx);
16494 DRV_LOG(ERR, "Failed to register meter "
16495 "drop default matcher.");
16498 mtrmng->def_matcher[domain] = container_of(entry,
16499 struct mlx5_flow_dv_matcher, entry);
16501 /* Create default rule in drop table. */
16502 if (!mtrmng->def_rule[domain]) {
16504 actions[i++] = priv->sh->dr_drop_action;
16505 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16506 (enum modify_reg)mtr_id_reg_c, 0, 0);
16507 misc_mask = flow_dv_matcher_enable(value.buf);
16508 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16509 ret = mlx5_flow_os_create_flow
16510 (mtrmng->def_matcher[domain]->matcher_object,
16511 (void *)&value, i, actions,
16512 &mtrmng->def_rule[domain]);
16514 DRV_LOG(ERR, "Failed to create meter "
16515 "default drop rule for drop table.");
16521 MLX5_ASSERT(mtrmng->max_mtr_bits);
16522 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16523 /* Create matchers for Drop. */
16524 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16525 (enum modify_reg)mtr_id_reg_c, 0,
16526 (mtr_id_mask << mtr_id_offset));
16527 matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16528 matcher.crc = rte_raw_cksum
16529 ((const void *)matcher.mask.buf,
16530 matcher.mask.size);
16531 entry = mlx5_list_register(tbl_data->matchers, &ctx);
16534 "Failed to register meter drop matcher.");
16537 mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16538 container_of(entry, struct mlx5_flow_dv_matcher,
16542 mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16543 /* Create drop rule, matching meter_id only. */
16544 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16545 (enum modify_reg)mtr_id_reg_c,
16546 (mtr_idx << mtr_id_offset), UINT32_MAX);
16548 cnt = flow_dv_counter_get_by_idx(dev,
16549 fm->drop_cnt, NULL);
16550 actions[i++] = cnt->action;
16551 actions[i++] = priv->sh->dr_drop_action;
16552 misc_mask = flow_dv_matcher_enable(value.buf);
16553 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16554 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16555 (void *)&value, i, actions,
16556 &fm->drop_rule[domain]);
16558 DRV_LOG(ERR, "Failed to create meter "
16559 "drop rule for drop table.");
16565 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16566 if (fm->drop_rule[i]) {
16567 claim_zero(mlx5_flow_os_destroy_flow
16568 (fm->drop_rule[i]));
16569 fm->drop_rule[i] = NULL;
16575 static struct mlx5_flow_meter_sub_policy *
16576 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16577 struct mlx5_flow_meter_policy *mtr_policy,
16578 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16579 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16582 struct mlx5_priv *priv = dev->data->dev_private;
16583 struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16584 uint32_t sub_policy_idx = 0;
16585 uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16587 struct mlx5_hrxq *hrxq;
16588 struct mlx5_flow_handle dh;
16589 struct mlx5_meter_policy_action_container *act_cnt;
16590 uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16591 uint16_t sub_policy_num;
16593 rte_spinlock_lock(&mtr_policy->sl);
16594 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16597 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16598 if (!hrxq_idx[i]) {
16599 rte_spinlock_unlock(&mtr_policy->sl);
16603 sub_policy_num = (mtr_policy->sub_policy_num >>
16604 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16605 MLX5_MTR_SUB_POLICY_NUM_MASK;
16606 for (i = 0; i < sub_policy_num;
16608 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16611 mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16614 if (j >= MLX5_MTR_RTE_COLORS) {
16616 * Found the sub policy table with
16617 * the same queue per color
16619 rte_spinlock_unlock(&mtr_policy->sl);
16620 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16621 mlx5_hrxq_release(dev, hrxq_idx[j]);
16623 return mtr_policy->sub_policys[domain][i];
16626 /* Create sub policy. */
16627 if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16628 /* Reuse the first dummy sub_policy*/
16629 sub_policy = mtr_policy->sub_policys[domain][0];
16630 sub_policy_idx = sub_policy->idx;
16632 sub_policy = mlx5_ipool_zmalloc
16633 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16636 sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16637 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16638 mlx5_hrxq_release(dev, hrxq_idx[i]);
16639 goto rss_sub_policy_error;
16641 sub_policy->idx = sub_policy_idx;
16642 sub_policy->main_policy = mtr_policy;
16644 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16647 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16648 if (mtr_policy->is_hierarchy) {
16649 act_cnt = &mtr_policy->act_cnt[i];
16650 act_cnt->next_sub_policy = next_sub_policy;
16651 mlx5_hrxq_release(dev, hrxq_idx[i]);
16654 * Overwrite the last action from
16655 * RSS action to Queue action.
16657 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16660 DRV_LOG(ERR, "Failed to create policy hrxq");
16661 goto rss_sub_policy_error;
16663 act_cnt = &mtr_policy->act_cnt[i];
16664 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16665 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16666 if (act_cnt->rix_mark)
16668 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16669 dh.rix_hrxq = hrxq_idx[i];
16670 flow_drv_rxq_flags_set(dev, &dh);
16674 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16675 sub_policy, domain)) {
16676 DRV_LOG(ERR, "Failed to create policy "
16677 "rules per domain.");
16678 goto rss_sub_policy_error;
16680 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16681 i = (mtr_policy->sub_policy_num >>
16682 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16683 MLX5_MTR_SUB_POLICY_NUM_MASK;
16684 mtr_policy->sub_policys[domain][i] = sub_policy;
16686 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16687 goto rss_sub_policy_error;
16688 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16689 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16690 mtr_policy->sub_policy_num |=
16691 (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16692 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16694 rte_spinlock_unlock(&mtr_policy->sl);
16697 rss_sub_policy_error:
16699 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16700 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16701 i = (mtr_policy->sub_policy_num >>
16702 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16703 MLX5_MTR_SUB_POLICY_NUM_MASK;
16704 mtr_policy->sub_policys[domain][i] = NULL;
16706 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16710 rte_spinlock_unlock(&mtr_policy->sl);
16715 * Find the policy table for prefix table with RSS.
16718 * Pointer to Ethernet device.
16719 * @param[in] mtr_policy
16720 * Pointer to meter policy table.
16721 * @param[in] rss_desc
16722 * Pointer to rss_desc
16724 * Pointer to table set on success, NULL otherwise and rte_errno is set.
16726 static struct mlx5_flow_meter_sub_policy *
16727 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16728 struct mlx5_flow_meter_policy *mtr_policy,
16729 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16731 struct mlx5_priv *priv = dev->data->dev_private;
16732 struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16733 struct mlx5_flow_meter_info *next_fm;
16734 struct mlx5_flow_meter_policy *next_policy;
16735 struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16736 struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16737 struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16738 uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16739 bool reuse_sub_policy;
16744 /* Iterate hierarchy to get all policies in this hierarchy. */
16745 policies[i++] = mtr_policy;
16746 if (!mtr_policy->is_hierarchy)
16748 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16749 DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16752 next_fm = mlx5_flow_meter_find(priv,
16753 mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16755 DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16759 mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16761 MLX5_ASSERT(next_policy);
16762 mtr_policy = next_policy;
16766 * From last policy to the first one in hierarchy,
16767 * create/get the sub policy for each of them.
16769 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16773 &reuse_sub_policy);
16775 DRV_LOG(ERR, "Failed to get the sub policy.");
16778 if (!reuse_sub_policy)
16779 sub_policies[j++] = sub_policy;
16780 next_sub_policy = sub_policy;
16785 uint16_t sub_policy_num;
16787 sub_policy = sub_policies[--j];
16788 mtr_policy = sub_policy->main_policy;
16789 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16790 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16791 sub_policy_num = (mtr_policy->sub_policy_num >>
16792 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16793 MLX5_MTR_SUB_POLICY_NUM_MASK;
16794 mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16797 mtr_policy->sub_policy_num &=
16798 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16799 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
16800 mtr_policy->sub_policy_num |=
16801 (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16802 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
16803 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16811 * Create the sub policy tag rule for all meters in hierarchy.
16814 * Pointer to Ethernet device.
16816 * Meter information table.
16817 * @param[in] src_port
16818 * The src port this extra rule should use.
16820 * The src port match item.
16821 * @param[out] error
16822 * Perform verbose error reporting if not NULL.
16824 * 0 on success, a negative errno value otherwise and rte_errno is set.
16827 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16828 struct mlx5_flow_meter_info *fm,
16830 const struct rte_flow_item *item,
16831 struct rte_flow_error *error)
16833 struct mlx5_priv *priv = dev->data->dev_private;
16834 struct mlx5_flow_meter_policy *mtr_policy;
16835 struct mlx5_flow_meter_sub_policy *sub_policy;
16836 struct mlx5_flow_meter_info *next_fm = NULL;
16837 struct mlx5_flow_meter_policy *next_policy;
16838 struct mlx5_flow_meter_sub_policy *next_sub_policy;
16839 struct mlx5_flow_tbl_data_entry *tbl_data;
16840 struct mlx5_sub_policy_color_rule *color_rule;
16841 struct mlx5_meter_policy_acts acts;
16842 uint32_t color_reg_c_idx;
16843 bool mtr_first = (src_port != UINT16_MAX) ? true : false;
16844 struct rte_flow_attr attr = {
16845 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16852 uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16855 mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16856 MLX5_ASSERT(mtr_policy);
16857 if (!mtr_policy->is_hierarchy)
16859 next_fm = mlx5_flow_meter_find(priv,
16860 mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16862 return rte_flow_error_set(error, EINVAL,
16863 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16864 "Failed to find next meter in hierarchy.");
16866 if (!next_fm->drop_cnt)
16868 color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16869 sub_policy = mtr_policy->sub_policys[domain][0];
16870 for (i = 0; i < RTE_COLORS; i++) {
16871 bool rule_exist = false;
16872 struct mlx5_meter_policy_action_container *act_cnt;
16874 if (i >= RTE_COLOR_YELLOW)
16876 TAILQ_FOREACH(color_rule,
16877 &sub_policy->color_rules[i], next_port)
16878 if (color_rule->src_port == src_port) {
16884 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16885 sizeof(struct mlx5_sub_policy_color_rule),
16888 return rte_flow_error_set(error, ENOMEM,
16889 RTE_FLOW_ERROR_TYPE_ACTION,
16890 NULL, "No memory to create tag color rule.");
16891 color_rule->src_port = src_port;
16893 next_policy = mlx5_flow_meter_policy_find(dev,
16894 next_fm->policy_id, NULL);
16895 MLX5_ASSERT(next_policy);
16896 next_sub_policy = next_policy->sub_policys[domain][0];
16897 tbl_data = container_of(next_sub_policy->tbl_rsc,
16898 struct mlx5_flow_tbl_data_entry, tbl);
16899 act_cnt = &mtr_policy->act_cnt[i];
16901 acts.dv_actions[0] = next_fm->meter_action;
16902 acts.dv_actions[1] = act_cnt->modify_hdr->action;
16904 acts.dv_actions[0] = act_cnt->modify_hdr->action;
16905 acts.dv_actions[1] = next_fm->meter_action;
16907 acts.dv_actions[2] = tbl_data->jump.action;
16908 acts.actions_n = 3;
16909 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16913 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16914 i, sub_policy, &attr, true, item,
16915 &color_rule->matcher, error)) {
16916 rte_flow_error_set(error, errno,
16917 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16918 "Failed to create hierarchy meter matcher.");
16921 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16923 color_rule->matcher->matcher_object,
16924 acts.actions_n, acts.dv_actions,
16926 &color_rule->rule, &attr)) {
16927 rte_flow_error_set(error, errno,
16928 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16929 "Failed to create hierarchy meter rule.");
16932 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16933 color_rule, next_port);
16937 * Recursive call to iterate all meters in hierarchy and
16938 * create needed rules.
16940 return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16941 src_port, item, error);
16944 if (color_rule->rule)
16945 mlx5_flow_os_destroy_flow(color_rule->rule);
16946 if (color_rule->matcher) {
16947 struct mlx5_flow_tbl_data_entry *tbl =
16948 container_of(color_rule->matcher->tbl,
16949 typeof(*tbl), tbl);
16950 mlx5_list_unregister(tbl->matchers,
16951 &color_rule->matcher->entry);
16953 mlx5_free(color_rule);
16956 mlx5_flow_meter_detach(priv, next_fm);
16961 * Destroy the sub policy table with RX queue.
16964 * Pointer to Ethernet device.
16965 * @param[in] mtr_policy
16966 * Pointer to meter policy table.
16969 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16970 struct mlx5_flow_meter_policy *mtr_policy)
16972 struct mlx5_priv *priv = dev->data->dev_private;
16973 struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16974 uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16976 uint16_t sub_policy_num, new_policy_num;
16978 rte_spinlock_lock(&mtr_policy->sl);
16979 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16980 switch (mtr_policy->act_cnt[i].fate_action) {
16981 case MLX5_FLOW_FATE_SHARED_RSS:
16982 sub_policy_num = (mtr_policy->sub_policy_num >>
16983 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16984 MLX5_MTR_SUB_POLICY_NUM_MASK;
16985 new_policy_num = sub_policy_num;
16986 for (j = 0; j < sub_policy_num; j++) {
16988 mtr_policy->sub_policys[domain][j];
16990 __flow_dv_destroy_sub_policy_rules(dev,
16993 mtr_policy->sub_policys[domain][0]) {
16994 mtr_policy->sub_policys[domain][j] =
16997 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17003 if (new_policy_num != sub_policy_num) {
17004 mtr_policy->sub_policy_num &=
17005 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17006 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17007 mtr_policy->sub_policy_num |=
17009 MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17010 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17013 case MLX5_FLOW_FATE_QUEUE:
17014 sub_policy = mtr_policy->sub_policys[domain][0];
17015 __flow_dv_destroy_sub_policy_rules(dev,
17019 /*Other actions without queue and do nothing*/
17023 rte_spinlock_unlock(&mtr_policy->sl);
17027 * Validate the batch counter support in root table.
17029 * Create a simple flow with invalid counter and drop action on root table to
17030 * validate if batch counter with offset on root table is supported or not.
17033 * Pointer to rte_eth_dev structure.
17036 * 0 on success, a negative errno value otherwise and rte_errno is set.
17039 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17041 struct mlx5_priv *priv = dev->data->dev_private;
17042 struct mlx5_dev_ctx_shared *sh = priv->sh;
17043 struct mlx5_flow_dv_match_params mask = {
17044 .size = sizeof(mask.buf),
17046 struct mlx5_flow_dv_match_params value = {
17047 .size = sizeof(value.buf),
17049 struct mlx5dv_flow_matcher_attr dv_attr = {
17050 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17052 .match_criteria_enable = 0,
17053 .match_mask = (void *)&mask,
17055 void *actions[2] = { 0 };
17056 struct mlx5_flow_tbl_resource *tbl = NULL;
17057 struct mlx5_devx_obj *dcs = NULL;
17058 void *matcher = NULL;
17062 tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17066 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
17069 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17073 dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17074 __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17075 ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
17079 __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17080 ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17084 * If batch counter with offset is not supported, the driver will not
17085 * validate the invalid offset value, flow create should success.
17086 * In this case, it means batch counter is not supported in root table.
17088 * Otherwise, if flow create is failed, counter offset is supported.
17091 DRV_LOG(INFO, "Batch counter is not supported in root "
17092 "table. Switch to fallback mode.");
17093 rte_errno = ENOTSUP;
17095 claim_zero(mlx5_flow_os_destroy_flow(flow));
17097 /* Check matcher to make sure validate fail at flow create. */
17098 if (!matcher || (matcher && errno != EINVAL))
17099 DRV_LOG(ERR, "Unexpected error in counter offset "
17100 "support detection");
17104 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17106 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17108 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17110 claim_zero(mlx5_devx_cmd_destroy(dcs));
17115 * Query a devx counter.
17118 * Pointer to the Ethernet device structure.
17120 * Index to the flow counter.
17122 * Set to clear the counter statistics.
17124 * The statistics value of packets.
17125 * @param[out] bytes
17126 * The statistics value of bytes.
17129 * 0 on success, otherwise return -1.
17132 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17133 uint64_t *pkts, uint64_t *bytes)
17135 struct mlx5_priv *priv = dev->data->dev_private;
17136 struct mlx5_flow_counter *cnt;
17137 uint64_t inn_pkts, inn_bytes;
17140 if (!priv->config.devx)
17143 ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17146 cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17147 *pkts = inn_pkts - cnt->hits;
17148 *bytes = inn_bytes - cnt->bytes;
17150 cnt->hits = inn_pkts;
17151 cnt->bytes = inn_bytes;
17157 * Get aged-out flows.
17160 * Pointer to the Ethernet device structure.
17161 * @param[in] context
17162 * The address of an array of pointers to the aged-out flows contexts.
17163 * @param[in] nb_contexts
17164 * The length of context array pointers.
17165 * @param[out] error
17166 * Perform verbose error reporting if not NULL. Initialized in case of
17170 * how many contexts get in success, otherwise negative errno value.
17171 * if nb_contexts is 0, return the amount of all aged contexts.
17172 * if nb_contexts is not 0 , return the amount of aged flows reported
17173 * in the context array.
17174 * @note: only stub for now
17177 flow_get_aged_flows(struct rte_eth_dev *dev,
17179 uint32_t nb_contexts,
17180 struct rte_flow_error *error)
17182 struct mlx5_priv *priv = dev->data->dev_private;
17183 struct mlx5_age_info *age_info;
17184 struct mlx5_age_param *age_param;
17185 struct mlx5_flow_counter *counter;
17186 struct mlx5_aso_age_action *act;
17189 if (nb_contexts && !context)
17190 return rte_flow_error_set(error, EINVAL,
17191 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17192 NULL, "empty context");
17193 age_info = GET_PORT_AGE_INFO(priv);
17194 rte_spinlock_lock(&age_info->aged_sl);
17195 LIST_FOREACH(act, &age_info->aged_aso, next) {
17198 context[nb_flows - 1] =
17199 act->age_params.context;
17200 if (!(--nb_contexts))
17204 TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17207 age_param = MLX5_CNT_TO_AGE(counter);
17208 context[nb_flows - 1] = age_param->context;
17209 if (!(--nb_contexts))
17213 rte_spinlock_unlock(&age_info->aged_sl);
17214 MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17219 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17222 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17224 return flow_dv_counter_alloc(dev, 0);
17228 * Validate indirect action.
17229 * Dispatcher for action type specific validation.
17232 * Pointer to the Ethernet device structure.
17234 * Indirect action configuration.
17235 * @param[in] action
17236 * The indirect action object to validate.
17237 * @param[out] error
17238 * Perform verbose error reporting if not NULL. Initialized in case of
17242 * 0 on success, otherwise negative errno value.
17245 flow_dv_action_validate(struct rte_eth_dev *dev,
17246 const struct rte_flow_indir_action_conf *conf,
17247 const struct rte_flow_action *action,
17248 struct rte_flow_error *err)
17250 struct mlx5_priv *priv = dev->data->dev_private;
17252 RTE_SET_USED(conf);
17253 switch (action->type) {
17254 case RTE_FLOW_ACTION_TYPE_RSS:
17256 * priv->obj_ops is set according to driver capabilities.
17257 * When DevX capabilities are
17258 * sufficient, it is set to devx_obj_ops.
17259 * Otherwise, it is set to ibv_obj_ops.
17260 * ibv_obj_ops doesn't support ind_table_modify operation.
17261 * In this case the indirect RSS action can't be used.
17263 if (priv->obj_ops.ind_table_modify == NULL)
17264 return rte_flow_error_set
17266 RTE_FLOW_ERROR_TYPE_ACTION,
17268 "Indirect RSS action not supported");
17269 return mlx5_validate_action_rss(dev, action, err);
17270 case RTE_FLOW_ACTION_TYPE_AGE:
17271 if (!priv->sh->aso_age_mng)
17272 return rte_flow_error_set(err, ENOTSUP,
17273 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17275 "Indirect age action not supported");
17276 return flow_dv_validate_action_age(0, action, dev, err);
17277 case RTE_FLOW_ACTION_TYPE_COUNT:
17279 * There are two mechanisms to share the action count.
17280 * The old mechanism uses the shared field to share, while the
17281 * new mechanism uses the indirect action API.
17282 * This validation comes to make sure that the two mechanisms
17283 * are not combined.
17285 if (is_shared_action_count(action))
17286 return rte_flow_error_set(err, ENOTSUP,
17287 RTE_FLOW_ERROR_TYPE_ACTION,
17289 "Mix shared and indirect counter is not supported");
17290 return flow_dv_validate_action_count(dev, true, 0, err);
17291 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17292 if (!priv->sh->ct_aso_en)
17293 return rte_flow_error_set(err, ENOTSUP,
17294 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17295 "ASO CT is not supported");
17296 return mlx5_validate_action_ct(dev, action->conf, err);
17298 return rte_flow_error_set(err, ENOTSUP,
17299 RTE_FLOW_ERROR_TYPE_ACTION,
17301 "action type not supported");
17306 * Validate the meter hierarchy chain for meter policy.
17309 * Pointer to the Ethernet device structure.
17310 * @param[in] meter_id
17312 * @param[in] action_flags
17313 * Holds the actions detected until now.
17314 * @param[out] is_rss
17316 * @param[out] hierarchy_domain
17317 * The domain bitmap for hierarchy policy.
17318 * @param[out] error
17319 * Perform verbose error reporting if not NULL. Initialized in case of
17323 * 0 on success, otherwise negative errno value with error set.
17326 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17328 uint64_t action_flags,
17330 uint8_t *hierarchy_domain,
17331 struct rte_mtr_error *error)
17333 struct mlx5_priv *priv = dev->data->dev_private;
17334 struct mlx5_flow_meter_info *fm;
17335 struct mlx5_flow_meter_policy *policy;
17338 if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17339 MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17340 return -rte_mtr_error_set(error, EINVAL,
17341 RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17343 "Multiple fate actions not supported.");
17345 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17347 return -rte_mtr_error_set(error, EINVAL,
17348 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17349 "Meter not found in meter hierarchy.");
17350 if (fm->def_policy)
17351 return -rte_mtr_error_set(error, EINVAL,
17352 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17353 "Non termination meter not supported in hierarchy.");
17354 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17355 MLX5_ASSERT(policy);
17356 if (!policy->is_hierarchy) {
17357 if (policy->transfer)
17358 *hierarchy_domain |=
17359 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17360 if (policy->ingress)
17361 *hierarchy_domain |=
17362 MLX5_MTR_DOMAIN_INGRESS_BIT;
17363 if (policy->egress)
17364 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17365 *is_rss = policy->is_rss;
17368 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17369 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17370 return -rte_mtr_error_set(error, EINVAL,
17371 RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17372 "Exceed max hierarchy meter number.");
17378 * Validate meter policy actions.
17379 * Dispatcher for action type specific validation.
17382 * Pointer to the Ethernet device structure.
17383 * @param[in] action
17384 * The meter policy action object to validate.
17386 * Attributes of flow to determine steering domain.
17387 * @param[out] error
17388 * Perform verbose error reporting if not NULL. Initialized in case of
17392 * 0 on success, otherwise negative errno value.
17395 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17396 const struct rte_flow_action *actions[RTE_COLORS],
17397 struct rte_flow_attr *attr,
17399 uint8_t *domain_bitmap,
17400 bool *is_def_policy,
17401 struct rte_mtr_error *error)
17403 struct mlx5_priv *priv = dev->data->dev_private;
17404 struct mlx5_dev_config *dev_conf = &priv->config;
17405 const struct rte_flow_action *act;
17406 uint64_t action_flags = 0;
17409 struct rte_flow_error flow_err;
17410 uint8_t domain_color[RTE_COLORS] = {0};
17411 uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17412 uint8_t hierarchy_domain = 0;
17413 const struct rte_flow_action_meter *mtr;
17415 if (!priv->config.dv_esw_en)
17416 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17417 *domain_bitmap = def_domain;
17418 if (actions[RTE_COLOR_YELLOW] &&
17419 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17420 return -rte_mtr_error_set(error, ENOTSUP,
17421 RTE_MTR_ERROR_TYPE_METER_POLICY,
17423 "Yellow color does not support any action.");
17424 if (actions[RTE_COLOR_YELLOW] &&
17425 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
17426 return -rte_mtr_error_set(error, ENOTSUP,
17427 RTE_MTR_ERROR_TYPE_METER_POLICY,
17428 NULL, "Red color only supports drop action.");
17430 * Check default policy actions:
17431 * Green/Yellow: no action, Red: drop action
17433 if ((!actions[RTE_COLOR_GREEN] ||
17434 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
17435 *is_def_policy = true;
17438 flow_err.message = NULL;
17439 for (i = 0; i < RTE_COLORS; i++) {
17441 for (action_flags = 0, actions_n = 0;
17442 act && act->type != RTE_FLOW_ACTION_TYPE_END;
17444 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17445 return -rte_mtr_error_set(error, ENOTSUP,
17446 RTE_MTR_ERROR_TYPE_METER_POLICY,
17447 NULL, "too many actions");
17448 switch (act->type) {
17449 case RTE_FLOW_ACTION_TYPE_PORT_ID:
17450 if (!priv->config.dv_esw_en)
17451 return -rte_mtr_error_set(error,
17453 RTE_MTR_ERROR_TYPE_METER_POLICY,
17454 NULL, "PORT action validate check"
17455 " fail for ESW disable");
17456 ret = flow_dv_validate_action_port_id(dev,
17458 act, attr, &flow_err);
17460 return -rte_mtr_error_set(error,
17462 RTE_MTR_ERROR_TYPE_METER_POLICY,
17463 NULL, flow_err.message ?
17465 "PORT action validate check fail");
17467 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17469 case RTE_FLOW_ACTION_TYPE_MARK:
17470 ret = flow_dv_validate_action_mark(dev, act,
17474 return -rte_mtr_error_set(error,
17476 RTE_MTR_ERROR_TYPE_METER_POLICY,
17477 NULL, flow_err.message ?
17479 "Mark action validate check fail");
17480 if (dev_conf->dv_xmeta_en !=
17481 MLX5_XMETA_MODE_LEGACY)
17482 return -rte_mtr_error_set(error,
17484 RTE_MTR_ERROR_TYPE_METER_POLICY,
17485 NULL, "Extend MARK action is "
17486 "not supported. Please try use "
17487 "default policy for meter.");
17488 action_flags |= MLX5_FLOW_ACTION_MARK;
17491 case RTE_FLOW_ACTION_TYPE_SET_TAG:
17492 ret = flow_dv_validate_action_set_tag(dev,
17496 return -rte_mtr_error_set(error,
17498 RTE_MTR_ERROR_TYPE_METER_POLICY,
17499 NULL, flow_err.message ?
17501 "Set tag action validate check fail");
17503 * Count all modify-header actions
17506 if (!(action_flags &
17507 MLX5_FLOW_MODIFY_HDR_ACTIONS))
17509 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17511 case RTE_FLOW_ACTION_TYPE_DROP:
17512 ret = mlx5_flow_validate_action_drop
17516 return -rte_mtr_error_set(error,
17518 RTE_MTR_ERROR_TYPE_METER_POLICY,
17519 NULL, flow_err.message ?
17521 "Drop action validate check fail");
17522 action_flags |= MLX5_FLOW_ACTION_DROP;
17525 case RTE_FLOW_ACTION_TYPE_QUEUE:
17527 * Check whether extensive
17528 * metadata feature is engaged.
17530 if (dev_conf->dv_flow_en &&
17531 (dev_conf->dv_xmeta_en !=
17532 MLX5_XMETA_MODE_LEGACY) &&
17533 mlx5_flow_ext_mreg_supported(dev))
17534 return -rte_mtr_error_set(error,
17536 RTE_MTR_ERROR_TYPE_METER_POLICY,
17537 NULL, "Queue action with meta "
17538 "is not supported. Please try use "
17539 "default policy for meter.");
17540 ret = mlx5_flow_validate_action_queue(act,
17544 return -rte_mtr_error_set(error,
17546 RTE_MTR_ERROR_TYPE_METER_POLICY,
17547 NULL, flow_err.message ?
17549 "Queue action validate check fail");
17550 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17553 case RTE_FLOW_ACTION_TYPE_RSS:
17554 if (dev_conf->dv_flow_en &&
17555 (dev_conf->dv_xmeta_en !=
17556 MLX5_XMETA_MODE_LEGACY) &&
17557 mlx5_flow_ext_mreg_supported(dev))
17558 return -rte_mtr_error_set(error,
17560 RTE_MTR_ERROR_TYPE_METER_POLICY,
17561 NULL, "RSS action with meta "
17562 "is not supported. Please try use "
17563 "default policy for meter.");
17564 ret = mlx5_validate_action_rss(dev, act,
17567 return -rte_mtr_error_set(error,
17569 RTE_MTR_ERROR_TYPE_METER_POLICY,
17570 NULL, flow_err.message ?
17572 "RSS action validate check fail");
17573 action_flags |= MLX5_FLOW_ACTION_RSS;
17577 case RTE_FLOW_ACTION_TYPE_JUMP:
17578 ret = flow_dv_validate_action_jump(dev,
17579 NULL, act, action_flags,
17580 attr, true, &flow_err);
17582 return -rte_mtr_error_set(error,
17584 RTE_MTR_ERROR_TYPE_METER_POLICY,
17585 NULL, flow_err.message ?
17587 "Jump action validate check fail");
17589 action_flags |= MLX5_FLOW_ACTION_JUMP;
17591 case RTE_FLOW_ACTION_TYPE_METER:
17592 if (i != RTE_COLOR_GREEN)
17593 return -rte_mtr_error_set(error,
17595 RTE_MTR_ERROR_TYPE_METER_POLICY,
17596 NULL, flow_err.message ?
17598 "Meter hierarchy only supports GREEN color.");
17600 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17610 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17613 return -rte_mtr_error_set(error, ENOTSUP,
17614 RTE_MTR_ERROR_TYPE_METER_POLICY,
17616 "Doesn't support optional action");
17619 /* Yellow is not supported, just skip. */
17620 if (i == RTE_COLOR_YELLOW)
17622 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
17623 domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17624 else if ((action_flags &
17625 (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17626 (action_flags & MLX5_FLOW_ACTION_MARK))
17628 * Only support MLX5_XMETA_MODE_LEGACY
17629 * so MARK action only in ingress domain.
17631 domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17632 else if (action_flags &
17633 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17634 domain_color[i] = hierarchy_domain;
17636 domain_color[i] = def_domain;
17638 * Validate the drop action mutual exclusion
17639 * with other actions. Drop action is mutually-exclusive
17640 * with any other action, except for Count action.
17642 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
17643 (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
17644 return -rte_mtr_error_set(error, ENOTSUP,
17645 RTE_MTR_ERROR_TYPE_METER_POLICY,
17646 NULL, "Drop action is mutually-exclusive "
17647 "with any other action");
17649 /* Eswitch has few restrictions on using items and actions */
17650 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17651 if (!mlx5_flow_ext_mreg_supported(dev) &&
17652 action_flags & MLX5_FLOW_ACTION_MARK)
17653 return -rte_mtr_error_set(error, ENOTSUP,
17654 RTE_MTR_ERROR_TYPE_METER_POLICY,
17655 NULL, "unsupported action MARK");
17656 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
17657 return -rte_mtr_error_set(error, ENOTSUP,
17658 RTE_MTR_ERROR_TYPE_METER_POLICY,
17659 NULL, "unsupported action QUEUE");
17660 if (action_flags & MLX5_FLOW_ACTION_RSS)
17661 return -rte_mtr_error_set(error, ENOTSUP,
17662 RTE_MTR_ERROR_TYPE_METER_POLICY,
17663 NULL, "unsupported action RSS");
17664 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17665 return -rte_mtr_error_set(error, ENOTSUP,
17666 RTE_MTR_ERROR_TYPE_METER_POLICY,
17667 NULL, "no fate action is found");
17669 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
17671 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17672 if ((domain_color[i] &
17673 MLX5_MTR_DOMAIN_EGRESS_BIT))
17675 MLX5_MTR_DOMAIN_EGRESS_BIT;
17677 return -rte_mtr_error_set(error,
17679 RTE_MTR_ERROR_TYPE_METER_POLICY,
17680 NULL, "no fate action is found");
17683 if (domain_color[i] != def_domain)
17684 *domain_bitmap = domain_color[i];
17690 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17692 struct mlx5_priv *priv = dev->data->dev_private;
17695 if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17696 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17701 if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17702 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17706 if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17707 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17714 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
17715 .validate = flow_dv_validate,
17716 .prepare = flow_dv_prepare,
17717 .translate = flow_dv_translate,
17718 .apply = flow_dv_apply,
17719 .remove = flow_dv_remove,
17720 .destroy = flow_dv_destroy,
17721 .query = flow_dv_query,
17722 .create_mtr_tbls = flow_dv_create_mtr_tbls,
17723 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
17724 .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
17725 .create_meter = flow_dv_mtr_alloc,
17726 .free_meter = flow_dv_aso_mtr_release_to_pool,
17727 .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
17728 .create_mtr_acts = flow_dv_create_mtr_policy_acts,
17729 .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
17730 .create_policy_rules = flow_dv_create_policy_rules,
17731 .destroy_policy_rules = flow_dv_destroy_policy_rules,
17732 .create_def_policy = flow_dv_create_def_policy,
17733 .destroy_def_policy = flow_dv_destroy_def_policy,
17734 .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
17735 .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
17736 .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
17737 .counter_alloc = flow_dv_counter_allocate,
17738 .counter_free = flow_dv_counter_free,
17739 .counter_query = flow_dv_counter_query,
17740 .get_aged_flows = flow_get_aged_flows,
17741 .action_validate = flow_dv_action_validate,
17742 .action_create = flow_dv_action_create,
17743 .action_destroy = flow_dv_action_destroy,
17744 .action_update = flow_dv_action_update,
17745 .action_query = flow_dv_action_query,
17746 .sync_domain = flow_dv_sync_domain,
17749 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */