1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
18 #pragma GCC diagnostic error "-Wpedantic"
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
29 #include <rte_vxlan.h>
33 #include "mlx5_defs.h"
34 #include "mlx5_glue.h"
35 #include "mlx5_flow.h"
37 #include "mlx5_rxtx.h"
39 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
41 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
42 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #ifndef HAVE_MLX5DV_DR_ESWITCH
46 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
47 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
51 #ifndef HAVE_MLX5DV_DR
52 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
56 sizeof(struct rte_flow_item_ipv4))
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 *
 * NOTE(review): the switch body and the attribute assignments appear
 * truncated in this view — confirm against the full file.
 */
88 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
	/* Walk the pattern up to the END item and record which L3/L4
	 * headers are present so later modify-actions know what to touch.
	 */
90 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
92 		case RTE_FLOW_ITEM_TYPE_IPV4:
96 		case RTE_FLOW_ITEM_TYPE_IPV6:
100 		case RTE_FLOW_ITEM_TYPE_UDP:
104 		case RTE_FLOW_ITEM_TYPE_TCP:
/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr color value to convert.
 *
 * @return
 *   The matching mlx5 color, MLX5_FLOW_COLOR_UNDEFINED otherwise.
 */
125 rte_col_2_mlx5_col(enum rte_color rcol)
128 	case RTE_COLOR_GREEN:
129 		return MLX5_FLOW_COLOR_GREEN;
130 	case RTE_COLOR_YELLOW:
131 		return MLX5_FLOW_COLOR_YELLOW;
	/* RTE_COLOR_RED maps directly; anything else is undefined. */
133 		return MLX5_FLOW_COLOR_RED;
137 	return MLX5_FLOW_COLOR_UNDEFINED;
/* Descriptor of a single protocol-header field that a modify-header
 * command can read or write. Tables of these (one per protocol) drive
 * flow_dv_convert_modify_action().
 */
140 struct field_modify_info {
141 	uint32_t size; /* Size of field in protocol header, in bytes. */
142 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
143 	enum mlx5_modification_field id; /* HW modification register id. */
/* Modifiable fields of the Ethernet header. Each 48-bit MAC address is
 * split into a 4-byte high part and a 2-byte low part, matching the
 * HW modification register layout.
 */
146 struct field_modify_info modify_eth[] = {
147 	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
148 	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
149 	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
150 	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
/* Modifiable outermost-VLAN VID field. Unlike the other tables, the
 * size here is expressed in BITS, not bytes — see the inline comment.
 */
154 struct field_modify_info modify_vlan_out_first_vid[] = {
155 	/* Size in bits !!! */
156 	{12, 0, MLX5_MODI_OUT_FIRST_VID},
/* Modifiable fields of the IPv4 header: DSCP, TTL and both addresses,
 * with byte offsets into the on-wire header.
 */
160 struct field_modify_info modify_ipv4[] = {
161 	{1,  1, MLX5_MODI_OUT_IP_DSCP},
162 	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
163 	{4, 12, MLX5_MODI_OUT_SIPV4},
164 	{4, 16, MLX5_MODI_OUT_DIPV4},
/* Modifiable fields of the IPv6 header. The 128-bit source and
 * destination addresses are split into four 32-bit HW registers each.
 */
168 struct field_modify_info modify_ipv6[] = {
169 	{1,  0, MLX5_MODI_OUT_IP_DSCP},
170 	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
171 	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
172 	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
173 	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
174 	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
175 	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
176 	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
177 	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
178 	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
/* Modifiable fields of the UDP header: source and destination ports. */
182 struct field_modify_info modify_udp[] = {
183 	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
184 	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
/* Modifiable fields of the TCP header: ports plus sequence and
 * acknowledgment numbers (used by the TCP seq/ack inc/dec actions).
 */
188 struct field_modify_info modify_tcp[] = {
189 	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
190 	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
191 	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
192 	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
/* Detect an IP-in-IP tunnel from the matched IP item's next-protocol
 * value and record the corresponding tunnel layer flag in *item_flags.
 */
197 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
198 			  uint8_t next_protocol, uint64_t *item_flags,
	/* Only IPv4/IPv6 items can carry an IP tunnel payload. */
201 	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
202 	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
203 	if (next_protocol == IPPROTO_IPIP) {
204 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
207 	if (next_protocol == IPPROTO_IPV6) {
208 		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created on top of it.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
223 flow_dv_shared_lock(struct rte_eth_dev *dev)
225 	struct mlx5_priv *priv = dev->data->dev_private;
226 	struct mlx5_ibv_shared *sh = priv->sh;
	/* dv_refcnt > 1 means the IB context is shared between ports. */
228 	if (sh->dv_refcnt > 1) {
231 		ret = pthread_mutex_lock(&sh->dv_mutex);
/* Release the shared dv context lock taken by flow_dv_shared_lock().
 * Unlocks only when the context is actually shared (dv_refcnt > 1),
 * mirroring the lock-side condition.
 */
238 flow_dv_shared_unlock(struct rte_eth_dev *dev)
240 	struct mlx5_priv *priv = dev->data->dev_private;
241 	struct mlx5_ibv_shared *sh = priv->sh;
243 	if (sh->dv_refcnt > 1) {
246 		ret = pthread_mutex_unlock(&sh->dv_mutex);
/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr to update in place.
 */
260 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
261 			 struct rte_vlan_hdr *vlan)
	/* Replace only the PCP bits (top 3) of the TCI. */
264 	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
266 			((const struct rte_flow_action_of_set_vlan_pcp *)
267 					      action->conf)->vlan_pcp;
268 		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
269 		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
270 		vlan->vlan_tci |= vlan_tci;
	/* Replace only the VID bits (low 12) of the TCI. */
271 	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
272 		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
273 		vlan->vlan_tci |= rte_be_to_cpu_16
274 		    (((const struct rte_flow_action_of_set_vlan_vid *)
275 					     action->conf)->vlan_vid);
/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
291 static inline uint32_t
292 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
	/* 2-byte field: single unaligned big-endian load. */
301 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
	/* 3-byte field: 16-bit load combined with the trailing byte. */
304 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
305 		ret = (ret << 8) | *(data + sizeof(uint16_t));
	/* 4-byte field: single unaligned big-endian load. */
308 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
347 flow_dv_convert_modify_action(struct rte_flow_item *item,
348 			      struct field_modify_info *field,
349 			      struct field_modify_info *dcopy,
350 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
351 			      uint32_t type, struct rte_flow_error *error)
353 	uint32_t i = resource->actions_num;
354 	struct mlx5_modification_cmd *actions = resource->actions;
	/*
357 	 * The item and mask are provided in big-endian format.
358 	 * The fields should be presented as in big-endian format either.
359 	 * Mask must be always present, it defines the actual field width.
	 */
	/* Guard against overflowing the fixed-size command array. */
369 		if (i >= MLX5_MAX_MODIFY_NUM)
370 			return rte_flow_error_set(error, EINVAL,
371 				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
372 				 "too many items to modify");
373 		/* Fetch variable byte size mask from the array. */
374 		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
375 					   field->offset, field->size);
380 		/* Deduce actual data width in bits from mask value. */
381 		off_b = rte_bsf32(mask);
382 		size_b = sizeof(uint32_t) * CHAR_BIT -
383 			 off_b - __builtin_clz(mask);
	/* A length of 0 encodes the full 32-bit width for the HW. */
385 		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
386 		actions[i].action_type = type;
387 		actions[i].field = field->id;
388 		actions[i].offset = off_b;
389 		actions[i].length = size_b;
390 		/* Convert entire record to expected big-endian format. */
391 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
392 		if (type == MLX5_MODIFICATION_TYPE_COPY) {
394 			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset means "reuse source offset". */
395 			actions[i].dst_offset =
396 				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
397 			/* Convert entire record to big-endian format. */
398 			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
401 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
402 						   field->offset, field->size);
403 			/* Shift out the trailing masked bits from data. */
404 			data = (data & mask) >> off_b;
405 			actions[i].data1 = rte_cpu_to_be_32(data);
	/* Iterate until the field table terminator (size == 0). */
409 	} while (field->size);
	/* No command emitted at all means every field mask was empty. */
410 	if (resource->actions_num == i)
411 		return rte_flow_error_set(error, EINVAL,
412 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
413 					  "invalid modification flow item");
414 	resource->actions_num = i;
419 * Convert modify-header set IPv4 address action to DV specification.
421 * @param[in,out] resource
422 * Pointer to the modify-header resource.
424 * Pointer to action specification.
426 * Pointer to the error structure.
429 * 0 on success, a negative errno value otherwise and rte_errno is set.
432 flow_dv_convert_action_modify_ipv4
433 (struct mlx5_flow_dv_modify_hdr_resource *resource,
434 const struct rte_flow_action *action,
435 struct rte_flow_error *error)
437 const struct rte_flow_action_set_ipv4 *conf =
438 (const struct rte_flow_action_set_ipv4 *)(action->conf);
439 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
440 struct rte_flow_item_ipv4 ipv4;
441 struct rte_flow_item_ipv4 ipv4_mask;
443 memset(&ipv4, 0, sizeof(ipv4));
444 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
445 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
446 ipv4.hdr.src_addr = conf->ipv4_addr;
447 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
449 ipv4.hdr.dst_addr = conf->ipv4_addr;
450 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
453 item.mask = &ipv4_mask;
454 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
455 MLX5_MODIFICATION_TYPE_SET, error);
459 * Convert modify-header set IPv6 address action to DV specification.
461 * @param[in,out] resource
462 * Pointer to the modify-header resource.
464 * Pointer to action specification.
466 * Pointer to the error structure.
469 * 0 on success, a negative errno value otherwise and rte_errno is set.
472 flow_dv_convert_action_modify_ipv6
473 (struct mlx5_flow_dv_modify_hdr_resource *resource,
474 const struct rte_flow_action *action,
475 struct rte_flow_error *error)
477 const struct rte_flow_action_set_ipv6 *conf =
478 (const struct rte_flow_action_set_ipv6 *)(action->conf);
479 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
480 struct rte_flow_item_ipv6 ipv6;
481 struct rte_flow_item_ipv6 ipv6_mask;
483 memset(&ipv6, 0, sizeof(ipv6));
484 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
485 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
486 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
487 sizeof(ipv6.hdr.src_addr));
488 memcpy(&ipv6_mask.hdr.src_addr,
489 &rte_flow_item_ipv6_mask.hdr.src_addr,
490 sizeof(ipv6.hdr.src_addr));
492 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
493 sizeof(ipv6.hdr.dst_addr));
494 memcpy(&ipv6_mask.hdr.dst_addr,
495 &rte_flow_item_ipv6_mask.hdr.dst_addr,
496 sizeof(ipv6.hdr.dst_addr));
499 item.mask = &ipv6_mask;
500 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
501 MLX5_MODIFICATION_TYPE_SET, error);
505 * Convert modify-header set MAC address action to DV specification.
507 * @param[in,out] resource
508 * Pointer to the modify-header resource.
510 * Pointer to action specification.
512 * Pointer to the error structure.
515 * 0 on success, a negative errno value otherwise and rte_errno is set.
518 flow_dv_convert_action_modify_mac
519 (struct mlx5_flow_dv_modify_hdr_resource *resource,
520 const struct rte_flow_action *action,
521 struct rte_flow_error *error)
523 const struct rte_flow_action_set_mac *conf =
524 (const struct rte_flow_action_set_mac *)(action->conf);
525 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
526 struct rte_flow_item_eth eth;
527 struct rte_flow_item_eth eth_mask;
529 memset(ð, 0, sizeof(eth));
530 memset(ð_mask, 0, sizeof(eth_mask));
531 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
532 memcpy(ð.src.addr_bytes, &conf->mac_addr,
533 sizeof(eth.src.addr_bytes));
534 memcpy(ð_mask.src.addr_bytes,
535 &rte_flow_item_eth_mask.src.addr_bytes,
536 sizeof(eth_mask.src.addr_bytes));
538 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
539 sizeof(eth.dst.addr_bytes));
540 memcpy(ð_mask.dst.addr_bytes,
541 &rte_flow_item_eth_mask.dst.addr_bytes,
542 sizeof(eth_mask.dst.addr_bytes));
545 item.mask = ð_mask;
546 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
547 MLX5_MODIFICATION_TYPE_SET, error);
551 * Convert modify-header set VLAN VID action to DV specification.
553 * @param[in,out] resource
554 * Pointer to the modify-header resource.
556 * Pointer to action specification.
558 * Pointer to the error structure.
561 * 0 on success, a negative errno value otherwise and rte_errno is set.
564 flow_dv_convert_action_modify_vlan_vid
565 (struct mlx5_flow_dv_modify_hdr_resource *resource,
566 const struct rte_flow_action *action,
567 struct rte_flow_error *error)
569 const struct rte_flow_action_of_set_vlan_vid *conf =
570 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
571 int i = resource->actions_num;
572 struct mlx5_modification_cmd *actions = &resource->actions[i];
573 struct field_modify_info *field = modify_vlan_out_first_vid;
575 if (i >= MLX5_MAX_MODIFY_NUM)
576 return rte_flow_error_set(error, EINVAL,
577 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
578 "too many items to modify");
579 actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
580 actions[i].field = field->id;
581 actions[i].length = field->size;
582 actions[i].offset = field->offset;
583 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
584 actions[i].data1 = conf->vlan_vid;
585 actions[i].data1 = actions[i].data1 << 16;
586 resource->actions_num = ++i;
/**
 * Convert modify-header set TP (transport port) action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
608 flow_dv_convert_action_modify_tp
609 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
610 			 const struct rte_flow_action *action,
611 			 const struct rte_flow_item *items,
612 			 union flow_dv_attr *attr,
613 			 struct rte_flow_error *error)
615 	const struct rte_flow_action_set_tp *conf =
616 		(const struct rte_flow_action_set_tp *)(action->conf);
617 	struct rte_flow_item item;
618 	struct rte_flow_item_udp udp;
619 	struct rte_flow_item_udp udp_mask;
620 	struct rte_flow_item_tcp tcp;
621 	struct rte_flow_item_tcp tcp_mask;
622 	struct field_modify_info *field;
	/* Scan the pattern to learn whether UDP or TCP is present. */
625 		flow_dv_attr_init(items, attr);
	/* UDP branch: build a UDP item carrying the new port. */
627 		memset(&udp, 0, sizeof(udp));
628 		memset(&udp_mask, 0, sizeof(udp_mask));
629 		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
630 			udp.hdr.src_port = conf->port;
631 			udp_mask.hdr.src_port =
632 					rte_flow_item_udp_mask.hdr.src_port;
634 			udp.hdr.dst_port = conf->port;
635 			udp_mask.hdr.dst_port =
636 					rte_flow_item_udp_mask.hdr.dst_port;
638 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
640 		item.mask = &udp_mask;
	/* TCP branch: build a TCP item carrying the new port. */
644 		memset(&tcp, 0, sizeof(tcp));
645 		memset(&tcp_mask, 0, sizeof(tcp_mask));
646 		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
647 			tcp.hdr.src_port = conf->port;
648 			tcp_mask.hdr.src_port =
649 					rte_flow_item_tcp_mask.hdr.src_port;
651 			tcp.hdr.dst_port = conf->port;
652 			tcp_mask.hdr.dst_port =
653 					rte_flow_item_tcp_mask.hdr.dst_port;
655 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
657 		item.mask = &tcp_mask;
660 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
661 					     MLX5_MODIFICATION_TYPE_SET, error);
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
682 flow_dv_convert_action_modify_ttl
683 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
684 			 const struct rte_flow_action *action,
685 			 const struct rte_flow_item *items,
686 			 union flow_dv_attr *attr,
687 			 struct rte_flow_error *error)
689 	const struct rte_flow_action_set_ttl *conf =
690 		(const struct rte_flow_action_set_ttl *)(action->conf);
691 	struct rte_flow_item item;
692 	struct rte_flow_item_ipv4 ipv4;
693 	struct rte_flow_item_ipv4 ipv4_mask;
694 	struct rte_flow_item_ipv6 ipv6;
695 	struct rte_flow_item_ipv6 ipv6_mask;
696 	struct field_modify_info *field;
	/* Scan the pattern to learn whether IPv4 or IPv6 is present. */
699 		flow_dv_attr_init(items, attr);
	/* IPv4 branch: write the new TTL with a full-byte mask. */
701 		memset(&ipv4, 0, sizeof(ipv4));
702 		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
703 		ipv4.hdr.time_to_live = conf->ttl_value;
704 		ipv4_mask.hdr.time_to_live = 0xFF;
705 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
707 		item.mask = &ipv4_mask;
	/* IPv6 branch: hop_limits plays the role of TTL. */
711 		memset(&ipv6, 0, sizeof(ipv6));
712 		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
713 		ipv6.hdr.hop_limits = conf->ttl_value;
714 		ipv6_mask.hdr.hop_limits = 0xFF;
715 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
717 		item.mask = &ipv6_mask;
720 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
721 					     MLX5_MODIFICATION_TYPE_SET, error);
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
742 flow_dv_convert_action_modify_dec_ttl
743 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
744 			 const struct rte_flow_item *items,
745 			 union flow_dv_attr *attr,
746 			 struct rte_flow_error *error)
748 	struct rte_flow_item item;
749 	struct rte_flow_item_ipv4 ipv4;
750 	struct rte_flow_item_ipv4 ipv4_mask;
751 	struct rte_flow_item_ipv6 ipv6;
752 	struct rte_flow_item_ipv6 ipv6_mask;
753 	struct field_modify_info *field;
	/* Scan the pattern to learn whether IPv4 or IPv6 is present. */
756 		flow_dv_attr_init(items, attr);
	/* Adding 0xFF with TYPE_ADD decrements the TTL byte by one
	 * (modular byte arithmetic), hence spec == mask == 0xFF below.
	 */
758 		memset(&ipv4, 0, sizeof(ipv4));
759 		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
760 		ipv4.hdr.time_to_live = 0xFF;
761 		ipv4_mask.hdr.time_to_live = 0xFF;
762 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
764 		item.mask = &ipv4_mask;
768 		memset(&ipv6, 0, sizeof(ipv6));
769 		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
770 		ipv6.hdr.hop_limits = 0xFF;
771 		ipv6_mask.hdr.hop_limits = 0xFF;
772 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
774 		item.mask = &ipv6_mask;
777 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
778 					     MLX5_MODIFICATION_TYPE_ADD, error);
/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
796 flow_dv_convert_action_modify_tcp_seq
797 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
798 			 const struct rte_flow_action *action,
799 			 struct rte_flow_error *error)
801 	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
802 	uint64_t value = rte_be_to_cpu_32(*conf);
803 	struct rte_flow_item item;
804 	struct rte_flow_item_tcp tcp;
805 	struct rte_flow_item_tcp tcp_mask;
807 	memset(&tcp, 0, sizeof(tcp));
808 	memset(&tcp_mask, 0, sizeof(tcp_mask));
809 	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
811 		 * The HW has no decrement operation, only increment operation.
812 		 * To simulate decrement X from Y using increment operation
813 		 * we need to add UINT32_MAX X times to Y.
814 		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
817 	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
818 	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
819 	item.type = RTE_FLOW_ITEM_TYPE_TCP;
821 	item.mask = &tcp_mask;
822 	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
823 					     MLX5_MODIFICATION_TYPE_ADD, error);
/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
841 flow_dv_convert_action_modify_tcp_ack
842 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
843 			 const struct rte_flow_action *action,
844 			 struct rte_flow_error *error)
846 	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
847 	uint64_t value = rte_be_to_cpu_32(*conf);
848 	struct rte_flow_item item;
849 	struct rte_flow_item_tcp tcp;
850 	struct rte_flow_item_tcp tcp_mask;
852 	memset(&tcp, 0, sizeof(tcp));
853 	memset(&tcp_mask, 0, sizeof(tcp_mask));
854 	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
856 		 * The HW has no decrement operation, only increment operation.
857 		 * To simulate decrement X from Y using increment operation
858 		 * we need to add UINT32_MAX X times to Y.
859 		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
862 	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
863 	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
864 	item.type = RTE_FLOW_ITEM_TYPE_TCP;
866 	item.mask = &tcp_mask;
867 	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
868 					     MLX5_MODIFICATION_TYPE_ADD, error);
/* Map internal metadata register ids (enum modify_reg) to the HW
 * modification field ids used in modify-header commands.
 */
871 static enum mlx5_modification_field reg_to_field[] = {
872 	[REG_NONE] = MLX5_MODI_OUT_NONE,
873 	[REG_A] = MLX5_MODI_META_DATA_REG_A,
874 	[REG_B] = MLX5_MODI_META_DATA_REG_B,
875 	[REG_C_0] = MLX5_MODI_META_REG_C_0,
876 	[REG_C_1] = MLX5_MODI_META_REG_C_1,
877 	[REG_C_2] = MLX5_MODI_META_REG_C_2,
878 	[REG_C_3] = MLX5_MODI_META_REG_C_3,
879 	[REG_C_4] = MLX5_MODI_META_REG_C_4,
880 	[REG_C_5] = MLX5_MODI_META_REG_C_5,
881 	[REG_C_6] = MLX5_MODI_META_REG_C_6,
882 	[REG_C_7] = MLX5_MODI_META_REG_C_7,
/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
899 flow_dv_convert_action_set_reg
900 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
901 			 const struct rte_flow_action *action,
902 			 struct rte_flow_error *error)
904 	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
905 	struct mlx5_modification_cmd *actions = resource->actions;
906 	uint32_t i = resource->actions_num;
908 	if (i >= MLX5_MAX_MODIFY_NUM)
909 		return rte_flow_error_set(error, EINVAL,
910 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
911 					  "too many items to modify");
	/* The register id must have been resolved by the caller. */
912 	assert(conf->id != REG_NONE);
913 	assert(conf->id < RTE_DIM(reg_to_field));
914 	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
915 	actions[i].field = reg_to_field[conf->id];
	/* Convert entire record to expected big-endian format. */
916 	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
917 	actions[i].data1 = rte_cpu_to_be_32(conf->data);
919 	resource->actions_num = i;
/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
939 flow_dv_convert_action_set_tag
940 			(struct rte_eth_dev *dev,
941 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
942 			 const struct rte_flow_action_set_tag *conf,
943 			 struct rte_flow_error *error)
	/* Routine below expects spec/mask in memory big-endian order. */
945 	rte_be32_t data = rte_cpu_to_be_32(conf->data);
946 	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
947 	struct rte_flow_item item = {
951 	struct field_modify_info reg_c_x[] = {
954 	enum mlx5_modification_field reg_type;
	/* Resolve which metadata register backs this tag index. */
957 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
960 	assert(ret != REG_NONE);
961 	assert((unsigned int)ret < RTE_DIM(reg_to_field));
962 	reg_type = reg_to_field[ret];
963 	assert(reg_type > 0);
964 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
965 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
966 					     MLX5_MODIFICATION_TYPE_SET, error);
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
985 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
986 				 struct mlx5_flow_dv_modify_hdr_resource *res,
987 				 const struct rte_flow_action *action,
988 				 struct rte_flow_error *error)
990 	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
991 	rte_be32_t mask = RTE_BE32(UINT32_MAX);
992 	struct rte_flow_item item = {
996 	struct field_modify_info reg_src[] = {
997 		{4, 0, reg_to_field[conf->src]},
1000 	struct field_modify_info reg_dst = {
1002 		.id = reg_to_field[conf->dst],
1004 	/* Adjust reg_c[0] usage according to reported mask. */
1005 	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1006 		struct mlx5_priv *priv = dev->data->dev_private;
1007 		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
		/* reg_c[0] is only partially available in legacy mode. */
1010 		assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1011 		if (conf->dst == REG_C_0) {
1012 			/* Copy to reg_c[0], within mask only. */
1013 			reg_dst.offset = rte_bsf32(reg_c0);
			/*
1015 			 * Mask is ignoring the enianness, because
1016 			 * there is no conversion in datapath.
			 */
1018 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1019 			/* Copy from destination lower bits to reg_c[0]. */
1020 			mask = reg_c0 >> reg_dst.offset;
1022 			/* Copy from destination upper bits to reg_c[0]. */
1023 			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1024 					  rte_fls_u32(reg_c0));
1027 			mask = rte_cpu_to_be_32(reg_c0);
1028 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1029 			/* Copy from reg_c[0] to destination lower bits. */
1032 			/* Copy from reg_c[0] to destination upper bits. */
1033 			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1034 					 (rte_fls_u32(reg_c0) -
1039 	return flow_dv_convert_modify_action(&item,
1040 					     reg_src, &reg_dst, res,
1041 					     MLX5_MODIFICATION_TYPE_COPY,
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1063 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1064 			    const struct rte_flow_action_mark *conf,
1065 			    struct mlx5_flow_dv_modify_hdr_resource *resource,
1066 			    struct rte_flow_error *error)
1068 	struct mlx5_priv *priv = dev->data->dev_private;
	/* Limit the mark to the bits the device actually supports. */
1069 	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1070 					   priv->sh->dv_mark_mask);
1071 	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1072 	struct rte_flow_item item = {
1076 	struct field_modify_info reg_c_x[] = {
1077 		{4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
1080 	enum modify_reg reg;
1083 		return rte_flow_error_set(error, EINVAL,
1084 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1085 					  NULL, "zero mark action mask");
	/* Resolve which metadata register carries the MARK value. */
1086 	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	/* reg_c[0] is shared: shift data/mask into its usable bits. */
1090 	if (reg == REG_C_0) {
1091 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1092 		uint32_t shl_c0 = rte_bsf32(msk_c0);
1094 		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1095 		mask = rte_cpu_to_be_32(mask) & msk_c0;
1096 		mask = rte_cpu_to_be_32(mask << shl_c0);
1098 	reg_c_x[0].id = reg_to_field[reg];
1099 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1100 					     MLX5_MODIFICATION_TYPE_SET, error);
/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
1117 static enum modify_reg
1118 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1119 			 const struct rte_flow_attr *attr,
1120 			 struct rte_flow_error *error)
	/* Feature selection depends on the steering domain (FDB/TX/RX). */
1122 	enum modify_reg reg =
1123 		mlx5_flow_get_reg_id(dev, attr->transfer ?
1127 					    MLX5_METADATA_RX, 0, error);
1129 		return rte_flow_error_set(error,
1130 					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1131 					  NULL, "unavailable "
1132 					  "metadata register");
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1154 flow_dv_convert_action_set_meta
1155 			(struct rte_eth_dev *dev,
1156 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
1157 			 const struct rte_flow_attr *attr,
1158 			 const struct rte_flow_action_set_meta *conf,
1159 			 struct rte_flow_error *error)
1161 	uint32_t data = conf->data;
1162 	uint32_t mask = conf->mask;
1163 	struct rte_flow_item item = {
1167 	struct field_modify_info reg_c_x[] = {
1170 	enum modify_reg reg = flow_dv_get_metadata_reg(dev, attr, error);
	/*
1175 	 * In datapath code there is no endianness
1176 	 * coversions for perfromance reasons, all
1177 	 * pattern conversions are done in rte_flow.
	 */
	/* reg_c[0] is shared: align the value to its usable bit range. */
1179 	if (reg == REG_C_0) {
1180 		struct mlx5_priv *priv = dev->data->dev_private;
1181 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1185 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1186 		shl_c0 = rte_bsf32(msk_c0);
1188 		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1192 		assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1194 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1195 	/* The routine expects parameters in memory as big-endian ones. */
1196 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1197 					     MLX5_MODIFICATION_TYPE_SET, error);
1201 * Convert modify-header set IPv4 DSCP action to DV specification.
1203 * @param[in,out] resource
1204 * Pointer to the modify-header resource.
1206 * Pointer to action specification.
1208 * Pointer to the error structure.
1211 * 0 on success, a negative errno value otherwise and rte_errno is set.
1214 flow_dv_convert_action_modify_ipv4_dscp
1215 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1216 const struct rte_flow_action *action,
1217 struct rte_flow_error *error)
1219 const struct rte_flow_action_set_dscp *conf =
1220 (const struct rte_flow_action_set_dscp *)(action->conf);
1221 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1222 struct rte_flow_item_ipv4 ipv4;
1223 struct rte_flow_item_ipv4 ipv4_mask;
1225 memset(&ipv4, 0, sizeof(ipv4));
1226 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1227 ipv4.hdr.type_of_service = conf->dscp;
1228 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1230 item.mask = &ipv4_mask;
1231 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1232 MLX5_MODIFICATION_TYPE_SET, error);
1236 * Convert modify-header set IPv6 DSCP action to DV specification.
1238 * @param[in,out] resource
1239 * Pointer to the modify-header resource.
1241 * Pointer to action specification.
1243 * Pointer to the error structure.
1246 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
	/* Same scheme as the IPv4 variant: wrap the DSCP value in a
	 * synthetic IPv6 item and delegate to the generic converter.
	 */
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bits offset of IPv6 is not byte aligned,
	 * rdma-core only accept the DSCP bits byte aligned start from
	 * bit 0 to 5 as to be compatible with IPv4. No need to shift the
	 * bits in IPv6 case as rdma-core requires byte aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	/* RTE_IPV6_HDR_DSCP_MASK sits at bits 22-27 of vtc_flow; >> 22
	 * realigns it to bits 0-5 per the rdma-core convention above.
	 */
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
1277 * Validate MARK item.
1280 * Pointer to the rte_eth_dev structure.
1282 * Item specification.
1284 * Attributes of flow that includes this item.
1286 * Pointer to error structure.
1289 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	/* The NIC can only match the mark bits actually backed by the
	 * device's mark register mask.
	 */
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	/* Matching on MARK requires extended metadata mode. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	/* Probe for a register to hold MARK; error set by the helper. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be empty");
	/* Reject ids beyond what the register mask can represent. */
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "mark id exceeds the limit");
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
1346 * Validate META item.
1349 * Pointer to the rte_eth_dev structure.
1351 * Item specification.
1353 * Attributes of flow that includes this item.
1355 * Pointer to error structure.
1358 * 0 on success, a negative errno value otherwise and rte_errno is set.
1361 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1362 const struct rte_flow_item *item,
1363 const struct rte_flow_attr *attr,
1364 struct rte_flow_error *error)
1366 struct mlx5_priv *priv = dev->data->dev_private;
1367 struct mlx5_dev_config *config = &priv->config;
1368 const struct rte_flow_item_meta *spec = item->spec;
1369 const struct rte_flow_item_meta *mask = item->mask;
1370 struct rte_flow_item_meta nic_mask = {
1373 enum modify_reg reg;
1377 return rte_flow_error_set(error, EINVAL,
1378 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1380 "data cannot be empty");
1382 return rte_flow_error_set(error, EINVAL,
1383 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1384 "data cannot be zero");
1385 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1386 if (!mlx5_flow_ext_mreg_supported(dev))
1387 return rte_flow_error_set(error, ENOTSUP,
1388 RTE_FLOW_ERROR_TYPE_ITEM, item,
1389 "extended metadata register"
1390 " isn't supported");
1391 reg = flow_dv_get_metadata_reg(dev, attr, error);
1395 return rte_flow_error_set(error, ENOTSUP,
1396 RTE_FLOW_ERROR_TYPE_ITEM, item,
1400 nic_mask.data = priv->sh->dv_meta_mask;
1403 mask = &rte_flow_item_meta_mask;
1404 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1405 (const uint8_t *)&nic_mask,
1406 sizeof(struct rte_flow_item_meta),
1412 * Validate TAG item.
1415 * Pointer to the rte_eth_dev structure.
1417 * Item specification.
1419 * Attributes of flow that includes this item.
1421 * Pointer to error structure.
1424 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	/* Full 32-bit data match is accepted for TAG items. */
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  "data cannot be empty");
		mask = &rte_flow_item_tag_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
	/* The register index itself must be matched exactly. */
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	/* Map the app tag index to a hardware register; helper sets error. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	assert(ret != REG_NONE);
1471 * Validate vport item.
1474 * Pointer to the rte_eth_dev structure.
1476 * Item specification.
1478 * Attributes of flow that includes this item.
1479 * @param[in] item_flags
1480 * Bit-fields that holds the items detected until now.
1482 * Pointer to error structure.
1485 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	/* PORT_ID matching only makes sense on transfer (E-Switch) rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	/* Only one source-port item per rule is supported. */
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
		mask = &switch_mask;
	/* Only an exact (full) id mask is accepted. */
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  "no support for partial mask on"
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
	dev_priv = mlx5_dev_to_eswitch_info(dev);
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "failed to obtain E-Switch info");
	/* The matched port must live in the same E-Switch domain as dev. */
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
1552 * Validate GTP item.
1555 * Pointer to the rte_eth_dev structure.
1557 * Item specification.
1558 * @param[in] item_flags
1559 * Bit-fields that holds the items detected until now.
1561 * Pointer to error structure.
1564 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  uint64_t item_flags,
			  struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_gtp *mask = item->mask;
	/* Hardware supports full-TEID matching for GTP. */
	const struct rte_flow_item_gtp nic_mask = {
		.teid = RTE_BE32(0xffffffff),
	/* Requires stateless GTP offload capability in HCA attributes. */
	if (!priv->config.hca_attr.tunnel_stateless_gtp)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "GTP support is not enabled");
	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple tunnel layers not"
	/* GTP rides on UDP; an outer UDP layer must precede this item. */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "no outer UDP layer found");
		mask = &rte_flow_item_gtp_mask;
	return mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_gtp),
1602 * Validate the pop VLAN action.
1605 * Pointer to the rte_eth_dev structure.
1606 * @param[in] action_flags
1607 * Holds the actions detected until now.
1609 * Pointer to the pop vlan action.
1610 * @param[in] item_flags
1611 * The items found in this flow rule.
1613 * Pointer to flow attributes.
1615 * Pointer to error structure.
1618 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
				 uint64_t action_flags,
				 const struct rte_flow_action *action,
				 uint64_t item_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Device must expose a shared pop-VLAN action object. */
	if (!priv->sh->pop_vlan_action)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "pop vlan action is not supported");
	/*
	 * Check for inconsistencies:
	 *  fail strip_vlan in a flow that matches packets without VLAN tags.
	 *  fail strip_vlan in a flow that matches packets without explicitly a
	 *  matching on VLAN tag ?
	 */
	if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "no support for multiple vlan pop "
	/* Popping requires the rule to actually match an outer VLAN tag. */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "cannot pop vlan without a "
					  "match on (outer) vlan in the flow");
	/* port_id (forward) must come after the VLAN manipulation. */
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after pop VLAN action");
1664 * Get VLAN default info from vlan match info.
1667 * Pointer to the rte_eth_dev structure.
1669 * the list of item specifications.
1671 * pointer VLAN info to fill to.
1673 * Pointer to error structure.
1676 * 0 on success, a negative errno value otherwise and rte_errno is set.
1679 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1680 struct rte_vlan_hdr *vlan)
1682 const struct rte_flow_item_vlan nic_mask = {
1683 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1684 MLX5DV_FLOW_VLAN_VID_MASK),
1685 .inner_type = RTE_BE16(0xffff),
1690 for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
1691 items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
1693 if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1694 const struct rte_flow_item_vlan *vlan_m = items->mask;
1695 const struct rte_flow_item_vlan *vlan_v = items->spec;
1699 /* Only full match values are accepted */
1700 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1701 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1702 vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
1704 rte_be_to_cpu_16(vlan_v->tci &
1705 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1707 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1708 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1709 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1711 rte_be_to_cpu_16(vlan_v->tci &
1712 MLX5DV_FLOW_VLAN_VID_MASK_BE);
1714 if (vlan_m->inner_type == nic_mask.inner_type)
1715 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1716 vlan_m->inner_type);
1721 * Validate the push VLAN action.
1723 * @param[in] action_flags
1724 * Holds the actions detected until now.
1726 * Pointer to the encap action.
1728 * Pointer to flow attributes
1730 * Pointer to error structure.
1733 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_push_vlan(uint64_t action_flags,
				  uint64_t item_flags __rte_unused,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
	/* Only 802.1Q and 802.1ad (QinQ) tag protocol IDs are accepted. */
	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "invalid vlan ethertype");
	/* Only a single VLAN-manipulation action per flow is supported. */
	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "no support for multiple VLAN "
	/* port_id (forward) must follow the push VLAN action. */
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after push VLAN");
1764 * Validate the set VLAN PCP.
1766 * @param[in] action_flags
1767 * Holds the actions detected until now.
1768 * @param[in] actions
1769 * Pointer to the list of actions remaining in the flow rule.
1771 * Pointer to flow attributes
1773 * Pointer to error structure.
1776 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
	/* PCP is a 3-bit field (0-7). */
	if (conf->vlan_pcp > 7)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN PCP value is too big");
	/* Setting PCP is only supported on a VLAN header pushed earlier
	 * in the same flow rule.
	 */
	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set VLAN PCP action must follow "
					  "the push VLAN action");
	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Multiple VLAN PCP modification are "
	/* port_id (forward) must come after the PCP modification. */
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after set VLAN PCP");
1809 * Validate the set VLAN VID.
1811 * @param[in] item_flags
1812 * Holds the items detected in this rule.
1813 * @param[in] actions
1814 * Pointer to the list of actions remaining in the flow rule.
1816 * Pointer to flow attributes
1818 * Pointer to error structure.
1821 * 0 on success, a negative errno value otherwise and rte_errno is set.
1824 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1825 uint64_t action_flags,
1826 const struct rte_flow_action actions[],
1827 struct rte_flow_error *error)
1829 const struct rte_flow_action *action = actions;
1830 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1832 if (conf->vlan_vid > RTE_BE16(0xFFE))
1833 return rte_flow_error_set(error, EINVAL,
1834 RTE_FLOW_ERROR_TYPE_ACTION, action,
1835 "VLAN VID value is too big");
1836 /* there is an of_push_vlan action before us */
1837 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
1838 if (mlx5_flow_find_action(actions + 1,
1839 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1840 return rte_flow_error_set(error, ENOTSUP,
1841 RTE_FLOW_ERROR_TYPE_ACTION, action,
1842 "Multiple VLAN VID modifications are "
1849 * Action is on an existing VLAN header:
1850 * Need to verify this is a single modify CID action.
1851 * Rule mast include a match on outer VLAN.
1853 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1854 return rte_flow_error_set(error, ENOTSUP,
1855 RTE_FLOW_ERROR_TYPE_ACTION, action,
1856 "Multiple VLAN VID modifications are "
1858 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1859 return rte_flow_error_set(error, EINVAL,
1860 RTE_FLOW_ERROR_TYPE_ACTION, action,
1861 "match on VLAN is required in order "
1863 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1864 return rte_flow_error_set(error, EINVAL,
1865 RTE_FLOW_ERROR_TYPE_ACTION, action,
1866 "wrong action order, port_id should "
1867 "be after set VLAN VID");
1872 * Validate the FLAG action.
1875 * Pointer to the rte_eth_dev structure.
1876 * @param[in] action_flags
1877 * Holds the actions detected until now.
1879 * Pointer to flow attributes
1881 * Pointer to error structure.
1884 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_flag(action_flags, attr,
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support flag action");
	/* The default flag value must fit into the device mark mask. */
	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	/* FLAG and MARK are mutually exclusive within one flow. */
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  " actions in same flow");
1928 * Validate MARK action.
1931 * Pointer to the rte_eth_dev structure.
1933 * Pointer to action.
1934 * @param[in] action_flags
1935 * Holds the actions detected until now.
1937 * Pointer to flow attributes
1939 * Pointer to error structure.
1942 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_mark(struct rte_eth_dev *dev,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_action_mark *mark = action->conf;
	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_mark(action, action_flags,
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support mark action");
	if (!priv->sh->dv_mark_mask)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* Mark id is bounded by both the API limit and the register mask. */
	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "mark id exceeds the limit");
	/* FLAG and MARK are mutually exclusive; at most one MARK per flow. */
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
1997 * Validate SET_META action.
2000 * Pointer to the rte_eth_dev structure.
2002 * Pointer to the encap action.
2003 * @param[in] action_flags
2004 * Holds the actions detected until now.
2006 * Pointer to flow attributes
2008 * Pointer to error structure.
2011 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
				 const struct rte_flow_action *action,
				 uint64_t action_flags __rte_unused,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	const struct rte_flow_action_set_meta *conf;
	uint32_t nic_mask = UINT32_MAX;
	enum modify_reg reg;
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "extended metadata register"
					  " isn't supported");
	reg = flow_dv_get_metadata_reg(dev, attr, error);
	/* When META lands in a C register, narrow the writable mask to the
	 * bits the device actually backs (REG_A/REG_B take the full word).
	 */
	if (reg != REG_A && reg != REG_B) {
		struct mlx5_priv *priv = dev->data->dev_private;
		nic_mask = priv->sh->dv_meta_mask;
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	conf = (const struct rte_flow_action_set_meta *)action->conf;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "zero mask doesn't have any effect");
	if (conf->mask & ~nic_mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "meta data must be within reg C0");
	/* Data bits outside the mask would be silently dropped. */
	if (!(conf->data & conf->mask))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "zero value has no effect");
2058 * Validate SET_TAG action.
2061 * Pointer to the rte_eth_dev structure.
2063 * Pointer to the encap action.
2064 * @param[in] action_flags
2065 * Holds the actions detected until now.
2067 * Pointer to flow attributes
2069 * Pointer to error structure.
2072 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_set_tag *conf;
	/* Fate actions after which a set_tag can never be observed. */
	const uint64_t terminal_action_flags =
		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
		MLX5_FLOW_ACTION_RSS;
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "extensive metadata register"
					  " isn't supported");
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	conf = (const struct rte_flow_action_set_tag *)action->conf;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "zero mask doesn't have any effect");
	/* Verify the requested tag index maps to a register. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	/* On ingress (non-transfer) a tag set after a terminal action is
	 * never visible downstream, so reject it.
	 */
	if (!attr->transfer && attr->ingress &&
	    (action_flags & terminal_action_flags))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set_tag has no effect"
					  " with terminal actions");
2114 * Validate count action.
2119 * Pointer to error structure.
2122 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	/* DevX is required for flow counters. */
	if (!priv->config.devx)
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	/* Reached when the build lacks DevX counters or DevX is disabled. */
	return rte_flow_error_set
		      RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		      "count action not supported");
2144 * Validate the L2 encap action.
2146 * @param[in] action_flags
2147 * Holds the actions detected until now.
2149 * Pointer to the encap action.
2151 * Pointer to flow attributes
2153 * Pointer to error structure.
2156 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* Only one encap or decap action is allowed per flow. */
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	/* Plain (non-transfer) ingress rules cannot encapsulate. */
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
2183 * Validate the L2 decap action.
2185 * @param[in] action_flags
2186 * Holds the actions detected until now.
2188 * Pointer to flow attributes
2190 * Pointer to error structure.
2193 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
	/* Only one encap or decap action is allowed per flow. */
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	/* Header-modify actions must precede decap, not follow it. */
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  "decap action not supported for "
2220 * Validate the raw encap action.
2222 * @param[in] action_flags
2223 * Holds the actions detected until now.
2225 * Pointer to the encap action.
2227 * Pointer to flow attributes
2229 * Pointer to error structure.
2232 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	const struct rte_flow_action_raw_encap *raw_encap =
		(const struct rte_flow_action_raw_encap *)action->conf;
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* Only a single encapsulation action per flow. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (!attr->transfer &&  attr->ingress &&
	    !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  "encap action not supported for "
	/* An empty buffer would produce a zero-length reformat. */
	if (!raw_encap->size || !raw_encap->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "raw encap data cannot be empty");
2267 * Validate the raw decap action.
2269 * @param[in] action_flags
2270 * Holds the actions detected until now.
2272 * Pointer to the encap action.
2274 * Pointer to flow attributes
2276 * Pointer to error structure.
2279 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
	const struct rte_flow_action_raw_decap *decap = action->conf;
	/* Decap must come before any encap, and only one decap per flow. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	/* decap action is valid on egress only if it is followed by encap */
	/* Sizes beyond MLX5_ENCAPSULATION_DECISION_SIZE (eth + ipv4 headers)
	 * indicate stripping more than an L2 header.
	 */
	if (attr->egress && decap &&
	    decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL, "decap action not supported"
	} else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
		   (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "can't have decap action "
					  "after modify action");
2318 * Find existing encap/decap resource or create and register a new one.
2320 * @param[in, out] dev
2321 * Pointer to rte_eth_dev structure.
2322 * @param[in, out] resource
2323 * Pointer to encap/decap resource.
2324 * @parm[in, out] dev_flow
2325 * Pointer to the dev_flow.
2327 * pointer to error structure.
2330 * 0 on success otherwise -errno and errno is set.
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct mlx5dv_dr_domain *domain;
	/* Group 0 maps to root-table flags (flags = 1), others to 0. */
	resource->flags = dev_flow->group ? 0 : 1;
	/* Pick the DR domain matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
		domain = sh->tx_domain;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			/* Cache hit: bump refcount and reuse the action. */
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		/* Free the cache entry on action-creation failure. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
/**
 * Find existing table jump resource or create and register a new one.
 *
 * The jump action is embedded in the table data entry that owns @p tbl,
 * so "registering" means lazily creating the DR action and taking a
 * reference on it.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] tbl
 *   Pointer to flow table resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev __rte_unused,
			 struct mlx5_flow_tbl_resource *tbl,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	/* Recover the table data entry embedding @p tbl. */
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);

	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
		/* No user yet: create the DR jump-to-table action. */
		tbl_data->jump.action =
			mlx5_glue->dr_create_flow_action_dest_flow_tbl
		if (!tbl_data->jump.action)
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						  NULL, "cannot create jump action");
		DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
			(void *)&tbl_data->jump, cnt);
		/* Already referenced: the action must have been created. */
		assert(tbl_data->jump.action);
		DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
			(void *)&tbl_data->jump, cnt);
	/* Take a reference for this flow and expose the jump resource. */
	rte_atomic32_inc(&tbl_data->jump.refcnt);
	dev_flow->dv.jump = &tbl_data->jump;
/**
 * Find existing table port ID resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to port ID action resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_port_id_action_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_port_id_action_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_port_id_action_resource *cache_resource;

	/* Lookup a matching resource from cache (keyed by port_id only). */
	LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
		if (resource->port_id == cache_resource->port_id) {
			DRV_LOG(DEBUG, "port id action resource resource %p: "
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Hit: reuse the cached action, bump its refcount. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.port_id_action = cache_resource;
	/* Register new port id action resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/*
	 * Depending on rdma_core version the glue routine calls
	 * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
	 * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
	 */
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_port
					 (priv->sh->fdb_domain, resource->port_id);
	if (!cache_resource->action) {
		/* Action creation failed: free the half-built cache entry. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	/* Publish the new entry with an initial reference for this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
	dev_flow->dv.port_id_action = cache_resource;
	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
/**
 * Find existing push vlan resource or create and register a new one.
 *
 * @param [in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to push-VLAN action resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
flow_dv_push_vlan_action_resource_register
		       (struct rte_eth_dev *dev,
			struct mlx5_flow_dv_push_vlan_action_resource *resource,
			struct mlx5_flow *dev_flow,
			struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
	struct mlx5dv_dr_domain *domain;

	/* Lookup a matching resource from cache (tag + table type). */
	LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
		if (resource->vlan_tag == cache_resource->vlan_tag &&
		    resource->ft_type == cache_resource->ft_type) {
			DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Hit: reuse the cached action, bump its refcount. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.push_vlan_res = cache_resource;
	/* Register new push_vlan action resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Select the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
		domain = sh->tx_domain;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_push_vlan(domain,
							   resource->vlan_tag);
	if (!cache_resource->action) {
		/* Action creation failed: free the half-built cache entry. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	/* Publish the new entry with an initial reference for this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
	dev_flow->dv.push_vlan_res = cache_resource;
	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
/**
 * Get the size of specific rte_flow_item_type
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
	/* Map each supported item type to its spec structure size. */
	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/*
 * Default header field values applied by flow_dv_convert_encap_data()
 * when the corresponding item spec leaves the field as zero.
 */
#define MLX5_ENCAP_IPV4_VERSION		0x40 /* IPv4 in high nibble of version_ihl. */
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05 /* Minimal IHL: 5 x 32-bit words. */
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40 /* Default TTL of 64. */
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000 /* Version 6, zero TC/flow label. */
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff /* Maximum hop limit. */
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000 /* VXLAN "I" (valid VNI) flag, BE32 layout. */
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04 /* VXLAN-GPE "P" (next-protocol) flag. */
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Items are copied back to back into @p buf; empty protocol fields
 * (EtherType, IP version/TTL, next-protocol, UDP ports, tunnel flags)
 * are filled with sensible defaults so the buffer forms a valid header
 * chain for the packet-reformat action.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
	/* Pointers to the last seen header of each kind inside buf. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t temp_size = 0;

		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		/* Append the item spec to buf, guarding total length. */
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			/* A VLAN header must follow an Ethernet header. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			/* IPv4 must follow Ethernet or VLAN. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
			/* Default EtherType and IPv4 version/TTL if unset. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			/* IPv6 must follow Ethernet or VLAN. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
			/* Default EtherType and IPv6 version/hop-limit. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			if (!ipv6->vtc_flow)
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			/* UDP must follow an IPv4 or IPv6 header. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			/* VXLAN must follow a UDP header. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			/* VXLAN-GPE must follow a UDP header. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE carries an explicit next-protocol; it is required. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE needs an explicit protocol and a preceding IP header. */
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
		case RTE_FLOW_ITEM_TYPE_VOID:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header.
 *
 * Walks the raw encap buffer: Ethernet, optional VLAN tags, then L3.
 * IPv4 is left untouched (HW computes its checksum); for IPv6 + UDP the
 * UDP checksum is cleared. Non-IP payloads are rejected.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at Ethernet.
 * @param[out] error
 *   Pointer to the error structure.
 */
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;

	eth = (struct rte_ether_hdr *)data;
	next_hdr = (char *)(eth + 1);
	proto = RTE_BE16(eth->ether_type);

	/* VLAN skipping: advance past stacked 802.1Q / 802.1ad tags. */
	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
		vlan = (struct rte_vlan_hdr *)next_hdr;
		proto = RTE_BE16(vlan->eth_proto);
		next_hdr += sizeof(struct rte_vlan_hdr);

	/* HW calculates IPv4 csum. no need to proceed */
	if (proto == RTE_ETHER_TYPE_IPV4)

	/* non IPv4/IPv6 header. not supported */
	if (proto != RTE_ETHER_TYPE_IPV6) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "Cannot offload non IPv4/IPv6");

	ipv6 = (struct rte_ipv6_hdr *)next_hdr;
	/* ignore non UDP */
	if (ipv6->proto != IPPROTO_UDP)
	/* Clear the checksum so HW/peer treats it as "not computed". */
	udp = (struct rte_udp_hdr *)(ipv6 + 1);
	udp->dgram_cksum = 0;
/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	/* L2-to-L2 tunnel reformat; table type follows flow direction. */
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,

	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		/* Raw encap: buffer comes pre-built from the application. */
			(const struct rte_flow_action_raw_encap *)action->conf;
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
		if (flow_dv_zero_encap_udp_csum(res.buf, error))
		/* VXLAN/NVGRE encap: build the buffer from the item list. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
				((const struct rte_flow_action_vxlan_encap *)
							   action->conf)->definition;
				((const struct rte_flow_action_nvgre_encap *)
							   action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
			       struct mlx5_flow *dev_flow,
			       struct rte_flow_error *error)
	/* Tunnel-to-L2 reformat; decap happens on FDB or NIC RX tables. */
	struct mlx5_flow_dv_encap_decap_resource res = {
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,

	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 decap action");
/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_raw_encap *encap_data;
	struct mlx5_flow_dv_encap_decap_resource res;

	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
	res.size = encap_data->size;
	memcpy(res.buf, encap_data->data, res.size);
	/*
	 * A buffer smaller than eth+ipv4 cannot be a full L2 tunnel header,
	 * so it is treated as decap (L3 tunnel to L2), otherwise as encap.
	 */
	res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
	/* Pick table type from E-Switch/direction attributes. */
		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create encap action");
/**
 * Create action push VLAN.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] vlan
 *   The VLAN header (TPID + TCI) to push onto the Ethernet header.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_vlan_hdr *vlan,
				struct mlx5_flow *dev_flow,
				struct rte_flow_error *error)
	struct mlx5_flow_dv_push_vlan_action_resource res;

	/* Pack TPID (high 16 bits) and TCI (low 16 bits) big-endian. */
		rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
	/* Pick table type from E-Switch/direction attributes. */
		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	return flow_dv_push_vlan_action_resource_register
					       (dev, &res, dev_flow, error);
/**
 * Validate the modify-header actions.
 *
 * Common checks shared by all modify-field validators: the action must
 * carry a configuration (except DEC_TTL) and must not follow an encap.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
	/* DEC_TTL is the only modify action allowed without a conf. */
	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	/* Header fields are no longer addressable after encapsulation. */
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting MAC addresses requires an L2 item in the pattern. */
		if (!(item_flags & MLX5_FLOW_LAYER_L2))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no L2 item in pattern");
/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting IPv4 addresses requires an IPv4 item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv4 item in pattern");
/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting IPv6 addresses requires an IPv6 item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv6 item in pattern");
/**
 * Validate the modify-header TP (transport-port) actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting ports requires an L4 (TCP/UDP) item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no transport layer "
/**
 * Validate the modify-header actions of increment/decrement
 * TCP Sequence-number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Sequence-number rewrite only makes sense on TCP traffic. */
		if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no TCP item in"
		/* INC and DEC of the sequence number are mutually exclusive. */
		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
			(action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "cannot decrease and increase"
						  " TCP sequence number"
						  " at the same time");
/**
 * Validate the modify-header actions of increment/decrement
 * TCP Acknowledgment number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* ACK-number rewrite only makes sense on TCP traffic. */
		if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "no TCP item in"
		/* INC and DEC of the ACK number are mutually exclusive. */
		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
			(action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "cannot decrease and increase"
						  " TCP acknowledgment number"
						  " at the same time");
/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* TTL/hop-limit rewrite requires an L3 (IPv4/IPv6) item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L3))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no IP protocol in pattern");
/**
 * Validate jump action.
 *
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] external
 *   Action belongs to flow rule created by request external to PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_jump(const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attributes,
			     bool external, struct rte_flow_error *error)
	uint32_t target_group, table;

	/* Jump is a fate action: it cannot coexist with another fate. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "jump with meter not support");
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	/* Translate the user-visible group to an internal table id. */
		((const struct rte_flow_action_jump *)action->conf)->group;
	ret = mlx5_flow_group_to_table(attributes, external, target_group,
	/* Jumping to the flow's own group would loop the pipeline. */
	if (attributes->group == target_group)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be other than"
					  " the current flow group");
/**
 * Validate the port_id action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Port_id RTE action structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	const struct rte_flow_action_port_id *port_id;
	struct mlx5_priv *act_priv;
	struct mlx5_priv *dev_priv;

	/* port_id is only meaningful on E-Switch (transfer) rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "port id action is valid in transfer"
	if (!action || !action->conf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "port id action parameters must be"
	/* port_id is a fate action: it cannot coexist with another fate. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate actions in"
	dev_priv = mlx5_dev_to_eswitch_info(dev);
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "failed to obtain E-Switch info");
	port_id = action->conf;
	/* 'original' means forward to this device's own port. */
	port = port_id->original ? dev->data->port_id : port_id->id;
	act_priv = mlx5_port_to_eswitch_info(port, false);
		return rte_flow_error_set
				(RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
				 "failed to obtain E-Switch port id for port");
	/* Both ports must belong to the same E-Switch domain. */
	if (act_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set
				(RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "port does not belong to"
				 " E-Switch being configured");
/**
 * Get the maximum number of modify header actions.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 * @param flags
 *   Flags bits to check if root level.
 *
 * @return
 *   Max number of modify header actions device can support.
 */
flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev, uint64_t flags)
	/*
	 * There's no way to directly query the max cap. Although it has to be
	 * acquried by iterative trial, it is a safe assumption that more
	 * actions are supported by FW if extensive metadata register is
	 * supported. (Only in the root table)
	 */
	if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
		return MLX5_MAX_MODIFY_NUM;
		return mlx5_flow_ext_mreg_supported(dev) ?
					MLX5_ROOT_TBL_MODIFY_NUM :
					MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG;
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter *fm;

		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only one meter per flow, and meter cannot mix with jump. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "meter action not supported");
	/* The referenced meter must exist. */
	fm = mlx5_flow_meter_find(priv, am->mtr_id);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
	/*
	 * A meter already in use must keep consistent direction/transfer
	 * attributes across all flows referencing it.
	 */
	if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
	      (!fm->attr.ingress && !attr->ingress && attr->egress) ||
	      (!fm->attr.egress && !attr->egress && attr->ingress))))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes are either invalid "
					  "or have a conflict with current "
					  "meter attributes");
/**
 * Validate the modify-header IPv4 DSCP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
					 const struct rte_flow_action *action,
					 const uint64_t item_flags,
					 struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting the IPv4 DSCP requires an IPv4 item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv4 item in pattern");
/**
 * Validate the modify-header IPv6 DSCP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
					 const struct rte_flow_action *action,
					 const uint64_t item_flags,
					 struct rte_flow_error *error)

	/* Run the generic modify-header checks first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
		/* Rewriting the IPv6 DSCP requires an IPv6 item. */
		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "no ipv6 item in pattern");
3615 * Find existing modify-header resource or create and register a new one.
3617 * @param[in, out] dev
3618 * Pointer to rte_eth_dev structure.
3619 * @param[in, out] resource
3620 * Pointer to modify-header resource.
3621 * @param[in, out] dev_flow
3622 * Pointer to the dev_flow.
3624 * pointer to error structure.
3627 * 0 on success otherwise -errno and errno is set.
3630 flow_dv_modify_hdr_resource_register
3631 (struct rte_eth_dev *dev,
3632 struct mlx5_flow_dv_modify_hdr_resource *resource,
3633 struct mlx5_flow *dev_flow,
3634 struct rte_flow_error *error)
3636 struct mlx5_priv *priv = dev->data->dev_private;
3637 struct mlx5_ibv_shared *sh = priv->sh;
3638 struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3639 struct mlx5dv_dr_domain *ns;
3640 uint32_t actions_len;
/* Group 0 flows must use the root-level flag for the DR action. */
3643 dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
/* Reject requests that exceed the device/table limit of header edits. */
3644 if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3646 return rte_flow_error_set(error, EOVERFLOW,
3647 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3648 "too many modify header items");
/* Pick the DR domain matching the target table type (FDB/TX/RX). */
3649 if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3650 ns = sh->fdb_domain;
3651 else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
3655 /* Lookup a matching resource from cache. */
3656 actions_len = resource->actions_num * sizeof(resource->actions[0]);
3657 LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
/*
 * A cached entry matches only when table type, action count, flags
 * and the raw action bytes are all identical.
 */
3658 if (resource->ft_type == cache_resource->ft_type &&
3659 resource->actions_num == cache_resource->actions_num &&
3660 resource->flags == cache_resource->flags &&
3661 !memcmp((const void *)resource->actions,
3662 (const void *)cache_resource->actions,
3664 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
3665 (void *)cache_resource,
3666 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: bump the reference count and reuse the entry. */
3667 rte_atomic32_inc(&cache_resource->refcnt);
3668 dev_flow->dv.modify_hdr = cache_resource;
3672 /* Register new modify-header resource. */
/* Actions are stored inline after the struct (single allocation). */
3673 cache_resource = rte_calloc(__func__, 1,
3674 sizeof(*cache_resource) + actions_len, 0);
3675 if (!cache_resource)
3676 return rte_flow_error_set(error, ENOMEM,
3677 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3678 "cannot allocate resource memory");
3679 *cache_resource = *resource;
3680 rte_memcpy(cache_resource->actions, resource->actions, actions_len);
3681 cache_resource->verbs_action =
3682 mlx5_glue->dv_create_flow_action_modify_header
3683 (sh->ctx, cache_resource->ft_type, ns,
3684 cache_resource->flags, actions_len,
3685 (uint64_t *)cache_resource->actions);
3686 if (!cache_resource->verbs_action) {
/* Creation failed: release the just-allocated cache entry. */
3687 rte_free(cache_resource);
3688 return rte_flow_error_set(error, ENOMEM,
3689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3690 NULL, "cannot create action");
/* New entry starts with refcnt 1 and is published in the shared list. */
3692 rte_atomic32_init(&cache_resource->refcnt);
3693 rte_atomic32_inc(&cache_resource->refcnt);
3694 LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
3695 dev_flow->dv.modify_hdr = cache_resource;
3696 DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
3697 (void *)cache_resource,
3698 rte_atomic32_read(&cache_resource->refcnt));
3702 #define MLX5_CNT_CONTAINER_RESIZE 64
3705 * Get or create a flow counter.
3708 * Pointer to the Ethernet device structure.
3710 * Indicate if this counter is shared with other flows.
3712 * Counter identifier.
3715 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
3717 static struct mlx5_flow_counter *
3718 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
3721 struct mlx5_priv *priv = dev->data->dev_private;
3722 struct mlx5_flow_counter *cnt = NULL;
3723 struct mlx5_devx_obj *dcs = NULL;
/* The fallback allocator needs DevX; bail out with ENOTSUP otherwise. */
3725 if (!priv->config.devx) {
3726 rte_errno = ENOTSUP;
/* Reuse an already-allocated shared counter with the same ID. */
3730 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
3731 if (cnt->shared && cnt->id == id) {
/* Allocate a single (non-bulk) DevX counter object. */
3737 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3740 cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
/*
 * Fix: on this error path 'cnt' is NULL or freshly zeroed, so
 * 'cnt->dcs' was a NULL dereference (or destroyed NULL and leaked
 * the real object). The object to release is the local 'dcs'.
 */
3742 claim_zero(mlx5_devx_cmd_destroy(dcs));
3746 struct mlx5_flow_counter tmpl = {
3752 tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
/* Same fix here: 'cnt' does not yet own the DevX object, 'dcs' does. */
3754 claim_zero(mlx5_devx_cmd_destroy(dcs));
3760 TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
3765 * Release a flow counter.
3768 * Pointer to the Ethernet device structure.
3769 * @param[in] counter
3770 * Pointer to the counter handler.
3773 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
3774 struct mlx5_flow_counter *counter)
3776 struct mlx5_priv *priv = dev->data->dev_private;
/*
 * Drop one reference; on the last release unlink the counter from the
 * shared list and destroy its DevX object.
 * NOTE(review): ref_cnt is not atomic here — presumably callers
 * serialize releases; confirm against the control-path locking model.
 */
3780 if (--counter->ref_cnt == 0) {
3781 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
3782 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
3788 * Query a devx flow counter.
3791 * Pointer to the Ethernet device structure.
3793 * Pointer to the flow counter.
3795 * The statistics value of packets.
3797 * The statistics value of bytes.
3800 * 0 on success, otherwise a negative errno value and rte_errno is set.
3803 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
3804 struct mlx5_flow_counter *cnt, uint64_t *pkts,
/* Synchronous per-counter DevX query (no batch/bulk readout). */
3807 return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
3812 * Get a pool by a counter.
3815 * Pointer to the counter.
3820 static struct mlx5_flow_counter_pool *
3821 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
/*
 * Counters of a pool are laid out contiguously right after the pool
 * struct (see flow_dv_pool_create: rte_calloc of sizeof(*pool) +
 * MLX5_COUNTERS_PER_POOL counters). Step back to element 0 of the
 * array using the DevX ID offset, then back one pool struct.
 */
3824 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
3825 return (struct mlx5_flow_counter_pool *)cnt - 1;
3831 * Get a pool by devx counter ID.
3834 * Pointer to the counter container.
3836 * The counter devx ID.
3839 * The counter pool pointer if exists, NULL otherwise,
3841 static struct mlx5_flow_counter_pool *
3842 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
3844 struct mlx5_flow_counter_pool *pool;
/*
 * Each pool covers one aligned window of MLX5_COUNTERS_PER_POOL DevX
 * IDs; derive the window base from min_dcs and test membership.
 */
3846 TAILQ_FOREACH(pool, &cont->pool_list, next) {
3847 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
3848 MLX5_COUNTERS_PER_POOL;
3850 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
3857 * Allocate a new memory for the counter values wrapped by all the needed
3861 * Pointer to the Ethernet device structure.
3863 * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
3866 * The new memory management pointer on success, otherwise NULL and rte_errno
3869 static struct mlx5_counter_stats_mem_mng *
3870 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
3872 struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
3873 (dev->data->dev_private))->sh;
3874 struct mlx5_devx_mkey_attr mkey_attr;
3875 struct mlx5_counter_stats_mem_mng *mem_mng;
3876 volatile struct flow_counter_stats *raw_data;
/*
 * One page-aligned allocation holds, in order: the raw counter data
 * for all raws, the raw descriptor array, and the mem_mng struct
 * itself at the very end.
 */
3877 int size = (sizeof(struct flow_counter_stats) *
3878 MLX5_COUNTERS_PER_POOL +
3879 sizeof(struct mlx5_counter_stats_raw)) * raws_n +
3880 sizeof(struct mlx5_counter_stats_mem_mng);
3881 uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
/* mem_mng lives at the tail of the allocation. */
3888 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
/* From here 'size' is re-purposed: bytes of raw counter data only. */
3889 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
/* Register the data region so the device can DMA counter values in. */
3890 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
3891 IBV_ACCESS_LOCAL_WRITE);
3892 if (!mem_mng->umem) {
3897 mkey_attr.addr = (uintptr_t)mem;
3898 mkey_attr.size = size;
3899 mkey_attr.umem_id = mem_mng->umem->umem_id;
3900 mkey_attr.pd = sh->pdn;
3901 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
/* mkey creation failed: undo the umem registration. */
3903 mlx5_glue->devx_umem_dereg(mem_mng->umem);
/* Raw descriptors follow the raw data; wire each to its data slice. */
3908 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3909 raw_data = (volatile struct flow_counter_stats *)mem;
3910 for (i = 0; i < raws_n; ++i) {
3911 mem_mng->raws[i].mem_mng = mem_mng;
3912 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3914 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3919 * Resize a counter container.
3922 * Pointer to the Ethernet device structure.
3924 * Whether the pool is for counter that was allocated by batch command.
3927 * The new container pointer on success, otherwise NULL and rte_errno is set.
3929 static struct mlx5_pools_container *
3930 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3932 struct mlx5_priv *priv = dev->data->dev_private;
3933 struct mlx5_pools_container *cont =
3934 MLX5_CNT_CONTAINER(priv->sh, batch, 0);
/* Containers are double-buffered; build the new one in the spare slot. */
3935 struct mlx5_pools_container *new_cont =
3936 MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3937 struct mlx5_counter_stats_mem_mng *mem_mng;
3938 uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3939 uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
3942 if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3943 /* The last resize still hasn't been detected by the host thread. */
3947 new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3948 if (!new_cont->pools) {
/* Carry the existing pool pointers over into the larger array. */
3953 memcpy(new_cont->pools, cont->pools, cont->n *
3954 sizeof(struct mlx5_flow_counter_pool *));
/* Extra raws cover the in-flight asynchronous queries. */
3955 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3956 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3958 rte_free(new_cont->pools);
3961 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3962 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3963 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3965 new_cont->n = resize;
3966 rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3967 TAILQ_INIT(&new_cont->pool_list);
3968 TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3969 new_cont->init_mem_mng = mem_mng;
3971 /* Flip the master container. */
3972 priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
3977 * Query a devx flow counter.
3980 * Pointer to the Ethernet device structure.
3982 * Pointer to the flow counter.
3984 * The statistics value of packets.
3986 * The statistics value of bytes.
3989 * 0 on success, otherwise a negative errno value and rte_errno is set.
3992 _flow_dv_query_count(struct rte_eth_dev *dev,
3993 struct mlx5_flow_counter *cnt, uint64_t *pkts,
3996 struct mlx5_priv *priv = dev->data->dev_private;
3997 struct mlx5_flow_counter_pool *pool =
3998 flow_dv_counter_pool_get(cnt);
/* Index of this counter inside its pool's contiguous array. */
3999 int offset = cnt - &pool->counters_raw[0];
/* Old firmware: query each counter synchronously via DevX. */
4001 if (priv->counter_fallback)
4002 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
/* The lock guards against the host thread swapping pool->raw. */
4004 rte_spinlock_lock(&pool->sl);
4006 * The single counters allocation may allocate smaller ID than the
4007 * current allocated in parallel to the host reading.
4008 * In this case the new counter values must be reported as 0.
4010 if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
/* Values arrive big-endian from the device DMA buffer. */
4014 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4015 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4017 rte_spinlock_unlock(&pool->sl);
4022 * Create and initialize a new counter pool.
4025 * Pointer to the Ethernet device structure.
4027 * The devX counter handle.
4029 * Whether the pool is for counter that was allocated by batch command.
4032 * A new pool pointer on success, NULL otherwise and rte_errno is set.
4034 static struct mlx5_flow_counter_pool *
4035 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4038 struct mlx5_priv *priv = dev->data->dev_private;
4039 struct mlx5_flow_counter_pool *pool;
4040 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4042 int16_t n_valid = rte_atomic16_read(&cont->n_valid);
/* Container full: grow it (double-buffered resize) before adding. */
4045 if (cont->n == n_valid) {
4046 cont = flow_dv_container_resize(dev, batch);
/* Pool struct and its counter array share a single allocation. */
4050 size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
4051 sizeof(struct mlx5_flow_counter);
4052 pool = rte_calloc(__func__, 1, size, 0);
4057 pool->min_dcs = dcs;
/* Assign the pool its slice of the container's raw statistics area. */
4058 pool->raw = cont->init_mem_mng->raws + n_valid %
4059 MLX5_CNT_CONTAINER_RESIZE;
4060 pool->raw_hw = NULL;
4061 rte_spinlock_init(&pool->sl);
4063 * The generation of the new allocated counters in this pool is 0, 2 in
4064 * the pool generation makes all the counters valid for allocation.
4066 rte_atomic64_set(&pool->query_gen, 0x2);
4067 TAILQ_INIT(&pool->counters);
4068 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4069 cont->pools[n_valid] = pool;
4070 /* Pool initialization must be updated before host thread access. */
4072 rte_atomic16_add(&cont->n_valid, 1);
4077 * Prepare a new counter and/or a new counter pool.
4080 * Pointer to the Ethernet device structure.
4081 * @param[out] cnt_free
4082 * Where to put the pointer of a new counter.
4084 * Whether the pool is for counter that was allocated by batch command.
4087 * The free counter pool pointer and @p cnt_free is set on success,
4088 * NULL otherwise and rte_errno is set.
4090 static struct mlx5_flow_counter_pool *
4091 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4092 struct mlx5_flow_counter **cnt_free,
4095 struct mlx5_priv *priv = dev->data->dev_private;
4096 struct mlx5_flow_counter_pool *pool;
4097 struct mlx5_devx_obj *dcs = NULL;
4098 struct mlx5_flow_counter *cnt;
/* Single-counter path (non-batch container). */
4102 /* bulk_bitmap must be 0 for single counter allocation. */
4103 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
/* The new ID may land in an existing pool's ID window. */
4106 pool = flow_dv_find_pool_by_id
4107 (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
4109 pool = flow_dv_pool_create(dev, dcs, batch);
4111 mlx5_devx_cmd_destroy(dcs);
/* Track the smallest DevX object of the pool (atomic publish). */
4114 } else if (dcs->id < pool->min_dcs->id) {
4115 rte_atomic64_set(&pool->a64_dcs,
4116 (int64_t)(uintptr_t)dcs);
4118 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
4119 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
/* Batch path: allocate a whole pool of counters in one command. */
4124 /* bulk_bitmap is in 128 counters units. */
4125 if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4126 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4128 rte_errno = ENODATA;
4131 pool = flow_dv_pool_create(dev, dcs, batch);
4133 mlx5_devx_cmd_destroy(dcs);
/* Put every counter of the new pool on its free list. */
4136 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4137 cnt = &pool->counters_raw[i];
4139 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4141 *cnt_free = &pool->counters_raw[0];
4146 * Search for an existing shared counter.
4149 * Pointer to the relevant counter pool container.
4151 * The shared counter ID to search.
4154 * NULL if it does not exist, otherwise a pointer to the shared counter.
4156 static struct mlx5_flow_counter *
4157 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
/*
 * Fix: 'cnt' was declared 'static', giving this scratch pointer
 * file-scope lifetime for no benefit and making concurrent searches
 * race on a shared variable. A plain automatic variable is correct.
 */
4160 struct mlx5_flow_counter *cnt;
4161 struct mlx5_flow_counter_pool *pool;
/* Linear scan: every counter of every pool in the container. */
4164 TAILQ_FOREACH(pool, &cont->pool_list, next) {
4165 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4166 cnt = &pool->counters_raw[i];
/* Only in-use counters marked shared with a matching ID qualify. */
4167 if (cnt->ref_cnt && cnt->shared && cnt->id == id)
4175 * Allocate a flow counter.
4178 * Pointer to the Ethernet device structure.
4180 * Indicate if this counter is shared with other flows.
4182 * Counter identifier.
4184 * Counter flow group.
4187 * pointer to flow counter on success, NULL otherwise and rte_errno is set.
4189 static struct mlx5_flow_counter *
4190 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4193 struct mlx5_priv *priv = dev->data->dev_private;
4194 struct mlx5_flow_counter_pool *pool = NULL;
4195 struct mlx5_flow_counter *cnt_free = NULL;
4197 * Currently group 0 flow counter cannot be assigned to a flow if it is
4198 * not the first one in the batch counter allocation, so it is better
4199 * to allocate counters one by one for these flows in a separate
4201 * A counter can be shared between different groups so need to take
4202 * shared counters from the single container.
4204 uint32_t batch = (group && !shared) ? 1 : 0;
4205 struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
/* Old firmware without bulk counters: use the per-counter fallback. */
4208 if (priv->counter_fallback)
4209 return flow_dv_counter_alloc_fallback(dev, shared, id);
4210 if (!priv->config.devx) {
4211 rte_errno = ENOTSUP;
/* A shared counter with the same ID may already exist — reuse it. */
4215 cnt_free = flow_dv_counter_shared_search(cont, id);
/* Guard the reference counter against wrap-around. */
4217 if (cnt_free->ref_cnt + 1 == 0) {
4221 cnt_free->ref_cnt++;
4225 /* Pools which have free counters are at the start. */
4226 TAILQ_FOREACH(pool, &cont->pool_list, next) {
4228 * The free counter reset values must be updated between the
4229 * counter release to the counter allocation, so, at least one
4230 * query must be done in this time. Ensure it by saving the
4231 * query generation in the release time.
4232 * The free list is sorted according to the generation - so if
4233 * the first one is not updated, all the others are not
4236 cnt_free = TAILQ_FIRST(&pool->counters);
4237 if (cnt_free && cnt_free->query_gen + 1 <
4238 rte_atomic64_read(&pool->query_gen))
/* No reusable counter found: allocate a new one (maybe a new pool). */
4243 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
4247 cnt_free->batch = batch;
4248 /* Create a DV counter action only in the first time usage. */
4249 if (!cnt_free->action) {
4251 struct mlx5_devx_obj *dcs;
/* Batch counters index into the pool's bulk DevX object. */
4254 offset = cnt_free - &pool->counters_raw[0];
4255 dcs = pool->min_dcs;
4258 dcs = cnt_free->dcs;
4260 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
4262 if (!cnt_free->action) {
4267 /* Update the counter reset values. */
4268 if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
4271 cnt_free->shared = shared;
4272 cnt_free->ref_cnt = 1;
4274 if (!priv->sh->cmng.query_thread_on)
4275 /* Start the asynchronous batch query by the host thread. */
4276 mlx5_set_query_alarm(priv->sh);
4277 TAILQ_REMOVE(&pool->counters, cnt_free, next);
4278 if (TAILQ_EMPTY(&pool->counters)) {
4279 /* Move the pool to the end of the container pool list. */
4280 TAILQ_REMOVE(&cont->pool_list, pool, next);
4281 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4287 * Release a flow counter.
4290 * Pointer to the Ethernet device structure.
4291 * @param[in] counter
4292 * Pointer to the counter handler.
4295 flow_dv_counter_release(struct rte_eth_dev *dev,
4296 struct mlx5_flow_counter *counter)
4298 struct mlx5_priv *priv = dev->data->dev_private;
/* Old firmware: delegate to the per-counter fallback release. */
4302 if (priv->counter_fallback) {
4303 flow_dv_counter_release_fallback(dev, counter);
/*
 * On the last reference, return the counter to its pool's free list.
 * It is NOT destroyed: it stays cached for reuse once a fresh query
 * generation has refreshed its reset values (see flow_dv_counter_alloc).
 */
4306 if (--counter->ref_cnt == 0) {
4307 struct mlx5_flow_counter_pool *pool =
4308 flow_dv_counter_pool_get(counter);
4310 /* Put the counter in the end - the last updated one. */
4311 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
4312 counter->query_gen = rte_atomic64_read(&pool->query_gen);
4317 * Verify the @p attributes will be correctly understood by the NIC and store
4318 * them in the @p flow if everything is correct.
4321 * Pointer to dev struct.
4322 * @param[in] attributes
4323 * Pointer to flow attributes
4324 * @param[in] external
4325 * This flow rule is created by request external to PMD.
4327 * Pointer to error structure.
4330 * 0 on success, a negative errno value otherwise and rte_errno is set.
4333 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4334 const struct rte_flow_attr *attributes,
4335 bool external __rte_unused,
4336 struct rte_flow_error *error)
4338 struct mlx5_priv *priv = dev->data->dev_private;
/* flow_prio is the count of priorities; the max usable one is count-1. */
4339 uint32_t priority_max = priv->config.flow_prio - 1;
4341 #ifndef HAVE_MLX5DV_DR
/* Without DR support only the root table (group 0) is available. */
4342 if (attributes->group)
4343 return rte_flow_error_set(error, ENOTSUP,
4344 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4346 "groups are not supported");
/* With DR: map the rte_flow group number to a hardware table index. */
4351 ret = mlx5_flow_group_to_table(attributes, external,
/* MLX5_FLOW_PRIO_RSVD means "let the PMD pick"; otherwise range-check. */
4357 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4358 attributes->priority >= priority_max)
4359 return rte_flow_error_set(error, ENOTSUP,
4360 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4362 "priority out of range");
4363 if (attributes->transfer) {
/* Transfer (E-Switch) rules need DV E-Switch support enabled. */
4364 if (!priv->config.dv_esw_en)
4365 return rte_flow_error_set
4367 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4368 "E-Switch dr is not supported");
4369 if (!(priv->representor || priv->master))
4370 return rte_flow_error_set
4371 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4372 NULL, "E-Switch configuration can only be"
4373 " done by a master or a representor device");
/* E-Switch rules are implicitly directional; egress is rejected. */
4374 if (attributes->egress)
4375 return rte_flow_error_set
4377 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4378 "egress is not supported");
/* Non-transfer rules must be exactly one of ingress/egress. */
4380 if (!(attributes->egress ^ attributes->ingress))
4381 return rte_flow_error_set(error, ENOTSUP,
4382 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4383 "must specify exactly one of "
4384 "ingress or egress");
4389 * Internal validation function. For validating both actions and items.
4392 * Pointer to the rte_eth_dev structure.
4394 * Pointer to the flow attributes.
4396 * Pointer to the list of items.
4397 * @param[in] actions
4398 * Pointer to the list of actions.
4399 * @param[in] external
4400 * This flow rule is created by request external to PMD.
4402 * Pointer to the error structure.
4405 * 0 on success, a negative errno value otherwise and rte_errno is set.
4408 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4409 const struct rte_flow_item items[],
4410 const struct rte_flow_action actions[],
4411 bool external, struct rte_flow_error *error)
4414 uint64_t action_flags = 0;
4415 uint64_t item_flags = 0;
4416 uint64_t last_item = 0;
4417 uint8_t next_protocol = 0xff;
4418 uint16_t ether_type = 0;
4420 const struct rte_flow_item *gre_item = NULL;
4421 struct rte_flow_item_tcp nic_tcp_mask = {
4424 .src_port = RTE_BE16(UINT16_MAX),
4425 .dst_port = RTE_BE16(UINT16_MAX),
4428 struct mlx5_priv *priv = dev->data->dev_private;
4429 struct mlx5_dev_config *dev_conf = &priv->config;
4433 ret = flow_dv_validate_attributes(dev, attr, external, error);
4436 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4437 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4438 int type = items->type;
4441 case RTE_FLOW_ITEM_TYPE_VOID:
4443 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4444 ret = flow_dv_validate_item_port_id
4445 (dev, items, attr, item_flags, error);
4448 last_item = MLX5_FLOW_ITEM_PORT_ID;
4450 case RTE_FLOW_ITEM_TYPE_ETH:
4451 ret = mlx5_flow_validate_item_eth(items, item_flags,
4455 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4456 MLX5_FLOW_LAYER_OUTER_L2;
4457 if (items->mask != NULL && items->spec != NULL) {
4459 ((const struct rte_flow_item_eth *)
4462 ((const struct rte_flow_item_eth *)
4464 ether_type = rte_be_to_cpu_16(ether_type);
4469 case RTE_FLOW_ITEM_TYPE_VLAN:
4470 ret = mlx5_flow_validate_item_vlan(items, item_flags,
4474 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4475 MLX5_FLOW_LAYER_OUTER_VLAN;
4476 if (items->mask != NULL && items->spec != NULL) {
4478 ((const struct rte_flow_item_vlan *)
4479 items->spec)->inner_type;
4481 ((const struct rte_flow_item_vlan *)
4482 items->mask)->inner_type;
4483 ether_type = rte_be_to_cpu_16(ether_type);
4488 case RTE_FLOW_ITEM_TYPE_IPV4:
4489 mlx5_flow_tunnel_ip_check(items, next_protocol,
4490 &item_flags, &tunnel);
4491 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
4497 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4498 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4499 if (items->mask != NULL &&
4500 ((const struct rte_flow_item_ipv4 *)
4501 items->mask)->hdr.next_proto_id) {
4503 ((const struct rte_flow_item_ipv4 *)
4504 (items->spec))->hdr.next_proto_id;
4506 ((const struct rte_flow_item_ipv4 *)
4507 (items->mask))->hdr.next_proto_id;
4509 /* Reset for inner layer. */
4510 next_protocol = 0xff;
4513 case RTE_FLOW_ITEM_TYPE_IPV6:
4514 mlx5_flow_tunnel_ip_check(items, next_protocol,
4515 &item_flags, &tunnel);
4516 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
4522 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4523 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4524 if (items->mask != NULL &&
4525 ((const struct rte_flow_item_ipv6 *)
4526 items->mask)->hdr.proto) {
4528 ((const struct rte_flow_item_ipv6 *)
4529 items->spec)->hdr.proto;
4531 ((const struct rte_flow_item_ipv6 *)
4532 items->mask)->hdr.proto;
4534 /* Reset for inner layer. */
4535 next_protocol = 0xff;
4538 case RTE_FLOW_ITEM_TYPE_TCP:
4539 ret = mlx5_flow_validate_item_tcp
4546 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4547 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4549 case RTE_FLOW_ITEM_TYPE_UDP:
4550 ret = mlx5_flow_validate_item_udp(items, item_flags,
4555 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4556 MLX5_FLOW_LAYER_OUTER_L4_UDP;
4558 case RTE_FLOW_ITEM_TYPE_GRE:
4559 ret = mlx5_flow_validate_item_gre(items, item_flags,
4560 next_protocol, error);
4564 last_item = MLX5_FLOW_LAYER_GRE;
4566 case RTE_FLOW_ITEM_TYPE_NVGRE:
4567 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
4572 last_item = MLX5_FLOW_LAYER_NVGRE;
4574 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4575 ret = mlx5_flow_validate_item_gre_key
4576 (items, item_flags, gre_item, error);
4579 last_item = MLX5_FLOW_LAYER_GRE_KEY;
4581 case RTE_FLOW_ITEM_TYPE_VXLAN:
4582 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
4586 last_item = MLX5_FLOW_LAYER_VXLAN;
4588 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4589 ret = mlx5_flow_validate_item_vxlan_gpe(items,
4594 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4596 case RTE_FLOW_ITEM_TYPE_GENEVE:
4597 ret = mlx5_flow_validate_item_geneve(items,
4602 last_item = MLX5_FLOW_LAYER_GENEVE;
4604 case RTE_FLOW_ITEM_TYPE_MPLS:
4605 ret = mlx5_flow_validate_item_mpls(dev, items,
4610 last_item = MLX5_FLOW_LAYER_MPLS;
4613 case RTE_FLOW_ITEM_TYPE_MARK:
4614 ret = flow_dv_validate_item_mark(dev, items, attr,
4618 last_item = MLX5_FLOW_ITEM_MARK;
4620 case RTE_FLOW_ITEM_TYPE_META:
4621 ret = flow_dv_validate_item_meta(dev, items, attr,
4625 last_item = MLX5_FLOW_ITEM_METADATA;
4627 case RTE_FLOW_ITEM_TYPE_ICMP:
4628 ret = mlx5_flow_validate_item_icmp(items, item_flags,
4633 last_item = MLX5_FLOW_LAYER_ICMP;
4635 case RTE_FLOW_ITEM_TYPE_ICMP6:
4636 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
4641 last_item = MLX5_FLOW_LAYER_ICMP6;
4643 case RTE_FLOW_ITEM_TYPE_TAG:
4644 ret = flow_dv_validate_item_tag(dev, items,
4648 last_item = MLX5_FLOW_ITEM_TAG;
4650 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4651 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
4653 case RTE_FLOW_ITEM_TYPE_GTP:
4654 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
4658 last_item = MLX5_FLOW_LAYER_GTP;
4661 return rte_flow_error_set(error, ENOTSUP,
4662 RTE_FLOW_ERROR_TYPE_ITEM,
4663 NULL, "item not supported");
4665 item_flags |= last_item;
4667 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4668 int type = actions->type;
4669 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4670 return rte_flow_error_set(error, ENOTSUP,
4671 RTE_FLOW_ERROR_TYPE_ACTION,
4672 actions, "too many actions");
4674 case RTE_FLOW_ACTION_TYPE_VOID:
4676 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4677 ret = flow_dv_validate_action_port_id(dev,
4684 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4687 case RTE_FLOW_ACTION_TYPE_FLAG:
4688 ret = flow_dv_validate_action_flag(dev, action_flags,
4692 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4693 /* Count all modify-header actions as one. */
4694 if (!(action_flags &
4695 MLX5_FLOW_MODIFY_HDR_ACTIONS))
4697 action_flags |= MLX5_FLOW_ACTION_FLAG |
4698 MLX5_FLOW_ACTION_MARK_EXT;
4700 action_flags |= MLX5_FLOW_ACTION_FLAG;
4704 case RTE_FLOW_ACTION_TYPE_MARK:
4705 ret = flow_dv_validate_action_mark(dev, actions,
4710 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4711 /* Count all modify-header actions as one. */
4712 if (!(action_flags &
4713 MLX5_FLOW_MODIFY_HDR_ACTIONS))
4715 action_flags |= MLX5_FLOW_ACTION_MARK |
4716 MLX5_FLOW_ACTION_MARK_EXT;
4718 action_flags |= MLX5_FLOW_ACTION_MARK;
4722 case RTE_FLOW_ACTION_TYPE_SET_META:
4723 ret = flow_dv_validate_action_set_meta(dev, actions,
4728 /* Count all modify-header actions as one action. */
4729 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4731 action_flags |= MLX5_FLOW_ACTION_SET_META;
4733 case RTE_FLOW_ACTION_TYPE_SET_TAG:
4734 ret = flow_dv_validate_action_set_tag(dev, actions,
4739 /* Count all modify-header actions as one action. */
4740 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4742 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
4744 case RTE_FLOW_ACTION_TYPE_DROP:
4745 ret = mlx5_flow_validate_action_drop(action_flags,
4749 action_flags |= MLX5_FLOW_ACTION_DROP;
4752 case RTE_FLOW_ACTION_TYPE_QUEUE:
4753 ret = mlx5_flow_validate_action_queue(actions,
4758 action_flags |= MLX5_FLOW_ACTION_QUEUE;
4761 case RTE_FLOW_ACTION_TYPE_RSS:
4762 ret = mlx5_flow_validate_action_rss(actions,
4768 action_flags |= MLX5_FLOW_ACTION_RSS;
4771 case RTE_FLOW_ACTION_TYPE_COUNT:
4772 ret = flow_dv_validate_action_count(dev, error);
4775 action_flags |= MLX5_FLOW_ACTION_COUNT;
4778 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4779 if (flow_dv_validate_action_pop_vlan(dev,
4785 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4788 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4789 ret = flow_dv_validate_action_push_vlan(action_flags,
4795 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4798 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4799 ret = flow_dv_validate_action_set_vlan_pcp
4800 (action_flags, actions, error);
4803 /* Count PCP with push_vlan command. */
4804 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
4806 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4807 ret = flow_dv_validate_action_set_vlan_vid
4808 (item_flags, action_flags,
4812 /* Count VID with push_vlan command. */
4813 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4815 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4816 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4817 ret = flow_dv_validate_action_l2_encap(action_flags,
4822 action_flags |= actions->type ==
4823 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4824 MLX5_FLOW_ACTION_VXLAN_ENCAP :
4825 MLX5_FLOW_ACTION_NVGRE_ENCAP;
4828 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4829 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4830 ret = flow_dv_validate_action_l2_decap(action_flags,
4834 action_flags |= actions->type ==
4835 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4836 MLX5_FLOW_ACTION_VXLAN_DECAP :
4837 MLX5_FLOW_ACTION_NVGRE_DECAP;
4840 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4841 ret = flow_dv_validate_action_raw_encap(action_flags,
4846 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4849 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4850 ret = flow_dv_validate_action_raw_decap(action_flags,
4855 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4858 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4859 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4860 ret = flow_dv_validate_action_modify_mac(action_flags,
4866 /* Count all modify-header actions as one action. */
4867 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4869 action_flags |= actions->type ==
4870 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4871 MLX5_FLOW_ACTION_SET_MAC_SRC :
4872 MLX5_FLOW_ACTION_SET_MAC_DST;
4875 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4876 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4877 ret = flow_dv_validate_action_modify_ipv4(action_flags,
4883 /* Count all modify-header actions as one action. */
4884 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4886 action_flags |= actions->type ==
4887 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4888 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4889 MLX5_FLOW_ACTION_SET_IPV4_DST;
4891 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4892 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4893 ret = flow_dv_validate_action_modify_ipv6(action_flags,
4899 /* Count all modify-header actions as one action. */
4900 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4902 action_flags |= actions->type ==
4903 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4904 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4905 MLX5_FLOW_ACTION_SET_IPV6_DST;
4907 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4908 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4909 ret = flow_dv_validate_action_modify_tp(action_flags,
4915 /* Count all modify-header actions as one action. */
4916 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4918 action_flags |= actions->type ==
4919 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4920 MLX5_FLOW_ACTION_SET_TP_SRC :
4921 MLX5_FLOW_ACTION_SET_TP_DST;
4923 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4924 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4925 ret = flow_dv_validate_action_modify_ttl(action_flags,
4931 /* Count all modify-header actions as one action. */
4932 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4934 action_flags |= actions->type ==
4935 RTE_FLOW_ACTION_TYPE_SET_TTL ?
4936 MLX5_FLOW_ACTION_SET_TTL :
4937 MLX5_FLOW_ACTION_DEC_TTL;
4939 case RTE_FLOW_ACTION_TYPE_JUMP:
4940 ret = flow_dv_validate_action_jump(actions,
4947 action_flags |= MLX5_FLOW_ACTION_JUMP;
4949 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4950 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4951 ret = flow_dv_validate_action_modify_tcp_seq
4958 /* Count all modify-header actions as one action. */
4959 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4961 action_flags |= actions->type ==
4962 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4963 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4964 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4966 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4967 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4968 ret = flow_dv_validate_action_modify_tcp_ack
4975 /* Count all modify-header actions as one action. */
4976 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4978 action_flags |= actions->type ==
4979 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4980 MLX5_FLOW_ACTION_INC_TCP_ACK :
4981 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4983 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
4984 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
4985 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
4987 case RTE_FLOW_ACTION_TYPE_METER:
4988 ret = mlx5_flow_validate_action_meter(dev,
4994 action_flags |= MLX5_FLOW_ACTION_METER;
4997 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
4998 ret = flow_dv_validate_action_modify_ipv4_dscp
5005 /* Count all modify-header actions as one action. */
5006 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5008 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5010 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5011 ret = flow_dv_validate_action_modify_ipv6_dscp
5018 /* Count all modify-header actions as one action. */
5019 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5021 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5024 return rte_flow_error_set(error, ENOTSUP,
5025 RTE_FLOW_ERROR_TYPE_ACTION,
5027 "action not supported");
5031 * Validate the drop action mutual exclusion with other actions.
5032 * Drop action is mutually-exclusive with any other action, except for
5035 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5036 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5037 return rte_flow_error_set(error, EINVAL,
5038 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5039 "Drop action is mutually-exclusive "
5040 "with any other action, except for "
5042 /* Eswitch has few restrictions on using items and actions */
5043 if (attr->transfer) {
5044 if (!mlx5_flow_ext_mreg_supported(dev) &&
5045 action_flags & MLX5_FLOW_ACTION_FLAG)
5046 return rte_flow_error_set(error, ENOTSUP,
5047 RTE_FLOW_ERROR_TYPE_ACTION,
5049 "unsupported action FLAG");
5050 if (!mlx5_flow_ext_mreg_supported(dev) &&
5051 action_flags & MLX5_FLOW_ACTION_MARK)
5052 return rte_flow_error_set(error, ENOTSUP,
5053 RTE_FLOW_ERROR_TYPE_ACTION,
5055 "unsupported action MARK");
5056 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5057 return rte_flow_error_set(error, ENOTSUP,
5058 RTE_FLOW_ERROR_TYPE_ACTION,
5060 "unsupported action QUEUE");
5061 if (action_flags & MLX5_FLOW_ACTION_RSS)
5062 return rte_flow_error_set(error, ENOTSUP,
5063 RTE_FLOW_ERROR_TYPE_ACTION,
5065 "unsupported action RSS");
5066 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5067 return rte_flow_error_set(error, EINVAL,
5068 RTE_FLOW_ERROR_TYPE_ACTION,
5070 "no fate action is found");
5072 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5073 return rte_flow_error_set(error, EINVAL,
5074 RTE_FLOW_ERROR_TYPE_ACTION,
5076 "no fate action is found");
5082 * Internal preparation function. Allocates the DV flow size,
5083 * this size is constant.
5086 * Pointer to the flow attributes.
5088 * Pointer to the list of items.
5089 * @param[in] actions
5090 * Pointer to the list of actions.
5092 * Pointer to the error structure.
5095 * Pointer to mlx5_flow object on success,
5096 * otherwise NULL and rte_errno is set.
5098 static struct mlx5_flow *
5099 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
5100 const struct rte_flow_item items[] __rte_unused,
5101 const struct rte_flow_action actions[] __rte_unused,
5102 struct rte_flow_error *error)
/* items/actions are unused: the DV flow object has a fixed size that
 * does not depend on the pattern or action list. */
5104 size_t size = sizeof(struct mlx5_flow);
5105 struct mlx5_flow *dev_flow;
/* Zero-initialized allocation tagged with the function name. */
5107 dev_flow = rte_calloc(__func__, 1, size, 0);
/* Allocation-failure error path. NOTE(review): the NULL check and the
 * "return NULL" appear elided from this excerpt — confirm in full file. */
5109 rte_flow_error_set(error, ENOMEM,
5110 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5111 "not enough memory to create flow");
/* Pre-size the match value buffer to the full fte_match_param and copy
 * direction/transfer attributes onto the device flow. */
5114 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
5115 dev_flow->ingress = attr->ingress;
5116 dev_flow->transfer = attr->transfer;
5122 * Sanity check for match mask and value. Similar to check_valid_spec() in
5123 * kernel driver. If unmasked bit is present in value, it returns failure.
5126 * pointer to match mask buffer.
5127 * @param match_value
5128 * pointer to match value buffer.
5131 * 0 if valid, -EINVAL otherwise.
5134 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5136 uint8_t *m = match_mask;
5137 uint8_t *v = match_value;
/* Byte-wise scan of the whole fte_match_param: any value byte carrying
 * a bit that is clear in the mask is invalid (kernel rejects it). */
5140 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
/* Diagnostic for the offending byte; NOTE(review): the enclosing
 * condition and DRV_LOG call line appear elided from this excerpt. */
5143 "match_value differs from match_criteria"
5144 " %p[%u] != %p[%u]",
5145 match_value, i, match_mask, i);
5154 * Add Ethernet item to matcher and to the value.
5156 * @param[in, out] matcher
5158 * @param[in, out] key
5159 * Flow matcher value.
5161 * Flow pattern to translate.
5163 * Item is inner pattern.
5166 flow_dv_translate_item_eth(void *matcher, void *key,
5167 const struct rte_flow_item *item, int inner)
5169 const struct rte_flow_item_eth *eth_m = item->mask;
5170 const struct rte_flow_item_eth *eth_v = item->spec;
/* Default (widest) mask applied when the item carries no mask. */
5171 const struct rte_flow_item_eth nic_mask = {
5172 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5173 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5174 .type = RTE_BE16(0xffff),
/* Select inner vs outer header block of fte_match_param. */
5186 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5188 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5190 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5192 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* NOTE(review): "ð_m" below looks like mojibake of "&eth_m"
 * (HTML entity Ð corruption) — restore '&' against the full source. */
5194 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5195 ð_m->dst, sizeof(eth_m->dst));
5196 /* The value must be in the range of the mask. */
5197 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5198 for (i = 0; i < sizeof(eth_m->dst); ++i)
5199 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5200 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5201 ð_m->src, sizeof(eth_m->src));
5202 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5203 /* The value must be in the range of the mask. */
/* sizeof(eth_m->dst) is used for the src loop too; both MAC fields
 * have the same 6-byte size, so the bound is equivalent. */
5204 for (i = 0; i < sizeof(eth_m->dst); ++i)
5205 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5206 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5207 rte_be_to_cpu_16(eth_m->type));
/* Ethertype value is written raw (big-endian) after masking. */
5208 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
5209 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5213 * Add VLAN item to matcher and to the value.
5215 * @param[in, out] dev_flow
5217 * @param[in, out] matcher
5219 * @param[in, out] key
5220 * Flow matcher value.
5222 * Flow pattern to translate.
5224 * Item is inner pattern.
5227 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5228 void *matcher, void *key,
5229 const struct rte_flow_item *item,
5232 const struct rte_flow_item_vlan *vlan_m = item->mask;
5233 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* Fall back to the generic rte_flow default VLAN mask. */
5242 vlan_m = &rte_flow_item_vlan_mask;
/* Select inner vs outer header block of fte_match_param. */
5244 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5246 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5248 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5250 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5252 * This is workaround, masks are not supported,
5253 * and pre-validated.
/* Cache the VID (low 12 bits of TCI) for the VF VLAN workaround. */
5255 dev_flow->dv.vf_vlan.tag =
5256 rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5258 tci_m = rte_be_to_cpu_16(vlan_m->tci);
5259 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Force match on the presence of a customer VLAN tag, then split the
 * TCI into VID (bits 0-11), CFI/DEI (bit 12) and PCP (bits 13-15). */
5260 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5261 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5262 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5263 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5264 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5265 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5266 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5267 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/* inner_type of the VLAN item maps to the ethertype field. */
5268 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5269 rte_be_to_cpu_16(vlan_m->inner_type));
5270 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
5271 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
5275 * Add IPV4 item to matcher and to the value.
5277 * @param[in, out] matcher
5279 * @param[in, out] key
5280 * Flow matcher value.
5282 * Flow pattern to translate.
5284 * Item is inner pattern.
5286 * The group to insert the rule.
5289 flow_dv_translate_item_ipv4(void *matcher, void *key,
5290 const struct rte_flow_item *item,
5291 int inner, uint32_t group)
5293 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
5294 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Default mask used when the item carries no explicit mask. */
5295 const struct rte_flow_item_ipv4 nic_mask = {
5297 .src_addr = RTE_BE32(0xffffffff),
5298 .dst_addr = RTE_BE32(0xffffffff),
5299 .type_of_service = 0xff,
5300 .next_proto_id = 0xff,
/* Select inner vs outer header block of fte_match_param. */
5310 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5312 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5314 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5316 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask depends on the group: 0xf for exact match,
 * 0x4 otherwise (NOTE(review): the group test line is elided here). */
5319 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5321 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
5322 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Destination, then source address; value is always masked. */
5327 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5328 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5329 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5330 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5331 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
5332 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
5333 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5334 src_ipv4_src_ipv6.ipv4_layout.ipv4);
5335 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5336 src_ipv4_src_ipv6.ipv4_layout.ipv4);
5337 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
5338 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* ToS byte split: ECN is the low 2 bits, DSCP the upper 6 (>> 2). */
5339 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
5340 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
5341 ipv4_m->hdr.type_of_service);
5342 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
5343 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
5344 ipv4_m->hdr.type_of_service >> 2);
5345 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
5346 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5347 ipv4_m->hdr.next_proto_id);
5348 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5349 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
5353 * Add IPV6 item to matcher and to the value.
5355 * @param[in, out] matcher
5357 * @param[in, out] key
5358 * Flow matcher value.
5360 * Flow pattern to translate.
5362 * Item is inner pattern.
5364 * The group to insert the rule.
5367 flow_dv_translate_item_ipv6(void *matcher, void *key,
5368 const struct rte_flow_item *item,
5369 int inner, uint32_t group)
5371 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
5372 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Default mask: full 128-bit addresses plus the whole vtc_flow word. */
5373 const struct rte_flow_item_ipv6 nic_mask = {
5376 "\xff\xff\xff\xff\xff\xff\xff\xff"
5377 "\xff\xff\xff\xff\xff\xff\xff\xff",
5379 "\xff\xff\xff\xff\xff\xff\xff\xff"
5380 "\xff\xff\xff\xff\xff\xff\xff\xff",
5381 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in the misc parameters, not the L2-L4 headers. */
5388 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5389 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header block of fte_match_param. */
5398 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5400 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5402 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5404 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* ip_version mask depends on the group (group test elided here). */
5407 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5409 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
5410 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Copy dst then src 16-byte addresses; value bytes are masked. */
5415 size = sizeof(ipv6_m->hdr.dst_addr);
5416 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5417 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5418 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5419 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5420 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
5421 for (i = 0; i < size; ++i)
5422 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
5423 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5424 src_ipv4_src_ipv6.ipv6_layout.ipv6);
5425 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5426 src_ipv4_src_ipv6.ipv6_layout.ipv6);
5427 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
5428 for (i = 0; i < size; ++i)
5429 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow layout: traffic class at bits 20-27 (ECN low 2 of it,
 * DSCP from bit 22), flow label in the low 20 bits. */
5431 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
5432 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
5433 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
5434 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
5435 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
5436 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to inner or outer misc field per 'inner'. */
5439 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
5441 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
5444 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
5446 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
5450 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5452 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5453 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
5457 * Add TCP item to matcher and to the value.
5459 * @param[in, out] matcher
5461 * @param[in, out] key
5462 * Flow matcher value.
5464 * Flow pattern to translate.
5466 * Item is inner pattern.
5469 flow_dv_translate_item_tcp(void *matcher, void *key,
5470 const struct rte_flow_item *item,
5473 const struct rte_flow_item_tcp *tcp_m = item->mask;
5474 const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs outer header block of fte_match_param. */
5479 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5481 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5483 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5485 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* TCP item implies exact match on IP protocol = TCP. */
5487 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5488 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the default TCP mask when none was given. */
5492 tcp_m = &rte_flow_item_tcp_mask;
/* Ports and flags: mask written as-is, value ANDed with mask. */
5493 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
5494 rte_be_to_cpu_16(tcp_m->hdr.src_port));
5495 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
5496 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
5497 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
5498 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
5499 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
5500 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
5501 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
5502 tcp_m->hdr.tcp_flags);
5503 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
5504 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
5508 * Add UDP item to matcher and to the value.
5510 * @param[in, out] matcher
5512 * @param[in, out] key
5513 * Flow matcher value.
5515 * Flow pattern to translate.
5517 * Item is inner pattern.
5520 flow_dv_translate_item_udp(void *matcher, void *key,
5521 const struct rte_flow_item *item,
5524 const struct rte_flow_item_udp *udp_m = item->mask;
5525 const struct rte_flow_item_udp *udp_v = item->spec;
/* Select inner vs outer header block of fte_match_param. */
5530 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5532 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5534 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5536 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* UDP item implies exact match on IP protocol = UDP. */
5538 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5539 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Fall back to the default UDP mask when none was given. */
5543 udp_m = &rte_flow_item_udp_mask;
/* Ports: mask written as-is, value ANDed with mask. */
5544 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
5545 rte_be_to_cpu_16(udp_m->hdr.src_port));
5546 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
5547 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
5548 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
5549 rte_be_to_cpu_16(udp_m->hdr.dst_port));
5550 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5551 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
5555 * Add GRE optional Key item to matcher and to the value.
5557 * @param[in, out] matcher
5559 * @param[in, out] key
5560 * Flow matcher value.
5562 * Flow pattern to translate.
5564 * Item is inner pattern.
5567 flow_dv_translate_item_gre_key(void *matcher, void *key,
5568 const struct rte_flow_item *item)
/* The GRE key item's spec/mask are bare big-endian 32-bit words. */
5570 const rte_be32_t *key_m = item->mask;
5571 const rte_be32_t *key_v = item->spec;
5572 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5573 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5574 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
/* Default to a full 32-bit key mask when none was given. */
5579 key_m = &gre_key_default_mask;
5580 /* GRE K bit must be on and should already be validated */
5581 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
5582 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
/* Hardware splits the key: high 24 bits (gre_key_h) and low 8 bits
 * (gre_key_l); value is masked before the split. */
5583 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
5584 rte_be_to_cpu_32(*key_m) >> 8);
5585 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
5586 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
5587 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
5588 rte_be_to_cpu_32(*key_m) & 0xFF);
5589 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
5590 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
5594 * Add GRE item to matcher and to the value.
5596 * @param[in, out] matcher
5598 * @param[in, out] key
5599 * Flow matcher value.
5601 * Flow pattern to translate.
5603 * Item is inner pattern.
5606 flow_dv_translate_item_gre(void *matcher, void *key,
5607 const struct rte_flow_item *item,
5610 const struct rte_flow_item_gre *gre_m = item->mask;
5611 const struct rte_flow_item_gre *gre_v = item->spec;
5614 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5615 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Bit-field view of the GRE c_rsvd0_ver word so the C/K/S flag bits
 * can be extracted individually after byte-swapping. */
5622 uint16_t s_present:1;
5623 uint16_t k_present:1;
5624 uint16_t rsvd_bit1:1;
5625 uint16_t c_present:1;
5629 } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
/* Select inner vs outer header block of fte_match_param. */
5632 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5634 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5636 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5638 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* GRE item implies exact match on IP protocol = GRE. */
5640 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5641 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Fall back to the default GRE mask when none was given. */
5645 gre_m = &rte_flow_item_gre_mask;
5646 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
5647 rte_be_to_cpu_16(gre_m->protocol));
5648 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5649 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
/* Decode C/K/S presence bits from the flags word; each value bit is
 * ANDed with the corresponding mask bit. */
5650 gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
5651 gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
5652 MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
5653 gre_crks_rsvd0_ver_m.c_present);
5654 MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
5655 gre_crks_rsvd0_ver_v.c_present &
5656 gre_crks_rsvd0_ver_m.c_present);
5657 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
5658 gre_crks_rsvd0_ver_m.k_present);
5659 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
5660 gre_crks_rsvd0_ver_v.k_present &
5661 gre_crks_rsvd0_ver_m.k_present);
5662 MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
5663 gre_crks_rsvd0_ver_m.s_present);
5664 MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
5665 gre_crks_rsvd0_ver_v.s_present &
5666 gre_crks_rsvd0_ver_m.s_present);
5670 * Add NVGRE item to matcher and to the value.
5672 * @param[in, out] matcher
5674 * @param[in, out] key
5675 * Flow matcher value.
5677 * Flow pattern to translate.
5679 * Item is inner pattern.
5682 flow_dv_translate_item_nvgre(void *matcher, void *key,
5683 const struct rte_flow_item *item,
5686 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
5687 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
5688 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5689 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* TNI + flow_id together are copied into the GRE key field below. */
5690 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
5691 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
5697 /* For NVGRE, GRE header fields must be set with defined values. */
5698 const struct rte_flow_item_gre gre_spec = {
5699 .c_rsvd0_ver = RTE_BE16(0x2000),
5700 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
5702 const struct rte_flow_item_gre gre_mask = {
5703 .c_rsvd0_ver = RTE_BE16(0xB000),
5704 .protocol = RTE_BE16(UINT16_MAX),
5706 const struct rte_flow_item gre_item = {
/* Reuse the GRE translation for the synthesized GRE item. */
5711 flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
/* Fall back to the default NVGRE mask when none was given. */
5715 nvgre_m = &rte_flow_item_nvgre_mask;
/* 3-byte TNI plus 1-byte flow_id overlay the 4-byte GRE key
 * (gre_key_h); value bytes are masked. */
5716 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
5717 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
5718 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
5719 memcpy(gre_key_m, tni_flow_id_m, size);
5720 for (i = 0; i < size; ++i)
5721 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
5725 * Add VXLAN item to matcher and to the value.
5727 * @param[in, out] matcher
5729 * @param[in, out] key
5730 * Flow matcher value.
5732 * Flow pattern to translate.
5734 * Item is inner pattern.
5737 flow_dv_translate_item_vxlan(void *matcher, void *key,
5738 const struct rte_flow_item *item,
5741 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
5742 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
5745 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5746 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header block of fte_match_param. */
5754 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5756 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5758 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5760 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Shared with VXLAN-GPE translation: pick the well-known UDP port for
 * the actual item type, but only if no UDP dport match exists yet. */
5762 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
5763 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
5764 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5765 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5766 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Fall back to the default VXLAN mask when none was given. */
5771 vxlan_m = &rte_flow_item_vxlan_mask;
/* Copy the 3-byte VNI; value bytes are masked. */
5772 size = sizeof(vxlan_m->vni);
5773 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
5774 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
5775 memcpy(vni_m, vxlan_m->vni, size);
5776 for (i = 0; i < size; ++i)
5777 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
5781 * Add Geneve item to matcher and to the value.
5783 * @param[in, out] matcher
5785 * @param[in, out] key
5786 * Flow matcher value.
5788 * Flow pattern to translate.
5790 * Item is inner pattern.
5794 flow_dv_translate_item_geneve(void *matcher, void *key,
5795 const struct rte_flow_item *item, int inner)
5797 const struct rte_flow_item_geneve *geneve_m = item->mask;
5798 const struct rte_flow_item_geneve *geneve_v = item->spec;
5801 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5802 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header block of fte_match_param. */
5811 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5813 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5815 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5817 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Set the well-known Geneve UDP port if no dport match exists yet. */
5819 dport = MLX5_UDP_PORT_GENEVE;
5820 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5821 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5822 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Fall back to the default Geneve mask when none was given. */
5827 geneve_m = &rte_flow_item_geneve_mask;
/* Copy the 3-byte VNI; value bytes are masked. */
5828 size = sizeof(geneve_m->vni);
5829 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
5830 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
5831 memcpy(vni_m, geneve_m->vni, size);
5832 for (i = 0; i < size; ++i)
5833 vni_v[i] = vni_m[i] & geneve_v->vni[i];
5834 MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
5835 rte_be_to_cpu_16(geneve_m->protocol));
5836 MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
5837 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
/* Split ver_opt_len_o_c_rsvd0 into OAM flag and option length via the
 * MLX5_GENEVE_* extraction macros; values are mask-ANDed. */
5838 gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
5839 gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
5840 MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
5841 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5842 MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
5843 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5844 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
5845 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5846 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
5847 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
5848 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5852 * Add MPLS item to matcher and to the value.
5854 * @param[in, out] matcher
5856 * @param[in, out] key
5857 * Flow matcher value.
5859 * Flow pattern to translate.
5860 * @param[in] prev_layer
5861 * The protocol layer indicated in previous item.
5863 * Item is inner pattern.
5866 flow_dv_translate_item_mpls(void *matcher, void *key,
5867 const struct rte_flow_item *item,
5868 uint64_t prev_layer,
5871 const uint32_t *in_mpls_m = item->mask;
5872 const uint32_t *in_mpls_v = item->spec;
/* Destination slots in misc2 are chosen by prev_layer below. */
5873 uint32_t *out_mpls_m = 0;
5874 uint32_t *out_mpls_v = 0;
5875 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5876 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5877 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
5879 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
5880 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
5881 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First, pin the carrier protocol implied by the previous layer:
 * UDP dport for MPLS-over-UDP, GRE ethertype for MPLS-over-GRE,
 * otherwise match the IP protocol directly. */
5883 switch (prev_layer) {
5884 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
5885 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
5886 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5887 MLX5_UDP_PORT_MPLS);
5889 case MLX5_FLOW_LAYER_GRE:
5890 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
5891 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5892 RTE_ETHER_TYPE_MPLS);
5895 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5896 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* Fall back to the default MPLS mask when none was given. */
5903 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second, pick the misc2 field that holds the first MPLS label for
 * this encapsulation. */
5904 switch (prev_layer) {
5905 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
5907 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5908 outer_first_mpls_over_udp);
5910 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5911 outer_first_mpls_over_udp);
5913 case MLX5_FLOW_LAYER_GRE:
5915 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5916 outer_first_mpls_over_gre);
5918 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5919 outer_first_mpls_over_gre);
5922 /* Inner MPLS not over GRE is not supported. */
5925 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
5929 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the 32-bit label word; value is masked. */
5935 if (out_mpls_m && out_mpls_v) {
5936 *out_mpls_m = *in_mpls_m;
5937 *out_mpls_v = *in_mpls_v & *in_mpls_m;
5942 * Add metadata register item to matcher
5944 * @param[in, out] matcher
5946 * @param[in, out] key
5947 * Flow matcher value.
5948 * @param[in] reg_type
5949 * Type of device metadata register
5956 flow_dv_match_meta_reg(void *matcher, void *key,
5957 enum modify_reg reg_type,
5958 uint32_t data, uint32_t mask)
5961 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
5963 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
/* Dispatch on the register: each case writes mask/data into the
 * matching misc2 field (switch/case labels partly elided here). */
5969 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
5970 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
5973 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
5974 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
5978 * The metadata register C0 field might be divided into
5979 * source vport index and META item value, we should set
5980 * this field according to specified mask, not as whole one.
/* REG_C_0: read-modify-write so only the masked bits are touched,
 * preserving the vport portion of the register. */
5982 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
5984 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
5985 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
5988 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
5991 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
5992 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
5995 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
5996 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
5999 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6000 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6003 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6004 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6007 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6008 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6011 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6012 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6015 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6016 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6025 * Add MARK item to matcher
6028 * The device to configure through.
6029 * @param[in, out] matcher
6031 * @param[in, out] key
6032 * Flow matcher value.
6034 * Flow pattern to translate.
6037 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6038 void *matcher, void *key,
6039 const struct rte_flow_item *item)
6041 struct mlx5_priv *priv = dev->data->dev_private;
6042 const struct rte_flow_item_mark *mark;
/* Clamp the user mask to the bits the device can actually carry. */
6046 mark = item->mask ? (const void *)item->mask :
6047 &rte_flow_item_mark_mask;
6048 mask = mark->id & priv->sh->dv_mark_mask;
6049 mark = (const void *)item->spec;
6051 value = mark->id & priv->sh->dv_mark_mask & mask;
6053 enum modify_reg reg;
6055 /* Get the metadata register index for the mark. */
6056 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
/* REG_C_0 is shared with the vport bits: shift value/mask into the
 * sub-field selected by dv_regc0_mask (shift lines elided here). */
6058 if (reg == REG_C_0) {
6059 struct mlx5_priv *priv = dev->data->dev_private;
6060 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6061 uint32_t shl_c0 = rte_bsf32(msk_c0);
6067 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6072 * Add META item to matcher
6075 * The device to configure through.
6076 * @param[in, out] matcher
6078 * @param[in, out] key
6079 * Flow matcher value.
6081 * Attributes of flow that includes this item.
6083 * Flow pattern to translate.
6086 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6087 void *matcher, void *key,
6088 const struct rte_flow_attr *attr,
6089 const struct rte_flow_item *item)
6091 const struct rte_flow_item_meta *meta_m;
6092 const struct rte_flow_item_meta *meta_v;
/* Fall back to the default META mask when none was given. */
6094 meta_m = (const void *)item->mask;
6096 meta_m = &rte_flow_item_meta_mask;
6097 meta_v = (const void *)item->spec;
6099 enum modify_reg reg;
6100 uint32_t value = meta_v->data;
6101 uint32_t mask = meta_m->data;
/* The register that carries META depends on attr (ingress/transfer). */
6103 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6107 * In datapath code there is no endianness
6108 * conversions for performance reasons, all
6109 * pattern conversions are done in rte_flow.
6111 value = rte_cpu_to_be_32(value);
6112 mask = rte_cpu_to_be_32(mask);
/* REG_C_0 is shared with vport bits: align value/mask to the
 * dv_regc0_mask sub-field (some shift lines elided here). */
6113 if (reg == REG_C_0) {
6114 struct mlx5_priv *priv = dev->data->dev_private;
6115 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6116 uint32_t shl_c0 = rte_bsf32(msk_c0);
6117 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6118 uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
/* Sanity: the mask must fit inside the available C0 bits. */
6126 assert(!(~msk_c0 & mask));
6128 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6133 * Add vport metadata Reg C0 item to matcher
6135 * @param[in, out] matcher
6137 * @param[in, out] key
6138 * Flow matcher value.
6140 * Flow pattern to translate.
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
	/* Vport metadata is always carried in register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6150 * Add tag item to matcher
 * The device to configure through.
6154 * @param[in, out] matcher
6156 * @param[in, out] key
6157 * Flow matcher value.
6159 * Flow pattern to translate.
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	value = tag_v->data;
	/* A missing mask means "match all bits". */
	mask = tag_m ? tag_m->data : UINT32_MAX;
	/*
	 * REG_C_0 is shared with other features; restrict value/mask to
	 * the usable bits advertised in dv_regc0_mask.
	 */
	if (tag_v->id == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
6186 * Add TAG item to matcher
 * The device to configure through.
6190 * @param[in, out] matcher
6192 * @param[in, out] key
6193 * Flow matcher value.
6195 * Flow pattern to translate.
flow_dv_translate_item_tag(struct rte_eth_dev *dev,
			   void *matcher, void *key,
			   const struct rte_flow_item *item)
	const struct rte_flow_item_tag *tag_v = item->spec;
	const struct rte_flow_item_tag *tag_m = item->mask;
	enum modify_reg reg;

	/* Fall back to the default TAG mask when the item carries none. */
	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
	/* Get the metadata register index for the tag. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
6215 * Add source vport match to the specified matcher.
6217 * @param[in, out] matcher
6219 * @param[in, out] key
6220 * Flow matcher value.
6222 * Source vport value to match
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/* Match on the source_port field of the misc parameters. */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
6238 * Translate port-id item to eswitch match on port-id.
 * The device to configure through.
6242 * @param[in, out] matcher
6244 * @param[in, out] key
6245 * Flow matcher value.
6247 * Flow pattern to translate.
6250 * 0 on success, a negative errno value otherwise.
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item)
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;

	/* A NULL item means "this device's own port", fully masked. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	/* Translate to vport field or to metadata, depending on mode. */
	if (priv->vport_meta_mask)
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
6278 * Add ICMP6 item to matcher and to the value.
6280 * @param[in, out] matcher
6282 * @param[in, out] key
6283 * Flow matcher value.
6285 * Flow pattern to translate.
6287 * Item is inner pattern.
flow_dv_translate_item_icmp6(void *matcher, void *key,
			     const struct rte_flow_item *item,
	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

	/* Select inner or outer L2-L4 headers depending on tunnel level. */
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Pin the IP protocol to ICMPv6 with a full mask. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
	/* Fall back to the default ICMP6 mask when none is supplied. */
		icmp6_m = &rte_flow_item_icmp6_mask;
	/* Spec values are masked, per rte_flow matching convention. */
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
		 icmp6_v->type & icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
		 icmp6_v->code & icmp6_m->code);
6325 * Add ICMP item to matcher and to the value.
6327 * @param[in, out] matcher
6329 * @param[in, out] key
6330 * Flow matcher value.
6332 * Flow pattern to translate.
6334 * Item is inner pattern.
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

	/* Select inner or outer L2-L4 headers depending on tunnel level. */
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/* Pin the IP protocol to ICMP with a full mask. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	/* Fall back to the default ICMP mask when none is supplied. */
		icmp_m = &rte_flow_item_icmp_mask;
	/* Spec values are masked, per rte_flow matching convention. */
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
6374 * Add GTP item to matcher and to the value.
6376 * @param[in, out] matcher
6378 * @param[in, out] key
6379 * Flow matcher value.
6381 * Flow pattern to translate.
6383 * Item is inner pattern.
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	/* Select inner or outer L2-L4 headers depending on tunnel level. */
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
	headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	/*
	 * Force the GTP-U UDP destination port only when no UDP port
	 * match was already set by a preceding UDP item.
	 */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	/* Fall back to the default GTP mask when none is supplied. */
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	/* TEID arrives big-endian in the item; device field is CPU order. */
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
/* All-zero reference buffer used to test whether a match section is empty. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluates to non-zero when the given header section of the criteria
 * contains any non-zero byte (i.e. the section participates in matching).
 */
#define HEADER_IS_ZERO(match_criteria, headers)				      \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),      \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))  \
6431 * Calculate flow matcher enable bitmap.
6433 * @param match_criteria
6434 * Pointer to flow matcher criteria.
6437 * Bitmap of enabled fields.
flow_dv_matcher_enable(uint32_t *match_criteria)
	uint8_t match_criteria_enable;

	/*
	 * Set each criteria-enable bit iff the corresponding section of
	 * the match parameter buffer is non-zero, so the firmware only
	 * inspects sections that actually carry a mask.
	 */
	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
	return match_criteria_enable;
6466 * @param[in, out] dev
6467 * Pointer to rte_eth_dev structure.
6468 * @param[in] table_id
6471 * Direction of the table.
6472 * @param[in] transfer
6473 * E-Switch or NIC flow.
6475 * pointer to error structure.
6478 * Returns tables resource based on the index, NULL in case of failed.
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	/* 64-bit hash key combining table id, domain and direction. */
	union mlx5_flow_tbl_key table_key = {
		.table_id = table_id,
		.domain = !!transfer,
		.direction = !!egress,
	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Cache hit: reuse the existing table, take one more reference. */
	tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
	tbl = &tbl_data->tbl;
	rte_atomic32_inc(&tbl->refcnt);
	/* Cache miss: allocate a new entry and create the table object. */
	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   "cannot allocate flow table data entry");
	tbl = &tbl_data->tbl;
	pos = &tbl_data->entry;
	/* Pick the DR domain matching the flow direction/domain. */
		domain = sh->fdb_domain;
		domain = sh->tx_domain;
		domain = sh->rx_domain;
	tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
	/*
	 * No multi-threads now, but still better to initialize the reference
	 * count before insert it into the hash list.
	 */
	rte_atomic32_init(&tbl->refcnt);
	/* Jump action reference count is initialized here. */
	rte_atomic32_init(&tbl_data->jump.refcnt);
	pos->key = table_key.v64;
	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
		/* Insertion failed: undo the table object creation. */
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot insert flow table data entry");
		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
	rte_atomic32_inc(&tbl->refcnt);
6555 * Release a flow table.
6558 * Pointer to rte_eth_dev structure.
6560 * Table resource to be released.
6563 * Returns 0 if table was released, else return 1;
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);

	/* Destroy the table only when the last reference is dropped. */
	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
		struct mlx5_hlist_entry *pos = &tbl_data->entry;

		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
		/* remove the entry from the hash list and free memory. */
		mlx5_hlist_remove(sh->flow_tbls, pos);
6590 * Register the flow matcher.
6592 * @param[in, out] dev
6593 * Pointer to rte_eth_dev structure.
6594 * @param[in, out] matcher
6595 * Pointer to flow matcher.
6596 * @param[in, out] key
6597 * Pointer to flow table key.
6598 * @parm[in, out] dev_flow
6599 * Pointer to the dev_flow.
6601 * pointer to error structure.
6604 * 0 on success otherwise -errno and errno is set.
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Get (or create) the flow table this matcher belongs to. */
	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
				       key->domain, error);
		return -rte_errno;	/* No need to refill the error info */
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
		/* Equal iff CRC, priority and the full mask buffer match. */
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
				"%s group %u priority %hd use %s "
				"matcher %p: refcnt %d++",
				key->domain ? "FDB" : "NIC", key->table_id,
				cache_matcher->priority,
				key->direction ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			/* old matcher should not make the table ref++. */
			flow_dv_tbl_resource_release(dev, tbl);
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher) {
		/* Drop the table reference taken above before bailing out. */
		flow_dv_tbl_resource_release(dev, tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	*cache_matcher = *matcher;
	/* Only enable the criteria sections that carry a non-zero mask. */
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		flow_dv_tbl_resource_release(dev, tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	/* Save the table information */
	cache_matcher->tbl = tbl;
	rte_atomic32_init(&cache_matcher->refcnt);
	/* only matcher ref++, table ref++ already done above in get API. */
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
		key->domain ? "FDB" : "NIC", key->table_id,
		cache_matcher->priority,
		key->direction ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
6691 * Find existing tag resource or create and register a new one.
6693 * @param dev[in, out]
6694 * Pointer to rte_eth_dev structure.
6695 * @param[in, out] tag_be24
6696 * Tag value in big endian then R-shift 8.
6697 * @parm[in, out] dev_flow
6698 * Pointer to the dev_flow.
6700 * pointer to error structure.
6703 * 0 on success otherwise -errno and errno is set.
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_tag_resource *cache_resource;
	struct mlx5_hlist_entry *entry;

	/* Lookup a matching resource from cache. */
	entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
	/* Cache hit: reuse the tag action and bump its reference count. */
	cache_resource = container_of
		(entry, struct mlx5_flow_dv_tag_resource, entry);
	rte_atomic32_inc(&cache_resource->refcnt);
	dev_flow->dv.tag_resource = cache_resource;
	DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	/* Register new resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	/* The 24-bit tag value itself is the hash key. */
	cache_resource->entry.key = (uint64_t)tag_be24;
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
		/* Insertion failed: undo the action creation. */
		mlx5_glue->destroy_flow_action(cache_resource->action);
		rte_free(cache_resource);
		return rte_flow_error_set(error, EEXIST,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot insert tag");
	dev_flow->dv.tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
6763 * Pointer to Ethernet device.
6765 * Pointer to mlx5_flow.
6768 * 1 while a reference on it exists, 0 when freed.
flow_dv_tag_release(struct rte_eth_dev *dev,
		    struct mlx5_flow_dv_tag_resource *tag)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
		dev->data->port_id, (void *)tag,
		rte_atomic32_read(&tag->refcnt));
	/* Destroy the tag action only when the last reference is dropped. */
	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
		mlx5_hlist_remove(sh->tag_table, &tag->entry);
		DRV_LOG(DEBUG, "port %u tag %p: removed",
			dev->data->port_id, (void *)tag);
6793 * Translate port ID action to vport.
6796 * Pointer to rte_eth_dev structure.
6798 * Pointer to the port ID action.
6799 * @param[out] dst_port_id
6800 * The target port ID.
6802 * Pointer to the error structure.
6805 * 0 on success, a negative errno value otherwise and rte_errno is set.
flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
				 const struct rte_flow_action *action,
				 uint32_t *dst_port_id,
				 struct rte_flow_error *error)
	struct mlx5_priv *priv;
	const struct rte_flow_action_port_id *conf =
		(const struct rte_flow_action_port_id *)action->conf;

	/* "original" means redirect back to this device's own port. */
	port = conf->original ? dev->data->port_id : conf->id;
	priv = mlx5_port_to_eswitch_info(port, false);
		return rte_flow_error_set(error, -rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "No eswitch info was found for port");
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	/*
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_ib_port().
	 */
	*dst_port_id = priv->ibv_port;
	/*
	 * Legacy mode, no LAG configurations is supported.
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_vport().
	 */
	*dst_port_id = priv->vport_id;
6843 * Add Tx queue matcher
6846 * Pointer to the dev struct.
6847 * @param[in, out] matcher
6849 * @param[in, out] key
6850 * Flow matcher value.
6852 * Flow pattern to translate.
6854 * Item is inner pattern.
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;

	queue_m = (const void *)item->mask;
	queue_v = (const void *)item->spec;
	/* Resolve the Tx queue index to its hardware SQ number. */
	txq = mlx5_txq_get(dev, queue_v->queue);
	queue = txq->obj->sq->id;
	/* Match on the SQ number carried in misc source_sqn. */
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	/* Drop the queue reference taken by mlx5_txq_get(). */
	mlx5_txq_release(dev, queue_v->queue);
6888 * Set the hash fields according to the @p flow information.
6890 * @param[in] dev_flow
6891 * Pointer to the mlx5_flow.
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
	struct rte_flow *flow = dev_flow->flow;
	uint64_t items = dev_flow->layers;
	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner packet headers. */
	if (flow->rss.level >= 2) {
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
	/*
	 * L3: pick IPv4 or IPv6 hash fields from the flow's detected
	 * layers (inner when rss_inner is set, outer otherwise), honoring
	 * SRC_ONLY/DST_ONLY restrictions from the RSS types.
	 */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
	/* L4: same selection logic for UDP/TCP port hash fields. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & ETH_RSS_UDP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
					IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
					IBV_RX_HASH_DST_PORT_UDP;
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		if (rss_types & ETH_RSS_TCP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
					IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
					IBV_RX_HASH_DST_PORT_TCP;
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
6957 * Fill the flow with DV spec, lock free
6958 * (mutex should be acquired by caller).
6961 * Pointer to rte_eth_dev structure.
6962 * @param[in, out] dev_flow
6963 * Pointer to the sub flow.
6965 * Pointer to the flow attributes.
6967 * Pointer to the list of items.
6968 * @param[in] actions
6969 * Pointer to the list of actions.
6971 * Pointer to the error structure.
6974 * 0 on success, a negative errno value otherwise and rte_errno is set.
6977 __flow_dv_translate(struct rte_eth_dev *dev,
6978 struct mlx5_flow *dev_flow,
6979 const struct rte_flow_attr *attr,
6980 const struct rte_flow_item items[],
6981 const struct rte_flow_action actions[],
6982 struct rte_flow_error *error)
6984 struct mlx5_priv *priv = dev->data->dev_private;
6985 struct mlx5_dev_config *dev_conf = &priv->config;
6986 struct rte_flow *flow = dev_flow->flow;
6987 uint64_t item_flags = 0;
6988 uint64_t last_item = 0;
6989 uint64_t action_flags = 0;
6990 uint64_t priority = attr->priority;
6991 struct mlx5_flow_dv_matcher matcher = {
6993 .size = sizeof(matcher.mask.buf),
6997 bool actions_end = false;
6999 struct mlx5_flow_dv_modify_hdr_resource res;
7000 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
7001 sizeof(struct mlx5_modification_cmd) *
7002 (MLX5_MAX_MODIFY_NUM + 1)];
7004 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
7005 union flow_dv_attr flow_attr = { .attr = 0 };
7007 union mlx5_flow_tbl_key tbl_key;
7008 uint32_t modify_action_position = UINT32_MAX;
7009 void *match_mask = matcher.mask.buf;
7010 void *match_value = dev_flow->dv.value.buf;
7011 uint8_t next_protocol = 0xff;
7012 struct rte_vlan_hdr vlan = { 0 };
7016 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
7017 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
7018 ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
7022 dev_flow->group = table;
7024 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
7025 if (priority == MLX5_FLOW_PRIO_RSVD)
7026 priority = dev_conf->flow_prio - 1;
7027 /* number of actions must be set to 0 in case of dirty stack. */
7028 mhdr_res->actions_num = 0;
7029 for (; !actions_end ; actions++) {
7030 const struct rte_flow_action_queue *queue;
7031 const struct rte_flow_action_rss *rss;
7032 const struct rte_flow_action *action = actions;
7033 const struct rte_flow_action_count *count = action->conf;
7034 const uint8_t *rss_key;
7035 const struct rte_flow_action_jump *jump_data;
7036 const struct rte_flow_action_meter *mtr;
7037 struct mlx5_flow_tbl_resource *tbl;
7038 uint32_t port_id = 0;
7039 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
7040 int action_type = actions->type;
7041 const struct rte_flow_action *found_action = NULL;
7043 switch (action_type) {
7044 case RTE_FLOW_ACTION_TYPE_VOID:
7046 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7047 if (flow_dv_translate_action_port_id(dev, action,
7050 port_id_resource.port_id = port_id;
7051 if (flow_dv_port_id_action_resource_register
7052 (dev, &port_id_resource, dev_flow, error))
7054 dev_flow->dv.actions[actions_n++] =
7055 dev_flow->dv.port_id_action->action;
7056 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7058 case RTE_FLOW_ACTION_TYPE_FLAG:
7059 action_flags |= MLX5_FLOW_ACTION_FLAG;
7060 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7061 struct rte_flow_action_mark mark = {
7062 .id = MLX5_FLOW_MARK_DEFAULT,
7065 if (flow_dv_convert_action_mark(dev, &mark,
7069 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7072 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
7073 if (!dev_flow->dv.tag_resource)
7074 if (flow_dv_tag_resource_register
7075 (dev, tag_be, dev_flow, error))
7077 dev_flow->dv.actions[actions_n++] =
7078 dev_flow->dv.tag_resource->action;
7080 case RTE_FLOW_ACTION_TYPE_MARK:
7081 action_flags |= MLX5_FLOW_ACTION_MARK;
7082 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7083 const struct rte_flow_action_mark *mark =
7084 (const struct rte_flow_action_mark *)
7087 if (flow_dv_convert_action_mark(dev, mark,
7091 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7095 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7096 /* Legacy (non-extensive) MARK action. */
7097 tag_be = mlx5_flow_mark_set
7098 (((const struct rte_flow_action_mark *)
7099 (actions->conf))->id);
7100 if (!dev_flow->dv.tag_resource)
7101 if (flow_dv_tag_resource_register
7102 (dev, tag_be, dev_flow, error))
7104 dev_flow->dv.actions[actions_n++] =
7105 dev_flow->dv.tag_resource->action;
7107 case RTE_FLOW_ACTION_TYPE_SET_META:
7108 if (flow_dv_convert_action_set_meta
7109 (dev, mhdr_res, attr,
7110 (const struct rte_flow_action_set_meta *)
7111 actions->conf, error))
7113 action_flags |= MLX5_FLOW_ACTION_SET_META;
7115 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7116 if (flow_dv_convert_action_set_tag
7118 (const struct rte_flow_action_set_tag *)
7119 actions->conf, error))
7121 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7123 case RTE_FLOW_ACTION_TYPE_DROP:
7124 action_flags |= MLX5_FLOW_ACTION_DROP;
7126 case RTE_FLOW_ACTION_TYPE_QUEUE:
7127 assert(flow->rss.queue);
7128 queue = actions->conf;
7129 flow->rss.queue_num = 1;
7130 (*flow->rss.queue)[0] = queue->index;
7131 action_flags |= MLX5_FLOW_ACTION_QUEUE;
7133 case RTE_FLOW_ACTION_TYPE_RSS:
7134 assert(flow->rss.queue);
7135 rss = actions->conf;
7136 if (flow->rss.queue)
7137 memcpy((*flow->rss.queue), rss->queue,
7138 rss->queue_num * sizeof(uint16_t));
7139 flow->rss.queue_num = rss->queue_num;
7140 /* NULL RSS key indicates default RSS key. */
7141 rss_key = !rss->key ? rss_hash_default_key : rss->key;
7142 memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
7144 * rss->level and rss.types should be set in advance
7145 * when expanding items for RSS.
7147 action_flags |= MLX5_FLOW_ACTION_RSS;
7149 case RTE_FLOW_ACTION_TYPE_COUNT:
7150 if (!dev_conf->devx) {
7151 rte_errno = ENOTSUP;
7154 flow->counter = flow_dv_counter_alloc(dev,
7158 if (flow->counter == NULL)
7160 dev_flow->dv.actions[actions_n++] =
7161 flow->counter->action;
7162 action_flags |= MLX5_FLOW_ACTION_COUNT;
7165 if (rte_errno == ENOTSUP)
7166 return rte_flow_error_set
7168 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7170 "count action not supported");
7172 return rte_flow_error_set
7174 RTE_FLOW_ERROR_TYPE_ACTION,
7176 "cannot create counter"
7179 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7180 dev_flow->dv.actions[actions_n++] =
7181 priv->sh->pop_vlan_action;
7182 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7184 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7185 flow_dev_get_vlan_info_from_items(items, &vlan);
7186 vlan.eth_proto = rte_be_to_cpu_16
7187 ((((const struct rte_flow_action_of_push_vlan *)
7188 actions->conf)->ethertype));
7189 found_action = mlx5_flow_find_action
7191 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
7193 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7194 found_action = mlx5_flow_find_action
7196 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
7198 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7199 if (flow_dv_create_action_push_vlan
7200 (dev, attr, &vlan, dev_flow, error))
7202 dev_flow->dv.actions[actions_n++] =
7203 dev_flow->dv.push_vlan_res->action;
7204 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7206 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7207 /* of_vlan_push action handled this action */
7208 assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
7210 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7211 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7213 flow_dev_get_vlan_info_from_items(items, &vlan);
7214 mlx5_update_vlan_vid_pcp(actions, &vlan);
7215 /* If no VLAN push - this is a modify header action */
7216 if (flow_dv_convert_action_modify_vlan_vid
7217 (mhdr_res, actions, error))
7219 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7221 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7222 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7223 if (flow_dv_create_action_l2_encap(dev, actions,
7228 dev_flow->dv.actions[actions_n++] =
7229 dev_flow->dv.encap_decap->verbs_action;
7230 action_flags |= actions->type ==
7231 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
7232 MLX5_FLOW_ACTION_VXLAN_ENCAP :
7233 MLX5_FLOW_ACTION_NVGRE_ENCAP;
7235 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7236 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7237 if (flow_dv_create_action_l2_decap(dev, dev_flow,
7241 dev_flow->dv.actions[actions_n++] =
7242 dev_flow->dv.encap_decap->verbs_action;
7243 action_flags |= actions->type ==
7244 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7245 MLX5_FLOW_ACTION_VXLAN_DECAP :
7246 MLX5_FLOW_ACTION_NVGRE_DECAP;
7248 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7249 /* Handle encap with preceding decap. */
7250 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
7251 if (flow_dv_create_action_raw_encap
7252 (dev, actions, dev_flow, attr, error))
7254 dev_flow->dv.actions[actions_n++] =
7255 dev_flow->dv.encap_decap->verbs_action;
7257 /* Handle encap without preceding decap. */
7258 if (flow_dv_create_action_l2_encap
7259 (dev, actions, dev_flow, attr->transfer,
7262 dev_flow->dv.actions[actions_n++] =
7263 dev_flow->dv.encap_decap->verbs_action;
7265 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
7267 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7268 /* Check if this decap is followed by encap. */
7269 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
7270 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
7273 /* Handle decap only if it isn't followed by encap. */
7274 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7275 if (flow_dv_create_action_l2_decap
7276 (dev, dev_flow, attr->transfer, error))
7278 dev_flow->dv.actions[actions_n++] =
7279 dev_flow->dv.encap_decap->verbs_action;
7281 /* If decap is followed by encap, handle it at encap. */
7282 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
7284 case RTE_FLOW_ACTION_TYPE_JUMP:
7285 jump_data = action->conf;
7286 ret = mlx5_flow_group_to_table(attr, dev_flow->external,
7287 jump_data->group, &table,
7291 tbl = flow_dv_tbl_resource_get(dev, table,
7293 attr->transfer, error);
7295 return rte_flow_error_set
7297 RTE_FLOW_ERROR_TYPE_ACTION,
7299 "cannot create jump action.");
7300 if (flow_dv_jump_tbl_resource_register
7301 (dev, tbl, dev_flow, error)) {
7302 flow_dv_tbl_resource_release(dev, tbl);
7303 return rte_flow_error_set
7305 RTE_FLOW_ERROR_TYPE_ACTION,
7307 "cannot create jump action.");
7309 dev_flow->dv.actions[actions_n++] =
7310 dev_flow->dv.jump->action;
7311 action_flags |= MLX5_FLOW_ACTION_JUMP;
7313 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7314 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7315 if (flow_dv_convert_action_modify_mac
7316 (mhdr_res, actions, error))
7318 action_flags |= actions->type ==
7319 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7320 MLX5_FLOW_ACTION_SET_MAC_SRC :
7321 MLX5_FLOW_ACTION_SET_MAC_DST;
7323 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7324 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7325 if (flow_dv_convert_action_modify_ipv4
7326 (mhdr_res, actions, error))
7328 action_flags |= actions->type ==
7329 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7330 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7331 MLX5_FLOW_ACTION_SET_IPV4_DST;
7333 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7334 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7335 if (flow_dv_convert_action_modify_ipv6
7336 (mhdr_res, actions, error))
7338 action_flags |= actions->type ==
7339 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7340 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7341 MLX5_FLOW_ACTION_SET_IPV6_DST;
7343 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7344 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7345 if (flow_dv_convert_action_modify_tp
7346 (mhdr_res, actions, items,
7349 action_flags |= actions->type ==
7350 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7351 MLX5_FLOW_ACTION_SET_TP_SRC :
7352 MLX5_FLOW_ACTION_SET_TP_DST;
7354 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7355 if (flow_dv_convert_action_modify_dec_ttl
7356 (mhdr_res, items, &flow_attr, error))
7358 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
7360 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7361 if (flow_dv_convert_action_modify_ttl
7362 (mhdr_res, actions, items,
7365 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
7367 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7368 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7369 if (flow_dv_convert_action_modify_tcp_seq
7370 (mhdr_res, actions, error))
7372 action_flags |= actions->type ==
7373 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7374 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7375 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7378 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7379 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7380 if (flow_dv_convert_action_modify_tcp_ack
7381 (mhdr_res, actions, error))
7383 action_flags |= actions->type ==
7384 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7385 MLX5_FLOW_ACTION_INC_TCP_ACK :
7386 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7388 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7389 if (flow_dv_convert_action_set_reg
7390 (mhdr_res, actions, error))
7392 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7394 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7395 if (flow_dv_convert_action_copy_mreg
7396 (dev, mhdr_res, actions, error))
7398 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7400 case RTE_FLOW_ACTION_TYPE_METER:
7401 mtr = actions->conf;
7403 flow->meter = mlx5_flow_meter_attach(priv,
7407 return rte_flow_error_set(error,
7409 RTE_FLOW_ERROR_TYPE_ACTION,
7412 "or invalid parameters");
7414 /* Set the meter action. */
7415 dev_flow->dv.actions[actions_n++] =
7416 flow->meter->mfts->meter_action;
7417 action_flags |= MLX5_FLOW_ACTION_METER;
7419 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7420 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
7423 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7425 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7426 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
7429 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7431 case RTE_FLOW_ACTION_TYPE_END:
7433 if (mhdr_res->actions_num) {
7434 /* create modify action if needed. */
7435 if (flow_dv_modify_hdr_resource_register
7436 (dev, mhdr_res, dev_flow, error))
7438 dev_flow->dv.actions[modify_action_position] =
7439 dev_flow->dv.modify_hdr->verbs_action;
7445 if (mhdr_res->actions_num &&
7446 modify_action_position == UINT32_MAX)
7447 modify_action_position = actions_n++;
7449 dev_flow->dv.actions_n = actions_n;
7450 dev_flow->actions = action_flags;
7451 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7452 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7453 int item_type = items->type;
7455 switch (item_type) {
7456 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7457 flow_dv_translate_item_port_id(dev, match_mask,
7458 match_value, items);
7459 last_item = MLX5_FLOW_ITEM_PORT_ID;
7461 case RTE_FLOW_ITEM_TYPE_ETH:
7462 flow_dv_translate_item_eth(match_mask, match_value,
7464 matcher.priority = MLX5_PRIORITY_MAP_L2;
7465 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7466 MLX5_FLOW_LAYER_OUTER_L2;
7468 case RTE_FLOW_ITEM_TYPE_VLAN:
7469 flow_dv_translate_item_vlan(dev_flow,
7470 match_mask, match_value,
7472 matcher.priority = MLX5_PRIORITY_MAP_L2;
7473 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
7474 MLX5_FLOW_LAYER_INNER_VLAN) :
7475 (MLX5_FLOW_LAYER_OUTER_L2 |
7476 MLX5_FLOW_LAYER_OUTER_VLAN);
7478 case RTE_FLOW_ITEM_TYPE_IPV4:
7479 mlx5_flow_tunnel_ip_check(items, next_protocol,
7480 &item_flags, &tunnel);
7481 flow_dv_translate_item_ipv4(match_mask, match_value,
7484 matcher.priority = MLX5_PRIORITY_MAP_L3;
7485 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7486 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7487 if (items->mask != NULL &&
7488 ((const struct rte_flow_item_ipv4 *)
7489 items->mask)->hdr.next_proto_id) {
7491 ((const struct rte_flow_item_ipv4 *)
7492 (items->spec))->hdr.next_proto_id;
7494 ((const struct rte_flow_item_ipv4 *)
7495 (items->mask))->hdr.next_proto_id;
7497 /* Reset for inner layer. */
7498 next_protocol = 0xff;
7501 case RTE_FLOW_ITEM_TYPE_IPV6:
7502 mlx5_flow_tunnel_ip_check(items, next_protocol,
7503 &item_flags, &tunnel);
7504 flow_dv_translate_item_ipv6(match_mask, match_value,
7507 matcher.priority = MLX5_PRIORITY_MAP_L3;
7508 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7509 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7510 if (items->mask != NULL &&
7511 ((const struct rte_flow_item_ipv6 *)
7512 items->mask)->hdr.proto) {
7514 ((const struct rte_flow_item_ipv6 *)
7515 items->spec)->hdr.proto;
7517 ((const struct rte_flow_item_ipv6 *)
7518 items->mask)->hdr.proto;
7520 /* Reset for inner layer. */
7521 next_protocol = 0xff;
7524 case RTE_FLOW_ITEM_TYPE_TCP:
7525 flow_dv_translate_item_tcp(match_mask, match_value,
7527 matcher.priority = MLX5_PRIORITY_MAP_L4;
7528 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7529 MLX5_FLOW_LAYER_OUTER_L4_TCP;
7531 case RTE_FLOW_ITEM_TYPE_UDP:
7532 flow_dv_translate_item_udp(match_mask, match_value,
7534 matcher.priority = MLX5_PRIORITY_MAP_L4;
7535 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7536 MLX5_FLOW_LAYER_OUTER_L4_UDP;
7538 case RTE_FLOW_ITEM_TYPE_GRE:
7539 flow_dv_translate_item_gre(match_mask, match_value,
7541 last_item = MLX5_FLOW_LAYER_GRE;
7543 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7544 flow_dv_translate_item_gre_key(match_mask,
7545 match_value, items);
7546 last_item = MLX5_FLOW_LAYER_GRE_KEY;
7548 case RTE_FLOW_ITEM_TYPE_NVGRE:
7549 flow_dv_translate_item_nvgre(match_mask, match_value,
7551 last_item = MLX5_FLOW_LAYER_GRE;
7553 case RTE_FLOW_ITEM_TYPE_VXLAN:
7554 flow_dv_translate_item_vxlan(match_mask, match_value,
7556 last_item = MLX5_FLOW_LAYER_VXLAN;
7558 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7559 flow_dv_translate_item_vxlan(match_mask, match_value,
7561 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7563 case RTE_FLOW_ITEM_TYPE_GENEVE:
7564 flow_dv_translate_item_geneve(match_mask, match_value,
7566 last_item = MLX5_FLOW_LAYER_GENEVE;
7568 case RTE_FLOW_ITEM_TYPE_MPLS:
7569 flow_dv_translate_item_mpls(match_mask, match_value,
7570 items, last_item, tunnel);
7571 last_item = MLX5_FLOW_LAYER_MPLS;
7573 case RTE_FLOW_ITEM_TYPE_MARK:
7574 flow_dv_translate_item_mark(dev, match_mask,
7575 match_value, items);
7576 last_item = MLX5_FLOW_ITEM_MARK;
7578 case RTE_FLOW_ITEM_TYPE_META:
7579 flow_dv_translate_item_meta(dev, match_mask,
7580 match_value, attr, items);
7581 last_item = MLX5_FLOW_ITEM_METADATA;
7583 case RTE_FLOW_ITEM_TYPE_ICMP:
7584 flow_dv_translate_item_icmp(match_mask, match_value,
7586 last_item = MLX5_FLOW_LAYER_ICMP;
7588 case RTE_FLOW_ITEM_TYPE_ICMP6:
7589 flow_dv_translate_item_icmp6(match_mask, match_value,
7591 last_item = MLX5_FLOW_LAYER_ICMP6;
7593 case RTE_FLOW_ITEM_TYPE_TAG:
7594 flow_dv_translate_item_tag(dev, match_mask,
7595 match_value, items);
7596 last_item = MLX5_FLOW_ITEM_TAG;
7598 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7599 flow_dv_translate_mlx5_item_tag(dev, match_mask,
7600 match_value, items);
7601 last_item = MLX5_FLOW_ITEM_TAG;
7603 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7604 flow_dv_translate_item_tx_queue(dev, match_mask,
7607 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7609 case RTE_FLOW_ITEM_TYPE_GTP:
7610 flow_dv_translate_item_gtp(match_mask, match_value,
7612 last_item = MLX5_FLOW_LAYER_GTP;
7617 item_flags |= last_item;
7620 * In case of ingress traffic when E-Switch mode is enabled,
7621 * we have two cases where we need to set the source port manually.
7622 * The first one, is in case of Nic steering rule, and the second is
7623 * E-Switch rule where no port_id item was found. In both cases
7624 * the source port is set according the current port in use.
7626 if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
7627 (priv->representor || priv->master)) {
7628 if (flow_dv_translate_item_port_id(dev, match_mask,
7632 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
7633 dev_flow->dv.value.buf));
7634 dev_flow->layers = item_flags;
7635 if (action_flags & MLX5_FLOW_ACTION_RSS)
7636 flow_dv_hashfields_set(dev_flow);
7637 /* Register matcher. */
7638 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
7640 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
7642 /* reserved field no needs to be set to 0 here. */
7643 tbl_key.domain = attr->transfer;
7644 tbl_key.direction = attr->egress;
7645 tbl_key.table_id = dev_flow->group;
7646 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
7652 * Apply the flow to the NIC, lock free,
7653 * (mutex should be acquired by caller).
 * NOTE(review): line-sampled listing — braces, 'goto error' paths and
 * return statements are elided between the numbered lines shown below.
7656 * Pointer to the Ethernet device structure.
7657 * @param[in, out] flow
7658 * Pointer to flow structure.
7660 * Pointer to error structure.
7663 * 0 on success, a negative errno value otherwise and rte_errno is set.
7666 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
7667 struct rte_flow_error *error)
7669 struct mlx5_flow_dv *dv;
7670 struct mlx5_flow *dev_flow;
7671 struct mlx5_priv *priv = dev->data->dev_private;
 /* Create one hardware rule per sub (device) flow of this rte_flow. */
7675 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 /* DROP action: E-Switch drop for transfer flows, drop hrxq otherwise. */
7678 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
7679 if (dev_flow->transfer) {
7680 dv->actions[n++] = priv->sh->esw_drop_action;
7682 dv->hrxq = mlx5_hrxq_drop_new(dev);
7686 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7688 "cannot get drop hash queue");
7691 dv->actions[n++] = dv->hrxq->action;
 /* QUEUE/RSS action: reuse a cached hash Rx queue or create a new one. */
7693 } else if (dev_flow->actions &
7694 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
7695 struct mlx5_hrxq *hrxq;
7697 assert(flow->rss.queue);
7698 hrxq = mlx5_hrxq_get(dev, flow->rss.key,
7699 MLX5_RSS_HASH_KEY_LEN,
7700 dev_flow->hash_fields,
7702 flow->rss.queue_num);
7704 hrxq = mlx5_hrxq_new
7705 (dev, flow->rss.key,
7706 MLX5_RSS_HASH_KEY_LEN,
7707 dev_flow->hash_fields,
7709 flow->rss.queue_num,
7710 !!(dev_flow->layers &
7711 MLX5_FLOW_LAYER_TUNNEL));
7716 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7717 "cannot get hash queue");
7721 dv->actions[n++] = dv->hrxq->action;
 /* Instantiate the rule in hardware via the matcher + match value. */
7724 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
7725 (void *)&dv->value, n,
7728 rte_flow_error_set(error, errno,
7729 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7731 "hardware refuses to create flow");
7734 if (priv->vmwa_context &&
7735 dev_flow->dv.vf_vlan.tag &&
7736 !dev_flow->dv.vf_vlan.created) {
7738 * The rule contains the VLAN pattern.
7739 * For VF we are going to create VLAN
7740 * interface to make hypervisor set correct
7741 * e-Switch vport context.
7743 mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
 /* Error path: undo per-sub-flow acquisitions done above. */
7748 err = rte_errno; /* Save rte_errno before cleanup. */
7749 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7750 struct mlx5_flow_dv *dv = &dev_flow->dv;
7752 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7753 mlx5_hrxq_drop_release(dev);
7755 mlx5_hrxq_release(dev, dv->hrxq);
7758 if (dev_flow->dv.vf_vlan.tag &&
7759 dev_flow->dv.vf_vlan.created)
7760 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7762 rte_errno = err; /* Restore rte_errno. */
7767 * Release the flow matcher.
 * NOTE(review): line-sampled listing; elided lines (braces, @param/@return
 * labels, return statements) are not shown.
7770 * Pointer to Ethernet device.
7772 * Pointer to mlx5_flow.
7775 * 1 while a reference on it exists, 0 when freed.
7778 flow_dv_matcher_release(struct rte_eth_dev *dev,
7779 struct mlx5_flow *flow)
7781 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
7783 assert(matcher->matcher_object);
7784 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
7785 dev->data->port_id, (void *)matcher,
7786 rte_atomic32_read(&matcher->refcnt));
 /* Last reference: destroy the DV matcher object and drop the table ref. */
7787 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
7788 claim_zero(mlx5_glue->dv_destroy_flow_matcher
7789 (matcher->matcher_object));
7790 LIST_REMOVE(matcher, next);
7791 /* table ref-- in release interface. */
7792 flow_dv_tbl_resource_release(dev, matcher->tbl)
7794 DRV_LOG(DEBUG, "port %u matcher %p: removed",
7795 dev->data->port_id, (void *)matcher);
7802 * Release an encap/decap resource.
 * NOTE(review): line-sampled listing; elided lines are not shown.
7805 * Pointer to mlx5_flow.
7808 * 1 while a reference on it exists, 0 when freed.
7811 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
7813 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
7814 flow->dv.encap_decap;
7816 assert(cache_resource->verbs_action);
7817 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
7818 (void *)cache_resource,
7819 rte_atomic32_read(&cache_resource->refcnt));
 /* Last reference: destroy the verbs action and free the cache entry. */
7820 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7821 claim_zero(mlx5_glue->destroy_flow_action
7822 (cache_resource->verbs_action));
7823 LIST_REMOVE(cache_resource, next);
7824 rte_free(cache_resource);
7825 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
7826 (void *)cache_resource);
7833 * Release an jump to table action resource.
 * NOTE(review): line-sampled listing; elided lines are not shown.
7836 * Pointer to Ethernet device.
7838 * Pointer to mlx5_flow.
7841 * 1 while a reference on it exists, 0 when freed.
7844 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
7845 struct mlx5_flow *flow)
7847 struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
 /* The jump resource is embedded in the table data entry; recover it. */
7848 struct mlx5_flow_tbl_data_entry *tbl_data =
7849 container_of(cache_resource,
7850 struct mlx5_flow_tbl_data_entry, jump);
7852 assert(cache_resource->action);
7853 DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
7854 (void *)cache_resource,
7855 rte_atomic32_read(&cache_resource->refcnt));
7856 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7857 claim_zero(mlx5_glue->destroy_flow_action
7858 (cache_resource->action));
7859 /* jump action memory free is inside the table release. */
7860 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
7861 DRV_LOG(DEBUG, "jump table resource %p: removed",
7862 (void *)cache_resource);
7869 * Release a modify-header resource.
 * NOTE(review): line-sampled listing; elided lines are not shown.
7872 * Pointer to mlx5_flow.
7875 * 1 while a reference on it exists, 0 when freed.
7878 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
7880 struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
7881 flow->dv.modify_hdr;
7883 assert(cache_resource->verbs_action);
7884 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
7885 (void *)cache_resource,
7886 rte_atomic32_read(&cache_resource->refcnt));
 /* Last reference: destroy the verbs action and free the cache entry. */
7887 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7888 claim_zero(mlx5_glue->destroy_flow_action
7889 (cache_resource->verbs_action));
7890 LIST_REMOVE(cache_resource, next);
7891 rte_free(cache_resource);
7892 DRV_LOG(DEBUG, "modify-header resource %p: removed",
7893 (void *)cache_resource);
7900 * Release port ID action resource.
 * NOTE(review): line-sampled listing; elided lines are not shown.
7903 * Pointer to mlx5_flow.
7906 * 1 while a reference on it exists, 0 when freed.
7909 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
7911 struct mlx5_flow_dv_port_id_action_resource *cache_resource =
7912 flow->dv.port_id_action;
7914 assert(cache_resource->action);
7915 DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
7916 (void *)cache_resource,
7917 rte_atomic32_read(&cache_resource->refcnt));
 /* Last reference: destroy the action and free the cache entry. */
7918 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7919 claim_zero(mlx5_glue->destroy_flow_action
7920 (cache_resource->action));
7921 LIST_REMOVE(cache_resource, next);
7922 rte_free(cache_resource);
7923 DRV_LOG(DEBUG, "port id action resource %p: removed",
7924 (void *)cache_resource);
7931 * Release push vlan action resource.
 * NOTE(review): line-sampled listing; elided lines are not shown.
7934 * Pointer to mlx5_flow.
7937 * 1 while a reference on it exists, 0 when freed.
7940 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
7942 struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
7943 flow->dv.push_vlan_res;
7945 assert(cache_resource->action);
7946 DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
7947 (void *)cache_resource,
7948 rte_atomic32_read(&cache_resource->refcnt));
 /* Last reference: destroy the action and free the cache entry. */
7949 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7950 claim_zero(mlx5_glue->destroy_flow_action
7951 (cache_resource->action));
7952 LIST_REMOVE(cache_resource, next);
7953 rte_free(cache_resource);
7954 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
7955 (void *)cache_resource);
7962 * Remove the flow from the NIC but keeps it in memory.
7963 * Lock free, (mutex should be acquired by caller).
 * NOTE(review): line-sampled listing; elided lines are not shown.
7966 * Pointer to Ethernet device.
7967 * @param[in, out] flow
7968 * Pointer to flow structure.
7971 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
7973 struct mlx5_flow_dv *dv;
7974 struct mlx5_flow *dev_flow;
 /* Destroy the HW rule of each sub-flow and release its hrxq/VLAN refs. */
7978 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7981 claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
7985 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7986 mlx5_hrxq_drop_release(dev);
7988 mlx5_hrxq_release(dev, dv->hrxq);
7991 if (dev_flow->dv.vf_vlan.tag &&
7992 dev_flow->dv.vf_vlan.created)
7993 mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7998 * Remove the flow from the NIC and the memory.
7999 * Lock free, (mutex should be acquired by caller).
 * NOTE(review): line-sampled listing; elided lines are not shown.
8002 * Pointer to the Ethernet device structure.
8003 * @param[in, out] flow
8004 * Pointer to flow structure.
8007 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8009 struct mlx5_flow *dev_flow;
 /* Remove from HW first, then release counter/meter and cached resources. */
8013 __flow_dv_remove(dev, flow);
8014 if (flow->counter) {
8015 flow_dv_counter_release(dev, flow->counter);
8016 flow->counter = NULL;
8019 mlx5_flow_meter_detach(flow->meter);
 /* Drop each sub-flow and the shared resources it references. */
8022 while (!LIST_EMPTY(&flow->dev_flows)) {
8023 dev_flow = LIST_FIRST(&flow->dev_flows);
8024 LIST_REMOVE(dev_flow, next);
8025 if (dev_flow->dv.matcher)
8026 flow_dv_matcher_release(dev, dev_flow);
8027 if (dev_flow->dv.encap_decap)
8028 flow_dv_encap_decap_resource_release(dev_flow);
8029 if (dev_flow->dv.modify_hdr)
8030 flow_dv_modify_hdr_resource_release(dev_flow);
8031 if (dev_flow->dv.jump)
8032 flow_dv_jump_tbl_resource_release(dev, dev_flow);
8033 if (dev_flow->dv.port_id_action)
8034 flow_dv_port_id_action_resource_release(dev_flow);
8035 if (dev_flow->dv.push_vlan_res)
8036 flow_dv_push_vlan_action_resource_release(dev_flow);
8037 if (dev_flow->dv.tag_resource)
8038 flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
8044 * Query a dv flow rule for its statistics via devx.
 * NOTE(review): line-sampled listing; elided lines are not shown.
8047 * Pointer to Ethernet device.
8049 * Pointer to the sub flow.
8051 * data retrieved by the query.
8053 * Perform verbose error reporting if not NULL.
8056 * 0 on success, a negative errno value otherwise and rte_errno is set.
8059 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
8060 void *data, struct rte_flow_error *error)
8062 struct mlx5_priv *priv = dev->data->dev_private;
8063 struct rte_flow_query_count *qc = data;
 /* DevX is required for counter support. */
8065 if (!priv->config.devx)
8066 return rte_flow_error_set(error, ENOTSUP,
8067 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8069 "counters are not supported");
8070 if (flow->counter) {
8071 uint64_t pkts, bytes;
8072 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
8076 return rte_flow_error_set(error, -err,
8077 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8078 NULL, "cannot read counters");
 /* Report deltas since the last reset; the reset path below rebases. */
8081 qc->hits = pkts - flow->counter->hits;
8082 qc->bytes = bytes - flow->counter->bytes;
8084 flow->counter->hits = pkts;
8085 flow->counter->bytes = bytes;
8089 return rte_flow_error_set(error, EINVAL,
8090 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8092 "counters are not available");
8098 * @see rte_flow_query()
 * NOTE(review): line-sampled listing; elided lines are not shown.
 * Dispatches each queried action; only COUNT is supported here.
8102 flow_dv_query(struct rte_eth_dev *dev,
8103 struct rte_flow *flow __rte_unused,
8104 const struct rte_flow_action *actions __rte_unused,
8105 void *data __rte_unused,
8106 struct rte_flow_error *error __rte_unused)
8110 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8111 switch (actions->type) {
8112 case RTE_FLOW_ACTION_TYPE_VOID:
8114 case RTE_FLOW_ACTION_TYPE_COUNT:
8115 ret = flow_dv_query_count(dev, flow, data, error);
8118 return rte_flow_error_set(error, ENOTSUP,
8119 RTE_FLOW_ERROR_TYPE_ACTION,
8121 "action not supported");
8128 * Destroy the meter table set.
8129 * Lock free, (mutex should be acquired by caller).
 * NOTE(review): line-sampled listing; elided lines are not shown.
8132 * Pointer to Ethernet device.
8134 * Pointer to the meter table set.
8140 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
8141 struct mlx5_meter_domains_infos *tbl)
8143 struct mlx5_priv *priv = dev->data->dev_private;
8144 struct mlx5_meter_domains_infos *mtd =
8145 (struct mlx5_meter_domains_infos *)tbl;
8147 if (!mtd || !priv->config.dv_flow_en)
 /* Destroy the default drop rules of the three domains. */
8149 if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
8150 claim_zero(mlx5_glue->dv_destroy_flow
8151 (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
8152 if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
8153 claim_zero(mlx5_glue->dv_destroy_flow
8154 (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
8155 if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
8156 claim_zero(mlx5_glue->dv_destroy_flow
8157 (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
 /* Per-domain: destroy matchers, then release the meter table. */
8158 if (mtd->egress.color_matcher)
8159 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8160 (mtd->egress.color_matcher));
8161 if (mtd->egress.any_matcher)
8162 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8163 (mtd->egress.any_matcher));
8164 if (mtd->egress.tbl)
8165 claim_zero(flow_dv_tbl_resource_release(dev,
8167 if (mtd->ingress.color_matcher)
8168 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8169 (mtd->ingress.color_matcher));
8170 if (mtd->ingress.any_matcher)
8171 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8172 (mtd->ingress.any_matcher));
8173 if (mtd->ingress.tbl)
8174 claim_zero(flow_dv_tbl_resource_release(dev,
8176 if (mtd->transfer.color_matcher)
8177 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8178 (mtd->transfer.color_matcher));
8179 if (mtd->transfer.any_matcher)
8180 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8181 (mtd->transfer.any_matcher));
8182 if (mtd->transfer.tbl)
8183 claim_zero(flow_dv_tbl_resource_release(dev,
8184 mtd->transfer.tbl));
8186 claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
8191 /* Number of meter flow actions, count and jump or count and drop. */
8192 #define METER_ACTIONS 2
8195 * Create specify domain meter table and suffix table.
 * NOTE(review): line-sampled listing; elided lines (braces, returns,
 * error labels) are not shown.
8198 * Pointer to Ethernet device.
8199 * @param[in,out] mtb
8200 * Pointer to DV meter table set.
8203 * @param[in] transfer
8205 * @param[in] color_reg_c_idx
8206 * Reg C index for color match.
8209 * 0 on success, -1 otherwise and rte_errno is set.
8212 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
8213 struct mlx5_meter_domains_infos *mtb,
8214 uint8_t egress, uint8_t transfer,
8215 uint32_t color_reg_c_idx)
8217 struct mlx5_priv *priv = dev->data->dev_private;
8218 struct mlx5_ibv_shared *sh = priv->sh;
8219 struct mlx5_flow_dv_match_params mask = {
8220 .size = sizeof(mask.buf),
8222 struct mlx5_flow_dv_match_params value = {
8223 .size = sizeof(value.buf),
8225 struct mlx5dv_flow_matcher_attr dv_attr = {
8226 .type = IBV_FLOW_ATTR_NORMAL,
8228 .match_criteria_enable = 0,
8229 .match_mask = (void *)&mask,
8231 void *actions[METER_ACTIONS];
8232 struct mlx5_flow_tbl_resource **sfx_tbl;
8233 struct mlx5_meter_domain_info *dtb;
8234 struct rte_flow_error error;
 /* Select suffix table slot and domain info per transfer/egress/ingress. */
8238 sfx_tbl = &sh->fdb_mtr_sfx_tbl;
8239 dtb = &mtb->transfer;
8240 } else if (egress) {
8241 sfx_tbl = &sh->tx_mtr_sfx_tbl;
8244 sfx_tbl = &sh->rx_mtr_sfx_tbl;
8245 dtb = &mtb->ingress;
8247 /* If the suffix table in missing, create it. */
8249 *sfx_tbl = flow_dv_tbl_resource_get(dev,
8250 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
8251 egress, transfer, &error);
8253 DRV_LOG(ERR, "Failed to create meter suffix table.");
8257 /* Create the meter table with METER level. */
8258 dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
8259 egress, transfer, &error);
8261 DRV_LOG(ERR, "Failed to create meter policer table.");
8264 /* Create matchers, Any and Color. */
8265 dv_attr.priority = 3;
8266 dv_attr.match_criteria_enable = 0;
8267 dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8270 if (!dtb->any_matcher) {
8271 DRV_LOG(ERR, "Failed to create meter"
8272 " policer default matcher.");
 /* Color matcher: match the color value in the configured REG_C. */
8275 dv_attr.priority = 0;
8276 dv_attr.match_criteria_enable =
8277 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8278 flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
8279 rte_col_2_mlx5_col(RTE_COLORS), UINT32_MAX);
8280 dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8283 if (!dtb->color_matcher) {
8284 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
8287 if (mtb->count_actns[RTE_MTR_DROPPED])
8288 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
8289 actions[i++] = mtb->drop_actn;
8290 /* Default rule: lowest priority, match any, actions: drop. */
8291 dtb->policer_rules[RTE_MTR_DROPPED] =
8292 mlx5_glue->dv_create_flow(dtb->any_matcher,
8293 (void *)&value, i, actions);
8294 if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
8295 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
8304 * Create the needed meter and suffix tables.
8305 * Lock free, (mutex should be acquired by caller).
 * NOTE(review): line-sampled listing; elided lines are not shown.
8308 * Pointer to Ethernet device.
8310 * Pointer to the flow meter.
8313 * Pointer to table set on success, NULL otherwise and rte_errno is set.
8315 static struct mlx5_meter_domains_infos *
8316 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
8317 const struct mlx5_flow_meter *fm)
8319 struct mlx5_priv *priv = dev->data->dev_private;
8320 struct mlx5_meter_domains_infos *mtb;
8324 if (!priv->mtr_en) {
8325 rte_errno = ENOTSUP;
8328 mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
8330 DRV_LOG(ERR, "Failed to allocate memory for meter.");
8333 /* Create meter count actions */
8334 for (i = 0; i <= RTE_MTR_DROPPED; i++) {
8335 if (!fm->policer_stats.cnt[i])
8337 mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
8339 /* Create drop action. */
8340 mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
8341 if (!mtb->drop_actn) {
8342 DRV_LOG(ERR, "Failed to create drop action.");
8345 /* Egress meter table. */
8346 ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
8348 DRV_LOG(ERR, "Failed to prepare egress meter table.");
8351 /* Ingress meter table. */
8352 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
8354 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
8357 /* FDB meter table. */
8358 if (priv->config.dv_esw_en) {
8359 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
8360 priv->mtr_color_reg);
8362 DRV_LOG(ERR, "Failed to prepare fdb meter table.");
 /* Error path: tear down whatever was created. */
8368 flow_dv_destroy_mtr_tbl(dev, mtb);
8373 * Destroy domain policer rule.
 * NOTE(review): line-sampled listing; elided lines are not shown.
8376 * Pointer to domain table.
8379 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
 /* Destroy the per-color policer rules (DROPPED handled elsewhere). */
8383 for (i = 0; i < RTE_MTR_DROPPED; i++) {
8384 if (dt->policer_rules[i]) {
8385 claim_zero(mlx5_glue->dv_destroy_flow
8386 (dt->policer_rules[i]));
8387 dt->policer_rules[i] = NULL;
8390 if (dt->jump_actn) {
8391 claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
8392 dt->jump_actn = NULL;
8397 * Destroy policer rules.
 * NOTE(review): line-sampled listing; the attr->egress/ingress/transfer
 * guards around the three calls below appear to be elided — confirm.
8400 * Pointer to Ethernet device.
8402 * Pointer to flow meter structure.
8404 * Pointer to flow attributes.
8410 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
8411 const struct mlx5_flow_meter *fm,
8412 const struct rte_flow_attr *attr)
8414 struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
8419 flow_dv_destroy_domain_policer_rule(&mtb->egress);
8421 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
8423 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
8428 * Create specify domain meter policer rule.
 * NOTE(review): line-sampled listing; elided lines (braces, error
 * handling) are not shown.
8431 * Pointer to flow meter structure.
8433 * Pointer to DV meter table set.
8435 * Pointer to suffix table.
8436 * @param[in] mtr_reg_c
8437 * Color match REG_C.
8440 * 0 on success, -1 otherwise.
8443 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
8444 struct mlx5_meter_domain_info *dtb,
8445 struct mlx5_flow_tbl_resource *sfx_tb,
8448 struct mlx5_flow_dv_match_params matcher = {
8449 .size = sizeof(matcher.buf),
8451 struct mlx5_flow_dv_match_params value = {
8452 .size = sizeof(value.buf),
8454 struct mlx5_meter_domains_infos *mtb = fm->mfts;
8455 void *actions[METER_ACTIONS];
 /* Create jump action. */
8461 if (!dtb->jump_actn)
8463 mlx5_glue->dr_create_flow_action_dest_flow_tbl
8465 if (!dtb->jump_actn) {
8466 DRV_LOG(ERR, "Failed to create policer jump action.");
 /* One rule per color: optional count, then drop or jump to suffix. */
8469 for (i = 0; i < RTE_MTR_DROPPED; i++) {
8472 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
8473 rte_col_2_mlx5_col(i), UINT32_MAX);
8474 if (mtb->count_actns[i])
8475 actions[j++] = mtb->count_actns[i];
8476 if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
8477 actions[j++] = mtb->drop_actn;
8479 actions[j++] = dtb->jump_actn;
8480 dtb->policer_rules[i] =
8481 mlx5_glue->dv_create_flow(dtb->color_matcher,
8484 if (!dtb->policer_rules[i]) {
8485 DRV_LOG(ERR, "Failed to create policer rule.");
8496 * Create policer rules.
 * NOTE(review): line-sampled listing; elided lines are not shown.
8499 * Pointer to Ethernet device.
8501 * Pointer to flow meter structure.
8503 * Pointer to flow attributes.
8506 * 0 on success, -1 otherwise.
8509 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
8510 struct mlx5_flow_meter *fm,
8511 const struct rte_flow_attr *attr)
8513 struct mlx5_priv *priv = dev->data->dev_private;
8514 struct mlx5_meter_domains_infos *mtb = fm->mfts;
 /* Create forward rules per requested domain (egress guard elided). */
8518 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
8519 priv->sh->tx_mtr_sfx_tbl,
8520 priv->mtr_color_reg);
8522 DRV_LOG(ERR, "Failed to create egress policer.");
8526 if (attr->ingress) {
8527 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
8528 priv->sh->rx_mtr_sfx_tbl,
8529 priv->mtr_color_reg);
8531 DRV_LOG(ERR, "Failed to create ingress policer.");
8535 if (attr->transfer) {
8536 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
8537 priv->sh->fdb_mtr_sfx_tbl,
8538 priv->mtr_color_reg);
8540 DRV_LOG(ERR, "Failed to create transfer policer.");
 /* Error path: destroy any rules created so far. */
8546 flow_dv_destroy_policer_rules(dev, fm, attr);
8551 * Query a devx counter.
 * NOTE(review): line-sampled listing; elided lines are not shown.
8554 * Pointer to the Ethernet device structure.
8556 * Pointer to the flow counter.
8558 * Set to clear the counter statistics.
8560 * The statistics value of packets.
8562 * The statistics value of bytes.
8565 * 0 on success, otherwise return -1.
8568 flow_dv_counter_query(struct rte_eth_dev *dev,
8569 struct mlx5_flow_counter *cnt, bool clear,
8570 uint64_t *pkts, uint64_t *bytes)
8572 struct mlx5_priv *priv = dev->data->dev_private;
8573 uint64_t inn_pkts, inn_bytes;
8576 if (!priv->config.devx)
8578 ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
 /* Report deltas vs. the cached baseline; 'clear' rebases the baseline. */
8581 *pkts = inn_pkts - cnt->hits;
8582 *bytes = inn_bytes - cnt->bytes;
8584 cnt->hits = inn_pkts;
8585 cnt->bytes = inn_bytes;
8591 * Mutex-protected thunk to lock-free __flow_dv_translate().
 * NOTE(review): line-sampled listing; locals/return lines are elided.
8594 flow_dv_translate(struct rte_eth_dev *dev,
8595 struct mlx5_flow *dev_flow,
8596 const struct rte_flow_attr *attr,
8597 const struct rte_flow_item items[],
8598 const struct rte_flow_action actions[],
8599 struct rte_flow_error *error)
8603 flow_dv_shared_lock(dev);
8604 ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
8605 flow_dv_shared_unlock(dev);
/*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int ret;

	flow_dv_shared_lock(dev);
	ret = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return ret;
}
/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static struct mlx5_flow_counter *
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	struct mlx5_flow_counter *cnt;

	flow_dv_shared_lock(dev);
	/* Non-shared (id 0), non-batch counter from group 1. */
	cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
	flow_dv_shared_unlock(dev);
	return cnt;
}
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
{
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}
8672 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
8673 .validate = flow_dv_validate,
8674 .prepare = flow_dv_prepare,
8675 .translate = flow_dv_translate,
8676 .apply = flow_dv_apply,
8677 .remove = flow_dv_remove,
8678 .destroy = flow_dv_destroy,
8679 .query = flow_dv_query,
8680 .create_mtr_tbls = flow_dv_create_mtr_tbl,
8681 .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
8682 .create_policer_rules = flow_dv_create_policer_rules,
8683 .destroy_policer_rules = flow_dv_destroy_policer_rules,
8684 .counter_alloc = flow_dv_counter_allocate,
8685 .counter_free = flow_dv_counter_free,
8686 .counter_query = flow_dv_counter_query,
8689 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */