1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37 #define MLX5_ENCAP_MAX_LEN 132
43 * Pointer to the rte_eth_dev structure.
47 * Attributes of flow that includes this item.
49 * Pointer to error structure.
52 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a META pattern item: matching on metadata requires the
 * DEV_TX_OFFLOAD_MATCH_METADATA Tx offload to be enabled on the port,
 * a non-NULL, non-zero spec, and an egress-only attribute.
 * NOTE(review): this excerpt elides several lines of the original file
 * (the guarding `if (!spec)`, `if (!spec->data)`, `if (!mask)` and
 * `if (attr->ingress)` conditions) — the bare `return` statements below
 * belong to those elided guards. Verify against the full source.
 */
55 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
56 const struct rte_flow_item *item,
57 const struct rte_flow_attr *attr,
58 struct rte_flow_error *error)
59 const struct rte_flow_item_meta *spec = item->spec;
60 const struct rte_flow_item_meta *mask = item->mask;
/* Full 32-bit metadata match is supported by the NIC. */
61 const struct rte_flow_item_meta nic_mask = {
62 .data = RTE_BE32(UINT32_MAX)
63 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
/* Metadata match needs the Tx-side offload turned on. */
64 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
65 return rte_flow_error_set(error, EPERM,
66 RTE_FLOW_ERROR_TYPE_ITEM,
67 "match on metadata offload "
68 "configuration is off for this port");
/* Elided guard: spec must not be NULL. */
69 return rte_flow_error_set(error, EINVAL,
70 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
71 "data cannot be empty");
/* Elided guard: spec->data must be non-zero. */
72 return rte_flow_error_set(error, EINVAL,
73 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
74 "data cannot be zero");
/* Default to the generic META mask when the user supplied none. */
75 mask = &rte_flow_item_meta_mask;
76 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
77 (const uint8_t *)&nic_mask,
78 sizeof(struct rte_flow_item_meta),
/* Elided guard: META is rejected on ingress flows. */
79 return rte_flow_error_set(error, ENOTSUP,
80 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
81 "pattern not supported for ingress");
101 * Validate the L2 encap action.
103 * @param[in] action_flags
104 * Holds the actions detected until now.
106 * Pointer to the encap action.
108 * Pointer to flow attributes
110 * Pointer to error structure.
113 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the L2 (VXLAN) encap action: requires a non-NULL action
 * configuration, forbids combining with DROP, allows at most one
 * encap/decap per flow, and (per the elided trailing guard) rejects
 * ingress attributes. Returns 0 or a negative errno via
 * rte_flow_error_set(). NOTE(review): guard conditions for the first
 * and last returns are elided in this excerpt.
 */
116 flow_dv_validate_action_l2_encap(uint64_t action_flags,
117 const struct rte_flow_action *action,
118 const struct rte_flow_attr *attr,
119 struct rte_flow_error *error)
/* Elided guard: action->conf must not be NULL. */
120 return rte_flow_error_set(error, EINVAL,
121 RTE_FLOW_ERROR_TYPE_ACTION, action,
122 "configuration cannot be null");
123 if (action_flags & MLX5_FLOW_ACTION_DROP)
124 return rte_flow_error_set(error, EINVAL,
125 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
126 "can't drop and encap in same flow");
/* Only a single encap OR decap action is allowed per flow. */
127 if (action_flags & (MLX5_FLOW_ACTION_VXLAN_ENCAP |
128 MLX5_FLOW_ACTION_VXLAN_DECAP))
129 return rte_flow_error_set(error, EINVAL,
130 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
131 "can only have a single encap or"
132 " decap action in a flow");
/* Elided guard: encap is egress-only (ingress attr rejected). */
133 return rte_flow_error_set(error, ENOTSUP,
134 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
135 "encap action not supported for "
145 * Validate the L2 decap action.
147 * @param[in] action_flags
148 * Holds the actions detected until now.
150 * Pointer to flow attributes
152 * Pointer to error structure.
155 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate the L2 (VXLAN) decap action: forbids combining with DROP,
 * allows at most one encap/decap per flow, and (per the elided trailing
 * guard) rejects egress attributes — decap is ingress-only. Returns 0
 * or a negative errno via rte_flow_error_set().
 */
158 flow_dv_validate_action_l2_decap(uint64_t action_flags,
159 const struct rte_flow_attr *attr,
160 struct rte_flow_error *error)
161 if (action_flags & MLX5_FLOW_ACTION_DROP)
162 return rte_flow_error_set(error, EINVAL,
163 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
164 "can't drop and decap in same flow");
/* Only a single encap OR decap action is allowed per flow. */
165 if (action_flags & (MLX5_FLOW_ACTION_VXLAN_ENCAP |
166 MLX5_FLOW_ACTION_VXLAN_DECAP))
167 return rte_flow_error_set(error, EINVAL,
168 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
169 "can only have a single encap or"
170 " decap action in a flow");
/* Elided guard: decap is rejected on egress attributes. */
171 return rte_flow_error_set(error, ENOTSUP,
172 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
173 "decap action not supported for "
182 * Get the size of specific rte_flow_item_type
184 * @param[in] item_type
185 * Tested rte_flow_item_type.
188 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * Map an rte_flow item type to the byte size of its spec structure.
 * Returns 0 for VOID and any type not listed (used by the encap data
 * converter to size header copies). NOTE(review): the `break;` after
 * each case and the final `return retval;` are elided in this excerpt.
 */
191 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
192 case RTE_FLOW_ITEM_TYPE_ETH:
193 retval = sizeof(struct rte_flow_item_eth);
194 case RTE_FLOW_ITEM_TYPE_VLAN:
195 retval = sizeof(struct rte_flow_item_vlan);
196 case RTE_FLOW_ITEM_TYPE_IPV4:
197 retval = sizeof(struct rte_flow_item_ipv4);
198 case RTE_FLOW_ITEM_TYPE_IPV6:
199 retval = sizeof(struct rte_flow_item_ipv6);
200 case RTE_FLOW_ITEM_TYPE_UDP:
201 retval = sizeof(struct rte_flow_item_udp);
202 case RTE_FLOW_ITEM_TYPE_TCP:
203 retval = sizeof(struct rte_flow_item_tcp);
204 case RTE_FLOW_ITEM_TYPE_VXLAN:
205 retval = sizeof(struct rte_flow_item_vxlan);
206 case RTE_FLOW_ITEM_TYPE_GRE:
207 retval = sizeof(struct rte_flow_item_gre);
208 case RTE_FLOW_ITEM_TYPE_NVGRE:
209 retval = sizeof(struct rte_flow_item_nvgre);
210 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
211 retval = sizeof(struct rte_flow_item_vxlan_gpe);
212 case RTE_FLOW_ITEM_TYPE_MPLS:
213 retval = sizeof(struct rte_flow_item_mpls);
214 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/*
 * Default header-field values used when building the raw encapsulation
 * buffer and the corresponding item spec leaves the field as zero
 * (IPv4 version/IHL and TTL, IPv6 version nibble and hop limit, and
 * the VXLAN / VXLAN-GPE flags words).
 */
237 #define MLX5_ENCAP_IPV4_VERSION 0x40
238 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
239 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
240 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
241 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
242 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
243 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
246 * Convert the encap action data from list of rte_flow_item to raw buffer
249 * Pointer to rte_flow_item objects list.
251 * Pointer to the output buffer.
253 * Pointer to the output buffer size.
255 * Pointer to the error structure.
258 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Flatten an encap item list (ETH [VLAN] IPV4|IPV6 UDP VXLAN|VXLAN_GPE)
 * into a raw packet-header byte buffer for the packet-reformat verb.
 * Each item spec is memcpy'd at the running offset; missing protocol /
 * ethertype / version fields are back-patched with sane defaults so the
 * resulting headers chain correctly. Fails with EINVAL on NULL input,
 * overall size above MLX5_ENCAP_MAX_LEN, out-of-order items (e.g. UDP
 * before IP) or unsupported item types.
 * NOTE(review): several guard lines (`if (!items)`, `if (!eth)`,
 * `if (!udp)` etc.) and `break;`s are elided in this excerpt — the
 * lone `return rte_flow_error_set(...)` statements belong to them.
 */
261 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
262 size_t *size, struct rte_flow_error *error)
/* Pointers into `buf` for headers already emitted, used for back-patching. */
263 struct ether_hdr *eth = NULL;
264 struct vlan_hdr *vlan = NULL;
265 struct ipv4_hdr *ipv4 = NULL;
266 struct ipv6_hdr *ipv6 = NULL;
267 struct udp_hdr *udp = NULL;
268 struct vxlan_hdr *vxlan = NULL;
269 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
270 size_t temp_size = 0;
/* Elided guard: NULL item list. */
271 return rte_flow_error_set(error, EINVAL,
272 RTE_FLOW_ERROR_TYPE_ACTION,
273 NULL, "invalid empty data");
274 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
275 len = flow_dv_get_item_len(items->type);
276 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
277 return rte_flow_error_set(error, EINVAL,
278 RTE_FLOW_ERROR_TYPE_ACTION,
279 "items total size is too big"
280 " for encap action");
/* Copy the item spec verbatim, then fix it up per header type below. */
281 rte_memcpy((void *)&buf[temp_size], items->spec, len);
282 switch (items->type) {
283 case RTE_FLOW_ITEM_TYPE_ETH:
284 eth = (struct ether_hdr *)&buf[temp_size];
285 case RTE_FLOW_ITEM_TYPE_VLAN:
286 vlan = (struct vlan_hdr *)&buf[temp_size];
/* Elided guard: VLAN must follow an ETH header. */
287 return rte_flow_error_set(error, EINVAL,
288 RTE_FLOW_ERROR_TYPE_ACTION,
289 "eth header not found");
290 if (!eth->ether_type)
291 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
292 case RTE_FLOW_ITEM_TYPE_IPV4:
293 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
/* Elided guard: IPv4 must follow ETH or VLAN. */
294 return rte_flow_error_set(error, EINVAL,
295 RTE_FLOW_ERROR_TYPE_ACTION,
296 "neither eth nor vlan"
/* Back-patch the L2 ethertype if the user left it zero. */
297 if (vlan && !vlan->eth_proto)
298 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
299 else if (eth && !eth->ether_type)
300 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
/* Default version/IHL and TTL when unset. */
301 if (!ipv4->version_ihl)
302 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
303 MLX5_ENCAP_IPV4_IHL_MIN;
304 if (!ipv4->time_to_live)
305 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
306 case RTE_FLOW_ITEM_TYPE_IPV6:
307 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
/* Elided guard: IPv6 must follow ETH or VLAN. */
308 return rte_flow_error_set(error, EINVAL,
309 RTE_FLOW_ERROR_TYPE_ACTION,
310 "neither eth nor vlan"
311 if (vlan && !vlan->eth_proto)
312 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
313 else if (eth && !eth->ether_type)
314 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
/* Elided: default vtc_flow to the IPv6 version nibble when unset. */
315 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
316 if (!ipv6->hop_limits)
317 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
318 case RTE_FLOW_ITEM_TYPE_UDP:
319 udp = (struct udp_hdr *)&buf[temp_size];
/* Elided guard: UDP must follow an IP header. */
320 return rte_flow_error_set(error, EINVAL,
321 RTE_FLOW_ERROR_TYPE_ACTION,
322 "ip header not found");
/* Back-patch the IP next-protocol field to UDP when unset. */
323 if (ipv4 && !ipv4->next_proto_id)
324 ipv4->next_proto_id = IPPROTO_UDP;
325 else if (ipv6 && !ipv6->proto)
326 ipv6->proto = IPPROTO_UDP;
327 case RTE_FLOW_ITEM_TYPE_VXLAN:
328 vxlan = (struct vxlan_hdr *)&buf[temp_size];
/* Elided guard: VXLAN must follow a UDP header. */
329 return rte_flow_error_set(error, EINVAL,
330 RTE_FLOW_ERROR_TYPE_ACTION,
331 "udp header not found");
/* Elided condition: default dst port to the VXLAN well-known port. */
332 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
333 if (!vxlan->vx_flags)
334 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
335 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
336 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
/* Elided guard: VXLAN-GPE must follow a UDP header. */
337 return rte_flow_error_set(error, EINVAL,
338 RTE_FLOW_ERROR_TYPE_ACTION,
339 "udp header not found");
/* GPE carries the next-protocol in the header; it is mandatory. */
340 if (!vxlan_gpe->proto)
341 return rte_flow_error_set(error, EINVAL,
342 RTE_FLOW_ERROR_TYPE_ACTION,
343 "next protocol not found");
344 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
345 if (!vxlan_gpe->vx_flags)
346 vxlan_gpe->vx_flags =
347 MLX5_ENCAP_VXLAN_GPE_FLAGS;
348 case RTE_FLOW_ITEM_TYPE_VOID:
/* default: any other item type is rejected. */
349 return rte_flow_error_set(error, EINVAL,
350 RTE_FLOW_ERROR_TYPE_ACTION,
351 "unsupported item type");
397 * Convert L2 encap action to DV specification.
400 * Pointer to rte_eth_dev structure.
402 * Pointer to action structure.
404 * Pointer to the error structure.
407 * Pointer to action on success, NULL otherwise and rte_errno is set.
/*
 * Build the DV packet-reformat (L2-to-L2-tunnel, NIC Tx) verbs action
 * for a VXLAN encap: converts the action's item list into a raw header
 * buffer via flow_dv_convert_encap_data(), then creates the action
 * through the glue layer. Returns the action pointer, or NULL with
 * rte_errno/error set. NOTE(review): the convert-result check and the
 * NULL-return check on the glue call are elided in this excerpt.
 */
409 static struct ibv_flow_action *
410 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
411 const struct rte_flow_action *action,
412 struct rte_flow_error *error)
413 struct ibv_flow_action *verbs_action = NULL;
414 const struct rte_flow_item *encap_data;
415 struct priv *priv = dev->data->dev_private;
/* Raw header buffer; bounded by MLX5_ENCAP_MAX_LEN. */
416 uint8_t buf[MLX5_ENCAP_MAX_LEN];
417 int convert_result = 0;
/* VXLAN encap conf carries the header item list in `definition`. */
418 encap_data = ((const struct rte_flow_action_vxlan_encap *)
419 action->conf)->definition;
420 convert_result = flow_dv_convert_encap_data(encap_data, buf,
421 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
422 (priv->ctx, size, buf,
423 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
424 MLX5DV_FLOW_TABLE_TYPE_NIC_TX);
/* Elided guard: report failure when the glue call returned NULL. */
425 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
426 NULL, "cannot create L2 encap action");
438 * Convert L2 decap action to DV specification.
441 * Pointer to rte_eth_dev structure.
443 * Pointer to the error structure.
446 * Pointer to action on success, NULL otherwise and rte_errno is set.
/*
 * Build the DV packet-reformat (L2-tunnel-to-L2, NIC Rx) verbs action
 * for a VXLAN decap. No header buffer is needed for decap. Returns the
 * action pointer, or NULL with error set. NOTE(review): the NULL-return
 * check on the glue call is elided in this excerpt.
 */
448 static struct ibv_flow_action *
449 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
450 struct rte_flow_error *error)
451 struct ibv_flow_action *verbs_action = NULL;
452 struct priv *priv = dev->data->dev_private;
453 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
454 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
455 MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
/* Elided guard: report failure when the glue call returned NULL. */
456 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
457 NULL, "cannot create L2 decap action");
466 * Verify the @p attributes will be correctly understood by the NIC and store
467 * them in the @p flow if everything is correct.
470 * Pointer to dev struct.
471 * @param[in] attributes
472 * Pointer to flow attributes
474 * Pointer to error structure.
477 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate flow attributes for the DV engine: groups and transfer are
 * unsupported, priority must be below the configured maximum (unless
 * it is the reserved MLX5_FLOW_PRIO_RSVD), and exactly one of
 * ingress/egress must be set. Returns 0 or a negative errno via
 * rte_flow_error_set().
 */
480 flow_dv_validate_attributes(struct rte_eth_dev *dev,
481 const struct rte_flow_attr *attributes,
482 struct rte_flow_error *error)
483 struct priv *priv = dev->data->dev_private;
/* Highest usable priority is one below the configured flow_prio. */
484 uint32_t priority_max = priv->config.flow_prio - 1;
485 if (attributes->group)
486 return rte_flow_error_set(error, ENOTSUP,
487 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
488 "groups is not supported");
489 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
490 attributes->priority >= priority_max)
491 return rte_flow_error_set(error, ENOTSUP,
492 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
493 "priority out of range");
494 if (attributes->transfer)
495 return rte_flow_error_set(error, ENOTSUP,
496 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
497 "transfer is not supported");
/* XOR: exactly one direction bit must be set. */
498 if (!(attributes->egress ^ attributes->ingress))
499 return rte_flow_error_set(error, ENOTSUP,
500 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
501 "must specify exactly one of "
502 "ingress or egress");
512 * Internal validation function. For validating both actions and items.
515 * Pointer to the rte_eth_dev structure.
517 * Pointer to the flow attributes.
519 * Pointer to the list of items.
521 * Pointer to the list of actions.
523 * Pointer to the error structure.
526 * 0 on success, a negative errno value otherwise and rte_ernno is set.
/*
 * DV engine entry point for rte_flow validation. Checks attributes,
 * then walks the item list (accumulating layer bits in `item_flags`
 * and tracking `next_protocol` for tunnel validation) and the action
 * list (accumulating `action_flags`, bounded by
 * MLX5_DV_MAX_NUMBER_OF_ACTIONS). Finally requires a fate action on
 * ingress flows. Returns 0 or a negative errno via rte_flow_error_set().
 * NOTE(review): this excerpt elides many `if (ret < 0) return ret;`
 * checks after the per-item/per-action validators, the `break;`
 * statements, some validator arguments, and `actions_n` bookkeeping.
 */
529 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
530 const struct rte_flow_item items[],
531 const struct rte_flow_action actions[],
532 struct rte_flow_error *error)
533 uint64_t action_flags = 0;
534 uint64_t item_flags = 0;
/* 0xff means "no L3 next-protocol constraint seen yet". */
535 uint8_t next_protocol = 0xff;
536 ret = flow_dv_validate_attributes(dev, attr, error);
537 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* Once any tunnel layer bit is set, later layers are inner. */
538 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
539 switch (items->type) {
540 case RTE_FLOW_ITEM_TYPE_VOID:
541 case RTE_FLOW_ITEM_TYPE_ETH:
542 ret = mlx5_flow_validate_item_eth(items, item_flags,
543 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
544 MLX5_FLOW_LAYER_OUTER_L2;
545 case RTE_FLOW_ITEM_TYPE_VLAN:
546 ret = mlx5_flow_validate_item_vlan(items, item_flags,
547 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
548 MLX5_FLOW_LAYER_OUTER_VLAN;
549 case RTE_FLOW_ITEM_TYPE_IPV4:
550 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
551 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
552 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Record next_proto_id when the mask selects it (for GRE checks). */
553 if (items->mask != NULL &&
554 ((const struct rte_flow_item_ipv4 *)
555 items->mask)->hdr.next_proto_id)
556 ((const struct rte_flow_item_ipv4 *)
557 (items->spec))->hdr.next_proto_id;
558 case RTE_FLOW_ITEM_TYPE_IPV6:
559 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
560 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
561 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* Same next-protocol tracking for IPv6. */
562 if (items->mask != NULL &&
563 ((const struct rte_flow_item_ipv6 *)
564 items->mask)->hdr.proto)
565 ((const struct rte_flow_item_ipv6 *)
566 items->spec)->hdr.proto;
567 case RTE_FLOW_ITEM_TYPE_TCP:
568 ret = mlx5_flow_validate_item_tcp
569 &rte_flow_item_tcp_mask,
570 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
571 MLX5_FLOW_LAYER_OUTER_L4_TCP;
572 case RTE_FLOW_ITEM_TYPE_UDP:
573 ret = mlx5_flow_validate_item_udp(items, item_flags,
574 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
575 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* GRE and NVGRE share the GRE validator and layer bit. */
576 case RTE_FLOW_ITEM_TYPE_GRE:
577 case RTE_FLOW_ITEM_TYPE_NVGRE:
578 ret = mlx5_flow_validate_item_gre(items, item_flags,
579 next_protocol, error);
580 item_flags |= MLX5_FLOW_LAYER_GRE;
581 case RTE_FLOW_ITEM_TYPE_VXLAN:
582 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
583 item_flags |= MLX5_FLOW_LAYER_VXLAN;
584 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
585 ret = mlx5_flow_validate_item_vxlan_gpe(items,
586 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
587 case RTE_FLOW_ITEM_TYPE_META:
588 ret = flow_dv_validate_item_meta(dev, items, attr,
589 item_flags |= MLX5_FLOW_ITEM_METADATA;
/* default: unknown item type. */
590 return rte_flow_error_set(error, ENOTSUP,
591 RTE_FLOW_ERROR_TYPE_ITEM,
592 NULL, "item not supported");
593 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
594 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
595 return rte_flow_error_set(error, ENOTSUP,
596 RTE_FLOW_ERROR_TYPE_ACTION,
597 actions, "too many actions");
598 switch (actions->type) {
599 case RTE_FLOW_ACTION_TYPE_VOID:
600 case RTE_FLOW_ACTION_TYPE_FLAG:
601 ret = mlx5_flow_validate_action_flag(action_flags,
602 action_flags |= MLX5_FLOW_ACTION_FLAG;
603 case RTE_FLOW_ACTION_TYPE_MARK:
604 ret = mlx5_flow_validate_action_mark(actions,
605 action_flags |= MLX5_FLOW_ACTION_MARK;
606 case RTE_FLOW_ACTION_TYPE_DROP:
607 ret = mlx5_flow_validate_action_drop(action_flags,
608 action_flags |= MLX5_FLOW_ACTION_DROP;
609 case RTE_FLOW_ACTION_TYPE_QUEUE:
610 ret = mlx5_flow_validate_action_queue(actions,
611 action_flags |= MLX5_FLOW_ACTION_QUEUE;
612 case RTE_FLOW_ACTION_TYPE_RSS:
613 ret = mlx5_flow_validate_action_rss(actions,
614 action_flags |= MLX5_FLOW_ACTION_RSS;
615 case RTE_FLOW_ACTION_TYPE_COUNT:
616 ret = mlx5_flow_validate_action_count(dev, attr, error);
617 action_flags |= MLX5_FLOW_ACTION_COUNT;
618 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
619 ret = flow_dv_validate_action_l2_encap(action_flags,
620 action_flags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
621 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
622 ret = flow_dv_validate_action_l2_decap(action_flags,
623 action_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
/* default: unknown action type. */
624 return rte_flow_error_set(error, ENOTSUP,
625 RTE_FLOW_ERROR_TYPE_ACTION,
626 "action not supported");
/* Ingress flows must terminate in a fate action (queue/rss/drop/...). */
627 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
628 return rte_flow_error_set(error, EINVAL,
629 RTE_FLOW_ERROR_TYPE_ACTION, actions,
630 "no fate action is found");
741 * Internal preparation function. Allocates the DV flow size,
742 * this size is constant.
745 * Pointer to the flow attributes.
747 * Pointer to the list of items.
749 * Pointer to the list of actions.
750 * @param[out] item_flags
751 * Pointer to bit mask of all items detected.
752 * @param[out] action_flags
753 * Pointer to bit mask of all actions detected.
755 * Pointer to the error structure.
758 * Pointer to mlx5_flow object on success,
759 * otherwise NULL and rte_ernno is set.
/*
 * Allocate a zeroed mlx5_flow object for the DV path. The DV flow size
 * is constant, so all list parameters are unused here; only the match
 * value size is initialized. Returns the flow, or NULL with rte_errno
 * set to ENOMEM. NOTE(review): the NULL check guarding the error path
 * and the final `return flow;` are elided in this excerpt.
 */
761 static struct mlx5_flow *
762 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
763 const struct rte_flow_item items[] __rte_unused,
764 const struct rte_flow_action actions[] __rte_unused,
765 uint64_t *item_flags __rte_unused,
766 uint64_t *action_flags __rte_unused,
767 struct rte_flow_error *error)
768 uint32_t size = sizeof(struct mlx5_flow);
769 struct mlx5_flow *flow;
770 flow = rte_calloc(__func__, 1, size, 0);
/* Elided guard: allocation failure path. */
771 rte_flow_error_set(error, ENOMEM,
772 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
773 "not enough memory to create flow");
/* Match value is always the full fte_match_param size. */
774 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
784 * Add Ethernet item to matcher and to the value.
786 * @param[in, out] matcher
788 * @param[in, out] key
789 * Flow matcher value.
791 * Flow pattern to translate.
793 * Item is inner pattern.
/*
 * Translate an ETH item into the DV matcher mask and match value:
 * dst/src MAC and ethertype. Mask bytes are written to the matcher;
 * value bytes are ANDed with the mask so the value always lies within
 * it. NOTE(review): the NULL-spec early return, the `eth_m`/`eth_v`
 * defaulting against `nic_mask`, and local declarations are elided in
 * this excerpt. Also note line 834 iterates `sizeof(eth_m->dst)` while
 * copying `src` bytes — same size for MAC addresses, but worth
 * confirming against the full source.
 */
796 flow_dv_translate_item_eth(void *matcher, void *key,
797 const struct rte_flow_item *item, int inner)
798 const struct rte_flow_item_eth *eth_m = item->mask;
799 const struct rte_flow_item_eth *eth_v = item->spec;
/* Widest match the NIC supports for an ETH item. */
800 const struct rte_flow_item_eth nic_mask = {
801 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
802 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
803 .type = RTE_BE16(0xffff),
/* Select inner vs outer header sections of fte_match_param. */
804 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
805 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
806 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
807 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
808 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
809 &eth_m->dst, sizeof(eth_m->dst));
810 /* The value must be in the range of the mask. */
811 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
812 for (i = 0; i < sizeof(eth_m->dst); ++i)
813 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
814 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
815 &eth_m->src, sizeof(eth_m->src));
816 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
817 /* The value must be in the range of the mask. */
818 for (i = 0; i < sizeof(eth_m->dst); ++i)
819 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
820 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
821 rte_be_to_cpu_16(eth_m->type));
/* Ethertype value is stored raw (big-endian) in the match key. */
822 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
823 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
843 * Add VLAN item to matcher and to the value.
845 * @param[in, out] matcher
847 * @param[in, out] key
848 * Flow matcher value.
850 * Flow pattern to translate.
852 * Item is inner pattern.
/*
 * Translate a VLAN item: always forces cvlan_tag=1 in both mask and
 * value, then splits the TCI into VID (bits 0-11), CFI (bit 12) and
 * PCP (bits 13-15) fields. NOTE(review): NULL-spec handling and
 * mask defaulting against `nic_mask` are elided in this excerpt.
 */
855 flow_dv_translate_item_vlan(void *matcher, void *key,
856 const struct rte_flow_item *item,
857 const struct rte_flow_item_vlan *vlan_m = item->mask;
858 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* NIC matches the 12-bit VID plus full inner ethertype. */
859 const struct rte_flow_item_vlan nic_mask = {
860 .tci = RTE_BE16(0x0fff),
861 .inner_type = RTE_BE16(0xffff),
/* Select inner vs outer header sections. */
862 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
863 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
864 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
865 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Work in host byte order; value is pre-ANDed with the mask. */
866 tci_m = rte_be_to_cpu_16(vlan_m->tci);
867 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
868 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
869 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
870 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
871 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
872 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
873 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
874 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
875 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
896 * Add IPV4 item to matcher and to the value.
898 * @param[in, out] matcher
900 * @param[in, out] key
901 * Flow matcher value.
903 * Flow pattern to translate.
905 * Item is inner pattern.
/*
 * Translate an IPV4 item: fixes ip_version to 4, then fills dst/src
 * addresses, TOS (split into ECN/DSCP fields) and next protocol into
 * the matcher/value. All values are pre-ANDed with the mask.
 * NOTE(review): the NULL-spec early return and mask defaulting to
 * `nic_mask` are elided in this excerpt.
 */
908 flow_dv_translate_item_ipv4(void *matcher, void *key,
909 const struct rte_flow_item *item,
910 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
911 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Widest IPv4 match the NIC supports. */
912 const struct rte_flow_item_ipv4 nic_mask = {
913 .src_addr = RTE_BE32(0xffffffff),
914 .dst_addr = RTE_BE32(0xffffffff),
915 .type_of_service = 0xff,
916 .next_proto_id = 0xff,
/* Select inner vs outer header sections. */
917 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
918 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
919 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
920 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the IP version field: mask all 4 bits, value = 4. */
921 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
922 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
923 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
924 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
925 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
926 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
927 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
928 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
929 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
930 src_ipv4_src_ipv6.ipv4_layout.ipv4);
931 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
932 src_ipv4_src_ipv6.ipv4_layout.ipv4);
933 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
934 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte is split: low 2 bits -> ip_ecn, upper 6 -> ip_dscp. */
935 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
936 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
937 ipv4_m->hdr.type_of_service);
938 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
939 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
940 ipv4_m->hdr.type_of_service >> 2);
941 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
942 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
943 ipv4_m->hdr.next_proto_id);
944 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
945 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
969 * Add IPV6 item to matcher and to the value.
971 * @param[in, out] matcher
973 * @param[in, out] key
974 * Flow matcher value.
976 * Flow pattern to translate.
978 * Item is inner pattern.
/*
 * Translate an IPV6 item: fixes ip_version to 6, fills dst/src
 * 16-byte addresses (value bytes ANDed with the mask), splits
 * vtc_flow into ECN/DSCP plus the flow label (stored in the misc
 * section, inner vs outer field chosen by `inner`), and sets the
 * next-header protocol. NOTE(review): NULL-spec handling, mask
 * defaulting to `nic_mask`, local declarations, and the flow-label
 * shift/mask expressions are elided in this excerpt.
 */
981 flow_dv_translate_item_ipv6(void *matcher, void *key,
982 const struct rte_flow_item *item,
983 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
984 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest IPv6 match the NIC supports. */
985 const struct rte_flow_item_ipv6 nic_mask = {
986 "\xff\xff\xff\xff\xff\xff\xff\xff"
987 "\xff\xff\xff\xff\xff\xff\xff\xff",
988 "\xff\xff\xff\xff\xff\xff\xff\xff"
989 "\xff\xff\xff\xff\xff\xff\xff\xff",
990 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in the misc section, not the L2-L4 headers. */
991 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
992 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sections. */
993 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
994 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
995 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
996 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the IP version field: mask all 4 bits, value = 6. */
997 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
998 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
999 size = sizeof(ipv6_m->hdr.dst_addr);
1000 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1001 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1002 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1003 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1004 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
/* Value must lie within the mask: AND byte by byte. */
1005 for (i = 0; i < size; ++i)
1006 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
1007 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1008 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1009 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1010 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1011 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
1012 for (i = 0; i < size; ++i)
1013 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow: bits 20+ carry TC (ECN at >>20, DSCP at >>22). */
1014 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
1015 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
1016 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
1017 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
1018 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
1019 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label: inner vs outer misc field (elided condition). */
1020 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
1021 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
1022 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
1023 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
1024 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1025 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1026 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
1068 * Add TCP item to matcher and to the value.
1070 * @param[in, out] matcher
1072 * @param[in, out] key
1073 * Flow matcher value.
1075 * Flow pattern to translate.
1077 * Item is inner pattern.
/*
 * Translate a TCP item: pins ip_protocol to IPPROTO_TCP (full mask),
 * then matches source and destination ports, ANDing the value with the
 * mask. NOTE(review): the NULL-spec early return and the `if (!tcp_m)`
 * guard before the default-mask assignment are elided in this excerpt.
 */
1080 flow_dv_translate_item_tcp(void *matcher, void *key,
1081 const struct rte_flow_item *item,
1082 const struct rte_flow_item_tcp *tcp_m = item->mask;
1083 const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs outer header sections. */
1084 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1085 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1086 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1087 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Implied L3 constraint: next protocol must be TCP. */
1088 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1089 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Elided guard: default mask when user supplied none. */
1090 tcp_m = &rte_flow_item_tcp_mask;
1091 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
1092 rte_be_to_cpu_16(tcp_m->hdr.src_port));
1093 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
1094 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
1095 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
1096 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
1097 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
1098 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
1115 * Add UDP item to matcher and to the value.
1117 * @param[in, out] matcher
1119 * @param[in, out] key
1120 * Flow matcher value.
1122 * Flow pattern to translate.
1124 * Item is inner pattern.
/*
 * Translate a UDP item: pins ip_protocol to IPPROTO_UDP (full mask),
 * then matches source and destination ports, ANDing the value with the
 * mask. Mirrors flow_dv_translate_item_tcp(). NOTE(review): the
 * NULL-spec early return and the `if (!udp_m)` guard before the
 * default-mask assignment are elided in this excerpt.
 */
1127 flow_dv_translate_item_udp(void *matcher, void *key,
1128 const struct rte_flow_item *item,
1129 const struct rte_flow_item_udp *udp_m = item->mask;
1130 const struct rte_flow_item_udp *udp_v = item->spec;
/* Select inner vs outer header sections. */
1131 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1132 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1133 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1134 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Implied L3 constraint: next protocol must be UDP. */
1135 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1136 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Elided guard: default mask when user supplied none. */
1137 udp_m = &rte_flow_item_udp_mask;
1138 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
1139 rte_be_to_cpu_16(udp_m->hdr.src_port));
1140 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
1141 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
1142 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
1143 rte_be_to_cpu_16(udp_m->hdr.dst_port));
1144 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1145 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
1162 * Add GRE item to matcher and to the value.
1164 * @param[in, out] matcher
1166 * @param[in, out] key
1167 * Flow matcher value.
1169 * Flow pattern to translate.
1171 * Item is inner pattern.
/*
 * Translate a GRE item: pins ip_protocol to IPPROTO_GRE (full mask) in
 * the header section, then matches the GRE protocol field in the misc
 * section, ANDing the value with the mask. NOTE(review): the NULL-spec
 * early return and the `if (!gre_m)` guard before the default-mask
 * assignment are elided in this excerpt.
 */
1174 flow_dv_translate_item_gre(void *matcher, void *key,
1175 const struct rte_flow_item *item,
1176 const struct rte_flow_item_gre *gre_m = item->mask;
1177 const struct rte_flow_item_gre *gre_v = item->spec;
/* GRE fields live in the misc section. */
1178 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1179 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sections. */
1180 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1181 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1182 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1183 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Implied L3 constraint: next protocol must be GRE. */
1184 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1185 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Elided guard: default mask when user supplied none. */
1186 gre_m = &rte_flow_item_gre_mask;
1187 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
1188 rte_be_to_cpu_16(gre_m->protocol));
1189 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1190 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
1207 * Add NVGRE item to matcher and to the value.
1209 * @param[in, out] matcher
1211 * @param[in, out] key
1212 * Flow matcher value.
1214 * Flow pattern to translate.
1216 * Item is inner pattern.
/*
 * Translate an NVGRE item: reuses the GRE translation for the protocol
 * fields, then copies the TNI + flow_id bytes into the GRE key area
 * (gre_key_h), ANDing the value with the mask byte by byte.
 * NOTE(review): the NULL-spec early return and the `if (!nvgre_m)`
 * guard before the default-mask assignment are elided in this excerpt.
 */
1219 flow_dv_translate_item_nvgre(void *matcher, void *key,
1220 const struct rte_flow_item *item,
1221 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
1222 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
1223 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1224 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* TNI (+ adjacent flow_id) is treated as one contiguous byte run. */
1225 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
1226 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* NVGRE shares GRE's ip_protocol / gre_protocol handling. */
1227 flow_dv_translate_item_gre(matcher, key, item, inner);
/* Elided guard: default mask when user supplied none. */
1228 nvgre_m = &rte_flow_item_nvgre_mask;
1229 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
1230 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
1231 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
1232 memcpy(gre_key_m, tni_flow_id_m, size);
1233 for (i = 0; i < size; ++i)
1234 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
1248 * Add VXLAN item to matcher and to the value.
1250 * @param[in, out] matcher
1252 * @param[in, out] key
1253 * Flow matcher value.
1255 * Flow pattern to translate.
1257 * Item is inner pattern.
/*
 * Translate a VXLAN / VXLAN-GPE item: if no UDP destination port has
 * been matched yet, pins it to the well-known port for the tunnel type
 * (4789 for VXLAN, 4790 for GPE), then matches the 3-byte VNI in the
 * misc section with value ANDed against the mask. NOTE(review): the
 * NULL-spec early return and the `if (!vxlan_m)` guard before the
 * default-mask assignment are elided in this excerpt.
 */
1260 flow_dv_translate_item_vxlan(void *matcher, void *key,
1261 const struct rte_flow_item *item,
1262 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
1263 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
/* VNI lives in the misc section. */
1264 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1265 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sections. */
1266 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1267 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1268 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1269 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port by tunnel flavor. */
1270 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
1271 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only force the port if the UDP item didn't already constrain it. */
1272 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
1273 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
1274 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Elided guard: default mask when user supplied none. */
1275 vxlan_m = &rte_flow_item_vxlan_mask;
1276 size = sizeof(vxlan_m->vni);
1277 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
1278 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
1279 memcpy(vni_m, vxlan_m->vni, size);
1280 for (i = 0; i < size; ++i)
1281 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
1304 * Add META item to matcher
1306 * @param[in, out] matcher
1308 * @param[in, out] key
1309 * Flow matcher value.
1311 * Flow pattern to translate.
1313 * Item is inner pattern.
1316 flow_dv_translate_item_meta(void *matcher, void *key,
1317 const struct rte_flow_item *item)
1319 const struct rte_flow_item_meta *meta_m;
1320 const struct rte_flow_item_meta *meta_v;
1322 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
1324 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1326 meta_m = (const void *)item->mask;
1328 meta_m = &rte_flow_item_meta_mask;
1329 meta_v = (const void *)item->spec;
1331 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
1332 rte_be_to_cpu_32(meta_m->data));
1333 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
1334 rte_be_to_cpu_32(meta_v->data & meta_m->data));
1339 * Update the matcher and the value based the selected item.
1341 * @param[in, out] matcher
1343 * @param[in, out] key
1344 * Flow matcher value.
1346 * Flow pattern to translate.
1347 * @param[in, out] dev_flow
1348 * Pointer to the mlx5_flow.
1350 * Item is inner pattern.
1353 flow_dv_create_item(void *matcher, void *key,
1354 const struct rte_flow_item *item,
1355 struct mlx5_flow *dev_flow,
1358 struct mlx5_flow_dv_matcher *tmatcher = matcher;
1360 switch (item->type) {
1361 case RTE_FLOW_ITEM_TYPE_ETH:
1362 flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
1364 tmatcher->priority = MLX5_PRIORITY_MAP_L2;
1366 case RTE_FLOW_ITEM_TYPE_VLAN:
1367 flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
1370 case RTE_FLOW_ITEM_TYPE_IPV4:
1371 flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
1373 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
1374 dev_flow->dv.hash_fields |=
1375 mlx5_flow_hashfields_adjust(dev_flow, inner,
1376 MLX5_IPV4_LAYER_TYPES,
1377 MLX5_IPV4_IBV_RX_HASH);
1379 case RTE_FLOW_ITEM_TYPE_IPV6:
1380 flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
1382 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
1383 dev_flow->dv.hash_fields |=
1384 mlx5_flow_hashfields_adjust(dev_flow, inner,
1385 MLX5_IPV6_LAYER_TYPES,
1386 MLX5_IPV6_IBV_RX_HASH);
1388 case RTE_FLOW_ITEM_TYPE_TCP:
1389 flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
1391 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
1392 dev_flow->dv.hash_fields |=
1393 mlx5_flow_hashfields_adjust(dev_flow, inner,
1395 (IBV_RX_HASH_SRC_PORT_TCP |
1396 IBV_RX_HASH_DST_PORT_TCP));
1398 case RTE_FLOW_ITEM_TYPE_UDP:
1399 flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
1401 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
1402 dev_flow->verbs.hash_fields |=
1403 mlx5_flow_hashfields_adjust(dev_flow, inner,
1405 (IBV_RX_HASH_SRC_PORT_UDP |
1406 IBV_RX_HASH_DST_PORT_UDP));
1408 case RTE_FLOW_ITEM_TYPE_GRE:
1409 flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
1412 case RTE_FLOW_ITEM_TYPE_NVGRE:
1413 flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
1416 case RTE_FLOW_ITEM_TYPE_VXLAN:
1417 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1418 flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
1421 case RTE_FLOW_ITEM_TYPE_META:
1422 flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
1430 * Store the requested actions in an array.
1433 * Pointer to rte_eth_dev structure.
1435 * Flow action to translate.
1436 * @param[in, out] dev_flow
1437 * Pointer to the mlx5_flow.
1439 * Pointer to the error structure.
1442 * 0 on success, a negative errno value otherwise and rte_errno is set.
1445 flow_dv_create_action(struct rte_eth_dev *dev,
1446 const struct rte_flow_action *action,
1447 struct mlx5_flow *dev_flow,
1448 struct rte_flow_error *error)
1450 const struct rte_flow_action_queue *queue;
1451 const struct rte_flow_action_rss *rss;
1452 int actions_n = dev_flow->dv.actions_n;
1453 struct rte_flow *flow = dev_flow->flow;
1455 switch (action->type) {
1456 case RTE_FLOW_ACTION_TYPE_VOID:
1458 case RTE_FLOW_ACTION_TYPE_FLAG:
1459 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1460 dev_flow->dv.actions[actions_n].tag_value =
1461 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
1463 flow->actions |= MLX5_FLOW_ACTION_FLAG;
1465 case RTE_FLOW_ACTION_TYPE_MARK:
1466 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1467 dev_flow->dv.actions[actions_n].tag_value =
1469 (((const struct rte_flow_action_mark *)
1470 (action->conf))->id);
1471 flow->actions |= MLX5_FLOW_ACTION_MARK;
1474 case RTE_FLOW_ACTION_TYPE_DROP:
1475 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
1476 flow->actions |= MLX5_FLOW_ACTION_DROP;
1478 case RTE_FLOW_ACTION_TYPE_QUEUE:
1479 queue = action->conf;
1480 flow->rss.queue_num = 1;
1481 (*flow->queue)[0] = queue->index;
1482 flow->actions |= MLX5_FLOW_ACTION_QUEUE;
1484 case RTE_FLOW_ACTION_TYPE_RSS:
1487 memcpy((*flow->queue), rss->queue,
1488 rss->queue_num * sizeof(uint16_t));
1489 flow->rss.queue_num = rss->queue_num;
1490 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
1491 flow->rss.types = rss->types;
1492 flow->rss.level = rss->level;
1493 /* Added to array only in apply since we need the QP */
1494 flow->actions |= MLX5_FLOW_ACTION_RSS;
1496 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1497 dev_flow->dv.actions[actions_n].type =
1498 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1499 dev_flow->dv.actions[actions_n].action =
1500 flow_dv_create_action_l2_encap(dev, action,
1502 if (!(dev_flow->dv.actions[actions_n].action))
1504 dev_flow->dv.encap_decap_verbs_action =
1505 dev_flow->dv.actions[actions_n].action;
1506 flow->actions |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
1509 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1510 dev_flow->dv.actions[actions_n].type =
1511 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1512 dev_flow->dv.actions[actions_n].action =
1513 flow_dv_create_action_l2_decap(dev, error);
1514 if (!(dev_flow->dv.actions[actions_n].action))
1516 dev_flow->dv.encap_decap_verbs_action =
1517 dev_flow->dv.actions[actions_n].action;
1518 flow->actions |= MLX5_FLOW_ACTION_VXLAN_DECAP;
1524 dev_flow->dv.actions_n = actions_n;
1528 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
1530 #define HEADER_IS_ZERO(match_criteria, headers) \
1531 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1532 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
1535 * Calculate flow matcher enable bitmap.
1537 * @param match_criteria
1538 * Pointer to flow matcher criteria.
1541 * Bitmap of enabled fields.
1544 flow_dv_matcher_enable(uint32_t *match_criteria)
1546 uint8_t match_criteria_enable;
1548 match_criteria_enable =
1549 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1550 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
1551 match_criteria_enable |=
1552 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1553 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
1554 match_criteria_enable |=
1555 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1556 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
1557 match_criteria_enable |=
1558 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
1559 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
1561 return match_criteria_enable;
1565 * Register the flow matcher.
1567 * @param dev[in, out]
1568 * Pointer to rte_eth_dev structure.
1569 * @param[in, out] matcher
1570 * Pointer to flow matcher.
1571 * @parm[in, out] dev_flow
1572 * Pointer to the dev_flow.
1574 * pointer to error structure.
1577 * 0 on success otherwise -errno and errno is set.
1580 flow_dv_matcher_register(struct rte_eth_dev *dev,
1581 struct mlx5_flow_dv_matcher *matcher,
1582 struct mlx5_flow *dev_flow,
1583 struct rte_flow_error *error)
1585 struct priv *priv = dev->data->dev_private;
1586 struct mlx5_flow_dv_matcher *cache_matcher;
1587 struct mlx5dv_flow_matcher_attr dv_attr = {
1588 .type = IBV_FLOW_ATTR_NORMAL,
1589 .match_mask = (void *)&matcher->mask,
1592 /* Lookup from cache. */
1593 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
1594 if (matcher->crc == cache_matcher->crc &&
1595 matcher->priority == cache_matcher->priority &&
1596 matcher->egress == cache_matcher->egress &&
1597 !memcmp((const void *)matcher->mask.buf,
1598 (const void *)cache_matcher->mask.buf,
1599 cache_matcher->mask.size)) {
1601 "priority %hd use %s matcher %p: refcnt %d++",
1602 cache_matcher->priority,
1603 cache_matcher->egress ? "tx" : "rx",
1604 (void *)cache_matcher,
1605 rte_atomic32_read(&cache_matcher->refcnt));
1606 rte_atomic32_inc(&cache_matcher->refcnt);
1607 dev_flow->dv.matcher = cache_matcher;
1611 /* Register new matcher. */
1612 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
1614 return rte_flow_error_set(error, ENOMEM,
1615 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1616 "cannot allocate matcher memory");
1617 *cache_matcher = *matcher;
1618 dv_attr.match_criteria_enable =
1619 flow_dv_matcher_enable(cache_matcher->mask.buf);
1620 dv_attr.priority = matcher->priority;
1621 if (matcher->egress)
1622 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
1623 cache_matcher->matcher_object =
1624 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
1625 if (!cache_matcher->matcher_object) {
1626 rte_free(cache_matcher);
1627 return rte_flow_error_set(error, ENOMEM,
1628 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1629 NULL, "cannot create matcher");
1631 rte_atomic32_inc(&cache_matcher->refcnt);
1632 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
1633 dev_flow->dv.matcher = cache_matcher;
1634 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
1635 cache_matcher->priority,
1636 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
1637 rte_atomic32_read(&cache_matcher->refcnt));
1642 * Fill the flow with DV spec.
1645 * Pointer to rte_eth_dev structure.
1646 * @param[in, out] dev_flow
1647 * Pointer to the sub flow.
1649 * Pointer to the flow attributes.
1651 * Pointer to the list of items.
1652 * @param[in] actions
1653 * Pointer to the list of actions.
1655 * Pointer to the error structure.
1658 * 0 on success, a negative errno value otherwise and rte_ernno is set.
1661 flow_dv_translate(struct rte_eth_dev *dev,
1662 struct mlx5_flow *dev_flow,
1663 const struct rte_flow_attr *attr,
1664 const struct rte_flow_item items[],
1665 const struct rte_flow_action actions[] __rte_unused,
1666 struct rte_flow_error *error)
1668 struct priv *priv = dev->data->dev_private;
1669 uint64_t priority = attr->priority;
1670 struct mlx5_flow_dv_matcher matcher = {
1672 .size = sizeof(matcher.mask.buf),
1675 void *match_value = dev_flow->dv.value.buf;
1678 if (priority == MLX5_FLOW_PRIO_RSVD)
1679 priority = priv->config.flow_prio - 1;
1680 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1681 tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1682 flow_dv_create_item(&matcher, match_value, items, dev_flow,
1685 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
1687 if (priority == MLX5_FLOW_PRIO_RSVD)
1688 priority = priv->config.flow_prio - 1;
1689 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
1691 matcher.egress = attr->egress;
1692 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
1694 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1695 if (flow_dv_create_action(dev, actions, dev_flow, error))
1701 * Apply the flow to the NIC.
1704 * Pointer to the Ethernet device structure.
1705 * @param[in, out] flow
1706 * Pointer to flow structure.
1708 * Pointer to error structure.
1711 * 0 on success, a negative errno value otherwise and rte_errno is set.
1714 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1715 struct rte_flow_error *error)
1717 struct mlx5_flow_dv *dv;
1718 struct mlx5_flow *dev_flow;
1722 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1725 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1726 dv->hrxq = mlx5_hrxq_drop_new(dev);
1730 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1731 "cannot get drop hash queue");
1734 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
1735 dv->actions[n].qp = dv->hrxq->qp;
1737 } else if (flow->actions &
1738 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
1739 struct mlx5_hrxq *hrxq;
1740 hrxq = mlx5_hrxq_get(dev, flow->key,
1741 MLX5_RSS_HASH_KEY_LEN,
1744 flow->rss.queue_num);
1746 hrxq = mlx5_hrxq_new
1747 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
1748 dv->hash_fields, (*flow->queue),
1749 flow->rss.queue_num,
1750 !!(dev_flow->layers &
1751 MLX5_FLOW_LAYER_TUNNEL));
1755 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1756 "cannot get hash queue");
1760 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
1761 dv->actions[n].qp = hrxq->qp;
1765 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
1766 (void *)&dv->value, n,
1769 rte_flow_error_set(error, errno,
1770 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1772 "hardware refuses to create flow");
1778 err = rte_errno; /* Save rte_errno before cleanup. */
1779 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1780 struct mlx5_flow_dv *dv = &dev_flow->dv;
1782 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1783 mlx5_hrxq_drop_release(dev);
1785 mlx5_hrxq_release(dev, dv->hrxq);
1789 rte_errno = err; /* Restore rte_errno. */
1794 * Release the flow matcher.
1797 * Pointer to Ethernet device.
1799 * Pointer to mlx5_flow.
1802 * 1 while a reference on it exists, 0 when freed.
1805 flow_dv_matcher_release(struct rte_eth_dev *dev,
1806 struct mlx5_flow *flow)
1808 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
1810 assert(matcher->matcher_object);
1811 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
1812 dev->data->port_id, (void *)matcher,
1813 rte_atomic32_read(&matcher->refcnt));
1814 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
1815 claim_zero(mlx5_glue->dv_destroy_flow_matcher
1816 (matcher->matcher_object));
1817 LIST_REMOVE(matcher, next);
1819 DRV_LOG(DEBUG, "port %u matcher %p: removed",
1820 dev->data->port_id, (void *)matcher);
1827 * Remove the flow from the NIC but keeps it in memory.
1830 * Pointer to Ethernet device.
1831 * @param[in, out] flow
1832 * Pointer to flow structure.
1835 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1837 struct mlx5_flow_dv *dv;
1838 struct mlx5_flow *dev_flow;
1842 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1845 claim_zero(mlx5_glue->destroy_flow(dv->flow));
1849 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1850 mlx5_hrxq_drop_release(dev);
1852 mlx5_hrxq_release(dev, dv->hrxq);
1857 flow->counter = NULL;
1861 * Remove the flow from the NIC and the memory.
1864 * Pointer to the Ethernet device structure.
1865 * @param[in, out] flow
1866 * Pointer to flow structure.
1869 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1871 struct mlx5_flow *dev_flow;
1875 flow_dv_remove(dev, flow);
1876 while (!LIST_EMPTY(&flow->dev_flows)) {
1877 dev_flow = LIST_FIRST(&flow->dev_flows);
1878 LIST_REMOVE(dev_flow, next);
1879 if (dev_flow->dv.matcher)
1880 flow_dv_matcher_release(dev, dev_flow);
1881 if (dev_flow->dv.encap_decap_verbs_action) {
1882 claim_zero(mlx5_glue->destroy_flow_action
1883 (dev_flow->dv.encap_decap_verbs_action));
1884 dev_flow->dv.encap_decap_verbs_action = NULL;
1893 * @see rte_flow_query()
1897 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
1898 struct rte_flow *flow __rte_unused,
1899 const struct rte_flow_action *actions __rte_unused,
1900 void *data __rte_unused,
1901 struct rte_flow_error *error __rte_unused)
1903 rte_errno = ENOTSUP;
1908 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
1909 .validate = flow_dv_validate,
1910 .prepare = flow_dv_prepare,
1911 .translate = flow_dv_translate,
1912 .apply = flow_dv_apply,
1913 .remove = flow_dv_remove,
1914 .destroy = flow_dv_destroy,
1915 .query = flow_dv_query,
1918 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */