1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37 #define MLX5_ENCAP_MAX_LEN 132
43 * Pointer to the rte_eth_dev structure.
47 * Attributes of flow that includes this item.
49 * Pointer to error structure.
52 * 0 on success, a negative errno value otherwise and rte_errno is set.
55 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
56 const struct rte_flow_item *item,
57 const struct rte_flow_attr *attr,
58 struct rte_flow_error *error)
/* Pattern-supplied spec/mask; mask may be NULL (default applied below). */
60 const struct rte_flow_item_meta *spec = item->spec;
61 const struct rte_flow_item_meta *mask = item->mask;
/* Widest mask the NIC supports: full 32-bit metadata match. */
62 const struct rte_flow_item_meta nic_mask = {
63 .data = RTE_BE32(UINT32_MAX)
66 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
/* Metadata match requires the Tx "match metadata" offload enabled on port. */
68 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
69 return rte_flow_error_set(error, EPERM,
70 RTE_FLOW_ERROR_TYPE_ITEM,
72 "match on metadata offload "
73 "configuration is off for this port");
/* NOTE(review): guard conditions for the two errors below are elided in
 * this listing — presumably "!spec" and "!spec->data" respectively. */
75 return rte_flow_error_set(error, EINVAL,
76 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
78 "data cannot be empty");
80 return rte_flow_error_set(error, EINVAL,
81 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
83 "data cannot be zero");
/* Fall back to the rte_flow default META mask when none was given. */
85 mask = &rte_flow_item_meta_mask;
/* Check the user mask is a subset of what the NIC can match. */
86 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
87 (const uint8_t *)&nic_mask,
88 sizeof(struct rte_flow_item_meta),
/* META is a Tx-only (egress) item on this device. */
93 return rte_flow_error_set(error, ENOTSUP,
94 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
96 "pattern not supported for ingress");
101 * Validate the L2 encap action.
103 * @param[in] action_flags
104 * Holds the actions detected until now.
106 * Pointer to the encap action.
108 * Pointer to flow attributes
110 * Pointer to error structure.
113 * 0 on success, a negative errno value otherwise and rte_errno is set.
116 flow_dv_validate_action_l2_encap(uint64_t action_flags,
117 const struct rte_flow_action *action,
118 const struct rte_flow_attr *attr,
119 struct rte_flow_error *error)
/* Encap needs a configuration (the header definition list). */
122 return rte_flow_error_set(error, EINVAL,
123 RTE_FLOW_ERROR_TYPE_ACTION, action,
124 "configuration cannot be null");
/* Drop and encap are mutually exclusive in one flow. */
125 if (action_flags & MLX5_FLOW_ACTION_DROP)
126 return rte_flow_error_set(error, EINVAL,
127 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
128 "can't drop and encap in same flow");
/* At most one encap action is allowed per flow. */
129 if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
130 return rte_flow_error_set(error, EINVAL,
131 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
132 "can only have a single encap"
133 " action in a flow");
/* Encap is an egress-only action here (guard condition elided in listing). */
135 return rte_flow_error_set(error, ENOTSUP,
136 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
138 "encap action not supported for "
144 * Get the size of specific rte_flow_item_type
146 * @param[in] item_type
147 * Tested rte_flow_item_type.
150 * sizeof struct item_type, 0 if void or irrelevant.
153 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
/* Map each supported rte_flow item type to its spec structure size;
 * used to walk the encap definition list and size the raw header copy. */
158 case RTE_FLOW_ITEM_TYPE_ETH:
159 retval = sizeof(struct rte_flow_item_eth);
161 case RTE_FLOW_ITEM_TYPE_VLAN:
162 retval = sizeof(struct rte_flow_item_vlan);
164 case RTE_FLOW_ITEM_TYPE_IPV4:
165 retval = sizeof(struct rte_flow_item_ipv4);
167 case RTE_FLOW_ITEM_TYPE_IPV6:
168 retval = sizeof(struct rte_flow_item_ipv6);
170 case RTE_FLOW_ITEM_TYPE_UDP:
171 retval = sizeof(struct rte_flow_item_udp);
173 case RTE_FLOW_ITEM_TYPE_TCP:
174 retval = sizeof(struct rte_flow_item_tcp);
176 case RTE_FLOW_ITEM_TYPE_VXLAN:
177 retval = sizeof(struct rte_flow_item_vxlan);
179 case RTE_FLOW_ITEM_TYPE_GRE:
180 retval = sizeof(struct rte_flow_item_gre);
182 case RTE_FLOW_ITEM_TYPE_NVGRE:
183 retval = sizeof(struct rte_flow_item_nvgre);
185 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
186 retval = sizeof(struct rte_flow_item_vxlan_gpe);
188 case RTE_FLOW_ITEM_TYPE_MPLS:
189 retval = sizeof(struct rte_flow_item_mpls);
/* VOID (and anything unlisted) yields size 0 per the doc comment above. */
191 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
199 #define MLX5_ENCAP_IPV4_VERSION 0x40
200 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
201 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
202 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
203 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
204 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
205 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
208 * Convert the encap action data from list of rte_flow_item to raw buffer
211 * Pointer to rte_flow_item objects list.
213 * Pointer to the output buffer.
215 * Pointer to the output buffer size.
217 * Pointer to the error structure.
220 * 0 on success, a negative errno value otherwise and rte_errno is set.
223 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
224 size_t *size, struct rte_flow_error *error)
/* Pointers into 'buf' for the most recently written header of each layer;
 * used to back-patch protocol/length defaults once the next layer is seen. */
226 struct ether_hdr *eth = NULL;
227 struct vlan_hdr *vlan = NULL;
228 struct ipv4_hdr *ipv4 = NULL;
229 struct ipv6_hdr *ipv6 = NULL;
230 struct udp_hdr *udp = NULL;
231 struct vxlan_hdr *vxlan = NULL;
232 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
/* Running byte offset into 'buf' while headers are appended. */
234 size_t temp_size = 0;
/* Empty definition list is invalid (guard condition elided in listing). */
237 return rte_flow_error_set(error, EINVAL,
238 RTE_FLOW_ERROR_TYPE_ACTION,
239 NULL, "invalid empty data");
/* Append each item's spec verbatim, then fix up defaults per layer. */
240 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
241 len = flow_dv_get_item_len(items->type);
/* Hardware reformat buffer is bounded by MLX5_ENCAP_MAX_LEN (132 B). */
242 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
243 return rte_flow_error_set(error, EINVAL,
244 RTE_FLOW_ERROR_TYPE_ACTION,
246 "items total size is too big"
247 " for encap action");
248 rte_memcpy((void *)&buf[temp_size], items->spec, len);
249 switch (items->type) {
250 case RTE_FLOW_ITEM_TYPE_ETH:
251 eth = (struct ether_hdr *)&buf[temp_size];
253 case RTE_FLOW_ITEM_TYPE_VLAN:
254 vlan = (struct vlan_hdr *)&buf[temp_size];
/* VLAN must follow an Ethernet header (guard elided in listing). */
256 return rte_flow_error_set(error, EINVAL,
257 RTE_FLOW_ERROR_TYPE_ACTION,
259 "eth header not found");
/* Default the outer ethertype to 802.1Q if the user left it zero. */
260 if (!eth->ether_type)
261 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
263 case RTE_FLOW_ITEM_TYPE_IPV4:
264 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
/* IPv4 must follow eth or vlan (guard elided in listing). */
266 return rte_flow_error_set(error, EINVAL,
267 RTE_FLOW_ERROR_TYPE_ACTION,
269 "neither eth nor vlan"
/* Back-patch the preceding L2 header's protocol field if unset. */
271 if (vlan && !vlan->eth_proto)
272 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
273 else if (eth && !eth->ether_type)
274 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
/* Default version/IHL and TTL when the user provided zeros. */
275 if (!ipv4->version_ihl)
276 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
277 MLX5_ENCAP_IPV4_IHL_MIN;
278 if (!ipv4->time_to_live)
279 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
281 case RTE_FLOW_ITEM_TYPE_IPV6:
282 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
/* IPv6 must follow eth or vlan (guard elided in listing). */
284 return rte_flow_error_set(error, EINVAL,
285 RTE_FLOW_ERROR_TYPE_ACTION,
287 "neither eth nor vlan"
289 if (vlan && !vlan->eth_proto)
290 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
291 else if (eth && !eth->ether_type)
292 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
/* Default version nibble (0x6 in vtc_flow) and hop limit if unset. */
295 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
296 if (!ipv6->hop_limits)
297 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
299 case RTE_FLOW_ITEM_TYPE_UDP:
300 udp = (struct udp_hdr *)&buf[temp_size];
/* UDP must follow an IP header (guard elided in listing). */
302 return rte_flow_error_set(error, EINVAL,
303 RTE_FLOW_ERROR_TYPE_ACTION,
305 "ip header not found");
/* Back-patch IP next-protocol to UDP if the user left it zero. */
306 if (ipv4 && !ipv4->next_proto_id)
307 ipv4->next_proto_id = IPPROTO_UDP;
308 else if (ipv6 && !ipv6->proto)
309 ipv6->proto = IPPROTO_UDP;
311 case RTE_FLOW_ITEM_TYPE_VXLAN:
312 vxlan = (struct vxlan_hdr *)&buf[temp_size];
/* VXLAN must follow UDP (guard elided in listing). */
314 return rte_flow_error_set(error, EINVAL,
315 RTE_FLOW_ERROR_TYPE_ACTION,
317 "udp header not found");
/* Default the UDP destination port and VXLAN "VNI valid" flag. */
319 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
320 if (!vxlan->vx_flags)
322 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
324 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
325 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
/* VXLAN-GPE must follow UDP (guard elided in listing). */
327 return rte_flow_error_set(error, EINVAL,
328 RTE_FLOW_ERROR_TYPE_ACTION,
330 "udp header not found");
/* GPE carries the inner protocol explicitly; it is mandatory. */
331 if (!vxlan_gpe->proto)
332 return rte_flow_error_set(error, EINVAL,
333 RTE_FLOW_ERROR_TYPE_ACTION,
335 "next protocol not found");
338 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
339 if (!vxlan_gpe->vx_flags)
340 vxlan_gpe->vx_flags =
341 MLX5_ENCAP_VXLAN_GPE_FLAGS;
343 case RTE_FLOW_ITEM_TYPE_VOID:
/* Any other item type cannot be serialized into an encap header. */
346 return rte_flow_error_set(error, EINVAL,
347 RTE_FLOW_ERROR_TYPE_ACTION,
349 "unsupported item type");
359 * Convert L2 encap action to DV specification.
362 * Pointer to rte_eth_dev structure.
364 * Pointer to action structure.
366 * Pointer to the error structure.
369 * Pointer to action on success, NULL otherwise and rte_errno is set.
371 static struct ibv_flow_action *
372 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
373 const struct rte_flow_action *action,
374 struct rte_flow_error *error)
376 struct ibv_flow_action *verbs_action = NULL;
377 const struct rte_flow_item *encap_data;
378 struct priv *priv = dev->data->dev_private;
/* Scratch buffer for the serialized tunnel headers (bounded, on stack). */
379 uint8_t buf[MLX5_ENCAP_MAX_LEN];
381 int convert_result = 0;
/* The header layout comes from the VXLAN-encap action's item list. */
383 encap_data = ((const struct rte_flow_action_vxlan_encap *)
384 action->conf)->definition;
385 convert_result = flow_dv_convert_encap_data(encap_data, buf,
/* Ask rdma-core to build an L2-to-L2-tunnel packet-reformat action on
 * the NIC Tx table from the raw header bytes. */
389 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
390 (priv->ctx, size, buf,
391 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
392 MLX5DV_FLOW_TABLE_TYPE_NIC_TX);
/* NOTE(review): error set on creation failure; guard elided in listing. */
394 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
395 NULL, "cannot create L2 encap action");
400 * Verify the @p attributes will be correctly understood by the NIC and store
401 * them in the @p flow if everything is correct.
404 * Pointer to dev struct.
405 * @param[in] attributes
406 * Pointer to flow attributes
408 * Pointer to error structure.
411 * 0 on success, a negative errno value otherwise and rte_errno is set.
414 flow_dv_validate_attributes(struct rte_eth_dev *dev,
415 const struct rte_flow_attr *attributes,
416 struct rte_flow_error *error)
418 struct priv *priv = dev->data->dev_private;
/* Highest usable priority; flow_prio is probed at device start. */
419 uint32_t priority_max = priv->config.flow_prio - 1;
/* Non-zero groups are not supported by this engine. */
421 if (attributes->group)
422 return rte_flow_error_set(error, ENOTSUP,
423 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
425 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a sentinel meaning "driver picks priority". */
426 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
427 attributes->priority >= priority_max)
428 return rte_flow_error_set(error, ENOTSUP,
429 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
431 "priority out of range");
432 if (attributes->transfer)
433 return rte_flow_error_set(error, ENOTSUP,
434 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
436 "transfer is not supported");
/* Exactly one direction must be set (XOR of ingress/egress). */
437 if (!(attributes->egress ^ attributes->ingress))
438 return rte_flow_error_set(error, ENOTSUP,
439 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
440 "must specify exactly one of "
441 "ingress or egress");
446 * Internal validation function. For validating both actions and items.
449 * Pointer to the rte_eth_dev structure.
451 * Pointer to the flow attributes.
453 * Pointer to the list of items.
455 * Pointer to the list of actions.
457 * Pointer to the error structure.
460 * 0 on success, a negative errno value otherwise and rte_ernno is set.
463 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
464 const struct rte_flow_item items[],
465 const struct rte_flow_action actions[],
466 struct rte_flow_error *error)
/* Bit masks accumulating which actions/items were seen so far. */
469 uint64_t action_flags = 0;
470 uint64_t item_flags = 0;
/* 0xff == "any"; narrowed when an IP item masks its protocol field. */
472 uint8_t next_protocol = 0xff;
477 ret = flow_dv_validate_attributes(dev, attr, error);
/* Pass 1: validate the pattern items in order. */
480 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* 'tunnel' flips once a tunnel item was matched; later L2-L4 items
 * are then classified as inner layers. */
481 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
482 switch (items->type) {
483 case RTE_FLOW_ITEM_TYPE_VOID:
485 case RTE_FLOW_ITEM_TYPE_ETH:
486 ret = mlx5_flow_validate_item_eth(items, item_flags,
490 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
491 MLX5_FLOW_LAYER_OUTER_L2;
493 case RTE_FLOW_ITEM_TYPE_VLAN:
494 ret = mlx5_flow_validate_item_vlan(items, item_flags,
498 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
499 MLX5_FLOW_LAYER_OUTER_VLAN;
501 case RTE_FLOW_ITEM_TYPE_IPV4:
502 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
506 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
507 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Remember the masked next-protocol for later GRE validation. */
508 if (items->mask != NULL &&
509 ((const struct rte_flow_item_ipv4 *)
510 items->mask)->hdr.next_proto_id)
512 ((const struct rte_flow_item_ipv4 *)
513 (items->spec))->hdr.next_proto_id;
515 case RTE_FLOW_ITEM_TYPE_IPV6:
516 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
520 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
521 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
522 if (items->mask != NULL &&
523 ((const struct rte_flow_item_ipv6 *)
524 items->mask)->hdr.proto)
526 ((const struct rte_flow_item_ipv6 *)
527 items->spec)->hdr.proto;
529 case RTE_FLOW_ITEM_TYPE_TCP:
530 ret = mlx5_flow_validate_item_tcp
533 &rte_flow_item_tcp_mask,
537 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
538 MLX5_FLOW_LAYER_OUTER_L4_TCP;
540 case RTE_FLOW_ITEM_TYPE_UDP:
541 ret = mlx5_flow_validate_item_udp(items, item_flags,
546 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
547 MLX5_FLOW_LAYER_OUTER_L4_UDP;
/* GRE/NVGRE share the GRE validator and layer flag. */
549 case RTE_FLOW_ITEM_TYPE_GRE:
550 case RTE_FLOW_ITEM_TYPE_NVGRE:
551 ret = mlx5_flow_validate_item_gre(items, item_flags,
552 next_protocol, error);
555 item_flags |= MLX5_FLOW_LAYER_GRE;
557 case RTE_FLOW_ITEM_TYPE_VXLAN:
558 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
562 item_flags |= MLX5_FLOW_LAYER_VXLAN;
564 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
565 ret = mlx5_flow_validate_item_vxlan_gpe(items,
570 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
572 case RTE_FLOW_ITEM_TYPE_META:
573 ret = flow_dv_validate_item_meta(dev, items, attr,
577 item_flags |= MLX5_FLOW_ITEM_METADATA;
580 return rte_flow_error_set(error, ENOTSUP,
581 RTE_FLOW_ERROR_TYPE_ITEM,
582 NULL, "item not supported");
/* Pass 2: validate the actions, bounded by the DV action array size. */
585 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
586 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
587 return rte_flow_error_set(error, ENOTSUP,
588 RTE_FLOW_ERROR_TYPE_ACTION,
589 actions, "too many actions");
590 switch (actions->type) {
591 case RTE_FLOW_ACTION_TYPE_VOID:
593 case RTE_FLOW_ACTION_TYPE_FLAG:
594 ret = mlx5_flow_validate_action_flag(action_flags,
598 action_flags |= MLX5_FLOW_ACTION_FLAG;
601 case RTE_FLOW_ACTION_TYPE_MARK:
602 ret = mlx5_flow_validate_action_mark(actions,
607 action_flags |= MLX5_FLOW_ACTION_MARK;
610 case RTE_FLOW_ACTION_TYPE_DROP:
611 ret = mlx5_flow_validate_action_drop(action_flags,
615 action_flags |= MLX5_FLOW_ACTION_DROP;
618 case RTE_FLOW_ACTION_TYPE_QUEUE:
619 ret = mlx5_flow_validate_action_queue(actions,
624 action_flags |= MLX5_FLOW_ACTION_QUEUE;
627 case RTE_FLOW_ACTION_TYPE_RSS:
628 ret = mlx5_flow_validate_action_rss(actions,
633 action_flags |= MLX5_FLOW_ACTION_RSS;
636 case RTE_FLOW_ACTION_TYPE_COUNT:
637 ret = mlx5_flow_validate_action_count(dev, attr, error);
640 action_flags |= MLX5_FLOW_ACTION_COUNT;
643 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
644 ret = flow_dv_validate_action_l2_encap(action_flags,
649 action_flags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
654 return rte_flow_error_set(error, ENOTSUP,
655 RTE_FLOW_ERROR_TYPE_ACTION,
657 "action not supported");
/* An ingress flow must end in a fate action (drop/queue/rss/...). */
660 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
661 return rte_flow_error_set(error, EINVAL,
662 RTE_FLOW_ERROR_TYPE_ACTION, actions,
663 "no fate action is found");
668 * Internal preparation function. Allocates the DV flow size,
669 * this size is constant.
672 * Pointer to the flow attributes.
674 * Pointer to the list of items.
676 * Pointer to the list of actions.
677 * @param[out] item_flags
678 * Pointer to bit mask of all items detected.
679 * @param[out] action_flags
680 * Pointer to bit mask of all actions detected.
682 * Pointer to the error structure.
685 * Pointer to mlx5_flow object on success,
686 * otherwise NULL and rte_ernno is set.
688 static struct mlx5_flow *
689 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
690 const struct rte_flow_item items[] __rte_unused,
691 const struct rte_flow_action actions[] __rte_unused,
692 uint64_t *item_flags __rte_unused,
693 uint64_t *action_flags __rte_unused,
694 struct rte_flow_error *error)
/* DV flow size is constant, so all parameters except 'error' are unused. */
696 uint32_t size = sizeof(struct mlx5_flow);
697 struct mlx5_flow *flow;
/* Zeroed allocation; freed by the caller when the flow is destroyed. */
699 flow = rte_calloc(__func__, 1, size, 0);
701 rte_flow_error_set(error, ENOMEM,
702 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
703 "not enough memory to create flow");
/* Matcher value buffer is always a full fte_match_param. */
706 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
711 * Add Ethernet item to matcher and to the value.
713 * @param[in, out] matcher
715 * @param[in, out] key
716 * Flow matcher value.
718 * Flow pattern to translate.
720 * Item is inner pattern.
723 flow_dv_translate_item_eth(void *matcher, void *key,
724 const struct rte_flow_item *item, int inner)
726 const struct rte_flow_item_eth *eth_m = item->mask;
727 const struct rte_flow_item_eth *eth_v = item->spec;
/* Widest Ethernet match the NIC supports: full MACs + ethertype. */
728 const struct rte_flow_item_eth nic_mask = {
729 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
730 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
731 .type = RTE_BE16(0xffff),
/* Select inner vs outer header sub-structures of fte_match_param. */
743 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
745 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
747 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
749 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Destination MAC: mask copied verbatim, value ANDed with the mask. */
751 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
752 &eth_m->dst, sizeof(eth_m->dst));
753 /* The value must be in the range of the mask. */
754 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
755 for (i = 0; i < sizeof(eth_m->dst); ++i)
756 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
/* Source MAC, same treatment. */
757 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
758 &eth_m->src, sizeof(eth_m->src));
759 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
760 /* The value must be in the range of the mask. */
761 for (i = 0; i < sizeof(eth_m->src); ++i)
762 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
763 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
764 rte_be_to_cpu_16(eth_m->type));
/* Ethertype value is stored big-endian directly into the key. */
765 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
766 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
770 * Add VLAN item to matcher and to the value.
772 * @param[in, out] matcher
774 * @param[in, out] key
775 * Flow matcher value.
777 * Flow pattern to translate.
779 * Item is inner pattern.
782 flow_dv_translate_item_vlan(void *matcher, void *key,
783 const struct rte_flow_item *item,
786 const struct rte_flow_item_vlan *vlan_m = item->mask;
787 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* NIC supports 12-bit VID plus full inner ethertype matching. */
788 const struct rte_flow_item_vlan nic_mask = {
789 .tci = RTE_BE16(0x0fff),
790 .inner_type = RTE_BE16(0xffff),
/* Select inner vs outer header sub-structures of fte_match_param. */
802 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
804 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
806 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
808 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* TCI in host order; value is constrained to the mask. */
810 tci_m = rte_be_to_cpu_16(vlan_m->tci);
811 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of a C-VLAN tag is matched unconditionally for this item. */
812 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
813 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
/* Split TCI into VID (bits 0-11), CFI/DEI (bit 12) and PCP (bits 13-15). */
814 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
815 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
816 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
817 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
818 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
819 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
823 * Add IPV4 item to matcher and to the value.
825 * @param[in, out] matcher
827 * @param[in, out] key
828 * Flow matcher value.
830 * Flow pattern to translate.
832 * Item is inner pattern.
835 flow_dv_translate_item_ipv4(void *matcher, void *key,
836 const struct rte_flow_item *item,
839 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
840 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Widest IPv4 match: full addresses, TOS and next protocol. */
841 const struct rte_flow_item_ipv4 nic_mask = {
843 .src_addr = RTE_BE32(0xffffffff),
844 .dst_addr = RTE_BE32(0xffffffff),
845 .type_of_service = 0xff,
846 .next_proto_id = 0xff,
/* Select inner vs outer header sub-structures of fte_match_param. */
856 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
858 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
860 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
862 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the IP version field to 4. */
864 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
865 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Addresses: mask copied as-is, value ANDed with the mask. Both are
 * big-endian in the pattern and stored big-endian into the key. */
870 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
871 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
872 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
873 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
874 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
875 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
876 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
877 src_ipv4_src_ipv6.ipv4_layout.ipv4);
878 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
879 src_ipv4_src_ipv6.ipv4_layout.ipv4);
880 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
881 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte is split into ECN (low 2 bits) and DSCP (upper 6 bits). */
882 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
883 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
884 ipv4_m->hdr.type_of_service);
885 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
886 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
887 ipv4_m->hdr.type_of_service >> 2);
888 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
889 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
890 ipv4_m->hdr.next_proto_id);
891 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
892 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
896 * Add IPV6 item to matcher and to the value.
898 * @param[in, out] matcher
900 * @param[in, out] key
901 * Flow matcher value.
903 * Flow pattern to translate.
905 * Item is inner pattern.
908 flow_dv_translate_item_ipv6(void *matcher, void *key,
909 const struct rte_flow_item *item,
912 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
913 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest IPv6 match: full 128-bit addresses plus vtc_flow word. */
914 const struct rte_flow_item_ipv6 nic_mask = {
917 "\xff\xff\xff\xff\xff\xff\xff\xff"
918 "\xff\xff\xff\xff\xff\xff\xff\xff",
920 "\xff\xff\xff\xff\xff\xff\xff\xff"
921 "\xff\xff\xff\xff\xff\xff\xff\xff",
922 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label lives in misc_parameters, not in the L2-L4 header set. */
929 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
930 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sub-structures of fte_match_param. */
939 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
941 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
943 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
945 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pin the IP version field to 6. */
947 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
948 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Addresses byte-wise: mask copied, value ANDed with the mask. */
953 size = sizeof(ipv6_m->hdr.dst_addr);
954 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
955 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
956 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
957 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
958 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
959 for (i = 0; i < size; ++i)
960 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
961 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
962 src_ipv4_src_ipv6.ipv6_layout.ipv6);
963 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
964 src_ipv4_src_ipv6.ipv6_layout.ipv6);
965 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
966 for (i = 0; i < size; ++i)
967 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow = version(4) | traffic class(8) | flow label(20), host order
 * after conversion; ECN/DSCP are carved out of the traffic class. */
969 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
970 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
971 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
972 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
973 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
974 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes into the inner/outer misc field depending on 'inner'. */
977 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
979 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
982 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
984 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
988 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
990 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
991 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
995 * Add TCP item to matcher and to the value.
997 * @param[in, out] matcher
999 * @param[in, out] key
1000 * Flow matcher value.
1002 * Flow pattern to translate.
1004 * Item is inner pattern.
1007 flow_dv_translate_item_tcp(void *matcher, void *key,
1008 const struct rte_flow_item *item,
1011 const struct rte_flow_item_tcp *tcp_m = item->mask;
1012 const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs outer header sub-structures of fte_match_param. */
1017 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1019 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1021 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1023 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching this item pins IP protocol to TCP. */
1025 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1026 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the default TCP mask when none was given. */
1030 tcp_m = &rte_flow_item_tcp_mask;
/* Ports are converted to host order for the PRM fields; value is
 * constrained to the mask. */
1031 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
1032 rte_be_to_cpu_16(tcp_m->hdr.src_port));
1033 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
1034 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
1035 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
1036 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
1037 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
1038 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
1042 * Add UDP item to matcher and to the value.
1044 * @param[in, out] matcher
1046 * @param[in, out] key
1047 * Flow matcher value.
1049 * Flow pattern to translate.
1051 * Item is inner pattern.
1054 flow_dv_translate_item_udp(void *matcher, void *key,
1055 const struct rte_flow_item *item,
1058 const struct rte_flow_item_udp *udp_m = item->mask;
1059 const struct rte_flow_item_udp *udp_v = item->spec;
/* Select inner vs outer header sub-structures of fte_match_param. */
1064 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1066 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1068 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1070 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching this item pins IP protocol to UDP. */
1072 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1073 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* Fall back to the default UDP mask when none was given. */
1077 udp_m = &rte_flow_item_udp_mask;
/* Ports in host order; value constrained to the mask. */
1078 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
1079 rte_be_to_cpu_16(udp_m->hdr.src_port));
1080 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
1081 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
1082 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
1083 rte_be_to_cpu_16(udp_m->hdr.dst_port));
1084 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1085 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
1089 * Add GRE item to matcher and to the value.
1091 * @param[in, out] matcher
1093 * @param[in, out] key
1094 * Flow matcher value.
1096 * Flow pattern to translate.
1098 * Item is inner pattern.
1101 flow_dv_translate_item_gre(void *matcher, void *key,
1102 const struct rte_flow_item *item,
1105 const struct rte_flow_item_gre *gre_m = item->mask;
1106 const struct rte_flow_item_gre *gre_v = item->spec;
/* GRE protocol field lives in misc_parameters. */
1109 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1110 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sub-structures of fte_match_param. */
1113 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1115 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1117 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1119 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching this item pins IP protocol to GRE. */
1121 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1122 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* Fall back to the default GRE mask when none was given. */
1126 gre_m = &rte_flow_item_gre_mask;
1127 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
1128 rte_be_to_cpu_16(gre_m->protocol));
1129 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1130 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
1134 * Add NVGRE item to matcher and to the value.
1136 * @param[in, out] matcher
1138 * @param[in, out] key
1139 * Flow matcher value.
1141 * Flow pattern to translate.
1143 * Item is inner pattern.
1146 flow_dv_translate_item_nvgre(void *matcher, void *key,
1147 const struct rte_flow_item *item,
1150 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
1151 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
1152 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1153 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* TNI (24 bits) + flow_id (8 bits) are contiguous in the item and are
 * copied together into the 32-bit GRE key field below. */
1154 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
1155 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
/* NVGRE shares the base GRE translation (IP protocol, GRE protocol). */
1161 flow_dv_translate_item_gre(matcher, key, item, inner);
/* Fall back to the default NVGRE mask when none was given. */
1165 nvgre_m = &rte_flow_item_nvgre_mask;
1166 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
1167 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
1168 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
/* Mask copied verbatim; value constrained byte-wise to the mask. */
1169 memcpy(gre_key_m, tni_flow_id_m, size);
1170 for (i = 0; i < size; ++i)
1171 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
1175 * Add VXLAN item to matcher and to the value.
1177 * @param[in, out] matcher
1179 * @param[in, out] key
1180 * Flow matcher value.
1182 * Flow pattern to translate.
1184 * Item is inner pattern.
1187 flow_dv_translate_item_vxlan(void *matcher, void *key,
1188 const struct rte_flow_item *item,
/* Shared by VXLAN and VXLAN-GPE items (see dport selection below). */
1191 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
1192 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
1195 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1196 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sub-structures of fte_match_param. */
1204 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1206 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1208 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1210 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* If the UDP destination port was not matched explicitly, pin it to the
 * well-known port of the tunnel type. */
1212 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
1213 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
1214 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
1215 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
1216 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* Fall back to the default VXLAN mask when none was given. */
1221 vxlan_m = &rte_flow_item_vxlan_mask;
/* VNI: mask copied verbatim, value constrained byte-wise to the mask. */
1222 size = sizeof(vxlan_m->vni);
1223 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
1224 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
1225 memcpy(vni_m, vxlan_m->vni, size);
1226 for (i = 0; i < size; ++i)
1227 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
1231 * Add META item to matcher
1233 * @param[in, out] matcher
1235 * @param[in, out] key
1236 * Flow matcher value.
1238 * Flow pattern to translate.
1240 * Item is inner pattern.
1243 flow_dv_translate_item_meta(void *matcher, void *key,
1244 const struct rte_flow_item *item)
1246 const struct rte_flow_item_meta *meta_m;
1247 const struct rte_flow_item_meta *meta_v;
/* Metadata register A lives in misc_parameters_2. */
1249 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
1251 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1253 meta_m = (const void *)item->mask;
/* Fall back to the default META mask when none was given. */
1255 meta_m = &rte_flow_item_meta_mask;
1256 meta_v = (const void *)item->spec;
/* Metadata is big-endian in the pattern; convert and constrain the
 * value to the mask. */
1258 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
1259 rte_be_to_cpu_32(meta_m->data));
1260 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
1261 rte_be_to_cpu_32(meta_v->data & meta_m->data));
1266 * Update the matcher and the value based the selected item.
1268 * @param[in, out] matcher
1270 * @param[in, out] key
1271 * Flow matcher value.
1273 * Flow pattern to translate.
1274 * @param[in, out] dev_flow
1275 * Pointer to the mlx5_flow.
1277 * Item is inner pattern.
1280 flow_dv_create_item(void *matcher, void *key,
1281 const struct rte_flow_item *item,
1282 struct mlx5_flow *dev_flow,
/* The opaque matcher is the DV matcher; its mask buffer is filled by
 * the per-item translators alongside the value in 'key'. */
1285 struct mlx5_flow_dv_matcher *tmatcher = matcher;
1287 switch (item->type) {
1288 case RTE_FLOW_ITEM_TYPE_ETH:
1289 flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
1291 tmatcher->priority = MLX5_PRIORITY_MAP_L2;
1293 case RTE_FLOW_ITEM_TYPE_VLAN:
1294 flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
1297 case RTE_FLOW_ITEM_TYPE_IPV4:
1298 flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
1300 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
/* Accumulate RSS hash fields for the layers actually matched. */
1301 dev_flow->dv.hash_fields |=
1302 mlx5_flow_hashfields_adjust(dev_flow, inner,
1303 MLX5_IPV4_LAYER_TYPES,
1304 MLX5_IPV4_IBV_RX_HASH);
1306 case RTE_FLOW_ITEM_TYPE_IPV6:
1307 flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
1309 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
1310 dev_flow->dv.hash_fields |=
1311 mlx5_flow_hashfields_adjust(dev_flow, inner,
1312 MLX5_IPV6_LAYER_TYPES,
1313 MLX5_IPV6_IBV_RX_HASH);
1315 case RTE_FLOW_ITEM_TYPE_TCP:
1316 flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
1318 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
1319 dev_flow->dv.hash_fields |=
1320 mlx5_flow_hashfields_adjust(dev_flow, inner,
1322 (IBV_RX_HASH_SRC_PORT_TCP |
1323 IBV_RX_HASH_DST_PORT_TCP));
1325 case RTE_FLOW_ITEM_TYPE_UDP:
1326 flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
1328 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
/* Fix: use the DV union member like every other case above; the
 * previous code updated dev_flow->verbs.hash_fields, leaving the DV
 * hash fields without the UDP port bits. */
1329 dev_flow->dv.hash_fields |=
1330 mlx5_flow_hashfields_adjust(dev_flow, inner,
1332 (IBV_RX_HASH_SRC_PORT_UDP |
1333 IBV_RX_HASH_DST_PORT_UDP));
1335 case RTE_FLOW_ITEM_TYPE_GRE:
1336 flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
1339 case RTE_FLOW_ITEM_TYPE_NVGRE:
1340 flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
/* VXLAN and VXLAN-GPE share one translator; it picks the default UDP
 * destination port from item->type. */
1343 case RTE_FLOW_ITEM_TYPE_VXLAN:
1344 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1345 flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
1348 case RTE_FLOW_ITEM_TYPE_META:
1349 flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
/**
 * Store the requested actions in an array.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1372 flow_dv_create_action(struct rte_eth_dev *dev,
1373 const struct rte_flow_action *action,
1374 struct mlx5_flow *dev_flow,
1375 struct rte_flow_error *error)
1377 const struct rte_flow_action_queue *queue;
1378 const struct rte_flow_action_rss *rss;
1379 int actions_n = dev_flow->dv.actions_n;
1380 struct rte_flow *flow = dev_flow->flow;
1382 switch (action->type) {
1383 case RTE_FLOW_ACTION_TYPE_VOID:
1385 case RTE_FLOW_ACTION_TYPE_FLAG:
1386 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1387 dev_flow->dv.actions[actions_n].tag_value =
1388 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
1390 flow->actions |= MLX5_FLOW_ACTION_FLAG;
1392 case RTE_FLOW_ACTION_TYPE_MARK:
1393 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1394 dev_flow->dv.actions[actions_n].tag_value =
1396 (((const struct rte_flow_action_mark *)
1397 (action->conf))->id);
1398 flow->actions |= MLX5_FLOW_ACTION_MARK;
1401 case RTE_FLOW_ACTION_TYPE_DROP:
1402 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
1403 flow->actions |= MLX5_FLOW_ACTION_DROP;
1405 case RTE_FLOW_ACTION_TYPE_QUEUE:
1406 queue = action->conf;
1407 flow->rss.queue_num = 1;
1408 (*flow->queue)[0] = queue->index;
1409 flow->actions |= MLX5_FLOW_ACTION_QUEUE;
1411 case RTE_FLOW_ACTION_TYPE_RSS:
1414 memcpy((*flow->queue), rss->queue,
1415 rss->queue_num * sizeof(uint16_t));
1416 flow->rss.queue_num = rss->queue_num;
1417 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
1418 flow->rss.types = rss->types;
1419 flow->rss.level = rss->level;
1420 /* Added to array only in apply since we need the QP */
1421 flow->actions |= MLX5_FLOW_ACTION_RSS;
1423 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1424 dev_flow->dv.actions[actions_n].type =
1425 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1426 dev_flow->dv.actions[actions_n].action =
1427 flow_dv_create_action_l2_encap(dev, action,
1429 if (!(dev_flow->dv.actions[actions_n].action))
1431 dev_flow->dv.encap_decap_verbs_action =
1432 dev_flow->dv.actions[actions_n].action;
1433 flow->actions |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
1439 dev_flow->dv.actions_n = actions_n;
1443 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
1445 #define HEADER_IS_ZERO(match_criteria, headers) \
1446 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1447 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
1450 * Calculate flow matcher enable bitmap.
1452 * @param match_criteria
1453 * Pointer to flow matcher criteria.
1456 * Bitmap of enabled fields.
1459 flow_dv_matcher_enable(uint32_t *match_criteria)
1461 uint8_t match_criteria_enable;
1463 match_criteria_enable =
1464 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1465 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
1466 match_criteria_enable |=
1467 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1468 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
1469 match_criteria_enable |=
1470 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1471 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
1472 match_criteria_enable |=
1473 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
1474 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
1476 return match_criteria_enable;
1480 * Register the flow matcher.
1482 * @param dev[in, out]
1483 * Pointer to rte_eth_dev structure.
1484 * @param[in, out] matcher
1485 * Pointer to flow matcher.
 * @param[in, out] dev_flow
1487 * Pointer to the dev_flow.
1489 * pointer to error structure.
1492 * 0 on success otherwise -errno and errno is set.
1495 flow_dv_matcher_register(struct rte_eth_dev *dev,
1496 struct mlx5_flow_dv_matcher *matcher,
1497 struct mlx5_flow *dev_flow,
1498 struct rte_flow_error *error)
1500 struct priv *priv = dev->data->dev_private;
1501 struct mlx5_flow_dv_matcher *cache_matcher;
1502 struct mlx5dv_flow_matcher_attr dv_attr = {
1503 .type = IBV_FLOW_ATTR_NORMAL,
1504 .match_mask = (void *)&matcher->mask,
1507 /* Lookup from cache. */
1508 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
1509 if (matcher->crc == cache_matcher->crc &&
1510 matcher->priority == cache_matcher->priority &&
1511 matcher->egress == cache_matcher->egress &&
1512 !memcmp((const void *)matcher->mask.buf,
1513 (const void *)cache_matcher->mask.buf,
1514 cache_matcher->mask.size)) {
1516 "priority %hd use %s matcher %p: refcnt %d++",
1517 cache_matcher->priority,
1518 cache_matcher->egress ? "tx" : "rx",
1519 (void *)cache_matcher,
1520 rte_atomic32_read(&cache_matcher->refcnt));
1521 rte_atomic32_inc(&cache_matcher->refcnt);
1522 dev_flow->dv.matcher = cache_matcher;
1526 /* Register new matcher. */
1527 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
1529 return rte_flow_error_set(error, ENOMEM,
1530 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1531 "cannot allocate matcher memory");
1532 *cache_matcher = *matcher;
1533 dv_attr.match_criteria_enable =
1534 flow_dv_matcher_enable(cache_matcher->mask.buf);
1535 dv_attr.priority = matcher->priority;
1536 if (matcher->egress)
1537 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
1538 cache_matcher->matcher_object =
1539 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
1540 if (!cache_matcher->matcher_object) {
1541 rte_free(cache_matcher);
1542 return rte_flow_error_set(error, ENOMEM,
1543 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1544 NULL, "cannot create matcher");
1546 rte_atomic32_inc(&cache_matcher->refcnt);
1547 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
1548 dev_flow->dv.matcher = cache_matcher;
1549 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
1550 cache_matcher->priority,
1551 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
1552 rte_atomic32_read(&cache_matcher->refcnt));
1557 * Fill the flow with DV spec.
1560 * Pointer to rte_eth_dev structure.
1561 * @param[in, out] dev_flow
1562 * Pointer to the sub flow.
1564 * Pointer to the flow attributes.
1566 * Pointer to the list of items.
1567 * @param[in] actions
1568 * Pointer to the list of actions.
1570 * Pointer to the error structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1576 flow_dv_translate(struct rte_eth_dev *dev,
1577 struct mlx5_flow *dev_flow,
1578 const struct rte_flow_attr *attr,
1579 const struct rte_flow_item items[],
1580 const struct rte_flow_action actions[] __rte_unused,
1581 struct rte_flow_error *error)
1583 struct priv *priv = dev->data->dev_private;
1584 uint64_t priority = attr->priority;
1585 struct mlx5_flow_dv_matcher matcher = {
1587 .size = sizeof(matcher.mask.buf),
1590 void *match_value = dev_flow->dv.value.buf;
1593 if (priority == MLX5_FLOW_PRIO_RSVD)
1594 priority = priv->config.flow_prio - 1;
1595 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1596 tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1597 flow_dv_create_item(&matcher, match_value, items, dev_flow,
1600 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
1602 if (priority == MLX5_FLOW_PRIO_RSVD)
1603 priority = priv->config.flow_prio - 1;
1604 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
1606 matcher.egress = attr->egress;
1607 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
1609 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1610 if (flow_dv_create_action(dev, actions, dev_flow, error))
1616 * Apply the flow to the NIC.
1619 * Pointer to the Ethernet device structure.
1620 * @param[in, out] flow
1621 * Pointer to flow structure.
1623 * Pointer to error structure.
1626 * 0 on success, a negative errno value otherwise and rte_errno is set.
1629 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1630 struct rte_flow_error *error)
1632 struct mlx5_flow_dv *dv;
1633 struct mlx5_flow *dev_flow;
1637 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1640 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1641 dv->hrxq = mlx5_hrxq_drop_new(dev);
1645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1646 "cannot get drop hash queue");
1649 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
1650 dv->actions[n].qp = dv->hrxq->qp;
1652 } else if (flow->actions &
1653 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
1654 struct mlx5_hrxq *hrxq;
1655 hrxq = mlx5_hrxq_get(dev, flow->key,
1656 MLX5_RSS_HASH_KEY_LEN,
1659 flow->rss.queue_num);
1661 hrxq = mlx5_hrxq_new
1662 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
1663 dv->hash_fields, (*flow->queue),
1664 flow->rss.queue_num,
1665 !!(dev_flow->layers &
1666 MLX5_FLOW_LAYER_TUNNEL));
1670 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1671 "cannot get hash queue");
1675 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
1676 dv->actions[n].qp = hrxq->qp;
1680 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
1681 (void *)&dv->value, n,
1684 rte_flow_error_set(error, errno,
1685 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1687 "hardware refuses to create flow");
1693 err = rte_errno; /* Save rte_errno before cleanup. */
1694 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1695 struct mlx5_flow_dv *dv = &dev_flow->dv;
1697 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1698 mlx5_hrxq_drop_release(dev);
1700 mlx5_hrxq_release(dev, dv->hrxq);
1704 rte_errno = err; /* Restore rte_errno. */
1709 * Release the flow matcher.
1712 * Pointer to Ethernet device.
1714 * Pointer to mlx5_flow.
1717 * 1 while a reference on it exists, 0 when freed.
1720 flow_dv_matcher_release(struct rte_eth_dev *dev,
1721 struct mlx5_flow *flow)
1723 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
1725 assert(matcher->matcher_object);
1726 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
1727 dev->data->port_id, (void *)matcher,
1728 rte_atomic32_read(&matcher->refcnt));
1729 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
1730 claim_zero(mlx5_glue->dv_destroy_flow_matcher
1731 (matcher->matcher_object));
1732 LIST_REMOVE(matcher, next);
1734 DRV_LOG(DEBUG, "port %u matcher %p: removed",
1735 dev->data->port_id, (void *)matcher);
1742 * Remove the flow from the NIC but keeps it in memory.
1745 * Pointer to Ethernet device.
1746 * @param[in, out] flow
1747 * Pointer to flow structure.
1750 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1752 struct mlx5_flow_dv *dv;
1753 struct mlx5_flow *dev_flow;
1757 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1760 claim_zero(mlx5_glue->destroy_flow(dv->flow));
1764 if (flow->actions & MLX5_FLOW_ACTION_DROP)
1765 mlx5_hrxq_drop_release(dev);
1767 mlx5_hrxq_release(dev, dv->hrxq);
1772 flow->counter = NULL;
1776 * Remove the flow from the NIC and the memory.
1779 * Pointer to the Ethernet device structure.
1780 * @param[in, out] flow
1781 * Pointer to flow structure.
1784 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1786 struct mlx5_flow *dev_flow;
1790 flow_dv_remove(dev, flow);
1791 while (!LIST_EMPTY(&flow->dev_flows)) {
1792 dev_flow = LIST_FIRST(&flow->dev_flows);
1793 LIST_REMOVE(dev_flow, next);
1794 if (dev_flow->dv.matcher)
1795 flow_dv_matcher_release(dev, dev_flow);
1796 if (dev_flow->dv.encap_decap_verbs_action) {
1797 claim_zero(mlx5_glue->destroy_flow_action
1798 (dev_flow->dv.encap_decap_verbs_action));
1799 dev_flow->dv.encap_decap_verbs_action = NULL;
1808 * @see rte_flow_query()
1812 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
1813 struct rte_flow *flow __rte_unused,
1814 const struct rte_flow_action *actions __rte_unused,
1815 void *data __rte_unused,
1816 struct rte_flow_error *error __rte_unused)
1818 rte_errno = ENOTSUP;
1823 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
1824 .validate = flow_dv_validate,
1825 .prepare = flow_dv_prepare,
1826 .translate = flow_dv_translate,
1827 .apply = flow_dv_apply,
1828 .remove = flow_dv_remove,
1829 .destroy = flow_dv_destroy,
1830 .query = flow_dv_query,
1833 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */