1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
38 #define MLX5_ENCAP_MAX_LEN 132
44 * Pointer to the rte_eth_dev structure.
48 * Attributes of flow that includes this item.
50 * Pointer to error structure.
53 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate a META flow item (match on Tx metadata set by the application).
 * NOTE(review): this chunk is elided (non-contiguous source lines); several
 * guard conditions are not visible here — code kept byte-identical.
 */
56 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
57 const struct rte_flow_item *item,
58 const struct rte_flow_attr *attr,
59 struct rte_flow_error *error)
61 const struct rte_flow_item_meta *spec = item->spec;
62 const struct rte_flow_item_meta *mask = item->mask;
/* NIC matches the full 32-bit metadata value. */
63 const struct rte_flow_item_meta nic_mask = {
64 .data = RTE_BE32(UINT32_MAX)
67 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
/* META matching requires DEV_TX_OFFLOAD_MATCH_METADATA enabled on the port. */
69 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
70 return rte_flow_error_set(error, EPERM,
71 RTE_FLOW_ERROR_TYPE_ITEM,
73 "match on metadata offload "
74 "configuration is off for this port");
/* NOTE(review): guard (presumably `if (!spec)`) elided from this view. */
76 return rte_flow_error_set(error, EINVAL,
77 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
79 "data cannot be empty");
/* NOTE(review): guard (presumably `if (!spec->data)`) elided from this view. */
81 return rte_flow_error_set(error, EINVAL,
82 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
84 "data cannot be zero");
/* Fall back to the default META mask when none was supplied (guard elided). */
86 mask = &rte_flow_item_meta_mask;
/* Check the user mask is a subset of what the NIC can match on. */
87 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
88 (const uint8_t *)&nic_mask,
89 sizeof(struct rte_flow_item_meta),
/* META is Tx-side metadata: reject non-egress flows (guard elided). */
94 return rte_flow_error_set(error, ENOTSUP,
95 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
97 "pattern not supported for ingress");
102 * Validate the L2 encap action.
104 * @param[in] action_flags
105 * Holds the actions detected until now.
107 * Pointer to the encap action.
109 * Pointer to flow attributes
111 * Pointer to error structure.
114 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); some guard
 * conditions are not visible — code kept byte-identical.
 */
117 flow_dv_validate_action_l2_encap(uint64_t action_flags,
118 const struct rte_flow_action *action,
119 const struct rte_flow_attr *attr,
120 struct rte_flow_error *error)
/* NOTE(review): guard (presumably `if (!action->conf)`) elided. */
123 return rte_flow_error_set(error, EINVAL,
124 RTE_FLOW_ERROR_TYPE_ACTION, action,
125 "configuration cannot be null");
/* Encap is pointless after drop: a dropped packet is never rewritten. */
126 if (action_flags & MLX5_FLOW_ACTION_DROP)
127 return rte_flow_error_set(error, EINVAL,
128 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
129 "can't drop and encap in same flow");
/* Only one encap/decap action is allowed per flow. */
130 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
131 return rte_flow_error_set(error, EINVAL,
132 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
133 "can only have a single encap or"
134 " decap action in a flow");
/* L2 encap is egress-only here (ingress guard elided from this view). */
136 return rte_flow_error_set(error, ENOTSUP,
137 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
139 "encap action not supported for "
145 * Validate the L2 decap action.
147 * @param[in] action_flags
148 * Holds the actions detected until now.
150 * Pointer to flow attributes
152 * Pointer to error structure.
155 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); some guard
 * conditions are not visible — code kept byte-identical.
 */
158 flow_dv_validate_action_l2_decap(uint64_t action_flags,
159 const struct rte_flow_attr *attr,
160 struct rte_flow_error *error)
/* Decap after drop is contradictory — reject the combination. */
162 if (action_flags & MLX5_FLOW_ACTION_DROP)
163 return rte_flow_error_set(error, EINVAL,
164 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
165 "can't drop and decap in same flow");
/* Only one encap/decap action is allowed per flow. */
166 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
167 return rte_flow_error_set(error, EINVAL,
168 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
169 "can only have a single encap or"
170 " decap action in a flow");
/* L2 decap is ingress-only here (egress guard elided from this view). */
172 return rte_flow_error_set(error, ENOTSUP,
173 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
175 "decap action not supported for "
181 * Validate the raw encap action.
183 * @param[in] action_flags
184 * Holds the actions detected until now.
186 * Pointer to the encap action.
188 * Pointer to flow attributes
190 * Pointer to error structure.
193 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); some guard
 * conditions are not visible — code kept byte-identical.
 */
196 flow_dv_validate_action_raw_encap(uint64_t action_flags,
197 const struct rte_flow_action *action,
198 const struct rte_flow_attr *attr,
199 struct rte_flow_error *error)
/* NOTE(review): guard (presumably `if (!action->conf)`) elided. */
202 return rte_flow_error_set(error, EINVAL,
203 RTE_FLOW_ERROR_TYPE_ACTION, action,
204 "configuration cannot be null");
/* Encap is pointless after drop. */
205 if (action_flags & MLX5_FLOW_ACTION_DROP)
206 return rte_flow_error_set(error, EINVAL,
207 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
208 "can't drop and encap in same flow");
/* Only one encap action is allowed per flow. */
209 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
210 return rte_flow_error_set(error, EINVAL,
211 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
212 "can only have a single encap"
213 " action in a flow");
214 /* encap without preceding decap is not supported for ingress */
215 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
216 return rte_flow_error_set(error, ENOTSUP,
217 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
219 "encap action not supported for "
225 * Validate the raw decap action.
227 * @param[in] action_flags
228 * Holds the actions detected until now.
230 * Pointer to the encap action.
232 * Pointer to flow attributes
234 * Pointer to error structure.
237 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); some guard
 * conditions and the loop increment are not visible — code kept byte-identical.
 */
240 flow_dv_validate_action_raw_decap(uint64_t action_flags,
241 const struct rte_flow_action *action,
242 const struct rte_flow_attr *attr,
243 struct rte_flow_error *error)
/* Decap after drop is contradictory. */
245 if (action_flags & MLX5_FLOW_ACTION_DROP)
246 return rte_flow_error_set(error, EINVAL,
247 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
248 "can't drop and decap in same flow");
/* Decap must come before any encap action in the list. */
249 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
250 return rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
252 "can't have encap action before"
/* Only one decap action is allowed per flow. */
254 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
255 return rte_flow_error_set(error, EINVAL,
256 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
257 "can only have a single decap"
258 " action in a flow");
259 /* decap action is valid on egress only if it is followed by encap */
/* Scan the rest of the action list looking for a RAW_ENCAP (egress guard elided). */
261 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
262 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
265 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
266 return rte_flow_error_set
268 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
269 NULL, "decap action not supported"
276 * Get the size of specific rte_flow_item_type
278 * @param[in] item_type
279 * Tested rte_flow_item_type.
282 * sizeof struct item_type, 0 if void or irrelevant.
/*
 * Map a flow item type to the byte size of its spec structure, used to size
 * encap header copies. NOTE(review): chunk is elided — the `break`s between
 * cases and the default/return are not visible; code kept byte-identical.
 */
285 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
290 case RTE_FLOW_ITEM_TYPE_ETH:
291 retval = sizeof(struct rte_flow_item_eth);
293 case RTE_FLOW_ITEM_TYPE_VLAN:
294 retval = sizeof(struct rte_flow_item_vlan);
296 case RTE_FLOW_ITEM_TYPE_IPV4:
297 retval = sizeof(struct rte_flow_item_ipv4);
299 case RTE_FLOW_ITEM_TYPE_IPV6:
300 retval = sizeof(struct rte_flow_item_ipv6);
302 case RTE_FLOW_ITEM_TYPE_UDP:
303 retval = sizeof(struct rte_flow_item_udp);
305 case RTE_FLOW_ITEM_TYPE_TCP:
306 retval = sizeof(struct rte_flow_item_tcp);
308 case RTE_FLOW_ITEM_TYPE_VXLAN:
309 retval = sizeof(struct rte_flow_item_vxlan);
311 case RTE_FLOW_ITEM_TYPE_GRE:
312 retval = sizeof(struct rte_flow_item_gre);
314 case RTE_FLOW_ITEM_TYPE_NVGRE:
315 retval = sizeof(struct rte_flow_item_nvgre);
317 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
318 retval = sizeof(struct rte_flow_item_vxlan_gpe);
320 case RTE_FLOW_ITEM_TYPE_MPLS:
321 retval = sizeof(struct rte_flow_item_mpls);
/* VOID contributes no bytes to the encap header. */
323 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
331 #define MLX5_ENCAP_IPV4_VERSION 0x40
332 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
333 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
334 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
335 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
336 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
337 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
340 * Convert the encap action data from list of rte_flow_item to raw buffer
343 * Pointer to rte_flow_item objects list.
345 * Pointer to the output buffer.
347 * Pointer to the output buffer size.
349 * Pointer to the error structure.
352 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Serialize an item list into a raw header blob for the packet-reformat
 * action, filling in sensible defaults (ethertype, IP version/TTL, UDP
 * destination port, VXLAN flags) for fields the user left zero.
 * NOTE(review): chunk is elided (non-contiguous source lines) — several
 * guards, `break`s and the trailing size store are not visible; code kept
 * byte-identical.
 */
355 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
356 size_t *size, struct rte_flow_error *error)
/* Track the most recent header of each layer so later items can patch
 * protocol/ethertype fields of the header beneath them. */
358 struct ether_hdr *eth = NULL;
359 struct vlan_hdr *vlan = NULL;
360 struct ipv4_hdr *ipv4 = NULL;
361 struct ipv6_hdr *ipv6 = NULL;
362 struct udp_hdr *udp = NULL;
363 struct vxlan_hdr *vxlan = NULL;
364 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
365 struct gre_hdr *gre = NULL;
367 size_t temp_size = 0;
/* NOTE(review): guard (presumably `if (!items)`) elided. */
370 return rte_flow_error_set(error, EINVAL,
371 RTE_FLOW_ERROR_TYPE_ACTION,
372 NULL, "invalid empty data");
373 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
374 len = flow_dv_get_item_len(items->type);
/* The accumulated headers must fit the device reformat limit. */
375 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
376 return rte_flow_error_set(error, EINVAL,
377 RTE_FLOW_ERROR_TYPE_ACTION,
379 "items total size is too big"
380 " for encap action");
/* Append this item's spec verbatim; defaults are patched below. */
381 rte_memcpy((void *)&buf[temp_size], items->spec, len);
382 switch (items->type) {
383 case RTE_FLOW_ITEM_TYPE_ETH:
384 eth = (struct ether_hdr *)&buf[temp_size];
386 case RTE_FLOW_ITEM_TYPE_VLAN:
387 vlan = (struct vlan_hdr *)&buf[temp_size];
/* VLAN requires a preceding Ethernet header (guard elided). */
389 return rte_flow_error_set(error, EINVAL,
390 RTE_FLOW_ERROR_TYPE_ACTION,
392 "eth header not found");
393 if (!eth->ether_type)
394 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
396 case RTE_FLOW_ITEM_TYPE_IPV4:
397 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
/* IPv4 requires a preceding eth or vlan header (guard elided). */
399 return rte_flow_error_set(error, EINVAL,
400 RTE_FLOW_ERROR_TYPE_ACTION,
402 "neither eth nor vlan"
/* Patch the ethertype of the innermost L2 header if left zero. */
404 if (vlan && !vlan->eth_proto)
405 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
406 else if (eth && !eth->ether_type)
407 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
/* Default IPv4 version/IHL and TTL when unspecified. */
408 if (!ipv4->version_ihl)
409 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
410 MLX5_ENCAP_IPV4_IHL_MIN;
411 if (!ipv4->time_to_live)
412 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
414 case RTE_FLOW_ITEM_TYPE_IPV6:
415 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
/* IPv6 requires a preceding eth or vlan header (guard elided). */
417 return rte_flow_error_set(error, EINVAL,
418 RTE_FLOW_ERROR_TYPE_ACTION,
420 "neither eth nor vlan"
422 if (vlan && !vlan->eth_proto)
423 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
424 else if (eth && !eth->ether_type)
425 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
/* Default version/traffic-class/flow-label and hop limit (guards elided). */
428 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
429 if (!ipv6->hop_limits)
430 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
432 case RTE_FLOW_ITEM_TYPE_UDP:
433 udp = (struct udp_hdr *)&buf[temp_size];
/* UDP requires a preceding IP header (guard elided). */
435 return rte_flow_error_set(error, EINVAL,
436 RTE_FLOW_ERROR_TYPE_ACTION,
438 "ip header not found");
439 if (ipv4 && !ipv4->next_proto_id)
440 ipv4->next_proto_id = IPPROTO_UDP;
441 else if (ipv6 && !ipv6->proto)
442 ipv6->proto = IPPROTO_UDP;
444 case RTE_FLOW_ITEM_TYPE_VXLAN:
445 vxlan = (struct vxlan_hdr *)&buf[temp_size];
/* VXLAN requires a preceding UDP header (guard elided). */
447 return rte_flow_error_set(error, EINVAL,
448 RTE_FLOW_ERROR_TYPE_ACTION,
450 "udp header not found");
/* Default the UDP destination port and VXLAN flags (guards elided). */
452 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
453 if (!vxlan->vx_flags)
455 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
457 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
458 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
/* VXLAN-GPE requires a preceding UDP header (guard elided). */
460 return rte_flow_error_set(error, EINVAL,
461 RTE_FLOW_ERROR_TYPE_ACTION,
463 "udp header not found");
/* GPE must carry an explicit next-protocol value. */
464 if (!vxlan_gpe->proto)
465 return rte_flow_error_set(error, EINVAL,
466 RTE_FLOW_ERROR_TYPE_ACTION,
468 "next protocol not found");
/* Default the UDP destination port and GPE flags (guards elided). */
471 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
472 if (!vxlan_gpe->vx_flags)
473 vxlan_gpe->vx_flags =
474 MLX5_ENCAP_VXLAN_GPE_FLAGS;
476 case RTE_FLOW_ITEM_TYPE_GRE:
477 case RTE_FLOW_ITEM_TYPE_NVGRE:
478 gre = (struct gre_hdr *)&buf[temp_size];
/* GRE must carry an explicit protocol value (guard elided). */
480 return rte_flow_error_set(error, EINVAL,
481 RTE_FLOW_ERROR_TYPE_ACTION,
483 "next protocol not found");
/* GRE requires a preceding IP header (guard elided). */
485 return rte_flow_error_set(error, EINVAL,
486 RTE_FLOW_ERROR_TYPE_ACTION,
488 "ip header not found");
489 if (ipv4 && !ipv4->next_proto_id)
490 ipv4->next_proto_id = IPPROTO_GRE;
491 else if (ipv6 && !ipv6->proto)
492 ipv6->proto = IPPROTO_GRE;
494 case RTE_FLOW_ITEM_TYPE_VOID:
/* Unsupported item types abort the conversion. */
497 return rte_flow_error_set(error, EINVAL,
498 RTE_FLOW_ERROR_TYPE_ACTION,
500 "unsupported item type");
510 * Convert L2 encap action to DV specification.
513 * Pointer to rte_eth_dev structure.
515 * Pointer to action structure.
517 * Pointer to the error structure.
520 * Pointer to action on success, NULL otherwise and rte_errno is set.
/*
 * Build an ibv packet-reformat action (L2-to-L2 tunnel, NIC Tx) from a
 * VXLAN/NVGRE/RAW encap flow action. NOTE(review): chunk is elided — the
 * convert-result check and return are not visible; code kept byte-identical.
 */
522 static struct ibv_flow_action *
523 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
524 const struct rte_flow_action *action,
525 struct rte_flow_error *error)
527 struct ibv_flow_action *verbs_action = NULL;
528 const struct rte_flow_item *encap_data;
529 const struct rte_flow_action_raw_encap *raw_encap_data;
530 struct priv *priv = dev->data->dev_private;
531 uint8_t buf[MLX5_ENCAP_MAX_LEN];
532 uint8_t *buf_ptr = buf;
534 int convert_result = 0;
/* RAW encap supplies a pre-built buffer; no item conversion needed. */
536 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
538 (const struct rte_flow_action_raw_encap *)action->conf;
539 buf_ptr = raw_encap_data->data;
540 size = raw_encap_data->size;
/* VXLAN/NVGRE encap supply an item list that must be serialized. */
542 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
544 ((const struct rte_flow_action_vxlan_encap *)
545 action->conf)->definition;
548 ((const struct rte_flow_action_nvgre_encap *)
549 action->conf)->definition;
550 convert_result = flow_dv_convert_encap_data(encap_data, buf,
/* NOTE(review): convert_result error check elided from this view. */
555 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
556 (priv->ctx, size, buf_ptr,
557 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
558 MLX5DV_FLOW_TABLE_TYPE_NIC_TX);
/* NOTE(review): NULL check on verbs_action elided from this view. */
560 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
561 NULL, "cannot create L2 encap action");
566 * Convert L2 decap action to DV specification.
569 * Pointer to rte_eth_dev structure.
571 * Pointer to the error structure.
574 * Pointer to action on success, NULL otherwise and rte_errno is set.
/*
 * Build an ibv packet-reformat action (L2 tunnel to L2, NIC Rx); decap takes
 * no header data. NOTE(review): chunk is elided — the size/data arguments and
 * the NULL check are not visible; code kept byte-identical.
 */
576 static struct ibv_flow_action *
577 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
578 struct rte_flow_error *error)
580 struct ibv_flow_action *verbs_action = NULL;
581 struct priv *priv = dev->data->dev_private;
583 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
585 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
586 MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
/* NOTE(review): NULL check on verbs_action elided from this view. */
588 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
589 NULL, "cannot create L2 decap action");
594 * Convert raw decap/encap (L3 tunnel) action to DV specification.
597 * Pointer to rte_eth_dev structure.
599 * Pointer to action structure.
601 * Pointer to the flow attributes.
603 * Pointer to the error structure.
606 * Pointer to action on success, NULL otherwise and rte_errno is set.
/*
 * Build an ibv packet-reformat action for raw L3-tunnel encap/decap; the
 * direction (egress = L2-to-L3 encap on Tx, ingress = L3-to-L2 decap on Rx)
 * is derived from the flow attributes. NOTE(review): chunk is elided — the
 * NULL check/return are not visible; code kept byte-identical.
 */
608 static struct ibv_flow_action *
609 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
610 const struct rte_flow_action *action,
611 const struct rte_flow_attr *attr,
612 struct rte_flow_error *error)
614 struct ibv_flow_action *verbs_action = NULL;
615 const struct rte_flow_action_raw_encap *encap_data;
616 struct priv *priv = dev->data->dev_private;
617 enum mlx5dv_flow_action_packet_reformat_type reformat_type;
618 enum mlx5dv_flow_table_type ft_type;
620 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
/* Direction selects both the reformat type and the target flow table. */
621 reformat_type = attr->egress ?
622 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
623 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
624 ft_type = attr->egress ?
625 MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
626 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
/* Decap (size == 0) passes no header data to the device. */
627 verbs_action = mlx5_glue->dv_create_flow_action_packet_reformat
628 (priv->ctx, encap_data->size,
629 (encap_data->size ? encap_data->data : NULL),
630 reformat_type, ft_type);
/* NOTE(review): NULL check on verbs_action elided from this view. */
632 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
633 NULL, "cannot create encap action");
638 * Verify the @p attributes will be correctly understood by the NIC and store
639 * them in the @p flow if everything is correct.
642 * Pointer to dev struct.
643 * @param[in] attributes
644 * Pointer to flow attributes
646 * Pointer to error structure.
649 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the final
 * `return 0` is not visible — code kept byte-identical.
 */
652 flow_dv_validate_attributes(struct rte_eth_dev *dev,
653 const struct rte_flow_attr *attributes,
654 struct rte_flow_error *error)
656 struct priv *priv = dev->data->dev_private;
/* flow_prio is 1-based; the highest usable priority index is one less. */
657 uint32_t priority_max = priv->config.flow_prio - 1;
659 if (attributes->group)
660 return rte_flow_error_set(error, ENOTSUP,
661 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
663 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a reserved sentinel exempt from the range check. */
664 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
665 attributes->priority >= priority_max)
666 return rte_flow_error_set(error, ENOTSUP,
667 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
669 "priority out of range");
670 if (attributes->transfer)
671 return rte_flow_error_set(error, ENOTSUP,
672 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
674 "transfer is not supported");
/* XOR: exactly one of ingress/egress must be set. */
675 if (!(attributes->egress ^ attributes->ingress))
676 return rte_flow_error_set(error, ENOTSUP,
677 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
678 "must specify exactly one of "
679 "ingress or egress");
684 * Internal validation function. For validating both actions and items.
687 * Pointer to the rte_eth_dev structure.
689 * Pointer to the flow attributes.
691 * Pointer to the list of items.
693 * Pointer to the list of actions.
695 * Pointer to the error structure.
698 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Walk the pattern and the action list, validating each entry and
 * accumulating item/action bit flags used for cross-checks.
 * NOTE(review): chunk is elided (non-contiguous source lines); per-case
 * error checks, `break`s and braces are not visible — code kept
 * byte-identical.
 */
701 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
702 const struct rte_flow_item items[],
703 const struct rte_flow_action actions[],
704 struct rte_flow_error *error)
707 uint64_t action_flags = 0;
708 uint64_t item_flags = 0;
/* 0xff = "any protocol" until an IP item narrows it for tunnel checks. */
710 uint8_t next_protocol = 0xff;
715 ret = flow_dv_validate_attributes(dev, attr, error);
/* ---- Pattern items ---- */
718 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* Once a tunnel layer was seen, later L2-L4 items are inner headers. */
719 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
720 switch (items->type) {
721 case RTE_FLOW_ITEM_TYPE_VOID:
723 case RTE_FLOW_ITEM_TYPE_ETH:
724 ret = mlx5_flow_validate_item_eth(items, item_flags,
728 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
729 MLX5_FLOW_LAYER_OUTER_L2;
731 case RTE_FLOW_ITEM_TYPE_VLAN:
732 ret = mlx5_flow_validate_item_vlan(items, item_flags,
736 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
737 MLX5_FLOW_LAYER_OUTER_VLAN;
739 case RTE_FLOW_ITEM_TYPE_IPV4:
740 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
744 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
745 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Remember the masked next protocol for later GRE validation. */
746 if (items->mask != NULL &&
747 ((const struct rte_flow_item_ipv4 *)
748 items->mask)->hdr.next_proto_id)
750 ((const struct rte_flow_item_ipv4 *)
751 (items->spec))->hdr.next_proto_id;
753 case RTE_FLOW_ITEM_TYPE_IPV6:
754 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
758 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
759 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
760 if (items->mask != NULL &&
761 ((const struct rte_flow_item_ipv6 *)
762 items->mask)->hdr.proto)
764 ((const struct rte_flow_item_ipv6 *)
765 items->spec)->hdr.proto;
767 case RTE_FLOW_ITEM_TYPE_TCP:
768 ret = mlx5_flow_validate_item_tcp
771 &rte_flow_item_tcp_mask,
775 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
776 MLX5_FLOW_LAYER_OUTER_L4_TCP;
778 case RTE_FLOW_ITEM_TYPE_UDP:
779 ret = mlx5_flow_validate_item_udp(items, item_flags,
784 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
785 MLX5_FLOW_LAYER_OUTER_L4_UDP;
787 case RTE_FLOW_ITEM_TYPE_GRE:
788 case RTE_FLOW_ITEM_TYPE_NVGRE:
789 ret = mlx5_flow_validate_item_gre(items, item_flags,
790 next_protocol, error);
793 item_flags |= MLX5_FLOW_LAYER_GRE;
795 case RTE_FLOW_ITEM_TYPE_VXLAN:
796 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
800 item_flags |= MLX5_FLOW_LAYER_VXLAN;
802 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
803 ret = mlx5_flow_validate_item_vxlan_gpe(items,
808 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
810 case RTE_FLOW_ITEM_TYPE_META:
811 ret = flow_dv_validate_item_meta(dev, items, attr,
815 item_flags |= MLX5_FLOW_ITEM_METADATA;
818 return rte_flow_error_set(error, ENOTSUP,
819 RTE_FLOW_ERROR_TYPE_ITEM,
820 NULL, "item not supported");
/* ---- Actions ---- */
823 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
/* Bound the action count to what the DV layer can translate. */
824 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
825 return rte_flow_error_set(error, ENOTSUP,
826 RTE_FLOW_ERROR_TYPE_ACTION,
827 actions, "too many actions");
828 switch (actions->type) {
829 case RTE_FLOW_ACTION_TYPE_VOID:
831 case RTE_FLOW_ACTION_TYPE_FLAG:
832 ret = mlx5_flow_validate_action_flag(action_flags,
836 action_flags |= MLX5_FLOW_ACTION_FLAG;
839 case RTE_FLOW_ACTION_TYPE_MARK:
840 ret = mlx5_flow_validate_action_mark(actions,
845 action_flags |= MLX5_FLOW_ACTION_MARK;
848 case RTE_FLOW_ACTION_TYPE_DROP:
849 ret = mlx5_flow_validate_action_drop(action_flags,
853 action_flags |= MLX5_FLOW_ACTION_DROP;
856 case RTE_FLOW_ACTION_TYPE_QUEUE:
857 ret = mlx5_flow_validate_action_queue(actions,
862 action_flags |= MLX5_FLOW_ACTION_QUEUE;
865 case RTE_FLOW_ACTION_TYPE_RSS:
866 ret = mlx5_flow_validate_action_rss(actions,
871 action_flags |= MLX5_FLOW_ACTION_RSS;
874 case RTE_FLOW_ACTION_TYPE_COUNT:
875 ret = mlx5_flow_validate_action_count(dev, attr, error);
878 action_flags |= MLX5_FLOW_ACTION_COUNT;
881 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
882 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
883 ret = flow_dv_validate_action_l2_encap(action_flags,
888 action_flags |= actions->type ==
889 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
890 MLX5_FLOW_ACTION_VXLAN_ENCAP :
891 MLX5_FLOW_ACTION_NVGRE_ENCAP;
894 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
895 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
896 ret = flow_dv_validate_action_l2_decap(action_flags,
900 action_flags |= actions->type ==
901 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
902 MLX5_FLOW_ACTION_VXLAN_DECAP :
903 MLX5_FLOW_ACTION_NVGRE_DECAP;
906 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
907 ret = flow_dv_validate_action_raw_encap(action_flags,
912 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
915 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
916 ret = flow_dv_validate_action_raw_decap(action_flags,
921 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
925 return rte_flow_error_set(error, ENOTSUP,
926 RTE_FLOW_ERROR_TYPE_ACTION,
928 "action not supported");
/* An ingress flow must decide the packet's fate (queue/RSS/drop/...). */
931 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
932 return rte_flow_error_set(error, EINVAL,
933 RTE_FLOW_ERROR_TYPE_ACTION, actions,
934 "no fate action is found");
939 * Internal preparation function. Allocates the DV flow size,
940 * this size is constant.
943 * Pointer to the flow attributes.
945 * Pointer to the list of items.
947 * Pointer to the list of actions.
948 * @param[out] item_flags
949 * Pointer to bit mask of all items detected.
950 * @param[out] action_flags
951 * Pointer to bit mask of all actions detected.
953 * Pointer to the error structure.
956 * Pointer to mlx5_flow object on success,
957 * otherwise NULL and rte_errno is set.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the NULL
 * check after rte_calloc and the return are not visible — code kept
 * byte-identical.
 */
959 static struct mlx5_flow *
960 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
961 const struct rte_flow_item items[] __rte_unused,
962 const struct rte_flow_action actions[] __rte_unused,
963 uint64_t *item_flags __rte_unused,
964 uint64_t *action_flags __rte_unused,
965 struct rte_flow_error *error)
/* DV flow size is constant, so the inputs are unused here. */
967 uint32_t size = sizeof(struct mlx5_flow);
968 struct mlx5_flow *flow;
970 flow = rte_calloc(__func__, 1, size, 0);
/* NOTE(review): allocation-failure guard elided from this view. */
972 rte_flow_error_set(error, ENOMEM,
973 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
974 "not enough memory to create flow");
/* Matcher value buffer spans the whole fte_match_param layout. */
977 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
982 * Add Ethernet item to matcher and to the value.
984 * @param[in, out] matcher
986 * @param[in, out] key
987 * Flow matcher value.
989 * Flow pattern to translate.
991 * Item is inner pattern.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the
 * default-mask fallback and early-return paths are not visible — code
 * kept byte-identical.
 */
994 flow_dv_translate_item_eth(void *matcher, void *key,
995 const struct rte_flow_item *item, int inner)
997 const struct rte_flow_item_eth *eth_m = item->mask;
998 const struct rte_flow_item_eth *eth_v = item->spec;
/* Widest Ethernet match the NIC supports: full MACs + ethertype. */
999 const struct rte_flow_item_eth nic_mask = {
1000 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1001 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1002 .type = RTE_BE16(0xffff),
/* Select inner vs outer header sections of the match parameter. */
1014 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1016 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1018 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1020 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1022 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
1023 &eth_m->dst, sizeof(eth_m->dst));
1024 /* The value must be in the range of the mask. */
1025 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
1026 for (i = 0; i < sizeof(eth_m->dst); ++i)
1027 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
1028 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
1029 &eth_m->src, sizeof(eth_m->src));
1030 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
1031 /* The value must be in the range of the mask. */
/* NOTE(review): loop bound uses sizeof(eth_m->dst) while copying src
 * bytes — both are 6-byte MACs so the count matches; verify intent
 * against the full file. */
1032 for (i = 0; i < sizeof(eth_m->dst); ++i)
1033 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
1034 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
1035 rte_be_to_cpu_16(eth_m->type));
1036 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
/* Ethertype value is written big-endian, masked by the user mask. */
1037 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
1041 * Add VLAN item to matcher and to the value.
1043 * @param[in, out] matcher
1045 * @param[in, out] key
1046 * Flow matcher value.
1048 * Flow pattern to translate.
1050 * Item is inner pattern.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the
 * default-mask fallback and early-return paths are not visible — code
 * kept byte-identical.
 */
1053 flow_dv_translate_item_vlan(void *matcher, void *key,
1054 const struct rte_flow_item *item,
1057 const struct rte_flow_item_vlan *vlan_m = item->mask;
1058 const struct rte_flow_item_vlan *vlan_v = item->spec;
/* NIC can match 12-bit VID plus the inner ethertype. */
1059 const struct rte_flow_item_vlan nic_mask = {
1060 .tci = RTE_BE16(0x0fff),
1061 .inner_type = RTE_BE16(0xffff),
/* Select inner vs outer header sections of the match parameter. */
1073 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1075 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1077 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1079 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1081 tci_m = rte_be_to_cpu_16(vlan_m->tci);
1082 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
/* Presence of the item itself forces a C-VLAN tag match. */
1083 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
1084 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
/* Split the TCI into VID (bits 0-11), CFI/DEI (bit 12) and PCP (bits 13-15). */
1085 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
1086 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
1087 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
1088 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
1089 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
1090 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
1094 * Add IPV4 item to matcher and to the value.
1096 * @param[in, out] matcher
1098 * @param[in, out] key
1099 * Flow matcher value.
1101 * Flow pattern to translate.
1103 * Item is inner pattern.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the
 * default-mask fallback and early-return paths are not visible — code
 * kept byte-identical.
 */
1106 flow_dv_translate_item_ipv4(void *matcher, void *key,
1107 const struct rte_flow_item *item,
1110 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
1111 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
/* Widest IPv4 match: addresses, TOS and next protocol. */
1112 const struct rte_flow_item_ipv4 nic_mask = {
1114 .src_addr = RTE_BE32(0xffffffff),
1115 .dst_addr = RTE_BE32(0xffffffff),
1116 .type_of_service = 0xff,
1117 .next_proto_id = 0xff,
/* Select inner vs outer header sections of the match parameter. */
1127 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1129 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1131 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1133 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching an IPv4 item implies IP version == 4. */
1135 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1136 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
/* Addresses live in big-endian form; value = spec & mask. */
1141 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1142 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1143 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1144 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1145 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
1146 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
1147 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1148 src_ipv4_src_ipv6.ipv4_layout.ipv4);
1149 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1150 src_ipv4_src_ipv6.ipv4_layout.ipv4);
1151 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
1152 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* TOS byte is split into ECN (low 2 bits) and DSCP (upper 6 bits). */
1153 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
1154 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
1155 ipv4_m->hdr.type_of_service);
1156 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
1157 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
1158 ipv4_m->hdr.type_of_service >> 2);
1159 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
1160 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1161 ipv4_m->hdr.next_proto_id);
1162 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1163 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
1167 * Add IPV6 item to matcher and to the value.
1169 * @param[in, out] matcher
1171 * @param[in, out] key
1172 * Flow matcher value.
1174 * Flow pattern to translate.
1176 * Item is inner pattern.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the
 * default-mask fallback, early-return paths and some flow-label lines
 * are not visible — code kept byte-identical.
 */
1179 flow_dv_translate_item_ipv6(void *matcher, void *key,
1180 const struct rte_flow_item *item,
1183 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
1184 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest IPv6 match: full addresses plus version/TC/flow-label word. */
1185 const struct rte_flow_item_ipv6 nic_mask = {
1188 "\xff\xff\xff\xff\xff\xff\xff\xff"
1189 "\xff\xff\xff\xff\xff\xff\xff\xff",
1191 "\xff\xff\xff\xff\xff\xff\xff\xff"
1192 "\xff\xff\xff\xff\xff\xff\xff\xff",
1193 .vtc_flow = RTE_BE32(0xffffffff),
/* Flow label is matched through the misc parameters section. */
1200 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1201 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs outer header sections of the match parameter. */
1210 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1212 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1214 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1216 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching an IPv6 item implies IP version == 6. */
1218 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1219 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Copy 16-byte addresses; value bytes are masked by the user mask. */
1224 size = sizeof(ipv6_m->hdr.dst_addr);
1225 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1226 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1227 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1228 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1229 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
1230 for (i = 0; i < size; ++i)
1231 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
1232 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1233 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1234 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1235 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1236 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
1237 for (i = 0; i < size; ++i)
1238 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/* vtc_flow packs version(4) | traffic class(8) | flow label(20). */
1240 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
1241 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
/* ECN = TC low bits (>>20), DSCP = TC high bits (>>22). */
1242 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
1243 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
1244 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
1245 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to inner/outer misc fields (inner/outer split elided). */
1248 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
1250 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
1253 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
1255 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
/* Next-header (protocol) match. */
1259 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1261 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1262 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
1266 * Add TCP item to matcher and to the value.
1268 * @param[in, out] matcher
1270 * @param[in, out] key
1271 * Flow matcher value.
1273 * Flow pattern to translate.
1275 * Item is inner pattern.
/*
 * NOTE(review): chunk is elided (non-contiguous source lines); the
 * early-return and mask-fallback guards are not visible — code kept
 * byte-identical.
 */
1278 flow_dv_translate_item_tcp(void *matcher, void *key,
1279 const struct rte_flow_item *item,
1282 const struct rte_flow_item_tcp *tcp_m = item->mask;
1283 const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs outer header sections of the match parameter. */
1288 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1290 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1292 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1294 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Matching a TCP item pins the IP protocol field to TCP. */
1296 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1297 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* Fall back to the default TCP mask when none was supplied (guard elided). */
1301 tcp_m = &rte_flow_item_tcp_mask;
/* Ports are stored CPU-endian in the match parameter; value = spec & mask. */
1302 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
1303 rte_be_to_cpu_16(tcp_m->hdr.src_port));
1304 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
1305 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
1306 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
1307 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
1308 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
1309 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
1313 * Add UDP item to matcher and to the value.
1315 * @param[in, out] matcher
1317 * @param[in, out] key
1318 * Flow matcher value.
1320 * Flow pattern to translate.
1322 * Item is inner pattern.
1325 flow_dv_translate_item_udp(void *matcher, void *key,
1326 const struct rte_flow_item *item,
1329 const struct rte_flow_item_udp *udp_m = item->mask;
1330 const struct rte_flow_item_udp *udp_v = item->spec;
1335 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1337 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1339 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1341 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1343 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1344 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
1348 udp_m = &rte_flow_item_udp_mask;
1349 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
1350 rte_be_to_cpu_16(udp_m->hdr.src_port));
1351 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
1352 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
1353 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
1354 rte_be_to_cpu_16(udp_m->hdr.dst_port));
1355 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1356 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
1360 * Add GRE item to matcher and to the value.
1362 * @param[in, out] matcher
1364 * @param[in, out] key
1365 * Flow matcher value.
1367 * Flow pattern to translate.
1369 * Item is inner pattern.
1372 flow_dv_translate_item_gre(void *matcher, void *key,
1373 const struct rte_flow_item *item,
1376 const struct rte_flow_item_gre *gre_m = item->mask;
1377 const struct rte_flow_item_gre *gre_v = item->spec;
1380 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1381 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1384 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1386 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1388 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1390 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1392 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1393 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
1397 gre_m = &rte_flow_item_gre_mask;
1398 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
1399 rte_be_to_cpu_16(gre_m->protocol));
1400 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1401 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
1405 * Add NVGRE item to matcher and to the value.
1407 * @param[in, out] matcher
1409 * @param[in, out] key
1410 * Flow matcher value.
1412 * Flow pattern to translate.
1414 * Item is inner pattern.
1417 flow_dv_translate_item_nvgre(void *matcher, void *key,
1418 const struct rte_flow_item *item,
1421 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
1422 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
1423 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1424 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1425 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
1426 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
1432 flow_dv_translate_item_gre(matcher, key, item, inner);
1436 nvgre_m = &rte_flow_item_nvgre_mask;
1437 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
1438 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
1439 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
1440 memcpy(gre_key_m, tni_flow_id_m, size);
1441 for (i = 0; i < size; ++i)
1442 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
1446 * Add VXLAN item to matcher and to the value.
1448 * @param[in, out] matcher
1450 * @param[in, out] key
1451 * Flow matcher value.
1453 * Flow pattern to translate.
1455 * Item is inner pattern.
1458 flow_dv_translate_item_vxlan(void *matcher, void *key,
1459 const struct rte_flow_item *item,
1462 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
1463 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
1466 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1467 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1475 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1477 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1479 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1481 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1483 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
1484 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
1485 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
1486 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
1487 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
1492 vxlan_m = &rte_flow_item_vxlan_mask;
1493 size = sizeof(vxlan_m->vni);
1494 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
1495 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
1496 memcpy(vni_m, vxlan_m->vni, size);
1497 for (i = 0; i < size; ++i)
1498 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
1502 * Add META item to matcher
1504 * @param[in, out] matcher
1506 * @param[in, out] key
1507 * Flow matcher value.
1509 * Flow pattern to translate.
1511 * Item is inner pattern.
1514 flow_dv_translate_item_meta(void *matcher, void *key,
1515 const struct rte_flow_item *item)
1517 const struct rte_flow_item_meta *meta_m;
1518 const struct rte_flow_item_meta *meta_v;
1520 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
1522 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1524 meta_m = (const void *)item->mask;
1526 meta_m = &rte_flow_item_meta_mask;
1527 meta_v = (const void *)item->spec;
1529 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
1530 rte_be_to_cpu_32(meta_m->data));
1531 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
1532 rte_be_to_cpu_32(meta_v->data & meta_m->data));
1537 * Update the matcher and the value based the selected item.
1539 * @param[in, out] matcher
1541 * @param[in, out] key
1542 * Flow matcher value.
1544 * Flow pattern to translate.
1545 * @param[in, out] dev_flow
1546 * Pointer to the mlx5_flow.
1548 * Item is inner pattern.
1551 flow_dv_create_item(void *matcher, void *key,
1552 const struct rte_flow_item *item,
1553 struct mlx5_flow *dev_flow,
1556 struct mlx5_flow_dv_matcher *tmatcher = matcher;
1558 switch (item->type) {
1559 case RTE_FLOW_ITEM_TYPE_ETH:
1560 flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
1562 tmatcher->priority = MLX5_PRIORITY_MAP_L2;
1564 case RTE_FLOW_ITEM_TYPE_VLAN:
1565 flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
1568 case RTE_FLOW_ITEM_TYPE_IPV4:
1569 flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
1571 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
1572 dev_flow->dv.hash_fields |=
1573 mlx5_flow_hashfields_adjust(dev_flow, inner,
1574 MLX5_IPV4_LAYER_TYPES,
1575 MLX5_IPV4_IBV_RX_HASH);
1577 case RTE_FLOW_ITEM_TYPE_IPV6:
1578 flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
1580 tmatcher->priority = MLX5_PRIORITY_MAP_L3;
1581 dev_flow->dv.hash_fields |=
1582 mlx5_flow_hashfields_adjust(dev_flow, inner,
1583 MLX5_IPV6_LAYER_TYPES,
1584 MLX5_IPV6_IBV_RX_HASH);
1586 case RTE_FLOW_ITEM_TYPE_TCP:
1587 flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
1589 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
1590 dev_flow->dv.hash_fields |=
1591 mlx5_flow_hashfields_adjust(dev_flow, inner,
1593 (IBV_RX_HASH_SRC_PORT_TCP |
1594 IBV_RX_HASH_DST_PORT_TCP));
1596 case RTE_FLOW_ITEM_TYPE_UDP:
1597 flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
1599 tmatcher->priority = MLX5_PRIORITY_MAP_L4;
1600 dev_flow->verbs.hash_fields |=
1601 mlx5_flow_hashfields_adjust(dev_flow, inner,
1603 (IBV_RX_HASH_SRC_PORT_UDP |
1604 IBV_RX_HASH_DST_PORT_UDP));
1606 case RTE_FLOW_ITEM_TYPE_GRE:
1607 flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
1610 case RTE_FLOW_ITEM_TYPE_NVGRE:
1611 flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
1614 case RTE_FLOW_ITEM_TYPE_VXLAN:
1615 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1616 flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
1619 case RTE_FLOW_ITEM_TYPE_META:
1620 flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
1628 * Store the requested actions in an array.
1631 * Pointer to rte_eth_dev structure.
1633 * Flow action to translate.
1634 * @param[in, out] dev_flow
1635 * Pointer to the mlx5_flow.
1637 * Pointer to the flow attributes.
1639 * Pointer to the error structure.
1642 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): lossy extraction — original line numbers are fused into
 * the text and several lines (breaks, actions_n increments, error paths,
 * closing braces) are missing.  Comments describe only what is visible.
 * Translates one rte_flow action into dev_flow->dv.actions[] and records
 * it in flow->actions; returns 0 or a negative errno (per the header
 * comment above) — TODO confirm against the full source.
 */
1645 flow_dv_create_action(struct rte_eth_dev *dev,
1646 const struct rte_flow_action *action,
1647 struct mlx5_flow *dev_flow,
1648 const struct rte_flow_attr *attr,
1649 struct rte_flow_error *error)
1651 const struct rte_flow_action_queue *queue;
1652 const struct rte_flow_action_rss *rss;
/* Next free slot in the per-device-flow DV action array. */
1653 int actions_n = dev_flow->dv.actions_n;
1654 struct rte_flow *flow = dev_flow->flow;
/* Kept to scan forward from the current action (see RAW_DECAP below). */
1655 const struct rte_flow_action *action_ptr = action;
1657 switch (action->type) {
1658 case RTE_FLOW_ACTION_TYPE_VOID:
/* FLAG: tag packets with the driver default mark value. */
1660 case RTE_FLOW_ACTION_TYPE_FLAG:
1661 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1662 dev_flow->dv.actions[actions_n].tag_value =
1663 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
1665 flow->actions |= MLX5_FLOW_ACTION_FLAG;
/* MARK: tag packets with the user-provided mark id. */
1667 case RTE_FLOW_ACTION_TYPE_MARK:
1668 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
1669 dev_flow->dv.actions[actions_n].tag_value =
1671 (((const struct rte_flow_action_mark *)
1672 (action->conf))->id);
1673 flow->actions |= MLX5_FLOW_ACTION_MARK;
1676 case RTE_FLOW_ACTION_TYPE_DROP:
1677 dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
1678 flow->actions |= MLX5_FLOW_ACTION_DROP;
/* QUEUE: single destination queue stored in the flow's queue array. */
1680 case RTE_FLOW_ACTION_TYPE_QUEUE:
1681 queue = action->conf;
1682 flow->rss.queue_num = 1;
1683 (*flow->queue)[0] = queue->index;
1684 flow->actions |= MLX5_FLOW_ACTION_QUEUE;
/* RSS: copy queue list/key/types; QP is resolved later in apply(). */
1686 case RTE_FLOW_ACTION_TYPE_RSS:
1689 memcpy((*flow->queue), rss->queue,
1690 rss->queue_num * sizeof(uint16_t));
1691 flow->rss.queue_num = rss->queue_num;
1692 memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
1693 flow->rss.types = rss->types;
1694 flow->rss.level = rss->level;
1695 /* Added to array only in apply since we need the QP */
1696 flow->actions |= MLX5_FLOW_ACTION_RSS;
/* L2 tunnel encapsulation: create an IBV flow action object. */
1698 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1699 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1700 dev_flow->dv.actions[actions_n].type =
1701 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1702 dev_flow->dv.actions[actions_n].action =
1703 flow_dv_create_action_l2_encap(dev, action,
1705 if (!(dev_flow->dv.actions[actions_n].action))
/* Remember the verbs action so destroy() can release it. */
1707 dev_flow->dv.encap_decap_verbs_action =
1708 dev_flow->dv.actions[actions_n].action;
1709 flow->actions |= action->type ==
1710 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
1711 MLX5_FLOW_ACTION_VXLAN_ENCAP :
1712 MLX5_FLOW_ACTION_NVGRE_ENCAP;
1715 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1716 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
1717 dev_flow->dv.actions[actions_n].type =
1718 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1719 dev_flow->dv.actions[actions_n].action =
1720 flow_dv_create_action_l2_decap(dev, error);
1721 if (!(dev_flow->dv.actions[actions_n].action))
1723 dev_flow->dv.encap_decap_verbs_action =
1724 dev_flow->dv.actions[actions_n].action;
1725 flow->actions |= action->type ==
1726 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
1727 MLX5_FLOW_ACTION_VXLAN_DECAP :
1728 MLX5_FLOW_ACTION_NVGRE_DECAP;
1731 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1732 /* Handle encap action with preceding decap */
1733 if (flow->actions & MLX5_FLOW_ACTION_RAW_DECAP) {
1734 dev_flow->dv.actions[actions_n].type =
1735 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1736 dev_flow->dv.actions[actions_n].action =
1737 flow_dv_create_action_raw_encap
1740 if (!(dev_flow->dv.actions[actions_n].action))
1742 dev_flow->dv.encap_decap_verbs_action =
1743 dev_flow->dv.actions[actions_n].action;
1745 /* Handle encap action without preceding decap */
1746 dev_flow->dv.actions[actions_n].type =
1747 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1748 dev_flow->dv.actions[actions_n].action =
1749 flow_dv_create_action_l2_encap
1750 (dev, action, error);
1751 if (!(dev_flow->dv.actions[actions_n].action))
1753 dev_flow->dv.encap_decap_verbs_action =
1754 dev_flow->dv.actions[actions_n].action;
1756 flow->actions |= MLX5_FLOW_ACTION_RAW_ENCAP;
1759 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
1760 /* Check if this decap action is followed by encap. */
1761 for (; action_ptr->type != RTE_FLOW_ACTION_TYPE_END &&
1762 action_ptr->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1765 /* Handle decap action only if it isn't followed by encap */
1766 if (action_ptr->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1767 dev_flow->dv.actions[actions_n].type =
1768 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1769 dev_flow->dv.actions[actions_n].action =
1770 flow_dv_create_action_l2_decap(dev,
1772 if (!(dev_flow->dv.actions[actions_n].action))
1774 dev_flow->dv.encap_decap_verbs_action =
1775 dev_flow->dv.actions[actions_n].action;
1778 /* If decap is followed by encap, handle it at encap case. */
1779 flow->actions |= MLX5_FLOW_ACTION_RAW_DECAP;
/* Publish the (possibly grown) action count back to the dev_flow. */
1784 dev_flow->dv.actions_n = actions_n;
1788 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
1790 #define HEADER_IS_ZERO(match_criteria, headers) \
1791 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1792 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
1795 * Calculate flow matcher enable bitmap.
1797 * @param match_criteria
1798 * Pointer to flow matcher criteria.
1801 * Bitmap of enabled fields.
1804 flow_dv_matcher_enable(uint32_t *match_criteria)
1806 uint8_t match_criteria_enable;
1808 match_criteria_enable =
1809 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1810 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
1811 match_criteria_enable |=
1812 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1813 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
1814 match_criteria_enable |=
1815 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1816 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
1817 match_criteria_enable |=
1818 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
1819 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
1821 return match_criteria_enable;
1825 * Register the flow matcher.
1827 * @param dev[in, out]
1828 * Pointer to rte_eth_dev structure.
1829 * @param[in, out] matcher
1830 * Pointer to flow matcher.
1831 * @parm[in, out] dev_flow
1832 * Pointer to the dev_flow.
1834 * pointer to error structure.
1837 * 0 on success otherwise -errno and errno is set.
/*
 * NOTE(review): lossy extraction — line numbers are fused into the text
 * and some lines (return type, DRV_LOG opener, early return, closing
 * braces) are missing.  Looks up an identical matcher in the per-port
 * cache and bumps its refcount, otherwise allocates and creates a new
 * one via direct verbs and inserts it at the head of the cache list.
 */
1840 flow_dv_matcher_register(struct rte_eth_dev *dev,
1841 struct mlx5_flow_dv_matcher *matcher,
1842 struct mlx5_flow *dev_flow,
1843 struct rte_flow_error *error)
1845 struct priv *priv = dev->data->dev_private;
1846 struct mlx5_flow_dv_matcher *cache_matcher;
1847 struct mlx5dv_flow_matcher_attr dv_attr = {
1848 .type = IBV_FLOW_ATTR_NORMAL,
1849 .match_mask = (void *)&matcher->mask,
1852 /* Lookup from cache. */
/* Cache hit requires equal CRC, priority, direction and mask bytes. */
1853 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
1854 if (matcher->crc == cache_matcher->crc &&
1855 matcher->priority == cache_matcher->priority &&
1856 matcher->egress == cache_matcher->egress &&
1857 !memcmp((const void *)matcher->mask.buf,
1858 (const void *)cache_matcher->mask.buf,
1859 cache_matcher->mask.size)) {
1861 "priority %hd use %s matcher %p: refcnt %d++",
1862 cache_matcher->priority,
1863 cache_matcher->egress ? "tx" : "rx",
1864 (void *)cache_matcher,
1865 rte_atomic32_read(&cache_matcher->refcnt));
1866 rte_atomic32_inc(&cache_matcher->refcnt);
1867 dev_flow->dv.matcher = cache_matcher;
1871 /* Register new matcher. */
1872 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
1874 return rte_flow_error_set(error, ENOMEM,
1875 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1876 "cannot allocate matcher memory");
/* Copy the template, then derive the criteria-enable bitmap from it. */
1877 *cache_matcher = *matcher;
1878 dv_attr.match_criteria_enable =
1879 flow_dv_matcher_enable(cache_matcher->mask.buf);
1880 dv_attr.priority = matcher->priority;
1881 if (matcher->egress)
1882 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
1883 cache_matcher->matcher_object =
1884 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
1885 if (!cache_matcher->matcher_object) {
/* Creation failed: free the cache entry before reporting. */
1886 rte_free(cache_matcher);
1887 return rte_flow_error_set(error, ENOMEM,
1888 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1889 NULL, "cannot create matcher");
1891 rte_atomic32_inc(&cache_matcher->refcnt);
1892 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
1893 dev_flow->dv.matcher = cache_matcher;
1894 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
1895 cache_matcher->priority,
1896 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
1897 rte_atomic32_read(&cache_matcher->refcnt));
1902 * Fill the flow with DV spec.
1905 * Pointer to rte_eth_dev structure.
1906 * @param[in, out] dev_flow
1907 * Pointer to the sub flow.
1909 * Pointer to the flow attributes.
1911 * Pointer to the list of items.
1912 * @param[in] actions
1913 * Pointer to the list of actions.
1915 * Pointer to the error structure.
1918 * 0 on success, a negative errno value otherwise and rte_ernno is set.
/*
 * NOTE(review): lossy extraction — line numbers are fused into the text
 * and some lines (matcher initializer fields, loop body details, error
 * returns) are missing.  Fills the DV matcher mask/value from the item
 * list, registers the matcher, then translates every action.
 */
1921 flow_dv_translate(struct rte_eth_dev *dev,
1922 struct mlx5_flow *dev_flow,
1923 const struct rte_flow_attr *attr,
1924 const struct rte_flow_item items[],
1925 const struct rte_flow_action actions[],
1926 struct rte_flow_error *error)
1928 struct priv *priv = dev->data->dev_private;
1929 uint64_t priority = attr->priority;
1930 struct mlx5_flow_dv_matcher matcher = {
1932 .size = sizeof(matcher.mask.buf),
1935 void *match_value = dev_flow->dv.value.buf;
/* Reserved priority maps to the lowest configured flow priority. */
1938 if (priority == MLX5_FLOW_PRIO_RSVD)
1939 priority = priv->config.flow_prio - 1;
1940 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
/* tunnel flags the item as inner once a tunnel layer was seen. */
1941 tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
1942 flow_dv_create_item(&matcher, match_value, items, dev_flow,
/* CRC over the mask is used as the fast key for the matcher cache. */
1945 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
1947 if (priority == MLX5_FLOW_PRIO_RSVD)
1948 priority = priv->config.flow_prio - 1;
1949 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
1951 matcher.egress = attr->egress;
1952 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
1954 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1955 if (flow_dv_create_action(dev, actions, dev_flow, attr, error))
1961 * Apply the flow to the NIC.
1964 * Pointer to the Ethernet device structure.
1965 * @param[in, out] flow
1966 * Pointer to flow structure.
1968 * Pointer to error structure.
1971 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * NOTE(review): lossy extraction — line numbers are fused into the text
 * and some lines (dv/n initialization, NULL checks, gotos to the error
 * label, closing braces) are missing.  For every sub-flow: resolve the
 * destination (drop queue or RSS/queue hash Rx queue), append it as a
 * DEST_IBV_QP action, then create the DV flow; on failure the visible
 * tail releases already-acquired queues and restores rte_errno.
 */
1974 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1975 struct rte_flow_error *error)
1977 struct mlx5_flow_dv *dv;
1978 struct mlx5_flow *dev_flow;
1982 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
/* Drop flows target the singleton drop hash Rx queue. */
1985 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1986 dv->hrxq = mlx5_hrxq_drop_new(dev);
1990 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1991 "cannot get drop hash queue");
1994 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
1995 dv->actions[n].qp = dv->hrxq->qp;
1997 } else if (flow->actions &
1998 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
1999 struct mlx5_hrxq *hrxq;
/* Reuse a cached hash Rx queue, create one on cache miss. */
2000 hrxq = mlx5_hrxq_get(dev, flow->key,
2001 MLX5_RSS_HASH_KEY_LEN,
2004 flow->rss.queue_num);
2006 hrxq = mlx5_hrxq_new
2007 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
2008 dv->hash_fields, (*flow->queue),
2009 flow->rss.queue_num,
2010 !!(dev_flow->layers &
2011 MLX5_FLOW_LAYER_TUNNEL));
2015 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2016 "cannot get hash queue");
2020 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
2021 dv->actions[n].qp = hrxq->qp;
/* Hand matcher, match value and action array to direct verbs. */
2025 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
2026 (void *)&dv->value, n,
2029 rte_flow_error_set(error, errno,
2030 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2032 "hardware refuses to create flow");
/* Error path: undo queue acquisitions for all sub-flows. */
2038 err = rte_errno; /* Save rte_errno before cleanup. */
2039 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2040 struct mlx5_flow_dv *dv = &dev_flow->dv;
2042 if (flow->actions & MLX5_FLOW_ACTION_DROP)
2043 mlx5_hrxq_drop_release(dev);
2045 mlx5_hrxq_release(dev, dv->hrxq);
2049 rte_errno = err; /* Restore rte_errno. */
2054 * Release the flow matcher.
2057 * Pointer to Ethernet device.
2059 * Pointer to mlx5_flow.
2062 * 1 while a reference on it exists, 0 when freed.
2065 flow_dv_matcher_release(struct rte_eth_dev *dev,
2066 struct mlx5_flow *flow)
2068 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
2070 assert(matcher->matcher_object);
2071 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
2072 dev->data->port_id, (void *)matcher,
2073 rte_atomic32_read(&matcher->refcnt));
2074 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
2075 claim_zero(mlx5_glue->dv_destroy_flow_matcher
2076 (matcher->matcher_object));
2077 LIST_REMOVE(matcher, next);
2079 DRV_LOG(DEBUG, "port %u matcher %p: removed",
2080 dev->data->port_id, (void *)matcher);
2087 * Remove the flow from the NIC but keeps it in memory.
2090 * Pointer to Ethernet device.
2091 * @param[in, out] flow
2092 * Pointer to flow structure.
2095 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2097 struct mlx5_flow_dv *dv;
2098 struct mlx5_flow *dev_flow;
2102 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2105 claim_zero(mlx5_glue->destroy_flow(dv->flow));
2109 if (flow->actions & MLX5_FLOW_ACTION_DROP)
2110 mlx5_hrxq_drop_release(dev);
2112 mlx5_hrxq_release(dev, dv->hrxq);
2117 flow->counter = NULL;
2121 * Remove the flow from the NIC and the memory.
2124 * Pointer to the Ethernet device structure.
2125 * @param[in, out] flow
2126 * Pointer to flow structure.
2129 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2131 struct mlx5_flow *dev_flow;
2135 flow_dv_remove(dev, flow);
2136 while (!LIST_EMPTY(&flow->dev_flows)) {
2137 dev_flow = LIST_FIRST(&flow->dev_flows);
2138 LIST_REMOVE(dev_flow, next);
2139 if (dev_flow->dv.matcher)
2140 flow_dv_matcher_release(dev, dev_flow);
2141 if (dev_flow->dv.encap_decap_verbs_action) {
2142 claim_zero(mlx5_glue->destroy_flow_action
2143 (dev_flow->dv.encap_decap_verbs_action));
2144 dev_flow->dv.encap_decap_verbs_action = NULL;
2153 * @see rte_flow_query()
2157 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
2158 struct rte_flow *flow __rte_unused,
2159 const struct rte_flow_action *actions __rte_unused,
2160 void *data __rte_unused,
2161 struct rte_flow_error *error __rte_unused)
2163 rte_errno = ENOTSUP;
2168 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
2169 .validate = flow_dv_validate,
2170 .prepare = flow_dv_prepare,
2171 .translate = flow_dv_translate,
2172 .apply = flow_dv_apply,
2173 .remove = flow_dv_remove,
2174 .destroy = flow_dv_destroy,
2175 .query = flow_dv_query,
2178 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */