1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
30 #include "mlx5_defs.h"
32 #include "mlx5_glue.h"
33 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
 * Validate rte_flow attributes for the Direct Verbs (DV) flow engine.
 *
 * NOTE(review): this is a sampled excerpt -- the embedded original line
 * numbers show gaps (40-41, 45, 47-48, 50-51, 55, 58, 62, 68, 73, 78, 83,
 * 85-88), so the comment delimiters, the "@param dev"/"@return" tags, the
 * static return-type line, the braces and the trailing "return 0;" are on
 * lines not visible here.
 */
38 * Verify the @p attributes will be correctly understood by the NIC and store
39 * them in the @p flow if everything is correct.
42 * Pointer to dev struct.
43 * @param[in] attributes
44 * Pointer to flow attributes
46 * Pointer to error structure.
49 * 0 on success, a negative errno value otherwise and rte_errno is set.
52 flow_dv_validate_attributes(struct rte_eth_dev *dev,
53 const struct rte_flow_attr *attributes,
54 struct rte_flow_error *error)
/*
 * Highest priority a rule may request; priorities are 0-based so the
 * top usable value is flow_prio - 1 (flow_prio presumably holds the
 * number of priorities probed from the device -- confirm in mlx5.c).
 */
56 struct priv *priv = dev->data->dev_private;
57 uint32_t priority_max = priv->config.flow_prio - 1;
/* Only the default group (0) is handled by the DV engine here. */
59 if (attributes->group)
60 return rte_flow_error_set(error, ENOTSUP,
61 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
63 "groups is not supported")
/*
 * MLX5_FLOW_PRIO_RSVD appears to act as a "use driver default"
 * sentinel exempt from the range check -- TODO confirm against
 * mlx5_flow.h.
 */
64 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
65 attributes->priority >= priority_max)
66 return rte_flow_error_set(error, ENOTSUP,
67 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
69 "priority out of range")
/* Only ingress rules are supported: reject egress and transfer ... */
70 if (attributes->egress)
71 return rte_flow_error_set(error, ENOTSUP,
72 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
74 "egress is not supported")
75 if (attributes->transfer)
76 return rte_flow_error_set(error, ENOTSUP,
77 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
79 "transfer is not supported")
/* ... and require the ingress bit to be explicitly set. */
80 if (!attributes->ingress)
81 return rte_flow_error_set(error, EINVAL,
82 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
84 "ingress attribute is mandatory")
/*
 * DV-engine validation entry point: checks attributes, then every pattern
 * item, then every action of a flow rule before any resource is created.
 *
 * NOTE(review): sampled excerpt -- the embedded original line numbers show
 * gaps throughout (90-91, 93, 95, 97, 99, 101-102, 104-105, 110-111,
 * 114, 116-122, 126, 129-131, 134, ...). The declarations of "ret",
 * "tunnel" and "actions_n", the "if (ret < 0) return ret;" check after
 * each helper call, every "break;", the switch "default:" labels, closing
 * braces and the final "return 0;" are on lines not visible here.
 * (Typo fixed below: "rte_ernno" -> "rte_errno".)
 */
89 * Internal validation function. For validating both actions and items.
92 * Pointer to the rte_eth_dev structure.
94 * Pointer to the flow attributes.
96 * Pointer to the list of items.
98 * Pointer to the list of actions.
100 * Pointer to the error structure.
103 * 0 on success, a negative errno value otherwise and rte_errno is set.
106 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
107 const struct rte_flow_item items[],
108 const struct rte_flow_action actions[],
109 struct rte_flow_error *error)
/* Bitmaps accumulating which actions/layers the rule uses. */
112 uint32_t action_flags = 0;
113 uint32_t item_flags = 0;
/*
 * 0xff = "any L4 protocol": narrowed below only when an IPv4/IPv6 item
 * masks the next-protocol field, so TCP/UDP/GRE validators can
 * cross-check it.
 */
115 uint8_t next_protocol = 0xff;
/* Attributes first; a bad attr fails the whole rule up front. */
120 ret = flow_dv_validate_attributes(dev, attr, error);
/* Pass 1: walk the pattern items until the END sentinel. */
123 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
124 switch (items->type) {
125 case RTE_FLOW_ITEM_TYPE_VOID:
127 case RTE_FLOW_ITEM_TYPE_ETH:
128 ret = mlx5_flow_validate_item_eth(items, item_flags,
/*
 * "tunnel" (declared on an elided line) selects INNER_* vs
 * OUTER_* layer bits once a tunnel item has been seen.
 */
132 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
133 MLX5_FLOW_LAYER_OUTER_L2;
135 case RTE_FLOW_ITEM_TYPE_VLAN:
136 ret = mlx5_flow_validate_item_vlan(items, item_flags,
140 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
141 MLX5_FLOW_LAYER_OUTER_VLAN;
143 case RTE_FLOW_ITEM_TYPE_IPV4:
144 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
148 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
149 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/*
 * Record the masked next-protocol for later L4/tunnel checks.
 * NOTE(review): only items->mask is null-checked; the elided
 * lines presumably guard items->spec too -- verify upstream.
 */
150 if (items->mask != NULL &&
151 ((const struct rte_flow_item_ipv4 *)
152 items->mask)->hdr.next_proto_id)
154 ((const struct rte_flow_item_ipv4 *)
155 (items->spec))->hdr.next_proto_id;
157 case RTE_FLOW_ITEM_TYPE_IPV6:
158 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
162 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
163 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* Same next-protocol capture for the IPv6 "proto" field. */
164 if (items->mask != NULL &&
165 ((const struct rte_flow_item_ipv6 *)
166 items->mask)->hdr.proto)
168 ((const struct rte_flow_item_ipv6 *)
169 items->spec)->hdr.proto;
171 case RTE_FLOW_ITEM_TYPE_UDP:
172 ret = mlx5_flow_validate_item_udp(items, item_flags,
177 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
178 MLX5_FLOW_LAYER_OUTER_L4_UDP;
180 case RTE_FLOW_ITEM_TYPE_TCP:
/* L4 validators receive next_protocol to match the L3 mask. */
181 ret = mlx5_flow_validate_item_tcp(items, item_flags,
182 next_protocol, error);
185 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
186 MLX5_FLOW_LAYER_OUTER_L4_TCP;
/* Tunnel items set a single (non inner/outer) layer bit. */
188 case RTE_FLOW_ITEM_TYPE_VXLAN:
189 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
193 item_flags |= MLX5_FLOW_LAYER_VXLAN;
195 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
196 ret = mlx5_flow_validate_item_vxlan_gpe(items,
201 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
203 case RTE_FLOW_ITEM_TYPE_GRE:
204 ret = mlx5_flow_validate_item_gre(items, item_flags,
205 next_protocol, error);
208 item_flags |= MLX5_FLOW_LAYER_GRE;
210 case RTE_FLOW_ITEM_TYPE_MPLS:
211 ret = mlx5_flow_validate_item_mpls(items, item_flags,
216 item_flags |= MLX5_FLOW_LAYER_MPLS;
/* Anything not matched above is rejected (default label elided). */
219 return rte_flow_error_set(error, ENOTSUP,
220 RTE_FLOW_ERROR_TYPE_ITEM,
221 NULL, "item not supported");
/* Pass 2: walk the actions until the END sentinel. */
224 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
/* Hard cap on actions per rule ("actions_n" declared on an elided line). */
225 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
226 return rte_flow_error_set(error, ENOTSUP,
227 RTE_FLOW_ERROR_TYPE_ACTION,
228 actions, "too many actions");
/* Recompute tunnel state from the layers collected in pass 1. */
229 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
230 switch (actions->type) {
231 case RTE_FLOW_ACTION_TYPE_VOID:
233 case RTE_FLOW_ACTION_TYPE_FLAG:
234 ret = mlx5_flow_validate_action_flag(action_flags,
238 action_flags |= MLX5_FLOW_ACTION_FLAG;
241 case RTE_FLOW_ACTION_TYPE_MARK:
242 ret = mlx5_flow_validate_action_mark(actions,
247 action_flags |= MLX5_FLOW_ACTION_MARK;
250 case RTE_FLOW_ACTION_TYPE_DROP:
251 ret = mlx5_flow_validate_action_drop(action_flags,
255 action_flags |= MLX5_FLOW_ACTION_DROP;
258 case RTE_FLOW_ACTION_TYPE_QUEUE:
259 ret = mlx5_flow_validate_action_queue(actions,
264 action_flags |= MLX5_FLOW_ACTION_QUEUE;
267 case RTE_FLOW_ACTION_TYPE_RSS:
268 ret = mlx5_flow_validate_action_rss(actions,
273 action_flags |= MLX5_FLOW_ACTION_RSS;
276 case RTE_FLOW_ACTION_TYPE_COUNT:
277 ret = mlx5_flow_validate_action_count(dev, error);
280 action_flags |= MLX5_FLOW_ACTION_COUNT;
/* Unknown action type: reject (default label elided). */
284 return rte_flow_error_set(error, ENOTSUP,
285 RTE_FLOW_ERROR_TYPE_ACTION,
287 "action not supported");
/*
 * Every rule must end in exactly one fate (drop/queue/rss/...);
 * MLX5_FLOW_FATE_ACTIONS presumably masks those bits -- confirm
 * in mlx5_flow.h.
 */
290 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
291 return rte_flow_error_set(error, EINVAL,
292 RTE_FLOW_ERROR_TYPE_ACTION, actions,
293 "no fate action is found");
/*
 * Export the DV engine's driver callbacks to the generic mlx5 flow layer
 * by overwriting the caller-provided ops structure wholesale.
 *
 * NOTE(review): sampled excerpt -- original lines 299, 302-303, 305 and
 * 308-315 are not visible here; the remaining initializers (.prepare,
 * .translate, .apply, ... presumably) and the closing brace are among
 * the elided lines.
 */
298 * Fills the flow_ops with the function pointers.
300 * @param[out] flow_ops
301 * Pointer to driver_ops structure.
304 mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
/* Compound-literal assignment zeroes any callback not listed. */
306 *flow_ops = (struct mlx5_flow_driver_ops) {
307 .validate = flow_dv_validate,
316 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */