4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"
/**
 * Flow elements description tables.
 *
 * One entry per pattern item / action type: a printable name (NULL for
 * unknown/unsupported types, used as a validity check by rte_flow_copy())
 * and the size of the associated configuration structure.
 */
struct rte_flow_desc_data {
	const char *name; /**< Type name; NULL marks an unsupported entry. */
	size_t size; /**< sizeof() the item/action configuration structure. */
};
/**
 * Generate flow_item[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ITEM_TYPE_<t>; stringifies
 * the type token for the name and records the spec structure size.
 */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
61 /** Information about known flow pattern items. */
62 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
64 MK_FLOW_ITEM(VOID, 0),
65 MK_FLOW_ITEM(INVERT, 0),
66 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
68 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
69 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
70 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
71 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
72 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
73 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
74 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
75 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
76 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
77 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
78 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
79 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
80 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
81 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
82 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
83 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
84 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
/**
 * Generate flow_action[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ACTION_TYPE_<t>; stringifies
 * the type token for the name and records the conf structure size.
 */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
94 /** Information about known flow actions. */
95 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
96 MK_FLOW_ACTION(END, 0),
97 MK_FLOW_ACTION(VOID, 0),
98 MK_FLOW_ACTION(PASSTHRU, 0),
99 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
100 MK_FLOW_ACTION(FLAG, 0),
101 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
102 MK_FLOW_ACTION(DROP, 0),
103 MK_FLOW_ACTION(COUNT, 0),
104 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
105 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
106 MK_FLOW_ACTION(PF, 0),
107 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
111 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
115 if (rte_eth_dev_is_removed(port_id))
116 return rte_flow_error_set(error, EIO,
117 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
118 NULL, rte_strerror(EIO));
122 /* Get generic flow operations structure from a port. */
123 const struct rte_flow_ops *
124 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
126 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
127 const struct rte_flow_ops *ops;
130 if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
132 else if (unlikely(!dev->dev_ops->filter_ctrl ||
133 dev->dev_ops->filter_ctrl(dev,
134 RTE_ETH_FILTER_GENERIC,
141 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
142 NULL, rte_strerror(code));
146 /* Check whether a flow rule can be created on a given port. */
148 rte_flow_validate(uint16_t port_id,
149 const struct rte_flow_attr *attr,
150 const struct rte_flow_item pattern[],
151 const struct rte_flow_action actions[],
152 struct rte_flow_error *error)
154 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
155 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
159 if (likely(!!ops->validate))
160 return flow_err(port_id, ops->validate(dev, attr, pattern,
161 actions, error), error);
162 return rte_flow_error_set(error, ENOSYS,
163 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
164 NULL, rte_strerror(ENOSYS));
167 /* Create a flow rule on a given port. */
169 rte_flow_create(uint16_t port_id,
170 const struct rte_flow_attr *attr,
171 const struct rte_flow_item pattern[],
172 const struct rte_flow_action actions[],
173 struct rte_flow_error *error)
175 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
176 struct rte_flow *flow;
177 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
181 if (likely(!!ops->create)) {
182 flow = ops->create(dev, attr, pattern, actions, error);
184 flow_err(port_id, -rte_errno, error);
187 rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
188 NULL, rte_strerror(ENOSYS));
192 /* Destroy a flow rule on a given port. */
194 rte_flow_destroy(uint16_t port_id,
195 struct rte_flow *flow,
196 struct rte_flow_error *error)
198 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
199 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
203 if (likely(!!ops->destroy))
204 return flow_err(port_id, ops->destroy(dev, flow, error),
206 return rte_flow_error_set(error, ENOSYS,
207 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
208 NULL, rte_strerror(ENOSYS));
211 /* Destroy all flow rules associated with a port. */
213 rte_flow_flush(uint16_t port_id,
214 struct rte_flow_error *error)
216 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
217 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
221 if (likely(!!ops->flush))
222 return flow_err(port_id, ops->flush(dev, error), error);
223 return rte_flow_error_set(error, ENOSYS,
224 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
225 NULL, rte_strerror(ENOSYS));
228 /* Query an existing flow rule. */
230 rte_flow_query(uint16_t port_id,
231 struct rte_flow *flow,
232 enum rte_flow_action_type action,
234 struct rte_flow_error *error)
236 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
237 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
241 if (likely(!!ops->query))
242 return flow_err(port_id, ops->query(dev, flow, action, data,
244 return rte_flow_error_set(error, ENOSYS,
245 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
246 NULL, rte_strerror(ENOSYS));
249 /* Restrict ingress traffic to the defined flow rules. */
251 rte_flow_isolate(uint16_t port_id,
253 struct rte_flow_error *error)
255 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
256 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
260 if (likely(!!ops->isolate))
261 return flow_err(port_id, ops->isolate(dev, set, error), error);
262 return rte_flow_error_set(error, ENOSYS,
263 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
264 NULL, rte_strerror(ENOSYS));
267 /* Initialize flow error structure. */
269 rte_flow_error_set(struct rte_flow_error *error,
271 enum rte_flow_error_type type,
276 *error = (struct rte_flow_error){
286 /** Compute storage space needed by item specification. */
288 flow_item_spec_size(const struct rte_flow_item *item,
289 size_t *size, size_t *pad)
295 switch (item->type) {
297 const struct rte_flow_item_raw *raw;
300 /* Not a fall-through */
301 case RTE_FLOW_ITEM_TYPE_RAW:
302 spec.raw = item->spec;
303 *size = offsetof(struct rte_flow_item_raw, pattern) +
304 spec.raw->length * sizeof(*spec.raw->pattern);
307 *size = rte_flow_desc_item[item->type].size;
311 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
314 /** Compute storage space needed by action configuration. */
316 flow_action_conf_size(const struct rte_flow_action *action,
317 size_t *size, size_t *pad)
323 switch (action->type) {
325 const struct rte_flow_action_rss *rss;
328 /* Not a fall-through. */
329 case RTE_FLOW_ACTION_TYPE_RSS:
330 conf.rss = action->conf;
331 *size = offsetof(struct rte_flow_action_rss, queue) +
332 conf.rss->num * sizeof(*conf.rss->queue);
335 *size = rte_flow_desc_action[action->type].size;
339 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
342 /** Store a full rte_flow description. */
344 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
345 const struct rte_flow_attr *attr,
346 const struct rte_flow_item *items,
347 const struct rte_flow_action *actions)
349 struct rte_flow_desc *fd = NULL;
358 const struct rte_flow_item *item;
362 fd->items = (void *)&fd->data[off1];
364 struct rte_flow_item *dst = NULL;
366 if ((size_t)item->type >=
367 RTE_DIM(rte_flow_desc_item) ||
368 !rte_flow_desc_item[item->type].name) {
373 dst = memcpy(fd->data + off1, item,
375 off1 += sizeof(*item);
376 flow_item_spec_size(item, &tmp, &pad);
379 dst->spec = memcpy(fd->data + off2,
385 dst->last = memcpy(fd->data + off2,
391 dst->mask = memcpy(fd->data + off2,
395 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
396 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
397 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
400 const struct rte_flow_action *action;
404 fd->actions = (void *)&fd->data[off1];
406 struct rte_flow_action *dst = NULL;
408 if ((size_t)action->type >=
409 RTE_DIM(rte_flow_desc_action) ||
410 !rte_flow_desc_action[action->type].name) {
415 dst = memcpy(fd->data + off1, action,
417 off1 += sizeof(*action);
418 flow_action_conf_size(action, &tmp, &pad);
421 dst->conf = memcpy(fd->data + off2,
425 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
426 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
430 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
431 tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
433 size = tmp + off1 + off2;
438 *fd = (const struct rte_flow_desc) {
442 tmp -= offsetof(struct rte_flow_desc, data);