/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
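
/*
 * Illustrative note (not part of the original sources): for instance,
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) expands to a
 * designated initializer roughly equivalent to
 *
 *   [RTE_FLOW_ITEM_TYPE_ETH] = {
 *           .name = "ETH",
 *           .size = sizeof(struct rte_flow_item_eth),
 *   },
 *
 * so the tables below can be indexed directly by item/action type.
 */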

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

/* Helper: report EIO if the port was hot-unplugged, otherwise forward ret. */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}
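
/*
 * Illustrative sketch (not part of the original file): a PMD typically
 * answers the RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET query issued
 * above by handing back a pointer to its rte_flow_ops. All identifiers
 * below are hypothetical:
 *
 *   static const struct rte_flow_ops foo_flow_ops = {
 *           .validate = foo_flow_validate,
 *           .create = foo_flow_create,
 *           .destroy = foo_flow_destroy,
 *           .flush = foo_flow_flush,
 *   };
 *
 *   static int
 *   foo_dev_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
 *                       enum rte_filter_type filter_type,
 *                       enum rte_filter_op filter_op, void *arg)
 *   {
 *           if (filter_type != RTE_ETH_FILTER_GENERIC)
 *                   return -ENOTSUP;
 *           if (filter_op != RTE_ETH_FILTER_GET)
 *                   return -EINVAL;
 *           *(const void **)arg = &foo_flow_ops;
 *           return 0;
 *   }
 */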

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
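
/*
 * Illustrative usage sketch (not part of the original file): an application
 * would normally validate a rule and then create it, e.g. to steer all TCP
 * traffic received on port 0 to queue 1 (error handling shortened):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = NULL;
 *
 *   if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(0, &attr, pattern, actions, &err);
 *   if (flow == NULL)
 *           printf("flow rule rejected: %s\n",
 *                  err.message ? err.message : rte_strerror(rte_errno));
 *
 * The rule is later released with rte_flow_destroy(0, flow, &err) or, along
 * with all others, by rte_flow_flush(0, &err).
 */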

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       enum rte_flow_action_type action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
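
/*
 * Illustrative note (not part of the original file): PMD callbacks rely on
 * this helper to report failures in a single statement, e.g.
 *
 *   return rte_flow_error_set(error, ENOTSUP,
 *                             RTE_FLOW_ERROR_TYPE_ACTION,
 *                             actions, "action not supported");
 *
 * which fills *error when non-NULL, sets rte_errno and returns -ENOTSUP.
 */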

/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *item_spec =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item_spec)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		src.raw = item_spec;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		size = off + src.raw->length * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = rte_flow_desc_item[item->type].size;
		if (buf)
			memcpy(buf, item_spec, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
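
/*
 * Layout note (illustrative, not part of the original file): a RAW item is
 * copied as the fixed rte_flow_item_raw header immediately followed by its
 * variable-length pattern bytes. With a hypothetical 6-byte pattern:
 *
 *   off  = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw), 1);
 *   size = off + 6;
 *
 * and dst.raw->pattern is redirected to the bytes copied at buf + off. The
 * returned size is rounded up to sizeof(double) so consecutive copies stay
 * aligned.
 */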

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = rte_flow_desc_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}
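
/*
 * Layout note (illustrative, not part of the original file): an RSS
 * configuration is copied as the fixed rte_flow_action_rss structure,
 * followed by the key bytes, followed by the queue[] array, each block
 * aligned to sizeof(double). For a hypothetical 40-byte key and 4 queues,
 * the copy is the structure, then 40 key bytes, then the queue entries,
 * with padding between blocks as needed. dst.rss->key and dst.rss->queue
 * are updated to point inside the copy, so the result no longer references
 * the caller's buffers.
 */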

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			if ((size_t)item->type >=
				RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			if (item->spec) {
				if (fd)
					dst->spec = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_SPEC);
			}
			if (item->last) {
				if (fd)
					dst->last = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_LAST);
			}
			if (item->mask) {
				if (fd)
					dst->mask = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_MASK);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			if ((size_t)action->type >=
				RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			if (action->conf) {
				if (fd)
					dst->conf = fd->data + off2;
				off2 += flow_action_conf_copy
					(fd ? fd->data + off2 : NULL, action);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	if (fd != NULL)
		return size;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}
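
/*
 * Illustrative usage sketch (not part of the original file): callers usually
 * invoke rte_flow_copy() twice, first with a zero length to learn the
 * required size, then again with a buffer of that size:
 *
 *   size_t size = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *   struct rte_flow_desc *fd = size ? malloc(size) : NULL;
 *
 *   if (fd != NULL)
 *           rte_flow_copy(fd, size, &attr, pattern, actions);
 *
 * A zero return means an unrecognized item or action type, with rte_errno
 * set to ENOTSUP.
 */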