1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include "rte_ethdev.h"
15 #include "rte_flow_driver.h"
/**
 * Flow elements description tables.
 *
 * Each entry maps a flow item/action type to its printable name and the
 * size of its specification structure; used by the generic copy helpers
 * below to measure and duplicate opaque flow descriptions.
 */
struct rte_flow_desc_data {
	const char *name; /**< Printable type name (NULL for unknown types). */
	size_t size; /**< Byte size of the matching spec/conf structure. */
};
/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
33 /** Information about known flow pattern items. */
34 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
36 MK_FLOW_ITEM(VOID, 0),
37 MK_FLOW_ITEM(INVERT, 0),
38 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
40 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
41 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
42 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
43 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
44 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
45 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
46 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
47 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
48 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
49 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
50 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
51 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
52 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
53 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
54 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
55 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
56 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
57 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
58 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
59 MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
60 MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
61 MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
62 MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
63 MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
64 MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
65 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
66 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
67 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
68 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
78 /** Information about known flow actions. */
79 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
80 MK_FLOW_ACTION(END, 0),
81 MK_FLOW_ACTION(VOID, 0),
82 MK_FLOW_ACTION(PASSTHRU, 0),
83 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
84 MK_FLOW_ACTION(FLAG, 0),
85 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
86 MK_FLOW_ACTION(DROP, 0),
87 MK_FLOW_ACTION(COUNT, 0),
88 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
89 MK_FLOW_ACTION(PF, 0),
90 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
91 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
92 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
93 MK_FLOW_ACTION(OF_SET_MPLS_TTL,
94 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
95 MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
96 MK_FLOW_ACTION(OF_SET_NW_TTL,
97 sizeof(struct rte_flow_action_of_set_nw_ttl)),
98 MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
99 MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
100 MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
104 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
108 if (rte_eth_dev_is_removed(port_id))
109 return rte_flow_error_set(error, EIO,
110 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
111 NULL, rte_strerror(EIO));
115 /* Get generic flow operations structure from a port. */
116 const struct rte_flow_ops *
117 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
119 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
120 const struct rte_flow_ops *ops;
123 if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
125 else if (unlikely(!dev->dev_ops->filter_ctrl ||
126 dev->dev_ops->filter_ctrl(dev,
127 RTE_ETH_FILTER_GENERIC,
134 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
135 NULL, rte_strerror(code));
139 /* Check whether a flow rule can be created on a given port. */
141 rte_flow_validate(uint16_t port_id,
142 const struct rte_flow_attr *attr,
143 const struct rte_flow_item pattern[],
144 const struct rte_flow_action actions[],
145 struct rte_flow_error *error)
147 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
148 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
152 if (likely(!!ops->validate))
153 return flow_err(port_id, ops->validate(dev, attr, pattern,
154 actions, error), error);
155 return rte_flow_error_set(error, ENOSYS,
156 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
157 NULL, rte_strerror(ENOSYS));
160 /* Create a flow rule on a given port. */
162 rte_flow_create(uint16_t port_id,
163 const struct rte_flow_attr *attr,
164 const struct rte_flow_item pattern[],
165 const struct rte_flow_action actions[],
166 struct rte_flow_error *error)
168 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
169 struct rte_flow *flow;
170 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
174 if (likely(!!ops->create)) {
175 flow = ops->create(dev, attr, pattern, actions, error);
177 flow_err(port_id, -rte_errno, error);
180 rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
181 NULL, rte_strerror(ENOSYS));
185 /* Destroy a flow rule on a given port. */
187 rte_flow_destroy(uint16_t port_id,
188 struct rte_flow *flow,
189 struct rte_flow_error *error)
191 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
192 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
196 if (likely(!!ops->destroy))
197 return flow_err(port_id, ops->destroy(dev, flow, error),
199 return rte_flow_error_set(error, ENOSYS,
200 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201 NULL, rte_strerror(ENOSYS));
204 /* Destroy all flow rules associated with a port. */
206 rte_flow_flush(uint16_t port_id,
207 struct rte_flow_error *error)
209 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
210 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
214 if (likely(!!ops->flush))
215 return flow_err(port_id, ops->flush(dev, error), error);
216 return rte_flow_error_set(error, ENOSYS,
217 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
218 NULL, rte_strerror(ENOSYS));
221 /* Query an existing flow rule. */
223 rte_flow_query(uint16_t port_id,
224 struct rte_flow *flow,
225 enum rte_flow_action_type action,
227 struct rte_flow_error *error)
229 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
230 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
234 if (likely(!!ops->query))
235 return flow_err(port_id, ops->query(dev, flow, action, data,
237 return rte_flow_error_set(error, ENOSYS,
238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239 NULL, rte_strerror(ENOSYS));
242 /* Restrict ingress traffic to the defined flow rules. */
244 rte_flow_isolate(uint16_t port_id,
246 struct rte_flow_error *error)
248 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
249 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
253 if (likely(!!ops->isolate))
254 return flow_err(port_id, ops->isolate(dev, set, error), error);
255 return rte_flow_error_set(error, ENOSYS,
256 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
257 NULL, rte_strerror(ENOSYS));
260 /* Initialize flow error structure. */
262 rte_flow_error_set(struct rte_flow_error *error,
264 enum rte_flow_error_type type,
269 *error = (struct rte_flow_error){
/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};
286 /** Compute storage space needed by item specification and copy it. */
288 flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
289 enum item_spec_type type)
292 const void *item_spec =
293 type == ITEM_SPEC ? item->spec :
294 type == ITEM_LAST ? item->last :
295 type == ITEM_MASK ? item->mask :
300 switch (item->type) {
302 const struct rte_flow_item_raw *raw;
305 struct rte_flow_item_raw *raw;
309 case RTE_FLOW_ITEM_TYPE_RAW:
312 off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
313 sizeof(*src.raw->pattern));
314 size = off + src.raw->length * sizeof(*src.raw->pattern);
316 memcpy(dst.raw, src.raw, sizeof(*src.raw));
317 dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
323 size = rte_flow_desc_item[item->type].size;
325 memcpy(buf, item_spec, size);
329 return RTE_ALIGN_CEIL(size, sizeof(double));
332 /** Compute storage space needed by action configuration and copy it. */
334 flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
340 switch (action->type) {
342 const struct rte_flow_action_rss *rss;
345 struct rte_flow_action_rss *rss;
349 case RTE_FLOW_ACTION_TYPE_RSS:
350 src.rss = action->conf;
354 *dst.rss = (struct rte_flow_action_rss){
355 .func = src.rss->func,
356 .level = src.rss->level,
357 .types = src.rss->types,
358 .key_len = src.rss->key_len,
359 .queue_num = src.rss->queue_num,
361 off += sizeof(*src.rss);
362 if (src.rss->key_len) {
363 off = RTE_ALIGN_CEIL(off, sizeof(double));
364 size = sizeof(*src.rss->key) * src.rss->key_len;
366 dst.rss->key = memcpy
367 ((void *)((uintptr_t)dst.rss + off),
371 if (src.rss->queue_num) {
372 off = RTE_ALIGN_CEIL(off, sizeof(double));
373 size = sizeof(*src.rss->queue) * src.rss->queue_num;
375 dst.rss->queue = memcpy
376 ((void *)((uintptr_t)dst.rss + off),
377 src.rss->queue, size);
383 size = rte_flow_desc_action[action->type].size;
385 memcpy(buf, action->conf, size);
389 return RTE_ALIGN_CEIL(size, sizeof(double));
/*
 * NOTE(review): this listing is truncated — interior lines and the tail of
 * rte_flow_copy() (past the last visible line) are missing from this chunk.
 * Code is left byte-identical; comments below are hedged accordingly.
 *
 * Apparent contract (from the visible fd==NULL / fd!=NULL dual use): a
 * two-pass serializer — first pass measures the flattened size of attr +
 * items + actions, second pass copies everything into desc->data. TODO:
 * confirm against the complete upstream function.
 */
392 /** Store a full rte_flow description. */
394 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
395 const struct rte_flow_attr *attr,
396 const struct rte_flow_item *items,
397 const struct rte_flow_action *actions)
/* fd == NULL means measurement-only pass; copies are skipped. */
399 struct rte_flow_desc *fd = NULL;
407 const struct rte_flow_item *item;
/* Items are stored first, at off1 within fd->data. */
411 fd->items = (void *)&fd->data[off1];
413 struct rte_flow_item *dst = NULL;
/* Reject item types outside the description table or without a name. */
415 if ((size_t)item->type >=
416 RTE_DIM(rte_flow_desc_item) ||
417 !rte_flow_desc_item[item->type].name) {
422 dst = memcpy(fd->data + off1, item,
424 off1 += sizeof(*item);
/* spec/last/mask payloads go into a second region at off2. */
427 dst->spec = fd->data + off2;
428 off2 += flow_item_spec_copy
429 (fd ? fd->data + off2 : NULL, item,
434 dst->last = fd->data + off2;
435 off2 += flow_item_spec_copy
436 (fd ? fd->data + off2 : NULL, item,
441 dst->mask = fd->data + off2;
442 off2 += flow_item_spec_copy
443 (fd ? fd->data + off2 : NULL, item,
446 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
447 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
448 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
451 const struct rte_flow_action *action;
/* Actions follow the items in the same off1 region. */
455 fd->actions = (void *)&fd->data[off1];
457 struct rte_flow_action *dst = NULL;
459 if ((size_t)action->type >=
460 RTE_DIM(rte_flow_desc_action) ||
461 !rte_flow_desc_action[action->type].name) {
466 dst = memcpy(fd->data + off1, action,
468 off1 += sizeof(*action);
471 dst->conf = fd->data + off2;
472 off2 += flow_action_conf_copy
473 (fd ? fd->data + off2 : NULL, action);
475 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
476 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
/* Total size: aligned header + item/action region + payload region. */
480 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
481 tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
483 size = tmp + off1 + off2;
488 *fd = (const struct rte_flow_desc) {
/* Rebase tmp to an offset within fd->data for the copy pass. */
492 tmp -= offsetof(struct rte_flow_desc, data);