/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
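
/*
 * For reference (illustrative note, not part of the original file): each
 * entry above is produced by MK_FLOW_ITEM(); e.g.
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) expands to the
 * designated initializer:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *	},
 */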

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
};

/* Pass through a PMD return value, reporting EIO if the device was removed. */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
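
/*
 * Usage sketch (illustrative, not part of the original file): a caller builds
 * END-terminated pattern and action arrays, then checks the rule before
 * creating it. port_id is assumed to be a valid, started ethdev port and the
 * queue index is hypothetical.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) < 0)
 *		printf("rule not supported: %s\n",
 *		       err.message ? err.message : rte_strerror(rte_errno));
 */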

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
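
/*
 * Usage sketch (illustrative, reusing the attr/pattern/actions/err variables
 * from the validation example above): creation returns an opaque handle, and
 * a NULL result should be paired with the error structure filled by the PMD.
 *
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow creation failed: %s\n",
 *		       err.message ? err.message : rte_strerror(rte_errno));
 */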

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
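
/*
 * Usage sketch (illustrative, not part of the original file): flushing is the
 * usual cleanup step before stopping or closing a port when individual
 * handles from rte_flow_create() are no longer tracked by the application.
 *
 *	if (rte_flow_flush(port_id, &err) < 0)
 *		printf("failed to flush flow rules: %s\n",
 *		       rte_strerror(rte_errno));
 *	rte_eth_dev_stop(port_id);
 */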

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
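
/*
 * Usage sketch (illustrative, not part of the original file): querying the
 * COUNT action of an existing rule. The queried action must correspond to one
 * of the actions the rule was created with.
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count, &err) == 0)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */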

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
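
/*
 * Usage sketch (illustrative, not part of the original file): isolated mode
 * is normally requested before the port is started, so that only traffic
 * matching explicit flow rules reaches the application.
 *
 *	if (rte_flow_isolate(port_id, 1, &err) < 0)
 *		printf("isolated mode not supported: %s\n",
 *		       rte_strerror(rte_errno));
 */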

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
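
/*
 * Usage sketch (illustrative, not part of the original file): a PMD callback
 * typically reports failures through this helper, which fills the error
 * structure and returns the matching negative errno value in one step.
 *
 *	return rte_flow_error_set(error, ENOTSUP,
 *				  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *				  NULL, "egress is not supported");
 */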

/** Pattern item specification types. */
enum item_spec_type {
	ITEM_SPEC,
	ITEM_LAST,
	ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
		    enum item_spec_type type)
{
	size_t size = 0;
	const void *data =
		type == ITEM_SPEC ? item->spec :
		type == ITEM_LAST ? item->last :
		type == ITEM_MASK ? item->mask :
		NULL;

	if (!item->spec || !data)
		goto empty;
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t off;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
				     sizeof(*src.raw->pattern));
		if (type == ITEM_SPEC ||
		    (type == ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			size = spec.raw->length & mask.raw->length;
		else
			size = last.raw->length & mask.raw->length;
		size = off + size * sizeof(*src.raw->pattern);
		if (dst.raw) {
			memcpy(dst.raw, src.raw, sizeof(*src.raw));
			dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
						  src.raw->pattern,
						  size - off);
		}
		break;
	default:
		size = rte_flow_desc_item[item->type].size;
		if (buf)
			memcpy(buf, data, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
	size_t size = 0;

	if (!action->conf)
		goto empty;
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t off;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		off = 0;
		if (dst.rss)
			*dst.rss = (struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			};
		off += sizeof(*src.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->key) * src.rss->key_len;
			if (dst.rss)
				dst.rss->key = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, size);
			off += size;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			size = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (dst.rss)
				dst.rss->queue = memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, size);
			off += size;
		}
		size = off;
		break;
	default:
		size = rte_flow_desc_action[action->type].size;
		if (buf)
			memcpy(buf, action->conf, size);
		break;
	}
empty:
	return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			if ((size_t)item->type >=
			    RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			if (item->spec) {
				if (fd)
					dst->spec = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_SPEC);
			}
			if (item->last) {
				if (fd)
					dst->last = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_LAST);
			}
			if (item->mask) {
				if (fd)
					dst->mask = fd->data + off2;
				off2 += flow_item_spec_copy
					(fd ? fd->data + off2 : NULL, item,
					 ITEM_MASK);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			if ((size_t)action->type >=
			    RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			if (action->conf) {
				if (fd)
					dst->conf = fd->data + off2;
				off2 += flow_action_conf_copy
					(fd ? fd->data + off2 : NULL, action);
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	if (fd != NULL)
		return size;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}
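
/*
 * Usage sketch (illustrative, not part of the original file): rte_flow_copy()
 * can be driven in two passes, first with a zero length to learn the required
 * size, then with a buffer large enough to hold the whole description.
 *
 *	size_t need = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *	struct rte_flow_desc *fdesc = malloc(need);
 *
 *	if (fdesc != NULL &&
 *	    rte_flow_copy(fdesc, need, &attr, pattern, actions) == need)
 *		printf("stored %zu bytes\n", fdesc->size);
 */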

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int __rte_experimental
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;

	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		const struct rte_flow_expand_node *next = NULL;

		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy it.
			 * When stack_pos is 0, there is one element in it
			 * plus the additional END item.
			 */
			int elt = stack_pos + 2;

			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;