/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

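/*
 * Illustrative note (not part of the original file): MK_FLOW_ITEM() above
 * expands to a designated initializer indexed by item type, e.g.
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) becomes:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *	},
 *
 * so that an item's name and spec size can be looked up by its type.
 */
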
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
};

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

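/*
 * Illustrative PMD-side sketch (not part of this file): drivers typically
 * publish their rte_flow_ops through the filter_ctrl callback queried
 * above. Names prefixed with "foo_" are hypothetical.
 *
 *	static int
 *	foo_dev_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
 *			    enum rte_filter_type filter_type,
 *			    enum rte_filter_op filter_op, void *arg)
 *	{
 *		if (filter_type != RTE_ETH_FILTER_GENERIC)
 *			return -ENOTSUP;
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &foo_flow_ops;
 *		return 0;
 *	}
 */
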
/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

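/*
 * Illustrative usage sketch (not part of this file): validate and create a
 * rule steering IPv4/TCP traffic to Rx queue 1. Error handling is
 * abbreviated and "port_id" is assumed to be a configured, started port.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (!flow)
 *		printf("flow rule rejected: %s\n",
 *		       err.message ? err.message : rte_strerror(rte_errno));
 */
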
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

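/*
 * Illustrative usage sketch (not part of this file): reading the COUNT
 * action counters of an existing rule. "flow" is assumed to have been
 * created with a COUNT action.
 *
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, &count_action, &count, &err) &&
 *	    count.hits_set)
 *		printf("hits: %llu\n", (unsigned long long)count.hits);
 */
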
/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

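/*
 * Layout note (illustrative, not from the original file): for RAW items the
 * fixed structure is written first, the variable-length pattern bytes are
 * appended right after it, and dst.raw->pattern is redirected to that copy
 * inside the output buffer, so the serialized item is self-contained.
 */
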
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} src;
		union {
			struct rte_flow_action_rss *rss;
		} dst;
		size_t tmp;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

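/*
 * Layout note (illustrative, not from the original file): an RSS action is
 * serialized as its fixed structure followed by the hash key bytes and the
 * queue array, with dst.rss->key and dst.rss->queue pointing back into the
 * same output buffer.
 */
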
/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/** Helper function to convert flow API objects. */
int __rte_experimental
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

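/*
 * Illustrative usage sketch (not part of this file): rte_flow_copy()
 * reports the space it needs even when "len" is too small, so callers can
 * size the buffer in two passes; internally it is built on rte_flow_conv()
 * above. Error handling is abbreviated.
 *
 *	struct rte_flow_desc probe;
 *	struct rte_flow_desc *desc;
 *	size_t need;
 *
 *	need = rte_flow_copy(&probe, 0, attr, items, actions);
 *	desc = need ? malloc(need) : NULL;
 *	if (desc)
 *		rte_flow_copy(desc, need, attr, items, actions);
 */
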
/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
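/*
 * Illustrative note (not part of the original file): assuming an expansion
 * graph in which IPV4 can be followed by UDP and TCP, a user pattern of
 * ETH / IPV4 / END expanded with both UDP and TCP hash types yields
 * entries along the lines of:
 *
 *	ETH / IPV4 / END
 *	ETH / IPV4 / UDP / END
 *	ETH / IPV4 / TCP / END
 *
 * each entry carrying a priority that grows with the number of items
 * appended to the original pattern.
 */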
int __rte_experimental
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;

	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		const struct rte_flow_expand_node *next = NULL;

		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them. When stack_pos is 0,
			 * the stack holds one element, plus the additional
			 * END item.
			 */
			int elt = stack_pos + 2;

			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;