1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include "rte_ethdev.h"
16 #include "rte_flow_driver.h"
/*
 * NOTE(review): this excerpt is a partial numbered listing; several original
 * lines (struct members, rest of the macro body) are missing between the
 * lines shown below — confirm against the full file before editing.
 */
20 * Flow elements description tables.
/* Per-type descriptor used by the rte_flow_conv*() helpers below; presumably
 * holds at least a name string and a size — members not visible here. */
22 struct rte_flow_desc_data {
27 /** Generate flow_item[] entry. */
/* Designated initializer keyed by pattern item type; body truncated here. */
28 #define MK_FLOW_ITEM(t, s) \
29 [RTE_FLOW_ITEM_TYPE_ ## t] = { \
34 /** Information about known flow pattern items. */
/* Table indexed by RTE_FLOW_ITEM_TYPE_*: maps each pattern item type to the
 * byte size of its spec/last/mask structure (0 for items carrying no spec).
 * Consumed by rte_flow_conv_item_spec()/rte_flow_conv_pattern() below.
 * NOTE(review): the END entry (original line 36) and the closing brace are
 * missing from this excerpt. */
35 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
37 MK_FLOW_ITEM(VOID, 0),
38 MK_FLOW_ITEM(INVERT, 0),
39 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
41 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
42 MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
43 MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
44 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
45 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
46 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
47 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
48 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
49 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
50 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
51 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
52 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
53 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
54 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
55 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
56 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
57 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
58 MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
59 MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
/* GTPC/GTPU deliberately reuse the generic GTP item structure. */
60 MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
61 MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
62 MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
63 MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
64 MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
65 MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
66 MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
67 MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
68 MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
69 MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
70 MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
71 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
72 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
73 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
74 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
75 MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
78 /** Generate flow_action[] entry. */
/* Designated initializer keyed by action type, mirroring MK_FLOW_ITEM above.
 * NOTE(review): the macro body (original lines 81+) is missing from this
 * excerpt. */
79 #define MK_FLOW_ACTION(t, s) \
80 [RTE_FLOW_ACTION_TYPE_ ## t] = { \
85 /** Information about known flow actions. */
/* Table indexed by RTE_FLOW_ACTION_TYPE_*: maps each action type to the byte
 * size of its configuration structure (0 for actions with no configuration).
 * Consumed by rte_flow_conv_action_conf()/rte_flow_conv_actions() below.
 * NOTE(review): the closing brace of the array is missing from this excerpt. */
86 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
87 MK_FLOW_ACTION(END, 0),
88 MK_FLOW_ACTION(VOID, 0),
89 MK_FLOW_ACTION(PASSTHRU, 0),
90 MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
91 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
92 MK_FLOW_ACTION(FLAG, 0),
93 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
94 MK_FLOW_ACTION(DROP, 0),
95 MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
96 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
97 MK_FLOW_ACTION(PF, 0),
98 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
99 MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
100 MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
101 MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
102 MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
103 MK_FLOW_ACTION(OF_SET_MPLS_TTL,
104 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
105 MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
106 MK_FLOW_ACTION(OF_SET_NW_TTL,
107 sizeof(struct rte_flow_action_of_set_nw_ttl)),
108 MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
109 MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
110 MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
111 MK_FLOW_ACTION(OF_POP_VLAN, 0),
112 MK_FLOW_ACTION(OF_PUSH_VLAN,
113 sizeof(struct rte_flow_action_of_push_vlan)),
114 MK_FLOW_ACTION(OF_SET_VLAN_VID,
115 sizeof(struct rte_flow_action_of_set_vlan_vid)),
116 MK_FLOW_ACTION(OF_SET_VLAN_PCP,
117 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
118 MK_FLOW_ACTION(OF_POP_MPLS,
119 sizeof(struct rte_flow_action_of_pop_mpls)),
120 MK_FLOW_ACTION(OF_PUSH_MPLS,
121 sizeof(struct rte_flow_action_of_push_mpls)),
122 MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
123 MK_FLOW_ACTION(VXLAN_DECAP, 0),
/* NVGRE_ENCAP intentionally uses the VXLAN encap size: the two config
 * structures are layout-compatible (see the RTE_BUILD_BUG_ON in
 * rte_flow_conv_action_conf() further down this file). */
124 MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
125 MK_FLOW_ACTION(NVGRE_DECAP, 0),
126 MK_FLOW_ACTION(SET_IPV4_SRC,
127 sizeof(struct rte_flow_action_set_ipv4)),
128 MK_FLOW_ACTION(SET_IPV4_DST,
129 sizeof(struct rte_flow_action_set_ipv4)),
130 MK_FLOW_ACTION(SET_IPV6_SRC,
131 sizeof(struct rte_flow_action_set_ipv6)),
132 MK_FLOW_ACTION(SET_IPV6_DST,
133 sizeof(struct rte_flow_action_set_ipv6)),
134 MK_FLOW_ACTION(SET_TP_SRC,
135 sizeof(struct rte_flow_action_set_tp)),
136 MK_FLOW_ACTION(SET_TP_DST,
137 sizeof(struct rte_flow_action_set_tp)),
138 MK_FLOW_ACTION(MAC_SWAP, 0),
/*
 * Normalize a driver return code for flow API wrappers: if the underlying
 * device has been physically removed, override any error with EIO so callers
 * see a consistent diagnosis.  NOTE(review): the storage class/return type
 * line and the fallthrough path (presumably `return ret;`) are missing from
 * this excerpt.
 */
142 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
146 if (rte_eth_dev_is_removed(port_id))
147 return rte_flow_error_set(error, EIO,
148 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
149 NULL, rte_strerror(EIO));
153 /* Get generic flow operations structure from a port. */
/* Fetches the driver's rte_flow_ops through the RTE_ETH_FILTER_GENERIC
 * filter_ctrl hook.  On failure, sets `error` with rte_flow_error_set()
 * below and (presumably) returns NULL — the error-code selection and the
 * success return are on lines missing from this excerpt; TODO confirm. */
154 const struct rte_flow_ops *
155 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
157 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
158 const struct rte_flow_ops *ops;
/* Invalid port and missing/failing filter_ctrl are both treated as errors. */
161 if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
163 else if (unlikely(!dev->dev_ops->filter_ctrl ||
164 dev->dev_ops->filter_ctrl(dev,
165 RTE_ETH_FILTER_GENERIC,
172 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
173 NULL, rte_strerror(code));
177 /* Check whether a flow rule can be created on a given port. */
/* Thin dispatch wrapper: delegates to ops->validate when the driver provides
 * it (normalizing the result through flow_err()), otherwise reports ENOSYS.
 * NOTE(review): the return-type line, opening brace and the NULL-ops guard
 * (original lines 178, 184, 187-189) are missing from this excerpt. */
179 rte_flow_validate(uint16_t port_id,
180 const struct rte_flow_attr *attr,
181 const struct rte_flow_item pattern[],
182 const struct rte_flow_action actions[],
183 struct rte_flow_error *error)
185 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
186 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
190 if (likely(!!ops->validate))
191 return flow_err(port_id, ops->validate(dev, attr, pattern,
192 actions, error), error);
193 return rte_flow_error_set(error, ENOSYS,
194 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
195 NULL, rte_strerror(ENOSYS));
198 /* Create a flow rule on a given port. */
/* Delegates to ops->create and returns the driver's flow handle; when the
 * driver returns NULL, the failure (from rte_errno) is normalized through
 * flow_err().  Drivers lacking ->create get ENOSYS.  NOTE(review): the
 * return-type line, braces and the returns around lines 214-221 are missing
 * from this excerpt. */
200 rte_flow_create(uint16_t port_id,
201 const struct rte_flow_attr *attr,
202 const struct rte_flow_item pattern[],
203 const struct rte_flow_action actions[],
204 struct rte_flow_error *error)
206 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
207 struct rte_flow *flow;
208 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
212 if (likely(!!ops->create)) {
213 flow = ops->create(dev, attr, pattern, actions, error);
/* Presumably only reached when flow == NULL — guard line missing here. */
215 flow_err(port_id, -rte_errno, error);
218 rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
219 NULL, rte_strerror(ENOSYS));
223 /* Destroy a flow rule on a given port. */
/* Delegates to ops->destroy, normalizing the result through flow_err();
 * ENOSYS when the driver does not implement destruction.  NOTE(review):
 * return-type line, braces and the tail of the ops->destroy call (original
 * line 236, the `error` argument) are missing from this excerpt. */
225 rte_flow_destroy(uint16_t port_id,
226 struct rte_flow *flow,
227 struct rte_flow_error *error)
229 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
230 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
234 if (likely(!!ops->destroy))
235 return flow_err(port_id, ops->destroy(dev, flow, error),
237 return rte_flow_error_set(error, ENOSYS,
238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239 NULL, rte_strerror(ENOSYS));
242 /* Destroy all flow rules associated with a port. */
/* Delegates to ops->flush, normalizing the result through flow_err();
 * ENOSYS when the driver lacks the callback.  NOTE(review): return-type
 * line, braces and the NULL-ops guard are missing from this excerpt. */
244 rte_flow_flush(uint16_t port_id,
245 struct rte_flow_error *error)
247 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
248 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
252 if (likely(!!ops->flush))
253 return flow_err(port_id, ops->flush(dev, error), error);
254 return rte_flow_error_set(error, ENOSYS,
255 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
256 NULL, rte_strerror(ENOSYS));
259 /* Query an existing flow rule. */
/* Delegates to ops->query (e.g. to read counters for an action of the rule),
 * normalizing the result through flow_err(); ENOSYS otherwise.
 * NOTE(review): the `void *data` output parameter (original line 264), the
 * return-type line and the tail of the ops->query call are missing from
 * this excerpt. */
261 rte_flow_query(uint16_t port_id,
262 struct rte_flow *flow,
263 const struct rte_flow_action *action,
265 struct rte_flow_error *error)
267 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
268 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
272 if (likely(!!ops->query))
273 return flow_err(port_id, ops->query(dev, flow, action, data,
275 return rte_flow_error_set(error, ENOSYS,
276 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
277 NULL, rte_strerror(ENOSYS));
280 /* Restrict ingress traffic to the defined flow rules. */
/* Delegates to ops->isolate, normalizing the result through flow_err();
 * ENOSYS otherwise.  NOTE(review): the `int set` toggle parameter (original
 * line 283), return-type line and braces are missing from this excerpt. */
282 rte_flow_isolate(uint16_t port_id,
284 struct rte_flow_error *error)
286 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
287 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
291 if (likely(!!ops->isolate))
292 return flow_err(port_id, ops->isolate(dev, set, error), error);
293 return rte_flow_error_set(error, ENOSYS,
294 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
295 NULL, rte_strerror(ENOSYS));
298 /* Initialize flow error structure. */
/* Fills *error via compound-literal assignment; presumably also sets
 * rte_errno and returns a negative error code, but those lines (original
 * 301, 303-306, 308+) are missing from this excerpt — TODO confirm. */
300 rte_flow_error_set(struct rte_flow_error *error,
302 enum rte_flow_error_type type,
307 *error = (struct rte_flow_error){
317 /** Pattern item specification types. */
/* Selector passed to rte_flow_conv_item_spec() choosing which of the three
 * item pointers (spec/last/mask) to copy.  NOTE(review): the closing brace
 * of the enum is missing from this excerpt. */
318 enum rte_flow_conv_item_spec_type {
319 RTE_FLOW_CONV_ITEM_SPEC,
320 RTE_FLOW_CONV_ITEM_LAST,
321 RTE_FLOW_CONV_ITEM_MASK,
325 * Copy pattern item specification.
328 * Output buffer. Can be NULL if @p size is zero.
330 * Size of @p buf in bytes.
332 * Pattern item to copy specification from.
334 * Specification selector for either @p spec, @p last or @p mask.
337 * Number of bytes needed to store pattern item specification regardless
338 * of @p size. @p buf contents are truncated to @p size if not large
/* NOTE(review): several lines are missing from this excerpt (return-type
 * line, local declarations such as `data`/`off`/`tmp`, union declarations
 * wrapping the `raw` members below, default-case label and final return). */
342 rte_flow_conv_item_spec(void *buf, const size_t size,
343 const struct rte_flow_item *item,
344 enum rte_flow_conv_item_spec_type type)
/* Pick the source pointer according to the selector. */
348 type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
349 type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
350 type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
353 switch (item->type) {
/* These `raw` members presumably belong to small unions (spec/last/mask/
 * src/dst) used for type-safe access — union wrappers missing here. */
355 const struct rte_flow_item_raw *raw;
358 const struct rte_flow_item_raw *raw;
361 const struct rte_flow_item_raw *raw;
364 const struct rte_flow_item_raw *raw;
367 struct rte_flow_item_raw *raw;
/* RAW is special-cased: its `pattern` pointer needs a deep copy appended
 * after the fixed-size structure. */
371 case RTE_FLOW_ITEM_TYPE_RAW:
372 spec.raw = item->spec;
373 last.raw = item->last ? item->last : item->spec;
374 mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
/* Copy the fixed part field by field, truncated to `size`. */
378 (&(struct rte_flow_item_raw){
379 .relative = src.raw->relative,
380 .search = src.raw->search,
381 .reserved = src.raw->reserved,
382 .offset = src.raw->offset,
383 .limit = src.raw->limit,
384 .length = src.raw->length,
386 size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
387 off = sizeof(*dst.raw);
/* Choose the effective pattern length: spec length for SPEC (and for MASK
 * when the masked spec length covers the masked last length), otherwise
 * the masked `last` length. */
388 if (type == RTE_FLOW_CONV_ITEM_SPEC ||
389 (type == RTE_FLOW_CONV_ITEM_MASK &&
390 ((spec.raw->length & mask.raw->length) >=
391 (last.raw->length & mask.raw->length))))
392 tmp = spec.raw->length & mask.raw->length;
394 tmp = last.raw->length & mask.raw->length;
/* Append the variable-length pattern bytes after the fixed part, aligned,
 * and repoint dst.raw->pattern into the output buffer. */
396 off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
397 if (size >= off + tmp)
398 dst.raw->pattern = rte_memcpy
399 ((void *)((uintptr_t)dst.raw + off),
400 src.raw->pattern, tmp);
/* Default: plain copy sized by the rte_flow_desc_item[] table. */
405 off = rte_flow_desc_item[item->type].size;
406 rte_memcpy(buf, data, (size > off ? off : size));
413 * Copy action configuration.
416 * Output buffer. Can be NULL if @p size is zero.
418 * Size of @p buf in bytes.
420 * Action to copy configuration from.
423 * Number of bytes needed to store pattern item specification regardless
424 * of @p size. @p buf contents are truncated to @p size if not large
/* NOTE(review): lines are missing from this excerpt (return-type line,
 * src/dst union wrappers around the members below, `off`/`tmp`/`ret`
 * declarations, break statements, default label and final return). */
428 rte_flow_conv_action_conf(void *buf, const size_t size,
429 const struct rte_flow_action *action)
433 switch (action->type) {
/* Presumably a `src` union of read-only config pointers ... */
435 const struct rte_flow_action_rss *rss;
436 const struct rte_flow_action_vxlan_encap *vxlan_encap;
437 const struct rte_flow_action_nvgre_encap *nvgre_encap;
/* ... and a matching writable `dst` union. */
440 struct rte_flow_action_rss *rss;
441 struct rte_flow_action_vxlan_encap *vxlan_encap;
442 struct rte_flow_action_nvgre_encap *nvgre_encap;
/* RSS: deep-copy the fixed struct, then append key and queue arrays. */
447 case RTE_FLOW_ACTION_TYPE_RSS:
448 src.rss = action->conf;
451 (&(struct rte_flow_action_rss){
452 .func = src.rss->func,
453 .level = src.rss->level,
454 .types = src.rss->types,
455 .key_len = src.rss->key_len,
456 .queue_num = src.rss->queue_num,
458 size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
459 off = sizeof(*dst.rss);
460 if (src.rss->key_len) {
461 off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
462 tmp = sizeof(*src.rss->key) * src.rss->key_len;
463 if (size >= off + tmp)
464 dst.rss->key = rte_memcpy
465 ((void *)((uintptr_t)dst.rss + off),
469 if (src.rss->queue_num) {
470 off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
471 tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
472 if (size >= off + tmp)
473 dst.rss->queue = rte_memcpy
474 ((void *)((uintptr_t)dst.rss + off),
475 src.rss->queue, tmp);
/* VXLAN/NVGRE encap share one code path; the build-time assertion below
 * guarantees their config structures are layout-compatible. */
479 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
480 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
481 src.vxlan_encap = action->conf;
482 dst.vxlan_encap = buf;
483 RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
484 sizeof(*src.nvgre_encap) ||
485 offsetof(struct rte_flow_action_vxlan_encap,
487 offsetof(struct rte_flow_action_nvgre_encap,
489 off = sizeof(*dst.vxlan_encap);
/* The embedded item list is converted recursively via rte_flow_conv(). */
490 if (src.vxlan_encap->definition) {
492 (off, sizeof(*dst.vxlan_encap->definition));
494 (RTE_FLOW_CONV_OP_PATTERN,
495 (void *)((uintptr_t)dst.vxlan_encap + off),
496 size > off ? size - off : 0,
497 src.vxlan_encap->definition, NULL);
500 if (size >= off + ret)
501 dst.vxlan_encap->definition =
502 (void *)((uintptr_t)dst.vxlan_encap +
/* Default: plain copy sized by the rte_flow_desc_action[] table. */
508 off = rte_flow_desc_action[action->type].size;
509 rte_memcpy(buf, action->conf, (size > off ? off : size));
516 * Copy a list of pattern items.
519 * Destination buffer. Can be NULL if @p size is zero.
521 * Size of @p dst in bytes.
523 * Source pattern items.
525 * Maximum number of pattern items to process from @p src or 0 to process
526 * the entire list. In both cases, processing stops after
527 * RTE_FLOW_ITEM_TYPE_END is encountered.
529 * Perform verbose error reporting if not NULL.
532 * A positive value representing the number of bytes needed to store
533 * pattern items regardless of @p size on success (@p buf contents are
534 * truncated to @p size if not large enough), a negative errno value
535 * otherwise and rte_errno is set.
/* NOTE(review): lines are missing from this excerpt (return-type line,
 * `num` parameter, local declarations, END-item handling between the loop
 * body sections, and the final return). */
538 rte_flow_conv_pattern(struct rte_flow_item *dst,
540 const struct rte_flow_item *src,
542 struct rte_flow_error *error)
/* Base address used to compute spec/last/mask offsets into the buffer. */
544 uintptr_t data = (uintptr_t)dst;
549 for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
/* Reject item types the description table does not know about. */
550 if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
551 !rte_flow_desc_item[src->type].name)
552 return rte_flow_error_set
553 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
554 "cannot convert unknown item type");
555 if (size >= off + sizeof(*dst))
556 *dst = (struct rte_flow_item){
/* Each of spec/last/mask is deep-copied after the item array, 8-byte
 * aligned; the dst pointer is only patched when it fits in `size`. */
568 off = RTE_ALIGN_CEIL(off, sizeof(double));
569 ret = rte_flow_conv_item_spec
570 ((void *)(data + off),
571 size > off ? size - off : 0, src,
572 RTE_FLOW_CONV_ITEM_SPEC);
573 if (size && size >= off + ret)
574 dst->spec = (void *)(data + off);
579 off = RTE_ALIGN_CEIL(off, sizeof(double));
580 ret = rte_flow_conv_item_spec
581 ((void *)(data + off),
582 size > off ? size - off : 0, src,
583 RTE_FLOW_CONV_ITEM_LAST);
584 if (size && size >= off + ret)
585 dst->last = (void *)(data + off);
589 off = RTE_ALIGN_CEIL(off, sizeof(double));
590 ret = rte_flow_conv_item_spec
591 ((void *)(data + off),
592 size > off ? size - off : 0, src,
593 RTE_FLOW_CONV_ITEM_MASK);
594 if (size && size >= off + ret)
595 dst->mask = (void *)(data + off);
605 * Copy a list of actions.
608 * Destination buffer. Can be NULL if @p size is zero.
610 * Size of @p dst in bytes.
614 * Maximum number of actions to process from @p src or 0 to process the
615 * entire list. In both cases, processing stops after
616 * RTE_FLOW_ACTION_TYPE_END is encountered.
618 * Perform verbose error reporting if not NULL.
621 * A positive value representing the number of bytes needed to store
622 * actions regardless of @p size on success (@p buf contents are truncated
623 * to @p size if not large enough), a negative errno value otherwise and
/* Action-list counterpart of rte_flow_conv_pattern() above.
 * NOTE(review): lines are missing from this excerpt (return-type line,
 * `num` parameter, local declarations, END handling and final return). */
627 rte_flow_conv_actions(struct rte_flow_action *dst,
629 const struct rte_flow_action *src,
631 struct rte_flow_error *error)
633 uintptr_t data = (uintptr_t)dst;
638 for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
/* Reject action types the description table does not know about. */
639 if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
640 !rte_flow_desc_action[src->type].name)
641 return rte_flow_error_set
642 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
643 src, "cannot convert unknown action type");
644 if (size >= off + sizeof(*dst))
645 *dst = (struct rte_flow_action){
/* Deep-copy each action's configuration after the array, 8-byte aligned. */
657 off = RTE_ALIGN_CEIL(off, sizeof(double));
658 ret = rte_flow_conv_action_conf
659 ((void *)(data + off),
660 size > off ? size - off : 0, src);
661 if (size && size >= off + ret)
662 dst->conf = (void *)(data + off);
672 * Copy flow rule components.
674 * This comprises the flow rule descriptor itself, attributes, pattern and
675 * actions list. NULL components in @p src are skipped.
678 * Destination buffer. Can be NULL if @p size is zero.
680 * Size of @p dst in bytes.
682 * Source flow rule descriptor.
684 * Perform verbose error reporting if not NULL.
687 * A positive value representing the number of bytes needed to store all
688 * components including the descriptor regardless of @p size on success
689 * (@p buf contents are truncated to @p size if not large enough), a
690 * negative errno value otherwise and rte_errno is set.
/* NOTE(review): lines are missing from this excerpt (return-type line,
 * local declarations, attr_ro guard, error-propagation checks after the
 * recursive conversions and the final return). */
693 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
695 const struct rte_flow_conv_rule *src,
696 struct rte_flow_error *error)
/* Start with a zeroed/partial descriptor copy truncated to `size`. */
702 (&(struct rte_flow_conv_rule){
707 size > sizeof(*dst) ? sizeof(*dst) : size);
710 off = RTE_ALIGN_CEIL(off, sizeof(double));
711 if (size && size >= off + sizeof(*dst->attr))
712 dst->attr = rte_memcpy
713 ((void *)((uintptr_t)dst + off),
714 src->attr_ro, sizeof(*dst->attr));
715 off += sizeof(*dst->attr);
717 if (src->pattern_ro) {
718 off = RTE_ALIGN_CEIL(off, sizeof(double));
719 ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
720 size > off ? size - off : 0,
721 src->pattern_ro, 0, error);
724 if (size && size >= off + (size_t)ret)
725 dst->pattern = (void *)((uintptr_t)dst + off);
728 if (src->actions_ro) {
729 off = RTE_ALIGN_CEIL(off, sizeof(double));
730 ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
731 size > off ? size - off : 0,
732 src->actions_ro, 0, error);
/* NOTE(review): unlike the pattern branch above, this check omits the
 * `size &&` guard; harmless because off > 0 makes the comparison false
 * when size == 0, but the asymmetry is worth confirming upstream. */
735 if (size >= off + (size_t)ret)
736 dst->actions = (void *)((uintptr_t)dst + off);
743 * Retrieve the name of a pattern item/action type.
746 * Nonzero when @p src represents an action type instead of a pattern item
749 * Nonzero to write string address instead of contents into @p dst.
751 * Destination buffer. Can be NULL if @p size is zero.
753 * Size of @p dst in bytes.
755 * Depending on @p is_action, source pattern item or action type cast as a
758 * Perform verbose error reporting if not NULL.
761 * A positive value representing the number of bytes needed to store the
762 * name or its address regardless of @p size on success (@p buf contents
763 * are truncated to @p size if not large enough), a negative errno value
764 * otherwise and rte_errno is set.
/* NOTE(review): lines are missing from this excerpt (return-type line,
 * the is_ptr/dst/size/src parameters, the local `struct desc_info`
 * definition and the is_ptr branch guard). */
767 rte_flow_conv_name(int is_action,
772 struct rte_flow_error *error)
/* Local descriptor-table handle; presumably part of `struct desc_info`. */
775 const struct rte_flow_desc_data *data;
/* [0] = pattern items, [1] = actions — indexed by !!is_action below. */
778 static const struct desc_info info_rep[2] = {
779 { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
780 { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
782 const struct desc_info *const info = &info_rep[!!is_action];
/* The type value arrives cast into a pointer argument. */
783 unsigned int type = (uintptr_t)src;
785 if (type >= info->num)
786 return rte_flow_error_set
787 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
788 "unknown object type to retrieve the name of");
/* Non-pointer mode: copy the name string itself (bounded). */
790 return strlcpy(dst, info->data[type].name, size);
/* Pointer mode: store the address of the static name string. */
791 if (size >= sizeof(const char **))
792 *((const char **)dst) = info->data[type].name;
793 return sizeof(const char **);
796 /** Helper function to convert flow API objects. */
/* Public dispatch entry point: routes each RTE_FLOW_CONV_OP_* to the
 * corresponding static helper above.  NOTE(review): the return-type line,
 * dst/size/src parameters, switch statement line and closing braces are
 * missing from this excerpt. */
798 rte_flow_conv(enum rte_flow_conv_op op,
802 struct rte_flow_error *error)
805 const struct rte_flow_attr *attr;
/* NONE is a no-op (presumably returns 0 — line missing here). */
807 case RTE_FLOW_CONV_OP_NONE:
/* ATTR is a plain bounded struct copy; reports full size needed. */
809 case RTE_FLOW_CONV_OP_ATTR:
811 if (size > sizeof(*attr))
812 size = sizeof(*attr);
813 rte_memcpy(dst, attr, size);
814 return sizeof(*attr);
/* Single item/action (num = 1) vs. full list (num = 0) variants. */
815 case RTE_FLOW_CONV_OP_ITEM:
816 return rte_flow_conv_pattern(dst, size, src, 1, error);
817 case RTE_FLOW_CONV_OP_ACTION:
818 return rte_flow_conv_actions(dst, size, src, 1, error);
819 case RTE_FLOW_CONV_OP_PATTERN:
820 return rte_flow_conv_pattern(dst, size, src, 0, error);
821 case RTE_FLOW_CONV_OP_ACTIONS:
822 return rte_flow_conv_actions(dst, size, src, 0, error);
823 case RTE_FLOW_CONV_OP_RULE:
824 return rte_flow_conv_rule(dst, size, src, error);
/* Name lookups: (is_action, is_ptr) flags select table and output form. */
825 case RTE_FLOW_CONV_OP_ITEM_NAME:
826 return rte_flow_conv_name(0, 0, dst, size, src, error);
827 case RTE_FLOW_CONV_OP_ACTION_NAME:
828 return rte_flow_conv_name(1, 0, dst, size, src, error);
829 case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
830 return rte_flow_conv_name(0, 1, dst, size, src, error);
831 case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
832 return rte_flow_conv_name(1, 1, dst, size, src, error);
834 return rte_flow_error_set
835 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
836 "unknown object conversion operation");
839 /** Store a full rte_flow description. */
/* Legacy wrapper over rte_flow_conv(RTE_FLOW_CONV_OP_RULE): lays a
 * rte_flow_conv_rule over the tail of rte_flow_desc so the conversion
 * writes directly into the caller's descriptor.  NOTE(review): many lines
 * are missing from this excerpt (return-type line, the conditional that
 * selects `dst`, the layout-mismatch fallback body around lines 873-875,
 * the negative-ret check and the final return). */
841 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
842 const struct rte_flow_attr *attr,
843 const struct rte_flow_item *items,
844 const struct rte_flow_action *actions)
847 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
848 * to convert the former to the latter without wasting space.
850 struct rte_flow_conv_rule *dst =
852 (void *)((uintptr_t)desc +
853 (offsetof(struct rte_flow_desc, actions) -
854 offsetof(struct rte_flow_conv_rule, actions))) :
/* Usable size left after the descriptor prefix that is not overlapped. */
857 len > sizeof(*desc) - sizeof(*dst) ?
858 len - (sizeof(*desc) - sizeof(*dst)) :
860 struct rte_flow_conv_rule src = {
863 .actions_ro = actions,
/* Layout sanity checks: the overlap trick only works if conv_rule fits
 * exactly over the tail of rte_flow_desc. */
867 RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
868 sizeof(struct rte_flow_conv_rule));
870 (&dst->pattern != &desc->items ||
871 &dst->actions != &desc->actions ||
872 (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
876 ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
/* Account for the non-overlapped descriptor prefix in the reported size. */
879 ret += sizeof(*desc) - sizeof(*dst);
881 (&(struct rte_flow_desc){
884 .items = dst_size ? dst->pattern : NULL,
885 .actions = dst_size ? dst->actions : NULL,
887 len > sizeof(*desc) ? sizeof(*desc) : len);
892 * Expand RSS flows into several possible flows according to the RSS hash
893 * fields requested and the driver capabilities.
895 int __rte_experimental
896 rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
897 const struct rte_flow_item *pattern, uint64_t types,
898 const struct rte_flow_expand_node graph[],
899 int graph_root_index)
902 const struct rte_flow_item *item;
903 const struct rte_flow_expand_node *node = &graph[graph_root_index];
904 const int *next_node;
905 const int *stack[elt_n];
907 struct rte_flow_item flow_items[elt_n];
910 size_t user_pattern_size = 0;
913 lsize = offsetof(struct rte_flow_expand_rss, entry) +
914 elt_n * sizeof(buf->entry[0]);
916 buf->entry[0].priority = 0;
917 buf->entry[0].pattern = (void *)&buf->entry[elt_n];
919 addr = buf->entry[0].pattern;
921 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
922 const struct rte_flow_expand_node *next = NULL;
924 for (i = 0; node->next && node->next[i]; ++i) {
925 next = &graph[node->next[i]];
926 if (next->type == item->type)
931 user_pattern_size += sizeof(*item);
933 user_pattern_size += sizeof(*item); /* Handle END item. */
934 lsize += user_pattern_size;
935 /* Copy the user pattern in the first entry of the buffer. */
937 rte_memcpy(addr, pattern, user_pattern_size);
938 addr = (void *)(((uintptr_t)addr) + user_pattern_size);
941 /* Start expanding. */
942 memset(flow_items, 0, sizeof(flow_items));
943 user_pattern_size -= sizeof(*item);
944 next_node = node->next;
945 stack[stack_pos] = next_node;
946 node = next_node ? &graph[*next_node] : NULL;
948 flow_items[stack_pos].type = node->type;
949 if (node->rss_types & types) {
951 * compute the number of items to copy from the
952 * expansion and copy it.
953 * When the stack_pos is 0, there are 1 element in it,
954 * plus the addition END item.
956 int elt = stack_pos + 2;
958 flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
959 lsize += elt * sizeof(*item) + user_pattern_size;
961 size_t n = elt * sizeof(*item);
963 buf->entry[buf->entries].priority =
965 buf->entry[buf->entries].pattern = addr;
967 rte_memcpy(addr, buf->entry[0].pattern,
969 addr = (void *)(((uintptr_t)addr) +
971 rte_memcpy(addr, flow_items, n);
972 addr = (void *)(((uintptr_t)addr) + n);
977 next_node = node->next;
978 if (stack_pos++ == elt_n) {
982 stack[stack_pos] = next_node;
983 } else if (*(next_node + 1)) {
984 /* Follow up with the next possibility. */
987 /* Move to the next path. */
989 next_node = stack[--stack_pos];
991 stack[stack_pos] = next_node;
993 node = *next_node ? &graph[*next_node] : NULL;