4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
43 #include <rte_common.h>
44 #include <rte_ethdev.h>
45 #include <rte_byteorder.h>
46 #include <cmdline_parse.h>
47 #include <cmdline_parse_etheraddr.h>
52 /** Parser token indices. */
72 /* Top-level command. */
75 /* Sub-level commands. */
83 /* Destroy arguments. */
86 /* Query arguments. */
92 /* Validate/create arguments. */
98 /* Validate/create pattern. */
136 /* Validate/create actions. */
144 /** Size of pattern[] field in struct rte_flow_item_raw. */
145 #define ITEM_RAW_PATTERN_SIZE 36
147 /** Storage size for struct rte_flow_item_raw including pattern. */
148 #define ITEM_RAW_SIZE \
149 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
151 /** Maximum number of subsequent tokens and arguments on the stack. */
152 #define CTX_STACK_SIZE 16
154 /** Parser context. */
156 /** Stack of subsequent token lists to process. */
157 const enum index *next[CTX_STACK_SIZE];
158 /** Arguments for stacked tokens. */
159 const void *args[CTX_STACK_SIZE];
160 enum index curr; /**< Current token index. */
161 enum index prev; /**< Index of the last token seen. */
162 int next_num; /**< Number of entries in next[]. */
163 int args_num; /**< Number of entries in args[]. */
164 uint32_t reparse:1; /**< Start over from the beginning. */
165 uint32_t eol:1; /**< EOL has been detected. */
166 uint32_t last:1; /**< No more arguments. */
167 uint16_t port; /**< Current port ID (for completions). */
168 uint32_t objdata; /**< Object-specific data. */
169 void *object; /**< Address of current object for relative offsets. */
170 void *objmask; /**< Object a full mask must be written to. */
173 /** Token argument. */
175 uint32_t hton:1; /**< Use network byte ordering. */
176 uint32_t sign:1; /**< Value is signed. */
177 uint32_t offset; /**< Relative offset from ctx->object. */
178 uint32_t size; /**< Field size. */
179 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
182 /** Parser token definition. */
184 /** Type displayed during completion (defaults to "TOKEN"). */
186 /** Help displayed during completion (defaults to token name). */
188 /** Private data used by parser functions. */
191 * Lists of subsequent tokens to push on the stack. Each call to the
192 * parser consumes the last entry of that stack.
194 const enum index *const *next;
195 /** Arguments stack for subsequent tokens that need them. */
196 const struct arg *const *args;
198 * Token-processing callback, returns -1 in case of error, the
199 * length of the matched string otherwise. If NULL, attempts to
200 * match the token name.
202 * If buf is not NULL, the result should be stored in it according
203 * to context. An error is returned if not large enough.
205 int (*call)(struct context *ctx, const struct token *token,
206 const char *str, unsigned int len,
207 void *buf, unsigned int size);
209 * Callback that provides possible values for this token, used for
210 * completion. Returns -1 in case of error, the number of possible
211 * values otherwise. If NULL, the token name is used.
213 * If buf is not NULL, entry index ent is written to buf and the
214 * full length of the entry is returned (same behavior as
217 int (*comp)(struct context *ctx, const struct token *token,
218 unsigned int ent, char *buf, unsigned int size);
219 /** Mandatory token name, no default value. */
223 /** Static initializer for the next field. */
224 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
226 /** Static initializer for a NEXT() entry. */
227 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
229 /** Static initializer for the args field. */
230 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
232 /** Static initializer for ARGS() to target a field. */
233 #define ARGS_ENTRY(s, f) \
234 (&(const struct arg){ \
235 .offset = offsetof(s, f), \
236 .size = sizeof(((s *)0)->f), \
239 /** Static initializer for ARGS() to target a bit-field. */
240 #define ARGS_ENTRY_BF(s, f, b) \
241 (&(const struct arg){ \
243 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
246 /** Static initializer for ARGS() to target a pointer. */
247 #define ARGS_ENTRY_PTR(s, f) \
248 (&(const struct arg){ \
249 .size = sizeof(*((s *)0)->f), \
252 /** Static initializer for ARGS() with arbitrary size. */
253 #define ARGS_ENTRY_USZ(s, f, sz) \
254 (&(const struct arg){ \
255 .offset = offsetof(s, f), \
259 /** Same as ARGS_ENTRY() using network byte ordering. */
260 #define ARGS_ENTRY_HTON(s, f) \
261 (&(const struct arg){ \
263 .offset = offsetof(s, f), \
264 .size = sizeof(((s *)0)->f), \
267 /** Parser output buffer layout expected by cmd_flow_parsed(). */
269 enum index command; /**< Flow command. */
270 uint16_t port; /**< Affected port ID. */
273 struct rte_flow_attr attr;
274 struct rte_flow_item *pattern;
275 struct rte_flow_action *actions;
279 } vc; /**< Validate/create arguments. */
283 } destroy; /**< Destroy arguments. */
286 enum rte_flow_action_type action;
287 } query; /**< Query arguments. */
291 } list; /**< List arguments. */
292 } args; /**< Command arguments. */
295 /** Private data for pattern items. */
296 struct parse_item_priv {
297 enum rte_flow_item_type type; /**< Item type. */
298 uint32_t size; /**< Size of item specification structure. */
301 #define PRIV_ITEM(t, s) \
302 (&(const struct parse_item_priv){ \
303 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
307 /** Private data for actions. */
308 struct parse_action_priv {
309 enum rte_flow_action_type type; /**< Action type. */
310 uint32_t size; /**< Size of action configuration structure. */
313 #define PRIV_ACTION(t, s) \
314 (&(const struct parse_action_priv){ \
315 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
319 static const enum index next_vc_attr[] = {
328 static const enum index next_destroy_attr[] = {
334 static const enum index next_list_attr[] = {
340 static const enum index item_param[] = {
349 static const enum index next_item[] = {
365 static const enum index item_any[] = {
371 static const enum index item_vf[] = {
377 static const enum index item_port[] = {
383 static const enum index item_raw[] = {
393 static const enum index item_eth[] = {
401 static const enum index item_vlan[] = {
408 static const enum index item_ipv4[] = {
415 static const enum index item_ipv6[] = {
422 static const enum index next_action[] = {
429 static int parse_init(struct context *, const struct token *,
430 const char *, unsigned int,
431 void *, unsigned int);
432 static int parse_vc(struct context *, const struct token *,
433 const char *, unsigned int,
434 void *, unsigned int);
435 static int parse_vc_spec(struct context *, const struct token *,
436 const char *, unsigned int, void *, unsigned int);
437 static int parse_destroy(struct context *, const struct token *,
438 const char *, unsigned int,
439 void *, unsigned int);
440 static int parse_flush(struct context *, const struct token *,
441 const char *, unsigned int,
442 void *, unsigned int);
443 static int parse_query(struct context *, const struct token *,
444 const char *, unsigned int,
445 void *, unsigned int);
446 static int parse_action(struct context *, const struct token *,
447 const char *, unsigned int,
448 void *, unsigned int);
449 static int parse_list(struct context *, const struct token *,
450 const char *, unsigned int,
451 void *, unsigned int);
452 static int parse_int(struct context *, const struct token *,
453 const char *, unsigned int,
454 void *, unsigned int);
455 static int parse_prefix(struct context *, const struct token *,
456 const char *, unsigned int,
457 void *, unsigned int);
458 static int parse_boolean(struct context *, const struct token *,
459 const char *, unsigned int,
460 void *, unsigned int);
461 static int parse_string(struct context *, const struct token *,
462 const char *, unsigned int,
463 void *, unsigned int);
464 static int parse_mac_addr(struct context *, const struct token *,
465 const char *, unsigned int,
466 void *, unsigned int);
467 static int parse_ipv4_addr(struct context *, const struct token *,
468 const char *, unsigned int,
469 void *, unsigned int);
470 static int parse_ipv6_addr(struct context *, const struct token *,
471 const char *, unsigned int,
472 void *, unsigned int);
473 static int parse_port(struct context *, const struct token *,
474 const char *, unsigned int,
475 void *, unsigned int);
476 static int comp_none(struct context *, const struct token *,
477 unsigned int, char *, unsigned int);
478 static int comp_boolean(struct context *, const struct token *,
479 unsigned int, char *, unsigned int);
480 static int comp_action(struct context *, const struct token *,
481 unsigned int, char *, unsigned int);
482 static int comp_port(struct context *, const struct token *,
483 unsigned int, char *, unsigned int);
484 static int comp_rule_id(struct context *, const struct token *,
485 unsigned int, char *, unsigned int);
487 /** Token definitions. */
488 static const struct token token_list[] = {
489 /* Special tokens. */
492 .help = "null entry, abused as the entry point",
493 .next = NEXT(NEXT_ENTRY(FLOW)),
498 .help = "command may end here",
504 .help = "integer value",
509 .name = "{unsigned}",
511 .help = "unsigned integer value",
518 .help = "prefix length for bit-mask",
519 .call = parse_prefix,
525 .help = "any boolean value",
526 .call = parse_boolean,
527 .comp = comp_boolean,
532 .help = "fixed string",
533 .call = parse_string,
537 .name = "{MAC address}",
539 .help = "standard MAC address notation",
540 .call = parse_mac_addr,
544 .name = "{IPv4 address}",
545 .type = "IPV4 ADDRESS",
546 .help = "standard IPv4 address notation",
547 .call = parse_ipv4_addr,
551 .name = "{IPv6 address}",
552 .type = "IPV6 ADDRESS",
553 .help = "standard IPv6 address notation",
554 .call = parse_ipv6_addr,
560 .help = "rule identifier",
562 .comp = comp_rule_id,
567 .help = "port identifier",
572 .name = "{group_id}",
574 .help = "group identifier",
581 .help = "priority level",
585 /* Top-level command. */
588 .type = "{command} {port_id} [{arg} [...]]",
589 .help = "manage ingress/egress flow rules",
590 .next = NEXT(NEXT_ENTRY
599 /* Sub-level commands. */
602 .help = "check whether a flow rule can be created",
603 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
604 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
609 .help = "create a flow rule",
610 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
611 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
616 .help = "destroy specific flow rules",
617 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
618 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
619 .call = parse_destroy,
623 .help = "destroy all flow rules",
624 .next = NEXT(NEXT_ENTRY(PORT_ID)),
625 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
630 .help = "query an existing flow rule",
631 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
633 NEXT_ENTRY(PORT_ID)),
634 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
635 ARGS_ENTRY(struct buffer, args.query.rule),
636 ARGS_ENTRY(struct buffer, port)),
641 .help = "list existing flow rules",
642 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
643 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
646 /* Destroy arguments. */
649 .help = "specify a rule identifier",
650 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
651 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
652 .call = parse_destroy,
654 /* Query arguments. */
658 .help = "action to query, must be part of the rule",
659 .call = parse_action,
662 /* List arguments. */
665 .help = "specify a group",
666 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
667 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
670 /* Validate/create attributes. */
673 .help = "specify a group",
674 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
675 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
680 .help = "specify a priority level",
681 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
682 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
687 .help = "affect rule to ingress",
688 .next = NEXT(next_vc_attr),
693 .help = "affect rule to egress",
694 .next = NEXT(next_vc_attr),
697 /* Validate/create pattern. */
700 .help = "submit a list of pattern items",
701 .next = NEXT(next_item),
706 .help = "match value perfectly (with full bit-mask)",
707 .call = parse_vc_spec,
709 [ITEM_PARAM_SPEC] = {
711 .help = "match value according to configured bit-mask",
712 .call = parse_vc_spec,
714 [ITEM_PARAM_LAST] = {
716 .help = "specify upper bound to establish a range",
717 .call = parse_vc_spec,
719 [ITEM_PARAM_MASK] = {
721 .help = "specify bit-mask with relevant bits set to one",
722 .call = parse_vc_spec,
724 [ITEM_PARAM_PREFIX] = {
726 .help = "generate bit-mask from a prefix length",
727 .call = parse_vc_spec,
731 .help = "specify next pattern item",
732 .next = NEXT(next_item),
736 .help = "end list of pattern items",
737 .priv = PRIV_ITEM(END, 0),
738 .next = NEXT(NEXT_ENTRY(ACTIONS)),
743 .help = "no-op pattern item",
744 .priv = PRIV_ITEM(VOID, 0),
745 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
750 .help = "perform actions when pattern does not match",
751 .priv = PRIV_ITEM(INVERT, 0),
752 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
757 .help = "match any protocol for the current layer",
758 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
759 .next = NEXT(item_any),
764 .help = "number of layers covered",
765 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
766 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
770 .help = "match packets addressed to the physical function",
771 .priv = PRIV_ITEM(PF, 0),
772 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
777 .help = "match packets addressed to a virtual function ID",
778 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
779 .next = NEXT(item_vf),
784 .help = "destination VF ID",
785 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
786 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
790 .help = "device-specific physical port index to use",
791 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
792 .next = NEXT(item_port),
795 [ITEM_PORT_INDEX] = {
797 .help = "physical port index",
798 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
799 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
803 .help = "match an arbitrary byte string",
804 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
805 .next = NEXT(item_raw),
808 [ITEM_RAW_RELATIVE] = {
810 .help = "look for pattern after the previous item",
811 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
812 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
815 [ITEM_RAW_SEARCH] = {
817 .help = "search pattern from offset (see also limit)",
818 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
819 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
822 [ITEM_RAW_OFFSET] = {
824 .help = "absolute or relative offset for pattern",
825 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
826 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
830 .help = "search area limit for start of pattern",
831 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
832 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
834 [ITEM_RAW_PATTERN] = {
836 .help = "byte string to look for",
837 .next = NEXT(item_raw,
839 NEXT_ENTRY(ITEM_PARAM_IS,
842 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
843 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
845 ITEM_RAW_PATTERN_SIZE)),
849 .help = "match Ethernet header",
850 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
851 .next = NEXT(item_eth),
856 .help = "destination MAC",
857 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
858 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
862 .help = "source MAC",
863 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
864 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
869 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
870 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
874 .help = "match 802.1Q/ad VLAN tag",
875 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
876 .next = NEXT(item_vlan),
881 .help = "tag protocol identifier",
882 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
883 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
887 .help = "tag control information",
888 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
889 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
893 .help = "match IPv4 header",
894 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
895 .next = NEXT(item_ipv4),
900 .help = "source address",
901 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
902 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
907 .help = "destination address",
908 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
909 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
914 .help = "match IPv6 header",
915 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
916 .next = NEXT(item_ipv6),
921 .help = "source address",
922 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
923 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
928 .help = "destination address",
929 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
930 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
933 /* Validate/create actions. */
936 .help = "submit a list of associated actions",
937 .next = NEXT(next_action),
942 .help = "specify next action",
943 .next = NEXT(next_action),
947 .help = "end list of actions",
948 .priv = PRIV_ACTION(END, 0),
953 .help = "no-op action",
954 .priv = PRIV_ACTION(VOID, 0),
955 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
958 [ACTION_PASSTHRU] = {
960 .help = "let subsequent rule process matched packets",
961 .priv = PRIV_ACTION(PASSTHRU, 0),
962 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
967 /** Remove and return last entry from argument stack. */
968 static const struct arg *
969 pop_args(struct context *ctx)
971 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
974 /** Add entry on top of the argument stack. */
976 push_args(struct context *ctx, const struct arg *arg)
978 if (ctx->args_num == CTX_STACK_SIZE)
980 ctx->args[ctx->args_num++] = arg;
984 /** Spread value into buffer according to bit-mask. */
/*
 * NOTE(review): this listing is incomplete — the extraction dropped the
 * return type, braces, loop setup and the return statement. The comments
 * below describe only the visible code; restore the missing lines from
 * the original before compiling.
 */
986 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
/* Walk arg->mask from its most significant end (i starts at full size). */
988 	uint32_t i = arg->size;
/* Byte-order handling differs: on little-endian hosts the mask bytes are
 * consumed from the high end of the field. */
996 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1005 		unsigned int shift = 0;
/* buf points inside dst at the current mask byte (i is pre-decremented
 * by sub, presumably the per-iteration byte count — TODO confirm). */
1006 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For each set bit of the mask byte, deposit the next low-order bit of
 * val into the corresponding destination bit. */
1008 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
1009 			if (!(arg->mask[i] & (1 << shift)))
/* Clear then set the target bit from the low bit of val. */
1014 			*buf &= ~(1 << shift);
1015 			*buf |= (val & 1) << shift;
1024 * Parse a prefix length and generate a bit-mask.
1026 * Last argument (ctx->args) is retrieved to determine mask size, storage
1027 * location and whether the result must use network byte ordering.
/*
 * NOTE(review): listing is incomplete — opening comment marker, braces,
 * several declarations and returns were dropped by extraction. Comments
 * describe visible code only.
 */
1030 parse_prefix(struct context *ctx, const struct token *token,
1031 const char *str, unsigned int len,
1032 void *buf, unsigned int size)
1034 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n most significant bits set — used to fill
 * the partial byte of a prefix mask. */
1035 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1042 /* Argument is expected. */
/* Prefix length is parsed as an unsigned integer covering all of str. */
1046 u = strtoumax(str, &end, 0);
1047 if (errno || (size_t)(end - str) != len)
/* Bit-mask targets delegate to arg_entry_bf_fill(); a NULL dst call is
 * used first to compute the field width (extra). */
1052 extra = arg_entry_bf_fill(NULL, 0, arg);
1061 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1062 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain (offset/size) targets: reject prefixes wider than the field. */
1069 if (bytes > size || bytes + !!extra > size)
1073 buf = (uint8_t *)ctx->object + arg->offset;
/* On little-endian hosts the 0xff run is written at the high end of the
 * field so the mask is network-order when arg->hton is set. */
1074 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1076 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1077 memset(buf, 0x00, size - bytes);
1079 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1083 memset(buf, 0xff, bytes);
1084 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1086 ((uint8_t *)buf)[bytes] = conv[extra];
/* objmask, when present, is fully set for the field. */
1089 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the popped argument before failing. */
1092 push_args(ctx, arg);
1096 /** Default parsing function for token name matching. */
1098 parse_default(struct context *ctx, const struct token *token,
1099 const char *str, unsigned int len,
1100 void *buf, unsigned int size)
1105 if (strncmp(str, token->name, len))
1110 /** Parse flow command, initialize output buffer for subsequent tokens. */
1112 parse_init(struct context *ctx, const struct token *token,
1113 const char *str, unsigned int len,
1114 void *buf, unsigned int size)
1116 struct buffer *out = buf;
1118 /* Token name must match. */
1119 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1121 /* Nothing else to do if there is no buffer. */
1124 /* Make sure buffer is large enough. */
1125 if (size < sizeof(*out))
1127 /* Initialize buffer. */
1128 memset(out, 0x00, sizeof(*out));
1129 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1132 ctx->objmask = NULL;
1136 /** Parse tokens for validate/create commands. */
/*
 * NOTE(review): listing is incomplete — braces, case labels, returns and
 * several statements were dropped by extraction. Comments describe the
 * visible code only.
 */
1138 parse_vc(struct context *ctx, const struct token *token,
1139 const char *str, unsigned int len,
1140 void *buf, unsigned int size)
1142 struct buffer *out = buf;
1146 /* Token name must match. */
1147 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1149 /* Nothing else to do if there is no buffer. */
/* First token of the command: record it and anchor vc.data at the very
 * end of the buffer — per-item/action data grows downward from there. */
1152 if (!out->command) {
1153 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1155 if (sizeof(*out) > size)
1157 out->command = ctx->curr;
1160 ctx->objmask = NULL;
1161 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write into args.vc.attr. */
1165 ctx->object = &out->args.vc.attr;
1166 ctx->objmask = NULL;
1167 switch (ctx->curr) {
1172 out->args.vc.attr.ingress = 1;
1175 out->args.vc.attr.egress = 1;
/* PATTERN: the item array starts right after the buffer header,
 * aligned up. */
1178 out->args.vc.pattern =
1179 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1181 ctx->object = out->args.vc.pattern;
1182 ctx->objmask = NULL;
/* ACTIONS: the action array starts after the pattern items already
 * emitted, aligned up. */
1185 out->args.vc.actions =
1186 (void *)RTE_ALIGN_CEIL((uintptr_t)
1187 (out->args.vc.pattern +
1188 out->args.vc.pattern_n),
1190 ctx->object = out->args.vc.actions;
1191 ctx->objmask = NULL;
/* Default: token is a pattern item while actions are not started yet,
 * an action afterwards. Each reserves spec storage below vc.data. */
1198 if (!out->args.vc.actions) {
1199 const struct parse_item_priv *priv = token->priv;
1200 struct rte_flow_item *item =
1201 out->args.vc.pattern + out->args.vc.pattern_n;
/* Items reserve three objects: spec, last and mask. */
1203 data_size = priv->size * 3; /* spec, last, mask */
1204 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1205 (out->args.vc.data - data_size),
/* Fail when the downward data area would collide with the arrays. */
1207 if ((uint8_t *)item + sizeof(*item) > data)
1209 *item = (struct rte_flow_item){
1212 ++out->args.vc.pattern_n;
1214 ctx->objmask = NULL;
1216 const struct parse_action_priv *priv = token->priv;
1217 struct rte_flow_action *action =
1218 out->args.vc.actions + out->args.vc.actions_n;
/* Actions reserve a single configuration object. */
1220 data_size = priv->size; /* configuration */
1221 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1222 (out->args.vc.data - data_size),
1224 if ((uint8_t *)action + sizeof(*action) > data)
1226 *action = (struct rte_flow_action){
1229 ++out->args.vc.actions_n;
1230 ctx->object = action;
1231 ctx->objmask = NULL;
/* Newly reserved storage is zeroed; objdata remembers its size so
 * parse_vc_spec() can split it into spec/last/mask thirds. */
1233 memset(data, 0, data_size);
1234 out->args.vc.data = data;
1235 ctx->objdata = data_size;
1239 /** Parse pattern item parameter type. */
/*
 * NOTE(review): listing is incomplete — braces, index assignments per
 * case and returns were dropped by extraction. Comments describe visible
 * code only.
 */
1241 parse_vc_spec(struct context *ctx, const struct token *token,
1242 const char *str, unsigned int len,
1243 void *buf, unsigned int size)
1245 struct buffer *out = buf;
1246 struct rte_flow_item *item;
1252 /* Token name must match. */
1253 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1255 /* Parse parameter types. */
/* Each case presumably selects index 0 (spec), 1 (last) or 2 (mask) —
 * the assignments were dropped from this listing; TODO confirm. */
1256 switch (ctx->curr) {
1261 case ITEM_PARAM_SPEC:
1264 case ITEM_PARAM_LAST:
1267 case ITEM_PARAM_PREFIX:
1268 /* Modify next token to expect a prefix. */
1269 if (ctx->next_num < 2)
1271 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1273 case ITEM_PARAM_MASK:
1279 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach spec/last/mask to. */
1282 if (!out->args.vc.pattern_n)
1284 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* vc.data was sized as 3x the item spec size by parse_vc(). */
1285 data_size = ctx->objdata / 3; /* spec, last, mask */
1286 /* Point to selected object. */
1287 ctx->object = out->args.vc.data + (data_size * index);
/* When a mask is implied, objmask targets the third slot. */
1289 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1290 item->mask = ctx->objmask;
1292 ctx->objmask = NULL;
1293 /* Update relevant item pointer. */
1294 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1299 /** Parse tokens for destroy command. */
1301 parse_destroy(struct context *ctx, const struct token *token,
1302 const char *str, unsigned int len,
1303 void *buf, unsigned int size)
1305 struct buffer *out = buf;
1307 /* Token name must match. */
1308 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1310 /* Nothing else to do if there is no buffer. */
1313 if (!out->command) {
1314 if (ctx->curr != DESTROY)
1316 if (sizeof(*out) > size)
1318 out->command = ctx->curr;
1321 ctx->objmask = NULL;
1322 out->args.destroy.rule =
1323 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1327 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1328 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1331 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1332 ctx->objmask = NULL;
1336 /** Parse tokens for flush command. */
1338 parse_flush(struct context *ctx, const struct token *token,
1339 const char *str, unsigned int len,
1340 void *buf, unsigned int size)
1342 struct buffer *out = buf;
1344 /* Token name must match. */
1345 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1347 /* Nothing else to do if there is no buffer. */
1350 if (!out->command) {
1351 if (ctx->curr != FLUSH)
1353 if (sizeof(*out) > size)
1355 out->command = ctx->curr;
1358 ctx->objmask = NULL;
1363 /** Parse tokens for query command. */
1365 parse_query(struct context *ctx, const struct token *token,
1366 const char *str, unsigned int len,
1367 void *buf, unsigned int size)
1369 struct buffer *out = buf;
1371 /* Token name must match. */
1372 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1374 /* Nothing else to do if there is no buffer. */
1377 if (!out->command) {
1378 if (ctx->curr != QUERY)
1380 if (sizeof(*out) > size)
1382 out->command = ctx->curr;
1385 ctx->objmask = NULL;
1390 /** Parse action names. */
/*
 * NOTE(review): listing is incomplete — declarations, NULL checks,
 * the memcpy tail and returns were dropped by extraction. Comments
 * describe visible code only.
 */
1392 parse_action(struct context *ctx, const struct token *token,
1393 const char *str, unsigned int len,
1394 void *buf, unsigned int size)
1396 struct buffer *out = buf;
/* Destination descriptor (args.query.action) pushed by the QUERY token. */
1397 const struct arg *arg = pop_args(ctx);
1401 /* Argument is expected. */
1404 /* Parse action name. */
/* Scan the action token table for a name matching str. */
1405 for (i = 0; next_action[i]; ++i) {
1406 const struct parse_action_priv *priv;
1408 token = &token_list[next_action[i]];
1409 if (strncmp(token->name, str, len))
/* On match, copy the action type into the object field described by
 * arg. */
1415 memcpy((uint8_t *)ctx->object + arg->offset,
/* Error path: restore the popped argument before failing. */
1421 push_args(ctx, arg);
1425 /** Parse tokens for list command. */
1427 parse_list(struct context *ctx, const struct token *token,
1428 const char *str, unsigned int len,
1429 void *buf, unsigned int size)
1431 struct buffer *out = buf;
1433 /* Token name must match. */
1434 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1436 /* Nothing else to do if there is no buffer. */
1439 if (!out->command) {
1440 if (ctx->curr != LIST)
1442 if (sizeof(*out) > size)
1444 out->command = ctx->curr;
1447 ctx->objmask = NULL;
1448 out->args.list.group =
1449 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1453 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1454 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1457 ctx->object = out->args.list.group + out->args.list.group_n++;
1458 ctx->objmask = NULL;
1463 * Parse signed/unsigned integers 8 to 64-bit long.
1465 * Last argument (ctx->args) is retrieved to determine integer type and
1469 parse_int(struct context *ctx, const struct token *token,
1470 const char *str, unsigned int len,
1471 void *buf, unsigned int size)
1473 const struct arg *arg = pop_args(ctx);
1478 /* Argument is expected. */
1483 (uintmax_t)strtoimax(str, &end, 0) :
1484 strtoumax(str, &end, 0);
1485 if (errno || (size_t)(end - str) != len)
1490 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1491 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1495 buf = (uint8_t *)ctx->object + arg->offset;
1499 case sizeof(uint8_t):
1500 *(uint8_t *)buf = u;
1502 case sizeof(uint16_t):
1503 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
1505 case sizeof(uint32_t):
1506 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1508 case sizeof(uint64_t):
1509 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
1514 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1516 buf = (uint8_t *)ctx->objmask + arg->offset;
1521 push_args(ctx, arg);
1528 * Two arguments (ctx->args) are retrieved from the stack to store data and
1529 * its length (in that order).
1532 parse_string(struct context *ctx, const struct token *token,
1533 const char *str, unsigned int len,
1534 void *buf, unsigned int size)
1536 const struct arg *arg_data = pop_args(ctx);
1537 const struct arg *arg_len = pop_args(ctx);
1538 char tmp[16]; /* Ought to be enough. */
1541 /* Arguments are expected. */
1545 push_args(ctx, arg_data);
1548 size = arg_data->size;
1549 /* Bit-mask fill is not supported. */
1550 if (arg_data->mask || size < len)
1554 /* Let parse_int() fill length information first. */
1555 ret = snprintf(tmp, sizeof(tmp), "%u", len);
1558 push_args(ctx, arg_len);
1559 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
1564 buf = (uint8_t *)ctx->object + arg_data->offset;
1565 /* Output buffer is not necessarily NUL-terminated. */
1566 memcpy(buf, str, len);
1567 memset((uint8_t *)buf + len, 0x55, size - len);
1569 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
1572 push_args(ctx, arg_len);
1573 push_args(ctx, arg_data);
1578 * Parse a MAC address.
1580 * Last argument (ctx->args) is retrieved to determine storage size and
1584 parse_mac_addr(struct context *ctx, const struct token *token,
1585 const char *str, unsigned int len,
1586 void *buf, unsigned int size)
1588 const struct arg *arg = pop_args(ctx);
1589 struct ether_addr tmp;
1593 /* Argument is expected. */
1597 /* Bit-mask fill is not supported. */
1598 if (arg->mask || size != sizeof(tmp))
1600 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
1601 if (ret < 0 || (unsigned int)ret != len)
1605 buf = (uint8_t *)ctx->object + arg->offset;
1606 memcpy(buf, &tmp, size);
1608 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1611 push_args(ctx, arg);
1616 * Parse an IPv4 address.
1618 * Last argument (ctx->args) is retrieved to determine storage size and
1622 parse_ipv4_addr(struct context *ctx, const struct token *token,
1623 const char *str, unsigned int len,
1624 void *buf, unsigned int size)
1626 const struct arg *arg = pop_args(ctx);
1631 /* Argument is expected. */
1635 /* Bit-mask fill is not supported. */
1636 if (arg->mask || size != sizeof(tmp))
1638 /* Only network endian is supported. */
/* str is not NUL-terminated at len; copy into str2 (declared on an
 * elided line, presumably len + 1 bytes) because inet_pton() needs a
 * proper C string — TODO confirm against full source. */
1641 memcpy(str2, str, len);
1643 ret = inet_pton(AF_INET, str2, &tmp);
1645 /* Attempt integer parsing. */
/* Dotted-quad form rejected: fall back to plain integer syntax. */
1646 push_args(ctx, arg);
1647 return parse_int(ctx, token, str, len, buf, size);
1651 buf = (uint8_t *)ctx->object + arg->offset;
1652 memcpy(buf, &tmp, size);
1654 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument descriptor. */
1657 push_args(ctx, arg);
1662 * Parse an IPv6 address.
1664 * Last argument (ctx->args) is retrieved to determine storage size and
1668 parse_ipv6_addr(struct context *ctx, const struct token *token,
1669 const char *str, unsigned int len,
1670 void *buf, unsigned int size)
1672 const struct arg *arg = pop_args(ctx);
1674 struct in6_addr tmp;
1678 /* Argument is expected. */
1682 /* Bit-mask fill is not supported. */
1683 if (arg->mask || size != sizeof(tmp))
1685 /* Only network endian is supported. */
/* NUL-terminated copy for inet_pton(); note that unlike the IPv4 path
 * no integer fallback is visible here. */
1688 memcpy(str2, str, len);
1690 ret = inet_pton(AF_INET6, str2, &tmp);
1695 buf = (uint8_t *)ctx->object + arg->offset;
1696 memcpy(buf, &tmp, size);
1698 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument descriptor. */
1701 push_args(ctx, arg);
1705 /** Boolean values (even indices stand for false). */
/* Array entries are elided; the loops below rely on a NULL terminator. */
1706 static const char *const boolean_name[] = {
1715 * Parse a boolean value.
1717 * Last argument (ctx->args) is retrieved to determine storage size and
1721 parse_boolean(struct context *ctx, const struct token *token,
1722 const char *str, unsigned int len,
1723 void *buf, unsigned int size)
1725 const struct arg *arg = pop_args(ctx);
1729 /* Argument is expected. */
/* Look the token up among the known boolean spellings. */
1732 for (i = 0; boolean_name[i]; ++i)
1733 if (!strncmp(str, boolean_name[i], len))
1735 /* Process token as integer. */
/* Matched a name: substitute "1" (odd index) or "0" (even index) and let
 * parse_int() store it; an unmatched token is handed over as-is. */
1736 if (boolean_name[i])
1737 str = i & 1 ? "1" : "0";
1738 push_args(ctx, arg);
1739 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* On success report the ORIGINAL token length, not the substitute's. */
1740 return ret > 0 ? (int)len : ret;
1743 /** Parse port and update context. */
1745 parse_port(struct context *ctx, const struct token *token,
1746 const char *str, unsigned int len,
1747 void *buf, unsigned int size)
/* Compound-literal scratch buffer, zero-initialized except .port. */
1749 struct buffer *out = &(struct buffer){ .port = 0 };
1757 ctx->objmask = NULL;
1758 size = sizeof(*out);
/* Delegate numeric parsing, then cache the port number in the context so
 * later steps (e.g. rule-ID completion) know which port is targeted. */
1760 ret = parse_int(ctx, token, str, len, out, size);
1762 ctx->port = out->port;
1768 /** No completion. */
/* Stub completion callback for tokens that cannot be completed; its
 * return statement is on an elided line. */
1770 comp_none(struct context *ctx, const struct token *token,
1771 unsigned int ent, char *buf, unsigned int size)
1781 /** Complete boolean values. */
1783 comp_boolean(struct context *ctx, const struct token *token,
1784 unsigned int ent, char *buf, unsigned int size)
/* Dual-purpose convention: with buf == NULL the loop only counts entries
 * (count returned on an elided line); with buf set it emits entry #ent. */
1790 for (i = 0; boolean_name[i]; ++i)
1791 if (buf && i == ent)
1792 return snprintf(buf, size, "%s", boolean_name[i]);
1798 /** Complete action names. */
1800 comp_action(struct context *ctx, const struct token *token,
1801 unsigned int ent, char *buf, unsigned int size)
/* Same count-or-emit convention as comp_boolean(), iterating over the
 * NULL-terminated next_action[] index list. */
1807 for (i = 0; next_action[i]; ++i)
1808 if (buf && i == ent)
1809 return snprintf(buf, size, "%s",
1810 token_list[next_action[i]].name);
1816 /** Complete available ports. */
1818 comp_port(struct context *ctx, const struct token *token,
1819 unsigned int ent, char *buf, unsigned int size)
/* Iterate over testpmd's active ports; count-or-emit as in the other
 * comp_*() callbacks (i is incremented on an elided line). */
1826 FOREACH_PORT(p, ports) {
1827 if (buf && i == ent)
1828 return snprintf(buf, size, "%u", p);
1836 /** Complete available rule IDs. */
1838 comp_rule_id(struct context *ctx, const struct token *token,
1839 unsigned int ent, char *buf, unsigned int size)
1842 struct rte_port *port;
1843 struct port_flow *pf;
/* Rule IDs are per-port: require ctx->port to name one valid port;
 * the RTE_PORT_ALL wildcard is explicitly rejected. */
1846 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
1847 ctx->port == (uint16_t)RTE_PORT_ALL)
1849 port = &ports[ctx->port];
/* Walk that port's flow-rule list; count-or-emit as in other comp_*(). */
1850 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1851 if (buf && i == ent)
1852 return snprintf(buf, size, "%u", pf->id);
1860 /** Internal context. */
/* Single shared parser state; all cmdline API callbacks below operate on
 * this one instance (the cmdline library carries no per-token state here). */
1861 static struct context cmd_flow_context;
1863 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of the file. */
1864 cmdline_parse_inst_t cmd_flow;
1866 /** Initialize context. */
1868 cmd_flow_context_init(struct context *ctx)
1870 /* A full memset() is not necessary. */
/* Resets the context field by field (other assignments are elided). */
1881 ctx->objmask = NULL;
1884 /** Parse a token (cmdline API). */
1886 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
1889 struct context *ctx = &cmd_flow_context;
1890 const struct token *token;
1891 const enum index *list;
1896 /* Restart as requested. */
1898 cmd_flow_context_init(ctx);
1899 token = &token_list[ctx->curr];
1900 /* Check argument length. */
/* A token ends at whitespace or at a '#' comment marker. */
1903 for (len = 0; src[len]; ++len)
1904 if (src[len] == '#' || isspace(src[len]))
1908 /* Last argument and EOL detection. */
/* Scan past the token: only whitespace, '#', or EOL may follow the last
 * argument of a command. */
1909 for (i = len; src[i]; ++i)
1910 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
1912 else if (!isspace(src[i])) {
1917 if (src[i] == '\r' || src[i] == '\n') {
1921 /* Initialize context if necessary. */
1922 if (!ctx->next_num) {
1925 ctx->next[ctx->next_num++] = token->next[0];
1927 /* Process argument through candidates. */
1928 ctx->prev = ctx->curr;
/* Candidates come from the top of the "next tokens" stack. */
1929 list = ctx->next[ctx->next_num - 1];
1930 for (i = 0; list[i]; ++i) {
1931 const struct token *next = &token_list[list[i]];
1934 ctx->curr = list[i];
/* Prefer the candidate's own parser callback; fall back to the
 * generic parse_default() otherwise (selection branch elided). */
1936 tmp = next->call(ctx, next, src, len, result, size);
1938 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate matches only if it consumed the whole token. */
1939 if (tmp == -1 || tmp != len)
1947 /* Push subsequent tokens if any. */
/* Both stacks are fixed-size; the RTE_DIM() checks guard overflow
 * (the overflow handling itself is on elided lines). */
1949 for (i = 0; token->next[i]; ++i) {
1950 if (ctx->next_num == RTE_DIM(ctx->next))
1952 ctx->next[ctx->next_num++] = token->next[i];
1954 /* Push arguments if any. */
1956 for (i = 0; token->args[i]; ++i) {
1957 if (ctx->args_num == RTE_DIM(ctx->args))
1959 ctx->args[ctx->args_num++] = token->args[i];
1964 /** Return number of completion entries (cmdline API). */
1966 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
1968 struct context *ctx = &cmd_flow_context;
1969 const struct token *token = &token_list[ctx->curr];
1970 const enum index *list;
1974 /* Tell cmd_flow_parse() that context must be reinitialized. */
1976 /* Count number of tokens in current list. */
/* Use the top of the "next tokens" stack, or the current token's first
 * successor list when the stack is empty (branch elided). */
1978 list = ctx->next[ctx->next_num - 1];
1980 list = token->next[0];
1981 for (i = 0; list[i]; ++i)
1986 * If there is a single token, use its completion callback, otherwise
1987 * return the number of entries.
1989 token = &token_list[list[0]];
1990 if (i == 1 && token->comp) {
1991 /* Save index for cmd_flow_get_help(). */
1992 ctx->prev = list[0];
/* Calling comp with buf == NULL yields the entry count by convention. */
1993 return token->comp(ctx, token, 0, NULL, 0);
1998 /** Return a completion entry (cmdline API). */
2000 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2001 char *dst, unsigned int size)
2003 struct context *ctx = &cmd_flow_context;
2004 const struct token *token = &token_list[ctx->curr];
2005 const enum index *list;
2009 /* Tell cmd_flow_parse() that context must be reinitialized. */
2011 /* Count number of tokens in current list. */
/* Same list-selection logic as cmd_flow_complete_get_nb(). */
2013 list = ctx->next[ctx->next_num - 1];
2015 list = token->next[0];
2016 for (i = 0; list[i]; ++i)
2020 /* If there is a single token, use its completion callback. */
2021 token = &token_list[list[0]];
2022 if (i == 1 && token->comp) {
2023 /* Save index for cmd_flow_get_help(). */
2024 ctx->prev = list[0];
/* A negative comp return means the requested entry does not exist. */
2025 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2027 /* Otherwise make sure the index is valid and use defaults. */
/* Default completion entry is simply the candidate token's name. */
2030 token = &token_list[list[index]];
2031 snprintf(dst, size, "%s", token->name);
2032 /* Save index for cmd_flow_get_help(). */
2033 ctx->prev = list[index];
2037 /** Populate help strings for current token (cmdline API). */
2039 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2041 struct context *ctx = &cmd_flow_context;
/* Help refers to the token recorded by the completion callbacks. */
2042 const struct token *token = &token_list[ctx->prev];
2045 /* Tell cmd_flow_parse() that context must be reinitialized. */
2049 /* Set token type and update global help with details. */
2050 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"))
2052 cmd_flow.help_str = token->help;
/* Fallback when the token has no help text (selection branch elided). */
2054 cmd_flow.help_str = token->name;
2058 /** Token definition template (cmdline API). */
/* Every dynamic token shares this single header: behavior is driven by
 * cmd_flow_context rather than per-token data. Ops table is a compound
 * literal wired to the four callbacks above. */
2059 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2060 .ops = &(struct cmdline_token_ops){
2061 .parse = cmd_flow_parse,
2062 .complete_get_nb = cmd_flow_complete_get_nb,
2063 .complete_get_elt = cmd_flow_complete_get_elt,
2064 .get_help = cmd_flow_get_help,
2069 /** Populate the next dynamic token. */
2071 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2072 cmdline_parse_token_hdr_t *(*hdrs)[])
2074 struct context *ctx = &cmd_flow_context;
2076 /* Always reinitialize context before requesting the first token. */
2078 cmd_flow_context_init(ctx);
2079 /* Return NULL when no more tokens are expected. */
2080 if (!ctx->next_num && ctx->curr) {
2084 /* Determine if command should end here. */
/* At EOL with a fully parsed last token, inspect the remaining
 * candidates (loop body elided) to decide whether to stop. */
2085 if (ctx->eol && ctx->last && ctx->next_num) {
2086 const enum index *list = ctx->next[ctx->next_num - 1];
2089 for (i = 0; list[i]; ++i) {
/* All dynamic tokens reuse the shared template header. */
2096 *hdr = &cmd_flow_token_hdr;
2099 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command onto its port_flow_*() backend; the case
 * labels (and default) sit on elided lines. */
2101 cmd_flow_parsed(const struct buffer *in)
2103 switch (in->command) {
/* Validate and create share the attr/pattern/actions triple. */
2105 port_flow_validate(in->port, &in->args.vc.attr,
2106 in->args.vc.pattern, in->args.vc.actions);
2109 port_flow_create(in->port, &in->args.vc.attr,
2110 in->args.vc.pattern, in->args.vc.actions);
/* Destroy takes an explicit list of rule IDs. */
2113 port_flow_destroy(in->port, in->args.destroy.rule_n,
2114 in->args.destroy.rule);
2117 port_flow_flush(in->port);
2120 port_flow_query(in->port, in->args.query.rule,
2121 in->args.query.action);
2124 port_flow_list(in->port, in->args.list.group_n,
2125 in->args.list.group);
2132 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose entry point: either generates the next dynamic token or
 * dispatches a fully parsed buffer (the selecting branch is elided). */
2134 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2137 cmd_flow_tok(arg0, arg2);
2139 cmd_flow_parsed(arg0);
2142 /** Global parser instance (cmdline API). */
2143 cmdline_parse_inst_t cmd_flow = {
2145 .data = NULL, /**< Unused. */
2146 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2149 }, /**< Tokens are returned by cmd_flow_tok(). */