4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_common.h>
43 #include <rte_ethdev.h>
44 #include <rte_byteorder.h>
45 #include <cmdline_parse.h>
50 /** Parser token indices. */
/* NOTE(review): the body of the token-index enum is elided in this extract;
 * only its section-divider comments survive below. Verify against the full
 * file before relying on ordering. */
65 /* Top-level command. */
68 /* Sub-level commands. */
76 /* Destroy arguments. */
79 /* Query arguments. */
85 /* Validate/create arguments. */
91 /* Validate/create pattern. */
103 /* Validate/create actions. */
111 /** Maximum number of subsequent tokens and arguments on the stack. */
112 #define CTX_STACK_SIZE 16
114 /** Parser context. */
/* NOTE(review): the "struct context {" opener and closing brace are elided
 * here. The fields below make up the global parser state shared by
 * cmd_flow_parse() and the completion callbacks. */
116 /** Stack of subsequent token lists to process. */
117 const enum index *next[CTX_STACK_SIZE];
118 /** Arguments for stacked tokens. */
119 const void *args[CTX_STACK_SIZE];
120 enum index curr; /**< Current token index. */
121 enum index prev; /**< Index of the last token seen. */
122 int next_num; /**< Number of entries in next[]. */
123 int args_num; /**< Number of entries in args[]. */
124 uint32_t reparse:1; /**< Start over from the beginning. */
125 uint32_t eol:1; /**< EOL has been detected. */
126 uint32_t last:1; /**< No more arguments. */
127 uint16_t port; /**< Current port ID (for completions). */
128 uint32_t objdata; /**< Object-specific data. */
129 void *object; /**< Address of current object for relative offsets. */
130 void *objmask; /**< Object a full mask must be written to. */
133 /** Token argument. */
/* Describes where a parsed value is stored relative to ctx->object.
 * NOTE(review): the "struct arg {" opener and closing brace are elided. */
135 uint32_t hton:1; /**< Use network byte ordering. */
136 uint32_t sign:1; /**< Value is signed. */
137 uint32_t offset; /**< Relative offset from ctx->object. */
138 uint32_t size; /**< Field size. */
139 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
142 /** Parser token definition. */
/* NOTE(review): the "struct token {" opener, several member declarations
 * (type, help, priv, name) and the closing brace are elided; only their
 * doc comments and the callback members remain visible. */
144 /** Type displayed during completion (defaults to "TOKEN"). */
146 /** Help displayed during completion (defaults to token name). */
148 /** Private data used by parser functions. */
151 * Lists of subsequent tokens to push on the stack. Each call to the
152 * parser consumes the last entry of that stack.
154 const enum index *const *next;
155 /** Arguments stack for subsequent tokens that need them. */
156 const struct arg *const *args;
158 * Token-processing callback, returns -1 in case of error, the
159 * length of the matched string otherwise. If NULL, attempts to
160 * match the token name.
162 * If buf is not NULL, the result should be stored in it according
163 * to context. An error is returned if not large enough.
165 int (*call)(struct context *ctx, const struct token *token,
166 const char *str, unsigned int len,
167 void *buf, unsigned int size);
169 * Callback that provides possible values for this token, used for
170 * completion. Returns -1 in case of error, the number of possible
171 * values otherwise. If NULL, the token name is used.
173 * If buf is not NULL, entry index ent is written to buf and the
174 * full length of the entry is returned (same behavior as
177 int (*comp)(struct context *ctx, const struct token *token,
178 unsigned int ent, char *buf, unsigned int size);
179 /** Mandatory token name, no default value. */
183 /** Static initializer for the next field. */
184 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
186 /** Static initializer for a NEXT() entry. */
187 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
189 /** Static initializer for the args field. */
190 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
/* NOTE(review): the ARGS_ENTRY* macros below are multi-line definitions
 * whose trailing lines (closing "})" etc.) are elided in this extract;
 * no comments are inserted inside the continuation runs. */
192 /** Static initializer for ARGS() to target a field. */
193 #define ARGS_ENTRY(s, f) \
194 (&(const struct arg){ \
195 .offset = offsetof(s, f), \
196 .size = sizeof(((s *)0)->f), \
199 /** Static initializer for ARGS() to target a bit-field. */
200 #define ARGS_ENTRY_BF(s, f, b) \
201 (&(const struct arg){ \
203 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
206 /** Static initializer for ARGS() to target a pointer. */
207 #define ARGS_ENTRY_PTR(s, f) \
208 (&(const struct arg){ \
209 .size = sizeof(*((s *)0)->f), \
212 /** Parser output buffer layout expected by cmd_flow_parsed(). */
/* NOTE(review): the "struct buffer {" opener, the anonymous union wrapper
 * and several sub-struct openers/fields are elided; the visible fields show
 * the per-command argument layouts (vc, destroy, query, list). */
214 enum index command; /**< Flow command. */
215 uint16_t port; /**< Affected port ID. */
218 struct rte_flow_attr attr;
219 struct rte_flow_item *pattern;
220 struct rte_flow_action *actions;
224 } vc; /**< Validate/create arguments. */
228 } destroy; /**< Destroy arguments. */
231 enum rte_flow_action_type action;
232 } query; /**< Query arguments. */
236 } list; /**< List arguments. */
237 } args; /**< Command arguments. */
240 /** Private data for pattern items. */
241 struct parse_item_priv {
242 enum rte_flow_item_type type; /**< Item type. */
243 uint32_t size; /**< Size of item specification structure. */
/* NOTE(review): closing brace of parse_item_priv and the tail of the
 * PRIV_ITEM() macro are elided in this extract. */
246 #define PRIV_ITEM(t, s) \
247 (&(const struct parse_item_priv){ \
248 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
252 /** Private data for actions. */
253 struct parse_action_priv {
254 enum rte_flow_action_type type; /**< Action type. */
255 uint32_t size; /**< Size of action configuration structure. */
/* NOTE(review): closing brace of parse_action_priv and the tail of the
 * PRIV_ACTION() macro are elided as well. */
258 #define PRIV_ACTION(t, s) \
259 (&(const struct parse_action_priv){ \
260 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
/* Token-index lists referenced by token_list[] .next fields.
 * NOTE(review): the initializer bodies (and presumably their ZERO
 * terminators) are elided in this extract. */
264 static const enum index next_vc_attr[] = {
273 static const enum index next_destroy_attr[] = {
279 static const enum index next_list_attr[] = {
286 static const enum index item_param[] = {
295 static const enum index next_item[] = {
302 static const enum index next_action[] = {
/* Forward declarations for the token-processing (parse_*) and completion
 * (comp_*) callbacks referenced by token_list[] below. All parse_*
 * callbacks share the struct token .call signature; all comp_* callbacks
 * share the .comp signature. */
309 static int parse_init(struct context *, const struct token *,
310 const char *, unsigned int,
311 void *, unsigned int);
312 static int parse_vc(struct context *, const struct token *,
313 const char *, unsigned int,
314 void *, unsigned int);
315 static int parse_vc_spec(struct context *, const struct token *,
316 const char *, unsigned int, void *, unsigned int);
317 static int parse_destroy(struct context *, const struct token *,
318 const char *, unsigned int,
319 void *, unsigned int);
320 static int parse_flush(struct context *, const struct token *,
321 const char *, unsigned int,
322 void *, unsigned int);
323 static int parse_query(struct context *, const struct token *,
324 const char *, unsigned int,
325 void *, unsigned int);
326 static int parse_action(struct context *, const struct token *,
327 const char *, unsigned int,
328 void *, unsigned int);
329 static int parse_list(struct context *, const struct token *,
330 const char *, unsigned int,
331 void *, unsigned int);
332 static int parse_int(struct context *, const struct token *,
333 const char *, unsigned int,
334 void *, unsigned int);
335 static int parse_prefix(struct context *, const struct token *,
336 const char *, unsigned int,
337 void *, unsigned int);
338 static int parse_port(struct context *, const struct token *,
339 const char *, unsigned int,
340 void *, unsigned int);
341 static int comp_none(struct context *, const struct token *,
342 unsigned int, char *, unsigned int);
343 static int comp_action(struct context *, const struct token *,
344 unsigned int, char *, unsigned int);
345 static int comp_port(struct context *, const struct token *,
346 unsigned int, char *, unsigned int);
347 static int comp_rule_id(struct context *, const struct token *,
348 unsigned int, char *, unsigned int);
350 /** Token definitions. */
/* Master table indexed by enum index; drives parsing and completion.
 * NOTE(review): many entries are partially elided here — the designated
 * index labels (e.g. [FLOW] = {), .name/.type fields and closing braces
 * are mostly missing; only .help/.next/.args/.call/.priv lines survive. */
351 static const struct token token_list[] = {
352 /* Special tokens. */
355 .help = "null entry, abused as the entry point",
356 .next = NEXT(NEXT_ENTRY(FLOW)),
361 .help = "command may end here",
367 .help = "integer value",
372 .name = "{unsigned}",
374 .help = "unsigned integer value",
381 .help = "prefix length for bit-mask",
382 .call = parse_prefix,
388 .help = "rule identifier",
390 .comp = comp_rule_id,
395 .help = "port identifier",
400 .name = "{group_id}",
402 .help = "group identifier",
409 .help = "priority level",
413 /* Top-level command. */
416 .type = "{command} {port_id} [{arg} [...]]",
417 .help = "manage ingress/egress flow rules",
418 .next = NEXT(NEXT_ENTRY
427 /* Sub-level commands. */
430 .help = "check whether a flow rule can be created",
431 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
432 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
437 .help = "create a flow rule",
438 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
439 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
444 .help = "destroy specific flow rules",
445 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
446 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
447 .call = parse_destroy,
451 .help = "destroy all flow rules",
452 .next = NEXT(NEXT_ENTRY(PORT_ID)),
453 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
458 .help = "query an existing flow rule",
459 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
461 NEXT_ENTRY(PORT_ID)),
462 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
463 ARGS_ENTRY(struct buffer, args.query.rule),
464 ARGS_ENTRY(struct buffer, port)),
469 .help = "list existing flow rules",
470 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
471 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
474 /* Destroy arguments. */
477 .help = "specify a rule identifier",
478 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
479 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
480 .call = parse_destroy,
482 /* Query arguments. */
486 .help = "action to query, must be part of the rule",
487 .call = parse_action,
490 /* List arguments. */
493 .help = "specify a group",
494 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
495 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
498 /* Validate/create attributes. */
501 .help = "specify a group",
502 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
503 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
508 .help = "specify a priority level",
509 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
510 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
515 .help = "affect rule to ingress",
516 .next = NEXT(next_vc_attr),
521 .help = "affect rule to egress",
522 .next = NEXT(next_vc_attr),
525 /* Validate/create pattern. */
528 .help = "submit a list of pattern items",
529 .next = NEXT(next_item),
534 .help = "match value perfectly (with full bit-mask)",
535 .call = parse_vc_spec,
537 [ITEM_PARAM_SPEC] = {
539 .help = "match value according to configured bit-mask",
540 .call = parse_vc_spec,
542 [ITEM_PARAM_LAST] = {
544 .help = "specify upper bound to establish a range",
545 .call = parse_vc_spec,
547 [ITEM_PARAM_MASK] = {
549 .help = "specify bit-mask with relevant bits set to one",
550 .call = parse_vc_spec,
552 [ITEM_PARAM_PREFIX] = {
554 .help = "generate bit-mask from a prefix length",
555 .call = parse_vc_spec,
559 .help = "specify next pattern item",
560 .next = NEXT(next_item),
564 .help = "end list of pattern items",
565 .priv = PRIV_ITEM(END, 0),
566 .next = NEXT(NEXT_ENTRY(ACTIONS)),
571 .help = "no-op pattern item",
572 .priv = PRIV_ITEM(VOID, 0),
573 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
578 .help = "perform actions when pattern does not match",
579 .priv = PRIV_ITEM(INVERT, 0),
580 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
583 /* Validate/create actions. */
586 .help = "submit a list of associated actions",
587 .next = NEXT(next_action),
592 .help = "specify next action",
593 .next = NEXT(next_action),
597 .help = "end list of actions",
598 .priv = PRIV_ACTION(END, 0),
603 .help = "no-op action",
604 .priv = PRIV_ACTION(VOID, 0),
605 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
608 [ACTION_PASSTHRU] = {
610 .help = "let subsequent rule process matched packets",
611 .priv = PRIV_ACTION(PASSTHRU, 0),
612 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
617 /** Remove and return last entry from argument stack. */
/* Returns NULL when the stack is empty. NOTE(review): function braces are
 * elided in this extract. */
618 static const struct arg *
619 pop_args(struct context *ctx)
621 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
624 /** Add entry on top of the argument stack. */
/* NOTE(review): return type, braces and the stack-full return statement
 * are elided; the guard against exceeding CTX_STACK_SIZE is visible. */
626 push_args(struct context *ctx, const struct arg *arg)
628 if (ctx->args_num == CTX_STACK_SIZE)
630 ctx->args[ctx->args_num++] = arg;
634 /** Spread value into buffer according to bit-mask. */
/* Writes val bit-by-bit into dst at positions where arg->mask has a set
 * bit, handling byte order per the RTE_BYTE_ORDER conditional below.
 * NOTE(review): large parts of the body (loop structure, return value,
 * the big-endian branch) are elided in this extract. */
636 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
638 uint32_t i = arg->size;
646 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
655 unsigned int shift = 0;
656 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan mask byte i: clear then set each masked bit from val's LSBs. */
658 for (shift = 0; arg->mask[i] >> shift; ++shift) {
659 if (!(arg->mask[i] & (1 << shift)))
664 *buf &= ~(1 << shift);
665 *buf |= (val & 1) << shift;
674 * Parse a prefix length and generate a bit-mask.
676 * Last argument (ctx->args) is retrieved to determine mask size, storage
677 * location and whether the result must use network byte ordering.
/* NOTE(review): heavily elided — return type, variable declarations
 * (u, v, end, bytes, extra), error paths and return statements are
 * missing from this extract. */
680 parse_prefix(struct context *ctx, const struct token *token,
681 const char *str, unsigned int len,
682 void *buf, unsigned int size)
684 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set (n = 0..8),
 * used below for the partial trailing byte of the mask. */
685 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
692 /* Argument is expected. */
696 u = strtoumax(str, &end, 0);
697 if (errno || (size_t)(end - str) != len)
/* When arg->mask is set, delegate mask generation to the bit-field path. */
702 extra = arg_entry_bf_fill(NULL, 0, arg);
711 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
712 !arg_entry_bf_fill(ctx->objmask, -1, arg))
719 if (bytes > size || bytes + !!extra > size)
723 buf = (uint8_t *)ctx->object + arg->offset;
724 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host storing a host-order value: fill from the top. */
726 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
727 memset(buf, 0x00, size - bytes);
729 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network byte order (or big-endian host): fill from the bottom. */
733 memset(buf, 0xff, bytes);
734 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
736 ((uint8_t *)buf)[bytes] = conv[extra];
/* A prefix always yields a full mask in objmask when present. */
739 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
746 /** Default parsing function for token name matching. */
/* Compares str against token->name for len bytes; used as the fallback
 * when a token has no .call callback. NOTE(review): return type, braces
 * and return statements are elided in this extract. */
748 parse_default(struct context *ctx, const struct token *token,
749 const char *str, unsigned int len,
750 void *buf, unsigned int size)
755 if (strncmp(str, token->name, len))
760 /** Parse flow command, initialize output buffer for subsequent tokens. */
/* Entry point for the "flow" token: zeroes the struct buffer portion of
 * buf and poisons the remainder with 0x22 so stale data is detectable.
 * NOTE(review): return type, braces, error-return statements and the
 * final return are elided in this extract. */
762 parse_init(struct context *ctx, const struct token *token,
763 const char *str, unsigned int len,
764 void *buf, unsigned int size)
766 struct buffer *out = buf;
768 /* Token name must match. */
769 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
771 /* Nothing else to do if there is no buffer. */
774 /* Make sure buffer is large enough. */
775 if (size < sizeof(*out))
777 /* Initialize buffer. */
778 memset(out, 0x00, sizeof(*out))
779 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
786 /** Parse tokens for validate/create commands. */
/* Shared .call for VALIDATE/CREATE and their attribute/pattern/action
 * sub-tokens. Pattern items and actions are appended upward from the end
 * of the struct buffer while their spec/last/mask or configuration data
 * grows downward from args.vc.data (initialized to buf end); overlap is
 * treated as out-of-space. NOTE(review): heavily elided — switch/case
 * labels, braces, returns and several assignments are missing here. */
788 parse_vc(struct context *ctx, const struct token *token,
789 const char *str, unsigned int len,
790 void *buf, unsigned int size)
792 struct buffer *out = buf;
796 /* Token name must match. */
797 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
799 /* Nothing else to do if there is no buffer. */
803 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
805 if (sizeof(*out) > size)
807 out->command = ctx->curr;
/* Scratch data area starts at the very end of the output buffer. */
811 out->args.vc.data = (uint8_t *)out + size;
815 ctx->object = &out->args.vc.attr;
822 out->args.vc.attr.ingress = 1;
825 out->args.vc.attr.egress = 1;
/* PATTERN token: items array starts right after the buffer header. */
828 out->args.vc.pattern =
829 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
831 ctx->object = out->args.vc.pattern;
/* ACTIONS token: actions array starts after the last pattern item. */
835 out->args.vc.actions =
836 (void *)RTE_ALIGN_CEIL((uintptr_t)
837 (out->args.vc.pattern +
838 out->args.vc.pattern_n),
840 ctx->object = out->args.vc.actions;
/* Otherwise this is a pattern item or an action entry. */
848 if (!out->args.vc.actions) {
849 const struct parse_item_priv *priv = token->priv;
850 struct rte_flow_item *item =
851 out->args.vc.pattern + out->args.vc.pattern_n;
853 data_size = priv->size * 3; /* spec, last, mask */
854 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
855 (out->args.vc.data - data_size),
/* Item metadata growing up must not collide with data growing down. */
857 if ((uint8_t *)item + sizeof(*item) > data)
859 *item = (struct rte_flow_item){
862 ++out->args.vc.pattern_n;
866 const struct parse_action_priv *priv = token->priv;
867 struct rte_flow_action *action =
868 out->args.vc.actions + out->args.vc.actions_n;
870 data_size = priv->size; /* configuration */
871 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
872 (out->args.vc.data - data_size),
874 if ((uint8_t *)action + sizeof(*action) > data)
876 *action = (struct rte_flow_action){
879 ++out->args.vc.actions_n;
880 ctx->object = action;
883 memset(data, 0, data_size);
884 out->args.vc.data = data;
885 ctx->objdata = data_size;
889 /** Parse pattern item parameter type. */
/* Handles fix/spec/last/mask/prefix keywords: selects which third of the
 * current item's scratch data (spec, last, mask) subsequent values are
 * written to, and wires the selected pointer into the item.
 * NOTE(review): elided — the index variable declaration, switch header,
 * several case labels, breaks and return statements are missing. */
891 parse_vc_spec(struct context *ctx, const struct token *token,
892 const char *str, unsigned int len,
893 void *buf, unsigned int size)
895 struct buffer *out = buf;
896 struct rte_flow_item *item;
902 /* Token name must match. */
903 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
905 /* Parse parameter types. */
911 case ITEM_PARAM_SPEC:
914 case ITEM_PARAM_LAST:
917 case ITEM_PARAM_PREFIX:
918 /* Modify next token to expect a prefix. */
919 if (ctx->next_num < 2)
921 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
923 case ITEM_PARAM_MASK:
929 /* Nothing else to do if there is no buffer. */
932 if (!out->args.vc.pattern_n)
934 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
935 data_size = ctx->objdata / 3; /* spec, last, mask */
936 /* Point to selected object. */
937 ctx->object = out->args.vc.data + (data_size * index);
/* Mask third is always tracked so values can mirror into it. */
939 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
940 item->mask = ctx->objmask;
943 /* Update relevant item pointer. */
944 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
949 /** Parse tokens for destroy command. */
/* First invocation (DESTROY) initializes the output buffer; subsequent
 * invocations (rule tokens) append rule IDs after the buffer header,
 * bounds-checked against the buffer size. NOTE(review): return type,
 * braces, returns and the "if (!out->command)" guard are elided. */
951 parse_destroy(struct context *ctx, const struct token *token,
952 const char *str, unsigned int len,
953 void *buf, unsigned int size)
955 struct buffer *out = buf;
957 /* Token name must match. */
958 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
960 /* Nothing else to do if there is no buffer. */
964 if (ctx->curr != DESTROY)
966 if (sizeof(*out) > size)
968 out->command = ctx->curr;
/* Rule-ID array lives right after the struct buffer header. */
972 out->args.destroy.rule =
973 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to append past the end of the supplied buffer. */
977 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
978 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
981 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
986 /** Parse tokens for flush command. */
/* FLUSH takes no extra arguments beyond the port; this only validates the
 * token and records the command. NOTE(review): return type, braces and
 * return statements are elided in this extract. */
988 parse_flush(struct context *ctx, const struct token *token,
989 const char *str, unsigned int len,
990 void *buf, unsigned int size)
992 struct buffer *out = buf;
994 /* Token name must match. */
995 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
997 /* Nothing else to do if there is no buffer. */
1000 if (!out->command) {
1001 if (ctx->curr != FLUSH)
1003 if (sizeof(*out) > size)
1005 out->command = ctx->curr;
1008 ctx->objmask = NULL;
1013 /** Parse tokens for query command. */
/* Same shape as parse_flush(); the rule ID and action to query are filled
 * in through the ARGS() entries of the QUERY token. NOTE(review): same
 * elisions as above. */
1015 parse_query(struct context *ctx, const struct token *token,
1016 const char *str, unsigned int len,
1017 void *buf, unsigned int size)
1019 struct buffer *out = buf;
1021 /* Token name must match. */
1022 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1024 /* Nothing else to do if there is no buffer. */
1027 if (!out->command) {
1028 if (ctx->curr != QUERY)
1030 if (sizeof(*out) > size)
1032 out->command = ctx->curr;
1035 ctx->objmask = NULL;
1040 /** Parse action names. */
/* Matches str against the names of tokens listed in next_action[] and
 * copies the matching action type (from its PRIV_ACTION data) into the
 * field described by the popped arg. On failure the arg is pushed back so
 * a later token can consume it. NOTE(review): return type, braces, the
 * match-length check and returns are elided. */
1042 parse_action(struct context *ctx, const struct token *token,
1043 const char *str, unsigned int len,
1044 void *buf, unsigned int size)
1046 struct buffer *out = buf;
1047 const struct arg *arg = pop_args(ctx);
1051 /* Argument is expected. */
1054 /* Parse action name. */
1055 for (i = 0; next_action[i]; ++i) {
1056 const struct parse_action_priv *priv;
1058 token = &token_list[next_action[i]];
1059 if (strncmp(token->name, str, len))
1065 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument for the caller. */
1071 push_args(ctx, arg);
1075 /** Parse tokens for list command. */
/* First invocation (LIST) initializes the buffer; subsequent "group"
 * tokens append group IDs after the header, bounds-checked like
 * parse_destroy(). NOTE(review): return type, braces and returns are
 * elided in this extract. */
1077 parse_list(struct context *ctx, const struct token *token,
1078 const char *str, unsigned int len,
1079 void *buf, unsigned int size)
1081 struct buffer *out = buf;
1083 /* Token name must match. */
1084 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1086 /* Nothing else to do if there is no buffer. */
1089 if (!out->command) {
1090 if (ctx->curr != LIST)
1092 if (sizeof(*out) > size)
1094 out->command = ctx->curr;
1097 ctx->objmask = NULL;
/* Group-ID array lives right after the struct buffer header. */
1098 out->args.list.group =
1099 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1103 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1104 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1107 ctx->object = out->args.list.group + out->args.list.group_n++;
1108 ctx->objmask = NULL;
1113 * Parse signed/unsigned integers 8 to 64-bit long.
1115 * Last argument (ctx->args) is retrieved to determine integer type and
/* Stores the parsed value into ctx->object (and mirrors an all-ones value
 * into ctx->objmask when set) at arg->offset, honoring arg->sign, arg->hton
 * and arg->mask. NOTE(review): return type, braces, declarations of u/end,
 * error paths and the switch default are elided. */
1119 parse_int(struct context *ctx, const struct token *token,
1120 const char *str, unsigned int len,
1121 void *buf, unsigned int size)
1123 const struct arg *arg = pop_args(ctx);
1128 /* Argument is expected. */
/* Signed vs unsigned conversion selected by arg->sign. */
1133 (uintmax_t)strtoimax(str, &end, 0) :
1134 strtoumax(str, &end, 0);
1135 if (errno || (size_t)(end - str) != len)
/* Bit-field targets go through the mask-spreading helper instead. */
1140 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1141 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1145 buf = (uint8_t *)ctx->object + arg->offset;
1149 case sizeof(uint8_t):
1150 *(uint8_t *)buf = u;
1152 case sizeof(uint16_t):
1153 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
1155 case sizeof(uint32_t):
1156 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1158 case sizeof(uint64_t):
1159 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask when it is a distinct location. */
1164 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1166 buf = (uint8_t *)ctx->objmask + arg->offset;
/* On error, restore the argument for a later attempt. */
1171 push_args(ctx, arg);
1175 /** Parse port and update context. */
/* Wraps parse_int() around a temporary struct buffer so the parsed port
 * ID can be copied into ctx->port for later completions. NOTE(review):
 * return type, braces, the arg setup for parse_int and the return of ret
 * are elided in this extract. */
1177 parse_port(struct context *ctx, const struct token *token,
1178 const char *str, unsigned int len,
1179 void *buf, unsigned int size)
1181 struct buffer *out = &(struct buffer){ .port = 0 };
1189 ctx->objmask = NULL;
1190 size = sizeof(*out);
1192 ret = parse_int(ctx, token, str, len, out, size);
1194 ctx->port = out->port;
1200 /** No completion. */
/* Stub .comp callback for tokens whose value cannot be completed.
 * NOTE(review): return type, braces and body are elided here. */
1202 comp_none(struct context *ctx, const struct token *token,
1203 unsigned int ent, char *buf, unsigned int size)
1213 /** Complete action names. */
/* Iterates next_action[]; when buf is set, writes entry #ent's token name
 * into it via snprintf(). NOTE(review): the i declaration, braces and the
 * final "return i" are elided. */
1215 comp_action(struct context *ctx, const struct token *token,
1216 unsigned int ent, char *buf, unsigned int size)
1222 for (i = 0; next_action[i]; ++i)
1223 if (buf && i == ent)
1224 return snprintf(buf, size, "%s",
1225 token_list[next_action[i]].name);
1231 /** Complete available ports. */
/* Walks the global ports list (testpmd FOREACH_PORT) and prints port IDs.
 * NOTE(review): declarations, braces and the count return are elided. */
1233 comp_port(struct context *ctx, const struct token *token,
1234 unsigned int ent, char *buf, unsigned int size)
1241 FOREACH_PORT(p, ports) {
1242 if (buf && i == ent)
1243 return snprintf(buf, size, "%u", p);
1251 /** Complete available rule IDs. */
/* Walks the flow list of the port recorded in ctx->port, skipping invalid
 * or wildcard port IDs. NOTE(review): the count return and braces are
 * elided in this extract. */
1253 comp_rule_id(struct context *ctx, const struct token *token,
1254 unsigned int ent, char *buf, unsigned int size)
1257 struct rte_port *port;
1258 struct port_flow *pf;
1261 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
1262 ctx->port == (uint16_t)RTE_PORT_ALL)
1264 port = &ports[ctx->port];
1265 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1266 if (buf && i == ent)
1267 return snprintf(buf, size, "%u", pf->id);
1275 /** Internal context. */
1276 static struct context cmd_flow_context;
1278 /** Global parser instance (cmdline API). */
1279 cmdline_parse_inst_t cmd_flow;
1281 /** Initialize context. */
/* Resets the fields cmd_flow_parse() relies on between commands.
 * NOTE(review): most field resets of this function are elided; only the
 * objmask reset is visible. */
1283 cmd_flow_context_init(struct context *ctx)
1285 /* A full memset() is not necessary. */
1296 ctx->objmask = NULL;
1299 /** Parse a token (cmdline API). */
/* Main per-token entry point: measures the current argument, detects
 * EOL/last-argument, tries each candidate token from the top of the next[]
 * stack, then pushes that token's own next/args lists. NOTE(review):
 * return type, braces, variable declarations (i, len, tmp), the reparse
 * check and return statements are elided in this extract. */
1301 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
1304 struct context *ctx = &cmd_flow_context;
1305 const struct token *token;
1306 const enum index *list;
1311 /* Restart as requested. */
1313 cmd_flow_context_init(ctx);
1314 token = &token_list[ctx->curr];
1315 /* Check argument length. */
1318 for (len = 0; src[len]; ++len)
1319 if (src[len] == '#' || isspace(src[len]))
1323 /* Last argument and EOL detection. */
1324 for (i = len; src[i]; ++i)
1325 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
1327 else if (!isspace(src[i])) {
1332 if (src[i] == '\r' || src[i] == '\n') {
1336 /* Initialize context if necessary. */
1337 if (!ctx->next_num) {
1340 ctx->next[ctx->next_num++] = token->next[0];
1342 /* Process argument through candidates. */
1343 ctx->prev = ctx->curr;
1344 list = ctx->next[ctx->next_num - 1];
1345 for (i = 0; list[i]; ++i) {
1346 const struct token *next = &token_list[list[i]];
1349 ctx->curr = list[i];
/* Prefer the token's own callback; fall back to name matching. */
1351 tmp = next->call(ctx, next, src, len, result, size);
1353 tmp = parse_default(ctx, next, src, len, result, size);
1354 if (tmp == -1 || tmp != len)
1362 /* Push subsequent tokens if any. */
1364 for (i = 0; token->next[i]; ++i) {
1365 if (ctx->next_num == RTE_DIM(ctx->next))
1367 ctx->next[ctx->next_num++] = token->next[i];
1369 /* Push arguments if any. */
1371 for (i = 0; token->args[i]; ++i) {
1372 if (ctx->args_num == RTE_DIM(ctx->args))
1374 ctx->args[ctx->args_num++] = token->args[i];
1379 /** Return number of completion entries (cmdline API). */
/* Counts candidate tokens on top of the next[] stack; when a single token
 * with a .comp callback remains, delegates the count to it.
 * NOTE(review): return type, braces, the i declaration, the reparse flag
 * set and the final return are elided in this extract. */
1381 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
1383 struct context *ctx = &cmd_flow_context;
1384 const struct token *token = &token_list[ctx->curr];
1385 const enum index *list;
1389 /* Tell cmd_flow_parse() that context must be reinitialized. */
1391 /* Count number of tokens in current list. */
1393 list = ctx->next[ctx->next_num - 1];
1395 list = token->next[0];
1396 for (i = 0; list[i]; ++i)
1401 * If there is a single token, use its completion callback, otherwise
1402 * return the number of entries.
1404 token = &token_list[list[0]];
1405 if (i == 1 && token->comp) {
1406 /* Save index for cmd_flow_get_help(). */
1407 ctx->prev = list[0];
1408 return token->comp(ctx, token, 0, NULL, 0);
1413 /** Return a completion entry (cmdline API). */
/* Mirrors cmd_flow_complete_get_nb() but writes entry #index into dst —
 * via the single token's .comp callback when available, otherwise the
 * token name. NOTE(review): return type, braces, the i declaration, the
 * index bounds check and return statements are elided. */
1415 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
1416 char *dst, unsigned int size)
1418 struct context *ctx = &cmd_flow_context;
1419 const struct token *token = &token_list[ctx->curr];
1420 const enum index *list;
1424 /* Tell cmd_flow_parse() that context must be reinitialized. */
1426 /* Count number of tokens in current list. */
1428 list = ctx->next[ctx->next_num - 1];
1430 list = token->next[0];
1431 for (i = 0; list[i]; ++i)
1435 /* If there is a single token, use its completion callback. */
1436 token = &token_list[list[0]];
1437 if (i == 1 && token->comp) {
1438 /* Save index for cmd_flow_get_help(). */
1439 ctx->prev = list[0];
1440 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
1442 /* Otherwise make sure the index is valid and use defaults. */
1445 token = &token_list[list[index]];
1446 snprintf(dst, size, "%s", token->name);
1447 /* Save index for cmd_flow_get_help(). */
1448 ctx->prev = list[index];
1452 /** Populate help strings for current token (cmdline API). */
/* Uses the token saved in ctx->prev by the completion callbacks; writes
 * its .type into dst and points cmd_flow.help_str at its .help (or .name
 * as fallback). NOTE(review): return type, braces, the reparse flag set
 * and return statement are elided in this extract. */
1454 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
1456 struct context *ctx = &cmd_flow_context;
1457 const struct token *token = &token_list[ctx->prev];
1460 /* Tell cmd_flow_parse() that context must be reinitialized. */
1464 /* Set token type and update global help with details. */
1465 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
1467 cmd_flow.help_str = token->help;
1469 cmd_flow.help_str = token->name;
1473 /** Token definition template (cmdline API). */
/* Single shared header returned for every dynamic token; all cmdline
 * callbacks funnel through the four functions above.
 * NOTE(review): the closing "}," of the ops initializer and of the struct
 * itself are elided in this extract. */
1474 static struct cmdline_token_hdr cmd_flow_token_hdr = {
1475 .ops = &(struct cmdline_token_ops){
1476 .parse = cmd_flow_parse,
1477 .complete_get_nb = cmd_flow_complete_get_nb,
1478 .complete_get_elt = cmd_flow_complete_get_elt,
1479 .get_help = cmd_flow_get_help,
1484 /** Populate the next dynamic token. */
/* Supplies cmd_flow_token_hdr while more tokens are expected; also decides
 * whether the command may legally end at the current position (EOL seen,
 * last argument, candidates remaining). NOTE(review): return type, braces,
 * the hdr-NULL early path and part of the end-detection loop body are
 * elided in this extract. */
1486 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
1487 cmdline_parse_token_hdr_t *(*hdrs)[])
1489 struct context *ctx = &cmd_flow_context;
1491 /* Always reinitialize context before requesting the first token. */
1493 cmd_flow_context_init(ctx);
1494 /* Return NULL when no more tokens are expected. */
1495 if (!ctx->next_num && ctx->curr) {
1499 /* Determine if command should end here. */
1500 if (ctx->eol && ctx->last && ctx->next_num) {
1501 const enum index *list = ctx->next[ctx->next_num - 1];
1504 for (i = 0; list[i]; ++i) {
1511 *hdr = &cmd_flow_token_hdr;
1514 /** Dispatch parsed buffer to function calls. */
/* Maps the completed struct buffer onto the port_flow_* backends in
 * testpmd's config layer. NOTE(review): the case labels (VALIDATE, CREATE,
 * DESTROY, FLUSH, QUERY, LIST), break statements, default case and closing
 * braces are elided in this extract. */
1516 cmd_flow_parsed(const struct buffer *in)
1518 switch (in->command) {
1520 port_flow_validate(in->port, &in->args.vc.attr,
1521 in->args.vc.pattern, in->args.vc.actions);
1524 port_flow_create(in->port, &in->args.vc.attr,
1525 in->args.vc.pattern, in->args.vc.actions);
1528 port_flow_destroy(in->port, in->args.destroy.rule_n,
1529 in->args.destroy.rule);
1532 port_flow_flush(in->port);
1535 port_flow_query(in->port, in->args.query.rule,
1536 in->args.query.action);
1539 port_flow_list(in->port, in->args.list.group_n,
1540 in->args.list.group);
1547 /** Token generator and output processing callback (cmdline API). */
/* Double-duty .f callback: generates tokens via cmd_flow_tok() and, once a
 * command is complete, dispatches the result via cmd_flow_parsed().
 * NOTE(review): braces and the branch selecting between the two calls are
 * elided in this extract. */
1549 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
1552 cmd_flow_tok(arg0, arg2);
1554 cmd_flow_parsed(arg0);
1557 /** Global parser instance (cmdline API). */
/* Definition of the cmd_flow instance declared earlier; help_str is filled
 * lazily by cmd_flow_get_help(). NOTE(review): the .f assignment and
 * closing brace of the initializer are elided in this extract. */
1558 cmdline_parse_inst_t cmd_flow = {
1560 .data = NULL, /**< Unused. */
1561 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
1564 }, /**< Tokens are returned by cmd_flow_tok(). */