4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_common.h>
43 #include <rte_ethdev.h>
44 #include <rte_byteorder.h>
45 #include <cmdline_parse.h>
50 /** Parser token indices. */
65 /* Top-level command. */
68 /* Sub-level commands. */
76 /* Destroy arguments. */
79 /* Query arguments. */
85 /* Validate/create arguments. */
91 /* Validate/create pattern. */
110 /* Validate/create actions. */
118 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Bounds both ctx->next[] and ctx->args[] in struct context below. */
119 #define CTX_STACK_SIZE 16
121 /** Parser context. */
/*
 * State carried across successive cmd_flow_parse() calls while a single
 * command line is being tokenized.  A single static instance exists
 * (cmd_flow_context), so the parser is not reentrant.
 */
123 /** Stack of subsequent token lists to process. */
124 const enum index *next[CTX_STACK_SIZE];
125 /** Arguments for stacked tokens. */
126 const void *args[CTX_STACK_SIZE];
127 enum index curr; /**< Current token index. */
128 enum index prev; /**< Index of the last token seen. */
129 int next_num; /**< Number of entries in next[]. */
130 int args_num; /**< Number of entries in args[]. */
131 uint32_t reparse:1; /**< Start over from the beginning. */
132 uint32_t eol:1; /**< EOL has been detected. */
133 uint32_t last:1; /**< No more arguments. */
134 uint16_t port; /**< Current port ID (for completions). */
135 uint32_t objdata; /**< Object-specific data. */
136 void *object; /**< Address of current object for relative offsets. */
137 void *objmask; /**< Object a full mask must be written to. */
140 /** Token argument. */
/*
 * Describes where a parsed value must be stored relative to ctx->object.
 * When mask is non-NULL, arg_entry_bf_fill() spreads the value bit by bit
 * according to that mask instead of using offset/size directly.
 */
142 uint32_t hton:1; /**< Use network byte ordering. */
143 uint32_t sign:1; /**< Value is signed. */
144 uint32_t offset; /**< Relative offset from ctx->object. */
145 uint32_t size; /**< Field size. */
146 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
149 /** Parser token definition. */
/*
 * One entry of token_list[].  Tokens form a graph: "next" lists drive
 * cmd_flow_parse() through valid command sequences, while "call"/"comp"
 * override the default name-matching and completion behavior.
 */
151 /** Type displayed during completion (defaults to "TOKEN"). */
153 /** Help displayed during completion (defaults to token name). */
155 /** Private data used by parser functions. */
158 * Lists of subsequent tokens to push on the stack. Each call to the
159 * parser consumes the last entry of that stack.
161 const enum index *const *next;
162 /** Arguments stack for subsequent tokens that need them. */
163 const struct arg *const *args;
165 * Token-processing callback, returns -1 in case of error, the
166 * length of the matched string otherwise. If NULL, attempts to
167 * match the token name.
169 * If buf is not NULL, the result should be stored in it according
170 * to context. An error is returned if not large enough.
172 int (*call)(struct context *ctx, const struct token *token,
173 const char *str, unsigned int len,
174 void *buf, unsigned int size);
176 * Callback that provides possible values for this token, used for
177 * completion. Returns -1 in case of error, the number of possible
178 * values otherwise. If NULL, the token name is used.
180 * If buf is not NULL, entry index ent is written to buf and the
181 * full length of the entry is returned (same behavior as
184 int (*comp)(struct context *ctx, const struct token *token,
185 unsigned int ent, char *buf, unsigned int size);
186 /** Mandatory token name, no default value. */
190 /** Static initializer for the next field. */
/* NEXT() lists are NULL-terminated; NEXT_ENTRY() lists are ZERO-terminated
 * (both terminators are what the iteration loops below stop on). */
191 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
193 /** Static initializer for a NEXT() entry. */
194 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
196 /** Static initializer for the args field. */
197 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
199 /** Static initializer for ARGS() to target a field. */
/* Uses a compound literal so each ARGS_ENTRY*() yields a pointer to a
 * static const struct arg describing the destination field. */
200 #define ARGS_ENTRY(s, f) \
201 (&(const struct arg){ \
202 .offset = offsetof(s, f), \
203 .size = sizeof(((s *)0)->f), \
206 /** Static initializer for ARGS() to target a bit-field. */
207 #define ARGS_ENTRY_BF(s, f, b) \
208 (&(const struct arg){ \
210 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
213 /** Static initializer for ARGS() to target a pointer. */
214 #define ARGS_ENTRY_PTR(s, f) \
215 (&(const struct arg){ \
216 .size = sizeof(*((s *)0)->f), \
219 /** Parser output buffer layout expected by cmd_flow_parsed(). */
/*
 * Fixed header (command, port) followed by per-command arguments; the vc,
 * destroy, query and list members appear to share storage (presumably a
 * union keyed by "command" -- TODO confirm against the elided declaration).
 */
221 enum index command; /**< Flow command. */
222 uint16_t port; /**< Affected port ID. */
225 struct rte_flow_attr attr;
226 struct rte_flow_item *pattern;
227 struct rte_flow_action *actions;
231 } vc; /**< Validate/create arguments. */
235 } destroy; /**< Destroy arguments. */
238 enum rte_flow_action_type action;
239 } query; /**< Query arguments. */
243 } list; /**< List arguments. */
244 } args; /**< Command arguments. */
247 /** Private data for pattern items. */
/* Stored in token->priv; parse_vc() reads it to size the spec/last/mask
 * storage for the current pattern item. */
248 struct parse_item_priv {
249 enum rte_flow_item_type type; /**< Item type. */
250 uint32_t size; /**< Size of item specification structure. */
253 #define PRIV_ITEM(t, s) \
254 (&(const struct parse_item_priv){ \
255 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
259 /** Private data for actions. */
/* Counterpart of parse_item_priv for actions; parse_vc() and
 * parse_action() read it to size/identify the action configuration. */
260 struct parse_action_priv {
261 enum rte_flow_action_type type; /**< Action type. */
262 uint32_t size; /**< Size of action configuration structure. */
265 #define PRIV_ACTION(t, s) \
266 (&(const struct parse_action_priv){ \
267 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
/*
 * Token index lists used as NEXT() entries in token_list[] below.
 * Each array is terminated by ZERO (the loops in cmd_flow_parse() and the
 * completion helpers stop on a zero entry).
 */
271 static const enum index next_vc_attr[] = {
280 static const enum index next_destroy_attr[] = {
286 static const enum index next_list_attr[] = {
292 static const enum index item_param[] = {
301 static const enum index next_item[] = {
312 static const enum index item_any[] = {
318 static const enum index item_vf[] = {
324 static const enum index item_port[] = {
330 static const enum index next_action[] = {
/*
 * Forward declarations for token callbacks referenced by token_list[].
 * parse_* functions match struct token's "call" signature; comp_* match
 * the "comp" (completion) signature.
 */
337 static int parse_init(struct context *, const struct token *,
338 const char *, unsigned int,
339 void *, unsigned int);
340 static int parse_vc(struct context *, const struct token *,
341 const char *, unsigned int,
342 void *, unsigned int);
343 static int parse_vc_spec(struct context *, const struct token *,
344 const char *, unsigned int, void *, unsigned int);
345 static int parse_destroy(struct context *, const struct token *,
346 const char *, unsigned int,
347 void *, unsigned int);
348 static int parse_flush(struct context *, const struct token *,
349 const char *, unsigned int,
350 void *, unsigned int);
351 static int parse_query(struct context *, const struct token *,
352 const char *, unsigned int,
353 void *, unsigned int);
354 static int parse_action(struct context *, const struct token *,
355 const char *, unsigned int,
356 void *, unsigned int);
357 static int parse_list(struct context *, const struct token *,
358 const char *, unsigned int,
359 void *, unsigned int);
360 static int parse_int(struct context *, const struct token *,
361 const char *, unsigned int,
362 void *, unsigned int);
363 static int parse_prefix(struct context *, const struct token *,
364 const char *, unsigned int,
365 void *, unsigned int);
366 static int parse_port(struct context *, const struct token *,
367 const char *, unsigned int,
368 void *, unsigned int);
369 static int comp_none(struct context *, const struct token *,
370 unsigned int, char *, unsigned int);
371 static int comp_action(struct context *, const struct token *,
372 unsigned int, char *, unsigned int);
373 static int comp_port(struct context *, const struct token *,
374 unsigned int, char *, unsigned int);
375 static int comp_rule_id(struct context *, const struct token *,
376 unsigned int, char *, unsigned int);
378 /** Token definitions. */
/*
 * The complete token graph.  Entries are indexed by enum index; each one
 * wires a command-line word to its help text, follow-up tokens (next),
 * destination storage (args) and optional parse/completion callbacks.
 */
379 static const struct token token_list[] = {
380 /* Special tokens. */
383 .help = "null entry, abused as the entry point",
384 .next = NEXT(NEXT_ENTRY(FLOW)),
389 .help = "command may end here",
395 .help = "integer value",
400 .name = "{unsigned}",
402 .help = "unsigned integer value",
409 .help = "prefix length for bit-mask",
410 .call = parse_prefix,
416 .help = "rule identifier",
418 .comp = comp_rule_id,
423 .help = "port identifier",
428 .name = "{group_id}",
430 .help = "group identifier",
437 .help = "priority level",
441 /* Top-level command. */
444 .type = "{command} {port_id} [{arg} [...]]",
445 .help = "manage ingress/egress flow rules",
446 .next = NEXT(NEXT_ENTRY
455 /* Sub-level commands. */
458 .help = "check whether a flow rule can be created",
459 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
460 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
465 .help = "create a flow rule",
466 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
467 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
472 .help = "destroy specific flow rules",
473 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
474 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
475 .call = parse_destroy,
479 .help = "destroy all flow rules",
480 .next = NEXT(NEXT_ENTRY(PORT_ID)),
481 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
486 .help = "query an existing flow rule",
487 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
489 NEXT_ENTRY(PORT_ID)),
490 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
491 ARGS_ENTRY(struct buffer, args.query.rule),
492 ARGS_ENTRY(struct buffer, port)),
497 .help = "list existing flow rules",
498 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
499 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
502 /* Destroy arguments. */
505 .help = "specify a rule identifier",
506 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
507 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
508 .call = parse_destroy,
510 /* Query arguments. */
514 .help = "action to query, must be part of the rule",
515 .call = parse_action,
518 /* List arguments. */
521 .help = "specify a group",
522 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
523 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
526 /* Validate/create attributes. */
529 .help = "specify a group",
530 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
531 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
536 .help = "specify a priority level",
537 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
538 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
543 .help = "affect rule to ingress",
544 .next = NEXT(next_vc_attr),
549 .help = "affect rule to egress",
550 .next = NEXT(next_vc_attr),
553 /* Validate/create pattern. */
556 .help = "submit a list of pattern items",
557 .next = NEXT(next_item),
/* Item spec qualifiers: all share parse_vc_spec, which dispatches on
 * ctx->curr to pick spec/last/mask/prefix handling. */
562 .help = "match value perfectly (with full bit-mask)",
563 .call = parse_vc_spec,
565 [ITEM_PARAM_SPEC] = {
567 .help = "match value according to configured bit-mask",
568 .call = parse_vc_spec,
570 [ITEM_PARAM_LAST] = {
572 .help = "specify upper bound to establish a range",
573 .call = parse_vc_spec,
575 [ITEM_PARAM_MASK] = {
577 .help = "specify bit-mask with relevant bits set to one",
578 .call = parse_vc_spec,
580 [ITEM_PARAM_PREFIX] = {
582 .help = "generate bit-mask from a prefix length",
583 .call = parse_vc_spec,
587 .help = "specify next pattern item",
588 .next = NEXT(next_item),
592 .help = "end list of pattern items",
593 .priv = PRIV_ITEM(END, 0),
594 .next = NEXT(NEXT_ENTRY(ACTIONS)),
599 .help = "no-op pattern item",
600 .priv = PRIV_ITEM(VOID, 0),
601 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
606 .help = "perform actions when pattern does not match",
607 .priv = PRIV_ITEM(INVERT, 0),
608 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
613 .help = "match any protocol for the current layer",
614 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
615 .next = NEXT(item_any),
620 .help = "number of layers covered",
621 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
622 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
626 .help = "match packets addressed to the physical function",
627 .priv = PRIV_ITEM(PF, 0),
628 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
633 .help = "match packets addressed to a virtual function ID",
634 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
635 .next = NEXT(item_vf),
640 .help = "destination VF ID",
641 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
642 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
646 .help = "device-specific physical port index to use",
647 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
648 .next = NEXT(item_port),
651 [ITEM_PORT_INDEX] = {
653 .help = "physical port index",
654 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
655 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
657 /* Validate/create actions. */
660 .help = "submit a list of associated actions",
661 .next = NEXT(next_action),
666 .help = "specify next action",
667 .next = NEXT(next_action),
671 .help = "end list of actions",
672 .priv = PRIV_ACTION(END, 0),
677 .help = "no-op action",
678 .priv = PRIV_ACTION(VOID, 0),
679 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
682 [ACTION_PASSTHRU] = {
684 .help = "let subsequent rule process matched packets",
685 .priv = PRIV_ACTION(PASSTHRU, 0),
686 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
691 /** Remove and return last entry from argument stack. */
/* Returns NULL when the stack is empty; callers treat that as "no
 * argument expected here". */
692 static const struct arg *
693 pop_args(struct context *ctx)
695 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
698 /** Add entry on top of the argument stack. */
/* Fails when the stack is already full (CTX_STACK_SIZE entries);
 * NOTE(review): the error-return statement is elided here -- presumably
 * a non-zero/negative value, confirm against the full source. */
700 push_args(struct context *ctx, const struct arg *arg)
702 if (ctx->args_num == CTX_STACK_SIZE)
704 ctx->args[ctx->args_num++] = arg;
708 /** Spread value into buffer according to bit-mask. */
/*
 * Walks arg->mask byte by byte and copies successive bits of val into the
 * positions where the mask has a 1, honoring host endianness.  Callers also
 * invoke it with dst == NULL (see parse_prefix) to compute the bit width
 * without writing -- TODO confirm exact return semantics in full source.
 */
710 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
712 uint32_t i = arg->size;
720 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
729 unsigned int shift = 0;
730 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For each set bit in the current mask byte, deposit the next bit of
 * val; mask bits that are 0 are skipped and left untouched below. */
732 for (shift = 0; arg->mask[i] >> shift; ++shift) {
733 if (!(arg->mask[i] & (1 << shift)))
738 *buf &= ~(1 << shift);
739 *buf |= (val & 1) << shift;
748 * Parse a prefix length and generate a bit-mask.
750 * Last argument (ctx->args) is retrieved to determine mask size, storage
751 * location and whether the result must use network byte ordering.
754 parse_prefix(struct context *ctx, const struct token *token,
755 const char *str, unsigned int len,
756 void *buf, unsigned int size)
758 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set; used for the
 * partial (non-multiple-of-8) tail of the generated mask. */
759 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
766 /* Argument is expected. */
/* Whole token must be a valid number (base auto-detected by 0/0x). */
770 u = strtoumax(str, &end, 0);
771 if (errno || (size_t)(end - str) != len)
/* Bit-mask destination: delegate to arg_entry_bf_fill(). */
776 extra = arg_entry_bf_fill(NULL, 0, arg);
785 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
786 !arg_entry_bf_fill(ctx->objmask, -1, arg))
793 if (bytes > size || bytes + !!extra > size)
797 buf = (uint8_t *)ctx->object + arg->offset;
/* Network byte order: set bits start at the most-significant end, so
 * the layout differs between little- and big-endian hosts. */
798 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
800 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
801 memset(buf, 0x00, size - bytes);
803 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
807 memset(buf, 0xff, bytes);
808 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
810 ((uint8_t *)buf)[bytes] = conv[extra];
/* Full mask written to objmask when one is attached. */
813 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
820 /** Default parsing function for token name matching. */
/* Used when a token has no "call" callback: accept the input only if it
 * is a prefix-length match against token->name. */
822 parse_default(struct context *ctx, const struct token *token,
823 const char *str, unsigned int len,
824 void *buf, unsigned int size)
829 if (strncmp(str, token->name, len))
834 /** Parse flow command, initialize output buffer for subsequent tokens. */
836 parse_init(struct context *ctx, const struct token *token,
837 const char *str, unsigned int len,
838 void *buf, unsigned int size)
840 struct buffer *out = buf;
842 /* Token name must match. */
843 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
845 /* Nothing else to do if there is no buffer. */
848 /* Make sure buffer is large enough. */
849 if (size < sizeof(*out))
851 /* Initialize buffer. */
852 memset(out, 0x00, sizeof(*out))
/* 0x22 poison beyond the header makes use of uninitialized tail space
 * easier to spot during debugging. */
;
853 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
860 /** Parse tokens for validate/create commands. */
/*
 * Handles every token of the "validate"/"create" grammar.  Layout built in
 * the output buffer: pattern items and actions grow upward from just past
 * the struct buffer header (RTE_ALIGN_CEIL), while their spec/last/mask and
 * configuration payloads grow downward from the end of the buffer via
 * out->args.vc.data (RTE_ALIGN_FLOOR); the two cursors meeting means the
 * buffer is full.
 */
862 parse_vc(struct context *ctx, const struct token *token,
863 const char *str, unsigned int len,
864 void *buf, unsigned int size)
866 struct buffer *out = buf;
870 /* Token name must match. */
871 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
873 /* Nothing else to do if there is no buffer. */
877 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
879 if (sizeof(*out) > size)
881 out->command = ctx->curr;
/* data starts at the very end of the buffer and moves down. */
885 out->args.vc.data = (uint8_t *)out + size;
889 ctx->object = &out->args.vc.attr;
896 out->args.vc.attr.ingress = 1;
899 out->args.vc.attr.egress = 1;
/* PATTERN: place the item array right after the header. */
902 out->args.vc.pattern =
903 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
905 ctx->object = out->args.vc.pattern;
/* ACTIONS: action array starts right after the last pattern item. */
909 out->args.vc.actions =
910 (void *)RTE_ALIGN_CEIL((uintptr_t)
911 (out->args.vc.pattern +
912 out->args.vc.pattern_n),
914 ctx->object = out->args.vc.actions;
/* Default case: a pattern item or an action token. actions == NULL
 * means we are still in the pattern section. */
922 if (!out->args.vc.actions) {
923 const struct parse_item_priv *priv = token->priv;
924 struct rte_flow_item *item =
925 out->args.vc.pattern + out->args.vc.pattern_n;
927 data_size = priv->size * 3; /* spec, last, mask */
928 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
929 (out->args.vc.data - data_size),
/* Fail when the upward item array would collide with the
 * downward data area. */
931 if ((uint8_t *)item + sizeof(*item) > data)
933 *item = (struct rte_flow_item){
936 ++out->args.vc.pattern_n;
940 const struct parse_action_priv *priv = token->priv;
941 struct rte_flow_action *action =
942 out->args.vc.actions + out->args.vc.actions_n;
944 data_size = priv->size; /* configuration */
945 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
946 (out->args.vc.data - data_size),
948 if ((uint8_t *)action + sizeof(*action) > data)
950 *action = (struct rte_flow_action){
953 ++out->args.vc.actions_n;
954 ctx->object = action;
/* Zero the freshly reserved payload and expose it for follow-up
 * tokens via ctx->object / ctx->objdata. */
957 memset(data, 0, data_size);
958 out->args.vc.data = data;
959 ctx->objdata = data_size;
963 /** Parse pattern item parameter type. */
/*
 * Handles the fix/spec/last/mask/prefix qualifiers that follow a pattern
 * item.  Selects which third of the item's reserved payload (spec, last or
 * mask -- see parse_vc) the next value token writes into.
 */
965 parse_vc_spec(struct context *ctx, const struct token *token,
966 const char *str, unsigned int len,
967 void *buf, unsigned int size)
969 struct buffer *out = buf;
970 struct rte_flow_item *item;
976 /* Token name must match. */
977 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
979 /* Parse parameter types. */
985 case ITEM_PARAM_SPEC:
988 case ITEM_PARAM_LAST:
991 case ITEM_PARAM_PREFIX:
992 /* Modify next token to expect a prefix. */
993 if (ctx->next_num < 2)
995 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
997 case ITEM_PARAM_MASK:
1003 /* Nothing else to do if there is no buffer. */
1006 if (!out->args.vc.pattern_n)
/* Operate on the most recently emitted pattern item. */
1008 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1009 data_size = ctx->objdata / 3; /* spec, last, mask */
1010 /* Point to selected object. */
1011 ctx->object = out->args.vc.data + (data_size * index);
1013 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1014 item->mask = ctx->objmask;
1016 ctx->objmask = NULL;
1017 /* Update relevant item pointer. */
1018 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1023 /** Parse tokens for destroy command. */
/*
 * First invocation (out->command == 0) initializes the output buffer and
 * points the rule-ID array just past the header; each subsequent "rule"
 * token reserves one more entry, bounds-checked against the buffer end.
 */
1025 parse_destroy(struct context *ctx, const struct token *token,
1026 const char *str, unsigned int len,
1027 void *buf, unsigned int size)
1029 struct buffer *out = buf;
1031 /* Token name must match. */
1032 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1034 /* Nothing else to do if there is no buffer. */
1037 if (!out->command) {
1038 if (ctx->curr != DESTROY)
1040 if (sizeof(*out) > size)
1042 out->command = ctx->curr;
1045 ctx->objmask = NULL;
1046 out->args.destroy.rule =
1047 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject another rule ID if it would overflow the buffer. */
1051 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1052 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1055 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1056 ctx->objmask = NULL;
1060 /** Parse tokens for flush command. */
/* Simplest command: only the header needs initializing, no per-rule
 * arguments follow. */
1062 parse_flush(struct context *ctx, const struct token *token,
1063 const char *str, unsigned int len,
1064 void *buf, unsigned int size)
1066 struct buffer *out = buf;
1068 /* Token name must match. */
1069 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1071 /* Nothing else to do if there is no buffer. */
1074 if (!out->command) {
1075 if (ctx->curr != FLUSH)
1077 if (sizeof(*out) > size)
1079 out->command = ctx->curr;
1082 ctx->objmask = NULL;
1087 /** Parse tokens for query command. */
/* Initializes the output buffer on first call; the rule ID and action to
 * query are filled in later through the args stack (see token_list). */
1089 parse_query(struct context *ctx, const struct token *token,
1090 const char *str, unsigned int len,
1091 void *buf, unsigned int size)
1093 struct buffer *out = buf;
1095 /* Token name must match. */
1096 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1098 /* Nothing else to do if there is no buffer. */
1101 if (!out->command) {
1102 if (ctx->curr != QUERY)
1104 if (sizeof(*out) > size)
1106 out->command = ctx->curr;
1109 ctx->objmask = NULL;
1114 /** Parse action names. */
/*
 * Resolves an action name (e.g. for "query") by scanning next_action[]
 * tokens and copying the matching action type into the field described by
 * the popped arg.  On failure the arg is pushed back so the caller's
 * state is unchanged.
 */
1116 parse_action(struct context *ctx, const struct token *token,
1117 const char *str, unsigned int len,
1118 void *buf, unsigned int size)
1120 struct buffer *out = buf;
1121 const struct arg *arg = pop_args(ctx);
1125 /* Argument is expected. */
1128 /* Parse action name. */
1129 for (i = 0; next_action[i]; ++i) {
1130 const struct parse_action_priv *priv;
1132 token = &token_list[next_action[i]];
1133 if (strncmp(token->name, str, len))
1139 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument for a later retry. */
1145 push_args(ctx, arg);
1149 /** Parse tokens for list command. */
/* Mirrors parse_destroy: first call initializes the buffer and the group
 * array after the header; each "group" token reserves one more entry with
 * an end-of-buffer bounds check. */
1151 parse_list(struct context *ctx, const struct token *token,
1152 const char *str, unsigned int len,
1153 void *buf, unsigned int size)
1155 struct buffer *out = buf;
1157 /* Token name must match. */
1158 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1160 /* Nothing else to do if there is no buffer. */
1163 if (!out->command) {
1164 if (ctx->curr != LIST)
1166 if (sizeof(*out) > size)
1168 out->command = ctx->curr;
1171 ctx->objmask = NULL;
1172 out->args.list.group =
1173 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1177 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1178 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1181 ctx->object = out->args.list.group + out->args.list.group_n++;
1182 ctx->objmask = NULL;
1187 * Parse signed/unsigned integers 8 to 64-bit long.
1189 * Last argument (ctx->args) is retrieved to determine integer type and
1193 parse_int(struct context *ctx, const struct token *token,
1194 const char *str, unsigned int len,
1195 void *buf, unsigned int size)
1197 const struct arg *arg = pop_args(ctx);
1202 /* Argument is expected. */
/* arg->sign selects signed vs. unsigned conversion; base auto-detect. */
1207 (uintmax_t)strtoimax(str, &end, 0) :
1208 strtoumax(str, &end, 0);
1209 if (errno || (size_t)(end - str) != len)
/* Bit-mask destination: delegate to arg_entry_bf_fill(). */
1214 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1215 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1219 buf = (uint8_t *)ctx->object + arg->offset;
/* Store at native width; convert to big-endian when arg->hton is set. */
1223 case sizeof(uint8_t):
1224 *(uint8_t *)buf = u;
1226 case sizeof(uint16_t):
1227 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
1229 case sizeof(uint32_t):
1230 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1232 case sizeof(uint64_t):
1233 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask (full-ones mask) when present. */
1238 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1240 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument before failing. */
1245 push_args(ctx, arg);
1249 /** Parse port and update context. */
/*
 * Thin wrapper over parse_int() using a scratch on-stack buffer, whose
 * only extra effect is recording the parsed port in ctx->port so that
 * completion callbacks (comp_rule_id) know which port is being targeted.
 */
1251 parse_port(struct context *ctx, const struct token *token,
1252 const char *str, unsigned int len,
1253 void *buf, unsigned int size)
1255 struct buffer *out = &(struct buffer){ .port = 0 };
1263 ctx->objmask = NULL;
1264 size = sizeof(*out);
1266 ret = parse_int(ctx, token, str, len, out, size);
1268 ctx->port = out->port;
1274 /** No completion. */
/* Completion callback stub for tokens with nothing sensible to suggest. */
1276 comp_none(struct context *ctx, const struct token *token,
1277 unsigned int ent, char *buf, unsigned int size)
1287 /** Complete action names. */
/* With buf set, writes entry #ent; otherwise the loop falls through so
 * the total count can be returned (comp callback contract). */
1289 comp_action(struct context *ctx, const struct token *token,
1290 unsigned int ent, char *buf, unsigned int size)
1296 for (i = 0; next_action[i]; ++i)
1297 if (buf && i == ent)
1298 return snprintf(buf, size, "%s",
1299 token_list[next_action[i]].name);
1305 /** Complete available ports. */
/* Iterates testpmd's active port set; same count-vs-entry contract as
 * comp_action(). */
1307 comp_port(struct context *ctx, const struct token *token,
1308 unsigned int ent, char *buf, unsigned int size)
1315 FOREACH_PORT(p, ports) {
1316 if (buf && i == ent)
1317 return snprintf(buf, size, "%u", p);
1325 /** Complete available rule IDs. */
/*
 * Lists rule IDs installed on ctx->port (recorded by parse_port()).
 * Bails out when the port is invalid or RTE_PORT_ALL, since there is no
 * single flow list to walk in that case.
 */
1327 comp_rule_id(struct context *ctx, const struct token *token,
1328 unsigned int ent, char *buf, unsigned int size)
1331 struct rte_port *port;
1332 struct port_flow *pf;
1335 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
1336 ctx->port == (uint16_t)RTE_PORT_ALL)
1338 port = &ports[ctx->port];
1339 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
1340 if (buf && i == ent)
1341 return snprintf(buf, size, "%u", pf->id);
1349 /** Internal context. */
/* Single shared parser state: the cmdline callbacks below all operate on
 * this instance, so parsing is not reentrant. */
1350 static struct context cmd_flow_context;
1352 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the bottom of the file. */
1353 cmdline_parse_inst_t cmd_flow;
1355 /** Initialize context. */
1357 cmd_flow_context_init(struct context *ctx)
1359 /* A full memset() is not necessary. */
1370 ctx->objmask = NULL;
1373 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by the cmdline library once per token.  Determines
 * token length, EOL/last-argument state, then tries every candidate in the
 * current "next" list until one parser accepts the whole token, finally
 * pushing that token's own next/args lists for the following call.
 */
1375 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
1378 struct context *ctx = &cmd_flow_context;
1379 const struct token *token;
1380 const enum index *list;
1385 /* Restart as requested. */
1387 cmd_flow_context_init(ctx);
1388 token = &token_list[ctx->curr];
1389 /* Check argument length. */
/* NOTE(review): isspace() on a plain char can receive a negative value
 * for high-bit bytes; a cast to unsigned char would be safer. */
1392 for (len = 0; src[len]; ++len)
1393 if (src[len] == '#' || isspace(src[len]))
1397 /* Last argument and EOL detection. */
1398 for (i = len; src[i]; ++i)
1399 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
1401 else if (!isspace(src[i])) {
1406 if (src[i] == '\r' || src[i] == '\n') {
1410 /* Initialize context if necessary. */
1411 if (!ctx->next_num) {
1414 ctx->next[ctx->next_num++] = token->next[0];
1416 /* Process argument through candidates. */
1417 ctx->prev = ctx->curr;
1418 list = ctx->next[ctx->next_num - 1];
1419 for (i = 0; list[i]; ++i) {
1420 const struct token *next = &token_list[list[i]];
1423 ctx->curr = list[i];
/* Custom parser takes precedence over plain name matching. */
1425 tmp = next->call(ctx, next, src, len, result, size);
1427 tmp = parse_default(ctx, next, src, len, result, size);
1428 if (tmp == -1 || tmp != len)
1436 /* Push subsequent tokens if any. */
1438 for (i = 0; token->next[i]; ++i) {
1439 if (ctx->next_num == RTE_DIM(ctx->next))
1441 ctx->next[ctx->next_num++] = token->next[i];
1443 /* Push arguments if any. */
1445 for (i = 0; token->args[i]; ++i) {
1446 if (ctx->args_num == RTE_DIM(ctx->args))
1448 ctx->args[ctx->args_num++] = token->args[i];
1453 /** Return number of completion entries (cmdline API). */
/* Counts candidates in the current next-list; with exactly one candidate
 * that defines comp(), delegates so the token can enumerate dynamic values
 * (ports, rule IDs, actions). */
1455 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
1457 struct context *ctx = &cmd_flow_context;
1458 const struct token *token = &token_list[ctx->curr];
1459 const enum index *list;
1463 /* Tell cmd_flow_parse() that context must be reinitialized. */
1465 /* Count number of tokens in current list. */
1467 list = ctx->next[ctx->next_num - 1];
1469 list = token->next[0];
1470 for (i = 0; list[i]; ++i)
1475 * If there is a single token, use its completion callback, otherwise
1476 * return the number of entries.
1478 token = &token_list[list[0]];
1479 if (i == 1 && token->comp) {
1480 /* Save index for cmd_flow_get_help(). */
1481 ctx->prev = list[0];
1482 return token->comp(ctx, token, 0, NULL, 0);
1487 /** Return a completion entry (cmdline API). */
/* Same candidate discovery as cmd_flow_complete_get_nb(), but writes the
 * entry at position "index" into dst. */
1489 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
1490 char *dst, unsigned int size)
1492 struct context *ctx = &cmd_flow_context;
1493 const struct token *token = &token_list[ctx->curr];
1494 const enum index *list;
1498 /* Tell cmd_flow_parse() that context must be reinitialized. */
1500 /* Count number of tokens in current list. */
1502 list = ctx->next[ctx->next_num - 1];
1504 list = token->next[0];
1505 for (i = 0; list[i]; ++i)
1509 /* If there is a single token, use its completion callback. */
1510 token = &token_list[list[0]];
1511 if (i == 1 && token->comp) {
1512 /* Save index for cmd_flow_get_help(). */
1513 ctx->prev = list[0];
1514 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
1516 /* Otherwise make sure the index is valid and use defaults. */
1519 token = &token_list[list[index]];
1520 snprintf(dst, size, "%s", token->name);
1521 /* Save index for cmd_flow_get_help(). */
1522 ctx->prev = list[index];
1526 /** Populate help strings for current token (cmdline API). */
/* Writes the token's display type into dst and updates the global
 * cmd_flow.help_str with its per-token help text. */
1528 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
1530 struct context *ctx = &cmd_flow_context;
1531 const struct token *token = &token_list[ctx->prev];
1534 /* Tell cmd_flow_parse() that context must be reinitialized. */
1538 /* Set token type and update global help with details. */
1539 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
1541 cmd_flow.help_str = token->help;
1543 cmd_flow.help_str = token->name;
1547 /** Token definition template (cmdline API). */
/* Every dynamic token returned by cmd_flow_tok() shares this single ops
 * table; the four callbacks implement the whole grammar dynamically. */
1548 static struct cmdline_token_hdr cmd_flow_token_hdr = {
1549 .ops = &(struct cmdline_token_ops){
1550 .parse = cmd_flow_parse,
1551 .complete_get_nb = cmd_flow_complete_get_nb,
1552 .complete_get_elt = cmd_flow_complete_get_elt,
1553 .get_help = cmd_flow_get_help,
1558 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library to fetch token descriptors one at a time.
 * Ends the command (returns no token) once the token stack is exhausted,
 * or early when EOL was seen and an END-capable token is reachable.
 */
1560 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
1561 cmdline_parse_token_hdr_t *(*hdrs)[])
1563 struct context *ctx = &cmd_flow_context;
1565 /* Always reinitialize context before requesting the first token. */
1567 cmd_flow_context_init(ctx);
1568 /* Return NULL when no more tokens are expected. */
1569 if (!ctx->next_num && ctx->curr) {
1573 /* Determine if command should end here. */
1574 if (ctx->eol && ctx->last && ctx->next_num) {
1575 const enum index *list = ctx->next[ctx->next_num - 1];
1578 for (i = 0; list[i]; ++i) {
1585 *hdr = &cmd_flow_token_hdr;
1588 /** Dispatch parsed buffer to function calls. */
/* Maps the completed struct buffer to the matching port_flow_* backend
 * call; one case per top-level flow sub-command. */
1590 cmd_flow_parsed(const struct buffer *in)
1592 switch (in->command) {
1594 port_flow_validate(in->port, &in->args.vc.attr,
1595 in->args.vc.pattern, in->args.vc.actions);
1598 port_flow_create(in->port, &in->args.vc.attr,
1599 in->args.vc.pattern, in->args.vc.actions);
1602 port_flow_destroy(in->port, in->args.destroy.rule_n,
1603 in->args.destroy.rule);
1606 port_flow_flush(in->port);
1609 port_flow_query(in->port, in->args.query.rule,
1610 in->args.query.action);
1613 port_flow_list(in->port, in->args.list.group_n,
1614 in->args.list.group);
1621 /** Token generator and output processing callback (cmdline API). */
/* Doubles as the instance "f" callback: generates tokens during parsing
 * (cmd_flow_tok) and executes the result afterwards (cmd_flow_parsed). */
1623 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
1626 cmd_flow_tok(arg0, arg2);
1628 cmd_flow_parsed(arg0);
1631 /** Global parser instance (cmdline API). */
/* Registered with testpmd's command list; tokens are produced dynamically
 * rather than declared statically, hence the empty token table. */
1632 cmdline_parse_inst_t cmd_flow = {
1634 .data = NULL, /**< Unused. */
1635 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
1638 }, /**< Tokens are returned by cmd_flow_tok(). */