4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
43 #include <rte_common.h>
44 #include <rte_ethdev.h>
45 #include <rte_byteorder.h>
46 #include <cmdline_parse.h>
47 #include <cmdline_parse_etheraddr.h>
52 /** Parser token indices. */
72 /* Top-level command. */
75 /* Sub-level commands. */
83 /* Destroy arguments. */
86 /* Query arguments. */
92 /* Validate/create arguments. */
98 /* Validate/create pattern. */
150 /* Validate/create actions. */
158 /** Size of pattern[] field in struct rte_flow_item_raw. */
159 #define ITEM_RAW_PATTERN_SIZE 36
/* NOTE(review): 36 looks like an arbitrary parser-side storage budget,
 * not a protocol or hardware limit — confirm before changing. */
161 /** Storage size for struct rte_flow_item_raw including pattern. */
162 #define ITEM_RAW_SIZE \
163 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
165 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Bounds both next[] and args[] in struct context below. */
166 #define CTX_STACK_SIZE 16
168 /** Parser context. */
/*
 * Carries all mutable parser state between successive token callbacks:
 * the pending token lists (next[]), their pending arguments (args[]),
 * and the object/objmask pointers that arg-driven parsers (parse_int,
 * parse_string, ...) write results through.
 * NOTE(review): the `struct context {` opener is missing from this
 * excerpt.
 */
170 /** Stack of subsequent token lists to process. */
171 const enum index *next[CTX_STACK_SIZE];
172 /** Arguments for stacked tokens. */
173 const void *args[CTX_STACK_SIZE];
174 enum index curr; /**< Current token index. */
175 enum index prev; /**< Index of the last token seen. */
176 int next_num; /**< Number of entries in next[]. */
177 int args_num; /**< Number of entries in args[]. */
178 uint32_t reparse:1; /**< Start over from the beginning. */
179 uint32_t eol:1; /**< EOL has been detected. */
180 uint32_t last:1; /**< No more arguments. */
181 uint16_t port; /**< Current port ID (for completions). */
182 uint32_t objdata; /**< Object-specific data. */
183 void *object; /**< Address of current object for relative offsets. */
184 void *objmask; /**< Object a full mask must be written to. */
187 /** Token argument. */
/*
 * Describes where and how a parsed value must be stored relative to
 * ctx->object. When mask is non-NULL, arg_entry_bf_fill() spreads the
 * value across the mask's set bits instead of using offset/size
 * directly (see parse_int()).
 */
189 uint32_t hton:1; /**< Use network byte ordering. */
190 uint32_t sign:1; /**< Value is signed. */
191 uint32_t offset; /**< Relative offset from ctx->object. */
192 uint32_t size; /**< Field size. */
193 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
196 /** Parser token definition. */
/*
 * One entry per grammar token (see token_list[]). Everything except
 * name is optional; parse_default() handles name matching when call
 * is NULL.
 */
198 /** Type displayed during completion (defaults to "TOKEN"). */
200 /** Help displayed during completion (defaults to token name). */
202 /** Private data used by parser functions. */
205 * Lists of subsequent tokens to push on the stack. Each call to the
206 * parser consumes the last entry of that stack.
208 const enum index *const *next;
209 /** Arguments stack for subsequent tokens that need them. */
210 const struct arg *const *args;
212 * Token-processing callback, returns -1 in case of error, the
213 * length of the matched string otherwise. If NULL, attempts to
214 * match the token name.
216 * If buf is not NULL, the result should be stored in it according
217 * to context. An error is returned if not large enough.
219 int (*call)(struct context *ctx, const struct token *token,
220 const char *str, unsigned int len,
221 void *buf, unsigned int size);
223 * Callback that provides possible values for this token, used for
224 * completion. Returns -1 in case of error, the number of possible
225 * values otherwise. If NULL, the token name is used.
227 * If buf is not NULL, entry index ent is written to buf and the
228 * full length of the entry is returned (same behavior as
231 int (*comp)(struct context *ctx, const struct token *token,
232 unsigned int ent, char *buf, unsigned int size);
233 /** Mandatory token name, no default value. */
237 /** Static initializer for the next field. */
/* Compound-literal helpers below build the NULL/ZERO-terminated lists
 * expected by the token fields; several continuation lines (closing
 * braces of the literals) are missing from this excerpt. */
238 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
240 /** Static initializer for a NEXT() entry. */
241 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
243 /** Static initializer for the args field. */
244 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
246 /** Static initializer for ARGS() to target a field. */
247 #define ARGS_ENTRY(s, f) \
248 (&(const struct arg){ \
249 .offset = offsetof(s, f), \
250 .size = sizeof(((s *)0)->f), \
253 /** Static initializer for ARGS() to target a bit-field. */
254 #define ARGS_ENTRY_BF(s, f, b) \
255 (&(const struct arg){ \
257 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
260 /** Static initializer for ARGS() to target a pointer. */
261 #define ARGS_ENTRY_PTR(s, f) \
262 (&(const struct arg){ \
263 .size = sizeof(*((s *)0)->f), \
266 /** Static initializer for ARGS() with arbitrary size. */
267 #define ARGS_ENTRY_USZ(s, f, sz) \
268 (&(const struct arg){ \
269 .offset = offsetof(s, f), \
273 /** Same as ARGS_ENTRY() using network byte ordering. */
274 #define ARGS_ENTRY_HTON(s, f) \
275 (&(const struct arg){ \
277 .offset = offsetof(s, f), \
278 .size = sizeof(((s *)0)->f), \
281 /** Parser output buffer layout expected by cmd_flow_parsed(). */
/*
 * NOTE(review): the inner aggregate openers were dropped from this
 * excerpt; args appears to overlay per-command layouts (vc/destroy/
 * query/list) — presumably a union, confirm against the full source.
 */
283 enum index command; /**< Flow command. */
284 uint16_t port; /**< Affected port ID. */
287 struct rte_flow_attr attr;
288 struct rte_flow_item *pattern;
289 struct rte_flow_action *actions;
293 } vc; /**< Validate/create arguments. */
297 } destroy; /**< Destroy arguments. */
300 enum rte_flow_action_type action;
301 } query; /**< Query arguments. */
305 } list; /**< List arguments. */
306 } args; /**< Command arguments. */
309 /** Private data for pattern items. */
310 struct parse_item_priv {
311 enum rte_flow_item_type type; /**< Item type. */
312 uint32_t size; /**< Size of item specification structure. */
/* Attach item type/size to a token's priv field (used by parse_vc()). */
315 #define PRIV_ITEM(t, s) \
316 (&(const struct parse_item_priv){ \
317 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
321 /** Private data for actions. */
322 struct parse_action_priv {
323 enum rte_flow_action_type type; /**< Action type. */
324 uint32_t size; /**< Size of action configuration structure. */
/* Attach action type/size to a token's priv field (used by parse_vc()). */
327 #define PRIV_ACTION(t, s) \
328 (&(const struct parse_action_priv){ \
329 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
/*
 * Token-successor lists: each array enumerates the tokens that may
 * legally follow the current one; they are pushed on ctx->next[] via
 * the NEXT() initializers in token_list[].
 * NOTE(review): the initializer contents and closing braces of every
 * array below are missing from this excerpt.
 */
333 static const enum index next_vc_attr[] = {
342 static const enum index next_destroy_attr[] = {
348 static const enum index next_list_attr[] = {
354 static const enum index item_param[] = {
363 static const enum index next_item[] = {
384 static const enum index item_any[] = {
390 static const enum index item_vf[] = {
396 static const enum index item_port[] = {
402 static const enum index item_raw[] = {
412 static const enum index item_eth[] = {
420 static const enum index item_vlan[] = {
427 static const enum index item_ipv4[] = {
434 static const enum index item_ipv6[] = {
441 static const enum index item_icmp[] = {
448 static const enum index item_udp[] = {
455 static const enum index item_tcp[] = {
462 static const enum index item_sctp[] = {
469 static const enum index item_vxlan[] = {
475 static const enum index next_action[] = {
/*
 * Forward declarations. The parse_* functions match struct token's
 * call() signature; the comp_* functions match its comp() signature
 * (see the struct token definition above).
 */
482 static int parse_init(struct context *, const struct token *,
483 const char *, unsigned int,
484 void *, unsigned int);
485 static int parse_vc(struct context *, const struct token *,
486 const char *, unsigned int,
487 void *, unsigned int);
488 static int parse_vc_spec(struct context *, const struct token *,
489 const char *, unsigned int, void *, unsigned int);
490 static int parse_destroy(struct context *, const struct token *,
491 const char *, unsigned int,
492 void *, unsigned int);
493 static int parse_flush(struct context *, const struct token *,
494 const char *, unsigned int,
495 void *, unsigned int);
496 static int parse_query(struct context *, const struct token *,
497 const char *, unsigned int,
498 void *, unsigned int);
499 static int parse_action(struct context *, const struct token *,
500 const char *, unsigned int,
501 void *, unsigned int);
502 static int parse_list(struct context *, const struct token *,
503 const char *, unsigned int,
504 void *, unsigned int);
505 static int parse_int(struct context *, const struct token *,
506 const char *, unsigned int,
507 void *, unsigned int);
508 static int parse_prefix(struct context *, const struct token *,
509 const char *, unsigned int,
510 void *, unsigned int);
511 static int parse_boolean(struct context *, const struct token *,
512 const char *, unsigned int,
513 void *, unsigned int);
514 static int parse_string(struct context *, const struct token *,
515 const char *, unsigned int,
516 void *, unsigned int);
517 static int parse_mac_addr(struct context *, const struct token *,
518 const char *, unsigned int,
519 void *, unsigned int);
520 static int parse_ipv4_addr(struct context *, const struct token *,
521 const char *, unsigned int,
522 void *, unsigned int);
523 static int parse_ipv6_addr(struct context *, const struct token *,
524 const char *, unsigned int,
525 void *, unsigned int);
526 static int parse_port(struct context *, const struct token *,
527 const char *, unsigned int,
528 void *, unsigned int);
529 static int comp_none(struct context *, const struct token *,
530 unsigned int, char *, unsigned int);
531 static int comp_boolean(struct context *, const struct token *,
532 unsigned int, char *, unsigned int);
533 static int comp_action(struct context *, const struct token *,
534 unsigned int, char *, unsigned int);
535 static int comp_port(struct context *, const struct token *,
536 unsigned int, char *, unsigned int);
537 static int comp_rule_id(struct context *, const struct token *,
538 unsigned int, char *, unsigned int);
540 /** Token definitions. */
541 static const struct token token_list[] = {
542 /* Special tokens. */
545 .help = "null entry, abused as the entry point",
546 .next = NEXT(NEXT_ENTRY(FLOW)),
551 .help = "command may end here",
557 .help = "integer value",
562 .name = "{unsigned}",
564 .help = "unsigned integer value",
571 .help = "prefix length for bit-mask",
572 .call = parse_prefix,
578 .help = "any boolean value",
579 .call = parse_boolean,
580 .comp = comp_boolean,
585 .help = "fixed string",
586 .call = parse_string,
590 .name = "{MAC address}",
592 .help = "standard MAC address notation",
593 .call = parse_mac_addr,
597 .name = "{IPv4 address}",
598 .type = "IPV4 ADDRESS",
599 .help = "standard IPv4 address notation",
600 .call = parse_ipv4_addr,
604 .name = "{IPv6 address}",
605 .type = "IPV6 ADDRESS",
606 .help = "standard IPv6 address notation",
607 .call = parse_ipv6_addr,
613 .help = "rule identifier",
615 .comp = comp_rule_id,
620 .help = "port identifier",
625 .name = "{group_id}",
627 .help = "group identifier",
634 .help = "priority level",
638 /* Top-level command. */
641 .type = "{command} {port_id} [{arg} [...]]",
642 .help = "manage ingress/egress flow rules",
643 .next = NEXT(NEXT_ENTRY
652 /* Sub-level commands. */
655 .help = "check whether a flow rule can be created",
656 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
657 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
662 .help = "create a flow rule",
663 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
664 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
669 .help = "destroy specific flow rules",
670 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
671 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
672 .call = parse_destroy,
676 .help = "destroy all flow rules",
677 .next = NEXT(NEXT_ENTRY(PORT_ID)),
678 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
683 .help = "query an existing flow rule",
684 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
686 NEXT_ENTRY(PORT_ID)),
687 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
688 ARGS_ENTRY(struct buffer, args.query.rule),
689 ARGS_ENTRY(struct buffer, port)),
694 .help = "list existing flow rules",
695 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
696 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
699 /* Destroy arguments. */
702 .help = "specify a rule identifier",
703 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
704 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
705 .call = parse_destroy,
707 /* Query arguments. */
711 .help = "action to query, must be part of the rule",
712 .call = parse_action,
715 /* List arguments. */
718 .help = "specify a group",
719 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
720 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
723 /* Validate/create attributes. */
726 .help = "specify a group",
727 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
728 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
733 .help = "specify a priority level",
734 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
735 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
740 .help = "affect rule to ingress",
741 .next = NEXT(next_vc_attr),
746 .help = "affect rule to egress",
747 .next = NEXT(next_vc_attr),
750 /* Validate/create pattern. */
753 .help = "submit a list of pattern items",
754 .next = NEXT(next_item),
759 .help = "match value perfectly (with full bit-mask)",
760 .call = parse_vc_spec,
762 [ITEM_PARAM_SPEC] = {
764 .help = "match value according to configured bit-mask",
765 .call = parse_vc_spec,
767 [ITEM_PARAM_LAST] = {
769 .help = "specify upper bound to establish a range",
770 .call = parse_vc_spec,
772 [ITEM_PARAM_MASK] = {
774 .help = "specify bit-mask with relevant bits set to one",
775 .call = parse_vc_spec,
777 [ITEM_PARAM_PREFIX] = {
779 .help = "generate bit-mask from a prefix length",
780 .call = parse_vc_spec,
784 .help = "specify next pattern item",
785 .next = NEXT(next_item),
789 .help = "end list of pattern items",
790 .priv = PRIV_ITEM(END, 0),
791 .next = NEXT(NEXT_ENTRY(ACTIONS)),
796 .help = "no-op pattern item",
797 .priv = PRIV_ITEM(VOID, 0),
798 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
803 .help = "perform actions when pattern does not match",
804 .priv = PRIV_ITEM(INVERT, 0),
805 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
810 .help = "match any protocol for the current layer",
811 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
812 .next = NEXT(item_any),
817 .help = "number of layers covered",
818 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
819 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
823 .help = "match packets addressed to the physical function",
824 .priv = PRIV_ITEM(PF, 0),
825 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
830 .help = "match packets addressed to a virtual function ID",
831 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
832 .next = NEXT(item_vf),
837 .help = "destination VF ID",
838 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
839 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
843 .help = "device-specific physical port index to use",
844 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
845 .next = NEXT(item_port),
848 [ITEM_PORT_INDEX] = {
850 .help = "physical port index",
851 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
852 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
856 .help = "match an arbitrary byte string",
857 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
858 .next = NEXT(item_raw),
861 [ITEM_RAW_RELATIVE] = {
863 .help = "look for pattern after the previous item",
864 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
865 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
868 [ITEM_RAW_SEARCH] = {
870 .help = "search pattern from offset (see also limit)",
871 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
872 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
875 [ITEM_RAW_OFFSET] = {
877 .help = "absolute or relative offset for pattern",
878 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
879 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
883 .help = "search area limit for start of pattern",
884 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
885 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
887 [ITEM_RAW_PATTERN] = {
889 .help = "byte string to look for",
890 .next = NEXT(item_raw,
892 NEXT_ENTRY(ITEM_PARAM_IS,
895 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
896 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
898 ITEM_RAW_PATTERN_SIZE)),
902 .help = "match Ethernet header",
903 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
904 .next = NEXT(item_eth),
909 .help = "destination MAC",
910 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
911 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
915 .help = "source MAC",
916 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
917 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
922 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
923 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
927 .help = "match 802.1Q/ad VLAN tag",
928 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
929 .next = NEXT(item_vlan),
934 .help = "tag protocol identifier",
935 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
936 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
940 .help = "tag control information",
941 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
942 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
946 .help = "match IPv4 header",
947 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
948 .next = NEXT(item_ipv4),
953 .help = "source address",
954 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
955 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
960 .help = "destination address",
961 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
962 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
967 .help = "match IPv6 header",
968 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
969 .next = NEXT(item_ipv6),
974 .help = "source address",
975 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
976 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
981 .help = "destination address",
982 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
983 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
988 .help = "match ICMP header",
989 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
990 .next = NEXT(item_icmp),
995 .help = "ICMP packet type",
996 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
997 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1000 [ITEM_ICMP_CODE] = {
1002 .help = "ICMP packet code",
1003 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1004 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1009 .help = "match UDP header",
1010 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1011 .next = NEXT(item_udp),
1016 .help = "UDP source port",
1017 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1018 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1023 .help = "UDP destination port",
1024 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1025 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1030 .help = "match TCP header",
1031 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1032 .next = NEXT(item_tcp),
1037 .help = "TCP source port",
1038 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1039 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1044 .help = "TCP destination port",
1045 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1046 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1051 .help = "match SCTP header",
1052 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1053 .next = NEXT(item_sctp),
1058 .help = "SCTP source port",
1059 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1060 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1065 .help = "SCTP destination port",
1066 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1067 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1072 .help = "match VXLAN header",
1073 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1074 .next = NEXT(item_vxlan),
1077 [ITEM_VXLAN_VNI] = {
1079 .help = "VXLAN identifier",
1080 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1081 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1083 /* Validate/create actions. */
1086 .help = "submit a list of associated actions",
1087 .next = NEXT(next_action),
1092 .help = "specify next action",
1093 .next = NEXT(next_action),
1097 .help = "end list of actions",
1098 .priv = PRIV_ACTION(END, 0),
1103 .help = "no-op action",
1104 .priv = PRIV_ACTION(VOID, 0),
1105 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1108 [ACTION_PASSTHRU] = {
1110 .help = "let subsequent rule process matched packets",
1111 .priv = PRIV_ACTION(PASSTHRU, 0),
1112 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1117 /** Remove and return last entry from argument stack. */
1118 static const struct arg *
1119 pop_args(struct context *ctx)
1121 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1124 /** Add entry on top of the argument stack. */
1126 push_args(struct context *ctx, const struct arg *arg)
1128 if (ctx->args_num == CTX_STACK_SIZE)
1130 ctx->args[ctx->args_num++] = arg;
1134 /** Spread value into buffer according to bit-mask. */
/*
 * Deposits the low-order bits of val into the bytes of dst selected by
 * arg->mask, one destination bit per set mask bit.
 * NOTE(review): several lines of this function (return type, braces,
 * outer loop, the `sub` computation, big-endian path) are missing from
 * this excerpt; comments below annotate only the visible statements.
 */
1136 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1138 uint32_t i = arg->size;
1146 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1155 unsigned int shift = 0;
1156 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan the current mask byte; stop once no set bits remain above shift. */
1158 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1159 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the next bit of val. */
1164 *buf &= ~(1 << shift);
1165 *buf |= (val & 1) << shift;
1174 * Parse a prefix length and generate a bit-mask.
1176 * Last argument (ctx->args) is retrieved to determine mask size, storage
1177 * location and whether the result must use network byte ordering.
/*
 * NOTE(review): many lines (return type, braces, the `bytes`/`extra`
 * computation, error paths) are missing from this excerpt; comments
 * annotate only the visible statements.
 */
1180 parse_prefix(struct context *ctx, const struct token *token,
1181 const char *str, unsigned int len,
1182 void *buf, unsigned int size)
1184 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n highest bits set (0 <= n <= 8), used to
 * fill the partial byte at the boundary of the prefix mask. */
1185 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1192 /* Argument is expected. */
1196 u = strtoumax(str, &end, 0);
/* Reject on conversion error or trailing garbage within the token. */
1197 if (errno || (size_t)(end - str) != len)
/* With a NULL dst, arg_entry_bf_fill() is apparently used as a size
 * probe for bit-field targets — confirm against the full source. */
1202 extra = arg_entry_bf_fill(NULL, 0, arg);
1211 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1212 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1219 if (bytes > size || bytes + !!extra > size)
1223 buf = (uint8_t *)ctx->object + arg->offset;
1224 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host: mask is laid out in network order, so the 0xff
 * run goes at the end of the field and zeroes at the start. */
1226 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1227 memset(buf, 0x00, size - bytes);
1229 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1233 memset(buf, 0xff, bytes);
1234 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1236 ((uint8_t *)buf)[bytes] = conv[extra];
/* Full mask object, when present, is set to all-ones for this field. */
1239 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on error paths so the caller state is intact. */
1242 push_args(ctx, arg);
1246 /** Default parsing function for token name matching. */
1248 parse_default(struct context *ctx, const struct token *token,
1249 const char *str, unsigned int len,
1250 void *buf, unsigned int size)
1255 if (strncmp(str, token->name, len))
1260 /** Parse flow command, initialize output buffer for subsequent tokens. */
/*
 * NOTE(review): return type, braces and the early-return lines are
 * missing from this excerpt; comments annotate visible statements only.
 */
1262 parse_init(struct context *ctx, const struct token *token,
1263 const char *str, unsigned int len,
1264 void *buf, unsigned int size)
1266 struct buffer *out = buf;
1268 /* Token name must match. */
1269 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1271 /* Nothing else to do if there is no buffer. */
1274 /* Make sure buffer is large enough. */
1275 if (size < sizeof(*out))
1277 /* Initialize buffer. */
1278 memset(out, 0x00, sizeof(*out));
/* Poison the spare space past the struct with 0x22 so stale reads of
 * uninitialized scratch area are easier to spot. */
1279 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1282 ctx->objmask = NULL;
1286 /** Parse tokens for validate/create commands. */
/*
 * Dispatches on ctx->curr: command header, attributes (ingress/egress),
 * PATTERN/ACTIONS section starts, then individual item/action tokens.
 * Items/actions grow upward from the end of the struct buffer while
 * their spec/config storage (args.vc.data) grows downward from
 * (uint8_t *)out + size; the pointer comparisons below detect the two
 * regions colliding.
 * NOTE(review): return type, braces, case labels and several lines are
 * missing from this excerpt; comments annotate visible statements only.
 */
1288 parse_vc(struct context *ctx, const struct token *token,
1289 const char *str, unsigned int len,
1290 void *buf, unsigned int size)
1292 struct buffer *out = buf;
1296 /* Token name must match. */
1297 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1299 /* Nothing else to do if there is no buffer. */
/* First call for this buffer: record the command and anchor the
 * downward-growing data area at the end of the buffer. */
1302 if (!out->command) {
1303 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1305 if (sizeof(*out) > size)
1307 out->command = ctx->curr;
1310 ctx->objmask = NULL;
1311 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into args.vc.attr. */
1315 ctx->object = &out->args.vc.attr;
1316 ctx->objmask = NULL;
1317 switch (ctx->curr) {
1322 out->args.vc.attr.ingress = 1;
1325 out->args.vc.attr.egress = 1;
/* PATTERN: place the item array right after the struct, aligned. */
1328 out->args.vc.pattern =
1329 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1331 ctx->object = out->args.vc.pattern;
1332 ctx->objmask = NULL;
/* ACTIONS: place the action array after the parsed items, aligned. */
1335 out->args.vc.actions =
1336 (void *)RTE_ALIGN_CEIL((uintptr_t)
1337 (out->args.vc.pattern +
1338 out->args.vc.pattern_n),
1340 ctx->object = out->args.vc.actions;
1341 ctx->objmask = NULL;
/* Before ACTIONS starts, non-attribute tokens are pattern items. */
1348 if (!out->args.vc.actions) {
1349 const struct parse_item_priv *priv = token->priv;
1350 struct rte_flow_item *item =
1351 out->args.vc.pattern + out->args.vc.pattern_n;
1353 data_size = priv->size * 3; /* spec, last, mask */
/* Carve spec/last/mask storage from the downward-growing area. */
1354 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1355 (out->args.vc.data - data_size),
/* Fail if the new item would overlap the data area. */
1357 if ((uint8_t *)item + sizeof(*item) > data)
1359 *item = (struct rte_flow_item){
1362 ++out->args.vc.pattern_n;
1364 ctx->objmask = NULL;
/* Otherwise the token is an action. */
1366 const struct parse_action_priv *priv = token->priv;
1367 struct rte_flow_action *action =
1368 out->args.vc.actions + out->args.vc.actions_n;
1370 data_size = priv->size; /* configuration */
1371 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1372 (out->args.vc.data - data_size),
1374 if ((uint8_t *)action + sizeof(*action) > data)
1376 *action = (struct rte_flow_action){
1379 ++out->args.vc.actions_n;
1380 ctx->object = action;
1381 ctx->objmask = NULL;
/* Zero the freshly carved storage and publish the new low-water mark. */
1383 memset(data, 0, data_size);
1384 out->args.vc.data = data;
1385 ctx->objdata = data_size;
1389 /** Parse pattern item parameter type. */
/*
 * Handles the is/spec/last/mask/prefix keywords that follow a pattern
 * item: selects which third of the item's spec/last/mask storage
 * (carved by parse_vc(), ctx->objdata bytes total) subsequent values
 * are written into.
 * NOTE(review): return type, braces, the `index` assignments per case
 * and several lines are missing from this excerpt.
 */
1391 parse_vc_spec(struct context *ctx, const struct token *token,
1392 const char *str, unsigned int len,
1393 void *buf, unsigned int size)
1395 struct buffer *out = buf;
1396 struct rte_flow_item *item;
1402 /* Token name must match. */
1403 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1405 /* Parse parameter types. */
1406 switch (ctx->curr) {
1411 case ITEM_PARAM_SPEC:
1414 case ITEM_PARAM_LAST:
1417 case ITEM_PARAM_PREFIX:
1418 /* Modify next token to expect a prefix. */
1419 if (ctx->next_num < 2)
1421 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1423 case ITEM_PARAM_MASK:
1429 /* Nothing else to do if there is no buffer. */
/* A parameter keyword is only valid after at least one item. */
1432 if (!out->args.vc.pattern_n)
1434 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1435 data_size = ctx->objdata / 3; /* spec, last, mask */
1436 /* Point to selected object. */
1437 ctx->object = out->args.vc.data + (data_size * index);
1439 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1440 item->mask = ctx->objmask;
1442 ctx->objmask = NULL;
1443 /* Update relevant item pointer. */
1444 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1449 /** Parse tokens for destroy command. */
/*
 * First call records the DESTROY command and anchors the rule-ID array
 * right after the struct buffer; subsequent calls append one rule ID
 * slot each, bounds-checked against the end of the buffer.
 * NOTE(review): return type, braces and early-return lines are missing
 * from this excerpt.
 */
1451 parse_destroy(struct context *ctx, const struct token *token,
1452 const char *str, unsigned int len,
1453 void *buf, unsigned int size)
1455 struct buffer *out = buf;
1457 /* Token name must match. */
1458 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1460 /* Nothing else to do if there is no buffer. */
1463 if (!out->command) {
1464 if (ctx->curr != DESTROY)
1466 if (sizeof(*out) > size)
1468 out->command = ctx->curr;
1471 ctx->objmask = NULL;
1472 out->args.destroy.rule =
1473 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another rule ID if it would run past the buffer end. */
1477 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1478 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1481 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1482 ctx->objmask = NULL;
1486 /** Parse tokens for flush command. */
/*
 * FLUSH takes no extra arguments: only the command and (elsewhere) the
 * port are recorded.
 * NOTE(review): return type, braces and early-return lines are missing
 * from this excerpt.
 */
1488 parse_flush(struct context *ctx, const struct token *token,
1489 const char *str, unsigned int len,
1490 void *buf, unsigned int size)
1492 struct buffer *out = buf;
1494 /* Token name must match. */
1495 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1497 /* Nothing else to do if there is no buffer. */
1500 if (!out->command) {
1501 if (ctx->curr != FLUSH)
1503 if (sizeof(*out) > size)
1505 out->command = ctx->curr;
1508 ctx->objmask = NULL;
1513 /** Parse tokens for query command. */
/*
 * Records the QUERY command; the rule ID and action to query are filled
 * in by the argument tokens (see the QUERY token's ARGS() entries).
 * NOTE(review): return type, braces and early-return lines are missing
 * from this excerpt.
 */
1515 parse_query(struct context *ctx, const struct token *token,
1516 const char *str, unsigned int len,
1517 void *buf, unsigned int size)
1519 struct buffer *out = buf;
1521 /* Token name must match. */
1522 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1524 /* Nothing else to do if there is no buffer. */
1527 if (!out->command) {
1528 if (ctx->curr != QUERY)
1530 if (sizeof(*out) > size)
1532 out->command = ctx->curr;
1535 ctx->objmask = NULL;
1540 /** Parse action names. */
/*
 * Resolves an action name (e.g. for "query") by scanning next_action[]
 * and comparing against token_list[] names, then stores the matching
 * action type through the popped argument descriptor.
 * NOTE(review): return type, braces, the priv dereference and error
 * paths are missing from this excerpt.
 */
1542 parse_action(struct context *ctx, const struct token *token,
1543 const char *str, unsigned int len,
1544 void *buf, unsigned int size)
1546 struct buffer *out = buf;
1547 const struct arg *arg = pop_args(ctx);
1551 /* Argument is expected. */
1554 /* Parse action name. */
1555 for (i = 0; next_action[i]; ++i) {
1556 const struct parse_action_priv *priv;
/* Note: `token` is deliberately repointed at each candidate entry. */
1558 token = &token_list[next_action[i]];
1559 if (strncmp(token->name, str, len))
1565 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so caller state is intact. */
1571 push_args(ctx, arg);
1575 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): first call records the LIST command and
 * anchors the group-ID array after the struct buffer; each later call
 * appends one group ID slot, bounds-checked against the buffer end.
 * NOTE(review): return type, braces and early-return lines are missing
 * from this excerpt.
 */
1577 parse_list(struct context *ctx, const struct token *token,
1578 const char *str, unsigned int len,
1579 void *buf, unsigned int size)
1581 struct buffer *out = buf;
1583 /* Token name must match. */
1584 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1586 /* Nothing else to do if there is no buffer. */
1589 if (!out->command) {
1590 if (ctx->curr != LIST)
1592 if (sizeof(*out) > size)
1594 out->command = ctx->curr;
1597 ctx->objmask = NULL;
1598 out->args.list.group =
1599 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another group ID if it would run past the buffer end. */
1603 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1604 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1607 ctx->object = out->args.list.group + out->args.list.group_n++;
1608 ctx->objmask = NULL;
1613 * Parse signed/unsigned integers 8 to 64-bit long.
1615 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Stores the value through the popped arg: via arg_entry_bf_fill() when
 * a bit-mask is set, otherwise directly at ctx->object + arg->offset,
 * sized by arg->size and byte-swapped when arg->hton is set. When
 * ctx->objmask is present the same field in the mask object is also
 * written (visible at the tail below).
 * NOTE(review): return type, braces, `break`s, #else/#endif and error
 * paths are missing from this excerpt.
 */
1619 parse_int(struct context *ctx, const struct token *token,
1620 const char *str, unsigned int len,
1621 void *buf, unsigned int size)
1623 const struct arg *arg = pop_args(ctx);
1628 /* Argument is expected. */
/* strtoimax for signed args, strtoumax otherwise; both accept 0x/0 base
 * prefixes (base 0). */
1633 (uintmax_t)strtoimax(str, &end, 0) :
1634 strtoumax(str, &end, 0);
/* Reject on conversion error or trailing garbage within the token. */
1635 if (errno || (size_t)(end - str) != len)
/* Bit-field target: spread value and all-ones mask via helper. */
1640 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1641 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1645 buf = (uint8_t *)ctx->object + arg->offset;
1649 case sizeof(uint8_t):
1650 *(uint8_t *)buf = u;
1652 case sizeof(uint16_t):
1653 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VXLAN VNI) are stored byte by byte. */
1655 case sizeof(uint8_t [3]):
1656 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1658 ((uint8_t *)buf)[0] = u;
1659 ((uint8_t *)buf)[1] = u >> 8;
1660 ((uint8_t *)buf)[2] = u >> 16;
1664 ((uint8_t *)buf)[0] = u >> 16;
1665 ((uint8_t *)buf)[1] = u >> 8;
1666 ((uint8_t *)buf)[2] = u;
1668 case sizeof(uint32_t):
1669 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1671 case sizeof(uint64_t):
1672 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Also fill the mask object, unless we were already writing to it. */
1677 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1679 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore the argument on error paths so caller state is intact. */
1684 push_args(ctx, arg);
1691  * Two arguments (ctx->args) are retrieved from the stack to store data and
1692  * its length (in that order).
1695 parse_string(struct context *ctx, const struct token *token,
1696 const char *str, unsigned int len,
1697 void *buf, unsigned int size)
1699 const struct arg *arg_data = pop_args(ctx);
1700 const struct arg *arg_len = pop_args(ctx);
1701 char tmp[16]; /* Ought to be enough. */
1704 /* Arguments are expected. */
/* Return the data argument when the length argument is missing. */
1708 push_args(ctx, arg_data);
/* Destination capacity comes from the data argument itself. */
1711 size = arg_data->size;
1712 /* Bit-mask fill is not supported. */
1713 if (arg_data->mask || size < len)
1717 /* Let parse_int() fill length information first. */
1718 ret = snprintf(tmp, sizeof(tmp), "%u", len);
1721 push_args(ctx, arg_len);
1722 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
1727 buf = (uint8_t *)ctx->object + arg_data->offset;
1728 /* Output buffer is not necessarily NUL-terminated. */
1729 memcpy(buf, str, len);
/* Fill the unused tail with a recognizable 0x55 pattern. */
1730 memset((uint8_t *)buf + len, 0x55, size - len);
/* Mask covers only the bytes actually written. */
1732 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Presumed error path: restore both arguments in pop order — confirm. */
1735 push_args(ctx, arg_len);
1736 push_args(ctx, arg_data);
1741  * Parse a MAC address.
1743  * Last argument (ctx->args) is retrieved to determine storage size and
 * offset; the address is validated into a stack temporary before copy.
1747 parse_mac_addr(struct context *ctx, const struct token *token,
1748 const char *str, unsigned int len,
1749 void *buf, unsigned int size)
1751 const struct arg *arg = pop_args(ctx);
1752 struct ether_addr tmp;
1756 /* Argument is expected. */
1760 /* Bit-mask fill is not supported. */
1761 if (arg->mask || size != sizeof(tmp))
/* The helper must consume exactly the token, otherwise reject it. */
1763 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
1764 if (ret < 0 || (unsigned int)ret != len)
1768 buf = (uint8_t *)ctx->object + arg->offset;
1769 memcpy(buf, &tmp, size);
/* When a mask object exists, all address bytes become significant. */
1771 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size)
/* Presumed error path: restore the argument — confirm in full source. */
1774 push_args(ctx, arg);
1779  * Parse an IPv4 address.
1781  * Last argument (ctx->args) is retrieved to determine storage size and
 * offset; falls back to plain integer parsing when inet_pton() rejects
 * the token.
1785 parse_ipv4_addr(struct context *ctx, const struct token *token,
1786 const char *str, unsigned int len,
1787 void *buf, unsigned int size)
1789 const struct arg *arg = pop_args(ctx);
1794 /* Argument is expected. */
1798 /* Bit-mask fill is not supported. */
1799 if (arg->mask || size != sizeof(tmp))
1801 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
1804 memcpy(str2, str, len);
1806 ret = inet_pton(AF_INET, str2, &tmp);
1808 /* Attempt integer parsing. */
1809 push_args(ctx, arg);
1810 return parse_int(ctx, token, str, len, buf, size);
1814 buf = (uint8_t *)ctx->object + arg->offset;
1815 memcpy(buf, &tmp, size);
/* All address bytes become significant in the mask object. */
1817 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumed error path: restore the argument — confirm in full source. */
1820 push_args(ctx, arg);
1825  * Parse an IPv6 address.
1827  * Last argument (ctx->args) is retrieved to determine storage size and
 * offset; unlike IPv4, there is no fallback to integer parsing here.
1831 parse_ipv6_addr(struct context *ctx, const struct token *token,
1832 const char *str, unsigned int len,
1833 void *buf, unsigned int size)
1835 const struct arg *arg = pop_args(ctx);
1837 struct in6_addr tmp;
1841 /* Argument is expected. */
1845 /* Bit-mask fill is not supported. */
1846 if (arg->mask || size != sizeof(tmp))
1848 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
1851 memcpy(str2, str, len);
1853 ret = inet_pton(AF_INET6, str2, &tmp);
1858 buf = (uint8_t *)ctx->object + arg->offset;
1859 memcpy(buf, &tmp, size);
/* All address bytes become significant in the mask object. */
1861 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumed error path: restore the argument — confirm in full source. */
1864 push_args(ctx, arg);
1868 /** Boolean values (even indices stand for false). */
/* NULL-terminated keyword table; parse_boolean() maps index parity to 0/1. */
1869 static const char *const boolean_name[] = {
1878  * Parse a boolean value.
1880  * Last argument (ctx->args) is retrieved to determine storage size and
 * offset; unrecognized tokens are handed to parse_int() unchanged.
1884 parse_boolean(struct context *ctx, const struct token *token,
1885 const char *str, unsigned int len,
1886 void *buf, unsigned int size)
1888 const struct arg *arg = pop_args(ctx);
1892 /* Argument is expected. */
/* Look the token up in the boolean keyword table. */
1895 for (i = 0; boolean_name[i]; ++i)
1896 if (!strncmp(str, boolean_name[i], len))
1898 /* Process token as integer. */
/* Odd indices mean true: rewrite str as "1"/"0" before delegation. */
1899 if (boolean_name[i])
1900 str = i & 1 ? "1" : "0";
1901 push_args(ctx, arg);
1902 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the original token, not of the rewritten one. */
1903 return ret > 0 ? (int)len : ret;
1906 /** Parse port and update context. */
1908 parse_port(struct context *ctx, const struct token *token,
1909 const char *str, unsigned int len,
1910 void *buf, unsigned int size)
/* Scratch buffer so the port can be parsed even without a caller buffer. */
1912 struct buffer *out = &(struct buffer){ .port = 0 };
1920 ctx->objmask = NULL;
1921 size = sizeof(*out);
/* Delegate numeric conversion, then cache the port ID in the context. */
1923 ret = parse_int(ctx, token, str, len, out, size);
1925 ctx->port = out->port;
1931 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest. */
1933 comp_none(struct context *ctx, const struct token *token,
1934 unsigned int ent, char *buf, unsigned int size)
1944 /** Complete boolean values. */
1946 comp_boolean(struct context *ctx, const struct token *token,
1947 unsigned int ent, char *buf, unsigned int size)
/* With a buffer, emit entry #ent; without one the loop counts entries. */
1953 for (i = 0; boolean_name[i]; ++i)
1954 if (buf && i == ent)
1955 return snprintf(buf, size, "%s", boolean_name[i]);
1961 /** Complete action names. */
1963 comp_action(struct context *ctx, const struct token *token,
1964 unsigned int ent, char *buf, unsigned int size)
/* With a buffer, emit entry #ent; without one the loop counts entries. */
1970 for (i = 0; next_action[i]; ++i)
1971 if (buf && i == ent)
1972 return snprintf(buf, size, "%s",
1973 token_list[next_action[i]].name);
1979 /** Complete available ports. */
1981 comp_port(struct context *ctx, const struct token *token,
1982 unsigned int ent, char *buf, unsigned int size)
/* Iterate enabled ports; with a buffer, emit entry #ent as a number. */
1989 FOREACH_PORT(p, ports) {
1990 if (buf && i == ent)
1991 return snprintf(buf, size, "%u", p);
1999 /** Complete available rule IDs. */
2001 comp_rule_id(struct context *ctx, const struct token *token,
2002 unsigned int ent, char *buf, unsigned int size)
2005 struct rte_port *port;
2006 struct port_flow *pf;
/* Rule IDs are per-port; bail out unless a single valid port is set. */
2009 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2010 ctx->port == (uint16_t)RTE_PORT_ALL)
/* Walk this port's flow list; with a buffer, emit entry #ent. */
2012 port = &ports[ctx->port];
2013 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2014 if (buf && i == ent)
2015 return snprintf(buf, size, "%u", pf->id);
2023 /** Internal context. */
2024 static struct context cmd_flow_context;
2026 /** Global parser instance (cmdline API). */
/* Forward declaration: cmd_flow_get_help() updates its help string. */
2027 cmdline_parse_inst_t cmd_flow;
2029 /** Initialize context. */
/* Resets parser state field by field before a new command line. */
2031 cmd_flow_context_init(struct context *ctx)
2033 /* A full memset() is not necessary. */
2044 ctx->objmask = NULL;
2047 /** Parse a token (cmdline API). */
/*
 * Entry point called by the cmdline library once per token of the
 * "flow" command. Walks the candidate token list for the current
 * context state and lets the matching token's parser consume src.
 */
2049 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2052 struct context *ctx = &cmd_flow_context;
2053 const struct token *token;
2054 const enum index *list;
2059 /* Restart as requested. */
2061 cmd_flow_context_init(ctx);
2062 token = &token_list[ctx->curr];
2063 /* Check argument length. */
/* Token ends at a comment marker or any whitespace. */
2066 for (len = 0; src[len]; ++len)
2067 if (src[len] == '#' || isspace(src[len]))
2071 /* Last argument and EOL detection. */
2072 for (i = len; src[i]; ++i)
2073 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2075 else if (!isspace(src[i])) {
2080 if (src[i] == '\r' || src[i] == '\n') {
2084 /* Initialize context if necessary. */
2085 if (!ctx->next_num) {
2088 ctx->next[ctx->next_num++] = token->next[0];
2090 /* Process argument through candidates. */
2091 ctx->prev = ctx->curr;
2092 list = ctx->next[ctx->next_num - 1];
2093 for (i = 0; list[i]; ++i) {
2094 const struct token *next = &token_list[list[i]];
2097 ctx->curr = list[i];
/* Prefer the token's own parser, fall back to the default one. */
2099 tmp = next->call(ctx, next, src, len, result, size);
2101 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate matches only when it consumed the whole token. */
2102 if (tmp == -1 || tmp != len)
2110 /* Push subsequent tokens if any. */
2112 for (i = 0; token->next[i]; ++i) {
2113 if (ctx->next_num == RTE_DIM(ctx->next))
2115 ctx->next[ctx->next_num++] = token->next[i];
2117 /* Push arguments if any. */
2119 for (i = 0; token->args[i]; ++i) {
2120 if (ctx->args_num == RTE_DIM(ctx->args))
2122 ctx->args[ctx->args_num++] = token->args[i];
2127 /** Return number of completion entries (cmdline API). */
2129 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2131 struct context *ctx = &cmd_flow_context;
2132 const struct token *token = &token_list[ctx->curr];
2133 const enum index *list;
2137 /* Tell cmd_flow_parse() that context must be reinitialized. */
2139 /* Count number of tokens in current list. */
2141 list = ctx->next[ctx->next_num - 1];
2143 list = token->next[0];
2144 for (i = 0; list[i]; ++i)
2149  * If there is a single token, use its completion callback, otherwise
2150  * return the number of entries.
2152 token = &token_list[list[0]];
2153 if (i == 1 && token->comp) {
2154 /* Save index for cmd_flow_get_help(). */
2155 ctx->prev = list[0];
/* comp() with a NULL buffer only counts the available entries. */
2156 return token->comp(ctx, token, 0, NULL, 0);
2161 /** Return a completion entry (cmdline API). */
2163 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2164 char *dst, unsigned int size)
2166 struct context *ctx = &cmd_flow_context;
2167 const struct token *token = &token_list[ctx->curr];
2168 const enum index *list;
2172 /* Tell cmd_flow_parse() that context must be reinitialized. */
2174 /* Count number of tokens in current list. */
2176 list = ctx->next[ctx->next_num - 1];
2178 list = token->next[0];
2179 for (i = 0; list[i]; ++i)
2183 /* If there is a single token, use its completion callback. */
2184 token = &token_list[list[0]];
2185 if (i == 1 && token->comp) {
2186 /* Save index for cmd_flow_get_help(). */
2187 ctx->prev = list[0];
2188 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2190 /* Otherwise make sure the index is valid and use defaults. */
2193 token = &token_list[list[index]];
2194 snprintf(dst, size, "%s", token->name);
2195 /* Save index for cmd_flow_get_help(). */
2196 ctx->prev = list[index];
2200 /** Populate help strings for current token (cmdline API). */
2202 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2204 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
2205 const struct token *token = &token_list[ctx->prev];
2208 /* Tell cmd_flow_parse() that context must be reinitialized. */
2212 /* Set token type and update global help with details. */
2213 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Fall back to the token name when no help text is available. */
2215 cmd_flow.help_str = token->help;
2217 cmd_flow.help_str = token->name;
2221 /** Token definition template (cmdline API). */
/* Single shared header: all dynamic tokens route through these callbacks. */
2222 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2223 .ops = &(struct cmdline_token_ops){
2224 .parse = cmd_flow_parse,
2225 .complete_get_nb = cmd_flow_complete_get_nb,
2226 .complete_get_elt = cmd_flow_complete_get_elt,
2227 .get_help = cmd_flow_get_help,
2232 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library to obtain token headers one by one;
 * always hands back the shared cmd_flow_token_hdr template.
 */
2234 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2235 cmdline_parse_token_hdr_t *(*hdrs)[])
2237 struct context *ctx = &cmd_flow_context;
2239 /* Always reinitialize context before requesting the first token. */
2241 cmd_flow_context_init(ctx);
2242 /* Return NULL when no more tokens are expected. */
2243 if (!ctx->next_num && ctx->curr) {
2247 /* Determine if command should end here. */
2248 if (ctx->eol && ctx->last && ctx->next_num) {
2249 const enum index *list = ctx->next[ctx->next_num - 1];
2252 for (i = 0; list[i]; ++i) {
2259 *hdr = &cmd_flow_token_hdr;
2262 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command buffer to its port_flow_*() backend. */
2264 cmd_flow_parsed(const struct buffer *in)
2266 switch (in->command) {
2268 port_flow_validate(in->port, &in->args.vc.attr,
2269 in->args.vc.pattern, in->args.vc.actions);
2272 port_flow_create(in->port, &in->args.vc.attr,
2273 in->args.vc.pattern, in->args.vc.actions);
2276 port_flow_destroy(in->port, in->args.destroy.rule_n,
2277 in->args.destroy.rule);
2280 port_flow_flush(in->port);
2283 port_flow_query(in->port, in->args.query.rule,
2284 in->args.query.action);
2287 port_flow_list(in->port, in->args.list.group_n,
2288 in->args.list.group);
2295 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator (arg0/arg2 set) and result dispatcher. */
2297 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2300 cmd_flow_tok(arg0, arg2);
2302 cmd_flow_parsed(arg0);
2305 /** Global parser instance (cmdline API). */
2306 cmdline_parse_inst_t cmd_flow = {
2308 .data = NULL, /**< Unused. */
2309 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2312 }, /**< Tokens are returned by cmd_flow_tok(). */