/*-
 *   BSD LICENSE
 *
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <errno.h>
#include <string.h>
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>
/** Parser token indices. */
/*
 * NOTE(review): the `enum index { ... }` body these section markers belong
 * to is missing from this extract; only the markers remain below. Restore
 * the enumeration before building.
 */
/* Top-level command. */
/* Sub-level commands. */
/* Destroy arguments. */
/* Query arguments. */
/* Validate/create arguments. */
/* Validate/create pattern. */
/* Validate/create actions. */
/** Size of pattern[] field in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 36

/** Storage size for struct rte_flow_item_raw including pattern. */
#define ITEM_RAW_SIZE \
	(offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)

/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
177 /** Parser context. */
179 /** Stack of subsequent token lists to process. */
180 const enum index *next[CTX_STACK_SIZE];
181 /** Arguments for stacked tokens. */
182 const void *args[CTX_STACK_SIZE];
183 enum index curr; /**< Current token index. */
184 enum index prev; /**< Index of the last token seen. */
185 int next_num; /**< Number of entries in next[]. */
186 int args_num; /**< Number of entries in args[]. */
187 uint32_t reparse:1; /**< Start over from the beginning. */
188 uint32_t eol:1; /**< EOL has been detected. */
189 uint32_t last:1; /**< No more arguments. */
190 uint16_t port; /**< Current port ID (for completions). */
191 uint32_t objdata; /**< Object-specific data. */
192 void *object; /**< Address of current object for relative offsets. */
193 void *objmask; /**< Object a full mask must be written to. */
/** Token argument. */
struct arg {
	uint32_t hton:1; /**< Use network byte ordering. */
	uint32_t sign:1; /**< Value is signed. */
	uint32_t offset; /**< Relative offset from ctx->object. */
	uint32_t size; /**< Field size. */
	const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
};
/** Parser token definition. */
struct token {
	/** Type displayed during completion (defaults to "TOKEN"). */
	const char *type;
	/** Help displayed during completion (defaults to token name). */
	const char *help;
	/** Private data used by parser functions. */
	const void *priv;
	/**
	 * Lists of subsequent tokens to push on the stack. Each call to the
	 * parser consumes the last entry of that stack.
	 */
	const enum index *const *next;
	/** Arguments stack for subsequent tokens that need them. */
	const struct arg *const *args;
	/**
	 * Token-processing callback, returns -1 in case of error, the
	 * length of the matched string otherwise. If NULL, attempts to
	 * match the token name.
	 *
	 * If buf is not NULL, the result should be stored in it according
	 * to context. An error is returned if not large enough.
	 */
	int (*call)(struct context *ctx, const struct token *token,
		    const char *str, unsigned int len,
		    void *buf, unsigned int size);
	/**
	 * Callback that provides possible values for this token, used for
	 * completion. Returns -1 in case of error, the number of possible
	 * values otherwise. If NULL, the token name is used.
	 *
	 * If buf is not NULL, entry index ent is written to buf and the
	 * full length of the entry is returned (same behavior as
	 * snprintf()).
	 */
	int (*comp)(struct context *ctx, const struct token *token,
		    unsigned int ent, char *buf, unsigned int size);
	/** Mandatory token name, no default value. */
	const char *name;
};
/** Static initializer for the next field. */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/** Static initializer for a NEXT() entry. */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/** Static initializer for the args field. */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }

/** Static initializer for ARGS() to target a field. */
#define ARGS_ENTRY(s, f) \
	(&(const struct arg){ \
		.offset = offsetof(s, f), \
		.size = sizeof(((s *)0)->f), \
	})

/** Static initializer for ARGS() to target a bit-field. */
#define ARGS_ENTRY_BF(s, f, b) \
	(&(const struct arg){ \
		/* NOTE(review): size reconstructed -- bytes needed for b bits. */ \
		.size = sizeof(uint8_t [(b + 7) / 8]), \
		.mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
	})

/** Static initializer for ARGS() to target a pointer. */
#define ARGS_ENTRY_PTR(s, f) \
	(&(const struct arg){ \
		.size = sizeof(*((s *)0)->f), \
	})

/** Static initializer for ARGS() with arbitrary size. */
#define ARGS_ENTRY_USZ(s, f, sz) \
	(&(const struct arg){ \
		.offset = offsetof(s, f), \
		.size = (sz), \
	})

/** Same as ARGS_ENTRY() using network byte ordering. */
#define ARGS_ENTRY_HTON(s, f) \
	(&(const struct arg){ \
		.hton = 1, \
		.offset = offsetof(s, f), \
		.size = sizeof(((s *)0)->f), \
	})
290 /** Parser output buffer layout expected by cmd_flow_parsed(). */
292 enum index command; /**< Flow command. */
293 uint16_t port; /**< Affected port ID. */
296 struct rte_flow_attr attr;
297 struct rte_flow_item *pattern;
298 struct rte_flow_action *actions;
302 } vc; /**< Validate/create arguments. */
306 } destroy; /**< Destroy arguments. */
309 enum rte_flow_action_type action;
310 } query; /**< Query arguments. */
314 } list; /**< List arguments. */
315 } args; /**< Command arguments. */
318 /** Private data for pattern items. */
319 struct parse_item_priv {
320 enum rte_flow_item_type type; /**< Item type. */
321 uint32_t size; /**< Size of item specification structure. */
324 #define PRIV_ITEM(t, s) \
325 (&(const struct parse_item_priv){ \
326 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
330 /** Private data for actions. */
331 struct parse_action_priv {
332 enum rte_flow_action_type type; /**< Action type. */
333 uint32_t size; /**< Size of action configuration structure. */
336 #define PRIV_ACTION(t, s) \
337 (&(const struct parse_action_priv){ \
338 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
342 static const enum index next_vc_attr[] = {
351 static const enum index next_destroy_attr[] = {
357 static const enum index next_list_attr[] = {
363 static const enum index item_param[] = {
372 static const enum index next_item[] = {
393 static const enum index item_any[] = {
399 static const enum index item_vf[] = {
405 static const enum index item_port[] = {
411 static const enum index item_raw[] = {
421 static const enum index item_eth[] = {
429 static const enum index item_vlan[] = {
436 static const enum index item_ipv4[] = {
443 static const enum index item_ipv6[] = {
450 static const enum index item_icmp[] = {
457 static const enum index item_udp[] = {
464 static const enum index item_tcp[] = {
471 static const enum index item_sctp[] = {
478 static const enum index item_vxlan[] = {
484 static const enum index next_action[] = {
497 static const enum index action_mark[] = {
503 static const enum index action_vf[] = {
/*
 * Forward declarations for token-processing callbacks (struct token.call)
 * and completion callbacks (struct token.comp).
 */
static int parse_init(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_vc(struct context *, const struct token *,
		    const char *, unsigned int,
		    void *, unsigned int);
static int parse_vc_spec(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_destroy(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_flush(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_query(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_action(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_list(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_int(struct context *, const struct token *,
		     const char *, unsigned int,
		     void *, unsigned int);
static int parse_prefix(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_boolean(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_string(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_mac_addr(struct context *, const struct token *,
			  const char *, unsigned int,
			  void *, unsigned int);
static int parse_ipv4_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_ipv6_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_port(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int comp_none(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_boolean(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_action(struct context *, const struct token *,
		       unsigned int, char *, unsigned int);
static int comp_port(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
/*
 * NOTE(review): this table is a garbled extract -- every line carries a
 * stray leading line number, and many entries are missing their [INDEX]
 * designators, .name fields and closing braces. It is kept byte-identical
 * below; restore it from upstream app/test-pmd/cmdline_flow.c before
 * attempting to build. Entries are indexed by `enum index` and describe,
 * for each token: display type, help string, allowed follow-up tokens
 * (.next), argument storage (.args) and parser/completion callbacks.
 */
570 /** Token definitions. */
571 static const struct token token_list[] = {
572 /* Special tokens. */
575 .help = "null entry, abused as the entry point",
576 .next = NEXT(NEXT_ENTRY(FLOW)),
581 .help = "command may end here",
587 .help = "integer value",
592 .name = "{unsigned}",
594 .help = "unsigned integer value",
601 .help = "prefix length for bit-mask",
602 .call = parse_prefix,
608 .help = "any boolean value",
609 .call = parse_boolean,
610 .comp = comp_boolean,
615 .help = "fixed string",
616 .call = parse_string,
620 .name = "{MAC address}",
622 .help = "standard MAC address notation",
623 .call = parse_mac_addr,
627 .name = "{IPv4 address}",
628 .type = "IPV4 ADDRESS",
629 .help = "standard IPv4 address notation",
630 .call = parse_ipv4_addr,
634 .name = "{IPv6 address}",
635 .type = "IPV6 ADDRESS",
636 .help = "standard IPv6 address notation",
637 .call = parse_ipv6_addr,
643 .help = "rule identifier",
645 .comp = comp_rule_id,
650 .help = "port identifier",
655 .name = "{group_id}",
657 .help = "group identifier",
664 .help = "priority level",
668 /* Top-level command. */
671 .type = "{command} {port_id} [{arg} [...]]",
672 .help = "manage ingress/egress flow rules",
673 .next = NEXT(NEXT_ENTRY
682 /* Sub-level commands. */
685 .help = "check whether a flow rule can be created",
686 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
687 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
692 .help = "create a flow rule",
693 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
694 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
699 .help = "destroy specific flow rules",
700 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
701 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
702 .call = parse_destroy,
706 .help = "destroy all flow rules",
707 .next = NEXT(NEXT_ENTRY(PORT_ID)),
708 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
713 .help = "query an existing flow rule",
714 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
716 NEXT_ENTRY(PORT_ID)),
717 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
718 ARGS_ENTRY(struct buffer, args.query.rule),
719 ARGS_ENTRY(struct buffer, port)),
724 .help = "list existing flow rules",
725 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
726 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
729 /* Destroy arguments. */
732 .help = "specify a rule identifier",
733 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
734 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
735 .call = parse_destroy,
737 /* Query arguments. */
741 .help = "action to query, must be part of the rule",
742 .call = parse_action,
745 /* List arguments. */
748 .help = "specify a group",
749 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
750 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
753 /* Validate/create attributes. */
756 .help = "specify a group",
757 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
758 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
763 .help = "specify a priority level",
764 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
765 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
770 .help = "affect rule to ingress",
771 .next = NEXT(next_vc_attr),
776 .help = "affect rule to egress",
777 .next = NEXT(next_vc_attr),
780 /* Validate/create pattern. */
783 .help = "submit a list of pattern items",
784 .next = NEXT(next_item),
789 .help = "match value perfectly (with full bit-mask)",
790 .call = parse_vc_spec,
792 [ITEM_PARAM_SPEC] = {
794 .help = "match value according to configured bit-mask",
795 .call = parse_vc_spec,
797 [ITEM_PARAM_LAST] = {
799 .help = "specify upper bound to establish a range",
800 .call = parse_vc_spec,
802 [ITEM_PARAM_MASK] = {
804 .help = "specify bit-mask with relevant bits set to one",
805 .call = parse_vc_spec,
807 [ITEM_PARAM_PREFIX] = {
809 .help = "generate bit-mask from a prefix length",
810 .call = parse_vc_spec,
814 .help = "specify next pattern item",
815 .next = NEXT(next_item),
819 .help = "end list of pattern items",
820 .priv = PRIV_ITEM(END, 0),
821 .next = NEXT(NEXT_ENTRY(ACTIONS)),
826 .help = "no-op pattern item",
827 .priv = PRIV_ITEM(VOID, 0),
828 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
833 .help = "perform actions when pattern does not match",
834 .priv = PRIV_ITEM(INVERT, 0),
835 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
840 .help = "match any protocol for the current layer",
841 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
842 .next = NEXT(item_any),
847 .help = "number of layers covered",
848 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
849 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
853 .help = "match packets addressed to the physical function",
854 .priv = PRIV_ITEM(PF, 0),
855 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
860 .help = "match packets addressed to a virtual function ID",
861 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
862 .next = NEXT(item_vf),
867 .help = "destination VF ID",
868 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
869 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
873 .help = "device-specific physical port index to use",
874 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
875 .next = NEXT(item_port),
878 [ITEM_PORT_INDEX] = {
880 .help = "physical port index",
881 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
882 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
886 .help = "match an arbitrary byte string",
887 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
888 .next = NEXT(item_raw),
891 [ITEM_RAW_RELATIVE] = {
893 .help = "look for pattern after the previous item",
894 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
895 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
898 [ITEM_RAW_SEARCH] = {
900 .help = "search pattern from offset (see also limit)",
901 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
902 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
905 [ITEM_RAW_OFFSET] = {
907 .help = "absolute or relative offset for pattern",
908 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
909 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
913 .help = "search area limit for start of pattern",
914 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
915 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
917 [ITEM_RAW_PATTERN] = {
919 .help = "byte string to look for",
920 .next = NEXT(item_raw,
922 NEXT_ENTRY(ITEM_PARAM_IS,
925 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
926 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
928 ITEM_RAW_PATTERN_SIZE)),
932 .help = "match Ethernet header",
933 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
934 .next = NEXT(item_eth),
939 .help = "destination MAC",
940 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
941 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
945 .help = "source MAC",
946 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
947 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
952 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
953 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
957 .help = "match 802.1Q/ad VLAN tag",
958 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
959 .next = NEXT(item_vlan),
964 .help = "tag protocol identifier",
965 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
966 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
970 .help = "tag control information",
971 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
972 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
976 .help = "match IPv4 header",
977 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
978 .next = NEXT(item_ipv4),
983 .help = "source address",
984 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
985 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
990 .help = "destination address",
991 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
992 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
997 .help = "match IPv6 header",
998 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
999 .next = NEXT(item_ipv6),
1004 .help = "source address",
1005 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1006 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1011 .help = "destination address",
1012 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1013 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1018 .help = "match ICMP header",
1019 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1020 .next = NEXT(item_icmp),
1023 [ITEM_ICMP_TYPE] = {
1025 .help = "ICMP packet type",
1026 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1027 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1030 [ITEM_ICMP_CODE] = {
1032 .help = "ICMP packet code",
1033 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1034 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1039 .help = "match UDP header",
1040 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1041 .next = NEXT(item_udp),
1046 .help = "UDP source port",
1047 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1048 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1053 .help = "UDP destination port",
1054 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1055 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1060 .help = "match TCP header",
1061 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1062 .next = NEXT(item_tcp),
1067 .help = "TCP source port",
1068 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1069 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1074 .help = "TCP destination port",
1075 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1076 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1081 .help = "match SCTP header",
1082 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1083 .next = NEXT(item_sctp),
1088 .help = "SCTP source port",
1089 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1090 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1095 .help = "SCTP destination port",
1096 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1097 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1102 .help = "match VXLAN header",
1103 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1104 .next = NEXT(item_vxlan),
1107 [ITEM_VXLAN_VNI] = {
1109 .help = "VXLAN identifier",
1110 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1111 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1113 /* Validate/create actions. */
1116 .help = "submit a list of associated actions",
1117 .next = NEXT(next_action),
1122 .help = "specify next action",
1123 .next = NEXT(next_action),
1127 .help = "end list of actions",
1128 .priv = PRIV_ACTION(END, 0),
1133 .help = "no-op action",
1134 .priv = PRIV_ACTION(VOID, 0),
1135 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1138 [ACTION_PASSTHRU] = {
1140 .help = "let subsequent rule process matched packets",
1141 .priv = PRIV_ACTION(PASSTHRU, 0),
1142 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1147 .help = "attach 32 bit value to packets",
1148 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1149 .next = NEXT(action_mark),
1152 [ACTION_MARK_ID] = {
1154 .help = "32 bit value to return with packets",
1155 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1156 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1157 .call = parse_vc_conf,
1161 .help = "flag packets",
1162 .priv = PRIV_ACTION(FLAG, 0),
1163 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1168 .help = "drop packets (note: passthru has priority)",
1169 .priv = PRIV_ACTION(DROP, 0),
1170 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1175 .help = "enable counters for this rule",
1176 .priv = PRIV_ACTION(COUNT, 0),
1177 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1182 .help = "redirect packets to physical device function",
1183 .priv = PRIV_ACTION(PF, 0),
1184 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1189 .help = "redirect packets to virtual device function",
1190 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1191 .next = NEXT(action_vf),
1194 [ACTION_VF_ORIGINAL] = {
1196 .help = "use original VF ID if possible",
1197 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1198 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1200 .call = parse_vc_conf,
1204 .help = "VF ID to redirect packets to",
1205 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1206 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1207 .call = parse_vc_conf,
1211 /** Remove and return last entry from argument stack. */
1212 static const struct arg *
1213 pop_args(struct context *ctx)
1215 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1218 /** Add entry on top of the argument stack. */
1220 push_args(struct context *ctx, const struct arg *arg)
1222 if (ctx->args_num == CTX_STACK_SIZE)
1224 ctx->args[ctx->args_num++] = arg;
1228 /** Spread value into buffer according to bit-mask. */
1230 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1232 uint32_t i = arg->size;
1240 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1249 unsigned int shift = 0;
1250 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1252 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1253 if (!(arg->mask[i] & (1 << shift)))
1258 *buf &= ~(1 << shift);
1259 *buf |= (val & 1) << shift;
1268 * Parse a prefix length and generate a bit-mask.
1270 * Last argument (ctx->args) is retrieved to determine mask size, storage
1271 * location and whether the result must use network byte ordering.
1274 parse_prefix(struct context *ctx, const struct token *token,
1275 const char *str, unsigned int len,
1276 void *buf, unsigned int size)
1278 const struct arg *arg = pop_args(ctx);
1279 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1286 /* Argument is expected. */
1290 u = strtoumax(str, &end, 0);
1291 if (errno || (size_t)(end - str) != len)
1296 extra = arg_entry_bf_fill(NULL, 0, arg);
1305 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1306 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1313 if (bytes > size || bytes + !!extra > size)
1317 buf = (uint8_t *)ctx->object + arg->offset;
1318 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1320 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1321 memset(buf, 0x00, size - bytes);
1323 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1327 memset(buf, 0xff, bytes);
1328 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1330 ((uint8_t *)buf)[bytes] = conv[extra];
1333 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1336 push_args(ctx, arg);
1340 /** Default parsing function for token name matching. */
1342 parse_default(struct context *ctx, const struct token *token,
1343 const char *str, unsigned int len,
1344 void *buf, unsigned int size)
1349 if (strncmp(str, token->name, len))
1354 /** Parse flow command, initialize output buffer for subsequent tokens. */
1356 parse_init(struct context *ctx, const struct token *token,
1357 const char *str, unsigned int len,
1358 void *buf, unsigned int size)
1360 struct buffer *out = buf;
1362 /* Token name must match. */
1363 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1365 /* Nothing else to do if there is no buffer. */
1368 /* Make sure buffer is large enough. */
1369 if (size < sizeof(*out))
1371 /* Initialize buffer. */
1372 memset(out, 0x00, sizeof(*out));
1373 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1376 ctx->objmask = NULL;
1380 /** Parse tokens for validate/create commands. */
1382 parse_vc(struct context *ctx, const struct token *token,
1383 const char *str, unsigned int len,
1384 void *buf, unsigned int size)
1386 struct buffer *out = buf;
1390 /* Token name must match. */
1391 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1393 /* Nothing else to do if there is no buffer. */
1396 if (!out->command) {
1397 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1399 if (sizeof(*out) > size)
1401 out->command = ctx->curr;
1404 ctx->objmask = NULL;
1405 out->args.vc.data = (uint8_t *)out + size;
1409 ctx->object = &out->args.vc.attr;
1410 ctx->objmask = NULL;
1411 switch (ctx->curr) {
1416 out->args.vc.attr.ingress = 1;
1419 out->args.vc.attr.egress = 1;
1422 out->args.vc.pattern =
1423 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1425 ctx->object = out->args.vc.pattern;
1426 ctx->objmask = NULL;
1429 out->args.vc.actions =
1430 (void *)RTE_ALIGN_CEIL((uintptr_t)
1431 (out->args.vc.pattern +
1432 out->args.vc.pattern_n),
1434 ctx->object = out->args.vc.actions;
1435 ctx->objmask = NULL;
1442 if (!out->args.vc.actions) {
1443 const struct parse_item_priv *priv = token->priv;
1444 struct rte_flow_item *item =
1445 out->args.vc.pattern + out->args.vc.pattern_n;
1447 data_size = priv->size * 3; /* spec, last, mask */
1448 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1449 (out->args.vc.data - data_size),
1451 if ((uint8_t *)item + sizeof(*item) > data)
1453 *item = (struct rte_flow_item){
1456 ++out->args.vc.pattern_n;
1458 ctx->objmask = NULL;
1460 const struct parse_action_priv *priv = token->priv;
1461 struct rte_flow_action *action =
1462 out->args.vc.actions + out->args.vc.actions_n;
1464 data_size = priv->size; /* configuration */
1465 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1466 (out->args.vc.data - data_size),
1468 if ((uint8_t *)action + sizeof(*action) > data)
1470 *action = (struct rte_flow_action){
1473 ++out->args.vc.actions_n;
1474 ctx->object = action;
1475 ctx->objmask = NULL;
1477 memset(data, 0, data_size);
1478 out->args.vc.data = data;
1479 ctx->objdata = data_size;
1483 /** Parse pattern item parameter type. */
1485 parse_vc_spec(struct context *ctx, const struct token *token,
1486 const char *str, unsigned int len,
1487 void *buf, unsigned int size)
1489 struct buffer *out = buf;
1490 struct rte_flow_item *item;
1496 /* Token name must match. */
1497 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1499 /* Parse parameter types. */
1500 switch (ctx->curr) {
1505 case ITEM_PARAM_SPEC:
1508 case ITEM_PARAM_LAST:
1511 case ITEM_PARAM_PREFIX:
1512 /* Modify next token to expect a prefix. */
1513 if (ctx->next_num < 2)
1515 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1517 case ITEM_PARAM_MASK:
1523 /* Nothing else to do if there is no buffer. */
1526 if (!out->args.vc.pattern_n)
1528 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1529 data_size = ctx->objdata / 3; /* spec, last, mask */
1530 /* Point to selected object. */
1531 ctx->object = out->args.vc.data + (data_size * index);
1533 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1534 item->mask = ctx->objmask;
1536 ctx->objmask = NULL;
1537 /* Update relevant item pointer. */
1538 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1543 /** Parse action configuration field. */
1545 parse_vc_conf(struct context *ctx, const struct token *token,
1546 const char *str, unsigned int len,
1547 void *buf, unsigned int size)
1549 struct buffer *out = buf;
1550 struct rte_flow_action *action;
1553 /* Token name must match. */
1554 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1556 /* Nothing else to do if there is no buffer. */
1559 if (!out->args.vc.actions_n)
1561 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1562 /* Point to selected object. */
1563 ctx->object = out->args.vc.data;
1564 ctx->objmask = NULL;
1565 /* Update configuration pointer. */
1566 action->conf = ctx->object;
1570 /** Parse tokens for destroy command. */
1572 parse_destroy(struct context *ctx, const struct token *token,
1573 const char *str, unsigned int len,
1574 void *buf, unsigned int size)
1576 struct buffer *out = buf;
1578 /* Token name must match. */
1579 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1581 /* Nothing else to do if there is no buffer. */
1584 if (!out->command) {
1585 if (ctx->curr != DESTROY)
1587 if (sizeof(*out) > size)
1589 out->command = ctx->curr;
1592 ctx->objmask = NULL;
1593 out->args.destroy.rule =
1594 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1598 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1599 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1602 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1603 ctx->objmask = NULL;
1607 /** Parse tokens for flush command. */
1609 parse_flush(struct context *ctx, const struct token *token,
1610 const char *str, unsigned int len,
1611 void *buf, unsigned int size)
1613 struct buffer *out = buf;
1615 /* Token name must match. */
1616 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1618 /* Nothing else to do if there is no buffer. */
1621 if (!out->command) {
1622 if (ctx->curr != FLUSH)
1624 if (sizeof(*out) > size)
1626 out->command = ctx->curr;
1629 ctx->objmask = NULL;
1634 /** Parse tokens for query command. */
/*
 * Token callback for "flow query".  Validates the token and records the
 * command; the rule ID and action name are filled in by later tokens
 * (see parse_action() and parse_int()).
 *
 * NOTE(review): error-return lines are elided in this excerpt.
 */
1636 parse_query(struct context *ctx, const struct token *token,
1637 const char *str, unsigned int len,
1638 void *buf, unsigned int size)
1640 struct buffer *out = buf;
1642 /* Token name must match. */
1643 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1645 /* Nothing else to do if there is no buffer. */
/* First call: check the token is QUERY and the buffer fits the header. */
1648 if (!out->command) {
1649 if (ctx->curr != QUERY)
1651 if (sizeof(*out) > size)
1653 out->command = ctx->curr;
1656 ctx->objmask = NULL;
1661 /** Parse action names. */
/*
 * Match the input string against the names of tokens listed in
 * next_action[] and store the corresponding action type (taken from the
 * matched token's private data) into the object field described by the
 * argument popped from the stack.
 *
 * NOTE(review): loop exit, priv assignment and return paths are elided
 * in this excerpt; arg is pushed back on the failure path.
 */
1663 parse_action(struct context *ctx, const struct token *token,
1664 const char *str, unsigned int len,
1665 void *buf, unsigned int size)
1667 struct buffer *out = buf;
/* Destination descriptor (offset/size) comes from the argument stack. */
1668 const struct arg *arg = pop_args(ctx);
1672 /* Argument is expected. */
1675 /* Parse action name. */
1676 for (i = 0; next_action[i]; ++i) {
1677 const struct parse_action_priv *priv;
1679 token = &token_list[next_action[i]];
/* Candidate name must match the (length-bounded) input token. */
1680 if (strncmp(token->name, str, len))
/* Copy the matched action's type into the object at arg->offset. */
1686 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument for a later parser and fail. */
1692 push_args(ctx, arg);
1696 /** Parse tokens for list command. */
/*
 * Token callback for "flow list" and its trailing group IDs.  Mirrors
 * parse_destroy(): the first call validates the LIST token and records
 * the command; group IDs are then accumulated in an array placed right
 * after the buffer header (aligned with RTE_ALIGN_CEIL), each addition
 * bounds-checked against the output buffer size.
 *
 * NOTE(review): error-return lines are elided in this excerpt.
 */
1698 parse_list(struct context *ctx, const struct token *token,
1699 const char *str, unsigned int len,
1700 void *buf, unsigned int size)
1702 struct buffer *out = buf;
1704 /* Token name must match. */
1705 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1707 /* Nothing else to do if there is no buffer. */
1710 if (!out->command) {
1711 if (ctx->curr != LIST)
1713 if (sizeof(*out) > size)
1715 out->command = ctx->curr;
1718 ctx->objmask = NULL;
/* Group ID array starts right after the header, suitably aligned. */
1719 out->args.list.group =
1720 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another group ID if it would overflow the output buffer. */
1724 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1725 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
/* Point ctx->object at the new slot for parse_int() to fill. */
1728 ctx->object = out->args.list.group + out->args.list.group_n++;
1729 ctx->objmask = NULL;
1734 * Parse signed/unsigned integers 8 to 64-bit long.
1736 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts the token with strtoimax()/strtoumax() (base auto-detected),
 * then stores the value into ctx->object at the popped argument's offset,
 * honoring the argument's storage size and, when arg->hton is set, network
 * byte order.  A 24-bit (uint8_t[3]) case is handled explicitly for both
 * endiannesses.  When ctx->objmask is set, the same field in the mask
 * object is filled with all-ones.
 *
 * NOTE(review): switch header, break statements and return paths are
 * elided in this excerpt.
 */
1740 parse_int(struct context *ctx, const struct token *token,
1741 const char *str, unsigned int len,
1742 void *buf, unsigned int size)
/* Storage descriptor (offset/size/sign/hton/mask) from the arg stack. */
1744 const struct arg *arg = pop_args(ctx);
1749 /* Argument is expected. */
/* Signed and unsigned parses share one uintmax_t holding variable. */
1754 (uintmax_t)strtoimax(str, &end, 0) :
1755 strtoumax(str, &end, 0);
/* Whole token must be consumed and no range error reported. */
1756 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments go through the dedicated fill helper instead. */
1761 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1762 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain field: write directly into the object at the argument offset. */
1766 buf = (uint8_t *)ctx->object + arg->offset;
1770 case sizeof(uint8_t):
1771 *(uint8_t *)buf = u;
1773 case sizeof(uint16_t):
1774 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI-like values) need a manual byte swap. */
1776 case sizeof(uint8_t [3]):
1777 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1779 ((uint8_t *)buf)[0] = u;
1780 ((uint8_t *)buf)[1] = u >> 8;
1781 ((uint8_t *)buf)[2] = u >> 16;
1785 ((uint8_t *)buf)[0] = u >> 16;
1786 ((uint8_t *)buf)[1] = u >> 8;
1787 ((uint8_t *)buf)[2] = u;
1789 case sizeof(uint32_t):
1790 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1792 case sizeof(uint64_t):
1793 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Fill the mask object's matching field unless it aliases the value. */
1798 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1800 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Failure path: restore the argument for an alternative parser. */
1805 push_args(ctx, arg);
1812 * Two arguments (ctx->args) are retrieved from the stack to store data and
1813 * its length (in that order).
/*
 * Copies the raw token bytes into the data field and records their length
 * through a recursive parse_int() call on a stringified length.  Unused
 * trailing bytes of the data field are filled with 0x55 so stale content
 * is recognizable; the mask object (when present) gets 0xff over the
 * copied length.
 *
 * NOTE(review): argument NULL checks and return paths are elided in this
 * excerpt.
 */
1816 parse_string(struct context *ctx, const struct token *token,
1817 const char *str, unsigned int len,
1818 void *buf, unsigned int size)
/* Popped in reverse push order: data descriptor first, then length. */
1820 const struct arg *arg_data = pop_args(ctx);
1821 const struct arg *arg_len = pop_args(ctx);
/* Scratch buffer for the decimal rendering of len. */
1822 char tmp[16]; /* Ought to be enough. */
1825 /* Arguments are expected. */
1829 push_args(ctx, arg_data);
/* Destination capacity comes from the data argument, not the caller. */
1832 size = arg_data->size;
1833 /* Bit-mask fill is not supported. */
1834 if (arg_data->mask || size < len)
1838 /* Let parse_int() fill length information first. */
1839 ret = snprintf(tmp, sizeof(tmp), "%u", len);
1842 push_args(ctx, arg_len);
1843 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
1848 buf = (uint8_t *)ctx->object + arg_data->offset;
1849 /* Output buffer is not necessarily NUL-terminated. */
1850 memcpy(buf, str, len);
/* Poison the unused tail so truncated reads are obvious. */
1851 memset((uint8_t *)buf + len, 0x55, size - len);
1853 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Failure path: restore both arguments in original order. */
1856 push_args(ctx, arg_len);
1857 push_args(ctx, arg_data);
1862 * Parse a MAC address.
1864 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Parses the token with cmdline_parse_etheraddr() into a temporary
 * ether_addr, requiring the destination field to be exactly that size,
 * then copies it into ctx->object and sets the mask field (when present)
 * to all-ones.
 *
 * NOTE(review): NULL checks and return paths are elided in this excerpt.
 */
1868 parse_mac_addr(struct context *ctx, const struct token *token,
1869 const char *str, unsigned int len,
1870 void *buf, unsigned int size)
1872 const struct arg *arg = pop_args(ctx);
/* Temporary so a parse failure never corrupts the object. */
1873 struct ether_addr tmp;
1877 /* Argument is expected. */
1881 /* Bit-mask fill is not supported. */
1882 if (arg->mask || size != sizeof(tmp))
1884 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must have been consumed by the address parser. */
1885 if (ret < 0 || (unsigned int)ret != len)
1889 buf = (uint8_t *)ctx->object + arg->offset;
1890 memcpy(buf, &tmp, size);
1892 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Failure path: restore the argument. */
1895 push_args(ctx, arg);
1900 * Parse an IPv4 address.
1902 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Copies the token into a NUL-terminated scratch string, converts it with
 * inet_pton(AF_INET), and stores the network-order result into
 * ctx->object; the mask field (when present) is set to all-ones.  If
 * inet_pton() rejects the token, parsing falls back to parse_int() so a
 * plain integer is also accepted.
 *
 * NOTE(review): scratch-buffer declaration, NUL termination and return
 * paths are elided in this excerpt.
 */
1906 parse_ipv4_addr(struct context *ctx, const struct token *token,
1907 const char *str, unsigned int len,
1908 void *buf, unsigned int size)
1910 const struct arg *arg = pop_args(ctx);
1915 /* Argument is expected. */
1919 /* Bit-mask fill is not supported. */
1920 if (arg->mask || size != sizeof(tmp))
1922 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the bounded token. */
1925 memcpy(str2, str, len);
1927 ret = inet_pton(AF_INET, str2, &tmp);
1929 /* Attempt integer parsing. */
1930 push_args(ctx, arg);
1931 return parse_int(ctx, token, str, len, buf, size);
1935 buf = (uint8_t *)ctx->object + arg->offset;
1936 memcpy(buf, &tmp, size);
1938 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Failure path: restore the argument. */
1941 push_args(ctx, arg);
1946 * Parse an IPv6 address.
1948 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Same approach as parse_ipv4_addr() but with AF_INET6 and struct
 * in6_addr, and without the integer fallback: the NUL-terminated copy of
 * the token is converted with inet_pton() and stored network-order into
 * ctx->object; the mask field (when present) is set to all-ones.
 *
 * NOTE(review): scratch-buffer declaration and return paths are elided
 * in this excerpt.
 */
1952 parse_ipv6_addr(struct context *ctx, const struct token *token,
1953 const char *str, unsigned int len,
1954 void *buf, unsigned int size)
1956 const struct arg *arg = pop_args(ctx);
/* Temporary so a parse failure never corrupts the object. */
1958 struct in6_addr tmp;
1962 /* Argument is expected. */
1966 /* Bit-mask fill is not supported. */
1967 if (arg->mask || size != sizeof(tmp))
1969 /* Only network endian is supported. */
1972 memcpy(str2, str, len);
1974 ret = inet_pton(AF_INET6, str2, &tmp);
1979 buf = (uint8_t *)ctx->object + arg->offset;
1980 memcpy(buf, &tmp, size);
1982 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Failure path: restore the argument. */
1985 push_args(ctx, arg);
1989 /** Boolean values (even indices stand for false). */
/* NOTE(review): entries are elided in this excerpt; parse_boolean() and
 * comp_boolean() iterate until a NULL terminator, so the array must end
 * with one. */
1990 static const char *const boolean_name[] = {
1999 * Parse a boolean value.
2001 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Looks the token up in boolean_name[]; a match at an even index means
 * false, odd means true (hence the "i & 1" test).  The token is then
 * rewritten as "0"/"1" and delegated to parse_int() with the argument
 * pushed back, so storage size/endianness handling is shared.  On
 * success the original token length is returned, not parse_int()'s.
 *
 * NOTE(review): NULL checks and loop exit lines are elided in this
 * excerpt.
 */
2005 parse_boolean(struct context *ctx, const struct token *token,
2006 const char *str, unsigned int len,
2007 void *buf, unsigned int size)
2009 const struct arg *arg = pop_args(ctx);
2013 /* Argument is expected. */
/* Scan the (NULL-terminated) name table for a length-bounded match. */
2016 for (i = 0; boolean_name[i]; ++i)
2017 if (!strncmp(str, boolean_name[i], len))
2019 /* Process token as integer. */
2020 if (boolean_name[i])
2021 str = i & 1 ? "1" : "0";
2022 push_args(ctx, arg);
2023 ret = parse_int(ctx, token, str, strlen(str), buf, size);
2024 return ret > 0 ? (int)len : ret;
2027 /** Parse port and update context. */
/*
 * Parses a port ID through parse_int() into a throwaway buffer (a
 * compound-literal struct buffer) and, on success, copies the result
 * into ctx->port so later tokens (e.g. rule-ID completion) know which
 * port is being addressed.
 *
 * NOTE(review): object setup and return lines are elided in this
 * excerpt.
 */
2029 parse_port(struct context *ctx, const struct token *token,
2030 const char *str, unsigned int len,
2031 void *buf, unsigned int size)
/* Scratch buffer; only the .port field is of interest. */
2033 struct buffer *out = &(struct buffer){ .port = 0 };
2041 ctx->objmask = NULL;
2042 size = sizeof(*out);
2044 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the parsed port for context-sensitive completion. */
2046 ctx->port = out->port;
2052 /** No completion. */
/* Completion callback stub for tokens that cannot be completed; body is
 * elided in this excerpt. */
2054 comp_none(struct context *ctx, const struct token *token,
2055 unsigned int ent, char *buf, unsigned int size)
2065 /** Complete boolean values. */
/*
 * Completion callback over boolean_name[]: when buf is non-NULL, writes
 * entry `ent` into it via snprintf() and returns the would-be length;
 * otherwise the loop counts entries (tail of function elided in this
 * excerpt).
 */
2067 comp_boolean(struct context *ctx, const struct token *token,
2068 unsigned int ent, char *buf, unsigned int size)
2074 for (i = 0; boolean_name[i]; ++i)
2075 if (buf && i == ent)
2076 return snprintf(buf, size, "%s", boolean_name[i]);
2082 /** Complete action names. */
/*
 * Completion callback over next_action[]: when buf is non-NULL, writes
 * the name of the token at index `ent`; otherwise the loop counts
 * entries (tail of function elided in this excerpt).
 */
2084 comp_action(struct context *ctx, const struct token *token,
2085 unsigned int ent, char *buf, unsigned int size)
2091 for (i = 0; next_action[i]; ++i)
2092 if (buf && i == ent)
2093 return snprintf(buf, size, "%s",
2094 token_list[next_action[i]].name);
2100 /** Complete available ports. */
/*
 * Completion callback enumerating active port IDs via FOREACH_PORT():
 * when buf is non-NULL, entry `ent` is rendered as a decimal port number
 * (tail of function elided in this excerpt).
 */
2102 comp_port(struct context *ctx, const struct token *token,
2103 unsigned int ent, char *buf, unsigned int size)
2110 FOREACH_PORT(p, ports) {
2111 if (buf && i == ent)
2112 return snprintf(buf, size, "%u", p);
2120 /** Complete available rule IDs. */
/*
 * Completion callback for flow rule IDs on the port previously stored in
 * ctx->port by parse_port().  Bails out when that port is invalid or the
 * RTE_PORT_ALL wildcard, then walks the port's flow_list, rendering entry
 * `ent` as a decimal rule ID (tail of function elided in this excerpt).
 */
2122 comp_rule_id(struct context *ctx, const struct token *token,
2123 unsigned int ent, char *buf, unsigned int size)
2126 struct rte_port *port;
2127 struct port_flow *pf;
/* Rule IDs only make sense for one concrete, valid port. */
2130 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2131 ctx->port == (uint16_t)RTE_PORT_ALL)
2133 port = &ports[ctx->port];
2134 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2135 if (buf && i == ent)
2136 return snprintf(buf, size, "%u", pf->id);
2144 /** Internal context. */
/* Single shared parser state used by every cmdline callback below; the
 * parser is therefore stateful across calls and reset explicitly via
 * cmd_flow_context_init(). */
2145 static struct context cmd_flow_context;
2147 /** Global parser instance (cmdline API). */
/* Forward declaration: cmd_flow_get_help() updates cmd_flow.help_str
 * before the full definition appears at the end of the file. */
2148 cmdline_parse_inst_t cmd_flow;
2150 /** Initialize context. */
/* Resets the shared parser context field by field before a new command
 * line is parsed; most field assignments are elided in this excerpt. */
2152 cmd_flow_context_init(struct context *ctx)
2154 /* A full memset() is not necessary. */
2165 ctx->objmask = NULL;
2168 /** Parse a token (cmdline API). */
/*
 * Main dispatch: extracts one whitespace/'#'-delimited token from src,
 * tries every candidate token in the current next-list (using the
 * token's own call() parser when present, parse_default() otherwise),
 * then pushes the matched token's follow-up token lists and argument
 * descriptors onto the context stacks.  Also detects end-of-line so the
 * context knows when the command may terminate.
 *
 * NOTE(review): several declarations, loop bodies and return statements
 * are elided in this excerpt.
 */
2170 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2173 struct context *ctx = &cmd_flow_context;
2174 const struct token *token;
2175 const enum index *list;
2180 /* Restart as requested. */
2182 cmd_flow_context_init(ctx);
2183 token = &token_list[ctx->curr];
2184 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2187 for (len = 0; src[len]; ++len)
2188 if (src[len] == '#' || isspace(src[len]))
2192 /* Last argument and EOL detection. */
2193 for (i = len; src[i]; ++i)
2194 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2196 else if (!isspace(src[i])) {
2201 if (src[i] == '\r' || src[i] == '\n') {
2205 /* Initialize context if necessary. */
2206 if (!ctx->next_num) {
2209 ctx->next[ctx->next_num++] = token->next[0];
2211 /* Process argument through candidates. */
2212 ctx->prev = ctx->curr;
/* Candidates come from the top of the next-list stack. */
2213 list = ctx->next[ctx->next_num - 1];
2214 for (i = 0; list[i]; ++i) {
2215 const struct token *next = &token_list[list[i]];
2218 ctx->curr = list[i];
/* Custom parser takes precedence over the generic name matcher. */
2220 tmp = next->call(ctx, next, src, len, result, size);
2222 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume exactly the token; otherwise try the next. */
2223 if (tmp == -1 || tmp != len)
2231 /* Push subsequent tokens if any. */
2233 for (i = 0; token->next[i]; ++i) {
/* Guard against overflowing the fixed-size next-list stack. */
2234 if (ctx->next_num == RTE_DIM(ctx->next))
2236 ctx->next[ctx->next_num++] = token->next[i];
2238 /* Push arguments if any. */
2240 for (i = 0; token->args[i]; ++i) {
2241 if (ctx->args_num == RTE_DIM(ctx->args))
2243 ctx->args[ctx->args_num++] = token->args[i];
2248 /** Return number of completion entries (cmdline API). */
/*
 * Counts candidates in the current next-list (falling back to the current
 * token's first next-list when the stack is empty).  If exactly one
 * candidate exists and it has a completion callback, that callback is
 * asked for its own entry count instead.
 *
 * NOTE(review): flag/return lines are elided in this excerpt.
 */
2250 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2252 struct context *ctx = &cmd_flow_context;
2253 const struct token *token = &token_list[ctx->curr];
2254 const enum index *list;
2258 /* Tell cmd_flow_parse() that context must be reinitialized. */
2260 /* Count number of tokens in current list. */
2262 list = ctx->next[ctx->next_num - 1];
2264 list = token->next[0];
2265 for (i = 0; list[i]; ++i)
2270 * If there is a single token, use its completion callback, otherwise
2271 * return the number of entries.
2273 token = &token_list[list[0]];
2274 if (i == 1 && token->comp) {
2275 /* Save index for cmd_flow_get_help(). */
2276 ctx->prev = list[0];
/* NULL buffer: callback returns its entry count only. */
2277 return token->comp(ctx, token, 0, NULL, 0);
2282 /** Return a completion entry (cmdline API). */
/*
 * Writes completion entry `index` into dst.  With a single candidate that
 * has a completion callback, the callback produces the entry; otherwise
 * the candidate token's own name at `index` is used.  ctx->prev is saved
 * either way so cmd_flow_get_help() can describe the completed token.
 *
 * NOTE(review): flag, bounds-check and return lines are elided in this
 * excerpt.
 */
2284 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2285 char *dst, unsigned int size)
2287 struct context *ctx = &cmd_flow_context;
2288 const struct token *token = &token_list[ctx->curr];
2289 const enum index *list;
2293 /* Tell cmd_flow_parse() that context must be reinitialized. */
2295 /* Count number of tokens in current list. */
2297 list = ctx->next[ctx->next_num - 1];
2299 list = token->next[0];
2300 for (i = 0; list[i]; ++i)
2304 /* If there is a single token, use its completion callback. */
2305 token = &token_list[list[0]];
2306 if (i == 1 && token->comp) {
2307 /* Save index for cmd_flow_get_help(). */
2308 ctx->prev = list[0];
/* Map the callback's negative results to the cmdline API's -1. */
2309 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2311 /* Otherwise make sure the index is valid and use defaults. */
2314 token = &token_list[list[index]];
2315 snprintf(dst, size, "%s", token->name);
2316 /* Save index for cmd_flow_get_help(). */
2317 ctx->prev = list[index];
2321 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the previously completed token's type (or "TOKEN") into dst and
 * points the global cmd_flow.help_str at the token's help text, falling
 * back to its name when no help text exists.
 *
 * NOTE(review): flag and return lines are elided in this excerpt.
 */
2323 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2325 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
2326 const struct token *token = &token_list[ctx->prev];
2329 /* Tell cmd_flow_parse() that context must be reinitialized. */
2333 /* Set token type and update global help with details. */
2334 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
2336 cmd_flow.help_str = token->help;
2338 cmd_flow.help_str = token->name;
2342 /** Token definition template (cmdline API). */
/* Single ops vtable shared by every dynamic token returned from
 * cmd_flow_tok(); wires the four cmdline callbacks defined above.
 * Closing braces are elided in this excerpt. */
2343 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2344 .ops = &(struct cmdline_token_ops){
2345 .parse = cmd_flow_parse,
2346 .complete_get_nb = cmd_flow_complete_get_nb,
2347 .complete_get_elt = cmd_flow_complete_get_elt,
2348 .get_help = cmd_flow_get_help,
2353 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library to fetch each successive token header.
 * Reinitializes the context before the first token, stops when no more
 * tokens are expected, and checks whether an end-of-line seen earlier
 * may legally terminate the command; otherwise hands back the shared
 * cmd_flow_token_hdr template.
 *
 * NOTE(review): several branch bodies are elided in this excerpt.
 */
2355 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2356 cmdline_parse_token_hdr_t *(*hdrs)[])
2358 struct context *ctx = &cmd_flow_context;
2360 /* Always reinitialize context before requesting the first token. */
2362 cmd_flow_context_init(ctx);
2363 /* Return NULL when no more tokens are expected. */
2364 if (!ctx->next_num && ctx->curr) {
2368 /* Determine if command should end here. */
2369 if (ctx->eol && ctx->last && ctx->next_num) {
2370 const enum index *list = ctx->next[ctx->next_num - 1];
2373 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share one header template. */
2380 *hdr = &cmd_flow_token_hdr;
2383 /** Dispatch parsed buffer to function calls. */
/*
 * Executes the fully parsed command buffer by forwarding its contents to
 * the matching port_flow_*() backend (validate/create/destroy/flush/
 * query/list).  Break statements and the default case are elided in this
 * excerpt.
 */
2385 cmd_flow_parsed(const struct buffer *in)
2387 switch (in->command) {
2389 port_flow_validate(in->port, &in->args.vc.attr,
2390 in->args.vc.pattern, in->args.vc.actions);
2393 port_flow_create(in->port, &in->args.vc.attr,
2394 in->args.vc.pattern, in->args.vc.actions);
2397 port_flow_destroy(in->port, in->args.destroy.rule_n,
2398 in->args.destroy.rule);
2401 port_flow_flush(in->port);
2404 port_flow_query(in->port, in->args.query.rule,
2405 in->args.query.action);
2408 port_flow_list(in->port, in->args.list.group_n,
2409 in->args.list.group);
2416 /** Token generator and output processing callback (cmdline API). */
/*
 * Dual-purpose callback registered with the cmdline library: depending on
 * the (elided) condition it either generates the next token via
 * cmd_flow_tok() or dispatches a completed buffer via cmd_flow_parsed().
 */
2418 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2421 cmd_flow_tok(arg0, arg2);
2423 cmd_flow_parsed(arg0);
2426 /** Global parser instance (cmdline API). */
2427 cmdline_parse_inst_t cmd_flow = {
2429 .data = NULL, /**< Unused. */
2430 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2433 }, /**< Tokens are returned by cmd_flow_tok(). */