4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
84 /* Destroy arguments. */
87 /* Query arguments. */
93 /* Validate/create arguments. */
99 /* Validate/create pattern. */
167 /* Validate/create actions. */
191 /** Size of pattern[] field in struct rte_flow_item_raw. */
192 #define ITEM_RAW_PATTERN_SIZE 36
194 /** Storage size for struct rte_flow_item_raw including pattern. */
195 #define ITEM_RAW_SIZE \
196 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
198 /** Number of queue[] entries in struct rte_flow_action_rss. */
199 #define ACTION_RSS_NUM 32
201 /** Storage size for struct rte_flow_action_rss including queues. */
202 #define ACTION_RSS_SIZE \
203 (offsetof(struct rte_flow_action_rss, queue) + \
204 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
206 /** Maximum number of subsequent tokens and arguments on the stack. */
207 #define CTX_STACK_SIZE 16
209 /** Parser context. */
211 /** Stack of subsequent token lists to process. */
212 const enum index *next[CTX_STACK_SIZE];
213 /** Arguments for stacked tokens. */
214 const void *args[CTX_STACK_SIZE];
215 enum index curr; /**< Current token index. */
216 enum index prev; /**< Index of the last token seen. */
217 int next_num; /**< Number of entries in next[]. */
218 int args_num; /**< Number of entries in args[]. */
219 uint32_t reparse:1; /**< Start over from the beginning. */
220 uint32_t eol:1; /**< EOL has been detected. */
221 uint32_t last:1; /**< No more arguments. */
222 uint16_t port; /**< Current port ID (for completions). */
223 uint32_t objdata; /**< Object-specific data. */
224 void *object; /**< Address of current object for relative offsets. */
225 void *objmask; /**< Object a full mask must be written to. */
228 /** Token argument. */
230 uint32_t hton:1; /**< Use network byte ordering. */
231 uint32_t sign:1; /**< Value is signed. */
232 uint32_t offset; /**< Relative offset from ctx->object. */
233 uint32_t size; /**< Field size. */
234 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
237 /** Parser token definition. */
239 /** Type displayed during completion (defaults to "TOKEN"). */
241 /** Help displayed during completion (defaults to token name). */
243 /** Private data used by parser functions. */
246 * Lists of subsequent tokens to push on the stack. Each call to the
247 * parser consumes the last entry of that stack.
249 const enum index *const *next;
250 /** Arguments stack for subsequent tokens that need them. */
251 const struct arg *const *args;
253 * Token-processing callback, returns -1 in case of error, the
254 * length of the matched string otherwise. If NULL, attempts to
255 * match the token name.
257 * If buf is not NULL, the result should be stored in it according
258 * to context. An error is returned if not large enough.
260 int (*call)(struct context *ctx, const struct token *token,
261 const char *str, unsigned int len,
262 void *buf, unsigned int size);
264 * Callback that provides possible values for this token, used for
265 * completion. Returns -1 in case of error, the number of possible
266 * values otherwise. If NULL, the token name is used.
268 * If buf is not NULL, entry index ent is written to buf and the
269 * full length of the entry is returned (same behavior as
272 int (*comp)(struct context *ctx, const struct token *token,
273 unsigned int ent, char *buf, unsigned int size);
274 /** Mandatory token name, no default value. */
278 /** Static initializer for the next field. */
279 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
281 /** Static initializer for a NEXT() entry. */
282 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
284 /** Static initializer for the args field. */
285 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
287 /** Static initializer for ARGS() to target a field. */
288 #define ARGS_ENTRY(s, f) \
289 (&(const struct arg){ \
290 .offset = offsetof(s, f), \
291 .size = sizeof(((s *)0)->f), \
294 /** Static initializer for ARGS() to target a bit-field. */
295 #define ARGS_ENTRY_BF(s, f, b) \
296 (&(const struct arg){ \
298 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
301 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
302 #define ARGS_ENTRY_MASK(s, f, m) \
303 (&(const struct arg){ \
304 .offset = offsetof(s, f), \
305 .size = sizeof(((s *)0)->f), \
306 .mask = (const void *)(m), \
309 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
310 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
311 (&(const struct arg){ \
313 .offset = offsetof(s, f), \
314 .size = sizeof(((s *)0)->f), \
315 .mask = (const void *)(m), \
318 /** Static initializer for ARGS() to target a pointer. */
319 #define ARGS_ENTRY_PTR(s, f) \
320 (&(const struct arg){ \
321 .size = sizeof(*((s *)0)->f), \
324 /** Static initializer for ARGS() with arbitrary size. */
325 #define ARGS_ENTRY_USZ(s, f, sz) \
326 (&(const struct arg){ \
327 .offset = offsetof(s, f), \
331 /** Same as ARGS_ENTRY() using network byte ordering. */
332 #define ARGS_ENTRY_HTON(s, f) \
333 (&(const struct arg){ \
335 .offset = offsetof(s, f), \
336 .size = sizeof(((s *)0)->f), \
339 /** Parser output buffer layout expected by cmd_flow_parsed(). */
341 enum index command; /**< Flow command. */
342 uint16_t port; /**< Affected port ID. */
345 struct rte_flow_attr attr;
346 struct rte_flow_item *pattern;
347 struct rte_flow_action *actions;
351 } vc; /**< Validate/create arguments. */
355 } destroy; /**< Destroy arguments. */
358 enum rte_flow_action_type action;
359 } query; /**< Query arguments. */
363 } list; /**< List arguments. */
364 } args; /**< Command arguments. */
367 /** Private data for pattern items. */
368 struct parse_item_priv {
369 enum rte_flow_item_type type; /**< Item type. */
370 uint32_t size; /**< Size of item specification structure. */
373 #define PRIV_ITEM(t, s) \
374 (&(const struct parse_item_priv){ \
375 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
379 /** Private data for actions. */
380 struct parse_action_priv {
381 enum rte_flow_action_type type; /**< Action type. */
382 uint32_t size; /**< Size of action configuration structure. */
385 #define PRIV_ACTION(t, s) \
386 (&(const struct parse_action_priv){ \
387 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
391 static const enum index next_vc_attr[] = {
400 static const enum index next_destroy_attr[] = {
406 static const enum index next_list_attr[] = {
412 static const enum index item_param[] = {
421 static const enum index next_item[] = {
444 static const enum index item_any[] = {
450 static const enum index item_vf[] = {
456 static const enum index item_port[] = {
462 static const enum index item_raw[] = {
472 static const enum index item_eth[] = {
480 static const enum index item_vlan[] = {
490 static const enum index item_ipv4[] = {
500 static const enum index item_ipv6[] = {
511 static const enum index item_icmp[] = {
518 static const enum index item_udp[] = {
525 static const enum index item_tcp[] = {
532 static const enum index item_sctp[] = {
541 static const enum index item_vxlan[] = {
547 static const enum index item_mpls[] = {
553 static const enum index item_gre[] = {
559 static const enum index next_action[] = {
575 static const enum index action_mark[] = {
581 static const enum index action_queue[] = {
587 static const enum index action_dup[] = {
593 static const enum index action_rss[] = {
599 static const enum index action_vf[] = {
606 static int parse_init(struct context *, const struct token *,
607 const char *, unsigned int,
608 void *, unsigned int);
609 static int parse_vc(struct context *, const struct token *,
610 const char *, unsigned int,
611 void *, unsigned int);
612 static int parse_vc_spec(struct context *, const struct token *,
613 const char *, unsigned int, void *, unsigned int);
614 static int parse_vc_conf(struct context *, const struct token *,
615 const char *, unsigned int, void *, unsigned int);
616 static int parse_vc_action_rss_queue(struct context *, const struct token *,
617 const char *, unsigned int, void *,
619 static int parse_destroy(struct context *, const struct token *,
620 const char *, unsigned int,
621 void *, unsigned int);
622 static int parse_flush(struct context *, const struct token *,
623 const char *, unsigned int,
624 void *, unsigned int);
625 static int parse_query(struct context *, const struct token *,
626 const char *, unsigned int,
627 void *, unsigned int);
628 static int parse_action(struct context *, const struct token *,
629 const char *, unsigned int,
630 void *, unsigned int);
631 static int parse_list(struct context *, const struct token *,
632 const char *, unsigned int,
633 void *, unsigned int);
634 static int parse_int(struct context *, const struct token *,
635 const char *, unsigned int,
636 void *, unsigned int);
637 static int parse_prefix(struct context *, const struct token *,
638 const char *, unsigned int,
639 void *, unsigned int);
640 static int parse_boolean(struct context *, const struct token *,
641 const char *, unsigned int,
642 void *, unsigned int);
643 static int parse_string(struct context *, const struct token *,
644 const char *, unsigned int,
645 void *, unsigned int);
646 static int parse_mac_addr(struct context *, const struct token *,
647 const char *, unsigned int,
648 void *, unsigned int);
649 static int parse_ipv4_addr(struct context *, const struct token *,
650 const char *, unsigned int,
651 void *, unsigned int);
652 static int parse_ipv6_addr(struct context *, const struct token *,
653 const char *, unsigned int,
654 void *, unsigned int);
655 static int parse_port(struct context *, const struct token *,
656 const char *, unsigned int,
657 void *, unsigned int);
658 static int comp_none(struct context *, const struct token *,
659 unsigned int, char *, unsigned int);
660 static int comp_boolean(struct context *, const struct token *,
661 unsigned int, char *, unsigned int);
662 static int comp_action(struct context *, const struct token *,
663 unsigned int, char *, unsigned int);
664 static int comp_port(struct context *, const struct token *,
665 unsigned int, char *, unsigned int);
666 static int comp_rule_id(struct context *, const struct token *,
667 unsigned int, char *, unsigned int);
668 static int comp_vc_action_rss_queue(struct context *, const struct token *,
669 unsigned int, char *, unsigned int);
671 /** Token definitions. */
672 static const struct token token_list[] = {
673 /* Special tokens. */
676 .help = "null entry, abused as the entry point",
677 .next = NEXT(NEXT_ENTRY(FLOW)),
682 .help = "command may end here",
688 .help = "integer value",
693 .name = "{unsigned}",
695 .help = "unsigned integer value",
702 .help = "prefix length for bit-mask",
703 .call = parse_prefix,
709 .help = "any boolean value",
710 .call = parse_boolean,
711 .comp = comp_boolean,
716 .help = "fixed string",
717 .call = parse_string,
721 .name = "{MAC address}",
723 .help = "standard MAC address notation",
724 .call = parse_mac_addr,
728 .name = "{IPv4 address}",
729 .type = "IPV4 ADDRESS",
730 .help = "standard IPv4 address notation",
731 .call = parse_ipv4_addr,
735 .name = "{IPv6 address}",
736 .type = "IPV6 ADDRESS",
737 .help = "standard IPv6 address notation",
738 .call = parse_ipv6_addr,
744 .help = "rule identifier",
746 .comp = comp_rule_id,
751 .help = "port identifier",
756 .name = "{group_id}",
758 .help = "group identifier",
765 .help = "priority level",
769 /* Top-level command. */
772 .type = "{command} {port_id} [{arg} [...]]",
773 .help = "manage ingress/egress flow rules",
774 .next = NEXT(NEXT_ENTRY
783 /* Sub-level commands. */
786 .help = "check whether a flow rule can be created",
787 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
788 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
793 .help = "create a flow rule",
794 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
795 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
800 .help = "destroy specific flow rules",
801 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
802 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
803 .call = parse_destroy,
807 .help = "destroy all flow rules",
808 .next = NEXT(NEXT_ENTRY(PORT_ID)),
809 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
814 .help = "query an existing flow rule",
815 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
817 NEXT_ENTRY(PORT_ID)),
818 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
819 ARGS_ENTRY(struct buffer, args.query.rule),
820 ARGS_ENTRY(struct buffer, port)),
825 .help = "list existing flow rules",
826 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
827 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
830 /* Destroy arguments. */
833 .help = "specify a rule identifier",
834 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
835 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
836 .call = parse_destroy,
838 /* Query arguments. */
842 .help = "action to query, must be part of the rule",
843 .call = parse_action,
846 /* List arguments. */
849 .help = "specify a group",
850 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
851 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
854 /* Validate/create attributes. */
857 .help = "specify a group",
858 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
859 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
864 .help = "specify a priority level",
865 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
866 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
871 .help = "affect rule to ingress",
872 .next = NEXT(next_vc_attr),
877 .help = "affect rule to egress",
878 .next = NEXT(next_vc_attr),
881 /* Validate/create pattern. */
884 .help = "submit a list of pattern items",
885 .next = NEXT(next_item),
890 .help = "match value perfectly (with full bit-mask)",
891 .call = parse_vc_spec,
893 [ITEM_PARAM_SPEC] = {
895 .help = "match value according to configured bit-mask",
896 .call = parse_vc_spec,
898 [ITEM_PARAM_LAST] = {
900 .help = "specify upper bound to establish a range",
901 .call = parse_vc_spec,
903 [ITEM_PARAM_MASK] = {
905 .help = "specify bit-mask with relevant bits set to one",
906 .call = parse_vc_spec,
908 [ITEM_PARAM_PREFIX] = {
910 .help = "generate bit-mask from a prefix length",
911 .call = parse_vc_spec,
915 .help = "specify next pattern item",
916 .next = NEXT(next_item),
920 .help = "end list of pattern items",
921 .priv = PRIV_ITEM(END, 0),
922 .next = NEXT(NEXT_ENTRY(ACTIONS)),
927 .help = "no-op pattern item",
928 .priv = PRIV_ITEM(VOID, 0),
929 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
934 .help = "perform actions when pattern does not match",
935 .priv = PRIV_ITEM(INVERT, 0),
936 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
941 .help = "match any protocol for the current layer",
942 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
943 .next = NEXT(item_any),
948 .help = "number of layers covered",
949 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
950 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
954 .help = "match packets addressed to the physical function",
955 .priv = PRIV_ITEM(PF, 0),
956 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
961 .help = "match packets addressed to a virtual function ID",
962 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
963 .next = NEXT(item_vf),
968 .help = "destination VF ID",
969 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
970 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
974 .help = "device-specific physical port index to use",
975 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
976 .next = NEXT(item_port),
979 [ITEM_PORT_INDEX] = {
981 .help = "physical port index",
982 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
983 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
987 .help = "match an arbitrary byte string",
988 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
989 .next = NEXT(item_raw),
992 [ITEM_RAW_RELATIVE] = {
994 .help = "look for pattern after the previous item",
995 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
996 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
999 [ITEM_RAW_SEARCH] = {
1001 .help = "search pattern from offset (see also limit)",
1002 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1003 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1006 [ITEM_RAW_OFFSET] = {
1008 .help = "absolute or relative offset for pattern",
1009 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1010 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1012 [ITEM_RAW_LIMIT] = {
1014 .help = "search area limit for start of pattern",
1015 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1016 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1018 [ITEM_RAW_PATTERN] = {
1020 .help = "byte string to look for",
1021 .next = NEXT(item_raw,
1023 NEXT_ENTRY(ITEM_PARAM_IS,
1026 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1027 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1029 ITEM_RAW_PATTERN_SIZE)),
1033 .help = "match Ethernet header",
1034 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1035 .next = NEXT(item_eth),
1040 .help = "destination MAC",
1041 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1042 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
1046 .help = "source MAC",
1047 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1048 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
1052 .help = "EtherType",
1053 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1054 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1058 .help = "match 802.1Q/ad VLAN tag",
1059 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1060 .next = NEXT(item_vlan),
1063 [ITEM_VLAN_TPID] = {
1065 .help = "tag protocol identifier",
1066 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1067 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1071 .help = "tag control information",
1072 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1073 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1077 .help = "priority code point",
1078 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1079 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1084 .help = "drop eligible indicator",
1085 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1086 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1091 .help = "VLAN identifier",
1092 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1093 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1098 .help = "match IPv4 header",
1099 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1100 .next = NEXT(item_ipv4),
1105 .help = "type of service",
1106 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1107 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1108 hdr.type_of_service)),
1112 .help = "time to live",
1113 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1114 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1117 [ITEM_IPV4_PROTO] = {
1119 .help = "next protocol ID",
1120 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1121 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1122 hdr.next_proto_id)),
1126 .help = "source address",
1127 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1128 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1133 .help = "destination address",
1134 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1135 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1140 .help = "match IPv6 header",
1141 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1142 .next = NEXT(item_ipv6),
1147 .help = "traffic class",
1148 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1149 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1151 "\x0f\xf0\x00\x00")),
1153 [ITEM_IPV6_FLOW] = {
1155 .help = "flow label",
1156 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1157 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1159 "\x00\x0f\xff\xff")),
1161 [ITEM_IPV6_PROTO] = {
1163 .help = "protocol (next header)",
1164 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1165 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1170 .help = "hop limit",
1171 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1172 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1177 .help = "source address",
1178 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1179 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1184 .help = "destination address",
1185 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1186 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1191 .help = "match ICMP header",
1192 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1193 .next = NEXT(item_icmp),
1196 [ITEM_ICMP_TYPE] = {
1198 .help = "ICMP packet type",
1199 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1200 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1203 [ITEM_ICMP_CODE] = {
1205 .help = "ICMP packet code",
1206 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1207 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1212 .help = "match UDP header",
1213 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1214 .next = NEXT(item_udp),
1219 .help = "UDP source port",
1220 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1221 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1226 .help = "UDP destination port",
1227 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1228 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1233 .help = "match TCP header",
1234 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1235 .next = NEXT(item_tcp),
1240 .help = "TCP source port",
1241 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1242 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1247 .help = "TCP destination port",
1248 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1249 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1254 .help = "match SCTP header",
1255 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1256 .next = NEXT(item_sctp),
1261 .help = "SCTP source port",
1262 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1263 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1268 .help = "SCTP destination port",
1269 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1270 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1275 .help = "validation tag",
1276 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1277 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1280 [ITEM_SCTP_CKSUM] = {
1283 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1284 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1289 .help = "match VXLAN header",
1290 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1291 .next = NEXT(item_vxlan),
1294 [ITEM_VXLAN_VNI] = {
1296 .help = "VXLAN identifier",
1297 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1298 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1302 .help = "match MPLS header",
1303 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1304 .next = NEXT(item_mpls),
1307 [ITEM_MPLS_LABEL] = {
1309 .help = "MPLS label",
1310 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1311 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1317 .help = "match GRE header",
1318 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1319 .next = NEXT(item_gre),
1322 [ITEM_GRE_PROTO] = {
1324 .help = "GRE protocol type",
1325 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1326 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1329 /* Validate/create actions. */
1332 .help = "submit a list of associated actions",
1333 .next = NEXT(next_action),
1338 .help = "specify next action",
1339 .next = NEXT(next_action),
1343 .help = "end list of actions",
1344 .priv = PRIV_ACTION(END, 0),
1349 .help = "no-op action",
1350 .priv = PRIV_ACTION(VOID, 0),
1351 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1354 [ACTION_PASSTHRU] = {
1356 .help = "let subsequent rule process matched packets",
1357 .priv = PRIV_ACTION(PASSTHRU, 0),
1358 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1363 .help = "attach 32 bit value to packets",
1364 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1365 .next = NEXT(action_mark),
1368 [ACTION_MARK_ID] = {
1370 .help = "32 bit value to return with packets",
1371 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1372 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1373 .call = parse_vc_conf,
1377 .help = "flag packets",
1378 .priv = PRIV_ACTION(FLAG, 0),
1379 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1384 .help = "assign packets to a given queue index",
1385 .priv = PRIV_ACTION(QUEUE,
1386 sizeof(struct rte_flow_action_queue)),
1387 .next = NEXT(action_queue),
1390 [ACTION_QUEUE_INDEX] = {
1392 .help = "queue index to use",
1393 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1394 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1395 .call = parse_vc_conf,
1399 .help = "drop packets (note: passthru has priority)",
1400 .priv = PRIV_ACTION(DROP, 0),
1401 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1406 .help = "enable counters for this rule",
1407 .priv = PRIV_ACTION(COUNT, 0),
1408 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1413 .help = "duplicate packets to a given queue index",
1414 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1415 .next = NEXT(action_dup),
1418 [ACTION_DUP_INDEX] = {
1420 .help = "queue index to duplicate packets to",
1421 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1422 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1423 .call = parse_vc_conf,
1427 .help = "spread packets among several queues",
1428 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1429 .next = NEXT(action_rss),
1432 [ACTION_RSS_QUEUES] = {
1434 .help = "queue indices to use",
1435 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1436 .call = parse_vc_conf,
1438 [ACTION_RSS_QUEUE] = {
1440 .help = "queue index",
1441 .call = parse_vc_action_rss_queue,
1442 .comp = comp_vc_action_rss_queue,
1446 .help = "redirect packets to physical device function",
1447 .priv = PRIV_ACTION(PF, 0),
1448 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1453 .help = "redirect packets to virtual device function",
1454 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1455 .next = NEXT(action_vf),
1458 [ACTION_VF_ORIGINAL] = {
1460 .help = "use original VF ID if possible",
1461 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1462 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1464 .call = parse_vc_conf,
1468 .help = "VF ID to redirect packets to",
1469 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1470 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1471 .call = parse_vc_conf,
1475 /** Remove and return last entry from argument stack. */
1476 static const struct arg *
1477 pop_args(struct context *ctx)
1479 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1482 /** Add entry on top of the argument stack. */
1484 push_args(struct context *ctx, const struct arg *arg)
1486 if (ctx->args_num == CTX_STACK_SIZE)
1488 ctx->args[ctx->args_num++] = arg;
1492 /** Spread value into buffer according to bit-mask. */
1494 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1496 uint32_t i = arg->size;
1504 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1513 unsigned int shift = 0;
1514 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1516 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1517 if (!(arg->mask[i] & (1 << shift)))
1522 *buf &= ~(1 << shift);
1523 *buf |= (val & 1) << shift;
1532 * Parse a prefix length and generate a bit-mask.
1534 * Last argument (ctx->args) is retrieved to determine mask size, storage
1535 * location and whether the result must use network byte ordering.
1538 parse_prefix(struct context *ctx, const struct token *token,
1539 const char *str, unsigned int len,
1540 void *buf, unsigned int size)
1542 const struct arg *arg = pop_args(ctx);
1543 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1550 /* Argument is expected. */
1554 u = strtoumax(str, &end, 0);
1555 if (errno || (size_t)(end - str) != len)
1560 extra = arg_entry_bf_fill(NULL, 0, arg);
1569 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1570 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1577 if (bytes > size || bytes + !!extra > size)
1581 buf = (uint8_t *)ctx->object + arg->offset;
1582 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1584 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1585 memset(buf, 0x00, size - bytes);
1587 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1591 memset(buf, 0xff, bytes);
1592 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1594 ((uint8_t *)buf)[bytes] = conv[extra];
1597 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1600 push_args(ctx, arg);
1604 /** Default parsing function for token name matching. */
1606 parse_default(struct context *ctx, const struct token *token,
1607 const char *str, unsigned int len,
1608 void *buf, unsigned int size)
1613 if (strncmp(str, token->name, len))
1618 /** Parse flow command, initialize output buffer for subsequent tokens. */
1620 parse_init(struct context *ctx, const struct token *token,
1621 const char *str, unsigned int len,
1622 void *buf, unsigned int size)
1624 struct buffer *out = buf;
1626 /* Token name must match. */
1627 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1629 /* Nothing else to do if there is no buffer. */
1632 /* Make sure buffer is large enough. */
1633 if (size < sizeof(*out))
1635 /* Initialize buffer. */
1636 memset(out, 0x00, sizeof(*out));
1637 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1640 ctx->objmask = NULL;
1644 /** Parse tokens for validate/create commands. */
1646 parse_vc(struct context *ctx, const struct token *token,
1647 const char *str, unsigned int len,
1648 void *buf, unsigned int size)
/*
 * Handles the command token itself, the attribute tokens (ingress,
 * egress, ...), and every pattern item / action token. Pattern items
 * and actions grow upward from just past *out while their spec/conf
 * data grows downward from the end of the buffer.
 * NOTE(review): excerpt is elided; returns and some braces not shown.
 */
1650 struct buffer *out = buf;
1654 /* Token name must match. */
1655 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1657 /* Nothing else to do if there is no buffer. */
/* First token of the command: record which command this is. */
1660 if (!out->command) {
1661 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1663 if (sizeof(*out) > size)
1665 out->command = ctx->curr;
1668 ctx->objmask = NULL;
/* Data area starts at the end of the buffer and grows downward. */
1669 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens modify the rte_flow_attr embedded in *out. */
1673 ctx->object = &out->args.vc.attr;
1674 ctx->objmask = NULL;
1675 switch (ctx->curr) {
1680 out->args.vc.attr.ingress = 1;
1683 out->args.vc.attr.egress = 1;
/* "pattern" keyword: pattern item array starts right after *out. */
1686 out->args.vc.pattern =
1687 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1689 ctx->object = out->args.vc.pattern;
1690 ctx->objmask = NULL;
/* "actions" keyword: action array follows the pattern item array. */
1693 out->args.vc.actions =
1694 (void *)RTE_ALIGN_CEIL((uintptr_t)
1695 (out->args.vc.pattern +
1696 out->args.vc.pattern_n),
1698 ctx->object = out->args.vc.actions;
1699 ctx->objmask = NULL;
/*
 * Remaining tokens: while actions has not been seen, the token is a
 * pattern item; otherwise it is an action.
 */
1706 if (!out->args.vc.actions) {
1707 const struct parse_item_priv *priv = token->priv;
1708 struct rte_flow_item *item =
1709 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve room for spec, last and mask copies of the item. */
1711 data_size = priv->size * 3; /* spec, last, mask */
1712 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1713 (out->args.vc.data - data_size),
/* Fail when the upward and downward regions would collide. */
1715 if ((uint8_t *)item + sizeof(*item) > data)
1717 *item = (struct rte_flow_item){
1720 ++out->args.vc.pattern_n;
1722 ctx->objmask = NULL;
1724 const struct parse_action_priv *priv = token->priv;
1725 struct rte_flow_action *action =
1726 out->args.vc.actions + out->args.vc.actions_n;
/* Actions only carry a single configuration object. */
1728 data_size = priv->size; /* configuration */
1729 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1730 (out->args.vc.data - data_size),
1732 if ((uint8_t *)action + sizeof(*action) > data)
1734 *action = (struct rte_flow_action){
1737 ++out->args.vc.actions_n;
1738 ctx->object = action;
1739 ctx->objmask = NULL;
/* Commit the newly reserved data area; objdata caches its size. */
1741 memset(data, 0, data_size);
1742 out->args.vc.data = data;
1743 ctx->objdata = data_size;
1747 /** Parse pattern item parameter type. */
1749 parse_vc_spec(struct context *ctx, const struct token *token,
1750 const char *str, unsigned int len,
1751 void *buf, unsigned int size)
/*
 * Selects which copy of the current item (spec, last or mask) the
 * following field tokens will write into.
 * NOTE(review): excerpt is elided; index assignments per case and the
 * return paths are not visible here.
 */
1753 struct buffer *out = buf;
1754 struct rte_flow_item *item;
1760 /* Token name must match. */
1761 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1763 /* Parse parameter types. */
1764 switch (ctx->curr) {
1769 case ITEM_PARAM_SPEC:
1772 case ITEM_PARAM_LAST:
1775 case ITEM_PARAM_PREFIX:
1776 /* Modify next token to expect a prefix. */
1777 if (ctx->next_num < 2)
1779 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1781 case ITEM_PARAM_MASK:
1787 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist. */
1790 if (!out->args.vc.pattern_n)
1792 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* Data area holds three equal-sized copies: spec, last, mask. */
1793 data_size = ctx->objdata / 3; /* spec, last, mask */
1794 /* Point to selected object. */
1795 ctx->object = out->args.vc.data + (data_size * index);
1797 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1798 item->mask = ctx->objmask;
1800 ctx->objmask = NULL;
1801 /* Update relevant item pointer. */
1802 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1807 /** Parse action configuration field. */
1809 parse_vc_conf(struct context *ctx, const struct token *token,
1810 const char *str, unsigned int len,
1811 void *buf, unsigned int size)
/*
 * Points the current action's conf at the data area reserved by
 * parse_vc() so subsequent field tokens fill it in.
 */
1813 struct buffer *out = buf;
1814 struct rte_flow_action *action;
1817 /* Token name must match. */
1818 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1820 /* Nothing else to do if there is no buffer. */
/* An action must already exist. */
1823 if (!out->args.vc.actions_n)
1825 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1826 /* Point to selected object. */
1827 ctx->object = out->args.vc.data;
1828 ctx->objmask = NULL;
1829 /* Update configuration pointer. */
1830 action->conf = ctx->object;
1835 * Parse queue field for RSS action.
1837 * Valid tokens are queue indices and the "end" token.
1840 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1841 const char *str, unsigned int len,
1842 void *buf, unsigned int size)
/*
 * Re-queues itself after each queue index so an arbitrary number of
 * indices (up to ACTION_RSS_NUM) can follow, until "end" is seen.
 */
1844 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1851 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count so far is packed in the upper 16 bits of objdata. */
1853 i = ctx->objdata >> 16;
1854 if (!strncmp(str, "end", len)) {
1855 ctx->objdata &= 0xffff;
1858 if (i >= ACTION_RSS_NUM)
1860 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
/* Delegate the actual number parsing to parse_int(). */
1862 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the upper 16 bits. */
1868 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
1870 if (ctx->next_num == RTE_DIM(ctx->next))
1872 ctx->next[ctx->next_num++] = next;
/* Record the final number of queues in the RSS action itself. */
1875 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1879 /** Parse tokens for destroy command. */
1881 parse_destroy(struct context *ctx, const struct token *token,
1882 const char *str, unsigned int len,
1883 void *buf, unsigned int size)
/* Collects a variable-length list of rule IDs after the command token. */
1885 struct buffer *out = buf;
1887 /* Token name must match. */
1888 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1890 /* Nothing else to do if there is no buffer. */
/* First token: record command and set up the rule ID array. */
1893 if (!out->command) {
1894 if (ctx->curr != DESTROY)
1896 if (sizeof(*out) > size)
1898 out->command = ctx->curr;
1901 ctx->objmask = NULL;
1902 out->args.destroy.rule =
1903 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse additional rule IDs once the buffer would overflow. */
1907 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1908 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1911 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1912 ctx->objmask = NULL;
1916 /** Parse tokens for flush command. */
1918 parse_flush(struct context *ctx, const struct token *token,
1919 const char *str, unsigned int len,
1920 void *buf, unsigned int size)
/* Flush takes no arguments besides the port; only record the command. */
1922 struct buffer *out = buf;
1924 /* Token name must match. */
1925 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1927 /* Nothing else to do if there is no buffer. */
1930 if (!out->command) {
1931 if (ctx->curr != FLUSH)
1933 if (sizeof(*out) > size)
1935 out->command = ctx->curr;
1938 ctx->objmask = NULL;
1943 /** Parse tokens for query command. */
1945 parse_query(struct context *ctx, const struct token *token,
1946 const char *str, unsigned int len,
1947 void *buf, unsigned int size)
/*
 * Records the command; the rule ID and action name that follow are
 * handled by parse_int()/parse_action() through pushed arguments.
 */
1949 struct buffer *out = buf;
1951 /* Token name must match. */
1952 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1954 /* Nothing else to do if there is no buffer. */
1957 if (!out->command) {
1958 if (ctx->curr != QUERY)
1960 if (sizeof(*out) > size)
1962 out->command = ctx->curr;
1965 ctx->objmask = NULL;
1970 /** Parse action names. */
1972 parse_action(struct context *ctx, const struct token *token,
1973 const char *str, unsigned int len,
1974 void *buf, unsigned int size)
/*
 * Looks up an action name among next_action[] tokens and stores its
 * rte_flow action type at the offset described by the pending argument.
 * NOTE(review): excerpt is elided; the copied value and return paths
 * are not fully visible here.
 */
1976 struct buffer *out = buf;
1977 const struct arg *arg = pop_args(ctx);
1981 /* Argument is expected. */
1984 /* Parse action name. */
1985 for (i = 0; next_action[i]; ++i) {
1986 const struct parse_action_priv *priv;
1988 token = &token_list[next_action[i]];
1989 if (strncmp(token->name, str, len))
1995 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so callers can retry or report. */
2001 push_args(ctx, arg);
2005 /** Parse tokens for list command. */
2007 parse_list(struct context *ctx, const struct token *token,
2008 const char *str, unsigned int len,
2009 void *buf, unsigned int size)
/* Collects an optional list of group IDs after the command token. */
2011 struct buffer *out = buf;
2013 /* Token name must match. */
2014 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2016 /* Nothing else to do if there is no buffer. */
/* First token: record command and set up the group ID array. */
2019 if (!out->command) {
2020 if (ctx->curr != LIST)
2022 if (sizeof(*out) > size)
2024 out->command = ctx->curr;
2027 ctx->objmask = NULL;
2028 out->args.list.group =
2029 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse additional group IDs once the buffer would overflow. */
2033 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2034 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2037 ctx->object = out->args.list.group + out->args.list.group_n++;
2038 ctx->objmask = NULL;
2043 * Parse signed/unsigned integers 8 to 64-bit long.
2045 * Last argument (ctx->args) is retrieved to determine integer type and
2049 parse_int(struct context *ctx, const struct token *token,
2050 const char *str, unsigned int len,
2051 void *buf, unsigned int size)
/*
 * Converts the token to an integer and stores it at the offset/size
 * described by the pending argument, honoring bit-fields, byte order
 * (hton) and the odd 24-bit case. The objmask copy, when present, is
 * filled with all-ones.
 * NOTE(review): excerpt is elided; error gotos and returns not shown.
 */
2053 const struct arg *arg = pop_args(ctx);
2058 /* Argument is expected. */
/* Pick signed or unsigned conversion based on the argument type. */
2063 (uintmax_t)strtoimax(str, &end, 0) :
2064 strtoumax(str, &end, 0);
/* Reject trailing garbage and out-of-range values. */
2065 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments are handled by arg_entry_bf_fill(). */
2070 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2071 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2075 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination width. */
2079 case sizeof(uint8_t):
2080 *(uint8_t *)buf = u;
2082 case sizeof(uint16_t):
2083 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields need a manual byte-order-aware store. */
2085 case sizeof(uint8_t [3]):
2086 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2088 ((uint8_t *)buf)[0] = u;
2089 ((uint8_t *)buf)[1] = u >> 8;
2090 ((uint8_t *)buf)[2] = u >> 16;
2094 ((uint8_t *)buf)[0] = u >> 16;
2095 ((uint8_t *)buf)[1] = u >> 8;
2096 ((uint8_t *)buf)[2] = u;
2098 case sizeof(uint32_t):
2099 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2101 case sizeof(uint64_t):
2102 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask unless it aliases the object. */
2107 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2109 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore the argument on failure. */
2114 push_args(ctx, arg);
2121 * Two arguments (ctx->args) are retrieved from the stack to store data and
2122 * its length (in that order).
2125 parse_string(struct context *ctx, const struct token *token,
2126 const char *str, unsigned int len,
2127 void *buf, unsigned int size)
/*
 * Copies the raw token bytes into the data field and stores its length
 * through a recursive parse_int() call on the length argument.
 */
2129 const struct arg *arg_data = pop_args(ctx);
2130 const struct arg *arg_len = pop_args(ctx);
2131 char tmp[16]; /* Ought to be enough. */
2134 /* Arguments are expected. */
/* Put arg_data back if arg_len was missing. */
2138 push_args(ctx, arg_data);
2141 size = arg_data->size;
2142 /* Bit-mask fill is not supported. */
2143 if (arg_data->mask || size < len)
2147 /* Let parse_int() fill length information first. */
2148 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2151 push_args(ctx, arg_len);
2152 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2157 buf = (uint8_t *)ctx->object + arg_data->offset;
2158 /* Output buffer is not necessarily NUL-terminated. */
2159 memcpy(buf, str, len);
/* 0x55 poison pattern marks the unused tail of the field. */
2160 memset((uint8_t *)buf + len, 0x55, size - len);
2162 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Restore both arguments on failure, in reverse pop order. */
2165 push_args(ctx, arg_len);
2166 push_args(ctx, arg_data);
2171 * Parse a MAC address.
2173 * Last argument (ctx->args) is retrieved to determine storage size and
2177 parse_mac_addr(struct context *ctx, const struct token *token,
2178 const char *str, unsigned int len,
2179 void *buf, unsigned int size)
/* Delegates the textual parsing to the cmdline library helper. */
2181 const struct arg *arg = pop_args(ctx);
2182 struct ether_addr tmp;
2186 /* Argument is expected. */
2190 /* Bit-mask fill is not supported. */
2191 if (arg->mask || size != sizeof(tmp))
2193 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The helper must have consumed the whole token. */
2194 if (ret < 0 || (unsigned int)ret != len)
2198 buf = (uint8_t *)ctx->object + arg->offset;
2199 memcpy(buf, &tmp, size);
/* When a mask object exists, mark the field as fully significant. */
2201 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2204 push_args(ctx, arg);
2209 * Parse an IPv4 address.
2211 * Last argument (ctx->args) is retrieved to determine storage size and
2215 parse_ipv4_addr(struct context *ctx, const struct token *token,
2216 const char *str, unsigned int len,
2217 void *buf, unsigned int size)
/*
 * Accepts dotted-quad notation via inet_pton(); anything else falls
 * back to plain integer parsing.
 */
2219 const struct arg *arg = pop_args(ctx);
2224 /* Argument is expected. */
2228 /* Bit-mask fill is not supported. */
2229 if (arg->mask || size != sizeof(tmp))
2231 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2234 memcpy(str2, str, len);
2236 ret = inet_pton(AF_INET, str2, &tmp);
2238 /* Attempt integer parsing. */
2239 push_args(ctx, arg);
2240 return parse_int(ctx, token, str, len, buf, size);
2244 buf = (uint8_t *)ctx->object + arg->offset;
2245 memcpy(buf, &tmp, size);
/* When a mask object exists, mark the field as fully significant. */
2247 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2250 push_args(ctx, arg);
2255 * Parse an IPv6 address.
2257 * Last argument (ctx->args) is retrieved to determine storage size and
2261 parse_ipv6_addr(struct context *ctx, const struct token *token,
2262 const char *str, unsigned int len,
2263 void *buf, unsigned int size)
/*
 * Same as parse_ipv4_addr() but without an integer fallback: only
 * inet_pton(AF_INET6) notation is accepted.
 */
2265 const struct arg *arg = pop_args(ctx);
2267 struct in6_addr tmp;
2271 /* Argument is expected. */
2275 /* Bit-mask fill is not supported. */
2276 if (arg->mask || size != sizeof(tmp))
2278 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2281 memcpy(str2, str, len);
2283 ret = inet_pton(AF_INET6, str2, &tmp);
2288 buf = (uint8_t *)ctx->object + arg->offset;
2289 memcpy(buf, &tmp, size);
/* When a mask object exists, mark the field as fully significant. */
2291 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2294 push_args(ctx, arg);
2298 /** Boolean values (even indices stand for false). */
/*
 * NOTE(review): the array entries are elided from this excerpt;
 * parse_boolean() relies on a NULL terminator and on the even/odd
 * index convention stated above.
 */
2299 static const char *const boolean_name[] = {
2308 * Parse a boolean value.
2310 * Last argument (ctx->args) is retrieved to determine storage size and
2314 parse_boolean(struct context *ctx, const struct token *token,
2315 const char *str, unsigned int len,
2316 void *buf, unsigned int size)
/* Maps known boolean keywords to "0"/"1", then reuses parse_int(). */
2318 const struct arg *arg = pop_args(ctx);
2322 /* Argument is expected. */
/* Scan the NULL-terminated keyword table. */
2325 for (i = 0; boolean_name[i]; ++i)
2326 if (!strncmp(str, boolean_name[i], len))
2328 /* Process token as integer. */
/* Odd table indices are "true" spellings, even ones "false". */
2329 if (boolean_name[i])
2330 str = i & 1 ? "1" : "0";
2331 push_args(ctx, arg);
2332 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen(str). */
2333 return ret > 0 ? (int)len : ret;
2336 /** Parse port and update context. */
2338 parse_port(struct context *ctx, const struct token *token,
2339 const char *str, unsigned int len,
2340 void *buf, unsigned int size)
/*
 * Parses the port ID into a throwaway buffer and caches it in
 * ctx->port so completion callbacks know which port is targeted.
 */
2342 struct buffer *out = &(struct buffer){ .port = 0 };
2350 ctx->objmask = NULL;
2351 size = sizeof(*out);
2353 ret = parse_int(ctx, token, str, len, out, size);
2355 ctx->port = out->port;
2361 /** No completion. */
2363 comp_none(struct context *ctx, const struct token *token,
2364 unsigned int ent, char *buf, unsigned int size)
2374 /** Complete boolean values. */
2376 comp_boolean(struct context *ctx, const struct token *token,
2377 unsigned int ent, char *buf, unsigned int size)
/* With a buffer: write entry #ent; without: count entries (not shown). */
2383 for (i = 0; boolean_name[i]; ++i)
2384 if (buf && i == ent)
2385 return snprintf(buf, size, "%s", boolean_name[i]);
2391 /** Complete action names. */
2393 comp_action(struct context *ctx, const struct token *token,
2394 unsigned int ent, char *buf, unsigned int size)
/* With a buffer: write entry #ent; without: count entries (not shown). */
2400 for (i = 0; next_action[i]; ++i)
2401 if (buf && i == ent)
2402 return snprintf(buf, size, "%s",
2403 token_list[next_action[i]].name);
2409 /** Complete available ports. */
2411 comp_port(struct context *ctx, const struct token *token,
2412 unsigned int ent, char *buf, unsigned int size)
/* Iterate over currently attached ethdev ports. */
2419 RTE_ETH_FOREACH_DEV(p) {
2420 if (buf && i == ent)
2421 return snprintf(buf, size, "%u", p);
2429 /** Complete available rule IDs. */
2431 comp_rule_id(struct context *ctx, const struct token *token,
2432 unsigned int ent, char *buf, unsigned int size)
/* Uses ctx->port (set by parse_port) to pick the right flow list. */
2435 struct rte_port *port;
2436 struct port_flow *pf;
/* No completion when the port is invalid or refers to all ports. */
2439 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2440 ctx->port == (uint16_t)RTE_PORT_ALL)
2442 port = &ports[ctx->port];
2443 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2444 if (buf && i == ent)
2445 return snprintf(buf, size, "%u", pf->id);
2453 /** Complete queue field for RSS action. */
2455 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2456 unsigned int ent, char *buf, unsigned int size)
/* Only a placeholder and the "end" terminator are offered. */
2458 static const char *const str[] = { "", "end", NULL };
2463 for (i = 0; str[i] != NULL; ++i)
2464 if (buf && i == ent)
2465 return snprintf(buf, size, "%s", str[i]);
2471 /** Internal context. */
/* Single shared parser state; the cmdline API offers no per-call state. */
2472 static struct context cmd_flow_context;
2474 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
2475 cmdline_parse_inst_t cmd_flow;
2477 /** Initialize context. */
2479 cmd_flow_context_init(struct context *ctx)
2481 /* A full memset() is not necessary. */
2492 ctx->objmask = NULL;
2495 /** Parse a token (cmdline API). */
2497 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2498 unsigned int size)
/*
 * Splits the current word out of src, tries it against every candidate
 * token from the top of ctx->next, then pushes that token's follow-up
 * tokens and arguments.
 * NOTE(review): excerpt is elided; several statements (reset flag
 * handling, loop break, returns) are not visible here.
 */
2500 struct context *ctx = &cmd_flow_context;
2501 const struct token *token;
2502 const enum index *list;
2507 /* Restart as requested. */
2509 cmd_flow_context_init(ctx);
2510 token = &token_list[ctx->curr];
2511 /* Check argument length. */
/* A word ends at whitespace or a '#' comment marker. */
2514 for (len = 0; src[len]; ++len)
2515 if (src[len] == '#' || isspace(src[len]))
2519 /* Last argument and EOL detection. */
2520 for (i = len; src[i]; ++i)
2521 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2523 else if (!isspace(src[i])) {
2528 if (src[i] == '\r' || src[i] == '\n') {
2532 /* Initialize context if necessary. */
2533 if (!ctx->next_num) {
2536 ctx->next[ctx->next_num++] = token->next[0];
2538 /* Process argument through candidates. */
2539 ctx->prev = ctx->curr;
/* Candidates come from the most recently pushed token list. */
2540 list = ctx->next[ctx->next_num - 1];
2541 for (i = 0; list[i]; ++i) {
2542 const struct token *next = &token_list[list[i]];
2545 ctx->curr = list[i];
/* Tokens without a callback fall back to plain name matching. */
2547 tmp = next->call(ctx, next, src, len, result, size);
2549 tmp = parse_default(ctx, next, src, len, result, size);
/* Candidate must consume exactly the current word. */
2550 if (tmp == -1 || tmp != len)
2558 /* Push subsequent tokens if any. */
2560 for (i = 0; token->next[i]; ++i) {
2561 if (ctx->next_num == RTE_DIM(ctx->next))
2563 ctx->next[ctx->next_num++] = token->next[i];
2565 /* Push arguments if any. */
2567 for (i = 0; token->args[i]; ++i) {
2568 if (ctx->args_num == RTE_DIM(ctx->args))
2570 ctx->args[ctx->args_num++] = token->args[i];
2575 /** Return number of completion entries (cmdline API). */
2577 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2579 struct context *ctx = &cmd_flow_context;
2580 const struct token *token = &token_list[ctx->curr];
2581 const enum index *list;
2585 /* Tell cmd_flow_parse() that context must be reinitialized. */
2587 /* Count number of tokens in current list. */
/* Fall back to the current token's first next-list when empty. */
2589 list = ctx->next[ctx->next_num - 1];
2591 list = token->next[0];
2592 for (i = 0; list[i]; ++i)
2597 * If there is a single token, use its completion callback, otherwise
2598 * return the number of entries.
2600 token = &token_list[list[0]];
2601 if (i == 1 && token->comp) {
2602 /* Save index for cmd_flow_get_help(). */
2603 ctx->prev = list[0];
/* comp(NULL, 0) is the count query by convention. */
2604 return token->comp(ctx, token, 0, NULL, 0);
2609 /** Return a completion entry (cmdline API). */
2611 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2612 char *dst, unsigned int size)
2614 struct context *ctx = &cmd_flow_context;
2615 const struct token *token = &token_list[ctx->curr];
2616 const enum index *list;
2620 /* Tell cmd_flow_parse() that context must be reinitialized. */
2622 /* Count number of tokens in current list. */
/* Fall back to the current token's first next-list when empty. */
2624 list = ctx->next[ctx->next_num - 1];
2626 list = token->next[0];
2627 for (i = 0; list[i]; ++i)
2631 /* If there is a single token, use its completion callback. */
2632 token = &token_list[list[0]];
2633 if (i == 1 && token->comp) {
2634 /* Save index for cmd_flow_get_help(). */
2635 ctx->prev = list[0];
2636 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2638 /* Otherwise make sure the index is valid and use defaults. */
2641 token = &token_list[list[index]];
2642 snprintf(dst, size, "%s", token->name);
2643 /* Save index for cmd_flow_get_help(). */
2644 ctx->prev = list[index];
2648 /** Populate help strings for current token (cmdline API). */
2650 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
/* Uses ctx->prev, saved by the parse/complete callbacks above. */
2652 struct context *ctx = &cmd_flow_context;
2653 const struct token *token = &token_list[ctx->prev];
2656 /* Tell cmd_flow_parse() that context must be reinitialized. */
2660 /* Set token type and update global help with details. */
2661 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help string; fall back to the token name. */
2663 cmd_flow.help_str = token->help;
2665 cmd_flow.help_str = token->name;
2669 /** Token definition template (cmdline API). */
/* All dynamic tokens share this header; state lives in cmd_flow_context. */
2670 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2671 .ops = &(struct cmdline_token_ops){
2672 .parse = cmd_flow_parse,
2673 .complete_get_nb = cmd_flow_complete_get_nb,
2674 .complete_get_elt = cmd_flow_complete_get_elt,
2675 .get_help = cmd_flow_get_help,
2680 /** Populate the next dynamic token. */
2682 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2683 cmdline_parse_token_hdr_t *(*hdrs)[])
/*
 * Called by the cmdline library to fetch one token header at a time;
 * NULL output (elided here) signals the end of the command.
 */
2685 struct context *ctx = &cmd_flow_context;
2687 /* Always reinitialize context before requesting the first token. */
2689 cmd_flow_context_init(ctx);
2690 /* Return NULL when no more tokens are expected. */
2691 if (!ctx->next_num && ctx->curr) {
2695 /* Determine if command should end here. */
2696 if (ctx->eol && ctx->last && ctx->next_num) {
2697 const enum index *list = ctx->next[ctx->next_num - 1];
2700 for (i = 0; list[i]; ++i) {
/* Every dynamic token uses the same shared header template. */
2707 *hdr = &cmd_flow_token_hdr;
2710 /** Dispatch parsed buffer to function calls. */
2712 cmd_flow_parsed(const struct buffer *in)
/* Route the completed command buffer to the matching port_flow_*() API. */
2714 switch (in->command) {
2716 port_flow_validate(in->port, &in->args.vc.attr,
2717 in->args.vc.pattern, in->args.vc.actions);
2720 port_flow_create(in->port, &in->args.vc.attr,
2721 in->args.vc.pattern, in->args.vc.actions);
2724 port_flow_destroy(in->port, in->args.destroy.rule_n,
2725 in->args.destroy.rule);
2728 port_flow_flush(in->port);
2731 port_flow_query(in->port, in->args.query.rule,
2732 in->args.query.action);
2735 port_flow_list(in->port, in->args.list.group_n,
2736 in->args.list.group);
2743 /** Token generator and output processing callback (cmdline API). */
2745 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
/* Either generates the next token or dispatches the finished buffer. */
2748 cmd_flow_tok(arg0, arg2);
2750 cmd_flow_parsed(arg0);
2753 /** Global parser instance (cmdline API). */
2754 cmdline_parse_inst_t cmd_flow = {
2756 .data = NULL, /**< Unused. */
2757 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2760 }, /**< Tokens are returned by cmd_flow_tok(). */