4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
43 #include <rte_common.h>
44 #include <rte_ethdev.h>
45 #include <rte_byteorder.h>
46 #include <cmdline_parse.h>
47 #include <cmdline_parse_etheraddr.h>
52 /** Parser token indices. */
72 /* Top-level command. */
75 /* Sub-level commands. */
83 /* Destroy arguments. */
86 /* Query arguments. */
92 /* Validate/create arguments. */
98 /* Validate/create pattern. */
162 /* Validate/create actions. */
/** Size of pattern[] field in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 36

/**
 * Storage size for struct rte_flow_item_raw including pattern.
 *
 * The pattern[] member is a trailing variable-length array, so the full
 * storage footprint is the offset of pattern[] plus the number of bytes
 * reserved for it above.
 */
#define ITEM_RAW_SIZE \
	(offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)

/** Number of queue[] entries in struct rte_flow_action_rss. */
#define ACTION_RSS_NUM 32

/**
 * Storage size for struct rte_flow_action_rss including queues.
 *
 * Same trailing-array computation as ITEM_RAW_SIZE; the
 * ((struct rte_flow_action_rss *)0)->queue expression is the classic
 * sizeof-through-null-pointer idiom to get the element size without an
 * object instance (evaluated at compile time, never dereferenced).
 */
#define ACTION_RSS_SIZE \
	(offsetof(struct rte_flow_action_rss, queue) + \
	 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)

/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
204 /** Parser context. */
206 /** Stack of subsequent token lists to process. */
207 const enum index *next[CTX_STACK_SIZE];
208 /** Arguments for stacked tokens. */
209 const void *args[CTX_STACK_SIZE];
210 enum index curr; /**< Current token index. */
211 enum index prev; /**< Index of the last token seen. */
212 int next_num; /**< Number of entries in next[]. */
213 int args_num; /**< Number of entries in args[]. */
214 uint32_t reparse:1; /**< Start over from the beginning. */
215 uint32_t eol:1; /**< EOL has been detected. */
216 uint32_t last:1; /**< No more arguments. */
217 uint16_t port; /**< Current port ID (for completions). */
218 uint32_t objdata; /**< Object-specific data. */
219 void *object; /**< Address of current object for relative offsets. */
220 void *objmask; /**< Object a full mask must be written to. */
223 /** Token argument. */
225 uint32_t hton:1; /**< Use network byte ordering. */
226 uint32_t sign:1; /**< Value is signed. */
227 uint32_t offset; /**< Relative offset from ctx->object. */
228 uint32_t size; /**< Field size. */
229 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
232 /** Parser token definition. */
234 /** Type displayed during completion (defaults to "TOKEN"). */
236 /** Help displayed during completion (defaults to token name). */
238 /** Private data used by parser functions. */
241 * Lists of subsequent tokens to push on the stack. Each call to the
242 * parser consumes the last entry of that stack.
244 const enum index *const *next;
245 /** Arguments stack for subsequent tokens that need them. */
246 const struct arg *const *args;
248 * Token-processing callback, returns -1 in case of error, the
249 * length of the matched string otherwise. If NULL, attempts to
250 * match the token name.
252 * If buf is not NULL, the result should be stored in it according
253 * to context. An error is returned if not large enough.
255 int (*call)(struct context *ctx, const struct token *token,
256 const char *str, unsigned int len,
257 void *buf, unsigned int size);
259 * Callback that provides possible values for this token, used for
260 * completion. Returns -1 in case of error, the number of possible
261 * values otherwise. If NULL, the token name is used.
263 * If buf is not NULL, entry index ent is written to buf and the
264 * full length of the entry is returned (same behavior as
267 int (*comp)(struct context *ctx, const struct token *token,
268 unsigned int ent, char *buf, unsigned int size);
269 /** Mandatory token name, no default value. */
273 /** Static initializer for the next field. */
274 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
276 /** Static initializer for a NEXT() entry. */
277 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
279 /** Static initializer for the args field. */
280 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
282 /** Static initializer for ARGS() to target a field. */
283 #define ARGS_ENTRY(s, f) \
284 (&(const struct arg){ \
285 .offset = offsetof(s, f), \
286 .size = sizeof(((s *)0)->f), \
289 /** Static initializer for ARGS() to target a bit-field. */
290 #define ARGS_ENTRY_BF(s, f, b) \
291 (&(const struct arg){ \
293 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
296 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
297 #define ARGS_ENTRY_MASK(s, f, m) \
298 (&(const struct arg){ \
299 .offset = offsetof(s, f), \
300 .size = sizeof(((s *)0)->f), \
301 .mask = (const void *)(m), \
304 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
305 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
306 (&(const struct arg){ \
308 .offset = offsetof(s, f), \
309 .size = sizeof(((s *)0)->f), \
310 .mask = (const void *)(m), \
313 /** Static initializer for ARGS() to target a pointer. */
314 #define ARGS_ENTRY_PTR(s, f) \
315 (&(const struct arg){ \
316 .size = sizeof(*((s *)0)->f), \
319 /** Static initializer for ARGS() with arbitrary size. */
320 #define ARGS_ENTRY_USZ(s, f, sz) \
321 (&(const struct arg){ \
322 .offset = offsetof(s, f), \
326 /** Same as ARGS_ENTRY() using network byte ordering. */
327 #define ARGS_ENTRY_HTON(s, f) \
328 (&(const struct arg){ \
330 .offset = offsetof(s, f), \
331 .size = sizeof(((s *)0)->f), \
334 /** Parser output buffer layout expected by cmd_flow_parsed(). */
336 enum index command; /**< Flow command. */
337 uint16_t port; /**< Affected port ID. */
340 struct rte_flow_attr attr;
341 struct rte_flow_item *pattern;
342 struct rte_flow_action *actions;
346 } vc; /**< Validate/create arguments. */
350 } destroy; /**< Destroy arguments. */
353 enum rte_flow_action_type action;
354 } query; /**< Query arguments. */
358 } list; /**< List arguments. */
359 } args; /**< Command arguments. */
362 /** Private data for pattern items. */
363 struct parse_item_priv {
364 enum rte_flow_item_type type; /**< Item type. */
365 uint32_t size; /**< Size of item specification structure. */
368 #define PRIV_ITEM(t, s) \
369 (&(const struct parse_item_priv){ \
370 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
374 /** Private data for actions. */
375 struct parse_action_priv {
376 enum rte_flow_action_type type; /**< Action type. */
377 uint32_t size; /**< Size of action configuration structure. */
380 #define PRIV_ACTION(t, s) \
381 (&(const struct parse_action_priv){ \
382 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
386 static const enum index next_vc_attr[] = {
395 static const enum index next_destroy_attr[] = {
401 static const enum index next_list_attr[] = {
407 static const enum index item_param[] = {
416 static const enum index next_item[] = {
437 static const enum index item_any[] = {
443 static const enum index item_vf[] = {
449 static const enum index item_port[] = {
455 static const enum index item_raw[] = {
465 static const enum index item_eth[] = {
473 static const enum index item_vlan[] = {
483 static const enum index item_ipv4[] = {
493 static const enum index item_ipv6[] = {
504 static const enum index item_icmp[] = {
511 static const enum index item_udp[] = {
518 static const enum index item_tcp[] = {
525 static const enum index item_sctp[] = {
534 static const enum index item_vxlan[] = {
540 static const enum index next_action[] = {
556 static const enum index action_mark[] = {
562 static const enum index action_queue[] = {
568 static const enum index action_dup[] = {
574 static const enum index action_rss[] = {
580 static const enum index action_vf[] = {
587 static int parse_init(struct context *, const struct token *,
588 const char *, unsigned int,
589 void *, unsigned int);
590 static int parse_vc(struct context *, const struct token *,
591 const char *, unsigned int,
592 void *, unsigned int);
593 static int parse_vc_spec(struct context *, const struct token *,
594 const char *, unsigned int, void *, unsigned int);
595 static int parse_vc_conf(struct context *, const struct token *,
596 const char *, unsigned int, void *, unsigned int);
597 static int parse_vc_action_rss_queue(struct context *, const struct token *,
598 const char *, unsigned int, void *,
600 static int parse_destroy(struct context *, const struct token *,
601 const char *, unsigned int,
602 void *, unsigned int);
603 static int parse_flush(struct context *, const struct token *,
604 const char *, unsigned int,
605 void *, unsigned int);
606 static int parse_query(struct context *, const struct token *,
607 const char *, unsigned int,
608 void *, unsigned int);
609 static int parse_action(struct context *, const struct token *,
610 const char *, unsigned int,
611 void *, unsigned int);
612 static int parse_list(struct context *, const struct token *,
613 const char *, unsigned int,
614 void *, unsigned int);
615 static int parse_int(struct context *, const struct token *,
616 const char *, unsigned int,
617 void *, unsigned int);
618 static int parse_prefix(struct context *, const struct token *,
619 const char *, unsigned int,
620 void *, unsigned int);
621 static int parse_boolean(struct context *, const struct token *,
622 const char *, unsigned int,
623 void *, unsigned int);
624 static int parse_string(struct context *, const struct token *,
625 const char *, unsigned int,
626 void *, unsigned int);
627 static int parse_mac_addr(struct context *, const struct token *,
628 const char *, unsigned int,
629 void *, unsigned int);
630 static int parse_ipv4_addr(struct context *, const struct token *,
631 const char *, unsigned int,
632 void *, unsigned int);
633 static int parse_ipv6_addr(struct context *, const struct token *,
634 const char *, unsigned int,
635 void *, unsigned int);
636 static int parse_port(struct context *, const struct token *,
637 const char *, unsigned int,
638 void *, unsigned int);
639 static int comp_none(struct context *, const struct token *,
640 unsigned int, char *, unsigned int);
641 static int comp_boolean(struct context *, const struct token *,
642 unsigned int, char *, unsigned int);
643 static int comp_action(struct context *, const struct token *,
644 unsigned int, char *, unsigned int);
645 static int comp_port(struct context *, const struct token *,
646 unsigned int, char *, unsigned int);
647 static int comp_rule_id(struct context *, const struct token *,
648 unsigned int, char *, unsigned int);
649 static int comp_vc_action_rss_queue(struct context *, const struct token *,
650 unsigned int, char *, unsigned int);
652 /** Token definitions. */
653 static const struct token token_list[] = {
654 /* Special tokens. */
657 .help = "null entry, abused as the entry point",
658 .next = NEXT(NEXT_ENTRY(FLOW)),
663 .help = "command may end here",
669 .help = "integer value",
674 .name = "{unsigned}",
676 .help = "unsigned integer value",
683 .help = "prefix length for bit-mask",
684 .call = parse_prefix,
690 .help = "any boolean value",
691 .call = parse_boolean,
692 .comp = comp_boolean,
697 .help = "fixed string",
698 .call = parse_string,
702 .name = "{MAC address}",
704 .help = "standard MAC address notation",
705 .call = parse_mac_addr,
709 .name = "{IPv4 address}",
710 .type = "IPV4 ADDRESS",
711 .help = "standard IPv4 address notation",
712 .call = parse_ipv4_addr,
716 .name = "{IPv6 address}",
717 .type = "IPV6 ADDRESS",
718 .help = "standard IPv6 address notation",
719 .call = parse_ipv6_addr,
725 .help = "rule identifier",
727 .comp = comp_rule_id,
732 .help = "port identifier",
737 .name = "{group_id}",
739 .help = "group identifier",
746 .help = "priority level",
750 /* Top-level command. */
753 .type = "{command} {port_id} [{arg} [...]]",
754 .help = "manage ingress/egress flow rules",
755 .next = NEXT(NEXT_ENTRY
764 /* Sub-level commands. */
767 .help = "check whether a flow rule can be created",
768 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
769 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
774 .help = "create a flow rule",
775 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
776 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
781 .help = "destroy specific flow rules",
782 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
783 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
784 .call = parse_destroy,
788 .help = "destroy all flow rules",
789 .next = NEXT(NEXT_ENTRY(PORT_ID)),
790 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
795 .help = "query an existing flow rule",
796 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
798 NEXT_ENTRY(PORT_ID)),
799 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
800 ARGS_ENTRY(struct buffer, args.query.rule),
801 ARGS_ENTRY(struct buffer, port)),
806 .help = "list existing flow rules",
807 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
808 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
811 /* Destroy arguments. */
814 .help = "specify a rule identifier",
815 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
816 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
817 .call = parse_destroy,
819 /* Query arguments. */
823 .help = "action to query, must be part of the rule",
824 .call = parse_action,
827 /* List arguments. */
830 .help = "specify a group",
831 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
832 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
835 /* Validate/create attributes. */
838 .help = "specify a group",
839 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
840 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
845 .help = "specify a priority level",
846 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
847 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
852 .help = "affect rule to ingress",
853 .next = NEXT(next_vc_attr),
858 .help = "affect rule to egress",
859 .next = NEXT(next_vc_attr),
862 /* Validate/create pattern. */
865 .help = "submit a list of pattern items",
866 .next = NEXT(next_item),
871 .help = "match value perfectly (with full bit-mask)",
872 .call = parse_vc_spec,
874 [ITEM_PARAM_SPEC] = {
876 .help = "match value according to configured bit-mask",
877 .call = parse_vc_spec,
879 [ITEM_PARAM_LAST] = {
881 .help = "specify upper bound to establish a range",
882 .call = parse_vc_spec,
884 [ITEM_PARAM_MASK] = {
886 .help = "specify bit-mask with relevant bits set to one",
887 .call = parse_vc_spec,
889 [ITEM_PARAM_PREFIX] = {
891 .help = "generate bit-mask from a prefix length",
892 .call = parse_vc_spec,
896 .help = "specify next pattern item",
897 .next = NEXT(next_item),
901 .help = "end list of pattern items",
902 .priv = PRIV_ITEM(END, 0),
903 .next = NEXT(NEXT_ENTRY(ACTIONS)),
908 .help = "no-op pattern item",
909 .priv = PRIV_ITEM(VOID, 0),
910 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
915 .help = "perform actions when pattern does not match",
916 .priv = PRIV_ITEM(INVERT, 0),
917 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
922 .help = "match any protocol for the current layer",
923 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
924 .next = NEXT(item_any),
929 .help = "number of layers covered",
930 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
931 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
935 .help = "match packets addressed to the physical function",
936 .priv = PRIV_ITEM(PF, 0),
937 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
942 .help = "match packets addressed to a virtual function ID",
943 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
944 .next = NEXT(item_vf),
949 .help = "destination VF ID",
950 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
951 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
955 .help = "device-specific physical port index to use",
956 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
957 .next = NEXT(item_port),
960 [ITEM_PORT_INDEX] = {
962 .help = "physical port index",
963 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
964 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
968 .help = "match an arbitrary byte string",
969 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
970 .next = NEXT(item_raw),
973 [ITEM_RAW_RELATIVE] = {
975 .help = "look for pattern after the previous item",
976 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
977 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
980 [ITEM_RAW_SEARCH] = {
982 .help = "search pattern from offset (see also limit)",
983 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
984 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
987 [ITEM_RAW_OFFSET] = {
989 .help = "absolute or relative offset for pattern",
990 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
991 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
995 .help = "search area limit for start of pattern",
996 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
997 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
999 [ITEM_RAW_PATTERN] = {
1001 .help = "byte string to look for",
1002 .next = NEXT(item_raw,
1004 NEXT_ENTRY(ITEM_PARAM_IS,
1007 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1008 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1010 ITEM_RAW_PATTERN_SIZE)),
1014 .help = "match Ethernet header",
1015 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1016 .next = NEXT(item_eth),
1021 .help = "destination MAC",
1022 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1023 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
1027 .help = "source MAC",
1028 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1029 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
1033 .help = "EtherType",
1034 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1035 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1039 .help = "match 802.1Q/ad VLAN tag",
1040 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1041 .next = NEXT(item_vlan),
1044 [ITEM_VLAN_TPID] = {
1046 .help = "tag protocol identifier",
1047 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1048 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1052 .help = "tag control information",
1053 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1054 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1058 .help = "priority code point",
1059 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1060 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1065 .help = "drop eligible indicator",
1066 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1067 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1072 .help = "VLAN identifier",
1073 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1074 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1079 .help = "match IPv4 header",
1080 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1081 .next = NEXT(item_ipv4),
1086 .help = "type of service",
1087 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1088 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1089 hdr.type_of_service)),
1093 .help = "time to live",
1094 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1095 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1098 [ITEM_IPV4_PROTO] = {
1100 .help = "next protocol ID",
1101 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1102 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1103 hdr.next_proto_id)),
1107 .help = "source address",
1108 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1109 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1114 .help = "destination address",
1115 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1116 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1121 .help = "match IPv6 header",
1122 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1123 .next = NEXT(item_ipv6),
1128 .help = "traffic class",
1129 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1130 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1132 "\x0f\xf0\x00\x00")),
1134 [ITEM_IPV6_FLOW] = {
1136 .help = "flow label",
1137 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1138 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1140 "\x00\x0f\xff\xff")),
1142 [ITEM_IPV6_PROTO] = {
1144 .help = "protocol (next header)",
1145 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1146 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1151 .help = "hop limit",
1152 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1153 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1158 .help = "source address",
1159 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1160 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1165 .help = "destination address",
1166 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1167 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1172 .help = "match ICMP header",
1173 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1174 .next = NEXT(item_icmp),
1177 [ITEM_ICMP_TYPE] = {
1179 .help = "ICMP packet type",
1180 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1181 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1184 [ITEM_ICMP_CODE] = {
1186 .help = "ICMP packet code",
1187 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1188 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1193 .help = "match UDP header",
1194 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1195 .next = NEXT(item_udp),
1200 .help = "UDP source port",
1201 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1202 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1207 .help = "UDP destination port",
1208 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1209 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1214 .help = "match TCP header",
1215 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1216 .next = NEXT(item_tcp),
1221 .help = "TCP source port",
1222 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1223 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1228 .help = "TCP destination port",
1229 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1235 .help = "match SCTP header",
1236 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1237 .next = NEXT(item_sctp),
1242 .help = "SCTP source port",
1243 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1249 .help = "SCTP destination port",
1250 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1251 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1256 .help = "validation tag",
1257 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1261 [ITEM_SCTP_CKSUM] = {
1264 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1265 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1270 .help = "match VXLAN header",
1271 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1272 .next = NEXT(item_vxlan),
1275 [ITEM_VXLAN_VNI] = {
1277 .help = "VXLAN identifier",
1278 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1281 /* Validate/create actions. */
1284 .help = "submit a list of associated actions",
1285 .next = NEXT(next_action),
1290 .help = "specify next action",
1291 .next = NEXT(next_action),
1295 .help = "end list of actions",
1296 .priv = PRIV_ACTION(END, 0),
1301 .help = "no-op action",
1302 .priv = PRIV_ACTION(VOID, 0),
1303 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1306 [ACTION_PASSTHRU] = {
1308 .help = "let subsequent rule process matched packets",
1309 .priv = PRIV_ACTION(PASSTHRU, 0),
1310 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1315 .help = "attach 32 bit value to packets",
1316 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1317 .next = NEXT(action_mark),
1320 [ACTION_MARK_ID] = {
1322 .help = "32 bit value to return with packets",
1323 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1324 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1325 .call = parse_vc_conf,
1329 .help = "flag packets",
1330 .priv = PRIV_ACTION(FLAG, 0),
1331 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1336 .help = "assign packets to a given queue index",
1337 .priv = PRIV_ACTION(QUEUE,
1338 sizeof(struct rte_flow_action_queue)),
1339 .next = NEXT(action_queue),
1342 [ACTION_QUEUE_INDEX] = {
1344 .help = "queue index to use",
1345 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1346 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1347 .call = parse_vc_conf,
1351 .help = "drop packets (note: passthru has priority)",
1352 .priv = PRIV_ACTION(DROP, 0),
1353 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1358 .help = "enable counters for this rule",
1359 .priv = PRIV_ACTION(COUNT, 0),
1360 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1365 .help = "duplicate packets to a given queue index",
1366 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1367 .next = NEXT(action_dup),
1370 [ACTION_DUP_INDEX] = {
1372 .help = "queue index to duplicate packets to",
1373 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1374 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1375 .call = parse_vc_conf,
1379 .help = "spread packets among several queues",
1380 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1381 .next = NEXT(action_rss),
1384 [ACTION_RSS_QUEUES] = {
1386 .help = "queue indices to use",
1387 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1388 .call = parse_vc_conf,
1390 [ACTION_RSS_QUEUE] = {
1392 .help = "queue index",
1393 .call = parse_vc_action_rss_queue,
1394 .comp = comp_vc_action_rss_queue,
1398 .help = "redirect packets to physical device function",
1399 .priv = PRIV_ACTION(PF, 0),
1400 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1405 .help = "redirect packets to virtual device function",
1406 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1407 .next = NEXT(action_vf),
1410 [ACTION_VF_ORIGINAL] = {
1412 .help = "use original VF ID if possible",
1413 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1414 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1416 .call = parse_vc_conf,
1420 .help = "VF ID to redirect packets to",
1421 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1422 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1423 .call = parse_vc_conf,
1427 /** Remove and return last entry from argument stack. */
1428 static const struct arg *
1429 pop_args(struct context *ctx)
1431 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1434 /** Add entry on top of the argument stack. */
1436 push_args(struct context *ctx, const struct arg *arg)
1438 if (ctx->args_num == CTX_STACK_SIZE)
1440 ctx->args[ctx->args_num++] = arg;
1444 /** Spread value into buffer according to bit-mask. */
1446 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1448 uint32_t i = arg->size;
1456 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1465 unsigned int shift = 0;
1466 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1468 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1469 if (!(arg->mask[i] & (1 << shift)))
1474 *buf &= ~(1 << shift);
1475 *buf |= (val & 1) << shift;
1484 * Parse a prefix length and generate a bit-mask.
1486 * Last argument (ctx->args) is retrieved to determine mask size, storage
1487 * location and whether the result must use network byte ordering.
1490 parse_prefix(struct context *ctx, const struct token *token,
1491 const char *str, unsigned int len,
1492 void *buf, unsigned int size)
1494 const struct arg *arg = pop_args(ctx);
1495 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1502 /* Argument is expected. */
1506 u = strtoumax(str, &end, 0);
1507 if (errno || (size_t)(end - str) != len)
1512 extra = arg_entry_bf_fill(NULL, 0, arg);
1521 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1522 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1529 if (bytes > size || bytes + !!extra > size)
1533 buf = (uint8_t *)ctx->object + arg->offset;
1534 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1536 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1537 memset(buf, 0x00, size - bytes);
1539 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1543 memset(buf, 0xff, bytes);
1544 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1546 ((uint8_t *)buf)[bytes] = conv[extra];
1549 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1552 push_args(ctx, arg);
1556 /** Default parsing function for token name matching. */
1558 parse_default(struct context *ctx, const struct token *token,
1559 const char *str, unsigned int len,
1560 void *buf, unsigned int size)
1565 if (strncmp(str, token->name, len))
1570 /** Parse flow command, initialize output buffer for subsequent tokens. */
1572 parse_init(struct context *ctx, const struct token *token,
1573 const char *str, unsigned int len,
1574 void *buf, unsigned int size)
1576 struct buffer *out = buf;
1578 /* Token name must match. */
1579 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1581 /* Nothing else to do if there is no buffer. */
1584 /* Make sure buffer is large enough. */
1585 if (size < sizeof(*out))
1587 /* Initialize buffer. */
1588 memset(out, 0x00, sizeof(*out));
1589 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1592 ctx->objmask = NULL;
1596 /** Parse tokens for validate/create commands. */
/*
 * Handles the VALIDATE/CREATE command family. On the first call it
 * records the command and carves the output buffer: pattern items grow
 * upward from the end of the header while their spec/last/mask payloads
 * (data) grow downward from the end of the buffer; the two regions
 * collide on overflow (checked below).
 * NOTE(review): many interior lines (returns, case labels, braces) are
 * elided in this extract; comments describe only the visible logic.
 */
1598 parse_vc(struct context *ctx, const struct token *token,
1599 const char *str, unsigned int len,
1600 void *buf, unsigned int size)
1602 struct buffer *out = buf;
1606 /* Token name must match. */
1607 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1609 /* Nothing else to do if there is no buffer. */
1612 if (!out->command) {
1613 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1615 if (sizeof(*out) > size)
1617 out->command = ctx->curr;
1620 ctx->objmask = NULL;
/* Payload area starts at the very end of the buffer and grows down. */
1621 out->args.vc.data = (uint8_t *)out + size;
1625 ctx->object = &out->args.vc.attr;
1626 ctx->objmask = NULL;
1627 switch (ctx->curr) {
1632 out->args.vc.attr.ingress = 1;
1635 out->args.vc.attr.egress = 1;
/* PATTERN keyword: place item array right after the header, aligned. */
1638 out->args.vc.pattern =
1639 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1641 ctx->object = out->args.vc.pattern;
1642 ctx->objmask = NULL;
/* ACTIONS keyword: action array follows the parsed pattern items. */
1645 out->args.vc.actions =
1646 (void *)RTE_ALIGN_CEIL((uintptr_t)
1647 (out->args.vc.pattern +
1648 out->args.vc.pattern_n),
1650 ctx->object = out->args.vc.actions;
1651 ctx->objmask = NULL;
/* Before ACTIONS is seen, tokens are pattern items; after, actions. */
1658 if (!out->args.vc.actions) {
1659 const struct parse_item_priv *priv = token->priv;
1660 struct rte_flow_item *item =
1661 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve three copies of the item payload: spec, last, mask. */
1663 data_size = priv->size * 3; /* spec, last, mask */
1664 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1665 (out->args.vc.data - data_size),
/* Fail when the item array and payload area would overlap. */
1667 if ((uint8_t *)item + sizeof(*item) > data)
1669 *item = (struct rte_flow_item){
1672 ++out->args.vc.pattern_n;
1674 ctx->objmask = NULL;
1676 const struct parse_action_priv *priv = token->priv;
1677 struct rte_flow_action *action =
1678 out->args.vc.actions + out->args.vc.actions_n;
/* Actions carry a single configuration payload, no spec/last/mask. */
1680 data_size = priv->size; /* configuration */
1681 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1682 (out->args.vc.data - data_size),
1684 if ((uint8_t *)action + sizeof(*action) > data)
1686 *action = (struct rte_flow_action){
1689 ++out->args.vc.actions_n;
1690 ctx->object = action;
1691 ctx->objmask = NULL;
1693 memset(data, 0, data_size);
1694 out->args.vc.data = data;
1695 ctx->objdata = data_size;
1699 /** Parse pattern item parameter type. */
/*
 * Selects which of the three per-item payload copies (spec, last, mask)
 * subsequent argument tokens write into, based on the parameter keyword
 * (spec/last/prefix/mask). PREFIX additionally rewrites the next-token
 * list so a prefix length is expected instead of a raw value.
 */
1701 parse_vc_spec(struct context *ctx, const struct token *token,
1702 const char *str, unsigned int len,
1703 void *buf, unsigned int size)
1705 struct buffer *out = buf;
1706 struct rte_flow_item *item;
1712 /* Token name must match. */
1713 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1715 /* Parse parameter types. */
1716 switch (ctx->curr) {
1721 case ITEM_PARAM_SPEC:
1724 case ITEM_PARAM_LAST:
1727 case ITEM_PARAM_PREFIX:
1728 /* Modify next token to expect a prefix. */
1729 if (ctx->next_num < 2)
1731 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1733 case ITEM_PARAM_MASK:
1739 /* Nothing else to do if there is no buffer. */
1742 if (!out->args.vc.pattern_n)
/* Operate on the most recently added pattern item. */
1744 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1745 data_size = ctx->objdata / 3; /* spec, last, mask */
1746 /* Point to selected object. */
1747 ctx->object = out->args.vc.data + (data_size * index);
1749 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1750 item->mask = ctx->objmask;
1752 ctx->objmask = NULL;
1753 /* Update relevant item pointer. */
1754 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1759 /** Parse action configuration field. */
/*
 * Points the context object (and the latest action's conf pointer) at
 * the per-action configuration payload reserved by parse_vc(), so the
 * following argument tokens fill the action's configuration structure.
 */
1761 parse_vc_conf(struct context *ctx, const struct token *token,
1762 const char *str, unsigned int len,
1763 void *buf, unsigned int size)
1765 struct buffer *out = buf;
1766 struct rte_flow_action *action;
1769 /* Token name must match. */
1770 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1772 /* Nothing else to do if there is no buffer. */
1775 if (!out->args.vc.actions_n)
/* Operate on the most recently added action. */
1777 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1778 /* Point to selected object. */
1779 ctx->object = out->args.vc.data;
1780 ctx->objmask = NULL;
1781 /* Update configuration pointer. */
1782 action->conf = ctx->object;
1787 * Parse queue field for RSS action.
1789 * Valid tokens are queue indices and the "end" token.
/*
 * Repeatedly invoked for each "queue" token of an RSS action; the
 * running queue count is kept in the upper 16 bits of ctx->objdata.
 * "end" terminates the list and stores the final count into the
 * rte_flow_action_rss num field.
 */
1792 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1793 const char *str, unsigned int len,
1794 void *buf, unsigned int size)
1796 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1803 if (ctx->curr != ACTION_RSS_QUEUE)
/* Current queue index lives in the upper half of objdata. */
1805 i = ctx->objdata >> 16;
1806 if (!strncmp(str, "end", len)) {
1807 ctx->objdata &= 0xffff;
1810 if (i >= ACTION_RSS_NUM)
1812 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1814 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Stash the incremented index back into the upper half of objdata. */
1820 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
1822 if (ctx->next_num == RTE_DIM(ctx->next))
/* Re-arm this token so another queue index (or "end") is accepted. */
1824 ctx->next[ctx->next_num++] = next;
1827 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1831 /** Parse tokens for destroy command. */
/*
 * First call records the DESTROY command and positions the rule-ID
 * array after the buffer header; each later call bounds-checks and
 * appends one rule ID slot for the following integer token to fill.
 */
1833 parse_destroy(struct context *ctx, const struct token *token,
1834 const char *str, unsigned int len,
1835 void *buf, unsigned int size)
1837 struct buffer *out = buf;
1839 /* Token name must match. */
1840 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1842 /* Nothing else to do if there is no buffer. */
1845 if (!out->command) {
1846 if (ctx->curr != DESTROY)
1848 if (sizeof(*out) > size)
1850 out->command = ctx->curr;
1853 ctx->objmask = NULL;
1854 out->args.destroy.rule =
1855 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another rule ID when it would overrun the output buffer. */
1859 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1860 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1863 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1864 ctx->objmask = NULL;
1868 /** Parse tokens for flush command. */
/*
 * FLUSH takes no arguments beyond the port; the first call simply
 * records the command in the output buffer.
 */
1870 parse_flush(struct context *ctx, const struct token *token,
1871 const char *str, unsigned int len,
1872 void *buf, unsigned int size)
1874 struct buffer *out = buf;
1876 /* Token name must match. */
1877 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1879 /* Nothing else to do if there is no buffer. */
1882 if (!out->command) {
1883 if (ctx->curr != FLUSH)
1885 if (sizeof(*out) > size)
1887 out->command = ctx->curr;
1890 ctx->objmask = NULL;
1895 /** Parse tokens for query command. */
/*
 * First call records the QUERY command; the rule ID and action name
 * arguments are filled by subsequent tokens (see parse_action()).
 */
1897 parse_query(struct context *ctx, const struct token *token,
1898 const char *str, unsigned int len,
1899 void *buf, unsigned int size)
1901 struct buffer *out = buf;
1903 /* Token name must match. */
1904 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1906 /* Nothing else to do if there is no buffer. */
1909 if (!out->command) {
1910 if (ctx->curr != QUERY)
1912 if (sizeof(*out) > size)
1914 out->command = ctx->curr;
1917 ctx->objmask = NULL;
1922 /** Parse action names. */
/*
 * Resolves an action keyword (e.g. for "flow query ... <action>") by
 * scanning the next_action[] token list, then stores the matched
 * action's type into the object field described by the popped argument.
 * NOTE(review): error-return lines are elided in this extract.
 */
1924 parse_action(struct context *ctx, const struct token *token,
1925 const char *str, unsigned int len,
1926 void *buf, unsigned int size)
1928 struct buffer *out = buf;
1929 const struct arg *arg = pop_args(ctx);
1933 /* Argument is expected. */
1936 /* Parse action name. */
1937 for (i = 0; next_action[i]; ++i) {
1938 const struct parse_action_priv *priv;
1940 token = &token_list[next_action[i]];
1941 if (strncmp(token->name, str, len))
1947 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so it can be retried/reported. */
1953 push_args(ctx, arg);
1957 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): the first call records the LIST command and
 * places the group-ID array after the header; each later call appends
 * one bounds-checked group ID slot for the next integer token.
 */
1959 parse_list(struct context *ctx, const struct token *token,
1960 const char *str, unsigned int len,
1961 void *buf, unsigned int size)
1963 struct buffer *out = buf;
1965 /* Token name must match. */
1966 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1968 /* Nothing else to do if there is no buffer. */
1971 if (!out->command) {
1972 if (ctx->curr != LIST)
1974 if (sizeof(*out) > size)
1976 out->command = ctx->curr;
1979 ctx->objmask = NULL;
1980 out->args.list.group =
1981 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another group ID when it would overrun the output buffer. */
1985 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1986 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1989 ctx->object = out->args.list.group + out->args.list.group_n++;
1990 ctx->objmask = NULL;
1995 * Parse signed/unsigned integers 8 to 64-bit long.
1997 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts the string with strtoimax()/strtoumax(), then stores the
 * value into the destination described by the popped argument: either
 * as a bit-field (arg_entry_bf_fill) or as a fixed-width scalar with
 * optional host-to-network byte swapping. A 24-bit (uint8_t[3]) case
 * is handled manually for both endiannesses. When an object mask is
 * active, the same value path is replayed for the mask bytes.
 */
2001 parse_int(struct context *ctx, const struct token *token,
2002 const char *str, unsigned int len,
2003 void *buf, unsigned int size)
2005 const struct arg *arg = pop_args(ctx);
2010 /* Argument is expected. */
/* Signedness of the conversion follows the argument descriptor. */
2015 (uintmax_t)strtoimax(str, &end, 0) :
2016 strtoumax(str, &end, 0);
/* Reject trailing garbage: the whole token must be consumed. */
2017 if (errno || (size_t)(end - str) != len)
2022 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2023 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2027 buf = (uint8_t *)ctx->object + arg->offset;
2031 case sizeof(uint8_t):
2032 *(uint8_t *)buf = u;
2034 case sizeof(uint16_t):
2035 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit values have no native type; store bytes explicitly. */
2037 case sizeof(uint8_t [3]):
2038 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2040 ((uint8_t *)buf)[0] = u;
2041 ((uint8_t *)buf)[1] = u >> 8;
2042 ((uint8_t *)buf)[2] = u >> 16;
2046 ((uint8_t *)buf)[0] = u >> 16;
2047 ((uint8_t *)buf)[1] = u >> 8;
2048 ((uint8_t *)buf)[2] = u;
2050 case sizeof(uint32_t):
2051 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2053 case sizeof(uint64_t):
2054 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store for the mask unless it aliases the object. */
2059 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2061 buf = (uint8_t *)ctx->objmask + arg->offset;
2066 push_args(ctx, arg);
2073 * Two arguments (ctx->args) are retrieved from the stack to store data and
2074 * its length (in that order).
/*
 * Parses a raw string field: first writes the string length through
 * parse_int() (via a small decimal rendering in tmp[]), then copies the
 * bytes into the data field, padding the tail with 0x55 so unset bytes
 * are recognizable. On failure both arguments are pushed back.
 */
2077 parse_string(struct context *ctx, const struct token *token,
2078 const char *str, unsigned int len,
2079 void *buf, unsigned int size)
2081 const struct arg *arg_data = pop_args(ctx);
2082 const struct arg *arg_len = pop_args(ctx);
2083 char tmp[16]; /* Ought to be enough. */
2086 /* Arguments are expected. */
2090 push_args(ctx, arg_data);
2093 size = arg_data->size;
2094 /* Bit-mask fill is not supported. */
2095 if (arg_data->mask || size < len)
2099 /* Let parse_int() fill length information first. */
2100 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2103 push_args(ctx, arg_len);
2104 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2109 buf = (uint8_t *)ctx->object + arg_data->offset;
2110 /* Output buffer is not necessarily NUL-terminated. */
2111 memcpy(buf, str, len);
/* Poison the unused tail so consumers can spot the real length. */
2112 memset((uint8_t *)buf + len, 0x55, size - len);
2114 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2117 push_args(ctx, arg_len);
2118 push_args(ctx, arg_data);
2123 * Parse a MAC address.
2125 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Delegates to cmdline_parse_etheraddr() into a local temporary, then
 * copies the result into the destination field; requires the field to
 * be exactly sizeof(struct ether_addr) and rejects bit-mask fills.
 */
2129 parse_mac_addr(struct context *ctx, const struct token *token,
2130 const char *str, unsigned int len,
2131 void *buf, unsigned int size)
2133 const struct arg *arg = pop_args(ctx);
2134 struct ether_addr tmp;
2138 /* Argument is expected. */
2142 /* Bit-mask fill is not supported. */
2143 if (arg->mask || size != sizeof(tmp))
2145 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The parser must consume the whole token, nothing less. */
2146 if (ret < 0 || (unsigned int)ret != len)
2150 buf = (uint8_t *)ctx->object + arg->offset;
2151 memcpy(buf, &tmp, size);
2153 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2156 push_args(ctx, arg);
2161 * Parse an IPv4 address.
2163 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) on a NUL-terminated copy of the token; on
 * failure falls back to parse_int() so plain integers remain accepted.
 * Result is stored in network byte order only.
 */
2167 parse_ipv4_addr(struct context *ctx, const struct token *token,
2168 const char *str, unsigned int len,
2169 void *buf, unsigned int size)
2171 const struct arg *arg = pop_args(ctx);
2176 /* Argument is expected. */
2180 /* Bit-mask fill is not supported. */
2181 if (arg->mask || size != sizeof(tmp))
2183 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
2186 memcpy(str2, str, len);
2188 ret = inet_pton(AF_INET, str2, &tmp);
2190 /* Attempt integer parsing. */
2191 push_args(ctx, arg);
2192 return parse_int(ctx, token, str, len, buf, size);
2196 buf = (uint8_t *)ctx->object + arg->offset;
2197 memcpy(buf, &tmp, size);
2199 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2202 push_args(ctx, arg);
2207 * Parse an IPv6 address.
2209 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Same structure as parse_ipv4_addr() but uses AF_INET6 and has no
 * integer fallback (an IPv6 address cannot be a plain integer token).
 * Result is stored in network byte order only.
 */
2213 parse_ipv6_addr(struct context *ctx, const struct token *token,
2214 const char *str, unsigned int len,
2215 void *buf, unsigned int size)
2217 const struct arg *arg = pop_args(ctx);
2219 struct in6_addr tmp;
2223 /* Argument is expected. */
2227 /* Bit-mask fill is not supported. */
2228 if (arg->mask || size != sizeof(tmp))
2230 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
2233 memcpy(str2, str, len);
2235 ret = inet_pton(AF_INET6, str2, &tmp);
2240 buf = (uint8_t *)ctx->object + arg->offset;
2241 memcpy(buf, &tmp, size);
2243 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2246 push_args(ctx, arg);
2250 /** Boolean values (even indices stand for false). */
/* NOTE(review): the array entries are elided in this extract; the
 * even/odd index convention is relied on by parse_boolean() below. */
2251 static const char *const boolean_name[] = {
2260 * Parse a boolean value.
2262 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Looks the token up in boolean_name[]; a match at an odd index means
 * true, even means false (see table comment). The resulting "0"/"1"
 * string — or the original token when unmatched — is handed to
 * parse_int() for storage.
 */
2266 parse_boolean(struct context *ctx, const struct token *token,
2267 const char *str, unsigned int len,
2268 void *buf, unsigned int size)
2270 const struct arg *arg = pop_args(ctx);
2274 /* Argument is expected. */
2277 for (i = 0; boolean_name[i]; ++i)
2278 if (!strncmp(str, boolean_name[i], len))
2280 /* Process token as integer. */
2281 if (boolean_name[i])
2282 str = i & 1 ? "1" : "0";
2283 push_args(ctx, arg);
2284 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen("0"/"1"). */
2285 return ret > 0 ? (int)len : ret;
2288 /** Parse port and update context. */
/*
 * Parses the port ID into a throwaway buffer via parse_int(), then
 * records the result in ctx->port so completion callbacks can use it.
 */
2290 parse_port(struct context *ctx, const struct token *token,
2291 const char *str, unsigned int len,
2292 void *buf, unsigned int size)
/* Compound-literal scratch buffer; only the port field is consumed. */
2294 struct buffer *out = &(struct buffer){ .port = 0 };
2302 ctx->objmask = NULL;
2303 size = sizeof(*out);
2305 ret = parse_int(ctx, token, str, len, out, size);
2307 ctx->port = out->port;
2313 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest. */
2315 comp_none(struct context *ctx, const struct token *token,
2316 unsigned int ent, char *buf, unsigned int size)
2326 /** Complete boolean values. */
/*
 * With buf set, writes the ent-th boolean keyword; otherwise the loop
 * falls through so the entry count can be returned (count line elided).
 */
2328 comp_boolean(struct context *ctx, const struct token *token,
2329 unsigned int ent, char *buf, unsigned int size)
2335 for (i = 0; boolean_name[i]; ++i)
2336 if (buf && i == ent)
2337 return snprintf(buf, size, "%s", boolean_name[i]);
2343 /** Complete action names. */
/*
 * Completion over the next_action[] token list; same count-or-entry
 * convention as comp_boolean().
 */
2345 comp_action(struct context *ctx, const struct token *token,
2346 unsigned int ent, char *buf, unsigned int size)
2352 for (i = 0; next_action[i]; ++i)
2353 if (buf && i == ent)
2354 return snprintf(buf, size, "%s",
2355 token_list[next_action[i]].name);
2361 /** Complete available ports. */
/* Iterates over all configured ports and emits the ent-th port ID. */
2363 comp_port(struct context *ctx, const struct token *token,
2364 unsigned int ent, char *buf, unsigned int size)
2371 FOREACH_PORT(p, ports) {
2372 if (buf && i == ent)
2373 return snprintf(buf, size, "%u", p);
2381 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port recorded in ctx->port (set by
 * parse_port()) and emits the ent-th rule ID. Bails out silently when
 * the port is invalid or RTE_PORT_ALL, since no single list applies.
 */
2383 comp_rule_id(struct context *ctx, const struct token *token,
2384 unsigned int ent, char *buf, unsigned int size)
2387 struct rte_port *port;
2388 struct port_flow *pf;
2391 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2392 ctx->port == (uint16_t)RTE_PORT_ALL)
2394 port = &ports[ctx->port];
2395 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2396 if (buf && i == ent)
2397 return snprintf(buf, size, "%u", pf->id);
2405 /** Complete queue field for RSS action. */
/*
 * Suggests "" then "end" for RSS queue positions; queue indices
 * themselves are free-form integers and are not enumerated here.
 */
2407 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2408 unsigned int ent, char *buf, unsigned int size)
2410 static const char *const str[] = { "", "end", NULL };
2415 for (i = 0; str[i] != NULL; ++i)
2416 if (buf && i == ent)
2417 return snprintf(buf, size, "%s", str[i]);
2423 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all
 * operate on this instance. */
2424 static struct context cmd_flow_context;
2426 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
2427 cmdline_parse_inst_t cmd_flow;
2429 /** Initialize context. */
/* Resets the shared parser state field by field before a new command. */
2431 cmd_flow_context_init(struct context *ctx)
2433 /* A full memset() is not necessary. */
2444 ctx->objmask = NULL;
2447 /** Parse a token (cmdline API). */
/*
 * Main dispatch: isolates the next word of src (terminated by space or
 * '#'), detects end-of-line, then tries every candidate token from the
 * top of the next-token stack. The first token whose parser consumes
 * exactly len characters wins; its follow-up tokens and arguments are
 * then pushed for the next call.
 * NOTE(review): several interior lines (error returns, loop breaks)
 * are elided in this extract.
 */
2449 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2452 struct context *ctx = &cmd_flow_context;
2453 const struct token *token;
2454 const enum index *list;
2459 /* Restart as requested. */
2461 cmd_flow_context_init(ctx);
2462 token = &token_list[ctx->curr];
2463 /* Check argument length. */
/* '#' starts a comment and ends the token like whitespace does. */
2466 for (len = 0; src[len]; ++len)
2467 if (src[len] == '#' || isspace(src[len]))
2471 /* Last argument and EOL detection. */
2472 for (i = len; src[i]; ++i)
2473 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2475 else if (!isspace(src[i])) {
2480 if (src[i] == '\r' || src[i] == '\n') {
2484 /* Initialize context if necessary. */
2485 if (!ctx->next_num) {
2488 ctx->next[ctx->next_num++] = token->next[0];
2490 /* Process argument through candidates. */
2491 ctx->prev = ctx->curr;
2492 list = ctx->next[ctx->next_num - 1];
2493 for (i = 0; list[i]; ++i) {
2494 const struct token *next = &token_list[list[i]];
2497 ctx->curr = list[i];
/* Prefer the token's own callback; fall back to name matching. */
2499 tmp = next->call(ctx, next, src, len, result, size);
2501 tmp = parse_default(ctx, next, src, len, result, size);
/* A parser must consume the whole word or it does not match. */
2502 if (tmp == -1 || tmp != len)
2510 /* Push subsequent tokens if any. */
2512 for (i = 0; token->next[i]; ++i) {
2513 if (ctx->next_num == RTE_DIM(ctx->next))
2515 ctx->next[ctx->next_num++] = token->next[i];
2517 /* Push arguments if any. */
2519 for (i = 0; token->args[i]; ++i) {
2520 if (ctx->args_num == RTE_DIM(ctx->args))
2522 ctx->args[ctx->args_num++] = token->args[i];
2527 /** Return number of completion entries (cmdline API). */
/*
 * Counts candidate tokens at the current parse position. When exactly
 * one candidate exists and it has a completion callback, delegates so
 * the callback can report its own entry count.
 */
2529 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2531 struct context *ctx = &cmd_flow_context;
2532 const struct token *token = &token_list[ctx->curr];
2533 const enum index *list;
2537 /* Tell cmd_flow_parse() that context must be reinitialized. */
2539 /* Count number of tokens in current list. */
2541 list = ctx->next[ctx->next_num - 1];
2543 list = token->next[0];
2544 for (i = 0; list[i]; ++i)
2549 * If there is a single token, use its completion callback, otherwise
2550 * return the number of entries.
2552 token = &token_list[list[0]];
2553 if (i == 1 && token->comp) {
2554 /* Save index for cmd_flow_get_help(). */
2555 ctx->prev = list[0];
/* comp(ctx, token, 0, NULL, 0) returns the count when buf is NULL. */
2556 return token->comp(ctx, token, 0, NULL, 0);
2561 /** Return a completion entry (cmdline API). */
/*
 * Writes the index-th completion candidate into dst. Single-candidate
 * positions delegate to the token's completion callback; otherwise the
 * candidate token's name is used directly.
 */
2563 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2564 char *dst, unsigned int size)
2566 struct context *ctx = &cmd_flow_context;
2567 const struct token *token = &token_list[ctx->curr];
2568 const enum index *list;
2572 /* Tell cmd_flow_parse() that context must be reinitialized. */
2574 /* Count number of tokens in current list. */
2576 list = ctx->next[ctx->next_num - 1];
2578 list = token->next[0];
2579 for (i = 0; list[i]; ++i)
2583 /* If there is a single token, use its completion callback. */
2584 token = &token_list[list[0]];
2585 if (i == 1 && token->comp) {
2586 /* Save index for cmd_flow_get_help(). */
2587 ctx->prev = list[0];
2588 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2590 /* Otherwise make sure the index is valid and use defaults. */
2593 token = &token_list[list[index]];
2594 snprintf(dst, size, "%s", token->name);
2595 /* Save index for cmd_flow_get_help(). */
2596 ctx->prev = list[index];
2600 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the token's type label into dst and points the global
 * cmd_flow.help_str at the token's help text (or its name as a
 * fallback). Uses ctx->prev as saved by the completion callbacks.
 */
2602 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2604 struct context *ctx = &cmd_flow_context;
2605 const struct token *token = &token_list[ctx->prev];
2608 /* Tell cmd_flow_parse() that context must be reinitialized. */
2612 /* Set token type and update global help with details. */
2613 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
2615 cmd_flow.help_str = token->help;
2617 cmd_flow.help_str = token->name;
2621 /** Token definition template (cmdline API). */
/* Every dynamic token returned by cmd_flow_tok() points at this single
 * header, routing all cmdline operations through the callbacks above. */
2622 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2623 .ops = &(struct cmdline_token_ops){
2624 .parse = cmd_flow_parse,
2625 .complete_get_nb = cmd_flow_complete_get_nb,
2626 .complete_get_elt = cmd_flow_complete_get_elt,
2627 .get_help = cmd_flow_get_help,
2632 /** Populate the next dynamic token. */
/*
 * Called by the cmdline framework to fetch successive token headers.
 * Reinitializes the context before the first token, stops once no more
 * tokens are expected, and checks whether an observed EOL may legally
 * end the command at this position.
 */
2634 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2635 cmdline_parse_token_hdr_t *(*hdrs)[])
2637 struct context *ctx = &cmd_flow_context;
2639 /* Always reinitialize context before requesting the first token. */
2641 cmd_flow_context_init(ctx);
2642 /* Return NULL when no more tokens are expected. */
2643 if (!ctx->next_num && ctx->curr) {
2647 /* Determine if command should end here. */
2648 if (ctx->eol && ctx->last && ctx->next_num) {
2649 const enum index *list = ctx->next[ctx->next_num - 1];
2652 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share the same header/ops template. */
2659 *hdr = &cmd_flow_token_hdr;
2662 /** Dispatch parsed buffer to function calls. */
/*
 * Maps each completed command (VALIDATE/CREATE/DESTROY/FLUSH/QUERY/
 * LIST) to the corresponding port_flow_* backend with the arguments
 * collected in the buffer by the parse_* callbacks above.
 */
2664 cmd_flow_parsed(const struct buffer *in)
2666 switch (in->command) {
2668 port_flow_validate(in->port, &in->args.vc.attr,
2669 in->args.vc.pattern, in->args.vc.actions);
2672 port_flow_create(in->port, &in->args.vc.attr,
2673 in->args.vc.pattern, in->args.vc.actions);
2676 port_flow_destroy(in->port, in->args.destroy.rule_n,
2677 in->args.destroy.rule);
2680 port_flow_flush(in->port);
2683 port_flow_query(in->port, in->args.query.rule,
2684 in->args.query.action);
2687 port_flow_list(in->port, in->args.list.group_n,
2688 in->args.list.group);
2695 /** Token generator and output processing callback (cmdline API). */
/* Doubles as both hooks: arg0 decides whether this invocation must
 * produce the next token or dispatch a completed command buffer. */
2697 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2700 cmd_flow_tok(arg0, arg2);
2702 cmd_flow_parsed(arg0);
2705 /** Global parser instance (cmdline API). */
2706 cmdline_parse_inst_t cmd_flow = {
2708 .data = NULL, /**< Unused. */
2709 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2712 }, /**< Tokens are returned by cmd_flow_tok(). */