4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
84 /* Destroy arguments. */
87 /* Query arguments. */
93 /* Validate/create arguments. */
99 /* Validate/create pattern. */
163 /* Validate/create actions. */
187 /** Size of pattern[] field in struct rte_flow_item_raw. */
188 #define ITEM_RAW_PATTERN_SIZE 36
190 /** Storage size for struct rte_flow_item_raw including pattern. */
/* NOTE(review): assumes pattern[] is the trailing member of
 * struct rte_flow_item_raw so offsetof() plus the pattern size covers
 * the whole object -- confirm against rte_flow.h. */
191 #define ITEM_RAW_SIZE \
192 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
194 /** Number of queue[] entries in struct rte_flow_action_rss. */
195 #define ACTION_RSS_NUM 32
197 /** Storage size for struct rte_flow_action_rss including queues. */
/* Null-pointer sizeof idiom: obtains the element size of queue[]
 * without needing an object instance (no dereference at run time). */
198 #define ACTION_RSS_SIZE \
199 (offsetof(struct rte_flow_action_rss, queue) + \
200 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
202 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Bounds both context.next[] and context.args[]; push_args() checks
 * args_num against this limit before storing. */
203 #define CTX_STACK_SIZE 16
205 /** Parser context. */
/* NOTE(review): the "struct context {" opener (original line 206) is
 * sampled out of this excerpt; the fields below belong to it. */
207 /** Stack of subsequent token lists to process. */
208 const enum index *next[CTX_STACK_SIZE];
209 /** Arguments for stacked tokens. */
/* Entries are managed by push_args()/pop_args() further down. */
210 const void *args[CTX_STACK_SIZE];
211 enum index curr; /**< Current token index. */
212 enum index prev; /**< Index of the last token seen. */
213 int next_num; /**< Number of entries in next[]. */
214 int args_num; /**< Number of entries in args[]. */
215 uint32_t reparse:1; /**< Start over from the beginning. */
216 uint32_t eol:1; /**< EOL has been detected. */
217 uint32_t last:1; /**< No more arguments. */
218 uint16_t port; /**< Current port ID (for completions). */
219 uint32_t objdata; /**< Object-specific data. */
220 void *object; /**< Address of current object for relative offsets. */
221 void *objmask; /**< Object a full mask must be written to. */
224 /** Token argument. */
/* Describes where and how a parsed value is stored relative to
 * ctx->object.  NOTE(review): the struct opener is sampled out of
 * this excerpt. */
226 uint32_t hton:1; /**< Use network byte ordering. */
227 uint32_t sign:1; /**< Value is signed. */
228 uint32_t offset; /**< Relative offset from ctx->object. */
229 uint32_t size; /**< Field size. */
/* When non-NULL, arg_entry_bf_fill() spreads the value bit-by-bit
 * through this mask instead of a plain offset/size store. */
230 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
233 /** Parser token definition. */
235 /** Type displayed during completion (defaults to "TOKEN"). */
237 /** Help displayed during completion (defaults to token name). */
239 /** Private data used by parser functions. */
242 * Lists of subsequent tokens to push on the stack. Each call to the
243 * parser consumes the last entry of that stack.
245 const enum index *const *next;
246 /** Arguments stack for subsequent tokens that need them. */
247 const struct arg *const *args;
249 * Token-processing callback, returns -1 in case of error, the
250 * length of the matched string otherwise. If NULL, attempts to
251 * match the token name.
253 * If buf is not NULL, the result should be stored in it according
254 * to context. An error is returned if not large enough.
256 int (*call)(struct context *ctx, const struct token *token,
257 const char *str, unsigned int len,
258 void *buf, unsigned int size);
260 * Callback that provides possible values for this token, used for
261 * completion. Returns -1 in case of error, the number of possible
262 * values otherwise. If NULL, the token name is used.
264 * If buf is not NULL, entry index ent is written to buf and the
265 * full length of the entry is returned (same behavior as
268 int (*comp)(struct context *ctx, const struct token *token,
269 unsigned int ent, char *buf, unsigned int size);
270 /** Mandatory token name, no default value. */
274 /** Static initializer for the next field. */
/* Compound literal: NULL-terminated array of token-index lists. */
275 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
277 /** Static initializer for a NEXT() entry. */
/* Compound literal: one token-index list, terminated by ZERO. */
278 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
280 /** Static initializer for the args field. */
/* Compound literal: NULL-terminated array of argument descriptors. */
281 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
283 /** Static initializer for ARGS() to target a field. */
284 #define ARGS_ENTRY(s, f) \
285 (&(const struct arg){ \
286 .offset = offsetof(s, f), \
287 .size = sizeof(((s *)0)->f), \
290 /** Static initializer for ARGS() to target a bit-field. */
291 #define ARGS_ENTRY_BF(s, f, b) \
292 (&(const struct arg){ \
294 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
297 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
298 #define ARGS_ENTRY_MASK(s, f, m) \
299 (&(const struct arg){ \
300 .offset = offsetof(s, f), \
301 .size = sizeof(((s *)0)->f), \
302 .mask = (const void *)(m), \
305 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
306 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
307 (&(const struct arg){ \
309 .offset = offsetof(s, f), \
310 .size = sizeof(((s *)0)->f), \
311 .mask = (const void *)(m), \
314 /** Static initializer for ARGS() to target a pointer. */
315 #define ARGS_ENTRY_PTR(s, f) \
316 (&(const struct arg){ \
317 .size = sizeof(*((s *)0)->f), \
320 /** Static initializer for ARGS() with arbitrary size. */
321 #define ARGS_ENTRY_USZ(s, f, sz) \
322 (&(const struct arg){ \
323 .offset = offsetof(s, f), \
327 /** Same as ARGS_ENTRY() using network byte ordering. */
328 #define ARGS_ENTRY_HTON(s, f) \
329 (&(const struct arg){ \
331 .offset = offsetof(s, f), \
332 .size = sizeof(((s *)0)->f), \
335 /** Parser output buffer layout expected by cmd_flow_parsed(). */
/* NOTE(review): the struct/union openers are sampled out of this
 * excerpt; only selected members are visible below. */
337 enum index command; /**< Flow command. */
338 uint16_t port; /**< Affected port ID. */
/* Validate/create payload: pattern and actions point into the same
 * output buffer (laid out by parse_vc() below). */
341 struct rte_flow_attr attr;
342 struct rte_flow_item *pattern;
343 struct rte_flow_action *actions;
347 } vc; /**< Validate/create arguments. */
351 } destroy; /**< Destroy arguments. */
/* Action to query; per the QUERY_ACTION token help, it must be part
 * of the rule being queried. */
354 enum rte_flow_action_type action;
355 } query; /**< Query arguments. */
359 } list; /**< List arguments. */
360 } args; /**< Command arguments. */
363 /** Private data for pattern items. */
364 struct parse_item_priv {
365 enum rte_flow_item_type type; /**< Item type. */
366 uint32_t size; /**< Size of item specification structure. */
369 #define PRIV_ITEM(t, s) \
370 (&(const struct parse_item_priv){ \
371 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
375 /** Private data for actions. */
376 struct parse_action_priv {
377 enum rte_flow_action_type type; /**< Action type. */
378 uint32_t size; /**< Size of action configuration structure. */
381 #define PRIV_ACTION(t, s) \
382 (&(const struct parse_action_priv){ \
383 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
387 static const enum index next_vc_attr[] = {
396 static const enum index next_destroy_attr[] = {
402 static const enum index next_list_attr[] = {
408 static const enum index item_param[] = {
417 static const enum index next_item[] = {
438 static const enum index item_any[] = {
444 static const enum index item_vf[] = {
450 static const enum index item_port[] = {
456 static const enum index item_raw[] = {
466 static const enum index item_eth[] = {
474 static const enum index item_vlan[] = {
484 static const enum index item_ipv4[] = {
494 static const enum index item_ipv6[] = {
505 static const enum index item_icmp[] = {
512 static const enum index item_udp[] = {
519 static const enum index item_tcp[] = {
526 static const enum index item_sctp[] = {
535 static const enum index item_vxlan[] = {
541 static const enum index next_action[] = {
557 static const enum index action_mark[] = {
563 static const enum index action_queue[] = {
569 static const enum index action_dup[] = {
575 static const enum index action_rss[] = {
581 static const enum index action_vf[] = {
588 static int parse_init(struct context *, const struct token *,
589 const char *, unsigned int,
590 void *, unsigned int);
591 static int parse_vc(struct context *, const struct token *,
592 const char *, unsigned int,
593 void *, unsigned int);
594 static int parse_vc_spec(struct context *, const struct token *,
595 const char *, unsigned int, void *, unsigned int);
596 static int parse_vc_conf(struct context *, const struct token *,
597 const char *, unsigned int, void *, unsigned int);
598 static int parse_vc_action_rss_queue(struct context *, const struct token *,
599 const char *, unsigned int, void *,
601 static int parse_destroy(struct context *, const struct token *,
602 const char *, unsigned int,
603 void *, unsigned int);
604 static int parse_flush(struct context *, const struct token *,
605 const char *, unsigned int,
606 void *, unsigned int);
607 static int parse_query(struct context *, const struct token *,
608 const char *, unsigned int,
609 void *, unsigned int);
610 static int parse_action(struct context *, const struct token *,
611 const char *, unsigned int,
612 void *, unsigned int);
613 static int parse_list(struct context *, const struct token *,
614 const char *, unsigned int,
615 void *, unsigned int);
616 static int parse_int(struct context *, const struct token *,
617 const char *, unsigned int,
618 void *, unsigned int);
619 static int parse_prefix(struct context *, const struct token *,
620 const char *, unsigned int,
621 void *, unsigned int);
622 static int parse_boolean(struct context *, const struct token *,
623 const char *, unsigned int,
624 void *, unsigned int);
625 static int parse_string(struct context *, const struct token *,
626 const char *, unsigned int,
627 void *, unsigned int);
628 static int parse_mac_addr(struct context *, const struct token *,
629 const char *, unsigned int,
630 void *, unsigned int);
631 static int parse_ipv4_addr(struct context *, const struct token *,
632 const char *, unsigned int,
633 void *, unsigned int);
634 static int parse_ipv6_addr(struct context *, const struct token *,
635 const char *, unsigned int,
636 void *, unsigned int);
637 static int parse_port(struct context *, const struct token *,
638 const char *, unsigned int,
639 void *, unsigned int);
640 static int comp_none(struct context *, const struct token *,
641 unsigned int, char *, unsigned int);
642 static int comp_boolean(struct context *, const struct token *,
643 unsigned int, char *, unsigned int);
644 static int comp_action(struct context *, const struct token *,
645 unsigned int, char *, unsigned int);
646 static int comp_port(struct context *, const struct token *,
647 unsigned int, char *, unsigned int);
648 static int comp_rule_id(struct context *, const struct token *,
649 unsigned int, char *, unsigned int);
650 static int comp_vc_action_rss_queue(struct context *, const struct token *,
651 unsigned int, char *, unsigned int);
653 /** Token definitions. */
654 static const struct token token_list[] = {
655 /* Special tokens. */
658 .help = "null entry, abused as the entry point",
659 .next = NEXT(NEXT_ENTRY(FLOW)),
664 .help = "command may end here",
670 .help = "integer value",
675 .name = "{unsigned}",
677 .help = "unsigned integer value",
684 .help = "prefix length for bit-mask",
685 .call = parse_prefix,
691 .help = "any boolean value",
692 .call = parse_boolean,
693 .comp = comp_boolean,
698 .help = "fixed string",
699 .call = parse_string,
703 .name = "{MAC address}",
705 .help = "standard MAC address notation",
706 .call = parse_mac_addr,
710 .name = "{IPv4 address}",
711 .type = "IPV4 ADDRESS",
712 .help = "standard IPv4 address notation",
713 .call = parse_ipv4_addr,
717 .name = "{IPv6 address}",
718 .type = "IPV6 ADDRESS",
719 .help = "standard IPv6 address notation",
720 .call = parse_ipv6_addr,
726 .help = "rule identifier",
728 .comp = comp_rule_id,
733 .help = "port identifier",
738 .name = "{group_id}",
740 .help = "group identifier",
747 .help = "priority level",
751 /* Top-level command. */
754 .type = "{command} {port_id} [{arg} [...]]",
755 .help = "manage ingress/egress flow rules",
756 .next = NEXT(NEXT_ENTRY
765 /* Sub-level commands. */
768 .help = "check whether a flow rule can be created",
769 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
770 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
775 .help = "create a flow rule",
776 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
777 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
782 .help = "destroy specific flow rules",
783 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
784 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
785 .call = parse_destroy,
789 .help = "destroy all flow rules",
790 .next = NEXT(NEXT_ENTRY(PORT_ID)),
791 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
796 .help = "query an existing flow rule",
797 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
799 NEXT_ENTRY(PORT_ID)),
800 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
801 ARGS_ENTRY(struct buffer, args.query.rule),
802 ARGS_ENTRY(struct buffer, port)),
807 .help = "list existing flow rules",
808 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
809 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
812 /* Destroy arguments. */
815 .help = "specify a rule identifier",
816 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
817 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
818 .call = parse_destroy,
820 /* Query arguments. */
824 .help = "action to query, must be part of the rule",
825 .call = parse_action,
828 /* List arguments. */
831 .help = "specify a group",
832 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
833 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
836 /* Validate/create attributes. */
839 .help = "specify a group",
840 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
841 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
846 .help = "specify a priority level",
847 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
848 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
853 .help = "affect rule to ingress",
854 .next = NEXT(next_vc_attr),
859 .help = "affect rule to egress",
860 .next = NEXT(next_vc_attr),
863 /* Validate/create pattern. */
866 .help = "submit a list of pattern items",
867 .next = NEXT(next_item),
872 .help = "match value perfectly (with full bit-mask)",
873 .call = parse_vc_spec,
875 [ITEM_PARAM_SPEC] = {
877 .help = "match value according to configured bit-mask",
878 .call = parse_vc_spec,
880 [ITEM_PARAM_LAST] = {
882 .help = "specify upper bound to establish a range",
883 .call = parse_vc_spec,
885 [ITEM_PARAM_MASK] = {
887 .help = "specify bit-mask with relevant bits set to one",
888 .call = parse_vc_spec,
890 [ITEM_PARAM_PREFIX] = {
892 .help = "generate bit-mask from a prefix length",
893 .call = parse_vc_spec,
897 .help = "specify next pattern item",
898 .next = NEXT(next_item),
902 .help = "end list of pattern items",
903 .priv = PRIV_ITEM(END, 0),
904 .next = NEXT(NEXT_ENTRY(ACTIONS)),
909 .help = "no-op pattern item",
910 .priv = PRIV_ITEM(VOID, 0),
911 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
916 .help = "perform actions when pattern does not match",
917 .priv = PRIV_ITEM(INVERT, 0),
918 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
923 .help = "match any protocol for the current layer",
924 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
925 .next = NEXT(item_any),
930 .help = "number of layers covered",
931 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
932 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
936 .help = "match packets addressed to the physical function",
937 .priv = PRIV_ITEM(PF, 0),
938 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
943 .help = "match packets addressed to a virtual function ID",
944 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
945 .next = NEXT(item_vf),
950 .help = "destination VF ID",
951 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
952 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
956 .help = "device-specific physical port index to use",
957 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
958 .next = NEXT(item_port),
961 [ITEM_PORT_INDEX] = {
963 .help = "physical port index",
964 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
965 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
969 .help = "match an arbitrary byte string",
970 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
971 .next = NEXT(item_raw),
974 [ITEM_RAW_RELATIVE] = {
976 .help = "look for pattern after the previous item",
977 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
978 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
981 [ITEM_RAW_SEARCH] = {
983 .help = "search pattern from offset (see also limit)",
984 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
985 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
988 [ITEM_RAW_OFFSET] = {
990 .help = "absolute or relative offset for pattern",
991 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
992 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
996 .help = "search area limit for start of pattern",
997 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
998 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1000 [ITEM_RAW_PATTERN] = {
1002 .help = "byte string to look for",
1003 .next = NEXT(item_raw,
1005 NEXT_ENTRY(ITEM_PARAM_IS,
1008 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1009 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1011 ITEM_RAW_PATTERN_SIZE)),
1015 .help = "match Ethernet header",
1016 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1017 .next = NEXT(item_eth),
1022 .help = "destination MAC",
1023 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1024 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
1028 .help = "source MAC",
1029 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1030 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
1034 .help = "EtherType",
1035 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1036 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1040 .help = "match 802.1Q/ad VLAN tag",
1041 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1042 .next = NEXT(item_vlan),
1045 [ITEM_VLAN_TPID] = {
1047 .help = "tag protocol identifier",
1048 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1049 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1053 .help = "tag control information",
1054 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1055 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1059 .help = "priority code point",
1060 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1061 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1066 .help = "drop eligible indicator",
1067 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1068 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1073 .help = "VLAN identifier",
1074 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1075 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1080 .help = "match IPv4 header",
1081 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1082 .next = NEXT(item_ipv4),
1087 .help = "type of service",
1088 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1089 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1090 hdr.type_of_service)),
1094 .help = "time to live",
1095 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1096 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1099 [ITEM_IPV4_PROTO] = {
1101 .help = "next protocol ID",
1102 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1103 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1104 hdr.next_proto_id)),
1108 .help = "source address",
1109 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1110 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1115 .help = "destination address",
1116 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1117 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1122 .help = "match IPv6 header",
1123 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1124 .next = NEXT(item_ipv6),
1129 .help = "traffic class",
1130 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1131 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1133 "\x0f\xf0\x00\x00")),
1135 [ITEM_IPV6_FLOW] = {
1137 .help = "flow label",
1138 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1139 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1141 "\x00\x0f\xff\xff")),
1143 [ITEM_IPV6_PROTO] = {
1145 .help = "protocol (next header)",
1146 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1147 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1152 .help = "hop limit",
1153 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1154 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1159 .help = "source address",
1160 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1161 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1166 .help = "destination address",
1167 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1168 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1173 .help = "match ICMP header",
1174 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1175 .next = NEXT(item_icmp),
1178 [ITEM_ICMP_TYPE] = {
1180 .help = "ICMP packet type",
1181 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1182 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1185 [ITEM_ICMP_CODE] = {
1187 .help = "ICMP packet code",
1188 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1189 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1194 .help = "match UDP header",
1195 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1196 .next = NEXT(item_udp),
1201 .help = "UDP source port",
1202 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1203 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1208 .help = "UDP destination port",
1209 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1210 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1215 .help = "match TCP header",
1216 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1217 .next = NEXT(item_tcp),
1222 .help = "TCP source port",
1223 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1224 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1229 .help = "TCP destination port",
1230 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1231 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1236 .help = "match SCTP header",
1237 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1238 .next = NEXT(item_sctp),
1243 .help = "SCTP source port",
1244 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1245 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1250 .help = "SCTP destination port",
1251 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1257 .help = "validation tag",
1258 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1259 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1262 [ITEM_SCTP_CKSUM] = {
1265 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1266 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1271 .help = "match VXLAN header",
1272 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1273 .next = NEXT(item_vxlan),
1276 [ITEM_VXLAN_VNI] = {
1278 .help = "VXLAN identifier",
1279 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1280 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1282 /* Validate/create actions. */
1285 .help = "submit a list of associated actions",
1286 .next = NEXT(next_action),
1291 .help = "specify next action",
1292 .next = NEXT(next_action),
1296 .help = "end list of actions",
1297 .priv = PRIV_ACTION(END, 0),
1302 .help = "no-op action",
1303 .priv = PRIV_ACTION(VOID, 0),
1304 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1307 [ACTION_PASSTHRU] = {
1309 .help = "let subsequent rule process matched packets",
1310 .priv = PRIV_ACTION(PASSTHRU, 0),
1311 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1316 .help = "attach 32 bit value to packets",
1317 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1318 .next = NEXT(action_mark),
1321 [ACTION_MARK_ID] = {
1323 .help = "32 bit value to return with packets",
1324 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1325 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1326 .call = parse_vc_conf,
1330 .help = "flag packets",
1331 .priv = PRIV_ACTION(FLAG, 0),
1332 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1337 .help = "assign packets to a given queue index",
1338 .priv = PRIV_ACTION(QUEUE,
1339 sizeof(struct rte_flow_action_queue)),
1340 .next = NEXT(action_queue),
1343 [ACTION_QUEUE_INDEX] = {
1345 .help = "queue index to use",
1346 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1347 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1348 .call = parse_vc_conf,
1352 .help = "drop packets (note: passthru has priority)",
1353 .priv = PRIV_ACTION(DROP, 0),
1354 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1359 .help = "enable counters for this rule",
1360 .priv = PRIV_ACTION(COUNT, 0),
1361 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1366 .help = "duplicate packets to a given queue index",
1367 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1368 .next = NEXT(action_dup),
1371 [ACTION_DUP_INDEX] = {
1373 .help = "queue index to duplicate packets to",
1374 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1375 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1376 .call = parse_vc_conf,
1380 .help = "spread packets among several queues",
1381 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1382 .next = NEXT(action_rss),
1385 [ACTION_RSS_QUEUES] = {
1387 .help = "queue indices to use",
1388 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1389 .call = parse_vc_conf,
1391 [ACTION_RSS_QUEUE] = {
1393 .help = "queue index",
1394 .call = parse_vc_action_rss_queue,
1395 .comp = comp_vc_action_rss_queue,
1399 .help = "redirect packets to physical device function",
1400 .priv = PRIV_ACTION(PF, 0),
1401 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1406 .help = "redirect packets to virtual device function",
1407 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1408 .next = NEXT(action_vf),
1411 [ACTION_VF_ORIGINAL] = {
1413 .help = "use original VF ID if possible",
1414 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1415 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1417 .call = parse_vc_conf,
1421 .help = "VF ID to redirect packets to",
1422 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1423 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1424 .call = parse_vc_conf,
1428 /** Remove and return last entry from argument stack. */
/* Returns NULL when the stack is empty.  NOTE(review): the function
 * braces (original lines 1431/1433) are sampled out of this excerpt. */
1429 static const struct arg *
1430 pop_args(struct context *ctx)
1432 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1435 /** Add entry on top of the argument stack. */
/* NOTE(review): return-type line, braces and return statements are
 * sampled out; the visible check presumably rejects the push once
 * args_num reaches CTX_STACK_SIZE -- confirm against full source. */
1437 push_args(struct context *ctx, const struct arg *arg)
1439 if (ctx->args_num == CTX_STACK_SIZE)
1441 ctx->args[ctx->args_num++] = arg;
1445 /** Spread value into buffer according to bit-mask. */
/* Copies successive low-order bits of val into the destination
 * positions where arg->mask bits are set.  parse_prefix() also calls
 * this with dst == NULL as a probe (see below), so the NULL case must
 * be side-effect free.  NOTE(review): most of the body is sampled out
 * of this excerpt. */
1447 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1449 uint32_t i = arg->size;
1457 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian branch: appears to walk the mask starting from its
 * last byte (i is decremented before indexing) -- TODO confirm. */
1466 unsigned int shift = 0;
1467 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan every bit position of the current mask byte, low to high. */
1469 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1470 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then deposit the next bit of val there. */
1475 *buf &= ~(1 << shift);
1476 *buf |= (val & 1) << shift;
1485 * Parse a prefix length and generate a bit-mask.
1487 * Last argument (ctx->args) is retrieved to determine mask size, storage
1488 * location and whether the result must use network byte ordering.
/* NOTE(review): many statements (declarations, error paths, return
 * values) are sampled out of this excerpt. */
1491 parse_prefix(struct context *ctx, const struct token *token,
1492 const char *str, unsigned int len,
1493 void *buf, unsigned int size)
1495 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n most significant bits set; used to
 * fill the partial byte of a prefix that is not a multiple of 8. */
1496 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1503 /* Argument is expected. */
1507 u = strtoumax(str, &end, 0);
/* Reject conversion errors and trailing garbage inside the token. */
1508 if (errno || (size_t)(end - str) != len)
/* Probe call (dst == NULL): appears to report whether arg uses a
 * bit-field mask -- TODO confirm against full source. */
1513 extra = arg_entry_bf_fill(NULL, 0, arg);
1522 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1523 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Bounds check: prefix must fit the target field. */
1530 if (bytes > size || bytes + !!extra > size)
1534 buf = (uint8_t *)ctx->object + arg->offset;
1535 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian: 0xff bytes are written at the high end of the
 * field, zeroes at the low end, partial byte from conv[]. */
1537 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1538 memset(buf, 0x00, size - bytes);
1540 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian: 0xff bytes start at the beginning of the field. */
1544 memset(buf, 0xff, bytes);
1545 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1547 ((uint8_t *)buf)[bytes] = conv[extra];
/* Full mask written to objmask when present. */
1550 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably the error path: put the popped argument back -- confirm. */
1553 push_args(ctx, arg);
1557 /** Default parsing function for token name matching. */
/* Compares str against token->name; per the struct token docs, this
 * is the fallback used when a token has no dedicated call() handler.
 * NOTE(review): braces and return statements are sampled out. */
1559 parse_default(struct context *ctx, const struct token *token,
1560 const char *str, unsigned int len,
1561 void *buf, unsigned int size)
1566 if (strncmp(str, token->name, len))
1571 /** Parse flow command, initialize output buffer for subsequent tokens. */
/* NOTE(review): return statements and some guards are sampled out of
 * this excerpt. */
1573 parse_init(struct context *ctx, const struct token *token,
1574 const char *str, unsigned int len,
1575 void *buf, unsigned int size)
1577 struct buffer *out = buf;
1579 /* Token name must match. */
1580 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1582 /* Nothing else to do if there is no buffer. */
1585 /* Make sure buffer is large enough. */
1586 if (size < sizeof(*out))
1588 /* Initialize buffer. */
/* Zero the structured part; the spare tail is filled with 0x22,
 * which looks like a poison value to catch stale reads -- confirm. */
1589 memset(out, 0x00, sizeof(*out));
1590 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1593 ctx->objmask = NULL;
1597 /** Parse tokens for validate/create commands. */
/* Buffer layout built here: the fixed struct buffer sits at the
 * start; the pattern[] and actions[] arrays grow upward after
 * (out + 1); per-item/per-action data grows downward from the end of
 * the buffer via out->args.vc.data (initialized to out + size, then
 * lowered with RTE_ALIGN_FLOOR).  Overflow is detected when the two
 * regions would meet.  NOTE(review): many statements (returns, case
 * labels, alignment arguments) are sampled out of this excerpt. */
1599 parse_vc(struct context *ctx, const struct token *token,
1600 const char *str, unsigned int len,
1601 void *buf, unsigned int size)
1603 struct buffer *out = buf;
1607 /* Token name must match. */
1608 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1610 /* Nothing else to do if there is no buffer. */
1613 if (!out->command) {
/* First call: only VALIDATE/CREATE may start this command. */
1614 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1616 if (sizeof(*out) > size)
1618 out->command = ctx->curr;
1621 ctx->objmask = NULL;
/* Data area starts at the very end of the buffer and grows down. */
1622 out->args.vc.data = (uint8_t *)out + size;
1626 ctx->object = &out->args.vc.attr;
1627 ctx->objmask = NULL;
1628 switch (ctx->curr) {
1633 out->args.vc.attr.ingress = 1;
1636 out->args.vc.attr.egress = 1;
/* PATTERN: place the item array right after the fixed struct,
 * aligned up. */
1639 out->args.vc.pattern =
1640 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1642 ctx->object = out->args.vc.pattern;
1643 ctx->objmask = NULL;
/* ACTIONS: place the action array after the parsed items. */
1646 out->args.vc.actions =
1647 (void *)RTE_ALIGN_CEIL((uintptr_t)
1648 (out->args.vc.pattern +
1649 out->args.vc.pattern_n),
1651 ctx->object = out->args.vc.actions;
1652 ctx->objmask = NULL;
/* Past this point: token is a pattern item while actions is still
 * unset, an action afterwards. */
1659 if (!out->args.vc.actions) {
1660 const struct parse_item_priv *priv = token->priv;
1661 struct rte_flow_item *item =
1662 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve three copies of the item spec: spec, last and mask. */
1664 data_size = priv->size * 3; /* spec, last, mask */
1665 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1666 (out->args.vc.data - data_size),
/* Fail when the upward item array would collide with the
 * downward data area. */
1668 if ((uint8_t *)item + sizeof(*item) > data)
1670 *item = (struct rte_flow_item){
1673 ++out->args.vc.pattern_n;
1675 ctx->objmask = NULL;
1677 const struct parse_action_priv *priv = token->priv;
1678 struct rte_flow_action *action =
1679 out->args.vc.actions + out->args.vc.actions_n;
/* Actions carry a single configuration object. */
1681 data_size = priv->size; /* configuration */
1682 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1683 (out->args.vc.data - data_size),
1685 if ((uint8_t *)action + sizeof(*action) > data)
1687 *action = (struct rte_flow_action){
1690 ++out->args.vc.actions_n;
1691 ctx->object = action;
1692 ctx->objmask = NULL;
/* Commit the freshly reserved (zeroed) data area. */
1694 memset(data, 0, data_size);
1695 out->args.vc.data = data;
1696 ctx->objdata = data_size;
1700 /** Parse pattern item parameter type. */
1702 parse_vc_spec(struct context *ctx, const struct token *token,
1703 const char *str, unsigned int len,
1704 void *buf, unsigned int size)
1706 struct buffer *out = buf;
1707 struct rte_flow_item *item;
1713 /* Token name must match. */
1714 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1716 /* Parse parameter types. */
/* Each case selects which of the item's three objects (spec/last/mask)
 * subsequent values are written to. */
1717 switch (ctx->curr) {
1722 case ITEM_PARAM_SPEC:
1725 case ITEM_PARAM_LAST:
1728 case ITEM_PARAM_PREFIX:
1729 /* Modify next token to expect a prefix. */
1730 if (ctx->next_num < 2)
1732 ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
1734 case ITEM_PARAM_MASK:
1740 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach a parameter to. */
1743 if (!out->args.vc.pattern_n)
1745 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* objdata holds the full 3x reservation made by parse_vc(). */
1746 data_size = ctx->objdata / 3; /* spec, last, mask */
1747 /* Point to selected object. */
1748 ctx->object = out->args.vc.data + (data_size * index);
1750 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1751 item->mask = ctx->objmask;
1753 ctx->objmask = NULL;
1754 /* Update relevant item pointer. */
/* Compound-literal array maps index 0/1/2 to spec/last/mask fields. */
1755 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1760 /** Parse action configuration field. */
1762 parse_vc_conf(struct context *ctx, const struct token *token,
1763 const char *str, unsigned int len,
1764 void *buf, unsigned int size)
1766 struct buffer *out = buf;
1767 struct rte_flow_action *action;
1770 /* Token name must match. */
1771 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1773 /* Nothing else to do if there is no buffer. */
/* An action must already exist for its configuration to be filled in. */
1776 if (!out->args.vc.actions_n)
1778 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1779 /* Point to selected object. */
/* Configuration storage is the region parse_vc() reserved in vc.data. */
1780 ctx->object = out->args.vc.data;
1781 ctx->objmask = NULL;
1782 /* Update configuration pointer. */
1783 action->conf = ctx->object;
1788 * Parse queue field for RSS action.
1790 * Valid tokens are queue indices and the "end" token.
1793 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1794 const char *str, unsigned int len,
1795 void *buf, unsigned int size)
1797 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1804 if (ctx->curr != ACTION_RSS_QUEUE)
/* Running queue count lives in the upper 16 bits of ctx->objdata. */
1806 i = ctx->objdata >> 16;
1807 if (!strncmp(str, "end", len)) {
/* "end" terminates the list: clear the count from objdata. */
1808 ctx->objdata &= 0xffff;
/* Refuse more queues than the rte_flow_action_rss array can hold. */
1811 if (i >= ACTION_RSS_NUM)
1813 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1815 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back without touching the low 16 bits. */
1821 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-queue this same token so another queue index can follow. */
1823 if (ctx->next_num == RTE_DIM(ctx->next))
1825 ctx->next[ctx->next_num++] = next;
/* Record the final number of queues in the action itself. */
1828 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1832 /** Parse tokens for destroy command. */
1834 parse_destroy(struct context *ctx, const struct token *token,
1835 const char *str, unsigned int len,
1836 void *buf, unsigned int size)
1838 struct buffer *out = buf;
1840 /* Token name must match. */
1841 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1843 /* Nothing else to do if there is no buffer. */
/* First call: record the command and place the rule-ID array after the
 * header. */
1846 if (!out->command) {
1847 if (ctx->curr != DESTROY)
1849 if (sizeof(*out) > size)
1851 out->command = ctx->curr;
1854 ctx->objmask = NULL;
1855 out->args.destroy.rule =
1856 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when appending one more rule ID would overflow the buffer. */
1860 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1861 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1864 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1865 ctx->objmask = NULL;
1869 /** Parse tokens for flush command. */
1871 parse_flush(struct context *ctx, const struct token *token,
1872 const char *str, unsigned int len,
1873 void *buf, unsigned int size)
1875 struct buffer *out = buf;
1877 /* Token name must match. */
1878 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1880 /* Nothing else to do if there is no buffer. */
/* Flush takes no arguments beyond the port; just record the command. */
1883 if (!out->command) {
1884 if (ctx->curr != FLUSH)
1886 if (sizeof(*out) > size)
1888 out->command = ctx->curr;
1891 ctx->objmask = NULL;
1896 /** Parse tokens for query command. */
1898 parse_query(struct context *ctx, const struct token *token,
1899 const char *str, unsigned int len,
1900 void *buf, unsigned int size)
1902 struct buffer *out = buf;
1904 /* Token name must match. */
1905 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1907 /* Nothing else to do if there is no buffer. */
/* First call: record the command; rule/action args follow as tokens. */
1910 if (!out->command) {
1911 if (ctx->curr != QUERY)
1913 if (sizeof(*out) > size)
1915 out->command = ctx->curr;
1918 ctx->objmask = NULL;
1923 /** Parse action names. */
1925 parse_action(struct context *ctx, const struct token *token,
1926 const char *str, unsigned int len,
1927 void *buf, unsigned int size)
1929 struct buffer *out = buf;
1930 const struct arg *arg = pop_args(ctx);
1934 /* Argument is expected. */
1937 /* Parse action name. */
/* Linear scan of the next_action token list for a name match. */
1938 for (i = 0; next_action[i]; ++i) {
1939 const struct parse_action_priv *priv;
1941 token = &token_list[next_action[i]];
1942 if (strncmp(token->name, str, len))
/* Store the matched action type at the argument's offset. */
1948 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so a later attempt can reuse it. */
1954 push_args(ctx, arg);
1958 /** Parse tokens for list command. */
1960 parse_list(struct context *ctx, const struct token *token,
1961 const char *str, unsigned int len,
1962 void *buf, unsigned int size)
1964 struct buffer *out = buf;
1966 /* Token name must match. */
1967 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1969 /* Nothing else to do if there is no buffer. */
/* First call: record the command and place the group array after the
 * header, mirroring parse_destroy()'s rule array. */
1972 if (!out->command) {
1973 if (ctx->curr != LIST)
1975 if (sizeof(*out) > size)
1977 out->command = ctx->curr;
1980 ctx->objmask = NULL;
1981 out->args.list.group =
1982 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when appending one more group ID would overflow the buffer. */
1986 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1987 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1990 ctx->object = out->args.list.group + out->args.list.group_n++;
1991 ctx->objmask = NULL;
1996 * Parse signed/unsigned integers 8 to 64-bit long.
1998 * Last argument (ctx->args) is retrieved to determine integer type and
2002 parse_int(struct context *ctx, const struct token *token,
2003 const char *str, unsigned int len,
2004 void *buf, unsigned int size)
2006 const struct arg *arg = pop_args(ctx);
2011 /* Argument is expected. */
/* Signedness of the conversion follows the argument descriptor. */
2016 (uintmax_t)strtoimax(str, &end, 0) :
2017 strtoumax(str, &end, 0);
/* Reject if strtol-family failed or did not consume exactly len bytes. */
2018 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments are filled via the dedicated helper, value and
 * all-ones mask. */
2023 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2024 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2028 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on storage size; hton conversion applied where requested. */
2032 case sizeof(uint8_t):
2033 *(uint8_t *)buf = u;
2035 case sizeof(uint16_t):
2036 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. uint8_t[3]) need explicit byte-order handling. */
2038 case sizeof(uint8_t [3]):
2039 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2041 ((uint8_t *)buf)[0] = u;
2042 ((uint8_t *)buf)[1] = u >> 8;
2043 ((uint8_t *)buf)[2] = u >> 16;
2047 ((uint8_t *)buf)[0] = u >> 16;
2048 ((uint8_t *)buf)[1] = u >> 8;
2049 ((uint8_t *)buf)[2] = u;
2051 case sizeof(uint32_t):
2052 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2054 case sizeof(uint64_t):
2055 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Fill the mask object as well unless it aliases the value storage. */
2060 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2062 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore the argument on failure for a possible retry. */
2067 push_args(ctx, arg);
2074 * Two arguments (ctx->args) are retrieved from the stack to store data and
2075 * its length (in that order).
2078 parse_string(struct context *ctx, const struct token *token,
2079 const char *str, unsigned int len,
2080 void *buf, unsigned int size)
2082 const struct arg *arg_data = pop_args(ctx);
2083 const struct arg *arg_len = pop_args(ctx);
2084 char tmp[16]; /* Ought to be enough. */
2087 /* Arguments are expected. */
/* Restore data argument before bailing out when length is missing. */
2091 push_args(ctx, arg_data);
2094 size = arg_data->size;
2095 /* Bit-mask fill is not supported. */
2096 if (arg_data->mask || size < len)
2100 /* Let parse_int() fill length information first. */
2101 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2104 push_args(ctx, arg_len);
2105 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2110 buf = (uint8_t *)ctx->object + arg_data->offset;
2111 /* Output buffer is not necessarily NUL-terminated. */
2112 memcpy(buf, str, len);
/* 0x55 appears to be a poison byte for the unused tail —
 * NOTE(review): confirm it is deliberate, as in parse_init(). */
2113 memset((uint8_t *)buf + len, 0x55, size - len);
2115 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* On failure, restore both arguments in original stack order. */
2118 push_args(ctx, arg_len);
2119 push_args(ctx, arg_data);
2124 * Parse a MAC address.
2126 * Last argument (ctx->args) is retrieved to determine storage size and
2130 parse_mac_addr(struct context *ctx, const struct token *token,
2131 const char *str, unsigned int len,
2132 void *buf, unsigned int size)
2134 const struct arg *arg = pop_args(ctx);
2135 struct ether_addr tmp;
2139 /* Argument is expected. */
2143 /* Bit-mask fill is not supported. */
2144 if (arg->mask || size != sizeof(tmp))
/* Delegate actual parsing to the cmdline library helper. */
2146 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must have been consumed. */
2147 if (ret < 0 || (unsigned int)ret != len)
2151 buf = (uint8_t *)ctx->object + arg->offset;
2152 memcpy(buf, &tmp, size);
2154 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure for a possible retry. */
2157 push_args(ctx, arg);
2162 * Parse an IPv4 address.
2164 * Last argument (ctx->args) is retrieved to determine storage size and
2168 parse_ipv4_addr(struct context *ctx, const struct token *token,
2169 const char *str, unsigned int len,
2170 void *buf, unsigned int size)
2172 const struct arg *arg = pop_args(ctx);
2177 /* Argument is expected. */
2181 /* Bit-mask fill is not supported. */
2182 if (arg->mask || size != sizeof(tmp))
2184 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2187 memcpy(str2, str, len);
2189 ret = inet_pton(AF_INET, str2, &tmp);
/* Not dotted-quad notation: fall back to plain integer parsing. */
2191 /* Attempt integer parsing. */
2192 push_args(ctx, arg);
2193 return parse_int(ctx, token, str, len, buf, size);
2197 buf = (uint8_t *)ctx->object + arg->offset;
2198 memcpy(buf, &tmp, size);
2200 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure for a possible retry. */
2203 push_args(ctx, arg);
2208 * Parse an IPv6 address.
2210 * Last argument (ctx->args) is retrieved to determine storage size and
2214 parse_ipv6_addr(struct context *ctx, const struct token *token,
2215 const char *str, unsigned int len,
2216 void *buf, unsigned int size)
2218 const struct arg *arg = pop_args(ctx);
2220 struct in6_addr tmp;
2224 /* Argument is expected. */
2228 /* Bit-mask fill is not supported. */
2229 if (arg->mask || size != sizeof(tmp))
2231 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2234 memcpy(str2, str, len);
2236 ret = inet_pton(AF_INET6, str2, &tmp);
2241 buf = (uint8_t *)ctx->object + arg->offset;
2242 memcpy(buf, &tmp, size);
2244 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure for a possible retry. */
2247 push_args(ctx, arg);
2251 /** Boolean values (even indices stand for false). */
2252 static const char *const boolean_name[] = {
2261 * Parse a boolean value.
2263 * Last argument (ctx->args) is retrieved to determine storage size and
2267 parse_boolean(struct context *ctx, const struct token *token,
2268 const char *str, unsigned int len,
2269 void *buf, unsigned int size)
2271 const struct arg *arg = pop_args(ctx);
2275 /* Argument is expected. */
/* Look the token up in the boolean_name table. */
2278 for (i = 0; boolean_name[i]; ++i)
2279 if (!strncmp(str, boolean_name[i], len))
2281 /* Process token as integer. */
/* Even indices mean false, odd mean true (see boolean_name). */
2282 if (boolean_name[i])
2283 str = i & 1 ? "1" : "0";
2284 push_args(ctx, arg);
2285 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length as consumed, not strlen("0"/"1"). */
2286 return ret > 0 ? (int)len : ret;
2289 /** Parse port and update context. */
2291 parse_port(struct context *ctx, const struct token *token,
2292 const char *str, unsigned int len,
2293 void *buf, unsigned int size)
/* Use a throwaway stack buffer when the caller did not provide one. */
2295 struct buffer *out = &(struct buffer){ .port = 0 };
2303 ctx->objmask = NULL;
2304 size = sizeof(*out);
2306 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the parsed port ID for later completion callbacks. */
2308 ctx->port = out->port;
2314 /** No completion. */
2316 comp_none(struct context *ctx, const struct token *token,
2317 unsigned int ent, char *buf, unsigned int size)
2327 /** Complete boolean values. */
2329 comp_boolean(struct context *ctx, const struct token *token,
2330 unsigned int ent, char *buf, unsigned int size)
/* With buf, emit entry #ent; without, the loop counts entries. */
2336 for (i = 0; boolean_name[i]; ++i)
2337 if (buf && i == ent)
2338 return snprintf(buf, size, "%s", boolean_name[i]);
2344 /** Complete action names. */
2346 comp_action(struct context *ctx, const struct token *token,
2347 unsigned int ent, char *buf, unsigned int size)
/* With buf, emit entry #ent; without, the loop counts entries. */
2353 for (i = 0; next_action[i]; ++i)
2354 if (buf && i == ent)
2355 return snprintf(buf, size, "%s",
2356 token_list[next_action[i]].name);
2362 /** Complete available ports. */
2364 comp_port(struct context *ctx, const struct token *token,
2365 unsigned int ent, char *buf, unsigned int size)
/* Iterate over known ports; emit entry #ent or count them all. */
2372 FOREACH_PORT(p, ports) {
2373 if (buf && i == ent)
2374 return snprintf(buf, size, "%u", p);
2382 /** Complete available rule IDs. */
2384 comp_rule_id(struct context *ctx, const struct token *token,
2385 unsigned int ent, char *buf, unsigned int size)
2388 struct rte_port *port;
2389 struct port_flow *pf;
/* A valid, specific port is required to enumerate its flow rules. */
2392 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2393 ctx->port == (uint16_t)RTE_PORT_ALL)
2395 port = &ports[ctx->port];
/* Walk the port's flow list; emit entry #ent or count them all. */
2396 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2397 if (buf && i == ent)
2398 return snprintf(buf, size, "%u", pf->id);
2406 /** Complete queue field for RSS action. */
2408 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2409 unsigned int ent, char *buf, unsigned int size)
/* Offer an empty hint plus the list terminator keyword. */
2411 static const char *const str[] = { "", "end", NULL };
2416 for (i = 0; str[i] != NULL; ++i)
2417 if (buf && i == ent)
2418 return snprintf(buf, size, "%s", str[i]);
2424 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
2425 static struct context cmd_flow_context;
2427 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
2428 cmdline_parse_inst_t cmd_flow;
2430 /** Initialize context. */
2432 cmd_flow_context_init(struct context *ctx)
2434 /* A full memset() is not necessary. */
2445 ctx->objmask = NULL;
2448 /** Parse a token (cmdline API). */
2450 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2453 struct context *ctx = &cmd_flow_context;
2454 const struct token *token;
2455 const enum index *list;
2460 /* Restart as requested. */
2462 cmd_flow_context_init(ctx);
2463 token = &token_list[ctx->curr];
2464 /* Check argument length. */
/* Token ends at whitespace or a '#' comment character. */
2467 for (len = 0; src[len]; ++len)
2468 if (src[len] == '#' || isspace(src[len]))
2472 /* Last argument and EOL detection. */
2473 for (i = len; src[i]; ++i)
2474 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2476 else if (!isspace(src[i])) {
2481 if (src[i] == '\r' || src[i] == '\n') {
2485 /* Initialize context if necessary. */
2486 if (!ctx->next_num) {
2489 ctx->next[ctx->next_num++] = token->next[0];
2491 /* Process argument through candidates. */
2492 ctx->prev = ctx->curr;
2493 list = ctx->next[ctx->next_num - 1];
/* Try each candidate token until one's parser accepts the argument. */
2494 for (i = 0; list[i]; ++i) {
2495 const struct token *next = &token_list[list[i]];
2498 ctx->curr = list[i];
/* Tokens without a call handler fall back to plain name matching. */
2500 tmp = next->call(ctx, next, src, len, result, size);
2502 tmp = parse_default(ctx, next, src, len, result, size);
/* A partial match (consumed != len) counts as failure. */
2503 if (tmp == -1 || tmp != len)
2511 /* Push subsequent tokens if any. */
2513 for (i = 0; token->next[i]; ++i) {
2514 if (ctx->next_num == RTE_DIM(ctx->next))
2516 ctx->next[ctx->next_num++] = token->next[i];
2518 /* Push arguments if any. */
2520 for (i = 0; token->args[i]; ++i) {
2521 if (ctx->args_num == RTE_DIM(ctx->args))
2523 ctx->args[ctx->args_num++] = token->args[i];
2528 /** Return number of completion entries (cmdline API). */
2530 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2532 struct context *ctx = &cmd_flow_context;
2533 const struct token *token = &token_list[ctx->curr];
2534 const enum index *list;
2538 /* Tell cmd_flow_parse() that context must be reinitialized. */
2540 /* Count number of tokens in current list. */
/* Fall back to the current token's first next-list when none is queued. */
2542 list = ctx->next[ctx->next_num - 1];
2544 list = token->next[0];
2545 for (i = 0; list[i]; ++i)
2550 * If there is a single token, use its completion callback, otherwise
2551 * return the number of entries.
2553 token = &token_list[list[0]];
2554 if (i == 1 && token->comp) {
2555 /* Save index for cmd_flow_get_help(). */
2556 ctx->prev = list[0];
2557 return token->comp(ctx, token, 0, NULL, 0);
2562 /** Return a completion entry (cmdline API). */
2564 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2565 char *dst, unsigned int size)
2567 struct context *ctx = &cmd_flow_context;
2568 const struct token *token = &token_list[ctx->curr];
2569 const enum index *list;
2573 /* Tell cmd_flow_parse() that context must be reinitialized. */
2575 /* Count number of tokens in current list. */
/* Same list selection logic as cmd_flow_complete_get_nb(). */
2577 list = ctx->next[ctx->next_num - 1];
2579 list = token->next[0];
2580 for (i = 0; list[i]; ++i)
2584 /* If there is a single token, use its completion callback. */
2585 token = &token_list[list[0]];
2586 if (i == 1 && token->comp) {
2587 /* Save index for cmd_flow_get_help(). */
2588 ctx->prev = list[0];
2589 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2591 /* Otherwise make sure the index is valid and use defaults. */
2594 token = &token_list[list[index]];
2595 snprintf(dst, size, "%s", token->name);
2596 /* Save index for cmd_flow_get_help(). */
2597 ctx->prev = list[index];
2601 /** Populate help strings for current token (cmdline API). */
2603 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2605 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
2606 const struct token *token = &token_list[ctx->prev];
2609 /* Tell cmd_flow_parse() that context must be reinitialized. */
2613 /* Set token type and update global help with details. */
2614 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"))
/* Prefer the token's help text; fall back to its name. */
2616 cmd_flow.help_str = token->help;
2618 cmd_flow.help_str = token->name;
2622 /** Token definition template (cmdline API). */
/* One shared header routes every dynamic token through the callbacks above. */
2623 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2624 .ops = &(struct cmdline_token_ops){
2625 .parse = cmd_flow_parse,
2626 .complete_get_nb = cmd_flow_complete_get_nb,
2627 .complete_get_elt = cmd_flow_complete_get_elt,
2628 .get_help = cmd_flow_get_help,
2633 /** Populate the next dynamic token. */
2635 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2636 cmdline_parse_token_hdr_t *(*hdrs)[])
2638 struct context *ctx = &cmd_flow_context;
2640 /* Always reinitialize context before requesting the first token. */
2642 cmd_flow_context_init(ctx);
2643 /* Return NULL when no more tokens are expected. */
2644 if (!ctx->next_num && ctx->curr) {
2648 /* Determine if command should end here. */
2649 if (ctx->eol && ctx->last && ctx->next_num) {
2650 const enum index *list = ctx->next[ctx->next_num - 1];
2653 for (i = 0; list[i]; ++i) {
/* Hand back the shared template header for the next token. */
2660 *hdr = &cmd_flow_token_hdr;
2663 /** Dispatch parsed buffer to function calls. */
2665 cmd_flow_parsed(const struct buffer *in)
/* Map each parsed command onto the matching port_flow_* implementation. */
2667 switch (in->command) {
2669 port_flow_validate(in->port, &in->args.vc.attr,
2670 in->args.vc.pattern, in->args.vc.actions);
2673 port_flow_create(in->port, &in->args.vc.attr,
2674 in->args.vc.pattern, in->args.vc.actions);
2677 port_flow_destroy(in->port, in->args.destroy.rule_n,
2678 in->args.destroy.rule);
2681 port_flow_flush(in->port);
2684 port_flow_query(in->port, in->args.query.rule,
2685 in->args.query.action);
2688 port_flow_list(in->port, in->args.list.group_n,
2689 in->args.list.group);
2696 /** Token generator and output processing callback (cmdline API). */
2698 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
/* During completion this generates tokens; after a full parse it
 * dispatches the result buffer. */
2701 cmd_flow_tok(arg0, arg2);
2703 cmd_flow_parsed(arg0);
2706 /** Global parser instance (cmdline API). */
2707 cmdline_parse_inst_t cmd_flow = {
2709 .data = NULL, /**< Unused. */
2710 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2713 }, /**< Tokens are returned by cmd_flow_tok(). */