4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
84 /* Destroy arguments. */
87 /* Query arguments. */
93 /* Validate/create arguments. */
99 /* Validate/create pattern. */
164 ITEM_E_TAG_GRP_ECID_B,
172 /* Validate/create actions. */
/**
 * Size of the pattern[] byte array appended to struct rte_flow_item_raw.
 * Bounds how long a byte string the "pattern" item argument may hold.
 */
#define ITEM_RAW_PATTERN_SIZE 36
/**
 * Storage size for struct rte_flow_item_raw including its trailing
 * pattern[] data (flexible-array-style layout: header + payload).
 */
#define ITEM_RAW_SIZE \
	(offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
/**
 * Number of queue[] entries reserved after struct rte_flow_action_rss.
 * Upper bound on the queue indices accepted by the "rss" action.
 */
#define ACTION_RSS_NUM 32
/**
 * Storage size for struct rte_flow_action_rss including ACTION_RSS_NUM
 * trailing queue entries. The (struct rte_flow_action_rss *)0 cast is
 * only used inside sizeof (unevaluated), so it is safe.
 */
#define ACTION_RSS_SIZE \
	(offsetof(struct rte_flow_action_rss, queue) + \
	 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
/**
 * Maximum number of subsequent tokens and arguments held on the parser
 * context stacks (next[] and args[] in struct context).
 */
#define CTX_STACK_SIZE 16
214 /** Parser context. */
216 /** Stack of subsequent token lists to process. */
217 const enum index *next[CTX_STACK_SIZE];
218 /** Arguments for stacked tokens. */
219 const void *args[CTX_STACK_SIZE];
220 enum index curr; /**< Current token index. */
221 enum index prev; /**< Index of the last token seen. */
222 int next_num; /**< Number of entries in next[]. */
223 int args_num; /**< Number of entries in args[]. */
224 uint32_t reparse:1; /**< Start over from the beginning. */
225 uint32_t eol:1; /**< EOL has been detected. */
226 uint32_t last:1; /**< No more arguments. */
227 uint16_t port; /**< Current port ID (for completions). */
228 uint32_t objdata; /**< Object-specific data. */
229 void *object; /**< Address of current object for relative offsets. */
230 void *objmask; /**< Object a full mask must be written to. */
233 /** Token argument. */
235 uint32_t hton:1; /**< Use network byte ordering. */
236 uint32_t sign:1; /**< Value is signed. */
237 uint32_t offset; /**< Relative offset from ctx->object. */
238 uint32_t size; /**< Field size. */
239 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
242 /** Parser token definition. */
244 /** Type displayed during completion (defaults to "TOKEN"). */
246 /** Help displayed during completion (defaults to token name). */
248 /** Private data used by parser functions. */
251 * Lists of subsequent tokens to push on the stack. Each call to the
252 * parser consumes the last entry of that stack.
254 const enum index *const *next;
255 /** Arguments stack for subsequent tokens that need them. */
256 const struct arg *const *args;
258 * Token-processing callback, returns -1 in case of error, the
259 * length of the matched string otherwise. If NULL, attempts to
260 * match the token name.
262 * If buf is not NULL, the result should be stored in it according
263 * to context. An error is returned if not large enough.
265 int (*call)(struct context *ctx, const struct token *token,
266 const char *str, unsigned int len,
267 void *buf, unsigned int size);
269 * Callback that provides possible values for this token, used for
270 * completion. Returns -1 in case of error, the number of possible
271 * values otherwise. If NULL, the token name is used.
273 * If buf is not NULL, entry index ent is written to buf and the
274 * full length of the entry is returned (same behavior as
277 int (*comp)(struct context *ctx, const struct token *token,
278 unsigned int ent, char *buf, unsigned int size);
279 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field of struct token: a compound
 * literal holding a NULL-terminated array of token-index lists.
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
/**
 * Static initializer for a single NEXT() entry: a compound literal
 * holding a list of token indices terminated by ZERO.
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
/**
 * Static initializer for the args field of struct token: a compound
 * literal holding a NULL-terminated array of argument descriptors.
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
292 /** Static initializer for ARGS() to target a field. */
293 #define ARGS_ENTRY(s, f) \
294 (&(const struct arg){ \
295 .offset = offsetof(s, f), \
296 .size = sizeof(((s *)0)->f), \
299 /** Static initializer for ARGS() to target a bit-field. */
300 #define ARGS_ENTRY_BF(s, f, b) \
301 (&(const struct arg){ \
303 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
306 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
307 #define ARGS_ENTRY_MASK(s, f, m) \
308 (&(const struct arg){ \
309 .offset = offsetof(s, f), \
310 .size = sizeof(((s *)0)->f), \
311 .mask = (const void *)(m), \
314 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
315 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
316 (&(const struct arg){ \
318 .offset = offsetof(s, f), \
319 .size = sizeof(((s *)0)->f), \
320 .mask = (const void *)(m), \
323 /** Static initializer for ARGS() to target a pointer. */
324 #define ARGS_ENTRY_PTR(s, f) \
325 (&(const struct arg){ \
326 .size = sizeof(*((s *)0)->f), \
329 /** Static initializer for ARGS() with arbitrary size. */
330 #define ARGS_ENTRY_USZ(s, f, sz) \
331 (&(const struct arg){ \
332 .offset = offsetof(s, f), \
336 /** Same as ARGS_ENTRY() using network byte ordering. */
337 #define ARGS_ENTRY_HTON(s, f) \
338 (&(const struct arg){ \
340 .offset = offsetof(s, f), \
341 .size = sizeof(((s *)0)->f), \
344 /** Parser output buffer layout expected by cmd_flow_parsed(). */
346 enum index command; /**< Flow command. */
347 uint16_t port; /**< Affected port ID. */
350 struct rte_flow_attr attr;
351 struct rte_flow_item *pattern;
352 struct rte_flow_action *actions;
356 } vc; /**< Validate/create arguments. */
360 } destroy; /**< Destroy arguments. */
363 enum rte_flow_action_type action;
364 } query; /**< Query arguments. */
368 } list; /**< List arguments. */
369 } args; /**< Command arguments. */
372 /** Private data for pattern items. */
373 struct parse_item_priv {
374 enum rte_flow_item_type type; /**< Item type. */
375 uint32_t size; /**< Size of item specification structure. */
378 #define PRIV_ITEM(t, s) \
379 (&(const struct parse_item_priv){ \
380 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
384 /** Private data for actions. */
385 struct parse_action_priv {
386 enum rte_flow_action_type type; /**< Action type. */
387 uint32_t size; /**< Size of action configuration structure. */
390 #define PRIV_ACTION(t, s) \
391 (&(const struct parse_action_priv){ \
392 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
396 static const enum index next_vc_attr[] = {
405 static const enum index next_destroy_attr[] = {
411 static const enum index next_list_attr[] = {
417 static const enum index item_param[] = {
426 static const enum index next_item[] = {
451 static const enum index item_any[] = {
457 static const enum index item_vf[] = {
463 static const enum index item_port[] = {
469 static const enum index item_raw[] = {
479 static const enum index item_eth[] = {
487 static const enum index item_vlan[] = {
497 static const enum index item_ipv4[] = {
507 static const enum index item_ipv6[] = {
518 static const enum index item_icmp[] = {
525 static const enum index item_udp[] = {
532 static const enum index item_tcp[] = {
540 static const enum index item_sctp[] = {
549 static const enum index item_vxlan[] = {
555 static const enum index item_e_tag[] = {
556 ITEM_E_TAG_GRP_ECID_B,
561 static const enum index item_nvgre[] = {
567 static const enum index item_mpls[] = {
573 static const enum index item_gre[] = {
579 static const enum index next_action[] = {
595 static const enum index action_mark[] = {
601 static const enum index action_queue[] = {
607 static const enum index action_dup[] = {
613 static const enum index action_rss[] = {
619 static const enum index action_vf[] = {
626 static int parse_init(struct context *, const struct token *,
627 const char *, unsigned int,
628 void *, unsigned int);
629 static int parse_vc(struct context *, const struct token *,
630 const char *, unsigned int,
631 void *, unsigned int);
632 static int parse_vc_spec(struct context *, const struct token *,
633 const char *, unsigned int, void *, unsigned int);
634 static int parse_vc_conf(struct context *, const struct token *,
635 const char *, unsigned int, void *, unsigned int);
636 static int parse_vc_action_rss_queue(struct context *, const struct token *,
637 const char *, unsigned int, void *,
639 static int parse_destroy(struct context *, const struct token *,
640 const char *, unsigned int,
641 void *, unsigned int);
642 static int parse_flush(struct context *, const struct token *,
643 const char *, unsigned int,
644 void *, unsigned int);
645 static int parse_query(struct context *, const struct token *,
646 const char *, unsigned int,
647 void *, unsigned int);
648 static int parse_action(struct context *, const struct token *,
649 const char *, unsigned int,
650 void *, unsigned int);
651 static int parse_list(struct context *, const struct token *,
652 const char *, unsigned int,
653 void *, unsigned int);
654 static int parse_int(struct context *, const struct token *,
655 const char *, unsigned int,
656 void *, unsigned int);
657 static int parse_prefix(struct context *, const struct token *,
658 const char *, unsigned int,
659 void *, unsigned int);
660 static int parse_boolean(struct context *, const struct token *,
661 const char *, unsigned int,
662 void *, unsigned int);
663 static int parse_string(struct context *, const struct token *,
664 const char *, unsigned int,
665 void *, unsigned int);
666 static int parse_mac_addr(struct context *, const struct token *,
667 const char *, unsigned int,
668 void *, unsigned int);
669 static int parse_ipv4_addr(struct context *, const struct token *,
670 const char *, unsigned int,
671 void *, unsigned int);
672 static int parse_ipv6_addr(struct context *, const struct token *,
673 const char *, unsigned int,
674 void *, unsigned int);
675 static int parse_port(struct context *, const struct token *,
676 const char *, unsigned int,
677 void *, unsigned int);
678 static int comp_none(struct context *, const struct token *,
679 unsigned int, char *, unsigned int);
680 static int comp_boolean(struct context *, const struct token *,
681 unsigned int, char *, unsigned int);
682 static int comp_action(struct context *, const struct token *,
683 unsigned int, char *, unsigned int);
684 static int comp_port(struct context *, const struct token *,
685 unsigned int, char *, unsigned int);
686 static int comp_rule_id(struct context *, const struct token *,
687 unsigned int, char *, unsigned int);
688 static int comp_vc_action_rss_queue(struct context *, const struct token *,
689 unsigned int, char *, unsigned int);
691 /** Token definitions. */
692 static const struct token token_list[] = {
693 /* Special tokens. */
696 .help = "null entry, abused as the entry point",
697 .next = NEXT(NEXT_ENTRY(FLOW)),
702 .help = "command may end here",
708 .help = "integer value",
713 .name = "{unsigned}",
715 .help = "unsigned integer value",
722 .help = "prefix length for bit-mask",
723 .call = parse_prefix,
729 .help = "any boolean value",
730 .call = parse_boolean,
731 .comp = comp_boolean,
736 .help = "fixed string",
737 .call = parse_string,
741 .name = "{MAC address}",
743 .help = "standard MAC address notation",
744 .call = parse_mac_addr,
748 .name = "{IPv4 address}",
749 .type = "IPV4 ADDRESS",
750 .help = "standard IPv4 address notation",
751 .call = parse_ipv4_addr,
755 .name = "{IPv6 address}",
756 .type = "IPV6 ADDRESS",
757 .help = "standard IPv6 address notation",
758 .call = parse_ipv6_addr,
764 .help = "rule identifier",
766 .comp = comp_rule_id,
771 .help = "port identifier",
776 .name = "{group_id}",
778 .help = "group identifier",
785 .help = "priority level",
789 /* Top-level command. */
792 .type = "{command} {port_id} [{arg} [...]]",
793 .help = "manage ingress/egress flow rules",
794 .next = NEXT(NEXT_ENTRY
803 /* Sub-level commands. */
806 .help = "check whether a flow rule can be created",
807 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
808 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
813 .help = "create a flow rule",
814 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
815 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
820 .help = "destroy specific flow rules",
821 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
822 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
823 .call = parse_destroy,
827 .help = "destroy all flow rules",
828 .next = NEXT(NEXT_ENTRY(PORT_ID)),
829 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
834 .help = "query an existing flow rule",
835 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
837 NEXT_ENTRY(PORT_ID)),
838 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
839 ARGS_ENTRY(struct buffer, args.query.rule),
840 ARGS_ENTRY(struct buffer, port)),
845 .help = "list existing flow rules",
846 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
847 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
850 /* Destroy arguments. */
853 .help = "specify a rule identifier",
854 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
855 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
856 .call = parse_destroy,
858 /* Query arguments. */
862 .help = "action to query, must be part of the rule",
863 .call = parse_action,
866 /* List arguments. */
869 .help = "specify a group",
870 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
871 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
874 /* Validate/create attributes. */
877 .help = "specify a group",
878 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
879 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
884 .help = "specify a priority level",
885 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
886 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
891 .help = "affect rule to ingress",
892 .next = NEXT(next_vc_attr),
897 .help = "affect rule to egress",
898 .next = NEXT(next_vc_attr),
901 /* Validate/create pattern. */
904 .help = "submit a list of pattern items",
905 .next = NEXT(next_item),
910 .help = "match value perfectly (with full bit-mask)",
911 .call = parse_vc_spec,
913 [ITEM_PARAM_SPEC] = {
915 .help = "match value according to configured bit-mask",
916 .call = parse_vc_spec,
918 [ITEM_PARAM_LAST] = {
920 .help = "specify upper bound to establish a range",
921 .call = parse_vc_spec,
923 [ITEM_PARAM_MASK] = {
925 .help = "specify bit-mask with relevant bits set to one",
926 .call = parse_vc_spec,
928 [ITEM_PARAM_PREFIX] = {
930 .help = "generate bit-mask from a prefix length",
931 .call = parse_vc_spec,
935 .help = "specify next pattern item",
936 .next = NEXT(next_item),
940 .help = "end list of pattern items",
941 .priv = PRIV_ITEM(END, 0),
942 .next = NEXT(NEXT_ENTRY(ACTIONS)),
947 .help = "no-op pattern item",
948 .priv = PRIV_ITEM(VOID, 0),
949 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
954 .help = "perform actions when pattern does not match",
955 .priv = PRIV_ITEM(INVERT, 0),
956 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
961 .help = "match any protocol for the current layer",
962 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
963 .next = NEXT(item_any),
968 .help = "number of layers covered",
969 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
970 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
974 .help = "match packets addressed to the physical function",
975 .priv = PRIV_ITEM(PF, 0),
976 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
981 .help = "match packets addressed to a virtual function ID",
982 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
983 .next = NEXT(item_vf),
988 .help = "destination VF ID",
989 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
990 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
994 .help = "device-specific physical port index to use",
995 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
996 .next = NEXT(item_port),
999 [ITEM_PORT_INDEX] = {
1001 .help = "physical port index",
1002 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1003 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1007 .help = "match an arbitrary byte string",
1008 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1009 .next = NEXT(item_raw),
1012 [ITEM_RAW_RELATIVE] = {
1014 .help = "look for pattern after the previous item",
1015 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1016 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1019 [ITEM_RAW_SEARCH] = {
1021 .help = "search pattern from offset (see also limit)",
1022 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1023 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1026 [ITEM_RAW_OFFSET] = {
1028 .help = "absolute or relative offset for pattern",
1029 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1030 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1032 [ITEM_RAW_LIMIT] = {
1034 .help = "search area limit for start of pattern",
1035 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1036 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1038 [ITEM_RAW_PATTERN] = {
1040 .help = "byte string to look for",
1041 .next = NEXT(item_raw,
1043 NEXT_ENTRY(ITEM_PARAM_IS,
1046 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1047 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1049 ITEM_RAW_PATTERN_SIZE)),
1053 .help = "match Ethernet header",
1054 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1055 .next = NEXT(item_eth),
1060 .help = "destination MAC",
1061 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1062 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1066 .help = "source MAC",
1067 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1068 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1072 .help = "EtherType",
1073 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1074 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1078 .help = "match 802.1Q/ad VLAN tag",
1079 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1080 .next = NEXT(item_vlan),
1083 [ITEM_VLAN_TPID] = {
1085 .help = "tag protocol identifier",
1086 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1087 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1091 .help = "tag control information",
1092 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1093 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1097 .help = "priority code point",
1098 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1099 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1104 .help = "drop eligible indicator",
1105 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1106 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1111 .help = "VLAN identifier",
1112 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1113 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1118 .help = "match IPv4 header",
1119 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1120 .next = NEXT(item_ipv4),
1125 .help = "type of service",
1126 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1127 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1128 hdr.type_of_service)),
1132 .help = "time to live",
1133 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1134 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1137 [ITEM_IPV4_PROTO] = {
1139 .help = "next protocol ID",
1140 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1141 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1142 hdr.next_proto_id)),
1146 .help = "source address",
1147 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1148 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1153 .help = "destination address",
1154 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1155 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1160 .help = "match IPv6 header",
1161 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1162 .next = NEXT(item_ipv6),
1167 .help = "traffic class",
1168 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1169 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1171 "\x0f\xf0\x00\x00")),
1173 [ITEM_IPV6_FLOW] = {
1175 .help = "flow label",
1176 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1177 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1179 "\x00\x0f\xff\xff")),
1181 [ITEM_IPV6_PROTO] = {
1183 .help = "protocol (next header)",
1184 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1185 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1190 .help = "hop limit",
1191 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1192 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1197 .help = "source address",
1198 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1199 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1204 .help = "destination address",
1205 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1206 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1211 .help = "match ICMP header",
1212 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1213 .next = NEXT(item_icmp),
1216 [ITEM_ICMP_TYPE] = {
1218 .help = "ICMP packet type",
1219 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1220 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1223 [ITEM_ICMP_CODE] = {
1225 .help = "ICMP packet code",
1226 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1227 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1232 .help = "match UDP header",
1233 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1234 .next = NEXT(item_udp),
1239 .help = "UDP source port",
1240 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1241 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1246 .help = "UDP destination port",
1247 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1248 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1253 .help = "match TCP header",
1254 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1255 .next = NEXT(item_tcp),
1260 .help = "TCP source port",
1261 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1262 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1267 .help = "TCP destination port",
1268 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1269 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1272 [ITEM_TCP_FLAGS] = {
1274 .help = "TCP flags",
1275 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1276 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1281 .help = "match SCTP header",
1282 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1283 .next = NEXT(item_sctp),
1288 .help = "SCTP source port",
1289 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1290 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1295 .help = "SCTP destination port",
1296 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1297 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1302 .help = "validation tag",
1303 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1304 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1307 [ITEM_SCTP_CKSUM] = {
1310 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1311 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1316 .help = "match VXLAN header",
1317 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1318 .next = NEXT(item_vxlan),
1321 [ITEM_VXLAN_VNI] = {
1323 .help = "VXLAN identifier",
1324 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1325 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1329 .help = "match E-Tag header",
1330 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1331 .next = NEXT(item_e_tag),
1334 [ITEM_E_TAG_GRP_ECID_B] = {
1335 .name = "grp_ecid_b",
1336 .help = "GRP and E-CID base",
1337 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1338 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1344 .help = "match NVGRE header",
1345 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1346 .next = NEXT(item_nvgre),
1349 [ITEM_NVGRE_TNI] = {
1351 .help = "virtual subnet ID",
1352 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1353 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1357 .help = "match MPLS header",
1358 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1359 .next = NEXT(item_mpls),
1362 [ITEM_MPLS_LABEL] = {
1364 .help = "MPLS label",
1365 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1366 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1372 .help = "match GRE header",
1373 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1374 .next = NEXT(item_gre),
1377 [ITEM_GRE_PROTO] = {
1379 .help = "GRE protocol type",
1380 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1381 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1384 /* Validate/create actions. */
1387 .help = "submit a list of associated actions",
1388 .next = NEXT(next_action),
1393 .help = "specify next action",
1394 .next = NEXT(next_action),
1398 .help = "end list of actions",
1399 .priv = PRIV_ACTION(END, 0),
1404 .help = "no-op action",
1405 .priv = PRIV_ACTION(VOID, 0),
1406 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1409 [ACTION_PASSTHRU] = {
1411 .help = "let subsequent rule process matched packets",
1412 .priv = PRIV_ACTION(PASSTHRU, 0),
1413 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1418 .help = "attach 32 bit value to packets",
1419 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1420 .next = NEXT(action_mark),
1423 [ACTION_MARK_ID] = {
1425 .help = "32 bit value to return with packets",
1426 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1427 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1428 .call = parse_vc_conf,
1432 .help = "flag packets",
1433 .priv = PRIV_ACTION(FLAG, 0),
1434 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1439 .help = "assign packets to a given queue index",
1440 .priv = PRIV_ACTION(QUEUE,
1441 sizeof(struct rte_flow_action_queue)),
1442 .next = NEXT(action_queue),
1445 [ACTION_QUEUE_INDEX] = {
1447 .help = "queue index to use",
1448 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1449 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1450 .call = parse_vc_conf,
1454 .help = "drop packets (note: passthru has priority)",
1455 .priv = PRIV_ACTION(DROP, 0),
1456 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1461 .help = "enable counters for this rule",
1462 .priv = PRIV_ACTION(COUNT, 0),
1463 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1468 .help = "duplicate packets to a given queue index",
1469 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1470 .next = NEXT(action_dup),
1473 [ACTION_DUP_INDEX] = {
1475 .help = "queue index to duplicate packets to",
1476 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1477 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1478 .call = parse_vc_conf,
1482 .help = "spread packets among several queues",
1483 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1484 .next = NEXT(action_rss),
1487 [ACTION_RSS_QUEUES] = {
1489 .help = "queue indices to use",
1490 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1491 .call = parse_vc_conf,
1493 [ACTION_RSS_QUEUE] = {
1495 .help = "queue index",
1496 .call = parse_vc_action_rss_queue,
1497 .comp = comp_vc_action_rss_queue,
1501 .help = "redirect packets to physical device function",
1502 .priv = PRIV_ACTION(PF, 0),
1503 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1508 .help = "redirect packets to virtual device function",
1509 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1510 .next = NEXT(action_vf),
1513 [ACTION_VF_ORIGINAL] = {
1515 .help = "use original VF ID if possible",
1516 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1517 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1519 .call = parse_vc_conf,
1523 .help = "VF ID to redirect packets to",
1524 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1525 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1526 .call = parse_vc_conf,
1530 /** Remove and return last entry from argument stack. */
1531 static const struct arg *
1532 pop_args(struct context *ctx)
1534 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1537 /** Add entry on top of the argument stack. */
1539 push_args(struct context *ctx, const struct arg *arg)
1541 if (ctx->args_num == CTX_STACK_SIZE)
1543 ctx->args[ctx->args_num++] = arg;
1547 /** Spread value into buffer according to bit-mask. */
1549 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1551 uint32_t i = arg->size;
1559 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1568 unsigned int shift = 0;
1569 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1571 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1572 if (!(arg->mask[i] & (1 << shift)))
1577 *buf &= ~(1 << shift);
1578 *buf |= (val & 1) << shift;
1587 * Parse a prefix length and generate a bit-mask.
1589 * Last argument (ctx->args) is retrieved to determine mask size, storage
1590 * location and whether the result must use network byte ordering.
1593 parse_prefix(struct context *ctx, const struct token *token,
1594 const char *str, unsigned int len,
1595 void *buf, unsigned int size)
1597 const struct arg *arg = pop_args(ctx);
1598 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1605 /* Argument is expected. */
1609 u = strtoumax(str, &end, 0);
1610 if (errno || (size_t)(end - str) != len)
1615 extra = arg_entry_bf_fill(NULL, 0, arg);
1624 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1625 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1632 if (bytes > size || bytes + !!extra > size)
1636 buf = (uint8_t *)ctx->object + arg->offset;
1637 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1639 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1640 memset(buf, 0x00, size - bytes);
1642 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1646 memset(buf, 0xff, bytes);
1647 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1649 ((uint8_t *)buf)[bytes] = conv[extra];
1652 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1655 push_args(ctx, arg);
1659 /** Default parsing function for token name matching. */
1661 parse_default(struct context *ctx, const struct token *token,
1662 const char *str, unsigned int len,
1663 void *buf, unsigned int size)
/* Match only the first len bytes of str against the token name. */
1668 if (strncmp(str, token->name, len))
1673 /** Parse flow command, initialize output buffer for subsequent tokens. */
1675 parse_init(struct context *ctx, const struct token *token,
1676 const char *str, unsigned int len,
1677 void *buf, unsigned int size)
1679 struct buffer *out = buf;
1681 /* Token name must match. */
1682 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1684 /* Nothing else to do if there is no buffer. */
1687 /* Make sure buffer is large enough. */
1688 if (size < sizeof(*out))
1690 /* Initialize buffer. */
1691 memset(out, 0x00, sizeof(*out));
/* 0x22 poison in the spare area helps catch use of uninitialized space. */
1692 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out))
1695 ctx->objmask = NULL;
1699 /** Parse tokens for validate/create commands. */
1701 parse_vc(struct context *ctx, const struct token *token,
1702 const char *str, unsigned int len,
1703 void *buf, unsigned int size)
1705 struct buffer *out = buf;
1709 /* Token name must match. */
1710 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1712 /* Nothing else to do if there is no buffer. */
/* First token of the command: record which command this buffer holds. */
1715 if (!out->command) {
1716 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1718 if (sizeof(*out) > size)
1720 out->command = ctx->curr;
1723 ctx->objmask = NULL;
/* Data area grows downward from the end of the caller-provided buffer. */
1724 out->args.vc.data = (uint8_t *)out + size;
1728 ctx->object = &out->args.vc.attr;
1729 ctx->objmask = NULL;
1730 switch (ctx->curr) {
1735 out->args.vc.attr.ingress = 1;
1738 out->args.vc.attr.egress = 1;
/* "pattern" keyword: item array starts right after the buffer header. */
1741 out->args.vc.pattern =
1742 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1744 ctx->object = out->args.vc.pattern;
1745 ctx->objmask = NULL;
/* "actions" keyword: action array starts after the last pattern item. */
1748 out->args.vc.actions =
1749 (void *)RTE_ALIGN_CEIL((uintptr_t)
1750 (out->args.vc.pattern +
1751 out->args.vc.pattern_n),
1753 ctx->object = out->args.vc.actions;
1754 ctx->objmask = NULL;
/* Still in the pattern section: append a new rte_flow_item. */
1761 if (!out->args.vc.actions) {
1762 const struct parse_item_priv *priv = token->priv;
1763 struct rte_flow_item *item =
1764 out->args.vc.pattern + out->args.vc.pattern_n;
/* Each item reserves room for its spec, last and mask objects. */
1766 data_size = priv->size * 3; /* spec, last, mask */
1767 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1768 (out->args.vc.data - data_size),
/* Fail when the upward-growing array meets the downward-growing data. */
1770 if ((uint8_t *)item + sizeof(*item) > data)
1772 *item = (struct rte_flow_item){
1775 ++out->args.vc.pattern_n;
1777 ctx->objmask = NULL;
/* Otherwise append a new rte_flow_action. */
1779 const struct parse_action_priv *priv = token->priv;
1780 struct rte_flow_action *action =
1781 out->args.vc.actions + out->args.vc.actions_n;
1783 data_size = priv->size; /* configuration */
1784 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1785 (out->args.vc.data - data_size),
1787 if ((uint8_t *)action + sizeof(*action) > data)
1789 *action = (struct rte_flow_action){
1792 ++out->args.vc.actions_n;
1793 ctx->object = action;
1794 ctx->objmask = NULL;
1796 memset(data, 0, data_size);
1797 out->args.vc.data = data;
/* objdata remembers the reserved size for later per-field parsing. */
1798 ctx->objdata = data_size;
1802 /** Parse pattern item parameter type. */
1804 parse_vc_spec(struct context *ctx, const struct token *token,
1805 const char *str, unsigned int len,
1806 void *buf, unsigned int size)
1808 struct buffer *out = buf;
1809 struct rte_flow_item *item;
1815 /* Token name must match. */
1816 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1818 /* Parse parameter types. */
/* index selects which of spec/last/mask the next value targets. */
1819 switch (ctx->curr) {
1820 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1826 case ITEM_PARAM_SPEC:
1829 case ITEM_PARAM_LAST:
1832 case ITEM_PARAM_PREFIX:
1833 /* Modify next token to expect a prefix. */
1834 if (ctx->next_num < 2)
1836 ctx->next[ctx->next_num - 2] = prefix;
1838 case ITEM_PARAM_MASK:
1844 /* Nothing else to do if there is no buffer. */
/* A current pattern item must exist to attach the parameter to. */
1847 if (!out->args.vc.pattern_n)
1849 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* Per-item data was reserved in thirds; see parse_vc(). */
1850 data_size = ctx->objdata / 3; /* spec, last, mask */
1851 /* Point to selected object. */
1852 ctx->object = out->args.vc.data + (data_size * index);
1854 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1855 item->mask = ctx->objmask;
1857 ctx->objmask = NULL;
1858 /* Update relevant item pointer. */
1859 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1864 /** Parse action configuration field. */
1866 parse_vc_conf(struct context *ctx, const struct token *token,
1867 const char *str, unsigned int len,
1868 void *buf, unsigned int size)
1870 struct buffer *out = buf;
1871 struct rte_flow_action *action;
1874 /* Token name must match. */
1875 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1877 /* Nothing else to do if there is no buffer. */
/* A current action must exist to attach the configuration to. */
1880 if (!out->args.vc.actions_n)
1882 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1883 /* Point to selected object. */
1884 ctx->object = out->args.vc.data;
/* Actions have no mask counterpart, unlike pattern items. */
1885 ctx->objmask = NULL;
1886 /* Update configuration pointer. */
1887 action->conf = ctx->object;
1892 * Parse queue field for RSS action.
1894 * Valid tokens are queue indices and the "end" token.
1897 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1898 const char *str, unsigned int len,
1899 void *buf, unsigned int size)
1901 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1908 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count is kept in the upper 16 bits of objdata. */
1910 i = ctx->objdata >> 16;
/* "end" terminates the queue list; clear the counter bits. */
1911 if (!strncmp(str, "end", len)) {
1912 ctx->objdata &= 0xffff;
1915 if (i >= ACTION_RSS_NUM)
/* Delegate actual number parsing to parse_int() on queue[i]. */
1917 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1919 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back in the upper bits. */
1925 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Queue this same token again so more indices can follow. */
1927 if (ctx->next_num == RTE_DIM(ctx->next))
1929 ctx->next[ctx->next_num++] = next;
1932 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1936 /** Parse tokens for destroy command. */
1938 parse_destroy(struct context *ctx, const struct token *token,
1939 const char *str, unsigned int len,
1940 void *buf, unsigned int size)
1942 struct buffer *out = buf;
1944 /* Token name must match. */
1945 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1947 /* Nothing else to do if there is no buffer. */
/* First token: claim the buffer for the DESTROY command. */
1950 if (!out->command) {
1951 if (ctx->curr != DESTROY)
1953 if (sizeof(*out) > size)
1955 out->command = ctx->curr;
1958 ctx->objmask = NULL;
/* Rule-ID array lives right after the buffer header. */
1959 out->args.destroy.rule =
1960 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next rule ID slot must fit inside the buffer. */
1964 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1965 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1968 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1969 ctx->objmask = NULL;
1973 /** Parse tokens for flush command. */
1975 parse_flush(struct context *ctx, const struct token *token,
1976 const char *str, unsigned int len,
1977 void *buf, unsigned int size)
1979 struct buffer *out = buf;
1981 /* Token name must match. */
1982 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1984 /* Nothing else to do if there is no buffer. */
/* Flush takes no arguments beyond the port; just record the command. */
1987 if (!out->command) {
1988 if (ctx->curr != FLUSH)
1990 if (sizeof(*out) > size)
1992 out->command = ctx->curr;
1995 ctx->objmask = NULL;
2000 /** Parse tokens for query command. */
2002 parse_query(struct context *ctx, const struct token *token,
2003 const char *str, unsigned int len,
2004 void *buf, unsigned int size)
2006 struct buffer *out = buf;
2008 /* Token name must match. */
2009 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2011 /* Nothing else to do if there is no buffer. */
/* First token: claim the buffer for the QUERY command. */
2014 if (!out->command) {
2015 if (ctx->curr != QUERY)
2017 if (sizeof(*out) > size)
2019 out->command = ctx->curr;
2022 ctx->objmask = NULL;
2027 /** Parse action names. */
2029 parse_action(struct context *ctx, const struct token *token,
2030 const char *str, unsigned int len,
2031 void *buf, unsigned int size)
2033 struct buffer *out = buf;
2034 const struct arg *arg = pop_args(ctx);
2038 /* Argument is expected. */
2041 /* Parse action name. */
/* Linear scan over the table of known action tokens. */
2042 for (i = 0; next_action[i]; ++i) {
2043 const struct parse_action_priv *priv;
2045 token = &token_list[next_action[i]];
2046 if (strncmp(token->name, str, len))
/* Store the matched action's type into the destination field. */
2052 memcpy((uint8_t *)ctx->object + arg->offset,
/* Error path: restore the argument for the caller. */
2058 push_args(ctx, arg);
2062 /** Parse tokens for list command. */
2064 parse_list(struct context *ctx, const struct token *token,
2065 const char *str, unsigned int len,
2066 void *buf, unsigned int size)
2068 struct buffer *out = buf;
2070 /* Token name must match. */
2071 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2073 /* Nothing else to do if there is no buffer. */
/* First token: claim the buffer for the LIST command. */
2076 if (!out->command) {
2077 if (ctx->curr != LIST)
2079 if (sizeof(*out) > size)
2081 out->command = ctx->curr;
2084 ctx->objmask = NULL;
/* Group-ID array lives right after the buffer header. */
2085 out->args.list.group =
2086 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next group ID slot must fit inside the buffer. */
2090 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2091 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2094 ctx->object = out->args.list.group + out->args.list.group_n++;
2095 ctx->objmask = NULL;
2100 * Parse signed/unsigned integers 8 to 64-bit long.
2102 * Last argument (ctx->args) is retrieved to determine integer type and
2106 parse_int(struct context *ctx, const struct token *token,
2107 const char *str, unsigned int len,
2108 void *buf, unsigned int size)
2110 const struct arg *arg = pop_args(ctx);
2115 /* Argument is expected. */
/* Signedness of the target field selects the conversion routine. */
2120 (uintmax_t)strtoimax(str, &end, 0) :
2121 strtoumax(str, &end, 0);
2122 if (errno || (size_t)(end - str) != len)
/* Bit-field destination: scatter value and all-ones mask. */
2127 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2128 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2132 buf = (uint8_t *)ctx->object + arg->offset;
/* Plain field: width-specific store, honoring network byte order. */
2136 case sizeof(uint8_t):
2137 *(uint8_t *)buf = u;
2139 case sizeof(uint16_t):
2140 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are stored byte by byte. */
2142 case sizeof(uint8_t [3]):
2143 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2145 ((uint8_t *)buf)[0] = u;
2146 ((uint8_t *)buf)[1] = u >> 8;
2147 ((uint8_t *)buf)[2] = u >> 16;
2151 ((uint8_t *)buf)[0] = u >> 16;
2152 ((uint8_t *)buf)[1] = u >> 8;
2153 ((uint8_t *)buf)[2] = u;
2155 case sizeof(uint32_t):
2156 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2158 case sizeof(uint64_t):
2159 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Also fill the object mask unless it aliases the object itself. */
2164 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2166 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument for the caller. */
2171 push_args(ctx, arg);
2178 * Two arguments (ctx->args) are retrieved from the stack to store data and
2179 * its length (in that order).
2182 parse_string(struct context *ctx, const struct token *token,
2183 const char *str, unsigned int len,
2184 void *buf, unsigned int size)
2186 const struct arg *arg_data = pop_args(ctx);
2187 const struct arg *arg_len = pop_args(ctx);
/* Scratch space to format the decimal length for parse_int(). */
2188 char tmp[16]; /* Ought to be enough. */
2191 /* Arguments are expected. */
/* Partial failure: only arg_data was popped; restore it. */
2195 push_args(ctx, arg_data);
2198 size = arg_data->size;
2199 /* Bit-mask fill is not supported. */
2200 if (arg_data->mask || size < len)
2204 /* Let parse_int() fill length information first. */
2205 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2208 push_args(ctx, arg_len);
2209 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2214 buf = (uint8_t *)ctx->object + arg_data->offset;
2215 /* Output buffer is not necessarily NUL-terminated. */
2216 memcpy(buf, str, len);
/* 0x55 poison marks the unused tail of the destination field. */
2217 memset((uint8_t *)buf + len, 0x55, size - len);
2219 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Error path: restore both arguments in reverse pop order. */
2222 push_args(ctx, arg_len);
2223 push_args(ctx, arg_data);
2228 * Parse a MAC address.
2230 * Last argument (ctx->args) is retrieved to determine storage size and
2234 parse_mac_addr(struct context *ctx, const struct token *token,
2235 const char *str, unsigned int len,
2236 void *buf, unsigned int size)
2238 const struct arg *arg = pop_args(ctx);
/* Parse into a temporary first so failures leave the object untouched. */
2239 struct ether_addr tmp;
2243 /* Argument is expected. */
2247 /* Bit-mask fill is not supported. */
2248 if (arg->mask || size != sizeof(tmp))
2250 /* Only network endian is supported. */
/* Whole token must be consumed by the address parser. */
2253 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2254 if (ret < 0 || (unsigned int)ret != len)
2258 buf = (uint8_t *)ctx->object + arg->offset;
2259 memcpy(buf, &tmp, size);
2261 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2264 push_args(ctx, arg);
2269 * Parse an IPv4 address.
2271 * Last argument (ctx->args) is retrieved to determine storage size and
2275 parse_ipv4_addr(struct context *ctx, const struct token *token,
2276 const char *str, unsigned int len,
2277 void *buf, unsigned int size)
2279 const struct arg *arg = pop_args(ctx);
2284 /* Argument is expected. */
2288 /* Bit-mask fill is not supported. */
2289 if (arg->mask || size != sizeof(tmp))
2291 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2294 memcpy(str2, str, len);
2296 ret = inet_pton(AF_INET, str2, &tmp);
2298 /* Attempt integer parsing. */
/* Not dotted-quad notation: fall back to parse_int() on the raw token. */
2299 push_args(ctx, arg);
2300 return parse_int(ctx, token, str, len, buf, size);
2304 buf = (uint8_t *)ctx->object + arg->offset;
2305 memcpy(buf, &tmp, size);
2307 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2310 push_args(ctx, arg);
2315 * Parse an IPv6 address.
2317 * Last argument (ctx->args) is retrieved to determine storage size and
2321 parse_ipv6_addr(struct context *ctx, const struct token *token,
2322 const char *str, unsigned int len,
2323 void *buf, unsigned int size)
2325 const struct arg *arg = pop_args(ctx);
/* Parse into a temporary first so failures leave the object untouched. */
2327 struct in6_addr tmp;
2331 /* Argument is expected. */
2335 /* Bit-mask fill is not supported. */
2336 if (arg->mask || size != sizeof(tmp))
2338 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2341 memcpy(str2, str, len);
2343 ret = inet_pton(AF_INET6, str2, &tmp);
2348 buf = (uint8_t *)ctx->object + arg->offset;
2349 memcpy(buf, &tmp, size);
2351 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2354 push_args(ctx, arg);
2358 /** Boolean values (even indices stand for false). */
2359 static const char *const boolean_name[] = {
2368 * Parse a boolean value.
2370 * Last argument (ctx->args) is retrieved to determine storage size and
2374 parse_boolean(struct context *ctx, const struct token *token,
2375 const char *str, unsigned int len,
2376 void *buf, unsigned int size)
2378 const struct arg *arg = pop_args(ctx);
2382 /* Argument is expected. */
/* Look the token up among the known boolean keywords. */
2385 for (i = 0; boolean_name[i]; ++i)
2386 if (!strncmp(str, boolean_name[i], len))
2388 /* Process token as integer. */
/* Keyword matched: even index means false ("0"), odd means true ("1"). */
2389 if (boolean_name[i])
2390 str = i & 1 ? "1" : "0";
/* Reuse parse_int() for the actual store; arg goes back on the stack. */
2391 push_args(ctx, arg);
2392 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length as consumed, not the substitute's. */
2393 return ret > 0 ? (int)len : ret;
2396 /** Parse port and update context. */
2398 parse_port(struct context *ctx, const struct token *token,
2399 const char *str, unsigned int len,
2400 void *buf, unsigned int size)
/* Use a throwaway buffer so the port number can be read back below. */
2402 struct buffer *out = &(struct buffer){ .port = 0 };
2410 ctx->objmask = NULL;
2411 size = sizeof(*out);
2413 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the parsed port in the context for later completion helpers. */
2415 ctx->port = out->port;
2421 /** No completion. */
2423 comp_none(struct context *ctx, const struct token *token,
2424 unsigned int ent, char *buf, unsigned int size)
2434 /** Complete boolean values. */
/* All comp_* helpers share a contract: with buf, write entry #ent;
 * without buf, return the number of available entries. */
2436 comp_boolean(struct context *ctx, const struct token *token,
2437 unsigned int ent, char *buf, unsigned int size)
2443 for (i = 0; boolean_name[i]; ++i)
2444 if (buf && i == ent)
2445 return snprintf(buf, size, "%s", boolean_name[i]);
2451 /** Complete action names. */
2453 comp_action(struct context *ctx, const struct token *token,
2454 unsigned int ent, char *buf, unsigned int size)
2460 for (i = 0; next_action[i]; ++i)
2461 if (buf && i == ent)
2462 return snprintf(buf, size, "%s",
2463 token_list[next_action[i]].name)
2469 /** Complete available ports. */
2471 comp_port(struct context *ctx, const struct token *token,
2472 unsigned int ent, char *buf, unsigned int size)
/* Enumerate only ports currently known to ethdev. */
2479 RTE_ETH_FOREACH_DEV(p) {
2480 if (buf && i == ent)
2481 return snprintf(buf, size, "%u", p);
2489 /** Complete available rule IDs. */
2491 comp_rule_id(struct context *ctx, const struct token *token,
2492 unsigned int ent, char *buf, unsigned int size)
2495 struct rte_port *port;
2496 struct port_flow *pf;
/* Rule IDs only make sense for one specific, valid port. */
2499 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2500 ctx->port == (uint16_t)RTE_PORT_ALL)
2502 port = &ports[ctx->port];
2503 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2504 if (buf && i == ent)
2505 return snprintf(buf, size, "%u", pf->id);
2513 /** Complete queue field for RSS action. */
2515 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2516 unsigned int ent, char *buf, unsigned int size)
/* Empty string stands for "any queue index"; "end" closes the list. */
2518 static const char *const str[] = { "", "end", NULL };
2523 for (i = 0; str[i] != NULL; ++i)
2524 if (buf && i == ent)
2525 return snprintf(buf, size, "%s", str[i]);
2531 /** Internal context. */
/* Single shared parser state; the cmdline library calls back serially. */
2532 static struct context cmd_flow_context;
2534 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
2535 cmdline_parse_inst_t cmd_flow;
2537 /** Initialize context. */
2539 cmd_flow_context_init(struct context *ctx)
2541 /* A full memset() is not necessary. */
2552 ctx->objmask = NULL;
2555 /** Parse a token (cmdline API). */
2557 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2560 struct context *ctx = &cmd_flow_context;
2561 const struct token *token;
2562 const enum index *list;
2567 /* Restart as requested. */
2569 cmd_flow_context_init(ctx);
2570 token = &token_list[ctx->curr];
2571 /* Check argument length. */
/* A token ends at whitespace or at a '#' comment marker. */
2574 for (len = 0; src[len]; ++len)
2575 if (src[len] == '#' || isspace(src[len]))
2579 /* Last argument and EOL detection. */
2580 for (i = len; src[i]; ++i)
2581 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2583 else if (!isspace(src[i])) {
2588 if (src[i] == '\r' || src[i] == '\n') {
2592 /* Initialize context if necessary. */
2593 if (!ctx->next_num) {
2596 ctx->next[ctx->next_num++] = token->next[0];
2598 /* Process argument through candidates. */
2599 ctx->prev = ctx->curr;
/* Candidates come from the top of the expected-token stack. */
2600 list = ctx->next[ctx->next_num - 1];
2601 for (i = 0; list[i]; ++i) {
2602 const struct token *next = &token_list[list[i]];
2605 ctx->curr = list[i];
/* Tokens with a custom callback parse themselves; others match by name. */
2607 tmp = next->call(ctx, next, src, len, result, size);
2609 tmp = parse_default(ctx, next, src, len, result, size);
/* The candidate must consume the token exactly. */
2610 if (tmp == -1 || tmp != len)
2618 /* Push subsequent tokens if any. */
2620 for (i = 0; token->next[i]; ++i) {
2621 if (ctx->next_num == RTE_DIM(ctx->next))
2623 ctx->next[ctx->next_num++] = token->next[i];
2625 /* Push arguments if any. */
2627 for (i = 0; token->args[i]; ++i) {
2628 if (ctx->args_num == RTE_DIM(ctx->args))
2630 ctx->args[ctx->args_num++] = token->args[i];
2635 /** Return number of completion entries (cmdline API). */
2637 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2639 struct context *ctx = &cmd_flow_context;
2640 const struct token *token = &token_list[ctx->curr];
2641 const enum index *list;
2645 /* Tell cmd_flow_parse() that context must be reinitialized. */
2647 /* Count number of tokens in current list. */
2649 list = ctx->next[ctx->next_num - 1];
2651 list = token->next[0];
2652 for (i = 0; list[i]; ++i)
2657 * If there is a single token, use its completion callback, otherwise
2658 * return the number of entries.
2660 token = &token_list[list[0]];
2661 if (i == 1 && token->comp) {
2662 /* Save index for cmd_flow_get_help(). */
2663 ctx->prev = list[0];
/* NULL buffer asks the callback for its entry count only. */
2664 return token->comp(ctx, token, 0, NULL, 0);
2669 /** Return a completion entry (cmdline API). */
2671 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2672 char *dst, unsigned int size)
2674 struct context *ctx = &cmd_flow_context;
2675 const struct token *token = &token_list[ctx->curr];
2676 const enum index *list;
2680 /* Tell cmd_flow_parse() that context must be reinitialized. */
2682 /* Count number of tokens in current list. */
2684 list = ctx->next[ctx->next_num - 1];
2686 list = token->next[0];
2687 for (i = 0; list[i]; ++i)
2691 /* If there is a single token, use its completion callback. */
2692 token = &token_list[list[0]];
2693 if (i == 1 && token->comp) {
2694 /* Save index for cmd_flow_get_help(). */
2695 ctx->prev = list[0];
/* cmdline expects 0 on success, -1 on failure. */
2696 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2698 /* Otherwise make sure the index is valid and use defaults. */
2701 token = &token_list[list[index]];
2702 snprintf(dst, size, "%s", token->name);
2703 /* Save index for cmd_flow_get_help(). */
2704 ctx->prev = list[index];
2708 /** Populate help strings for current token (cmdline API). */
2710 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2712 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
2713 const struct token *token = &token_list[ctx->prev];
2716 /* Tell cmd_flow_parse() that context must be reinitialized. */
2720 /* Set token type and update global help with details. */
2721 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the long help text; fall back to the bare token name. */
2723 cmd_flow.help_str = token->help;
2725 cmd_flow.help_str = token->name;
2729 /** Token definition template (cmdline API). */
/* Every dynamic token shares this single ops table. */
2730 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2731 .ops = &(struct cmdline_token_ops){
2732 .parse = cmd_flow_parse,
2733 .complete_get_nb = cmd_flow_complete_get_nb,
2734 .complete_get_elt = cmd_flow_complete_get_elt,
2735 .get_help = cmd_flow_get_help,
2740 /** Populate the next dynamic token. */
2742 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2743 cmdline_parse_token_hdr_t *(*hdrs)[])
2745 struct context *ctx = &cmd_flow_context;
2747 /* Always reinitialize context before requesting the first token. */
2749 cmd_flow_context_init(ctx);
2750 /* Return NULL when no more tokens are expected. */
2751 if (!ctx->next_num && ctx->curr) {
2755 /* Determine if command should end here. */
2756 if (ctx->eol && ctx->last && ctx->next_num) {
2757 const enum index *list = ctx->next[ctx->next_num - 1];
2760 for (i = 0; list[i]; ++i) {
/* Hand the shared template back to the cmdline library. */
2767 *hdr = &cmd_flow_token_hdr;
2770 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command buffer to its port_flow_* implementation. */
2772 cmd_flow_parsed(const struct buffer *in)
2774 switch (in->command) {
2776 port_flow_validate(in->port, &in->args.vc.attr,
2777 in->args.vc.pattern, in->args.vc.actions);
2780 port_flow_create(in->port, &in->args.vc.attr,
2781 in->args.vc.pattern, in->args.vc.actions);
2784 port_flow_destroy(in->port, in->args.destroy.rule_n,
2785 in->args.destroy.rule);
2788 port_flow_flush(in->port);
2791 port_flow_query(in->port, in->args.query.rule,
2792 in->args.query.action);
2795 port_flow_list(in->port, in->args.list.group_n,
2796 in->args.list.group);
2803 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as the token-header slot during generation and as the
 * parsed buffer once the command line is complete. */
2805 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2808 cmd_flow_tok(arg0, arg2);
2810 cmd_flow_parsed(arg0);
2813 /** Global parser instance (cmdline API). */
2814 cmdline_parse_inst_t cmd_flow = {
2816 .data = NULL, /**< Unused. */
2817 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2820 }, /**< Tokens are returned by cmd_flow_tok(). */