/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
84 /* Destroy arguments. */
87 /* Query arguments. */
93 /* Validate/create arguments. */
99 /* Validate/create pattern. */
163 ITEM_E_TAG_GRP_ECID_B,
171 /* Validate/create actions. */
195 /** Size of pattern[] field in struct rte_flow_item_raw. */
196 #define ITEM_RAW_PATTERN_SIZE 36
198 /** Storage size for struct rte_flow_item_raw including pattern. */
199 #define ITEM_RAW_SIZE \
200 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
202 /** Number of queue[] entries in struct rte_flow_action_rss. */
203 #define ACTION_RSS_NUM 32
205 /** Storage size for struct rte_flow_action_rss including queues. */
206 #define ACTION_RSS_SIZE \
207 (offsetof(struct rte_flow_action_rss, queue) + \
208 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
210 /** Maximum number of subsequent tokens and arguments on the stack. */
211 #define CTX_STACK_SIZE 16
213 /** Parser context. */
215 /** Stack of subsequent token lists to process. */
216 const enum index *next[CTX_STACK_SIZE];
217 /** Arguments for stacked tokens. */
218 const void *args[CTX_STACK_SIZE];
219 enum index curr; /**< Current token index. */
220 enum index prev; /**< Index of the last token seen. */
221 int next_num; /**< Number of entries in next[]. */
222 int args_num; /**< Number of entries in args[]. */
223 uint32_t reparse:1; /**< Start over from the beginning. */
224 uint32_t eol:1; /**< EOL has been detected. */
225 uint32_t last:1; /**< No more arguments. */
226 uint16_t port; /**< Current port ID (for completions). */
227 uint32_t objdata; /**< Object-specific data. */
228 void *object; /**< Address of current object for relative offsets. */
229 void *objmask; /**< Object a full mask must be written to. */
232 /** Token argument. */
234 uint32_t hton:1; /**< Use network byte ordering. */
235 uint32_t sign:1; /**< Value is signed. */
236 uint32_t offset; /**< Relative offset from ctx->object. */
237 uint32_t size; /**< Field size. */
238 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
241 /** Parser token definition. */
243 /** Type displayed during completion (defaults to "TOKEN"). */
245 /** Help displayed during completion (defaults to token name). */
247 /** Private data used by parser functions. */
250 * Lists of subsequent tokens to push on the stack. Each call to the
251 * parser consumes the last entry of that stack.
253 const enum index *const *next;
254 /** Arguments stack for subsequent tokens that need them. */
255 const struct arg *const *args;
257 * Token-processing callback, returns -1 in case of error, the
258 * length of the matched string otherwise. If NULL, attempts to
259 * match the token name.
261 * If buf is not NULL, the result should be stored in it according
262 * to context. An error is returned if not large enough.
264 int (*call)(struct context *ctx, const struct token *token,
265 const char *str, unsigned int len,
266 void *buf, unsigned int size);
268 * Callback that provides possible values for this token, used for
269 * completion. Returns -1 in case of error, the number of possible
270 * values otherwise. If NULL, the token name is used.
272 * If buf is not NULL, entry index ent is written to buf and the
273 * full length of the entry is returned (same behavior as
276 int (*comp)(struct context *ctx, const struct token *token,
277 unsigned int ent, char *buf, unsigned int size);
278 /** Mandatory token name, no default value. */
282 /** Static initializer for the next field. */
283 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
285 /** Static initializer for a NEXT() entry. */
286 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
288 /** Static initializer for the args field. */
289 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
291 /** Static initializer for ARGS() to target a field. */
292 #define ARGS_ENTRY(s, f) \
293 (&(const struct arg){ \
294 .offset = offsetof(s, f), \
295 .size = sizeof(((s *)0)->f), \
298 /** Static initializer for ARGS() to target a bit-field. */
299 #define ARGS_ENTRY_BF(s, f, b) \
300 (&(const struct arg){ \
302 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
305 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
306 #define ARGS_ENTRY_MASK(s, f, m) \
307 (&(const struct arg){ \
308 .offset = offsetof(s, f), \
309 .size = sizeof(((s *)0)->f), \
310 .mask = (const void *)(m), \
313 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
314 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
315 (&(const struct arg){ \
317 .offset = offsetof(s, f), \
318 .size = sizeof(((s *)0)->f), \
319 .mask = (const void *)(m), \
322 /** Static initializer for ARGS() to target a pointer. */
323 #define ARGS_ENTRY_PTR(s, f) \
324 (&(const struct arg){ \
325 .size = sizeof(*((s *)0)->f), \
328 /** Static initializer for ARGS() with arbitrary size. */
329 #define ARGS_ENTRY_USZ(s, f, sz) \
330 (&(const struct arg){ \
331 .offset = offsetof(s, f), \
335 /** Same as ARGS_ENTRY() using network byte ordering. */
336 #define ARGS_ENTRY_HTON(s, f) \
337 (&(const struct arg){ \
339 .offset = offsetof(s, f), \
340 .size = sizeof(((s *)0)->f), \
343 /** Parser output buffer layout expected by cmd_flow_parsed(). */
345 enum index command; /**< Flow command. */
346 uint16_t port; /**< Affected port ID. */
349 struct rte_flow_attr attr;
350 struct rte_flow_item *pattern;
351 struct rte_flow_action *actions;
355 } vc; /**< Validate/create arguments. */
359 } destroy; /**< Destroy arguments. */
362 enum rte_flow_action_type action;
363 } query; /**< Query arguments. */
367 } list; /**< List arguments. */
368 } args; /**< Command arguments. */
371 /** Private data for pattern items. */
372 struct parse_item_priv {
373 enum rte_flow_item_type type; /**< Item type. */
374 uint32_t size; /**< Size of item specification structure. */
377 #define PRIV_ITEM(t, s) \
378 (&(const struct parse_item_priv){ \
379 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
383 /** Private data for actions. */
384 struct parse_action_priv {
385 enum rte_flow_action_type type; /**< Action type. */
386 uint32_t size; /**< Size of action configuration structure. */
389 #define PRIV_ACTION(t, s) \
390 (&(const struct parse_action_priv){ \
391 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
395 static const enum index next_vc_attr[] = {
404 static const enum index next_destroy_attr[] = {
410 static const enum index next_list_attr[] = {
416 static const enum index item_param[] = {
425 static const enum index next_item[] = {
450 static const enum index item_any[] = {
456 static const enum index item_vf[] = {
462 static const enum index item_port[] = {
468 static const enum index item_raw[] = {
478 static const enum index item_eth[] = {
486 static const enum index item_vlan[] = {
496 static const enum index item_ipv4[] = {
506 static const enum index item_ipv6[] = {
517 static const enum index item_icmp[] = {
524 static const enum index item_udp[] = {
531 static const enum index item_tcp[] = {
538 static const enum index item_sctp[] = {
547 static const enum index item_vxlan[] = {
553 static const enum index item_e_tag[] = {
554 ITEM_E_TAG_GRP_ECID_B,
559 static const enum index item_nvgre[] = {
565 static const enum index item_mpls[] = {
571 static const enum index item_gre[] = {
577 static const enum index next_action[] = {
593 static const enum index action_mark[] = {
599 static const enum index action_queue[] = {
605 static const enum index action_dup[] = {
611 static const enum index action_rss[] = {
617 static const enum index action_vf[] = {
624 static int parse_init(struct context *, const struct token *,
625 const char *, unsigned int,
626 void *, unsigned int);
627 static int parse_vc(struct context *, const struct token *,
628 const char *, unsigned int,
629 void *, unsigned int);
630 static int parse_vc_spec(struct context *, const struct token *,
631 const char *, unsigned int, void *, unsigned int);
632 static int parse_vc_conf(struct context *, const struct token *,
633 const char *, unsigned int, void *, unsigned int);
634 static int parse_vc_action_rss_queue(struct context *, const struct token *,
635 const char *, unsigned int, void *,
637 static int parse_destroy(struct context *, const struct token *,
638 const char *, unsigned int,
639 void *, unsigned int);
640 static int parse_flush(struct context *, const struct token *,
641 const char *, unsigned int,
642 void *, unsigned int);
643 static int parse_query(struct context *, const struct token *,
644 const char *, unsigned int,
645 void *, unsigned int);
646 static int parse_action(struct context *, const struct token *,
647 const char *, unsigned int,
648 void *, unsigned int);
649 static int parse_list(struct context *, const struct token *,
650 const char *, unsigned int,
651 void *, unsigned int);
652 static int parse_int(struct context *, const struct token *,
653 const char *, unsigned int,
654 void *, unsigned int);
655 static int parse_prefix(struct context *, const struct token *,
656 const char *, unsigned int,
657 void *, unsigned int);
658 static int parse_boolean(struct context *, const struct token *,
659 const char *, unsigned int,
660 void *, unsigned int);
661 static int parse_string(struct context *, const struct token *,
662 const char *, unsigned int,
663 void *, unsigned int);
664 static int parse_mac_addr(struct context *, const struct token *,
665 const char *, unsigned int,
666 void *, unsigned int);
667 static int parse_ipv4_addr(struct context *, const struct token *,
668 const char *, unsigned int,
669 void *, unsigned int);
670 static int parse_ipv6_addr(struct context *, const struct token *,
671 const char *, unsigned int,
672 void *, unsigned int);
673 static int parse_port(struct context *, const struct token *,
674 const char *, unsigned int,
675 void *, unsigned int);
676 static int comp_none(struct context *, const struct token *,
677 unsigned int, char *, unsigned int);
678 static int comp_boolean(struct context *, const struct token *,
679 unsigned int, char *, unsigned int);
680 static int comp_action(struct context *, const struct token *,
681 unsigned int, char *, unsigned int);
682 static int comp_port(struct context *, const struct token *,
683 unsigned int, char *, unsigned int);
684 static int comp_rule_id(struct context *, const struct token *,
685 unsigned int, char *, unsigned int);
686 static int comp_vc_action_rss_queue(struct context *, const struct token *,
687 unsigned int, char *, unsigned int);
689 /** Token definitions. */
690 static const struct token token_list[] = {
691 /* Special tokens. */
694 .help = "null entry, abused as the entry point",
695 .next = NEXT(NEXT_ENTRY(FLOW)),
700 .help = "command may end here",
706 .help = "integer value",
711 .name = "{unsigned}",
713 .help = "unsigned integer value",
720 .help = "prefix length for bit-mask",
721 .call = parse_prefix,
727 .help = "any boolean value",
728 .call = parse_boolean,
729 .comp = comp_boolean,
734 .help = "fixed string",
735 .call = parse_string,
739 .name = "{MAC address}",
741 .help = "standard MAC address notation",
742 .call = parse_mac_addr,
746 .name = "{IPv4 address}",
747 .type = "IPV4 ADDRESS",
748 .help = "standard IPv4 address notation",
749 .call = parse_ipv4_addr,
753 .name = "{IPv6 address}",
754 .type = "IPV6 ADDRESS",
755 .help = "standard IPv6 address notation",
756 .call = parse_ipv6_addr,
762 .help = "rule identifier",
764 .comp = comp_rule_id,
769 .help = "port identifier",
774 .name = "{group_id}",
776 .help = "group identifier",
783 .help = "priority level",
787 /* Top-level command. */
790 .type = "{command} {port_id} [{arg} [...]]",
791 .help = "manage ingress/egress flow rules",
792 .next = NEXT(NEXT_ENTRY
801 /* Sub-level commands. */
804 .help = "check whether a flow rule can be created",
805 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
806 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
811 .help = "create a flow rule",
812 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
813 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
818 .help = "destroy specific flow rules",
819 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
820 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
821 .call = parse_destroy,
825 .help = "destroy all flow rules",
826 .next = NEXT(NEXT_ENTRY(PORT_ID)),
827 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
832 .help = "query an existing flow rule",
833 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
835 NEXT_ENTRY(PORT_ID)),
836 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
837 ARGS_ENTRY(struct buffer, args.query.rule),
838 ARGS_ENTRY(struct buffer, port)),
843 .help = "list existing flow rules",
844 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
845 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
848 /* Destroy arguments. */
851 .help = "specify a rule identifier",
852 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
853 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
854 .call = parse_destroy,
856 /* Query arguments. */
860 .help = "action to query, must be part of the rule",
861 .call = parse_action,
864 /* List arguments. */
867 .help = "specify a group",
868 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
869 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
872 /* Validate/create attributes. */
875 .help = "specify a group",
876 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
877 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
882 .help = "specify a priority level",
883 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
884 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
889 .help = "affect rule to ingress",
890 .next = NEXT(next_vc_attr),
895 .help = "affect rule to egress",
896 .next = NEXT(next_vc_attr),
899 /* Validate/create pattern. */
902 .help = "submit a list of pattern items",
903 .next = NEXT(next_item),
908 .help = "match value perfectly (with full bit-mask)",
909 .call = parse_vc_spec,
911 [ITEM_PARAM_SPEC] = {
913 .help = "match value according to configured bit-mask",
914 .call = parse_vc_spec,
916 [ITEM_PARAM_LAST] = {
918 .help = "specify upper bound to establish a range",
919 .call = parse_vc_spec,
921 [ITEM_PARAM_MASK] = {
923 .help = "specify bit-mask with relevant bits set to one",
924 .call = parse_vc_spec,
926 [ITEM_PARAM_PREFIX] = {
928 .help = "generate bit-mask from a prefix length",
929 .call = parse_vc_spec,
933 .help = "specify next pattern item",
934 .next = NEXT(next_item),
938 .help = "end list of pattern items",
939 .priv = PRIV_ITEM(END, 0),
940 .next = NEXT(NEXT_ENTRY(ACTIONS)),
945 .help = "no-op pattern item",
946 .priv = PRIV_ITEM(VOID, 0),
947 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
952 .help = "perform actions when pattern does not match",
953 .priv = PRIV_ITEM(INVERT, 0),
954 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
959 .help = "match any protocol for the current layer",
960 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
961 .next = NEXT(item_any),
966 .help = "number of layers covered",
967 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
968 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
972 .help = "match packets addressed to the physical function",
973 .priv = PRIV_ITEM(PF, 0),
974 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
979 .help = "match packets addressed to a virtual function ID",
980 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
981 .next = NEXT(item_vf),
986 .help = "destination VF ID",
987 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
988 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
992 .help = "device-specific physical port index to use",
993 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
994 .next = NEXT(item_port),
997 [ITEM_PORT_INDEX] = {
999 .help = "physical port index",
1000 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1001 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1005 .help = "match an arbitrary byte string",
1006 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1007 .next = NEXT(item_raw),
1010 [ITEM_RAW_RELATIVE] = {
1012 .help = "look for pattern after the previous item",
1013 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1014 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1017 [ITEM_RAW_SEARCH] = {
1019 .help = "search pattern from offset (see also limit)",
1020 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1021 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1024 [ITEM_RAW_OFFSET] = {
1026 .help = "absolute or relative offset for pattern",
1027 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1028 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1030 [ITEM_RAW_LIMIT] = {
1032 .help = "search area limit for start of pattern",
1033 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1034 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1036 [ITEM_RAW_PATTERN] = {
1038 .help = "byte string to look for",
1039 .next = NEXT(item_raw,
1041 NEXT_ENTRY(ITEM_PARAM_IS,
1044 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1045 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1047 ITEM_RAW_PATTERN_SIZE)),
1051 .help = "match Ethernet header",
1052 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1053 .next = NEXT(item_eth),
1058 .help = "destination MAC",
1059 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1060 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1064 .help = "source MAC",
1065 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1066 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1070 .help = "EtherType",
1071 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1072 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1076 .help = "match 802.1Q/ad VLAN tag",
1077 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1078 .next = NEXT(item_vlan),
1081 [ITEM_VLAN_TPID] = {
1083 .help = "tag protocol identifier",
1084 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1085 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1089 .help = "tag control information",
1090 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1091 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1095 .help = "priority code point",
1096 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1097 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1102 .help = "drop eligible indicator",
1103 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1104 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1109 .help = "VLAN identifier",
1110 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1111 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1116 .help = "match IPv4 header",
1117 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1118 .next = NEXT(item_ipv4),
1123 .help = "type of service",
1124 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1125 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1126 hdr.type_of_service)),
1130 .help = "time to live",
1131 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1132 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1135 [ITEM_IPV4_PROTO] = {
1137 .help = "next protocol ID",
1138 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1140 hdr.next_proto_id)),
1144 .help = "source address",
1145 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1146 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1151 .help = "destination address",
1152 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1153 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1158 .help = "match IPv6 header",
1159 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1160 .next = NEXT(item_ipv6),
1165 .help = "traffic class",
1166 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1167 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1169 "\x0f\xf0\x00\x00")),
1171 [ITEM_IPV6_FLOW] = {
1173 .help = "flow label",
1174 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1175 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1177 "\x00\x0f\xff\xff")),
1179 [ITEM_IPV6_PROTO] = {
1181 .help = "protocol (next header)",
1182 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1183 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1188 .help = "hop limit",
1189 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1190 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1195 .help = "source address",
1196 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1197 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1202 .help = "destination address",
1203 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1204 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1209 .help = "match ICMP header",
1210 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1211 .next = NEXT(item_icmp),
1214 [ITEM_ICMP_TYPE] = {
1216 .help = "ICMP packet type",
1217 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1218 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1221 [ITEM_ICMP_CODE] = {
1223 .help = "ICMP packet code",
1224 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1225 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1230 .help = "match UDP header",
1231 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1232 .next = NEXT(item_udp),
1237 .help = "UDP source port",
1238 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1239 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1244 .help = "UDP destination port",
1245 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1246 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1251 .help = "match TCP header",
1252 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1253 .next = NEXT(item_tcp),
1258 .help = "TCP source port",
1259 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1260 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1265 .help = "TCP destination port",
1266 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1272 .help = "match SCTP header",
1273 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1274 .next = NEXT(item_sctp),
1279 .help = "SCTP source port",
1280 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1281 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1286 .help = "SCTP destination port",
1287 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1288 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1293 .help = "validation tag",
1294 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1295 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1298 [ITEM_SCTP_CKSUM] = {
1301 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1302 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1307 .help = "match VXLAN header",
1308 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1309 .next = NEXT(item_vxlan),
1312 [ITEM_VXLAN_VNI] = {
1314 .help = "VXLAN identifier",
1315 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1316 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1320 .help = "match E-Tag header",
1321 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1322 .next = NEXT(item_e_tag),
1325 [ITEM_E_TAG_GRP_ECID_B] = {
1326 .name = "grp_ecid_b",
1327 .help = "GRP and E-CID base",
1328 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1329 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1335 .help = "match NVGRE header",
1336 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1337 .next = NEXT(item_nvgre),
1340 [ITEM_NVGRE_TNI] = {
1342 .help = "virtual subnet ID",
1343 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1344 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1348 .help = "match MPLS header",
1349 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1350 .next = NEXT(item_mpls),
1353 [ITEM_MPLS_LABEL] = {
1355 .help = "MPLS label",
1356 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1357 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1363 .help = "match GRE header",
1364 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1365 .next = NEXT(item_gre),
1368 [ITEM_GRE_PROTO] = {
1370 .help = "GRE protocol type",
1371 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1372 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1375 /* Validate/create actions. */
1378 .help = "submit a list of associated actions",
1379 .next = NEXT(next_action),
1384 .help = "specify next action",
1385 .next = NEXT(next_action),
1389 .help = "end list of actions",
1390 .priv = PRIV_ACTION(END, 0),
1395 .help = "no-op action",
1396 .priv = PRIV_ACTION(VOID, 0),
1397 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1400 [ACTION_PASSTHRU] = {
1402 .help = "let subsequent rule process matched packets",
1403 .priv = PRIV_ACTION(PASSTHRU, 0),
1404 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1409 .help = "attach 32 bit value to packets",
1410 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1411 .next = NEXT(action_mark),
1414 [ACTION_MARK_ID] = {
1416 .help = "32 bit value to return with packets",
1417 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1418 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1419 .call = parse_vc_conf,
1423 .help = "flag packets",
1424 .priv = PRIV_ACTION(FLAG, 0),
1425 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1430 .help = "assign packets to a given queue index",
1431 .priv = PRIV_ACTION(QUEUE,
1432 sizeof(struct rte_flow_action_queue)),
1433 .next = NEXT(action_queue),
1436 [ACTION_QUEUE_INDEX] = {
1438 .help = "queue index to use",
1439 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1440 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1441 .call = parse_vc_conf,
1445 .help = "drop packets (note: passthru has priority)",
1446 .priv = PRIV_ACTION(DROP, 0),
1447 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1452 .help = "enable counters for this rule",
1453 .priv = PRIV_ACTION(COUNT, 0),
1454 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1459 .help = "duplicate packets to a given queue index",
1460 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1461 .next = NEXT(action_dup),
1464 [ACTION_DUP_INDEX] = {
1466 .help = "queue index to duplicate packets to",
1467 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1468 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1469 .call = parse_vc_conf,
1473 .help = "spread packets among several queues",
1474 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1475 .next = NEXT(action_rss),
1478 [ACTION_RSS_QUEUES] = {
1480 .help = "queue indices to use",
1481 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1482 .call = parse_vc_conf,
1484 [ACTION_RSS_QUEUE] = {
1486 .help = "queue index",
1487 .call = parse_vc_action_rss_queue,
1488 .comp = comp_vc_action_rss_queue,
1492 .help = "redirect packets to physical device function",
1493 .priv = PRIV_ACTION(PF, 0),
1494 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1499 .help = "redirect packets to virtual device function",
1500 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1501 .next = NEXT(action_vf),
1504 [ACTION_VF_ORIGINAL] = {
1506 .help = "use original VF ID if possible",
1507 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1508 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1510 .call = parse_vc_conf,
1514 .help = "VF ID to redirect packets to",
1515 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1516 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1517 .call = parse_vc_conf,
1521 /** Remove and return last entry from argument stack. */
1522 static const struct arg *
1523 pop_args(struct context *ctx)
1525 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1528 /** Add entry on top of the argument stack. */
1530 push_args(struct context *ctx, const struct arg *arg)
1532 if (ctx->args_num == CTX_STACK_SIZE)
1534 ctx->args[ctx->args_num++] = arg;
1538 /** Spread value into buffer according to bit-mask. */
1540 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1542 uint32_t i = arg->size;
1550 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1559 unsigned int shift = 0;
1560 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1562 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1563 if (!(arg->mask[i] & (1 << shift)))
1568 *buf &= ~(1 << shift);
1569 *buf |= (val & 1) << shift;
1578 * Parse a prefix length and generate a bit-mask.
1580 * Last argument (ctx->args) is retrieved to determine mask size, storage
1581 * location and whether the result must use network byte ordering.
1584 parse_prefix(struct context *ctx, const struct token *token,
1585 const char *str, unsigned int len,
1586 void *buf, unsigned int size)
1588 const struct arg *arg = pop_args(ctx);
1589 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1596 /* Argument is expected. */
1600 u = strtoumax(str, &end, 0);
1601 if (errno || (size_t)(end - str) != len)
1606 extra = arg_entry_bf_fill(NULL, 0, arg);
1615 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1616 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1623 if (bytes > size || bytes + !!extra > size)
1627 buf = (uint8_t *)ctx->object + arg->offset;
1628 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1630 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1631 memset(buf, 0x00, size - bytes);
1633 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1637 memset(buf, 0xff, bytes);
1638 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1640 ((uint8_t *)buf)[bytes] = conv[extra];
1643 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
1646 push_args(ctx, arg);
1650 /** Default parsing function for token name matching. */
1652 parse_default(struct context *ctx, const struct token *token,
1653 const char *str, unsigned int len,
1654 void *buf, unsigned int size)
1659 if (strncmp(str, token->name, len))
1664 /** Parse flow command, initialize output buffer for subsequent tokens. */
/* NOTE(review): intermediate lines are elided in this extract. */
1666 parse_init(struct context *ctx, const struct token *token,
1667 const char *str, unsigned int len,
1668 void *buf, unsigned int size)
1670 struct buffer *out = buf;
1672 /* Token name must match. */
1673 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1675 /* Nothing else to do if there is no buffer. */
1678 /* Make sure buffer is large enough. */
1679 if (size < sizeof(*out))
1681 /* Initialize buffer. */
1682 memset(out, 0x00, sizeof(*out));
/* 0x22 is a poison byte marking the unused tail of the buffer. */
1683 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1686 ctx->objmask = NULL;
1690 /** Parse tokens for validate/create commands. */
/*
 * NOTE(review): intermediate lines are elided in this extract. The visible
 * logic carves pattern items, actions and their spec/last/mask data out of
 * the single output buffer: items/actions grow upward from the end of the
 * header while their data grows downward from the end of the buffer.
 */
1692 parse_vc(struct context *ctx, const struct token *token,
1693 const char *str, unsigned int len,
1694 void *buf, unsigned int size)
1696 struct buffer *out = buf;
1700 /* Token name must match. */
1701 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1703 /* Nothing else to do if there is no buffer. */
/* First token of the command: record which command this is. */
1706 if (!out->command) {
1707 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1709 if (sizeof(*out) > size)
1711 out->command = ctx->curr;
1714 ctx->objmask = NULL;
/* Data area initially starts at the very end of the buffer. */
1715 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens (ingress/egress/...) write into vc.attr. */
1719 ctx->object = &out->args.vc.attr;
1720 ctx->objmask = NULL;
1721 switch (ctx->curr) {
1726 out->args.vc.attr.ingress = 1;
1729 out->args.vc.attr.egress = 1;
/* "pattern" token: place the item array right after the header. */
1732 out->args.vc.pattern =
1733 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1735 ctx->object = out->args.vc.pattern;
1736 ctx->objmask = NULL;
/* "actions" token: action array follows the parsed pattern items. */
1739 out->args.vc.actions =
1740 (void *)RTE_ALIGN_CEIL((uintptr_t)
1741 (out->args.vc.pattern +
1742 out->args.vc.pattern_n),
1744 ctx->object = out->args.vc.actions;
1745 ctx->objmask = NULL;
/* Pattern item token: reserve spec/last/mask storage from the top. */
1752 if (!out->args.vc.actions) {
1753 const struct parse_item_priv *priv = token->priv;
1754 struct rte_flow_item *item =
1755 out->args.vc.pattern + out->args.vc.pattern_n;
1757 data_size = priv->size * 3; /* spec, last, mask */
1758 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1759 (out->args.vc.data - data_size),
/* Fail when the item array would collide with the data area. */
1761 if ((uint8_t *)item + sizeof(*item) > data)
1763 *item = (struct rte_flow_item){
1766 ++out->args.vc.pattern_n;
1768 ctx->objmask = NULL;
/* Action token: reserve configuration storage from the top. */
1770 const struct parse_action_priv *priv = token->priv;
1771 struct rte_flow_action *action =
1772 out->args.vc.actions + out->args.vc.actions_n;
1774 data_size = priv->size; /* configuration */
1775 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1776 (out->args.vc.data - data_size),
1778 if ((uint8_t *)action + sizeof(*action) > data)
1780 *action = (struct rte_flow_action){
1783 ++out->args.vc.actions_n;
1784 ctx->object = action;
1785 ctx->objmask = NULL;
/* Newly reserved data block is zeroed and becomes the new data top. */
1787 memset(data, 0, data_size);
1788 out->args.vc.data = data;
1789 ctx->objdata = data_size;
1793 /** Parse pattern item parameter type. */
/*
 * NOTE(review): intermediate lines are elided in this extract. Selects
 * which of spec/last/mask the following value tokens will be stored in,
 * based on the parameter keyword (spec/last/prefix/mask).
 */
1795 parse_vc_spec(struct context *ctx, const struct token *token,
1796 const char *str, unsigned int len,
1797 void *buf, unsigned int size)
1799 struct buffer *out = buf;
1800 struct rte_flow_item *item;
1806 /* Token name must match. */
1807 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1809 /* Parse parameter types. */
1810 switch (ctx->curr) {
1811 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1817 case ITEM_PARAM_SPEC:
1820 case ITEM_PARAM_LAST:
1823 case ITEM_PARAM_PREFIX:
1824 /* Modify next token to expect a prefix. */
1825 if (ctx->next_num < 2)
1827 ctx->next[ctx->next_num - 2] = prefix;
1829 case ITEM_PARAM_MASK:
1835 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach parameters to. */
1838 if (!out->args.vc.pattern_n)
1840 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1841 data_size = ctx->objdata / 3; /* spec, last, mask */
1842 /* Point to selected object. */
1843 ctx->object = out->args.vc.data + (data_size * index);
1845 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1846 item->mask = ctx->objmask;
1848 ctx->objmask = NULL;
1849 /* Update relevant item pointer. */
1850 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1855 /** Parse action configuration field. */
/* NOTE(review): intermediate lines are elided in this extract. */
1857 parse_vc_conf(struct context *ctx, const struct token *token,
1858 const char *str, unsigned int len,
1859 void *buf, unsigned int size)
1861 struct buffer *out = buf;
1862 struct rte_flow_action *action;
1865 /* Token name must match. */
1866 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1868 /* Nothing else to do if there is no buffer. */
/* An action must already exist to attach a configuration to. */
1871 if (!out->args.vc.actions_n)
1873 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1874 /* Point to selected object. */
1875 ctx->object = out->args.vc.data;
1876 ctx->objmask = NULL;
1877 /* Update configuration pointer. */
1878 action->conf = ctx->object;
1883 * Parse queue field for RSS action.
1885 * Valid tokens are queue indices and the "end" token.
/*
 * NOTE(review): intermediate lines are elided in this extract. The queue
 * count is kept in the upper 16 bits of ctx->objdata while parsing.
 */
1888 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1889 const char *str, unsigned int len,
1890 void *buf, unsigned int size)
1892 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1899 if (ctx->curr != ACTION_RSS_QUEUE)
/* Retrieve the running queue count from objdata's upper half. */
1901 i = ctx->objdata >> 16;
/* "end" terminates the queue list; clear the stashed count. */
1902 if (!strncmp(str, "end", len)) {
1903 ctx->objdata &= 0xffff;
/* Reject more queues than the RSS action can hold. */
1906 if (i >= ACTION_RSS_NUM)
1908 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1910 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into objdata's upper half. */
1916 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Queue this same token again so another index can follow. */
1918 if (ctx->next_num == RTE_DIM(ctx->next))
1920 ctx->next[ctx->next_num++] = next;
/* Final queue count recorded in the RSS action itself. */
1923 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1927 /** Parse tokens for destroy command. */
/* NOTE(review): intermediate lines are elided in this extract. */
1929 parse_destroy(struct context *ctx, const struct token *token,
1930 const char *str, unsigned int len,
1931 void *buf, unsigned int size)
1933 struct buffer *out = buf;
1935 /* Token name must match. */
1936 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1938 /* Nothing else to do if there is no buffer. */
/* First token: record the command and place the rule-ID array. */
1941 if (!out->command) {
1942 if (ctx->curr != DESTROY)
1944 if (sizeof(*out) > size)
1946 out->command = ctx->curr;
1949 ctx->objmask = NULL;
1950 out->args.destroy.rule =
1951 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent tokens append rule IDs; fail when the buffer is full. */
1955 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1956 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1959 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1960 ctx->objmask = NULL;
1964 /** Parse tokens for flush command. */
/* NOTE(review): intermediate lines are elided in this extract. */
1966 parse_flush(struct context *ctx, const struct token *token,
1967 const char *str, unsigned int len,
1968 void *buf, unsigned int size)
1970 struct buffer *out = buf;
1972 /* Token name must match. */
1973 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1975 /* Nothing else to do if there is no buffer. */
/* First token: record the command; flush takes no further arguments. */
1978 if (!out->command) {
1979 if (ctx->curr != FLUSH)
1981 if (sizeof(*out) > size)
1983 out->command = ctx->curr;
1986 ctx->objmask = NULL;
1991 /** Parse tokens for query command. */
/* NOTE(review): intermediate lines are elided in this extract. */
1993 parse_query(struct context *ctx, const struct token *token,
1994 const char *str, unsigned int len,
1995 void *buf, unsigned int size)
1997 struct buffer *out = buf;
1999 /* Token name must match. */
2000 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2002 /* Nothing else to do if there is no buffer. */
/* First token: record the command for subsequent argument tokens. */
2005 if (!out->command) {
2006 if (ctx->curr != QUERY)
2008 if (sizeof(*out) > size)
2010 out->command = ctx->curr;
2013 ctx->objmask = NULL;
2018 /** Parse action names. */
/*
 * NOTE(review): intermediate lines are elided in this extract. Matches str
 * against the names of all known action tokens (next_action list).
 */
2020 parse_action(struct context *ctx, const struct token *token,
2021 const char *str, unsigned int len,
2022 void *buf, unsigned int size)
2024 struct buffer *out = buf;
2025 const struct arg *arg = pop_args(ctx);
2029 /* Argument is expected. */
2032 /* Parse action name. */
2033 for (i = 0; next_action[i]; ++i) {
2034 const struct parse_action_priv *priv;
2036 token = &token_list[next_action[i]];
2037 if (strncmp(token->name, str, len))
/* On match, the action type is copied into the argument's field. */
2043 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument (presumably the error path; elided here). */
2049 push_args(ctx, arg);
2053 /** Parse tokens for list command. */
/* NOTE(review): intermediate lines are elided in this extract. */
2055 parse_list(struct context *ctx, const struct token *token,
2056 const char *str, unsigned int len,
2057 void *buf, unsigned int size)
2059 struct buffer *out = buf;
2061 /* Token name must match. */
2062 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2064 /* Nothing else to do if there is no buffer. */
/* First token: record the command and place the group-ID array. */
2067 if (!out->command) {
2068 if (ctx->curr != LIST)
2070 if (sizeof(*out) > size)
2072 out->command = ctx->curr;
2075 ctx->objmask = NULL;
2076 out->args.list.group =
2077 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent tokens append group IDs; fail when the buffer is full. */
2081 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2082 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2085 ctx->object = out->args.list.group + out->args.list.group_n++;
2086 ctx->objmask = NULL;
2091 * Parse signed/unsigned integers 8 to 64-bit long.
2093 * Last argument (ctx->args) is retrieved to determine integer type and
/* NOTE(review): intermediate lines are elided in this extract. */
2097 parse_int(struct context *ctx, const struct token *token,
2098 const char *str, unsigned int len,
2099 void *buf, unsigned int size)
2101 const struct arg *arg = pop_args(ctx);
2106 /* Argument is expected. */
/* Sign-aware conversion; base auto-detected (0x../0.. prefixes). */
2111 (uintmax_t)strtoimax(str, &end, 0) :
2112 strtoumax(str, &end, 0);
2113 if (errno || (size_t)(end - str) != len)
/* Bit-masked arguments go through the bit-fill helper instead. */
2118 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2119 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2123 buf = (uint8_t *)ctx->object + arg->offset;
/* Store the value at its native width; arg->hton requests big-endian. */
2127 case sizeof(uint8_t):
2128 *(uint8_t *)buf = u;
2130 case sizeof(uint16_t):
2131 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are written byte by byte in the requested order. */
2133 case sizeof(uint8_t [3]):
2134 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2136 ((uint8_t *)buf)[0] = u;
2137 ((uint8_t *)buf)[1] = u >> 8;
2138 ((uint8_t *)buf)[2] = u >> 16;
2142 ((uint8_t *)buf)[0] = u >> 16;
2143 ((uint8_t *)buf)[1] = u >> 8;
2144 ((uint8_t *)buf)[2] = u;
2146 case sizeof(uint32_t):
2147 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2149 case sizeof(uint64_t):
2150 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store for the object mask when one is active. */
2155 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2157 buf = (uint8_t *)ctx->objmask + arg->offset;
2162 push_args(ctx, arg);
2169 * Two arguments (ctx->args) are retrieved from the stack to store data and
2170 * its length (in that order).
/* NOTE(review): intermediate lines are elided in this extract. */
2173 parse_string(struct context *ctx, const struct token *token,
2174 const char *str, unsigned int len,
2175 void *buf, unsigned int size)
2177 const struct arg *arg_data = pop_args(ctx);
2178 const struct arg *arg_len = pop_args(ctx);
2179 char tmp[16]; /* Ought to be enough. */
2182 /* Arguments are expected. */
/* Restore arg_data so the stack stays balanced on this error path. */
2186 push_args(ctx, arg_data);
2189 size = arg_data->size;
2190 /* Bit-mask fill is not supported. */
2191 if (arg_data->mask || size < len)
2195 /* Let parse_int() fill length information first. */
2196 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2199 push_args(ctx, arg_len);
2200 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2205 buf = (uint8_t *)ctx->object + arg_data->offset;
2206 /* Output buffer is not necessarily NUL-terminated. */
2207 memcpy(buf, str, len);
/* 0x55 is a poison byte marking the unused tail of the field. */
2208 memset((uint8_t *)buf + len, 0x55, size - len);
2210 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Restore both arguments (presumably the error path; elided here). */
2213 push_args(ctx, arg_len);
2214 push_args(ctx, arg_data);
2219 * Parse a MAC address.
2221 * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): intermediate lines are elided in this extract. */
2225 parse_mac_addr(struct context *ctx, const struct token *token,
2226 const char *str, unsigned int len,
2227 void *buf, unsigned int size)
2229 const struct arg *arg = pop_args(ctx);
2230 struct ether_addr tmp;
2234 /* Argument is expected. */
2238 /* Bit-mask fill is not supported. */
2239 if (arg->mask || size != sizeof(tmp))
2241 /* Only network endian is supported. */
/* Delegate textual parsing to the cmdline library helper. */
2244 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2245 if (ret < 0 || (unsigned int)ret != len)
2249 buf = (uint8_t *)ctx->object + arg->offset;
2250 memcpy(buf, &tmp, size);
2252 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2255 push_args(ctx, arg);
2260 * Parse an IPv4 address.
2262 * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): intermediate lines are elided in this extract. */
2266 parse_ipv4_addr(struct context *ctx, const struct token *token,
2267 const char *str, unsigned int len,
2268 void *buf, unsigned int size)
2270 const struct arg *arg = pop_args(ctx);
2275 /* Argument is expected. */
2279 /* Bit-mask fill is not supported. */
2280 if (arg->mask || size != sizeof(tmp))
2282 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2285 memcpy(str2, str, len);
2287 ret = inet_pton(AF_INET, str2, &tmp);
2289 /* Attempt integer parsing. */
2290 push_args(ctx, arg);
2291 return parse_int(ctx, token, str, len, buf, size);
2295 buf = (uint8_t *)ctx->object + arg->offset;
2296 memcpy(buf, &tmp, size);
2298 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2301 push_args(ctx, arg);
2306 * Parse an IPv6 address.
2308 * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): intermediate lines are elided in this extract. */
2312 parse_ipv6_addr(struct context *ctx, const struct token *token,
2313 const char *str, unsigned int len,
2314 void *buf, unsigned int size)
2316 const struct arg *arg = pop_args(ctx);
2318 struct in6_addr tmp;
2322 /* Argument is expected. */
2326 /* Bit-mask fill is not supported. */
2327 if (arg->mask || size != sizeof(tmp))
2329 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2332 memcpy(str2, str, len);
2334 ret = inet_pton(AF_INET6, str2, &tmp);
2339 buf = (uint8_t *)ctx->object + arg->offset;
2340 memcpy(buf, &tmp, size);
2342 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2345 push_args(ctx, arg);
2349 /** Boolean values (even indices stand for false). */
/* NOTE(review): initializer entries elided in this extract; must end with
 * a NULL sentinel since parse_boolean()/comp_boolean() iterate to NULL. */
2350 static const char *const boolean_name[] = {
2359 * Parse a boolean value.
2361 * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): intermediate lines are elided in this extract. */
2365 parse_boolean(struct context *ctx, const struct token *token,
2366 const char *str, unsigned int len,
2367 void *buf, unsigned int size)
2369 const struct arg *arg = pop_args(ctx);
2373 /* Argument is expected. */
/* Look str up in the boolean keyword table (NULL-terminated). */
2376 for (i = 0; boolean_name[i]; ++i)
2377 if (!strncmp(str, boolean_name[i], len))
2379 /* Process token as integer. */
/* Even table indices mean false, odd mean true (see boolean_name). */
2380 if (boolean_name[i])
2381 str = i & 1 ? "1" : "0";
2382 push_args(ctx, arg);
2383 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted "0"/"1". */
2384 return ret > 0 ? (int)len : ret;
2387 /** Parse port and update context. */
/* NOTE(review): intermediate lines are elided in this extract. */
2389 parse_port(struct context *ctx, const struct token *token,
2390 const char *str, unsigned int len,
2391 void *buf, unsigned int size)
/* Scratch buffer used when the caller did not provide one. */
2393 struct buffer *out = &(struct buffer){ .port = 0 };
2401 ctx->objmask = NULL;
2402 size = sizeof(*out);
2404 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the parsed port in the context for later completion/help. */
2406 ctx->port = out->port;
2412 /** No completion. */
/* NOTE(review): body elided in this extract. */
2414 comp_none(struct context *ctx, const struct token *token,
2415 unsigned int ent, char *buf, unsigned int size)
2425 /** Complete boolean values. */
/* NOTE(review): intermediate lines are elided in this extract. When buf is
 * NULL this presumably returns the entry count — verify in full source. */
2427 comp_boolean(struct context *ctx, const struct token *token,
2428 unsigned int ent, char *buf, unsigned int size)
2434 for (i = 0; boolean_name[i]; ++i)
2435 if (buf && i == ent)
2436 return snprintf(buf, size, "%s", boolean_name[i]);
2442 /** Complete action names. */
/* NOTE(review): intermediate lines are elided in this extract. */
2444 comp_action(struct context *ctx, const struct token *token,
2445 unsigned int ent, char *buf, unsigned int size)
/* Walk the NULL-terminated list of action token indices. */
2451 for (i = 0; next_action[i]; ++i)
2452 if (buf && i == ent)
2453 return snprintf(buf, size, "%s",
2454 token_list[next_action[i]].name);
2460 /** Complete available ports. */
/* NOTE(review): intermediate lines are elided in this extract. */
2462 comp_port(struct context *ctx, const struct token *token,
2463 unsigned int ent, char *buf, unsigned int size)
/* Enumerate attached ethdev ports as completion candidates. */
2470 RTE_ETH_FOREACH_DEV(p) {
2471 if (buf && i == ent)
2472 return snprintf(buf, size, "%u", p);
2480 /** Complete available rule IDs. */
/* NOTE(review): intermediate lines are elided in this extract. */
2482 comp_rule_id(struct context *ctx, const struct token *token,
2483 unsigned int ent, char *buf, unsigned int size)
2486 struct rte_port *port;
2487 struct port_flow *pf;
/* Rule IDs only make sense for one specific, valid port. */
2490 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2491 ctx->port == (uint16_t)RTE_PORT_ALL)
2493 port = &ports[ctx->port];
/* Walk the port's singly-linked flow rule list. */
2494 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2495 if (buf && i == ent)
2496 return snprintf(buf, size, "%u", pf->id);
2504 /** Complete queue field for RSS action. */
/* NOTE(review): intermediate lines are elided in this extract. */
2506 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2507 unsigned int ent, char *buf, unsigned int size)
/* Only generic suggestions: any index ("") or the "end" keyword. */
2509 static const char *const str[] = { "", "end", NULL };
2514 for (i = 0; str[i] != NULL; ++i)
2515 if (buf && i == ent)
2516 return snprintf(buf, size, "%s", str[i]);
2522 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
2523 static struct context cmd_flow_context;
2525 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
2526 cmdline_parse_inst_t cmd_flow;
2528 /** Initialize context. */
/* NOTE(review): most field resets are elided in this extract. */
2530 cmd_flow_context_init(struct context *ctx)
2532 /* A full memset() is not necessary. */
2543 ctx->objmask = NULL;
2546 /** Parse a token (cmdline API). */
/*
 * NOTE(review): intermediate lines are elided in this extract. Entry point
 * handed to librte_cmdline; consumes one whitespace-delimited token from
 * src and advances the shared cmd_flow_context state machine.
 */
2548 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2551 struct context *ctx = &cmd_flow_context;
2552 const struct token *token;
2553 const enum index *list;
2558 /* Restart as requested. */
2560 cmd_flow_context_init(ctx);
2561 token = &token_list[ctx->curr];
2562 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2565 for (len = 0; src[len]; ++len)
2566 if (src[len] == '#' || isspace(src[len]))
2570 /* Last argument and EOL detection. */
2571 for (i = len; src[i]; ++i)
2572 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2574 else if (!isspace(src[i])) {
2579 if (src[i] == '\r' || src[i] == '\n') {
2583 /* Initialize context if necessary. */
2584 if (!ctx->next_num) {
2587 ctx->next[ctx->next_num++] = token->next[0];
2589 /* Process argument through candidates. */
2590 ctx->prev = ctx->curr;
2591 list = ctx->next[ctx->next_num - 1];
2592 for (i = 0; list[i]; ++i) {
2593 const struct token *next = &token_list[list[i]];
2596 ctx->curr = list[i];
/* Token-specific callback takes precedence over name matching. */
2598 tmp = next->call(ctx, next, src, len, result, size);
2600 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume exactly the token's length to match. */
2601 if (tmp == -1 || tmp != len)
2609 /* Push subsequent tokens if any. */
2611 for (i = 0; token->next[i]; ++i) {
2612 if (ctx->next_num == RTE_DIM(ctx->next))
2614 ctx->next[ctx->next_num++] = token->next[i];
2616 /* Push arguments if any. */
2618 for (i = 0; token->args[i]; ++i) {
2619 if (ctx->args_num == RTE_DIM(ctx->args))
2621 ctx->args[ctx->args_num++] = token->args[i];
2626 /** Return number of completion entries (cmdline API). */
/* NOTE(review): intermediate lines are elided in this extract. */
2628 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2630 struct context *ctx = &cmd_flow_context;
2631 const struct token *token = &token_list[ctx->curr];
2632 const enum index *list;
2636 /* Tell cmd_flow_parse() that context must be reinitialized. */
2638 /* Count number of tokens in current list. */
2640 list = ctx->next[ctx->next_num - 1];
2642 list = token->next[0];
2643 for (i = 0; list[i]; ++i)
2648 * If there is a single token, use its completion callback, otherwise
2649 * return the number of entries.
2651 token = &token_list[list[0]];
2652 if (i == 1 && token->comp) {
2653 /* Save index for cmd_flow_get_help(). */
2654 ctx->prev = list[0];
/* comp(NULL, 0) asks the callback for its entry count only. */
2655 return token->comp(ctx, token, 0, NULL, 0);
2660 /** Return a completion entry (cmdline API). */
/* NOTE(review): intermediate lines are elided in this extract. */
2662 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2663 char *dst, unsigned int size)
2665 struct context *ctx = &cmd_flow_context;
2666 const struct token *token = &token_list[ctx->curr];
2667 const enum index *list;
2671 /* Tell cmd_flow_parse() that context must be reinitialized. */
2673 /* Count number of tokens in current list. */
2675 list = ctx->next[ctx->next_num - 1];
2677 list = token->next[0];
2678 for (i = 0; list[i]; ++i)
2682 /* If there is a single token, use its completion callback. */
2683 token = &token_list[list[0]];
2684 if (i == 1 && token->comp) {
2685 /* Save index for cmd_flow_get_help(). */
2686 ctx->prev = list[0];
/* cmdline API expects 0 on success, -1 when index is out of range. */
2687 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2689 /* Otherwise make sure the index is valid and use defaults. */
2692 token = &token_list[list[index]];
2693 snprintf(dst, size, "%s", token->name);
2694 /* Save index for cmd_flow_get_help(). */
2695 ctx->prev = list[index];
2699 /** Populate help strings for current token (cmdline API). */
/* NOTE(review): intermediate lines are elided in this extract. */
2701 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2703 struct context *ctx = &cmd_flow_context;
/* Help is for the token saved by the completion callbacks (ctx->prev). */
2704 const struct token *token = &token_list[ctx->prev];
2707 /* Tell cmd_flow_parse() that context must be reinitialized. */
2711 /* Set token type and update global help with details. */
2712 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help string; fall back to the token name. */
2714 cmd_flow.help_str = token->help;
2716 cmd_flow.help_str = token->name;
2720 /** Token definition template (cmdline API). */
/* One shared header serves every dynamic token; the callbacks consult the
 * global cmd_flow_context to know which token is actually being handled. */
2721 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2722 .ops = &(struct cmdline_token_ops){
2723 .parse = cmd_flow_parse,
2724 .complete_get_nb = cmd_flow_complete_get_nb,
2725 .complete_get_elt = cmd_flow_complete_get_elt,
2726 .get_help = cmd_flow_get_help,
2731 /** Populate the next dynamic token. */
/* NOTE(review): intermediate lines are elided in this extract. */
2733 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2734 cmdline_parse_token_hdr_t *(*hdrs)[])
2736 struct context *ctx = &cmd_flow_context;
2738 /* Always reinitialize context before requesting the first token. */
2740 cmd_flow_context_init(ctx);
2741 /* Return NULL when no more tokens are expected. */
2742 if (!ctx->next_num && ctx->curr) {
2746 /* Determine if command should end here. */
2747 if (ctx->eol && ctx->last && ctx->next_num) {
2748 const enum index *list = ctx->next[ctx->next_num - 1];
2751 for (i = 0; list[i]; ++i) {
/* Same static header is reused for every token (see above). */
2758 *hdr = &cmd_flow_token_hdr;
2761 /** Dispatch parsed buffer to function calls. */
/* NOTE(review): case labels are elided in this extract; dispatch is on
 * in->command as set by the parse_* handlers above. */
2763 cmd_flow_parsed(const struct buffer *in)
2765 switch (in->command) {
2767 port_flow_validate(in->port, &in->args.vc.attr,
2768 in->args.vc.pattern, in->args.vc.actions);
2771 port_flow_create(in->port, &in->args.vc.attr,
2772 in->args.vc.pattern, in->args.vc.actions);
2775 port_flow_destroy(in->port, in->args.destroy.rule_n,
2776 in->args.destroy.rule);
2779 port_flow_flush(in->port);
2782 port_flow_query(in->port, in->args.query.rule,
2783 in->args.query.action);
2786 port_flow_list(in->port, in->args.list.group_n,
2787 in->args.list.group);
2794 /** Token generator and output processing callback (cmdline API). */
/* NOTE(review): branch conditions elided; presumably dispatches between
 * token generation and executing a fully parsed command — verify. */
2796 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2799 cmd_flow_tok(arg0, arg2);
2801 cmd_flow_parsed(arg0);
2804 /** Global parser instance (cmdline API). */
2805 cmdline_parse_inst_t cmd_flow = {
2807 .data = NULL, /**< Unused. */
2808 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2811 }, /**< Tokens are returned by cmd_flow_tok(). */