4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
85 /* Destroy arguments. */
88 /* Query arguments. */
94 /* Validate/create arguments. */
100 /* Validate/create pattern. */
165 ITEM_E_TAG_GRP_ECID_B,
179 /* Validate/create actions. */
/** Size of pattern[] field in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 36

/** Storage size for struct rte_flow_item_raw including pattern. */
#define ITEM_RAW_SIZE \
	(offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)

/** Number of queue[] entries in struct rte_flow_action_rss. */
#define ACTION_RSS_NUM 32

/** Storage size for struct rte_flow_action_rss including queues. */
#define ACTION_RSS_SIZE \
	(offsetof(struct rte_flow_action_rss, queue) + \
	 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)

/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
221 /** Parser context. */
223 /** Stack of subsequent token lists to process. */
224 const enum index *next[CTX_STACK_SIZE];
225 /** Arguments for stacked tokens. */
226 const void *args[CTX_STACK_SIZE];
227 enum index curr; /**< Current token index. */
228 enum index prev; /**< Index of the last token seen. */
229 int next_num; /**< Number of entries in next[]. */
230 int args_num; /**< Number of entries in args[]. */
231 uint32_t eol:1; /**< EOL has been detected. */
232 uint32_t last:1; /**< No more arguments. */
233 portid_t port; /**< Current port ID (for completions). */
234 uint32_t objdata; /**< Object-specific data. */
235 void *object; /**< Address of current object for relative offsets. */
236 void *objmask; /**< Object a full mask must be written to. */
239 /** Token argument. */
241 uint32_t hton:1; /**< Use network byte ordering. */
242 uint32_t sign:1; /**< Value is signed. */
243 uint32_t offset; /**< Relative offset from ctx->object. */
244 uint32_t size; /**< Field size. */
245 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
248 /** Parser token definition. */
250 /** Type displayed during completion (defaults to "TOKEN"). */
252 /** Help displayed during completion (defaults to token name). */
254 /** Private data used by parser functions. */
257 * Lists of subsequent tokens to push on the stack. Each call to the
258 * parser consumes the last entry of that stack.
260 const enum index *const *next;
261 /** Arguments stack for subsequent tokens that need them. */
262 const struct arg *const *args;
264 * Token-processing callback, returns -1 in case of error, the
265 * length of the matched string otherwise. If NULL, attempts to
266 * match the token name.
268 * If buf is not NULL, the result should be stored in it according
269 * to context. An error is returned if not large enough.
271 int (*call)(struct context *ctx, const struct token *token,
272 const char *str, unsigned int len,
273 void *buf, unsigned int size);
275 * Callback that provides possible values for this token, used for
276 * completion. Returns -1 in case of error, the number of possible
277 * values otherwise. If NULL, the token name is used.
279 * If buf is not NULL, entry index ent is written to buf and the
280 * full length of the entry is returned (same behavior as
283 int (*comp)(struct context *ctx, const struct token *token,
284 unsigned int ent, char *buf, unsigned int size);
285 /** Mandatory token name, no default value. */
/** Static initializer for the next field. */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/** Static initializer for a NEXT() entry. */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/** Static initializer for the args field. */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
298 /** Static initializer for ARGS() to target a field. */
299 #define ARGS_ENTRY(s, f) \
300 (&(const struct arg){ \
301 .offset = offsetof(s, f), \
302 .size = sizeof(((s *)0)->f), \
305 /** Static initializer for ARGS() to target a bit-field. */
306 #define ARGS_ENTRY_BF(s, f, b) \
307 (&(const struct arg){ \
309 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
312 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
313 #define ARGS_ENTRY_MASK(s, f, m) \
314 (&(const struct arg){ \
315 .offset = offsetof(s, f), \
316 .size = sizeof(((s *)0)->f), \
317 .mask = (const void *)(m), \
320 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
321 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
322 (&(const struct arg){ \
324 .offset = offsetof(s, f), \
325 .size = sizeof(((s *)0)->f), \
326 .mask = (const void *)(m), \
329 /** Static initializer for ARGS() to target a pointer. */
330 #define ARGS_ENTRY_PTR(s, f) \
331 (&(const struct arg){ \
332 .size = sizeof(*((s *)0)->f), \
335 /** Static initializer for ARGS() with arbitrary size. */
336 #define ARGS_ENTRY_USZ(s, f, sz) \
337 (&(const struct arg){ \
338 .offset = offsetof(s, f), \
342 /** Same as ARGS_ENTRY() using network byte ordering. */
343 #define ARGS_ENTRY_HTON(s, f) \
344 (&(const struct arg){ \
346 .offset = offsetof(s, f), \
347 .size = sizeof(((s *)0)->f), \
350 /** Parser output buffer layout expected by cmd_flow_parsed(). */
352 enum index command; /**< Flow command. */
353 portid_t port; /**< Affected port ID. */
356 struct rte_flow_attr attr;
357 struct rte_flow_item *pattern;
358 struct rte_flow_action *actions;
362 } vc; /**< Validate/create arguments. */
366 } destroy; /**< Destroy arguments. */
369 enum rte_flow_action_type action;
370 } query; /**< Query arguments. */
374 } list; /**< List arguments. */
377 } isolate; /**< Isolated mode arguments. */
378 } args; /**< Command arguments. */
381 /** Private data for pattern items. */
382 struct parse_item_priv {
383 enum rte_flow_item_type type; /**< Item type. */
384 uint32_t size; /**< Size of item specification structure. */
387 #define PRIV_ITEM(t, s) \
388 (&(const struct parse_item_priv){ \
389 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
393 /** Private data for actions. */
394 struct parse_action_priv {
395 enum rte_flow_action_type type; /**< Action type. */
396 uint32_t size; /**< Size of action configuration structure. */
399 #define PRIV_ACTION(t, s) \
400 (&(const struct parse_action_priv){ \
401 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
405 static const enum index next_vc_attr[] = {
414 static const enum index next_destroy_attr[] = {
420 static const enum index next_list_attr[] = {
426 static const enum index item_param[] = {
435 static const enum index next_item[] = {
464 static const enum index item_fuzzy[] = {
470 static const enum index item_any[] = {
476 static const enum index item_vf[] = {
482 static const enum index item_port[] = {
488 static const enum index item_raw[] = {
498 static const enum index item_eth[] = {
506 static const enum index item_vlan[] = {
516 static const enum index item_ipv4[] = {
526 static const enum index item_ipv6[] = {
537 static const enum index item_icmp[] = {
544 static const enum index item_udp[] = {
551 static const enum index item_tcp[] = {
559 static const enum index item_sctp[] = {
568 static const enum index item_vxlan[] = {
574 static const enum index item_e_tag[] = {
575 ITEM_E_TAG_GRP_ECID_B,
580 static const enum index item_nvgre[] = {
586 static const enum index item_mpls[] = {
592 static const enum index item_gre[] = {
598 static const enum index item_gtp[] = {
604 static const enum index next_action[] = {
620 static const enum index action_mark[] = {
626 static const enum index action_queue[] = {
632 static const enum index action_dup[] = {
638 static const enum index action_rss[] = {
644 static const enum index action_vf[] = {
651 static int parse_init(struct context *, const struct token *,
652 const char *, unsigned int,
653 void *, unsigned int);
654 static int parse_vc(struct context *, const struct token *,
655 const char *, unsigned int,
656 void *, unsigned int);
657 static int parse_vc_spec(struct context *, const struct token *,
658 const char *, unsigned int, void *, unsigned int);
659 static int parse_vc_conf(struct context *, const struct token *,
660 const char *, unsigned int, void *, unsigned int);
661 static int parse_vc_action_rss_queue(struct context *, const struct token *,
662 const char *, unsigned int, void *,
664 static int parse_destroy(struct context *, const struct token *,
665 const char *, unsigned int,
666 void *, unsigned int);
667 static int parse_flush(struct context *, const struct token *,
668 const char *, unsigned int,
669 void *, unsigned int);
670 static int parse_query(struct context *, const struct token *,
671 const char *, unsigned int,
672 void *, unsigned int);
673 static int parse_action(struct context *, const struct token *,
674 const char *, unsigned int,
675 void *, unsigned int);
676 static int parse_list(struct context *, const struct token *,
677 const char *, unsigned int,
678 void *, unsigned int);
679 static int parse_isolate(struct context *, const struct token *,
680 const char *, unsigned int,
681 void *, unsigned int);
682 static int parse_int(struct context *, const struct token *,
683 const char *, unsigned int,
684 void *, unsigned int);
685 static int parse_prefix(struct context *, const struct token *,
686 const char *, unsigned int,
687 void *, unsigned int);
688 static int parse_boolean(struct context *, const struct token *,
689 const char *, unsigned int,
690 void *, unsigned int);
691 static int parse_string(struct context *, const struct token *,
692 const char *, unsigned int,
693 void *, unsigned int);
694 static int parse_mac_addr(struct context *, const struct token *,
695 const char *, unsigned int,
696 void *, unsigned int);
697 static int parse_ipv4_addr(struct context *, const struct token *,
698 const char *, unsigned int,
699 void *, unsigned int);
700 static int parse_ipv6_addr(struct context *, const struct token *,
701 const char *, unsigned int,
702 void *, unsigned int);
703 static int parse_port(struct context *, const struct token *,
704 const char *, unsigned int,
705 void *, unsigned int);
706 static int comp_none(struct context *, const struct token *,
707 unsigned int, char *, unsigned int);
708 static int comp_boolean(struct context *, const struct token *,
709 unsigned int, char *, unsigned int);
710 static int comp_action(struct context *, const struct token *,
711 unsigned int, char *, unsigned int);
712 static int comp_port(struct context *, const struct token *,
713 unsigned int, char *, unsigned int);
714 static int comp_rule_id(struct context *, const struct token *,
715 unsigned int, char *, unsigned int);
716 static int comp_vc_action_rss_queue(struct context *, const struct token *,
717 unsigned int, char *, unsigned int);
719 /** Token definitions. */
720 static const struct token token_list[] = {
721 /* Special tokens. */
724 .help = "null entry, abused as the entry point",
725 .next = NEXT(NEXT_ENTRY(FLOW)),
730 .help = "command may end here",
736 .help = "integer value",
741 .name = "{unsigned}",
743 .help = "unsigned integer value",
750 .help = "prefix length for bit-mask",
751 .call = parse_prefix,
757 .help = "any boolean value",
758 .call = parse_boolean,
759 .comp = comp_boolean,
764 .help = "fixed string",
765 .call = parse_string,
769 .name = "{MAC address}",
771 .help = "standard MAC address notation",
772 .call = parse_mac_addr,
776 .name = "{IPv4 address}",
777 .type = "IPV4 ADDRESS",
778 .help = "standard IPv4 address notation",
779 .call = parse_ipv4_addr,
783 .name = "{IPv6 address}",
784 .type = "IPV6 ADDRESS",
785 .help = "standard IPv6 address notation",
786 .call = parse_ipv6_addr,
792 .help = "rule identifier",
794 .comp = comp_rule_id,
799 .help = "port identifier",
804 .name = "{group_id}",
806 .help = "group identifier",
813 .help = "priority level",
817 /* Top-level command. */
820 .type = "{command} {port_id} [{arg} [...]]",
821 .help = "manage ingress/egress flow rules",
822 .next = NEXT(NEXT_ENTRY
832 /* Sub-level commands. */
835 .help = "check whether a flow rule can be created",
836 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
837 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
842 .help = "create a flow rule",
843 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
844 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
849 .help = "destroy specific flow rules",
850 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
851 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
852 .call = parse_destroy,
856 .help = "destroy all flow rules",
857 .next = NEXT(NEXT_ENTRY(PORT_ID)),
858 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
863 .help = "query an existing flow rule",
864 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
866 NEXT_ENTRY(PORT_ID)),
867 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
868 ARGS_ENTRY(struct buffer, args.query.rule),
869 ARGS_ENTRY(struct buffer, port)),
874 .help = "list existing flow rules",
875 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
876 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
881 .help = "restrict ingress traffic to the defined flow rules",
882 .next = NEXT(NEXT_ENTRY(BOOLEAN),
883 NEXT_ENTRY(PORT_ID)),
884 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
885 ARGS_ENTRY(struct buffer, port)),
886 .call = parse_isolate,
888 /* Destroy arguments. */
891 .help = "specify a rule identifier",
892 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
893 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
894 .call = parse_destroy,
896 /* Query arguments. */
900 .help = "action to query, must be part of the rule",
901 .call = parse_action,
904 /* List arguments. */
907 .help = "specify a group",
908 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
909 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
912 /* Validate/create attributes. */
915 .help = "specify a group",
916 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
917 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
922 .help = "specify a priority level",
923 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
924 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
929 .help = "affect rule to ingress",
930 .next = NEXT(next_vc_attr),
935 .help = "affect rule to egress",
936 .next = NEXT(next_vc_attr),
939 /* Validate/create pattern. */
942 .help = "submit a list of pattern items",
943 .next = NEXT(next_item),
948 .help = "match value perfectly (with full bit-mask)",
949 .call = parse_vc_spec,
951 [ITEM_PARAM_SPEC] = {
953 .help = "match value according to configured bit-mask",
954 .call = parse_vc_spec,
956 [ITEM_PARAM_LAST] = {
958 .help = "specify upper bound to establish a range",
959 .call = parse_vc_spec,
961 [ITEM_PARAM_MASK] = {
963 .help = "specify bit-mask with relevant bits set to one",
964 .call = parse_vc_spec,
966 [ITEM_PARAM_PREFIX] = {
968 .help = "generate bit-mask from a prefix length",
969 .call = parse_vc_spec,
973 .help = "specify next pattern item",
974 .next = NEXT(next_item),
978 .help = "end list of pattern items",
979 .priv = PRIV_ITEM(END, 0),
980 .next = NEXT(NEXT_ENTRY(ACTIONS)),
985 .help = "no-op pattern item",
986 .priv = PRIV_ITEM(VOID, 0),
987 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
992 .help = "perform actions when pattern does not match",
993 .priv = PRIV_ITEM(INVERT, 0),
994 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
999 .help = "match any protocol for the current layer",
1000 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1001 .next = NEXT(item_any),
1006 .help = "number of layers covered",
1007 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1008 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1012 .help = "match packets addressed to the physical function",
1013 .priv = PRIV_ITEM(PF, 0),
1014 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1019 .help = "match packets addressed to a virtual function ID",
1020 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1021 .next = NEXT(item_vf),
1026 .help = "destination VF ID",
1027 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1028 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1032 .help = "device-specific physical port index to use",
1033 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1034 .next = NEXT(item_port),
1037 [ITEM_PORT_INDEX] = {
1039 .help = "physical port index",
1040 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1041 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1045 .help = "match an arbitrary byte string",
1046 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1047 .next = NEXT(item_raw),
1050 [ITEM_RAW_RELATIVE] = {
1052 .help = "look for pattern after the previous item",
1053 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1054 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1057 [ITEM_RAW_SEARCH] = {
1059 .help = "search pattern from offset (see also limit)",
1060 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1061 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1064 [ITEM_RAW_OFFSET] = {
1066 .help = "absolute or relative offset for pattern",
1067 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1068 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1070 [ITEM_RAW_LIMIT] = {
1072 .help = "search area limit for start of pattern",
1073 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1074 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1076 [ITEM_RAW_PATTERN] = {
1078 .help = "byte string to look for",
1079 .next = NEXT(item_raw,
1081 NEXT_ENTRY(ITEM_PARAM_IS,
1084 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1085 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1087 ITEM_RAW_PATTERN_SIZE)),
1091 .help = "match Ethernet header",
1092 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1093 .next = NEXT(item_eth),
1098 .help = "destination MAC",
1099 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1100 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1104 .help = "source MAC",
1105 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1106 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1110 .help = "EtherType",
1111 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1112 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1116 .help = "match 802.1Q/ad VLAN tag",
1117 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1118 .next = NEXT(item_vlan),
1121 [ITEM_VLAN_TPID] = {
1123 .help = "tag protocol identifier",
1124 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1125 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1129 .help = "tag control information",
1130 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1131 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1135 .help = "priority code point",
1136 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1137 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1142 .help = "drop eligible indicator",
1143 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1144 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1149 .help = "VLAN identifier",
1150 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1151 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1156 .help = "match IPv4 header",
1157 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1158 .next = NEXT(item_ipv4),
1163 .help = "type of service",
1164 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1165 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1166 hdr.type_of_service)),
1170 .help = "time to live",
1171 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1172 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1175 [ITEM_IPV4_PROTO] = {
1177 .help = "next protocol ID",
1178 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1179 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1180 hdr.next_proto_id)),
1184 .help = "source address",
1185 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1186 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1191 .help = "destination address",
1192 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1193 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1198 .help = "match IPv6 header",
1199 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1200 .next = NEXT(item_ipv6),
1205 .help = "traffic class",
1206 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1207 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1209 "\x0f\xf0\x00\x00")),
1211 [ITEM_IPV6_FLOW] = {
1213 .help = "flow label",
1214 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1215 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1217 "\x00\x0f\xff\xff")),
1219 [ITEM_IPV6_PROTO] = {
1221 .help = "protocol (next header)",
1222 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1223 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1228 .help = "hop limit",
1229 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1235 .help = "source address",
1236 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1237 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1242 .help = "destination address",
1243 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1249 .help = "match ICMP header",
1250 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1251 .next = NEXT(item_icmp),
1254 [ITEM_ICMP_TYPE] = {
1256 .help = "ICMP packet type",
1257 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1261 [ITEM_ICMP_CODE] = {
1263 .help = "ICMP packet code",
1264 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1265 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1270 .help = "match UDP header",
1271 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1272 .next = NEXT(item_udp),
1277 .help = "UDP source port",
1278 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1284 .help = "UDP destination port",
1285 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1286 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1291 .help = "match TCP header",
1292 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1293 .next = NEXT(item_tcp),
1298 .help = "TCP source port",
1299 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1300 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1305 .help = "TCP destination port",
1306 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1307 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1310 [ITEM_TCP_FLAGS] = {
1312 .help = "TCP flags",
1313 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1314 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1319 .help = "match SCTP header",
1320 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1321 .next = NEXT(item_sctp),
1326 .help = "SCTP source port",
1327 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1333 .help = "SCTP destination port",
1334 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1335 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1340 .help = "validation tag",
1341 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1342 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1345 [ITEM_SCTP_CKSUM] = {
1348 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1349 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1354 .help = "match VXLAN header",
1355 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1356 .next = NEXT(item_vxlan),
1359 [ITEM_VXLAN_VNI] = {
1361 .help = "VXLAN identifier",
1362 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1363 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1367 .help = "match E-Tag header",
1368 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1369 .next = NEXT(item_e_tag),
1372 [ITEM_E_TAG_GRP_ECID_B] = {
1373 .name = "grp_ecid_b",
1374 .help = "GRP and E-CID base",
1375 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1376 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1382 .help = "match NVGRE header",
1383 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1384 .next = NEXT(item_nvgre),
1387 [ITEM_NVGRE_TNI] = {
1389 .help = "virtual subnet ID",
1390 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1391 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1395 .help = "match MPLS header",
1396 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1397 .next = NEXT(item_mpls),
1400 [ITEM_MPLS_LABEL] = {
1402 .help = "MPLS label",
1403 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1404 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1410 .help = "match GRE header",
1411 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1412 .next = NEXT(item_gre),
1415 [ITEM_GRE_PROTO] = {
1417 .help = "GRE protocol type",
1418 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1419 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1424 .help = "fuzzy pattern match, expect faster than default",
1425 .priv = PRIV_ITEM(FUZZY,
1426 sizeof(struct rte_flow_item_fuzzy)),
1427 .next = NEXT(item_fuzzy),
1430 [ITEM_FUZZY_THRESH] = {
1432 .help = "match accuracy threshold",
1433 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1434 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1439 .help = "match GTP header",
1440 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1441 .next = NEXT(item_gtp),
1446 .help = "tunnel endpoint identifier",
1447 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1448 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1452 .help = "match GTP header",
1453 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1454 .next = NEXT(item_gtp),
1459 .help = "match GTP header",
1460 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1461 .next = NEXT(item_gtp),
1465 /* Validate/create actions. */
1468 .help = "submit a list of associated actions",
1469 .next = NEXT(next_action),
1474 .help = "specify next action",
1475 .next = NEXT(next_action),
1479 .help = "end list of actions",
1480 .priv = PRIV_ACTION(END, 0),
1485 .help = "no-op action",
1486 .priv = PRIV_ACTION(VOID, 0),
1487 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1490 [ACTION_PASSTHRU] = {
1492 .help = "let subsequent rule process matched packets",
1493 .priv = PRIV_ACTION(PASSTHRU, 0),
1494 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1499 .help = "attach 32 bit value to packets",
1500 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1501 .next = NEXT(action_mark),
1504 [ACTION_MARK_ID] = {
1506 .help = "32 bit value to return with packets",
1507 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1508 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1509 .call = parse_vc_conf,
1513 .help = "flag packets",
1514 .priv = PRIV_ACTION(FLAG, 0),
1515 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1520 .help = "assign packets to a given queue index",
1521 .priv = PRIV_ACTION(QUEUE,
1522 sizeof(struct rte_flow_action_queue)),
1523 .next = NEXT(action_queue),
1526 [ACTION_QUEUE_INDEX] = {
1528 .help = "queue index to use",
1529 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1530 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1531 .call = parse_vc_conf,
1535 .help = "drop packets (note: passthru has priority)",
1536 .priv = PRIV_ACTION(DROP, 0),
1537 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1542 .help = "enable counters for this rule",
1543 .priv = PRIV_ACTION(COUNT, 0),
1544 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1549 .help = "duplicate packets to a given queue index",
1550 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1551 .next = NEXT(action_dup),
1554 [ACTION_DUP_INDEX] = {
1556 .help = "queue index to duplicate packets to",
1557 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1558 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1559 .call = parse_vc_conf,
1563 .help = "spread packets among several queues",
1564 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1565 .next = NEXT(action_rss),
1568 [ACTION_RSS_QUEUES] = {
1570 .help = "queue indices to use",
1571 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1572 .call = parse_vc_conf,
1574 [ACTION_RSS_QUEUE] = {
1576 .help = "queue index",
1577 .call = parse_vc_action_rss_queue,
1578 .comp = comp_vc_action_rss_queue,
1582 .help = "redirect packets to physical device function",
1583 .priv = PRIV_ACTION(PF, 0),
1584 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1589 .help = "redirect packets to virtual device function",
1590 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1591 .next = NEXT(action_vf),
1594 [ACTION_VF_ORIGINAL] = {
1596 .help = "use original VF ID if possible",
1597 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1598 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1600 .call = parse_vc_conf,
1604 .help = "VF ID to redirect packets to",
1605 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1606 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1607 .call = parse_vc_conf,
1611 /** Remove and return last entry from argument stack. */
1612 static const struct arg *
1613 pop_args(struct context *ctx)
1615 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1618 /** Add entry on top of the argument stack. */
1620 push_args(struct context *ctx, const struct arg *arg)
1622 if (ctx->args_num == CTX_STACK_SIZE)
1624 ctx->args[ctx->args_num++] = arg;
1628 /** Spread value into buffer according to bit-mask. */
/* Writes the low-order bits of `val` into the bytes of `dst + arg->offset`
 * selected by arg->mask, one mask bit at a time (set mask bit => copy next
 * bit of val, clearing the destination bit first).
 * NOTE(review): this snapshot shows only the little-endian branch and is
 * missing several lines (loop setup, byte advance, return) — verify the
 * big-endian path and return value against the full file. */
1630 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1632 uint32_t i = arg->size;
1640 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1649 unsigned int shift = 0;
1650 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Iterate over every bit position covered by this mask byte. */
1652 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1653 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then deposit the next bit of val. */
1658 *buf &= ~(1 << shift);
1659 *buf |= (val & 1) << shift;
1667 /** Compare a string with a partial one of a given length. */
/* Returns 0 when `partial` (of length partial_len) is exactly `full`;
 * nonzero otherwise. When the prefix matches but `full` is longer, the
 * first unmatched character of `full` is returned as the difference. */
1669 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1671 int r = strncmp(full, partial, partial_len);
/* Prefix matched and full is not longer => exact match (0 expected). */
1675 if (strlen(full) <= partial_len)
1677 return full[partial_len];
1681 * Parse a prefix length and generate a bit-mask.
1683 * Last argument (ctx->args) is retrieved to determine mask size, storage
1684 * location and whether the result must use network byte ordering.
/* On success the mask (bytes of 0xff followed by a partial byte from
 * conv[] and then zeroes, byte order per RTE_BYTE_ORDER) is written into
 * ctx->object at arg->offset; ctx->objmask, when set, is filled with 0xff.
 * NOTE(review): snapshot is missing intermediate lines (variable
 * declarations, error paths, return) — verify against the full file. */
1687 parse_prefix(struct context *ctx, const struct token *token,
1688 const char *str, unsigned int len,
1689 void *buf, unsigned int size)
1691 const struct arg *arg = pop_args(ctx);
/* conv[i] is a byte with the i most-significant bits set. */
1692 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1699 /* Argument is expected. */
/* Entire token must be consumed as an unsigned integer. */
1703 u = strtoumax(str, &end, 0);
1704 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments are delegated to arg_entry_bf_fill(). */
1709 extra = arg_entry_bf_fill(NULL, 0, arg);
1718 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1719 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix must fit within the destination field. */
1726 if (bytes > size || bytes + !!extra > size)
1730 buf = (uint8_t *)ctx->object + arg->offset;
1731 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little endian: set bits live at the high-address end of the field. */
1733 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1734 memset(buf, 0x00, size - bytes);
1736 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big endian: set bits start at the low-address end. */
1740 memset(buf, 0xff, bytes);
1741 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1743 ((uint8_t *)buf)[bytes] = conv[extra];
1746 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument for the caller's stack discipline. */
1749 push_args(ctx, arg);
1753 /** Default parsing function for token name matching. */
/* Accepts the token when `str` (length `len`) is a full match of
 * token->name; output buffer is unused. */
1755 parse_default(struct context *ctx, const struct token *token,
1756 const char *str, unsigned int len,
1757 void *buf, unsigned int size)
1762 if (strcmp_partial(token->name, str, len))
1767 /** Parse flow command, initialize output buffer for subsequent tokens. */
/* Zeroes the struct buffer header and poisons the remainder of the
 * caller-provided buffer with 0x22 to catch uninitialized reads.
 * NOTE(review): snapshot is missing error-return lines — verify. */
1769 parse_init(struct context *ctx, const struct token *token,
1770 const char *str, unsigned int len,
1771 void *buf, unsigned int size)
1773 struct buffer *out = buf;
1775 /* Token name must match. */
1776 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1778 /* Nothing else to do if there is no buffer. */
1781 /* Make sure buffer is large enough. */
1782 if (size < sizeof(*out))
1784 /* Initialize buffer. */
1785 memset(out, 0x00, sizeof(*out));
/* 0x22 poison marks the scratch area past the header. */
1786 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1789 ctx->objmask = NULL;
1793 /** Parse tokens for validate/create commands. */
/* Handles the whole validate/create token stream: records the command,
 * sets attribute flags, lays out the pattern item and action arrays after
 * the header (data area grows downward from the end of the buffer), and
 * points ctx->object at the element being filled.
 * NOTE(review): snapshot is missing many lines (case labels, error
 * returns, braces) — treat structure comments below as approximate and
 * verify against the full file. */
1795 parse_vc(struct context *ctx, const struct token *token,
1796 const char *str, unsigned int len,
1797 void *buf, unsigned int size)
1799 struct buffer *out = buf;
1803 /* Token name must match. */
1804 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1806 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE. */
1809 if (!out->command) {
1810 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1812 if (sizeof(*out) > size)
1814 out->command = ctx->curr;
1817 ctx->objmask = NULL;
/* Variable-length data is carved from the tail of the buffer. */
1818 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write into out->args.vc.attr. */
1822 ctx->object = &out->args.vc.attr;
1823 ctx->objmask = NULL;
1824 switch (ctx->curr) {
1829 out->args.vc.attr.ingress = 1;
1832 out->args.vc.attr.egress = 1;
/* Pattern array starts right after the header, aligned. */
1835 out->args.vc.pattern =
1836 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1838 ctx->object = out->args.vc.pattern;
1839 ctx->objmask = NULL;
/* Action array follows the pattern array, aligned. */
1842 out->args.vc.actions =
1843 (void *)RTE_ALIGN_CEIL((uintptr_t)
1844 (out->args.vc.pattern +
1845 out->args.vc.pattern_n),
1847 ctx->object = out->args.vc.actions;
1848 ctx->objmask = NULL;
/* Still in the pattern section: append a new rte_flow_item. */
1855 if (!out->args.vc.actions) {
1856 const struct parse_item_priv *priv = token->priv;
1857 struct rte_flow_item *item =
1858 out->args.vc.pattern + out->args.vc.pattern_n;
1860 data_size = priv->size * 3; /* spec, last, mask */
1861 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1862 (out->args.vc.data - data_size),
/* Fail when item array and data area would collide. */
1864 if ((uint8_t *)item + sizeof(*item) > data)
1866 *item = (struct rte_flow_item){
1869 ++out->args.vc.pattern_n;
1871 ctx->objmask = NULL;
/* Otherwise append a new rte_flow_action. */
1873 const struct parse_action_priv *priv = token->priv;
1874 struct rte_flow_action *action =
1875 out->args.vc.actions + out->args.vc.actions_n;
1877 data_size = priv->size; /* configuration */
1878 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1879 (out->args.vc.data - data_size),
1881 if ((uint8_t *)action + sizeof(*action) > data)
1883 *action = (struct rte_flow_action){
1886 ++out->args.vc.actions_n;
1887 ctx->object = action;
1888 ctx->objmask = NULL;
/* Common tail: reserve and zero the per-entry data slot. */
1890 memset(data, 0, data_size);
1891 out->args.vc.data = data;
1892 ctx->objdata = data_size;
1896 /** Parse pattern item parameter type. */
/* Selects which of spec/last/mask/prefix the next value token applies to
 * for the current pattern item, and points ctx->object (and ctx->objmask
 * when relevant) at the corresponding slot in the per-item data area.
 * NOTE(review): snapshot is missing lines (index assignments per case,
 * error paths) — verify against the full file. */
1898 parse_vc_spec(struct context *ctx, const struct token *token,
1899 const char *str, unsigned int len,
1900 void *buf, unsigned int size)
1902 struct buffer *out = buf;
1903 struct rte_flow_item *item;
1909 /* Token name must match. */
1910 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1912 /* Parse parameter types. */
1913 switch (ctx->curr) {
1914 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1920 case ITEM_PARAM_SPEC:
1923 case ITEM_PARAM_LAST:
1926 case ITEM_PARAM_PREFIX:
1927 /* Modify next token to expect a prefix. */
1928 if (ctx->next_num < 2)
1930 ctx->next[ctx->next_num - 2] = prefix;
1932 case ITEM_PARAM_MASK:
1938 /* Nothing else to do if there is no buffer. */
1941 if (!out->args.vc.pattern_n)
/* Operate on the most recently added pattern item. */
1943 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1944 data_size = ctx->objdata / 3; /* spec, last, mask */
1945 /* Point to selected object. */
1946 ctx->object = out->args.vc.data + (data_size * index);
1948 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1949 item->mask = ctx->objmask;
1951 ctx->objmask = NULL;
1952 /* Update relevant item pointer. */
1953 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1958 /** Parse action configuration field. */
/* Binds the current data slot (out->args.vc.data) as the configuration
 * object of the most recently added action, so subsequent value tokens
 * write into it. */
1960 parse_vc_conf(struct context *ctx, const struct token *token,
1961 const char *str, unsigned int len,
1962 void *buf, unsigned int size)
1964 struct buffer *out = buf;
1965 struct rte_flow_action *action;
1968 /* Token name must match. */
1969 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1971 /* Nothing else to do if there is no buffer. */
/* An action must already exist to attach configuration to. */
1974 if (!out->args.vc.actions_n)
1976 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1977 /* Point to selected object. */
1978 ctx->object = out->args.vc.data;
1979 ctx->objmask = NULL;
1980 /* Update configuration pointer. */
1981 action->conf = ctx->object;
1986 * Parse queue field for RSS action.
1988 * Valid tokens are queue indices and the "end" token.
/* The running queue count is stashed in the upper 16 bits of
 * ctx->objdata; each queue index is parsed into queue[i] via parse_int()
 * after pushing the matching argument descriptor, and "end" finalizes
 * rte_flow_action_rss.num.
 * NOTE(review): snapshot is missing error paths — verify. */
1991 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1992 const char *str, unsigned int len,
1993 void *buf, unsigned int size)
1995 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2002 if (ctx->curr != ACTION_RSS_QUEUE)
/* Recover queue count from the high half of objdata. */
2004 i = ctx->objdata >> 16;
2005 if (!strcmp_partial("end", str, len)) {
2006 ctx->objdata &= 0xffff;
/* Reject more queues than the action can hold. */
2009 if (i >= ACTION_RSS_NUM)
2011 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
2013 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the high half. */
2019 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
2021 if (ctx->next_num == RTE_DIM(ctx->next))
2023 ctx->next[ctx->next_num++] = next;
2026 ((struct rte_flow_action_rss *)ctx->object)->num = i;
2030 /** Parse tokens for destroy command. */
/* First call records the DESTROY command and places the rule-ID array
 * after the header; each later call appends one rule-ID slot, bounds-
 * checked against the end of the buffer. */
2032 parse_destroy(struct context *ctx, const struct token *token,
2033 const char *str, unsigned int len,
2034 void *buf, unsigned int size)
2036 struct buffer *out = buf;
2038 /* Token name must match. */
2039 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2041 /* Nothing else to do if there is no buffer. */
2044 if (!out->command) {
2045 if (ctx->curr != DESTROY)
2047 if (sizeof(*out) > size)
2049 out->command = ctx->curr;
2052 ctx->objmask = NULL;
2053 out->args.destroy.rule =
2054 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next rule-ID slot would overrun the buffer. */
2058 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2059 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2062 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2063 ctx->objmask = NULL;
2067 /** Parse tokens for flush command. */
/* Records the FLUSH command on the first matching token; flush takes no
 * further arguments beyond the port. */
2069 parse_flush(struct context *ctx, const struct token *token,
2070 const char *str, unsigned int len,
2071 void *buf, unsigned int size)
2073 struct buffer *out = buf;
2075 /* Token name must match. */
2076 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2078 /* Nothing else to do if there is no buffer. */
2081 if (!out->command) {
2082 if (ctx->curr != FLUSH)
2084 if (sizeof(*out) > size)
2086 out->command = ctx->curr;
2089 ctx->objmask = NULL;
2094 /** Parse tokens for query command. */
/* Records the QUERY command on the first matching token; the rule ID and
 * action arguments are filled by later tokens (lines not visible in this
 * snapshot). */
2096 parse_query(struct context *ctx, const struct token *token,
2097 const char *str, unsigned int len,
2098 void *buf, unsigned int size)
2100 struct buffer *out = buf;
2102 /* Token name must match. */
2103 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2105 /* Nothing else to do if there is no buffer. */
2108 if (!out->command) {
2109 if (ctx->curr != QUERY)
2111 if (sizeof(*out) > size)
2113 out->command = ctx->curr;
2116 ctx->objmask = NULL;
2121 /** Parse action names. */
/* Matches `str` against the names of all entries in next_action[] and
 * stores the matching action's identifier into the destination described
 * by the popped argument.
 * NOTE(review): snapshot is missing the match body and copy source —
 * verify against the full file. */
2123 parse_action(struct context *ctx, const struct token *token,
2124 const char *str, unsigned int len,
2125 void *buf, unsigned int size)
2127 struct buffer *out = buf;
2128 const struct arg *arg = pop_args(ctx);
2132 /* Argument is expected. */
2135 /* Parse action name. */
2136 for (i = 0; next_action[i]; ++i) {
2137 const struct parse_action_priv *priv;
2139 token = &token_list[next_action[i]];
2140 if (strcmp_partial(token->name, str, len))
2146 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure paths. */
2152 push_args(ctx, arg);
2156 /** Parse tokens for list command. */
/* First call records the LIST command and places the group-ID array after
 * the header; each later call appends one group-ID slot with a bounds
 * check, mirroring parse_destroy(). */
2158 parse_list(struct context *ctx, const struct token *token,
2159 const char *str, unsigned int len,
2160 void *buf, unsigned int size)
2162 struct buffer *out = buf;
2164 /* Token name must match. */
2165 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2167 /* Nothing else to do if there is no buffer. */
2170 if (!out->command) {
2171 if (ctx->curr != LIST)
2173 if (sizeof(*out) > size)
2175 out->command = ctx->curr;
2178 ctx->objmask = NULL;
2179 out->args.list.group =
2180 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next group slot would overrun the buffer. */
2184 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2185 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2188 ctx->object = out->args.list.group + out->args.list.group_n++;
2189 ctx->objmask = NULL;
2193 /** Parse tokens for isolate command. */
/* Records the ISOLATE command on the first matching token; the boolean
 * set/unset argument is handled by later tokens (not visible here). */
2195 parse_isolate(struct context *ctx, const struct token *token,
2196 const char *str, unsigned int len,
2197 void *buf, unsigned int size)
2199 struct buffer *out = buf;
2201 /* Token name must match. */
2202 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2204 /* Nothing else to do if there is no buffer. */
2207 if (!out->command) {
2208 if (ctx->curr != ISOLATE)
2210 if (sizeof(*out) > size)
2212 out->command = ctx->curr;
2215 ctx->objmask = NULL;
2221 * Parse signed/unsigned integers 8 to 64-bit long.
2223 * Last argument (ctx->args) is retrieved to determine integer type and
/* Converts the token with strtoimax/strtoumax (sign chosen per the
 * argument descriptor), then stores the value into ctx->object at the
 * width given by arg->size, honoring arg->hton for network byte order;
 * ctx->objmask, when set, receives an all-ones copy of the same width.
 * NOTE(review): snapshot is missing break statements, error paths and
 * the objmask fill — verify against the full file. */
2227 parse_int(struct context *ctx, const struct token *token,
2228 const char *str, unsigned int len,
2229 void *buf, unsigned int size)
2231 const struct arg *arg = pop_args(ctx);
2236 /* Argument is expected. */
2241 (uintmax_t)strtoimax(str, &end, 0) :
2242 strtoumax(str, &end, 0);
/* Entire token must be consumed and in range. */
2243 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments are delegated to arg_entry_bf_fill(). */
2248 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2249 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2253 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination width. */
2257 case sizeof(uint8_t):
2258 *(uint8_t *)buf = u;
2260 case sizeof(uint16_t):
2261 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
2263 case sizeof(uint8_t [3]):
2264 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* 24-bit field, little-endian host: low byte first. */
2266 ((uint8_t *)buf)[0] = u;
2267 ((uint8_t *)buf)[1] = u >> 8;
2268 ((uint8_t *)buf)[2] = u >> 16;
/* 24-bit field, big-endian host: high byte first. */
2272 ((uint8_t *)buf)[0] = u >> 16;
2273 ((uint8_t *)buf)[1] = u >> 8;
2274 ((uint8_t *)buf)[2] = u;
2276 case sizeof(uint32_t):
2277 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2279 case sizeof(uint64_t):
2280 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask unless it aliases the object. */
2285 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2287 buf = (uint8_t *)ctx->objmask + arg->offset;
2292 push_args(ctx, arg);
2299 * Two arguments (ctx->args) are retrieved from the stack to store data and
2300 * its length (in that order).
/* Copies the raw token bytes into the data field, pads the remainder of
 * the field with 0x55, and first routes the length through parse_int()
 * (re-rendered as decimal text in tmp[]) so the length field is filled.
 * NOTE(review): snapshot is missing error paths — verify. */
2303 parse_string(struct context *ctx, const struct token *token,
2304 const char *str, unsigned int len,
2305 void *buf, unsigned int size)
2307 const struct arg *arg_data = pop_args(ctx);
2308 const struct arg *arg_len = pop_args(ctx);
2309 char tmp[16]; /* Ought to be enough. */
2312 /* Arguments are expected. */
/* Restore arg_data when arg_len was missing. */
2316 push_args(ctx, arg_data);
2319 size = arg_data->size;
2320 /* Bit-mask fill is not supported. */
2321 if (arg_data->mask || size < len)
2325 /* Let parse_int() fill length information first. */
2326 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2329 push_args(ctx, arg_len);
2330 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2335 buf = (uint8_t *)ctx->object + arg_data->offset;
2336 /* Output buffer is not necessarily NUL-terminated. */
2337 memcpy(buf, str, len);
/* 0x55 marks unused tail bytes of the field. */
2338 memset((uint8_t *)buf + len, 0x55, size - len);
2340 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Restore both arguments on failure, in reverse pop order. */
2343 push_args(ctx, arg_len);
2344 push_args(ctx, arg_data);
2349 * Parse a MAC address.
2351 * Last argument (ctx->args) is retrieved to determine storage size and
/* Uses cmdline_parse_etheraddr() into a local ether_addr, then copies it
 * into ctx->object at arg->offset; objmask, when set, is filled with
 * 0xff. Destination must be exactly sizeof(struct ether_addr). */
2355 parse_mac_addr(struct context *ctx, const struct token *token,
2356 const char *str, unsigned int len,
2357 void *buf, unsigned int size)
2359 const struct arg *arg = pop_args(ctx);
2360 struct ether_addr tmp;
2364 /* Argument is expected. */
2368 /* Bit-mask fill is not supported. */
2369 if (arg->mask || size != sizeof(tmp))
2371 /* Only network endian is supported. */
2374 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must have been consumed. */
2375 if (ret < 0 || (unsigned int)ret != len)
2379 buf = (uint8_t *)ctx->object + arg->offset;
2380 memcpy(buf, &tmp, size);
2382 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2385 push_args(ctx, arg);
2390 * Parse an IPv4 address.
2392 * Last argument (ctx->args) is retrieved to determine storage size and
/* Converts dotted-quad text with inet_pton(AF_INET); when that fails the
 * token is retried as a plain integer via parse_int(). Result is copied
 * into ctx->object at arg->offset; objmask, when set, gets 0xff. */
2396 parse_ipv4_addr(struct context *ctx, const struct token *token,
2397 const char *str, unsigned int len,
2398 void *buf, unsigned int size)
2400 const struct arg *arg = pop_args(ctx);
2405 /* Argument is expected. */
2409 /* Bit-mask fill is not supported. */
2410 if (arg->mask || size != sizeof(tmp))
2412 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2415 memcpy(str2, str, len);
2417 ret = inet_pton(AF_INET, str2, &tmp);
2419 /* Attempt integer parsing. */
2420 push_args(ctx, arg);
2421 return parse_int(ctx, token, str, len, buf, size);
2425 buf = (uint8_t *)ctx->object + arg->offset;
2426 memcpy(buf, &tmp, size);
2428 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2431 push_args(ctx, arg);
2436 * Parse an IPv6 address.
2438 * Last argument (ctx->args) is retrieved to determine storage size and
/* Converts text with inet_pton(AF_INET6) into a local in6_addr and
 * copies it into ctx->object at arg->offset; objmask, when set, gets
 * 0xff. Unlike IPv4 there is no integer fallback. */
2442 parse_ipv6_addr(struct context *ctx, const struct token *token,
2443 const char *str, unsigned int len,
2444 void *buf, unsigned int size)
2446 const struct arg *arg = pop_args(ctx);
2448 struct in6_addr tmp;
2452 /* Argument is expected. */
2456 /* Bit-mask fill is not supported. */
2457 if (arg->mask || size != sizeof(tmp))
2459 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2462 memcpy(str2, str, len);
2464 ret = inet_pton(AF_INET6, str2, &tmp);
2469 buf = (uint8_t *)ctx->object + arg->offset;
2470 memcpy(buf, &tmp, size);
2472 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2475 push_args(ctx, arg);
2479 /** Boolean values (even indices stand for false). */
/* NOTE(review): initializer entries and the closing brace are missing
 * from this snapshot; per the comment, entries alternate false/true
 * spellings — verify the list against the full file. */
2480 static const char *const boolean_name[] = {
2489 * Parse a boolean value.
2491 * Last argument (ctx->args) is retrieved to determine storage size and
/* Matches the token against boolean_name[]; even indices mean false, odd
 * mean true. The token is then rewritten as "0"/"1" and delegated to
 * parse_int() for storage. Unrecognized tokens fall through to
 * parse_int() unchanged. */
2495 parse_boolean(struct context *ctx, const struct token *token,
2496 const char *str, unsigned int len,
2497 void *buf, unsigned int size)
2499 const struct arg *arg = pop_args(ctx);
2503 /* Argument is expected. */
2506 for (i = 0; boolean_name[i]; ++i)
2507 if (!strcmp_partial(boolean_name[i], str, len))
2509 /* Process token as integer. */
2510 if (boolean_name[i])
2511 str = i & 1 ? "1" : "0";
2512 push_args(ctx, arg);
2513 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the ORIGINAL token on success. */
2514 return ret > 0 ? (int)len : ret;
2517 /** Parse port and update context. */
/* Parses the port ID into a throwaway struct buffer via parse_int() and
 * records it in ctx->port for later tokens (e.g. completion helpers). */
2519 parse_port(struct context *ctx, const struct token *token,
2520 const char *str, unsigned int len,
2521 void *buf, unsigned int size)
/* Compound literal provides scratch storage when caller passed none. */
2523 struct buffer *out = &(struct buffer){ .port = 0 };
2531 ctx->objmask = NULL;
2532 size = sizeof(*out);
2534 ret = parse_int(ctx, token, str, len, out, size);
2536 ctx->port = out->port;
2542 /** No completion. */
/* Completion callback stub for tokens with nothing to complete. */
2544 comp_none(struct context *ctx, const struct token *token,
2545 unsigned int ent, char *buf, unsigned int size)
2555 /** Complete boolean values. */
/* With buf set, writes entry `ent` of boolean_name[] into buf; otherwise
 * the loop counts entries (count return not visible in snapshot). */
2557 comp_boolean(struct context *ctx, const struct token *token,
2558 unsigned int ent, char *buf, unsigned int size)
2564 for (i = 0; boolean_name[i]; ++i)
2565 if (buf && i == ent)
2566 return snprintf(buf, size, "%s", boolean_name[i]);
2572 /** Complete action names. */
/* Same pattern as comp_boolean() but over the next_action[] token IDs,
 * emitting each token's name. */
2574 comp_action(struct context *ctx, const struct token *token,
2575 unsigned int ent, char *buf, unsigned int size)
2581 for (i = 0; next_action[i]; ++i)
2582 if (buf && i == ent)
2583 return snprintf(buf, size, "%s",
2584 token_list[next_action[i]].name)&#59;
2590 /** Complete available ports. */
/* Iterates attached ethdev ports; with buf set, writes the `ent`-th port
 * ID as decimal text. */
2592 comp_port(struct context *ctx, const struct token *token,
2593 unsigned int ent, char *buf, unsigned int size)
2600 RTE_ETH_FOREACH_DEV(p) {
2601 if (buf && i == ent)
2602 return snprintf(buf, size, "%u", p);
2610 /** Complete available rule IDs. */
/* Walks the flow list of the port recorded in ctx->port (set earlier by
 * parse_port()); with buf set, writes the `ent`-th rule ID. Bails out
 * when the port is invalid or RTE_PORT_ALL. */
2612 comp_rule_id(struct context *ctx, const struct token *token,
2613 unsigned int ent, char *buf, unsigned int size)
2616 struct rte_port *port;
2617 struct port_flow *pf;
2620 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2621 ctx->port == (portid_t)RTE_PORT_ALL)
2623 port = &ports[ctx->port];
2624 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2625 if (buf && i == ent)
2626 return snprintf(buf, size, "%u", pf->id);
2634 /** Complete queue field for RSS action. */
/* Offers "" (free-form queue index) and the "end" terminator as the only
 * completion entries. */
2636 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2637 unsigned int ent, char *buf, unsigned int size)
2639 static const char *const str[] = { "", "end", NULL };
2644 for (i = 0; str[i] != NULL; ++i)
2645 if (buf && i == ent)
2646 return snprintf(buf, size, "%s", str[i]);
2652 /** Internal context. */
/* Single parser state shared by all cmdline callbacks below — the
 * cmdline API offers no per-invocation user data. */
2653 static struct context cmd_flow_context;
2655 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of the file. */
2656 cmdline_parse_inst_t cmd_flow;
2658 /** Initialize context. */
/* Resets parser state before a new command; the fields reset besides
 * objmask are not visible in this snapshot. */
2660 cmd_flow_context_init(struct context *ctx)
2662 /* A full memset() is not necessary. */
2672 ctx->objmask = NULL;
2675 /** Parse a token (cmdline API). */
/* Entry point called by librte_cmdline for each whitespace-delimited
 * token: measures the token (stopping at '#' or whitespace), detects
 * end-of-line, tries every candidate token from the top of the `next`
 * stack, and on a match pushes that token's own next-lists and argument
 * descriptors for subsequent calls.
 * NOTE(review): snapshot is missing error returns and some loop bodies —
 * verify against the full file. */
2677 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2680 struct context *ctx = &cmd_flow_context;
2681 const struct token *token;
2682 const enum index *list;
2687 token = &token_list[ctx->curr];
2688 /* Check argument length. */
/* Token ends at '#' (comment) or any whitespace. */
2691 for (len = 0; src[len]; ++len)
2692 if (src[len] == '#' || isspace(src[len]))
2696 /* Last argument and EOL detection. */
2697 for (i = len; src[i]; ++i)
2698 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2700 else if (!isspace(src[i])) {
2705 if (src[i] == '\r' || src[i] == '\n') {
2709 /* Initialize context if necessary. */
2710 if (!ctx->next_num) {
2713 ctx->next[ctx->next_num++] = token->next[0];
2715 /* Process argument through candidates. */
2716 ctx->prev = ctx->curr;
2717 list = ctx->next[ctx->next_num - 1];
2718 for (i = 0; list[i]; ++i) {
2719 const struct token *next = &token_list[list[i]];
2722 ctx->curr = list[i];
/* Prefer the token's own callback; fall back to name match. */
2724 tmp = next->call(ctx, next, src, len, result, size);
2726 tmp = parse_default(ctx, next, src, len, result, size);
/* Candidate must consume the whole token. */
2727 if (tmp == -1 || tmp != len)
2735 /* Push subsequent tokens if any. */
2737 for (i = 0; token->next[i]; ++i) {
2738 if (ctx->next_num == RTE_DIM(ctx->next))
2740 ctx->next[ctx->next_num++] = token->next[i];
2742 /* Push arguments if any. */
2744 for (i = 0; token->args[i]; ++i) {
2745 if (ctx->args_num == RTE_DIM(ctx->args))
2747 ctx->args[ctx->args_num++] = token->args[i];
2752 /** Return number of completion entries (cmdline API). */
/* Counts candidates in the current next-list; when exactly one candidate
 * exists and it has a completion callback, that callback supplies the
 * entry count instead. */
2754 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2756 struct context *ctx = &cmd_flow_context;
2757 const struct token *token = &token_list[ctx->curr];
2758 const enum index *list;
2762 /* Count number of tokens in current list. */
2764 list = ctx->next[ctx->next_num - 1];
2766 list = token->next[0];
2767 for (i = 0; list[i]; ++i)
2772 * If there is a single token, use its completion callback, otherwise
2773 * return the number of entries.
2775 token = &token_list[list[0]];
2776 if (i == 1 && token->comp) {
2777 /* Save index for cmd_flow_get_help(). */
2778 ctx->prev = list[0];
2779 return token->comp(ctx, token, 0, NULL, 0);
2784 /** Return a completion entry (cmdline API). */
/* Mirrors cmd_flow_complete_get_nb(): single-candidate lists delegate to
 * the token's completion callback with `index`; otherwise the index-th
 * candidate's name is copied into dst. */
2786 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2787 char *dst, unsigned int size)
2789 struct context *ctx = &cmd_flow_context;
2790 const struct token *token = &token_list[ctx->curr];
2791 const enum index *list;
2795 /* Count number of tokens in current list. */
2797 list = ctx->next[ctx->next_num - 1];
2799 list = token->next[0];
2800 for (i = 0; list[i]; ++i)
2804 /* If there is a single token, use its completion callback. */
2805 token = &token_list[list[0]];
2806 if (i == 1 && token->comp) {
2807 /* Save index for cmd_flow_get_help(). */
2808 ctx->prev = list[0];
2809 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2811 /* Otherwise make sure the index is valid and use defaults. */
2814 token = &token_list[list[index]];
2815 snprintf(dst, size, "%s", token->name);
2816 /* Save index for cmd_flow_get_help(). */
2817 ctx->prev = list[index];
2821 /** Populate help strings for current token (cmdline API). */
/* Writes the token's type (or "TOKEN") into dst and points the global
 * cmd_flow.help_str at the token's help text, falling back to its name. */
2823 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2825 struct context *ctx = &cmd_flow_context;
2826 const struct token *token = &token_list[ctx->prev];
2831 /* Set token type and update global help with details. */
2832 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
2834 cmd_flow.help_str = token->help;
2836 cmd_flow.help_str = token->name;
2840 /** Token definition template (cmdline API). */
/* Every dynamic token returned by cmd_flow_tok() shares this single ops
 * table, routing all cmdline callbacks into the functions above. */
2841 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2842 .ops = &(struct cmdline_token_ops){
2843 .parse = cmd_flow_parse,
2844 .complete_get_nb = cmd_flow_complete_get_nb,
2845 .complete_get_elt = cmd_flow_complete_get_elt,
2846 .get_help = cmd_flow_get_help,
2851 /** Populate the next dynamic token. */
/* Called by the cmdline engine to generate tokens one at a time:
 * reinitializes the context for the first token, stops emitting tokens
 * when no candidates remain, and checks whether EOL may legally end the
 * command here.
 * NOTE(review): snapshot is missing several lines (loop body deciding
 * whether END is among the candidates) — verify against the full file. */
2853 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2854 cmdline_parse_token_hdr_t **hdr_inst)
2856 struct context *ctx = &cmd_flow_context;
2858 /* Always reinitialize context before requesting the first token. */
2859 if (!(hdr_inst - cmd_flow.tokens))
2860 cmd_flow_context_init(ctx);
2861 /* Return NULL when no more tokens are expected. */
2862 if (!ctx->next_num && ctx->curr) {
2866 /* Determine if command should end here. */
2867 if (ctx->eol && ctx->last && ctx->next_num) {
2868 const enum index *list = ctx->next[ctx->next_num - 1];
2871 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share the same static header. */
2878 *hdr = &cmd_flow_token_hdr;
2881 /** Dispatch parsed buffer to function calls. */
/* Final step: routes the fully parsed command buffer to the matching
 * port_flow_*() implementation in testpmd's config layer. */
2883 cmd_flow_parsed(const struct buffer *in)
2885 switch (in->command) {
2887 port_flow_validate(in->port, &in->args.vc.attr,
2888 in->args.vc.pattern, in->args.vc.actions);
2891 port_flow_create(in->port, &in->args.vc.attr,
2892 in->args.vc.pattern, in->args.vc.actions);
2895 port_flow_destroy(in->port, in->args.destroy.rule_n,
2896 in->args.destroy.rule);
2899 port_flow_flush(in->port);
2902 port_flow_query(in->port, in->args.query.rule,
2903 in->args.query.action);
2906 port_flow_list(in->port, in->args.list.group_n,
2907 in->args.list.group);
2910 port_flow_isolate(in->port, in->args.isolate.set);
2917 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose callback: generates tokens via cmd_flow_tok() during
 * parsing and dispatches the completed buffer via cmd_flow_parsed().
 * The branch selecting between the two is not visible in this snapshot. */
2919 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2922 cmd_flow_tok(arg0, arg2);
2924 cmd_flow_parsed(arg0);
2927 /** Global parser instance (cmdline API). */
2928 cmdline_parse_inst_t cmd_flow = {
2930 .data = NULL, /**< Unused. */
2931 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2934 }, /**< Tokens are returned by cmd_flow_tok(). */