4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
85 /* Destroy arguments. */
88 /* Query arguments. */
94 /* Validate/create arguments. */
100 /* Validate/create pattern. */
165 ITEM_E_TAG_GRP_ECID_B,
173 /* Validate/create actions. */
197 /** Size of pattern[] field in struct rte_flow_item_raw. */
198 #define ITEM_RAW_PATTERN_SIZE 36
/*
 * NOTE(review): struct rte_flow_item_raw carries its byte string in a
 * trailing pattern[] member, so total storage is computed as the offset
 * of that member plus the fixed capacity above rather than with a plain
 * sizeof() -- confirm against the rte_flow.h definition.
 */
200 /** Storage size for struct rte_flow_item_raw including pattern. */
201 #define ITEM_RAW_SIZE \
202 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
204 /** Number of queue[] entries in struct rte_flow_action_rss. */
205 #define ACTION_RSS_NUM 32
/*
 * Same sizing scheme as ITEM_RAW_SIZE: offsetof() of the trailing
 * queue[] member plus room for ACTION_RSS_NUM entries.  The
 * sizeof(*((struct rte_flow_action_rss *)0)->queue) expression yields
 * the size of one queue element without needing a live object.
 */
207 /** Storage size for struct rte_flow_action_rss including queues. */
208 #define ACTION_RSS_SIZE \
209 (offsetof(struct rte_flow_action_rss, queue) + \
210 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
/* Depth of the next[] and args[] stacks in struct context below. */
212 /** Maximum number of subsequent tokens and arguments on the stack. */
213 #define CTX_STACK_SIZE 16
215 /** Parser context. */
217 /** Stack of subsequent token lists to process. */
218 const enum index *next[CTX_STACK_SIZE];
219 /** Arguments for stacked tokens. */
220 const void *args[CTX_STACK_SIZE];
221 enum index curr; /**< Current token index. */
222 enum index prev; /**< Index of the last token seen. */
223 int next_num; /**< Number of entries in next[]. */
224 int args_num; /**< Number of entries in args[]. */
225 uint32_t reparse:1; /**< Start over from the beginning. */
226 uint32_t eol:1; /**< EOL has been detected. */
227 uint32_t last:1; /**< No more arguments. */
228 uint16_t port; /**< Current port ID (for completions). */
229 uint32_t objdata; /**< Object-specific data. */
230 void *object; /**< Address of current object for relative offsets. */
231 void *objmask; /**< Object a full mask must be written to. */
234 /** Token argument. */
236 uint32_t hton:1; /**< Use network byte ordering. */
237 uint32_t sign:1; /**< Value is signed. */
238 uint32_t offset; /**< Relative offset from ctx->object. */
239 uint32_t size; /**< Field size. */
240 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
243 /** Parser token definition. */
245 /** Type displayed during completion (defaults to "TOKEN"). */
247 /** Help displayed during completion (defaults to token name). */
249 /** Private data used by parser functions. */
252 * Lists of subsequent tokens to push on the stack. Each call to the
253 * parser consumes the last entry of that stack.
255 const enum index *const *next;
256 /** Arguments stack for subsequent tokens that need them. */
257 const struct arg *const *args;
259 * Token-processing callback, returns -1 in case of error, the
260 * length of the matched string otherwise. If NULL, attempts to
261 * match the token name.
263 * If buf is not NULL, the result should be stored in it according
264 * to context. An error is returned if not large enough.
266 int (*call)(struct context *ctx, const struct token *token,
267 const char *str, unsigned int len,
268 void *buf, unsigned int size);
270 * Callback that provides possible values for this token, used for
271 * completion. Returns -1 in case of error, the number of possible
272 * values otherwise. If NULL, the token name is used.
274 * If buf is not NULL, entry index ent is written to buf and the
275 * full length of the entry is returned (same behavior as
278 int (*comp)(struct context *ctx, const struct token *token,
279 unsigned int ent, char *buf, unsigned int size);
280 /** Mandatory token name, no default value. */
284 /** Static initializer for the next field. */
/*
 * Compound-literal array of token-index lists, terminated with a NULL
 * sentinel entry.
 */
285 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
287 /** Static initializer for a NEXT() entry. */
/* Each entry is itself an index list terminated by the ZERO token. */
288 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
290 /** Static initializer for the args field. */
/* NULL-terminated list of struct arg pointers for stacked tokens. */
291 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
293 /** Static initializer for ARGS() to target a field. */
294 #define ARGS_ENTRY(s, f) \
295 (&(const struct arg){ \
296 .offset = offsetof(s, f), \
297 .size = sizeof(((s *)0)->f), \
300 /** Static initializer for ARGS() to target a bit-field. */
301 #define ARGS_ENTRY_BF(s, f, b) \
302 (&(const struct arg){ \
304 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
307 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
308 #define ARGS_ENTRY_MASK(s, f, m) \
309 (&(const struct arg){ \
310 .offset = offsetof(s, f), \
311 .size = sizeof(((s *)0)->f), \
312 .mask = (const void *)(m), \
315 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
316 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
317 (&(const struct arg){ \
319 .offset = offsetof(s, f), \
320 .size = sizeof(((s *)0)->f), \
321 .mask = (const void *)(m), \
324 /** Static initializer for ARGS() to target a pointer. */
325 #define ARGS_ENTRY_PTR(s, f) \
326 (&(const struct arg){ \
327 .size = sizeof(*((s *)0)->f), \
330 /** Static initializer for ARGS() with arbitrary size. */
331 #define ARGS_ENTRY_USZ(s, f, sz) \
332 (&(const struct arg){ \
333 .offset = offsetof(s, f), \
337 /** Same as ARGS_ENTRY() using network byte ordering. */
338 #define ARGS_ENTRY_HTON(s, f) \
339 (&(const struct arg){ \
341 .offset = offsetof(s, f), \
342 .size = sizeof(((s *)0)->f), \
345 /** Parser output buffer layout expected by cmd_flow_parsed(). */
347 enum index command; /**< Flow command. */
348 uint16_t port; /**< Affected port ID. */
351 struct rte_flow_attr attr;
352 struct rte_flow_item *pattern;
353 struct rte_flow_action *actions;
357 } vc; /**< Validate/create arguments. */
361 } destroy; /**< Destroy arguments. */
364 enum rte_flow_action_type action;
365 } query; /**< Query arguments. */
369 } list; /**< List arguments. */
372 } isolate; /**< Isolated mode arguments. */
373 } args; /**< Command arguments. */
376 /** Private data for pattern items. */
377 struct parse_item_priv {
378 enum rte_flow_item_type type; /**< Item type. */
379 uint32_t size; /**< Size of item specification structure. */
382 #define PRIV_ITEM(t, s) \
383 (&(const struct parse_item_priv){ \
384 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
388 /** Private data for actions. */
389 struct parse_action_priv {
390 enum rte_flow_action_type type; /**< Action type. */
391 uint32_t size; /**< Size of action configuration structure. */
394 #define PRIV_ACTION(t, s) \
395 (&(const struct parse_action_priv){ \
396 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
400 static const enum index next_vc_attr[] = {
409 static const enum index next_destroy_attr[] = {
415 static const enum index next_list_attr[] = {
421 static const enum index item_param[] = {
430 static const enum index next_item[] = {
455 static const enum index item_any[] = {
461 static const enum index item_vf[] = {
467 static const enum index item_port[] = {
473 static const enum index item_raw[] = {
483 static const enum index item_eth[] = {
491 static const enum index item_vlan[] = {
501 static const enum index item_ipv4[] = {
511 static const enum index item_ipv6[] = {
522 static const enum index item_icmp[] = {
529 static const enum index item_udp[] = {
536 static const enum index item_tcp[] = {
544 static const enum index item_sctp[] = {
553 static const enum index item_vxlan[] = {
559 static const enum index item_e_tag[] = {
560 ITEM_E_TAG_GRP_ECID_B,
565 static const enum index item_nvgre[] = {
571 static const enum index item_mpls[] = {
577 static const enum index item_gre[] = {
583 static const enum index next_action[] = {
599 static const enum index action_mark[] = {
605 static const enum index action_queue[] = {
611 static const enum index action_dup[] = {
617 static const enum index action_rss[] = {
623 static const enum index action_vf[] = {
/*
 * Forward declarations of token-processing callbacks; all share the
 * struct token "call" signature documented above (return -1 on error,
 * length of the matched string otherwise; result stored in buf when
 * non-NULL).
 */
630 static int parse_init(struct context *, const struct token *,
631 const char *, unsigned int,
632 void *, unsigned int);
633 static int parse_vc(struct context *, const struct token *,
634 const char *, unsigned int,
635 void *, unsigned int);
636 static int parse_vc_spec(struct context *, const struct token *,
637 const char *, unsigned int, void *, unsigned int);
638 static int parse_vc_conf(struct context *, const struct token *,
639 const char *, unsigned int, void *, unsigned int);
640 static int parse_vc_action_rss_queue(struct context *, const struct token *,
641 const char *, unsigned int, void *,
643 static int parse_destroy(struct context *, const struct token *,
644 const char *, unsigned int,
645 void *, unsigned int);
646 static int parse_flush(struct context *, const struct token *,
647 const char *, unsigned int,
648 void *, unsigned int);
649 static int parse_query(struct context *, const struct token *,
650 const char *, unsigned int,
651 void *, unsigned int);
652 static int parse_action(struct context *, const struct token *,
653 const char *, unsigned int,
654 void *, unsigned int);
655 static int parse_list(struct context *, const struct token *,
656 const char *, unsigned int,
657 void *, unsigned int);
658 static int parse_isolate(struct context *, const struct token *,
659 const char *, unsigned int,
660 void *, unsigned int);
661 static int parse_int(struct context *, const struct token *,
662 const char *, unsigned int,
663 void *, unsigned int);
664 static int parse_prefix(struct context *, const struct token *,
665 const char *, unsigned int,
666 void *, unsigned int);
667 static int parse_boolean(struct context *, const struct token *,
668 const char *, unsigned int,
669 void *, unsigned int);
670 static int parse_string(struct context *, const struct token *,
671 const char *, unsigned int,
672 void *, unsigned int);
673 static int parse_mac_addr(struct context *, const struct token *,
674 const char *, unsigned int,
675 void *, unsigned int);
676 static int parse_ipv4_addr(struct context *, const struct token *,
677 const char *, unsigned int,
678 void *, unsigned int);
679 static int parse_ipv6_addr(struct context *, const struct token *,
680 const char *, unsigned int,
681 void *, unsigned int);
682 static int parse_port(struct context *, const struct token *,
683 const char *, unsigned int,
684 void *, unsigned int);
/*
 * Forward declarations of completion callbacks; these follow the
 * struct token "comp" signature (return -1 on error, number of possible
 * values otherwise; entry index ent written to buf when non-NULL).
 */
685 static int comp_none(struct context *, const struct token *,
686 unsigned int, char *, unsigned int);
687 static int comp_boolean(struct context *, const struct token *,
688 unsigned int, char *, unsigned int);
689 static int comp_action(struct context *, const struct token *,
690 unsigned int, char *, unsigned int);
691 static int comp_port(struct context *, const struct token *,
692 unsigned int, char *, unsigned int);
693 static int comp_rule_id(struct context *, const struct token *,
694 unsigned int, char *, unsigned int);
695 static int comp_vc_action_rss_queue(struct context *, const struct token *,
696 unsigned int, char *, unsigned int);
698 /** Token definitions. */
699 static const struct token token_list[] = {
700 /* Special tokens. */
703 .help = "null entry, abused as the entry point",
704 .next = NEXT(NEXT_ENTRY(FLOW)),
709 .help = "command may end here",
715 .help = "integer value",
720 .name = "{unsigned}",
722 .help = "unsigned integer value",
729 .help = "prefix length for bit-mask",
730 .call = parse_prefix,
736 .help = "any boolean value",
737 .call = parse_boolean,
738 .comp = comp_boolean,
743 .help = "fixed string",
744 .call = parse_string,
748 .name = "{MAC address}",
750 .help = "standard MAC address notation",
751 .call = parse_mac_addr,
755 .name = "{IPv4 address}",
756 .type = "IPV4 ADDRESS",
757 .help = "standard IPv4 address notation",
758 .call = parse_ipv4_addr,
762 .name = "{IPv6 address}",
763 .type = "IPV6 ADDRESS",
764 .help = "standard IPv6 address notation",
765 .call = parse_ipv6_addr,
771 .help = "rule identifier",
773 .comp = comp_rule_id,
778 .help = "port identifier",
783 .name = "{group_id}",
785 .help = "group identifier",
792 .help = "priority level",
796 /* Top-level command. */
799 .type = "{command} {port_id} [{arg} [...]]",
800 .help = "manage ingress/egress flow rules",
801 .next = NEXT(NEXT_ENTRY
811 /* Sub-level commands. */
814 .help = "check whether a flow rule can be created",
815 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
816 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
821 .help = "create a flow rule",
822 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
823 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
828 .help = "destroy specific flow rules",
829 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
830 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
831 .call = parse_destroy,
835 .help = "destroy all flow rules",
836 .next = NEXT(NEXT_ENTRY(PORT_ID)),
837 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
842 .help = "query an existing flow rule",
843 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
845 NEXT_ENTRY(PORT_ID)),
846 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
847 ARGS_ENTRY(struct buffer, args.query.rule),
848 ARGS_ENTRY(struct buffer, port)),
853 .help = "list existing flow rules",
854 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
855 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
860 .help = "restrict ingress traffic to the defined flow rules",
861 .next = NEXT(NEXT_ENTRY(BOOLEAN),
862 NEXT_ENTRY(PORT_ID)),
863 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
864 ARGS_ENTRY(struct buffer, port)),
865 .call = parse_isolate,
867 /* Destroy arguments. */
870 .help = "specify a rule identifier",
871 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
872 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
873 .call = parse_destroy,
875 /* Query arguments. */
879 .help = "action to query, must be part of the rule",
880 .call = parse_action,
883 /* List arguments. */
886 .help = "specify a group",
887 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
888 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
891 /* Validate/create attributes. */
894 .help = "specify a group",
895 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
896 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
901 .help = "specify a priority level",
902 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
903 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
908 .help = "affect rule to ingress",
909 .next = NEXT(next_vc_attr),
914 .help = "affect rule to egress",
915 .next = NEXT(next_vc_attr),
918 /* Validate/create pattern. */
921 .help = "submit a list of pattern items",
922 .next = NEXT(next_item),
927 .help = "match value perfectly (with full bit-mask)",
928 .call = parse_vc_spec,
930 [ITEM_PARAM_SPEC] = {
932 .help = "match value according to configured bit-mask",
933 .call = parse_vc_spec,
935 [ITEM_PARAM_LAST] = {
937 .help = "specify upper bound to establish a range",
938 .call = parse_vc_spec,
940 [ITEM_PARAM_MASK] = {
942 .help = "specify bit-mask with relevant bits set to one",
943 .call = parse_vc_spec,
945 [ITEM_PARAM_PREFIX] = {
947 .help = "generate bit-mask from a prefix length",
948 .call = parse_vc_spec,
952 .help = "specify next pattern item",
953 .next = NEXT(next_item),
957 .help = "end list of pattern items",
958 .priv = PRIV_ITEM(END, 0),
959 .next = NEXT(NEXT_ENTRY(ACTIONS)),
964 .help = "no-op pattern item",
965 .priv = PRIV_ITEM(VOID, 0),
966 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
971 .help = "perform actions when pattern does not match",
972 .priv = PRIV_ITEM(INVERT, 0),
973 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
978 .help = "match any protocol for the current layer",
979 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
980 .next = NEXT(item_any),
985 .help = "number of layers covered",
986 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
987 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
991 .help = "match packets addressed to the physical function",
992 .priv = PRIV_ITEM(PF, 0),
993 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
998 .help = "match packets addressed to a virtual function ID",
999 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1000 .next = NEXT(item_vf),
1005 .help = "destination VF ID",
1006 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1007 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1011 .help = "device-specific physical port index to use",
1012 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1013 .next = NEXT(item_port),
1016 [ITEM_PORT_INDEX] = {
1018 .help = "physical port index",
1019 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1020 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1024 .help = "match an arbitrary byte string",
1025 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1026 .next = NEXT(item_raw),
1029 [ITEM_RAW_RELATIVE] = {
1031 .help = "look for pattern after the previous item",
1032 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1033 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1036 [ITEM_RAW_SEARCH] = {
1038 .help = "search pattern from offset (see also limit)",
1039 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1040 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1043 [ITEM_RAW_OFFSET] = {
1045 .help = "absolute or relative offset for pattern",
1046 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1047 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1049 [ITEM_RAW_LIMIT] = {
1051 .help = "search area limit for start of pattern",
1052 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1053 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1055 [ITEM_RAW_PATTERN] = {
1057 .help = "byte string to look for",
1058 .next = NEXT(item_raw,
1060 NEXT_ENTRY(ITEM_PARAM_IS,
1063 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1064 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1066 ITEM_RAW_PATTERN_SIZE)),
1070 .help = "match Ethernet header",
1071 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1072 .next = NEXT(item_eth),
1077 .help = "destination MAC",
1078 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1079 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1083 .help = "source MAC",
1084 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1085 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1089 .help = "EtherType",
1090 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1091 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1095 .help = "match 802.1Q/ad VLAN tag",
1096 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1097 .next = NEXT(item_vlan),
1100 [ITEM_VLAN_TPID] = {
1102 .help = "tag protocol identifier",
1103 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1104 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1108 .help = "tag control information",
1109 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1110 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1114 .help = "priority code point",
1115 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1116 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1121 .help = "drop eligible indicator",
1122 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1123 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1128 .help = "VLAN identifier",
1129 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1130 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1135 .help = "match IPv4 header",
1136 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1137 .next = NEXT(item_ipv4),
1142 .help = "type of service",
1143 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1144 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1145 hdr.type_of_service)),
1149 .help = "time to live",
1150 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1151 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1154 [ITEM_IPV4_PROTO] = {
1156 .help = "next protocol ID",
1157 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1159 hdr.next_proto_id)),
1163 .help = "source address",
1164 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1165 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1170 .help = "destination address",
1171 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1172 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1177 .help = "match IPv6 header",
1178 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1179 .next = NEXT(item_ipv6),
1184 .help = "traffic class",
1185 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1186 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1188 "\x0f\xf0\x00\x00")),
1190 [ITEM_IPV6_FLOW] = {
1192 .help = "flow label",
1193 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1194 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1196 "\x00\x0f\xff\xff")),
1198 [ITEM_IPV6_PROTO] = {
1200 .help = "protocol (next header)",
1201 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1202 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1207 .help = "hop limit",
1208 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1209 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1214 .help = "source address",
1215 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1216 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1221 .help = "destination address",
1222 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1223 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1228 .help = "match ICMP header",
1229 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1230 .next = NEXT(item_icmp),
1233 [ITEM_ICMP_TYPE] = {
1235 .help = "ICMP packet type",
1236 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1237 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1240 [ITEM_ICMP_CODE] = {
1242 .help = "ICMP packet code",
1243 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1249 .help = "match UDP header",
1250 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1251 .next = NEXT(item_udp),
1256 .help = "UDP source port",
1257 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1263 .help = "UDP destination port",
1264 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1265 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1270 .help = "match TCP header",
1271 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1272 .next = NEXT(item_tcp),
1277 .help = "TCP source port",
1278 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1284 .help = "TCP destination port",
1285 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1286 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1289 [ITEM_TCP_FLAGS] = {
1291 .help = "TCP flags",
1292 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1293 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1298 .help = "match SCTP header",
1299 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1300 .next = NEXT(item_sctp),
1305 .help = "SCTP source port",
1306 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1307 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1312 .help = "SCTP destination port",
1313 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1314 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1319 .help = "validation tag",
1320 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1321 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1324 [ITEM_SCTP_CKSUM] = {
1327 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1333 .help = "match VXLAN header",
1334 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1335 .next = NEXT(item_vxlan),
1338 [ITEM_VXLAN_VNI] = {
1340 .help = "VXLAN identifier",
1341 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1342 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1346 .help = "match E-Tag header",
1347 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1348 .next = NEXT(item_e_tag),
1351 [ITEM_E_TAG_GRP_ECID_B] = {
1352 .name = "grp_ecid_b",
1353 .help = "GRP and E-CID base",
1354 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1355 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1361 .help = "match NVGRE header",
1362 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1363 .next = NEXT(item_nvgre),
1366 [ITEM_NVGRE_TNI] = {
1368 .help = "virtual subnet ID",
1369 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1370 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1374 .help = "match MPLS header",
1375 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1376 .next = NEXT(item_mpls),
1379 [ITEM_MPLS_LABEL] = {
1381 .help = "MPLS label",
1382 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1389 .help = "match GRE header",
1390 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1391 .next = NEXT(item_gre),
1394 [ITEM_GRE_PROTO] = {
1396 .help = "GRE protocol type",
1397 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1398 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1401 /* Validate/create actions. */
1404 .help = "submit a list of associated actions",
1405 .next = NEXT(next_action),
1410 .help = "specify next action",
1411 .next = NEXT(next_action),
1415 .help = "end list of actions",
1416 .priv = PRIV_ACTION(END, 0),
1421 .help = "no-op action",
1422 .priv = PRIV_ACTION(VOID, 0),
1423 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1426 [ACTION_PASSTHRU] = {
1428 .help = "let subsequent rule process matched packets",
1429 .priv = PRIV_ACTION(PASSTHRU, 0),
1430 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1435 .help = "attach 32 bit value to packets",
1436 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1437 .next = NEXT(action_mark),
1440 [ACTION_MARK_ID] = {
1442 .help = "32 bit value to return with packets",
1443 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1444 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1445 .call = parse_vc_conf,
1449 .help = "flag packets",
1450 .priv = PRIV_ACTION(FLAG, 0),
1451 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1456 .help = "assign packets to a given queue index",
1457 .priv = PRIV_ACTION(QUEUE,
1458 sizeof(struct rte_flow_action_queue)),
1459 .next = NEXT(action_queue),
1462 [ACTION_QUEUE_INDEX] = {
1464 .help = "queue index to use",
1465 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1466 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1467 .call = parse_vc_conf,
1471 .help = "drop packets (note: passthru has priority)",
1472 .priv = PRIV_ACTION(DROP, 0),
1473 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1478 .help = "enable counters for this rule",
1479 .priv = PRIV_ACTION(COUNT, 0),
1480 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1485 .help = "duplicate packets to a given queue index",
1486 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1487 .next = NEXT(action_dup),
1490 [ACTION_DUP_INDEX] = {
1492 .help = "queue index to duplicate packets to",
1493 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1494 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1495 .call = parse_vc_conf,
1499 .help = "spread packets among several queues",
1500 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1501 .next = NEXT(action_rss),
1504 [ACTION_RSS_QUEUES] = {
1506 .help = "queue indices to use",
1507 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1508 .call = parse_vc_conf,
1510 [ACTION_RSS_QUEUE] = {
1512 .help = "queue index",
1513 .call = parse_vc_action_rss_queue,
1514 .comp = comp_vc_action_rss_queue,
1518 .help = "redirect packets to physical device function",
1519 .priv = PRIV_ACTION(PF, 0),
1520 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1525 .help = "redirect packets to virtual device function",
1526 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1527 .next = NEXT(action_vf),
1530 [ACTION_VF_ORIGINAL] = {
1532 .help = "use original VF ID if possible",
1533 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1534 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1536 .call = parse_vc_conf,
1540 .help = "VF ID to redirect packets to",
1541 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1542 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1543 .call = parse_vc_conf,
1547 /** Remove and return last entry from argument stack. */
/* Pops from ctx->args[]; yields NULL when the stack is empty. */
1548 static const struct arg *
1549 pop_args(struct context *ctx)
1551 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1554 /** Add entry on top of the argument stack. */
/*
 * NOTE(review): push_args()'s return type and its success/failure
 * return statements are elided in this extract; the CTX_STACK_SIZE
 * check below implies it rejects a push once the stack is full --
 * confirm against the complete source.
 */
1556 push_args(struct context *ctx, const struct arg *arg)
1558 if (ctx->args_num == CTX_STACK_SIZE)
1560 ctx->args[ctx->args_num++] = arg;
1564 /** Spread value into buffer according to bit-mask. */
1566 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1568 uint32_t i = arg->size;
1576 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1585 unsigned int shift = 0;
1586 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1588 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1589 if (!(arg->mask[i] & (1 << shift)))
1594 *buf &= ~(1 << shift);
1595 *buf |= (val & 1) << shift;
1604 * Parse a prefix length and generate a bit-mask.
1606 * Last argument (ctx->args) is retrieved to determine mask size, storage
1607 * location and whether the result must use network byte ordering.
1610 parse_prefix(struct context *ctx, const struct token *token,
1611 const char *str, unsigned int len,
1612 void *buf, unsigned int size)
1614 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set (0..8). */
1615 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1622 /* Argument is expected. */
/* Entire token must be numeric; base auto-detected (0). */
1626 u = strtoumax(str, &end, 0);
1627 if (errno || (size_t)(end - str) != len)
/* Bit-masked fields: ask arg_entry_bf_fill() for size first (NULL dst). */
1632 extra = arg_entry_bf_fill(NULL, 0, arg);
1641 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1642 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain byte-array fields: ensure prefix fits in the destination. */
1649 if (bytes > size || bytes + !!extra > size)
1653 buf = (uint8_t *)ctx->object + arg->offset;
1654 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little endian: fill from the end of the buffer backwards. */
1656 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1657 memset(buf, 0x00, size - bytes);
1659 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big endian: fill from the start of the buffer forwards. */
1663 memset(buf, 0xff, bytes);
1664 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1666 ((uint8_t *)buf)[bytes] = conv[extra];
/* objmask, when present, is fully set for this field. */
1669 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On error paths the argument is pushed back for the next attempt. */
1672 push_args(ctx, arg);
1676 /** Default parsing function for token name matching. */
1678 parse_default(struct context *ctx, const struct token *token,
1679 const char *str, unsigned int len,
1680 void *buf, unsigned int size)
/* Prefix comparison only — len characters of str against token->name. */
1685 if (strncmp(str, token->name, len))
1690 /** Parse flow command, initialize output buffer for subsequent tokens. */
1692 parse_init(struct context *ctx, const struct token *token,
1693 const char *str, unsigned int len,
1694 void *buf, unsigned int size)
1696 struct buffer *out = buf;
1698 /* Token name must match. */
1699 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1701 /* Nothing else to do if there is no buffer. */
1704 /* Make sure buffer is large enough. */
1705 if (size < sizeof(*out))
1707 /* Initialize buffer. */
/* 0x22 poison beyond the header makes use of uninitialized space visible. */
1708 memset(out, 0x00, sizeof(*out));
1709 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1712 ctx->objmask = NULL;
1716 /** Parse tokens for validate/create commands. */
1718 parse_vc(struct context *ctx, const struct token *token,
1719 const char *str, unsigned int len,
1720 void *buf, unsigned int size)
1722 struct buffer *out = buf;
1726 /* Token name must match. */
1727 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1729 /* Nothing else to do if there is no buffer. */
/* First invocation: record which command (validate/create) is running. */
1732 if (!out->command) {
1733 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1735 if (sizeof(*out) > size)
1737 out->command = ctx->curr;
1740 ctx->objmask = NULL;
/* Spec/mask data grows downwards from the end of the buffer. */
1741 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens (ingress/egress/...) write into the shared attr. */
1745 ctx->object = &out->args.vc.attr;
1746 ctx->objmask = NULL;
1747 switch (ctx->curr) {
1752 out->args.vc.attr.ingress = 1;
1755 out->args.vc.attr.egress = 1;
/* "pattern" token: item array starts right after the buffer header. */
1758 out->args.vc.pattern =
1759 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1761 ctx->object = out->args.vc.pattern;
1762 ctx->objmask = NULL;
/* "actions" token: action array starts after the last pattern item. */
1765 out->args.vc.actions =
1766 (void *)RTE_ALIGN_CEIL((uintptr_t)
1767 (out->args.vc.pattern +
1768 out->args.vc.pattern_n),
1770 ctx->object = out->args.vc.actions;
1771 ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action entry. */
1778 if (!out->args.vc.actions) {
1779 const struct parse_item_priv *priv = token->priv;
1780 struct rte_flow_item *item =
1781 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve room for spec/last/mask triplet at the top of the buffer. */
1783 data_size = priv->size * 3; /* spec, last, mask */
1784 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1785 (out->args.vc.data - data_size),
/* Fail when the item array (growing up) meets the data (growing down). */
1787 if ((uint8_t *)item + sizeof(*item) > data)
1789 *item = (struct rte_flow_item){
1792 ++out->args.vc.pattern_n;
1794 ctx->objmask = NULL;
1796 const struct parse_action_priv *priv = token->priv;
1797 struct rte_flow_action *action =
1798 out->args.vc.actions + out->args.vc.actions_n;
/* Actions reserve a single configuration object, no mask/last. */
1800 data_size = priv->size; /* configuration */
1801 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1802 (out->args.vc.data - data_size),
1804 if ((uint8_t *)action + sizeof(*action) > data)
1806 *action = (struct rte_flow_action){
1809 ++out->args.vc.actions_n;
1810 ctx->object = action;
1811 ctx->objmask = NULL;
1813 memset(data, 0, data_size);
1814 out->args.vc.data = data;
/* objdata carries the reserved size for later parameter parsing. */
1815 ctx->objdata = data_size;
1819 /** Parse pattern item parameter type. */
1821 parse_vc_spec(struct context *ctx, const struct token *token,
1822 const char *str, unsigned int len,
1823 void *buf, unsigned int size)
1825 struct buffer *out = buf;
1826 struct rte_flow_item *item;
1832 /* Token name must match. */
1833 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1835 /* Parse parameter types. */
1836 switch (ctx->curr) {
1837 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1843 case ITEM_PARAM_SPEC:
1846 case ITEM_PARAM_LAST:
1849 case ITEM_PARAM_PREFIX:
1850 /* Modify next token to expect a prefix. */
/* NOTE(review): requires at least two pending token lists so the
 * one *below* the top can be swapped — confirm why index -2. */
1851 if (ctx->next_num < 2)
1853 ctx->next[ctx->next_num - 2] = prefix;
1855 case ITEM_PARAM_MASK:
1861 /* Nothing else to do if there is no buffer. */
1864 if (!out->args.vc.pattern_n)
1866 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* Reserved data was spec+last+mask; each slice is one third. */
1867 data_size = ctx->objdata / 3; /* spec, last, mask */
1868 /* Point to selected object. */
1869 ctx->object = out->args.vc.data + (data_size * index);
/* Mask slice is always the third one. */
1871 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1872 item->mask = ctx->objmask;
1874 ctx->objmask = NULL;
1875 /* Update relevant item pointer. */
1876 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1881 /** Parse action configuration field. */
1883 parse_vc_conf(struct context *ctx, const struct token *token,
1884 const char *str, unsigned int len,
1885 void *buf, unsigned int size)
1887 struct buffer *out = buf;
1888 struct rte_flow_action *action;
1891 /* Token name must match. */
1892 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1894 /* Nothing else to do if there is no buffer. */
/* A configuration token only makes sense after at least one action. */
1897 if (!out->args.vc.actions_n)
1899 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1900 /* Point to selected object. */
1901 ctx->object = out->args.vc.data;
1902 ctx->objmask = NULL;
1903 /* Update configuration pointer. */
1904 action->conf = ctx->object;
1909 * Parse queue field for RSS action.
1911 * Valid tokens are queue indices and the "end" token.
1914 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1915 const char *str, unsigned int len,
1916 void *buf, unsigned int size)
1918 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1925 if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of objdata hold the running queue count. */
1927 i = ctx->objdata >> 16;
1928 if (!strncmp(str, "end", len)) {
1929 ctx->objdata &= 0xffff;
1932 if (i >= ACTION_RSS_NUM)
/* Delegate the numeric conversion of queue[i] to parse_int(). */
1934 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1936 ret = parse_int(ctx, token, str, len, NULL, 0);
1942 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-queue this same token so further indices can follow. */
1944 if (ctx->next_num == RTE_DIM(ctx->next))
1946 ctx->next[ctx->next_num++] = next;
1949 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1953 /** Parse tokens for destroy command. */
1955 parse_destroy(struct context *ctx, const struct token *token,
1956 const char *str, unsigned int len,
1957 void *buf, unsigned int size)
1959 struct buffer *out = buf;
1961 /* Token name must match. */
1962 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1964 /* Nothing else to do if there is no buffer. */
/* First invocation: record command, place rule-ID array after header. */
1967 if (!out->command) {
1968 if (ctx->curr != DESTROY)
1970 if (sizeof(*out) > size)
1972 out->command = ctx->curr;
1975 ctx->objmask = NULL;
1976 out->args.destroy.rule =
1977 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Each rule ID token appends an entry; bail if it would overflow size. */
1981 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1982 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1985 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1986 ctx->objmask = NULL;
1990 /** Parse tokens for flush command. */
1992 parse_flush(struct context *ctx, const struct token *token,
1993 const char *str, unsigned int len,
1994 void *buf, unsigned int size)
1996 struct buffer *out = buf;
1998 /* Token name must match. */
1999 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2001 /* Nothing else to do if there is no buffer. */
/* flush takes no arguments beyond the port: only record the command. */
2004 if (!out->command) {
2005 if (ctx->curr != FLUSH)
2007 if (sizeof(*out) > size)
2009 out->command = ctx->curr;
2012 ctx->objmask = NULL;
2017 /** Parse tokens for query command. */
2019 parse_query(struct context *ctx, const struct token *token,
2020 const char *str, unsigned int len,
2021 void *buf, unsigned int size)
2023 struct buffer *out = buf;
2025 /* Token name must match. */
2026 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2028 /* Nothing else to do if there is no buffer. */
/* First invocation: record command; rule/action args follow later. */
2031 if (!out->command) {
2032 if (ctx->curr != QUERY)
2034 if (sizeof(*out) > size)
2036 out->command = ctx->curr;
2039 ctx->objmask = NULL;
2044 /** Parse action names. */
2046 parse_action(struct context *ctx, const struct token *token,
2047 const char *str, unsigned int len,
2048 void *buf, unsigned int size)
2050 struct buffer *out = buf;
2051 const struct arg *arg = pop_args(ctx);
2055 /* Argument is expected. */
2058 /* Parse action name. */
/* Linear scan of the action token table for a prefix match. */
2059 for (i = 0; next_action[i]; ++i) {
2060 const struct parse_action_priv *priv;
2062 token = &token_list[next_action[i]];
2063 if (strncmp(token->name, str, len))
/* Store the matched action's type value into the argument field. */
2069 memcpy((uint8_t *)ctx->object + arg->offset,
/* On failure, restore the argument for the next candidate token. */
2075 push_args(ctx, arg);
2079 /** Parse tokens for list command. */
2081 parse_list(struct context *ctx, const struct token *token,
2082 const char *str, unsigned int len,
2083 void *buf, unsigned int size)
2085 struct buffer *out = buf;
2087 /* Token name must match. */
2088 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2090 /* Nothing else to do if there is no buffer. */
/* First invocation: record command, place group array after header. */
2093 if (!out->command) {
2094 if (ctx->curr != LIST)
2096 if (sizeof(*out) > size)
2098 out->command = ctx->curr;
2101 ctx->objmask = NULL;
2102 out->args.list.group =
2103 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Each group token appends an entry; bail if it would overflow size. */
2107 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2108 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2111 ctx->object = out->args.list.group + out->args.list.group_n++;
2112 ctx->objmask = NULL;
2116 /** Parse tokens for isolate command. */
2118 parse_isolate(struct context *ctx, const struct token *token,
2119 const char *str, unsigned int len,
2120 void *buf, unsigned int size)
2122 struct buffer *out = buf;
2124 /* Token name must match. */
2125 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2127 /* Nothing else to do if there is no buffer. */
/* First invocation: record command; the boolean "set" arg follows. */
2130 if (!out->command) {
2131 if (ctx->curr != ISOLATE)
2133 if (sizeof(*out) > size)
2135 out->command = ctx->curr;
2138 ctx->objmask = NULL;
2144 * Parse signed/unsigned integers 8 to 64-bit long.
2146 * Last argument (ctx->args) is retrieved to determine integer type and
2150 parse_int(struct context *ctx, const struct token *token,
2151 const char *str, unsigned int len,
2152 void *buf, unsigned int size)
2154 const struct arg *arg = pop_args(ctx);
2159 /* Argument is expected. */
/* Signedness selected by the argument descriptor; base auto-detected. */
2164 (uintmax_t)strtoimax(str, &end, 0) :
2165 strtoumax(str, &end, 0);
2166 if (errno || (size_t)(end - str) != len)
/* Bit-masked fields go through arg_entry_bf_fill(). */
2171 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2172 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2176 buf = (uint8_t *)ctx->object + arg->offset;
/* Plain fields: dispatch on storage size, honoring network byte order
 * when arg->hton is set. */
2180 case sizeof(uint8_t):
2181 *(uint8_t *)buf = u;
2183 case sizeof(uint16_t):
2184 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are stored byte by byte. */
2186 case sizeof(uint8_t [3]):
2187 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2189 ((uint8_t *)buf)[0] = u;
2190 ((uint8_t *)buf)[1] = u >> 8;
2191 ((uint8_t *)buf)[2] = u >> 16;
2195 ((uint8_t *)buf)[0] = u >> 16;
2196 ((uint8_t *)buf)[1] = u >> 8;
2197 ((uint8_t *)buf)[2] = u;
2199 case sizeof(uint32_t):
2200 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2202 case sizeof(uint64_t):
2203 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask unless it aliases the object. */
2208 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2210 buf = (uint8_t *)ctx->objmask + arg->offset;
/* On error, restore the argument for the next candidate token. */
2215 push_args(ctx, arg);
2222 * Two arguments (ctx->args) are retrieved from the stack to store data and
2223 * its length (in that order).
2226 parse_string(struct context *ctx, const struct token *token,
2227 const char *str, unsigned int len,
2228 void *buf, unsigned int size)
2230 const struct arg *arg_data = pop_args(ctx);
2231 const struct arg *arg_len = pop_args(ctx);
/* Scratch space to format the decimal length for parse_int(). */
2232 char tmp[16]; /* Ought to be enough. */
2235 /* Arguments are expected. */
/* If arg_len is missing, arg_data must be restored before failing. */
2239 push_args(ctx, arg_data);
2242 size = arg_data->size;
2243 /* Bit-mask fill is not supported. */
2244 if (arg_data->mask || size < len)
2248 /* Let parse_int() fill length information first. */
2249 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2252 push_args(ctx, arg_len);
2253 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2258 buf = (uint8_t *)ctx->object + arg_data->offset;
2259 /* Output buffer is not necessarily NUL-terminated. */
/* 0x55 poison past the copied bytes makes stale data detectable. */
2260 memcpy(buf, str, len);
2261 memset((uint8_t *)buf + len, 0x55, size - len);
2263 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Error path: restore both arguments in original stack order. */
2266 push_args(ctx, arg_len);
2267 push_args(ctx, arg_data);
2272 * Parse a MAC address.
2274 * Last argument (ctx->args) is retrieved to determine storage size and
2278 parse_mac_addr(struct context *ctx, const struct token *token,
2279 const char *str, unsigned int len,
2280 void *buf, unsigned int size)
2282 const struct arg *arg = pop_args(ctx);
2283 struct ether_addr tmp;
2287 /* Argument is expected. */
2291 /* Bit-mask fill is not supported. */
/* Destination must be exactly one ether_addr (6 bytes). */
2292 if (arg->mask || size != sizeof(tmp))
2294 /* Only network endian is supported. */
2297 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The parser must consume the whole token, not a prefix. */
2298 if (ret < 0 || (unsigned int)ret != len)
2302 buf = (uint8_t *)ctx->object + arg->offset;
2303 memcpy(buf, &tmp, size);
2305 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2308 push_args(ctx, arg);
2313 * Parse an IPv4 address.
2315 * Last argument (ctx->args) is retrieved to determine storage size and
2319 parse_ipv4_addr(struct context *ctx, const struct token *token,
2320 const char *str, unsigned int len,
2321 void *buf, unsigned int size)
2323 const struct arg *arg = pop_args(ctx);
2328 /* Argument is expected. */
2332 /* Bit-mask fill is not supported. */
2333 if (arg->mask || size != sizeof(tmp))
2335 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2338 memcpy(str2, str, len)
2340 ret = inet_pton(AF_INET, str2, &tmp);
2342 /* Attempt integer parsing. */
/* Dotted-quad failed: fall back to parse_int() (e.g. raw 32-bit value). */
2343 push_args(ctx, arg);
2344 return parse_int(ctx, token, str, len, buf, size);
2348 buf = (uint8_t *)ctx->object + arg->offset;
2349 memcpy(buf, &tmp, size);
2351 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2354 push_args(ctx, arg);
2359 * Parse an IPv6 address.
2361 * Last argument (ctx->args) is retrieved to determine storage size and
2365 parse_ipv6_addr(struct context *ctx, const struct token *token,
2366 const char *str, unsigned int len,
2367 void *buf, unsigned int size)
2369 const struct arg *arg = pop_args(ctx);
2371 struct in6_addr tmp;
2375 /* Argument is expected. */
2379 /* Bit-mask fill is not supported. */
2380 if (arg->mask || size != sizeof(tmp))
2382 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token.
 * NOTE(review): unlike IPv4, no integer fallback here. */
2385 memcpy(str2, str, len);
2387 ret = inet_pton(AF_INET6, str2, &tmp);
2392 buf = (uint8_t *)ctx->object + arg->offset;
2393 memcpy(buf, &tmp, size);
2395 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2398 push_args(ctx, arg);
2402 /** Boolean values (even indices stand for false). */
/* NULL-terminated; parity of the matched index gives the value. */
2403 static const char *const boolean_name[] = {
2412 * Parse a boolean value.
2414 * Last argument (ctx->args) is retrieved to determine storage size and
2418 parse_boolean(struct context *ctx, const struct token *token,
2419 const char *str, unsigned int len,
2420 void *buf, unsigned int size)
2422 const struct arg *arg = pop_args(ctx);
2426 /* Argument is expected. */
/* Match against the known boolean keywords first. */
2429 for (i = 0; boolean_name[i]; ++i)
2430 if (!strncmp(str, boolean_name[i], len))
2432 /* Process token as integer. */
/* Even index => false ("0"), odd => true ("1"); otherwise pass through. */
2433 if (boolean_name[i])
2434 str = i & 1 ? "1" : "0";
2435 push_args(ctx, arg);
2436 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not the rewritten one. */
2437 return ret > 0 ? (int)len : ret;
2440 /** Parse port and update context. */
2442 parse_port(struct context *ctx, const struct token *token,
2443 const char *str, unsigned int len,
2444 void *buf, unsigned int size)
/* Scratch buffer so the port ID can be captured into the context. */
2446 struct buffer *out = &(struct buffer){ .port = 0 };
2454 ctx->objmask = NULL;
2455 size = sizeof(*out);
2457 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the target port for later completion callbacks. */
2459 ctx->port = out->port;
2465 /** No completion. */
2467 comp_none(struct context *ctx, const struct token *token,
2468 unsigned int ent, char *buf, unsigned int size)
2478 /** Complete boolean values. */
2480 comp_boolean(struct context *ctx, const struct token *token,
2481 unsigned int ent, char *buf, unsigned int size)
/* With buf, emit entry #ent; without, fall through to count entries. */
2487 for (i = 0; boolean_name[i]; ++i)
2488 if (buf && i == ent)
2489 return snprintf(buf, size, "%s", boolean_name[i]);
2495 /** Complete action names. */
2497 comp_action(struct context *ctx, const struct token *token,
2498 unsigned int ent, char *buf, unsigned int size)
/* With buf, emit entry #ent; without, fall through to count entries. */
2504 for (i = 0; next_action[i]; ++i)
2505 if (buf && i == ent)
2506 return snprintf(buf, size, "%s",
2507 token_list[next_action[i]].name);
2513 /** Complete available ports. */
2515 comp_port(struct context *ctx, const struct token *token,
2516 unsigned int ent, char *buf, unsigned int size)
/* Enumerate live ethdev ports; emit entry #ent when buf is given. */
2523 RTE_ETH_FOREACH_DEV(p) {
2524 if (buf && i == ent)
2525 return snprintf(buf, size, "%u", p);
2533 /** Complete available rule IDs. */
2535 comp_rule_id(struct context *ctx, const struct token *token,
2536 unsigned int ent, char *buf, unsigned int size)
2539 struct rte_port *port;
2540 struct port_flow *pf;
/* Rule IDs only exist for a valid, specific port (not RTE_PORT_ALL). */
2543 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2544 ctx->port == (uint16_t)RTE_PORT_ALL)
2546 port = &ports[ctx->port];
/* Walk the port's flow list; emit entry #ent when buf is given. */
2547 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2548 if (buf && i == ent)
2549 return snprintf(buf, size, "%u", pf->id);
2557 /** Complete queue field for RSS action. */
2559 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2560 unsigned int ent, char *buf, unsigned int size)
/* Only "" (any index) and the terminating "end" keyword are offered. */
2562 static const char *const str[] = { "", "end", NULL };
2567 for (i = 0; str[i] != NULL; ++i)
2568 if (buf && i == ent)
2569 return snprintf(buf, size, "%s", str[i]);
2575 /** Internal context. */
/* Single shared parser state — the cmdline API is not reentrant here. */
2576 static struct context cmd_flow_context;
2578 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of the file. */
2579 cmdline_parse_inst_t cmd_flow;
2581 /** Initialize context. */
2583 cmd_flow_context_init(struct context *ctx)
2585 /* A full memset() is not necessary. */
2596 ctx->objmask = NULL;
2599 /** Parse a token (cmdline API). */
2601 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2604 struct context *ctx = &cmd_flow_context;
2605 const struct token *token;
2606 const enum index *list;
2611 /* Restart as requested. */
2613 cmd_flow_context_init(ctx);
2614 token = &token_list[ctx->curr];
2615 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2618 for (len = 0; src[len]; ++len)
2619 if (src[len] == '#' || isspace(src[len]))
2623 /* Last argument and EOL detection. */
2624 for (i = len; src[i]; ++i)
2625 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2627 else if (!isspace(src[i])) {
2632 if (src[i] == '\r' || src[i] == '\n') {
2636 /* Initialize context if necessary. */
2637 if (!ctx->next_num) {
2640 ctx->next[ctx->next_num++] = token->next[0];
2642 /* Process argument through candidates. */
2643 ctx->prev = ctx->curr;
/* Try every token in the top list until one accepts the full argument. */
2644 list = ctx->next[ctx->next_num - 1];
2645 for (i = 0; list[i]; ++i) {
2646 const struct token *next = &token_list[list[i]];
2649 ctx->curr = list[i];
2651 tmp = next->call(ctx, next, src, len, result, size);
2653 tmp = parse_default(ctx, next, src, len, result, size);
/* Partial matches are rejected: the callback must consume len bytes. */
2654 if (tmp == -1 || tmp != len)
2662 /* Push subsequent tokens if any. */
2664 for (i = 0; token->next[i]; ++i) {
2665 if (ctx->next_num == RTE_DIM(ctx->next))
2667 ctx->next[ctx->next_num++] = token->next[i];
2669 /* Push arguments if any. */
2671 for (i = 0; token->args[i]; ++i) {
2672 if (ctx->args_num == RTE_DIM(ctx->args))
2674 ctx->args[ctx->args_num++] = token->args[i];
2679 /** Return number of completion entries (cmdline API). */
2681 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2683 struct context *ctx = &cmd_flow_context;
2684 const struct token *token = &token_list[ctx->curr];
2685 const enum index *list;
2689 /* Tell cmd_flow_parse() that context must be reinitialized. */
2691 /* Count number of tokens in current list. */
2693 list = ctx->next[ctx->next_num - 1];
2695 list = token->next[0];
2696 for (i = 0; list[i]; ++i)
2701 * If there is a single token, use its completion callback, otherwise
2702 * return the number of entries.
2704 token = &token_list[list[0]];
2705 if (i == 1 && token->comp) {
2706 /* Save index for cmd_flow_get_help(). */
2707 ctx->prev = list[0];
/* NULL buf asks the callback for a count rather than an entry. */
2708 return token->comp(ctx, token, 0, NULL, 0);
2713 /** Return a completion entry (cmdline API). */
2715 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2716 char *dst, unsigned int size)
2718 struct context *ctx = &cmd_flow_context;
2719 const struct token *token = &token_list[ctx->curr];
2720 const enum index *list;
2724 /* Tell cmd_flow_parse() that context must be reinitialized. */
2726 /* Count number of tokens in current list. */
2728 list = ctx->next[ctx->next_num - 1];
2730 list = token->next[0];
2731 for (i = 0; list[i]; ++i)
2735 /* If there is a single token, use its completion callback. */
2736 token = &token_list[list[0]];
2737 if (i == 1 && token->comp) {
2738 /* Save index for cmd_flow_get_help(). */
2739 ctx->prev = list[0];
/* Normalize callback result to the 0/-1 contract of the cmdline API. */
2740 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2742 /* Otherwise make sure the index is valid and use defaults. */
2745 token = &token_list[list[index]];
2746 snprintf(dst, size, "%s", token->name);
2747 /* Save index for cmd_flow_get_help(). */
2748 ctx->prev = list[index];
2752 /** Populate help strings for current token (cmdline API). */
2754 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2756 struct context *ctx = &cmd_flow_context;
/* ctx->prev was recorded by the completion callbacks above. */
2757 const struct token *token = &token_list[ctx->prev];
2760 /* Tell cmd_flow_parse() that context must be reinitialized. */
2764 /* Set token type and update global help with details. */
2765 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help string; fall back to the token's name. */
2767 cmd_flow.help_str = token->help;
2769 cmd_flow.help_str = token->name;
2773 /** Token definition template (cmdline API). */
/* Every dynamic token shares this single ops vtable. */
2774 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2775 .ops = &(struct cmdline_token_ops){
2776 .parse = cmd_flow_parse,
2777 .complete_get_nb = cmd_flow_complete_get_nb,
2778 .complete_get_elt = cmd_flow_complete_get_elt,
2779 .get_help = cmd_flow_get_help,
2784 /** Populate the next dynamic token. */
2786 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2787 cmdline_parse_token_hdr_t *(*hdrs)[])
2789 struct context *ctx = &cmd_flow_context;
2791 /* Always reinitialize context before requesting the first token. */
2793 cmd_flow_context_init(ctx);
2794 /* Return NULL when no more tokens are expected. */
2795 if (!ctx->next_num && ctx->curr) {
2799 /* Determine if command should end here. */
2800 if (ctx->eol && ctx->last && ctx->next_num) {
2801 const enum index *list = ctx->next[ctx->next_num - 1];
/* NOTE(review): scans remaining candidates, presumably looking for
 * END to decide whether the command may terminate — elided here. */
2804 for (i = 0; list[i]; ++i) {
2811 *hdr = &cmd_flow_token_hdr;
2814 /** Dispatch parsed buffer to function calls. */
2816 cmd_flow_parsed(const struct buffer *in)
/* One port_flow_* backend call per parsed command. */
2818 switch (in->command) {
2820 port_flow_validate(in->port, &in->args.vc.attr,
2821 in->args.vc.pattern, in->args.vc.actions);
2824 port_flow_create(in->port, &in->args.vc.attr,
2825 in->args.vc.pattern, in->args.vc.actions);
2828 port_flow_destroy(in->port, in->args.destroy.rule_n,
2829 in->args.destroy.rule);
2832 port_flow_flush(in->port);
2835 port_flow_query(in->port, in->args.query.rule,
2836 in->args.query.action);
2839 port_flow_list(in->port, in->args.list.group_n,
2840 in->args.list.group);
2843 port_flow_isolate(in->port, in->args.isolate.set);
2850 /** Token generator and output processing callback (cmdline API). */
/* NOTE(review): arg0 is overloaded — used both as token header output for
 * cmd_flow_tok() and as the parsed buffer for cmd_flow_parsed(); the
 * branch selecting between the two is elided — confirm in full source. */
2852 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2855 cmd_flow_tok(arg0, arg2);
2857 cmd_flow_parsed(arg0);
2860 /** Global parser instance (cmdline API). */
2861 cmdline_parse_inst_t cmd_flow = {
2863 .data = NULL, /**< Unused. */
2864 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2867 }, /**< Tokens are returned by cmd_flow_tok(). */