4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
85 /* Destroy arguments. */
88 /* Query arguments. */
94 /* Validate/create arguments. */
100 /* Validate/create pattern. */
165 ITEM_E_TAG_GRP_ECID_B,
175 /* Validate/create actions. */
/** Number of bytes reserved for the pattern[] field of struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 36

/**
 * Bytes needed to store a struct rte_flow_item_raw together with its
 * trailing pattern[] data (header size plus ITEM_RAW_PATTERN_SIZE bytes).
 */
#define ITEM_RAW_SIZE \
	(ITEM_RAW_PATTERN_SIZE + offsetof(struct rte_flow_item_raw, pattern))
/** Capacity of the queue[] array in struct rte_flow_action_rss. */
#define ACTION_RSS_NUM 32

/**
 * Bytes needed to store a struct rte_flow_action_rss followed by
 * ACTION_RSS_NUM trailing queue[] entries.
 */
#define ACTION_RSS_SIZE \
	(offsetof(struct rte_flow_action_rss, queue) + \
	 ACTION_RSS_NUM * sizeof(((struct rte_flow_action_rss *)0)->queue[0]))
/**
 * Maximum number of subsequent tokens and arguments on the stack.
 *
 * Bounds the next[] and args[] arrays of struct context, which hold the
 * pending token lists and their argument descriptors during parsing.
 */
#define CTX_STACK_SIZE 16
217 /** Parser context. */
219 /** Stack of subsequent token lists to process. */
220 const enum index *next[CTX_STACK_SIZE];
221 /** Arguments for stacked tokens. */
222 const void *args[CTX_STACK_SIZE];
223 enum index curr; /**< Current token index. */
224 enum index prev; /**< Index of the last token seen. */
225 int next_num; /**< Number of entries in next[]. */
226 int args_num; /**< Number of entries in args[]. */
227 uint32_t reparse:1; /**< Start over from the beginning. */
228 uint32_t eol:1; /**< EOL has been detected. */
229 uint32_t last:1; /**< No more arguments. */
230 uint16_t port; /**< Current port ID (for completions). */
231 uint32_t objdata; /**< Object-specific data. */
232 void *object; /**< Address of current object for relative offsets. */
233 void *objmask; /**< Object a full mask must be written to. */
236 /** Token argument. */
238 uint32_t hton:1; /**< Use network byte ordering. */
239 uint32_t sign:1; /**< Value is signed. */
240 uint32_t offset; /**< Relative offset from ctx->object. */
241 uint32_t size; /**< Field size. */
242 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
245 /** Parser token definition. */
247 /** Type displayed during completion (defaults to "TOKEN"). */
249 /** Help displayed during completion (defaults to token name). */
251 /** Private data used by parser functions. */
254 * Lists of subsequent tokens to push on the stack. Each call to the
255 * parser consumes the last entry of that stack.
257 const enum index *const *next;
258 /** Arguments stack for subsequent tokens that need them. */
259 const struct arg *const *args;
261 * Token-processing callback, returns -1 in case of error, the
262 * length of the matched string otherwise. If NULL, attempts to
263 * match the token name.
265 * If buf is not NULL, the result should be stored in it according
266 * to context. An error is returned if not large enough.
268 int (*call)(struct context *ctx, const struct token *token,
269 const char *str, unsigned int len,
270 void *buf, unsigned int size);
272 * Callback that provides possible values for this token, used for
273 * completion. Returns -1 in case of error, the number of possible
274 * values otherwise. If NULL, the token name is used.
276 * If buf is not NULL, entry index ent is written to buf and the
277 * full length of the entry is returned (same behavior as
280 int (*comp)(struct context *ctx, const struct token *token,
281 unsigned int ent, char *buf, unsigned int size);
282 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field.
 *
 * Expands to a compound-literal array of token-index lists, terminated
 * by a NULL sentinel so consumers can iterate without a length.
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/**
 * Static initializer for a NEXT() entry.
 *
 * Expands to a compound-literal list of token indices ended by the
 * ZERO sentinel index.
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/**
 * Static initializer for the args field.
 *
 * Expands to a NULL-terminated compound-literal array of argument
 * descriptor pointers.
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
295 /** Static initializer for ARGS() to target a field. */
296 #define ARGS_ENTRY(s, f) \
297 (&(const struct arg){ \
298 .offset = offsetof(s, f), \
299 .size = sizeof(((s *)0)->f), \
302 /** Static initializer for ARGS() to target a bit-field. */
303 #define ARGS_ENTRY_BF(s, f, b) \
304 (&(const struct arg){ \
306 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
309 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
310 #define ARGS_ENTRY_MASK(s, f, m) \
311 (&(const struct arg){ \
312 .offset = offsetof(s, f), \
313 .size = sizeof(((s *)0)->f), \
314 .mask = (const void *)(m), \
317 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
318 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
319 (&(const struct arg){ \
321 .offset = offsetof(s, f), \
322 .size = sizeof(((s *)0)->f), \
323 .mask = (const void *)(m), \
326 /** Static initializer for ARGS() to target a pointer. */
327 #define ARGS_ENTRY_PTR(s, f) \
328 (&(const struct arg){ \
329 .size = sizeof(*((s *)0)->f), \
332 /** Static initializer for ARGS() with arbitrary size. */
333 #define ARGS_ENTRY_USZ(s, f, sz) \
334 (&(const struct arg){ \
335 .offset = offsetof(s, f), \
339 /** Same as ARGS_ENTRY() using network byte ordering. */
340 #define ARGS_ENTRY_HTON(s, f) \
341 (&(const struct arg){ \
343 .offset = offsetof(s, f), \
344 .size = sizeof(((s *)0)->f), \
347 /** Parser output buffer layout expected by cmd_flow_parsed(). */
349 enum index command; /**< Flow command. */
350 uint16_t port; /**< Affected port ID. */
353 struct rte_flow_attr attr;
354 struct rte_flow_item *pattern;
355 struct rte_flow_action *actions;
359 } vc; /**< Validate/create arguments. */
363 } destroy; /**< Destroy arguments. */
366 enum rte_flow_action_type action;
367 } query; /**< Query arguments. */
371 } list; /**< List arguments. */
374 } isolate; /**< Isolated mode arguments. */
375 } args; /**< Command arguments. */
378 /** Private data for pattern items. */
379 struct parse_item_priv {
380 enum rte_flow_item_type type; /**< Item type. */
381 uint32_t size; /**< Size of item specification structure. */
384 #define PRIV_ITEM(t, s) \
385 (&(const struct parse_item_priv){ \
386 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
390 /** Private data for actions. */
391 struct parse_action_priv {
392 enum rte_flow_action_type type; /**< Action type. */
393 uint32_t size; /**< Size of action configuration structure. */
396 #define PRIV_ACTION(t, s) \
397 (&(const struct parse_action_priv){ \
398 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
402 static const enum index next_vc_attr[] = {
411 static const enum index next_destroy_attr[] = {
417 static const enum index next_list_attr[] = {
423 static const enum index item_param[] = {
432 static const enum index next_item[] = {
458 static const enum index item_fuzzy[] = {
464 static const enum index item_any[] = {
470 static const enum index item_vf[] = {
476 static const enum index item_port[] = {
482 static const enum index item_raw[] = {
492 static const enum index item_eth[] = {
500 static const enum index item_vlan[] = {
510 static const enum index item_ipv4[] = {
520 static const enum index item_ipv6[] = {
531 static const enum index item_icmp[] = {
538 static const enum index item_udp[] = {
545 static const enum index item_tcp[] = {
553 static const enum index item_sctp[] = {
562 static const enum index item_vxlan[] = {
568 static const enum index item_e_tag[] = {
569 ITEM_E_TAG_GRP_ECID_B,
574 static const enum index item_nvgre[] = {
580 static const enum index item_mpls[] = {
586 static const enum index item_gre[] = {
592 static const enum index next_action[] = {
608 static const enum index action_mark[] = {
614 static const enum index action_queue[] = {
620 static const enum index action_dup[] = {
626 static const enum index action_rss[] = {
632 static const enum index action_vf[] = {
639 static int parse_init(struct context *, const struct token *,
640 const char *, unsigned int,
641 void *, unsigned int);
642 static int parse_vc(struct context *, const struct token *,
643 const char *, unsigned int,
644 void *, unsigned int);
645 static int parse_vc_spec(struct context *, const struct token *,
646 const char *, unsigned int, void *, unsigned int);
647 static int parse_vc_conf(struct context *, const struct token *,
648 const char *, unsigned int, void *, unsigned int);
649 static int parse_vc_action_rss_queue(struct context *, const struct token *,
650 const char *, unsigned int, void *,
652 static int parse_destroy(struct context *, const struct token *,
653 const char *, unsigned int,
654 void *, unsigned int);
655 static int parse_flush(struct context *, const struct token *,
656 const char *, unsigned int,
657 void *, unsigned int);
658 static int parse_query(struct context *, const struct token *,
659 const char *, unsigned int,
660 void *, unsigned int);
661 static int parse_action(struct context *, const struct token *,
662 const char *, unsigned int,
663 void *, unsigned int);
664 static int parse_list(struct context *, const struct token *,
665 const char *, unsigned int,
666 void *, unsigned int);
667 static int parse_isolate(struct context *, const struct token *,
668 const char *, unsigned int,
669 void *, unsigned int);
670 static int parse_int(struct context *, const struct token *,
671 const char *, unsigned int,
672 void *, unsigned int);
673 static int parse_prefix(struct context *, const struct token *,
674 const char *, unsigned int,
675 void *, unsigned int);
676 static int parse_boolean(struct context *, const struct token *,
677 const char *, unsigned int,
678 void *, unsigned int);
679 static int parse_string(struct context *, const struct token *,
680 const char *, unsigned int,
681 void *, unsigned int);
682 static int parse_mac_addr(struct context *, const struct token *,
683 const char *, unsigned int,
684 void *, unsigned int);
685 static int parse_ipv4_addr(struct context *, const struct token *,
686 const char *, unsigned int,
687 void *, unsigned int);
688 static int parse_ipv6_addr(struct context *, const struct token *,
689 const char *, unsigned int,
690 void *, unsigned int);
691 static int parse_port(struct context *, const struct token *,
692 const char *, unsigned int,
693 void *, unsigned int);
694 static int comp_none(struct context *, const struct token *,
695 unsigned int, char *, unsigned int);
696 static int comp_boolean(struct context *, const struct token *,
697 unsigned int, char *, unsigned int);
698 static int comp_action(struct context *, const struct token *,
699 unsigned int, char *, unsigned int);
700 static int comp_port(struct context *, const struct token *,
701 unsigned int, char *, unsigned int);
702 static int comp_rule_id(struct context *, const struct token *,
703 unsigned int, char *, unsigned int);
704 static int comp_vc_action_rss_queue(struct context *, const struct token *,
705 unsigned int, char *, unsigned int);
707 /** Token definitions. */
708 static const struct token token_list[] = {
709 /* Special tokens. */
712 .help = "null entry, abused as the entry point",
713 .next = NEXT(NEXT_ENTRY(FLOW)),
718 .help = "command may end here",
724 .help = "integer value",
729 .name = "{unsigned}",
731 .help = "unsigned integer value",
738 .help = "prefix length for bit-mask",
739 .call = parse_prefix,
745 .help = "any boolean value",
746 .call = parse_boolean,
747 .comp = comp_boolean,
752 .help = "fixed string",
753 .call = parse_string,
757 .name = "{MAC address}",
759 .help = "standard MAC address notation",
760 .call = parse_mac_addr,
764 .name = "{IPv4 address}",
765 .type = "IPV4 ADDRESS",
766 .help = "standard IPv4 address notation",
767 .call = parse_ipv4_addr,
771 .name = "{IPv6 address}",
772 .type = "IPV6 ADDRESS",
773 .help = "standard IPv6 address notation",
774 .call = parse_ipv6_addr,
780 .help = "rule identifier",
782 .comp = comp_rule_id,
787 .help = "port identifier",
792 .name = "{group_id}",
794 .help = "group identifier",
801 .help = "priority level",
805 /* Top-level command. */
808 .type = "{command} {port_id} [{arg} [...]]",
809 .help = "manage ingress/egress flow rules",
810 .next = NEXT(NEXT_ENTRY
820 /* Sub-level commands. */
823 .help = "check whether a flow rule can be created",
824 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
825 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
830 .help = "create a flow rule",
831 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
832 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
837 .help = "destroy specific flow rules",
838 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
839 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
840 .call = parse_destroy,
844 .help = "destroy all flow rules",
845 .next = NEXT(NEXT_ENTRY(PORT_ID)),
846 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
851 .help = "query an existing flow rule",
852 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
854 NEXT_ENTRY(PORT_ID)),
855 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
856 ARGS_ENTRY(struct buffer, args.query.rule),
857 ARGS_ENTRY(struct buffer, port)),
862 .help = "list existing flow rules",
863 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
864 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
869 .help = "restrict ingress traffic to the defined flow rules",
870 .next = NEXT(NEXT_ENTRY(BOOLEAN),
871 NEXT_ENTRY(PORT_ID)),
872 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
873 ARGS_ENTRY(struct buffer, port)),
874 .call = parse_isolate,
876 /* Destroy arguments. */
879 .help = "specify a rule identifier",
880 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
881 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
882 .call = parse_destroy,
884 /* Query arguments. */
888 .help = "action to query, must be part of the rule",
889 .call = parse_action,
892 /* List arguments. */
895 .help = "specify a group",
896 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
897 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
900 /* Validate/create attributes. */
903 .help = "specify a group",
904 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
905 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
910 .help = "specify a priority level",
911 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
912 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
917 .help = "affect rule to ingress",
918 .next = NEXT(next_vc_attr),
923 .help = "affect rule to egress",
924 .next = NEXT(next_vc_attr),
927 /* Validate/create pattern. */
930 .help = "submit a list of pattern items",
931 .next = NEXT(next_item),
936 .help = "match value perfectly (with full bit-mask)",
937 .call = parse_vc_spec,
939 [ITEM_PARAM_SPEC] = {
941 .help = "match value according to configured bit-mask",
942 .call = parse_vc_spec,
944 [ITEM_PARAM_LAST] = {
946 .help = "specify upper bound to establish a range",
947 .call = parse_vc_spec,
949 [ITEM_PARAM_MASK] = {
951 .help = "specify bit-mask with relevant bits set to one",
952 .call = parse_vc_spec,
954 [ITEM_PARAM_PREFIX] = {
956 .help = "generate bit-mask from a prefix length",
957 .call = parse_vc_spec,
961 .help = "specify next pattern item",
962 .next = NEXT(next_item),
966 .help = "end list of pattern items",
967 .priv = PRIV_ITEM(END, 0),
968 .next = NEXT(NEXT_ENTRY(ACTIONS)),
973 .help = "no-op pattern item",
974 .priv = PRIV_ITEM(VOID, 0),
975 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
980 .help = "perform actions when pattern does not match",
981 .priv = PRIV_ITEM(INVERT, 0),
982 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
987 .help = "match any protocol for the current layer",
988 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
989 .next = NEXT(item_any),
994 .help = "number of layers covered",
995 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
996 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1000 .help = "match packets addressed to the physical function",
1001 .priv = PRIV_ITEM(PF, 0),
1002 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1007 .help = "match packets addressed to a virtual function ID",
1008 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1009 .next = NEXT(item_vf),
1014 .help = "destination VF ID",
1015 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1016 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1020 .help = "device-specific physical port index to use",
1021 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1022 .next = NEXT(item_port),
1025 [ITEM_PORT_INDEX] = {
1027 .help = "physical port index",
1028 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1029 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1033 .help = "match an arbitrary byte string",
1034 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1035 .next = NEXT(item_raw),
1038 [ITEM_RAW_RELATIVE] = {
1040 .help = "look for pattern after the previous item",
1041 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1042 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1045 [ITEM_RAW_SEARCH] = {
1047 .help = "search pattern from offset (see also limit)",
1048 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1049 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1052 [ITEM_RAW_OFFSET] = {
1054 .help = "absolute or relative offset for pattern",
1055 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1056 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1058 [ITEM_RAW_LIMIT] = {
1060 .help = "search area limit for start of pattern",
1061 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1062 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1064 [ITEM_RAW_PATTERN] = {
1066 .help = "byte string to look for",
1067 .next = NEXT(item_raw,
1069 NEXT_ENTRY(ITEM_PARAM_IS,
1072 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1073 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1075 ITEM_RAW_PATTERN_SIZE)),
1079 .help = "match Ethernet header",
1080 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1081 .next = NEXT(item_eth),
1086 .help = "destination MAC",
1087 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1088 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1092 .help = "source MAC",
1093 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1094 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1098 .help = "EtherType",
1099 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1100 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1104 .help = "match 802.1Q/ad VLAN tag",
1105 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1106 .next = NEXT(item_vlan),
1109 [ITEM_VLAN_TPID] = {
1111 .help = "tag protocol identifier",
1112 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1113 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1117 .help = "tag control information",
1118 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1119 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1123 .help = "priority code point",
1124 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1125 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1130 .help = "drop eligible indicator",
1131 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1132 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1137 .help = "VLAN identifier",
1138 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1139 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1144 .help = "match IPv4 header",
1145 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1146 .next = NEXT(item_ipv4),
1151 .help = "type of service",
1152 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1153 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1154 hdr.type_of_service)),
1158 .help = "time to live",
1159 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1160 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1163 [ITEM_IPV4_PROTO] = {
1165 .help = "next protocol ID",
1166 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1167 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1168 hdr.next_proto_id)),
1172 .help = "source address",
1173 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1174 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1179 .help = "destination address",
1180 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1181 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1186 .help = "match IPv6 header",
1187 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1188 .next = NEXT(item_ipv6),
1193 .help = "traffic class",
1194 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1195 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1197 "\x0f\xf0\x00\x00")),
1199 [ITEM_IPV6_FLOW] = {
1201 .help = "flow label",
1202 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1203 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1205 "\x00\x0f\xff\xff")),
1207 [ITEM_IPV6_PROTO] = {
1209 .help = "protocol (next header)",
1210 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1211 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1216 .help = "hop limit",
1217 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1218 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1223 .help = "source address",
1224 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1225 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1230 .help = "destination address",
1231 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1232 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1237 .help = "match ICMP header",
1238 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1239 .next = NEXT(item_icmp),
1242 [ITEM_ICMP_TYPE] = {
1244 .help = "ICMP packet type",
1245 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1246 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1249 [ITEM_ICMP_CODE] = {
1251 .help = "ICMP packet code",
1252 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1253 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1258 .help = "match UDP header",
1259 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1260 .next = NEXT(item_udp),
1265 .help = "UDP source port",
1266 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1272 .help = "UDP destination port",
1273 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1274 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1279 .help = "match TCP header",
1280 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1281 .next = NEXT(item_tcp),
1286 .help = "TCP source port",
1287 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1288 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1293 .help = "TCP destination port",
1294 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1295 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1298 [ITEM_TCP_FLAGS] = {
1300 .help = "TCP flags",
1301 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1302 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1307 .help = "match SCTP header",
1308 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1309 .next = NEXT(item_sctp),
1314 .help = "SCTP source port",
1315 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1316 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1321 .help = "SCTP destination port",
1322 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1323 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1328 .help = "validation tag",
1329 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1330 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1333 [ITEM_SCTP_CKSUM] = {
1336 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1337 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1342 .help = "match VXLAN header",
1343 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1344 .next = NEXT(item_vxlan),
1347 [ITEM_VXLAN_VNI] = {
1349 .help = "VXLAN identifier",
1350 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1351 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1355 .help = "match E-Tag header",
1356 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1357 .next = NEXT(item_e_tag),
1360 [ITEM_E_TAG_GRP_ECID_B] = {
1361 .name = "grp_ecid_b",
1362 .help = "GRP and E-CID base",
1363 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1364 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1370 .help = "match NVGRE header",
1371 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1372 .next = NEXT(item_nvgre),
1375 [ITEM_NVGRE_TNI] = {
1377 .help = "virtual subnet ID",
1378 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1379 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1383 .help = "match MPLS header",
1384 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1385 .next = NEXT(item_mpls),
1388 [ITEM_MPLS_LABEL] = {
1390 .help = "MPLS label",
1391 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1392 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1398 .help = "match GRE header",
1399 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1400 .next = NEXT(item_gre),
1403 [ITEM_GRE_PROTO] = {
1405 .help = "GRE protocol type",
1406 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1407 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1412 .help = "fuzzy pattern match, expect faster than default",
1413 .priv = PRIV_ITEM(FUZZY,
1414 sizeof(struct rte_flow_item_fuzzy)),
1415 .next = NEXT(item_fuzzy),
1418 [ITEM_FUZZY_THRESH] = {
1420 .help = "match accuracy threshold",
1421 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1422 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1426 /* Validate/create actions. */
1429 .help = "submit a list of associated actions",
1430 .next = NEXT(next_action),
1435 .help = "specify next action",
1436 .next = NEXT(next_action),
1440 .help = "end list of actions",
1441 .priv = PRIV_ACTION(END, 0),
1446 .help = "no-op action",
1447 .priv = PRIV_ACTION(VOID, 0),
1448 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1451 [ACTION_PASSTHRU] = {
1453 .help = "let subsequent rule process matched packets",
1454 .priv = PRIV_ACTION(PASSTHRU, 0),
1455 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1460 .help = "attach 32 bit value to packets",
1461 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1462 .next = NEXT(action_mark),
1465 [ACTION_MARK_ID] = {
1467 .help = "32 bit value to return with packets",
1468 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1469 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1470 .call = parse_vc_conf,
1474 .help = "flag packets",
1475 .priv = PRIV_ACTION(FLAG, 0),
1476 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1481 .help = "assign packets to a given queue index",
1482 .priv = PRIV_ACTION(QUEUE,
1483 sizeof(struct rte_flow_action_queue)),
1484 .next = NEXT(action_queue),
1487 [ACTION_QUEUE_INDEX] = {
1489 .help = "queue index to use",
1490 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1491 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1492 .call = parse_vc_conf,
1496 .help = "drop packets (note: passthru has priority)",
1497 .priv = PRIV_ACTION(DROP, 0),
1498 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1503 .help = "enable counters for this rule",
1504 .priv = PRIV_ACTION(COUNT, 0),
1505 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1510 .help = "duplicate packets to a given queue index",
1511 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1512 .next = NEXT(action_dup),
1515 [ACTION_DUP_INDEX] = {
1517 .help = "queue index to duplicate packets to",
1518 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1519 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1520 .call = parse_vc_conf,
1524 .help = "spread packets among several queues",
1525 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1526 .next = NEXT(action_rss),
1529 [ACTION_RSS_QUEUES] = {
1531 .help = "queue indices to use",
1532 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1533 .call = parse_vc_conf,
1535 [ACTION_RSS_QUEUE] = {
1537 .help = "queue index",
1538 .call = parse_vc_action_rss_queue,
1539 .comp = comp_vc_action_rss_queue,
1543 .help = "redirect packets to physical device function",
1544 .priv = PRIV_ACTION(PF, 0),
1545 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1550 .help = "redirect packets to virtual device function",
1551 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1552 .next = NEXT(action_vf),
1555 [ACTION_VF_ORIGINAL] = {
1557 .help = "use original VF ID if possible",
1558 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1559 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1561 .call = parse_vc_conf,
1565 .help = "VF ID to redirect packets to",
1566 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1567 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1568 .call = parse_vc_conf,
/**
 * Remove and return last entry from argument stack.
 *
 * @return The popped argument descriptor, or NULL when the stack is empty.
 */
static const struct arg *
pop_args(struct context *ctx)
	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
/**
 * Add entry on top of the argument stack.
 *
 * The stack holds at most CTX_STACK_SIZE entries; the comparison below
 * detects a full stack before pushing.
 * NOTE(review): the overflow branch is truncated in this excerpt —
 * presumably it reports an error to the caller; confirm upstream.
 */
push_args(struct context *ctx, const struct arg *arg)
	if (ctx->args_num == CTX_STACK_SIZE)
	ctx->args[ctx->args_num++] = arg;
1589 /** Spread value into buffer according to bit-mask. */
1591 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1593 uint32_t i = arg->size;
1601 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1610 unsigned int shift = 0;
1611 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1613 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1614 if (!(arg->mask[i] & (1 << shift)))
1619 *buf &= ~(1 << shift);
1620 *buf |= (val & 1) << shift;
1629 * Parse a prefix length and generate a bit-mask.
1631 * Last argument (ctx->args) is retrieved to determine mask size, storage
1632 * location and whether the result must use network byte ordering.
/* NOTE(review): several error-handling lines of this function are elided
 * in this view; the visible logic is documented below. */
1635 parse_prefix(struct context *ctx, const struct token *token,
1636 const char *str, unsigned int len,
1637 void *buf, unsigned int size)
1639 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte whose n most-significant bits are set (n in 0..8). */
1640 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1647 /* Argument is expected. */
/* Numeric prefix length; the whole token must be consumed. */
1651 u = strtoumax(str, &end, 0);
1652 if (errno || (size_t)(end - str) != len)
/* Non-byte-aligned fields go through the bit-mask filler. */
1657 extra = arg_entry_bf_fill(NULL, 0, arg);
1666 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1667 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix may not exceed the storage size of the field. */
1674 if (bytes > size || bytes + !!extra > size)
1678 buf = (uint8_t *)ctx->object + arg->offset;
1679 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host-order layout: all-ones bytes at the high end, zeros below,
 * partial byte (conv[extra]) in between. */
1681 memset((uint8_t *)buf + size - bytes, 0xff, bytes)
1682 memset(buf, 0x00, size - bytes);
1684 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network-order layout: leading all-ones bytes, partial byte, zeros. */
1688 memset(buf, 0xff, bytes);
1689 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1691 ((uint8_t *)buf)[bytes] = conv[extra];
/* Object mask (if any) marks the entire field as significant. */
1694 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument for the caller on the failure path. */
1697 push_args(ctx, arg);
1701 /** Default parsing function for token name matching. */
1703 parse_default(struct context *ctx, const struct token *token,
1704 const char *str, unsigned int len,
1705 void *buf, unsigned int size)
1710 if (strncmp(str, token->name, len))
1715 /** Parse flow command, initialize output buffer for subsequent tokens. */
1717 parse_init(struct context *ctx, const struct token *token,
1718 const char *str, unsigned int len,
1719 void *buf, unsigned int size)
1721 struct buffer *out = buf;
1723 /* Token name must match. */
1724 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1726 /* Nothing else to do if there is no buffer. */
1729 /* Make sure buffer is large enough. */
1730 if (size < sizeof(*out))
1732 /* Initialize buffer. */
1733 memset(out, 0x00, sizeof(*out));
/* 0x22 fill past the header -- presumably a poison pattern so stray
 * reads of uninitialized space are detectable; TODO confirm. */
1734 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1737 ctx->objmask = NULL;
1741 /** Parse tokens for validate/create commands. */
/*
 * Builds a rte_flow rule incrementally inside @buf: attributes first,
 * then the pattern item array and action array grow upward from the end
 * of the header while their per-entry spec/conf data grows downward
 * from the end of the buffer (out->args.vc.data).
 * NOTE(review): case labels and error branches are elided in this view.
 */
1743 parse_vc(struct context *ctx, const struct token *token,
1744 const char *str, unsigned int len,
1745 void *buf, unsigned int size)
1747 struct buffer *out = buf;
1751 /* Token name must match. */
1752 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1754 /* Nothing else to do if there is no buffer. */
/* First token of the command: record it and set up the data arena. */
1757 if (!out->command) {
1758 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1760 if (sizeof(*out) > size)
1762 out->command = ctx->curr;
1765 ctx->objmask = NULL;
/* Per-entry data grows downward from the very end of the buffer. */
1766 out->args.vc.data = (uint8_t *)out + size;
1770 ctx->object = &out->args.vc.attr;
1771 ctx->objmask = NULL;
1772 switch (ctx->curr) {
1777 out->args.vc.attr.ingress = 1;
1780 out->args.vc.attr.egress = 1;
/* PATTERN: item array starts right after the aligned header. */
1783 out->args.vc.pattern =
1784 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1786 ctx->object = out->args.vc.pattern;
1787 ctx->objmask = NULL;
/* ACTIONS: action array starts after the last pattern item. */
1790 out->args.vc.actions =
1791 (void *)RTE_ALIGN_CEIL((uintptr_t)
1792 (out->args.vc.pattern +
1793 out->args.vc.pattern_n),
1795 ctx->object = out->args.vc.actions;
1796 ctx->objmask = NULL;
/* Still in the pattern phase: append a new item. */
1803 if (!out->args.vc.actions) {
1804 const struct parse_item_priv *priv = token->priv;
1805 struct rte_flow_item *item =
1806 out->args.vc.pattern + out->args.vc.pattern_n;
1808 data_size = priv->size * 3; /* spec, last, mask */
1809 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1810 (out->args.vc.data - data_size),
/* Fail if the upward item array would collide with downward data. */
1812 if ((uint8_t *)item + sizeof(*item) > data)
1814 *item = (struct rte_flow_item){
1817 ++out->args.vc.pattern_n;
1819 ctx->objmask = NULL;
/* Otherwise append a new action with its configuration storage. */
1821 const struct parse_action_priv *priv = token->priv;
1822 struct rte_flow_action *action =
1823 out->args.vc.actions + out->args.vc.actions_n;
1825 data_size = priv->size; /* configuration */
1826 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1827 (out->args.vc.data - data_size),
1829 if ((uint8_t *)action + sizeof(*action) > data)
1831 *action = (struct rte_flow_action){
1834 ++out->args.vc.actions_n;
1835 ctx->object = action;
1836 ctx->objmask = NULL;
/* Reserve and clear storage for this entry's data. */
1838 memset(data, 0, data_size);
1839 out->args.vc.data = data;
1840 ctx->objdata = data_size;
1844 /** Parse pattern item parameter type. */
/*
 * Selects which of the three per-item storage areas (spec, last, mask)
 * the following field tokens will write into, based on the parameter
 * keyword ("is"/"spec", "last", "prefix", "mask").
 */
1846 parse_vc_spec(struct context *ctx, const struct token *token,
1847 const char *str, unsigned int len,
1848 void *buf, unsigned int size)
1850 struct buffer *out = buf;
1851 struct rte_flow_item *item;
1857 /* Token name must match. */
1858 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1860 /* Parse parameter types. */
1861 switch (ctx->curr) {
1862 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1868 case ITEM_PARAM_SPEC:
1871 case ITEM_PARAM_LAST:
1874 case ITEM_PARAM_PREFIX:
1875 /* Modify next token to expect a prefix. */
1876 if (ctx->next_num < 2)
1878 ctx->next[ctx->next_num - 2] = prefix;
1880 case ITEM_PARAM_MASK:
1886 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach data to. */
1889 if (!out->args.vc.pattern_n)
1891 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1892 data_size = ctx->objdata / 3; /* spec, last, mask */
1893 /* Point to selected object. */
1894 ctx->object = out->args.vc.data + (data_size * index);
1896 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1897 item->mask = ctx->objmask;
1899 ctx->objmask = NULL;
1900 /* Update relevant item pointer. */
1901 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1906 /** Parse action configuration field. */
/*
 * Points the current action's conf pointer at the per-action data area
 * so subsequent field tokens fill the action's configuration structure.
 */
1908 parse_vc_conf(struct context *ctx, const struct token *token,
1909 const char *str, unsigned int len,
1910 void *buf, unsigned int size)
1912 struct buffer *out = buf;
1913 struct rte_flow_action *action;
1916 /* Token name must match. */
1917 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1919 /* Nothing else to do if there is no buffer. */
/* An action must already exist to configure. */
1922 if (!out->args.vc.actions_n)
1924 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1925 /* Point to selected object. */
1926 ctx->object = out->args.vc.data;
1927 ctx->objmask = NULL;
1928 /* Update configuration pointer. */
1929 action->conf = ctx->object;
1934 * Parse queue field for RSS action.
1936 * Valid tokens are queue indices and the "end" token.
/* The running queue count is kept in the upper 16 bits of ctx->objdata. */
1939 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1940 const char *str, unsigned int len,
1941 void *buf, unsigned int size)
1943 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1950 if (ctx->curr != ACTION_RSS_QUEUE)
1952 i = ctx->objdata >> 16;
/* "end" terminates the queue list; reset the stashed count. */
1953 if (!strncmp(str, "end", len)) {
1954 ctx->objdata &= 0xffff;
/* Bounded by the fixed-size queue array of the RSS action. */
1957 if (i >= ACTION_RSS_NUM)
1959 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
/* Delegate the numeric queue index to the integer parser. */
1961 ret = parse_int(ctx, token, str, len, NULL, 0);
1967 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-arm this token so another queue index may follow. */
1969 if (ctx->next_num == RTE_DIM(ctx->next))
1971 ctx->next[ctx->next_num++] = next;
1974 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1978 /** Parse tokens for destroy command. */
/*
 * First call records the command and places the rule-ID array right
 * after the aligned header; later calls append one rule ID each,
 * checking that the array stays within the output buffer.
 */
1980 parse_destroy(struct context *ctx, const struct token *token,
1981 const char *str, unsigned int len,
1982 void *buf, unsigned int size)
1984 struct buffer *out = buf;
1986 /* Token name must match. */
1987 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1989 /* Nothing else to do if there is no buffer. */
1992 if (!out->command) {
1993 if (ctx->curr != DESTROY)
1995 if (sizeof(*out) > size)
1997 out->command = ctx->curr;
2000 ctx->objmask = NULL;
2001 out->args.destroy.rule =
2002 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject a rule ID that would overflow the buffer. */
2006 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2007 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2010 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2011 ctx->objmask = NULL;
2015 /** Parse tokens for flush command. */
/* Flush takes no arguments besides the port; only record the command. */
2017 parse_flush(struct context *ctx, const struct token *token,
2018 const char *str, unsigned int len,
2019 void *buf, unsigned int size)
2021 struct buffer *out = buf;
2023 /* Token name must match. */
2024 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2026 /* Nothing else to do if there is no buffer. */
2029 if (!out->command) {
2030 if (ctx->curr != FLUSH)
2032 if (sizeof(*out) > size)
2034 out->command = ctx->curr;
2037 ctx->objmask = NULL;
2042 /** Parse tokens for query command. */
2044 parse_query(struct context *ctx, const struct token *token,
2045 const char *str, unsigned int len,
2046 void *buf, unsigned int size)
2048 struct buffer *out = buf;
2050 /* Token name must match. */
2051 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2053 /* Nothing else to do if there is no buffer. */
/* First token: record command; rule/action args follow elsewhere. */
2056 if (!out->command) {
2057 if (ctx->curr != QUERY)
2059 if (sizeof(*out) > size)
2061 out->command = ctx->curr;
2064 ctx->objmask = NULL;
2069 /** Parse action names. */
/*
 * Looks the given name up in the next_action token list and, on match,
 * stores the action's value into the current object through the popped
 * argument descriptor.
 */
2071 parse_action(struct context *ctx, const struct token *token,
2072 const char *str, unsigned int len,
2073 void *buf, unsigned int size)
2075 struct buffer *out = buf;
2076 const struct arg *arg = pop_args(ctx);
2080 /* Argument is expected. */
2083 /* Parse action name. */
2084 for (i = 0; next_action[i]; ++i) {
2085 const struct parse_action_priv *priv;
2087 token = &token_list[next_action[i]];
2088 if (strncmp(token->name, str, len))
2094 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument for the caller on the failure path. */
2100 push_args(ctx, arg);
2104 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): first call records the command and places the
 * group-ID array after the header; later calls append one group ID each.
 */
2106 parse_list(struct context *ctx, const struct token *token,
2107 const char *str, unsigned int len,
2108 void *buf, unsigned int size)
2110 struct buffer *out = buf;
2112 /* Token name must match. */
2113 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2115 /* Nothing else to do if there is no buffer. */
2118 if (!out->command) {
2119 if (ctx->curr != LIST)
2121 if (sizeof(*out) > size)
2123 out->command = ctx->curr;
2126 ctx->objmask = NULL;
2127 out->args.list.group =
2128 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject a group ID that would overflow the buffer. */
2132 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2133 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2136 ctx->object = out->args.list.group + out->args.list.group_n++;
2137 ctx->objmask = NULL;
2141 /** Parse tokens for isolate command. */
2143 parse_isolate(struct context *ctx, const struct token *token,
2144 const char *str, unsigned int len,
2145 void *buf, unsigned int size)
2147 struct buffer *out = buf;
2149 /* Token name must match. */
2150 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2152 /* Nothing else to do if there is no buffer. */
/* First token: record the command in the output buffer. */
2155 if (!out->command) {
2156 if (ctx->curr != ISOLATE)
2158 if (sizeof(*out) > size)
2160 out->command = ctx->curr;
2163 ctx->objmask = NULL;
2169 * Parse signed/unsigned integers 8 to 64-bit long.
2171 * Last argument (ctx->args) is retrieved to determine integer type and
/* NOTE(review): sign handling and error branches are partially elided
 * in this view. */
2175 parse_int(struct context *ctx, const struct token *token,
2176 const char *str, unsigned int len,
2177 void *buf, unsigned int size)
2179 const struct arg *arg = pop_args(ctx);
2184 /* Argument is expected. */
/* Signed fields go through strtoimax(), unsigned through strtoumax(). */
2189 (uintmax_t)strtoimax(str, &end, 0) :
2190 strtoumax(str, &end, 0);
2191 if (errno || (size_t)(end - str) != len)
/* Bit-mask fields are spread bit-by-bit instead of stored whole. */
2196 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2197 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2201 buf = (uint8_t *)ctx->object + arg->offset;
/* Store according to field width, honoring network byte order (hton). */
2205 case sizeof(uint8_t):
2206 *(uint8_t *)buf = u;
2208 case sizeof(uint16_t):
2209 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are stored byte by byte. */
2211 case sizeof(uint8_t [3]):
2212 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2214 ((uint8_t *)buf)[0] = u;
2215 ((uint8_t *)buf)[1] = u >> 8;
2216 ((uint8_t *)buf)[2] = u >> 16;
2220 ((uint8_t *)buf)[0] = u >> 16;
2221 ((uint8_t *)buf)[1] = u >> 8;
2222 ((uint8_t *)buf)[2] = u;
2224 case sizeof(uint32_t):
2225 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2227 case sizeof(uint64_t):
2228 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the object mask when one is selected. */
2233 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2235 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore the argument for the caller on the failure path. */
2240 push_args(ctx, arg);
2247 * Two arguments (ctx->args) are retrieved from the stack to store data and
2248 * its length (in that order).
2251 parse_string(struct context *ctx, const struct token *token,
2252 const char *str, unsigned int len,
2253 void *buf, unsigned int size)
2255 const struct arg *arg_data = pop_args(ctx);
2256 const struct arg *arg_len = pop_args(ctx);
/* Scratch buffer for the decimal rendering of @len. */
2257 char tmp[16]; /* Ought to be enough. */
2260 /* Arguments are expected. */
/* Only arg_len was missing: put arg_data back before bailing out. */
2264 push_args(ctx, arg_data);
2267 size = arg_data->size;
2268 /* Bit-mask fill is not supported. */
2269 if (arg_data->mask || size < len)
2273 /* Let parse_int() fill length information first. */
2274 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2277 push_args(ctx, arg_len);
2278 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2283 buf = (uint8_t *)ctx->object + arg_data->offset;
2284 /* Output buffer is not necessarily NUL-terminated. */
2285 memcpy(buf, str, len);
/* 0x55 fill for the unused tail -- presumably a poison pattern;
 * TODO confirm. */
2286 memset((uint8_t *)buf + len, 0x55, size - len);
2288 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Restore both arguments (reverse order) on the failure path. */
2291 push_args(ctx, arg_len);
2292 push_args(ctx, arg_data);
2297 * Parse a MAC address.
2299 * Last argument (ctx->args) is retrieved to determine storage size and
2303 parse_mac_addr(struct context *ctx, const struct token *token,
2304 const char *str, unsigned int len,
2305 void *buf, unsigned int size)
2307 const struct arg *arg = pop_args(ctx);
/* Parse into a temporary so the target is untouched on failure. */
2308 struct ether_addr tmp;
2312 /* Argument is expected. */
2316 /* Bit-mask fill is not supported. */
2317 if (arg->mask || size != sizeof(tmp))
2319 /* Only network endian is supported. */
/* Delegate the textual format to the cmdline library parser. */
2322 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2323 if (ret < 0 || (unsigned int)ret != len)
2327 buf = (uint8_t *)ctx->object + arg->offset;
2328 memcpy(buf, &tmp, size);
2330 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument for the caller on the failure path. */
2333 push_args(ctx, arg);
2338 * Parse an IPv4 address.
2340 * Last argument (ctx->args) is retrieved to determine storage size and
2344 parse_ipv4_addr(struct context *ctx, const struct token *token,
2345 const char *str, unsigned int len,
2346 void *buf, unsigned int size)
2348 const struct arg *arg = pop_args(ctx);
2353 /* Argument is expected. */
2357 /* Bit-mask fill is not supported. */
2358 if (arg->mask || size != sizeof(tmp))
2360 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch string for inet_pton(). */
2363 memcpy(str2, str, len);
2365 ret = inet_pton(AF_INET, str2, &tmp);
/* Not dotted-quad notation: fall back to plain integer parsing. */
2367 /* Attempt integer parsing. */
2368 push_args(ctx, arg);
2369 return parse_int(ctx, token, str, len, buf, size);
2373 buf = (uint8_t *)ctx->object + arg->offset;
2374 memcpy(buf, &tmp, size);
2376 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument for the caller on the failure path. */
2379 push_args(ctx, arg);
2384 * Parse an IPv6 address.
2386 * Last argument (ctx->args) is retrieved to determine storage size and
2390 parse_ipv6_addr(struct context *ctx, const struct token *token,
2391 const char *str, unsigned int len,
2392 void *buf, unsigned int size)
2394 const struct arg *arg = pop_args(ctx);
/* Parse into a temporary so the target is untouched on failure. */
2396 struct in6_addr tmp;
2400 /* Argument is expected. */
2404 /* Bit-mask fill is not supported. */
2405 if (arg->mask || size != sizeof(tmp))
2407 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch string for inet_pton(). */
2410 memcpy(str2, str, len);
2412 ret = inet_pton(AF_INET6, str2, &tmp);
2417 buf = (uint8_t *)ctx->object + arg->offset;
2418 memcpy(buf, &tmp, size);
2420 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument for the caller on the failure path. */
2423 push_args(ctx, arg);
2427 /** Boolean values (even indices stand for false). */
/* NUL/NULL-terminated list of accepted spellings; the entries themselves
 * are elided in this view (typically on/off, true/false, etc. -- TODO
 * confirm against the full file). */
2428 static const char *const boolean_name[] = {
2437 * Parse a boolean value.
2439 * Last argument (ctx->args) is retrieved to determine storage size and
2443 parse_boolean(struct context *ctx, const struct token *token,
2444 const char *str, unsigned int len,
2445 void *buf, unsigned int size)
2447 const struct arg *arg = pop_args(ctx);
2451 /* Argument is expected. */
/* Look the token up among the known boolean spellings. */
2454 for (i = 0; boolean_name[i]; ++i)
2455 if (!strncmp(str, boolean_name[i], len))
2457 /* Process token as integer. */
/* Even indices mean false, odd mean true (see boolean_name). */
2458 if (boolean_name[i])
2459 str = i & 1 ? "1" : "0";
2460 push_args(ctx, arg);
2461 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted "0"/"1". */
2462 return ret > 0 ? (int)len : ret;
2465 /** Parse port and update context. */
/* Parses the port ID into a throwaway buffer and mirrors it into
 * ctx->port so later completions can inspect the selected port. */
2467 parse_port(struct context *ctx, const struct token *token,
2468 const char *str, unsigned int len,
2469 void *buf, unsigned int size)
2471 struct buffer *out = &(struct buffer){ .port = 0 };
2479 ctx->objmask = NULL;
2480 size = sizeof(*out);
2482 ret = parse_int(ctx, token, str, len, out, size);
2484 ctx->port = out->port;
/** No completion. */
static int
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	/* Nothing to complete; every parameter is deliberately unused. */
	(void)ctx;
	(void)token;
	(void)ent;
	(void)buf;
	(void)size;
	return 0;
}
2503 /** Complete boolean values. */
/* With @buf set, returns the entry at index @ent; otherwise (per the
 * elided tail) presumably returns the number of entries -- TODO confirm. */
2505 comp_boolean(struct context *ctx, const struct token *token,
2506 unsigned int ent, char *buf, unsigned int size)
2512 for (i = 0; boolean_name[i]; ++i)
2513 if (buf && i == ent)
2514 return snprintf(buf, size, "%s", boolean_name[i]);
2520 /** Complete action names. */
/* With @buf set, returns the name of the action token at index @ent;
 * otherwise (per the elided tail) presumably returns the count -- TODO
 * confirm. */
2522 comp_action(struct context *ctx, const struct token *token,
2523 unsigned int ent, char *buf, unsigned int size)
2529 for (i = 0; next_action[i]; ++i)
2530 if (buf && i == ent)
2531 return snprintf(buf, size, "%s",
2532 token_list[next_action[i]].name);
2538 /** Complete available ports. */
/* Iterates over attached ethdev ports; with @buf set, returns the ID of
 * the @ent-th one as a string. */
2540 comp_port(struct context *ctx, const struct token *token,
2541 unsigned int ent, char *buf, unsigned int size)
2548 RTE_ETH_FOREACH_DEV(p) {
2549 if (buf && i == ent)
2550 return snprintf(buf, size, "%u", p);
2558 /** Complete available rule IDs. */
/* Walks the flow list of the port previously stored in ctx->port by
 * parse_port(); with @buf set, returns the @ent-th rule ID as a string. */
2560 comp_rule_id(struct context *ctx, const struct token *token,
2561 unsigned int ent, char *buf, unsigned int size)
2564 struct rte_port *port;
2565 struct port_flow *pf;
/* No completion possible without a valid, specific port. */
2568 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2569 ctx->port == (uint16_t)RTE_PORT_ALL)
2571 port = &ports[ctx->port];
2572 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2573 if (buf && i == ent)
2574 return snprintf(buf, size, "%u", pf->id);
2582 /** Complete queue field for RSS action. */
/* Offers a placeholder entry plus the "end" terminator; with @buf set,
 * returns the @ent-th suggestion. */
2584 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2585 unsigned int ent, char *buf, unsigned int size)
2587 static const char *const str[] = { "", "end", NULL };
2592 for (i = 0; str[i] != NULL; ++i)
2593 if (buf && i == ent)
2594 return snprintf(buf, size, "%s", str[i]);
2600 /** Internal context. */
/* Single shared parser state: the cmdline callbacks below all operate on
 * this one instance, so parsing is not reentrant. */
2601 static struct context cmd_flow_context;
2603 /** Global parser instance (cmdline API). */
/* Forward declaration; the instance is defined at the end of this file. */
2604 cmdline_parse_inst_t cmd_flow;
2606 /** Initialize context. */
/* Resets the shared parser state between commands; field assignments are
 * elided in this view except the objmask reset. */
2608 cmd_flow_context_init(struct context *ctx)
2610 /* A full memset() is not necessary. */
2621 ctx->objmask = NULL;
2624 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by the cmdline library once per token.  Extracts
 * the next whitespace/comment-delimited token from @src, tries each
 * candidate token on top of ctx->next, and on a match pushes that
 * token's follow-up token lists and argument descriptors.
 */
2626 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2629 struct context *ctx = &cmd_flow_context;
2630 const struct token *token;
2631 const enum index *list;
2636 /* Restart as requested. */
2638 cmd_flow_context_init(ctx);
2639 token = &token_list[ctx->curr];
2640 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2643 for (len = 0; src[len]; ++len)
2644 if (src[len] == '#' || isspace(src[len]))
2648 /* Last argument and EOL detection. */
2649 for (i = len; src[i]; ++i)
2650 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2652 else if (!isspace(src[i])) {
2657 if (src[i] == '\r' || src[i] == '\n') {
2661 /* Initialize context if necessary. */
2662 if (!ctx->next_num) {
2665 ctx->next[ctx->next_num++] = token->next[0];
2667 /* Process argument through candidates. */
2668 ctx->prev = ctx->curr;
/* Candidates are the token list on top of the next-stack. */
2669 list = ctx->next[ctx->next_num - 1];
2670 for (i = 0; list[i]; ++i) {
2671 const struct token *next = &token_list[list[i]];
2674 ctx->curr = list[i];
/* Tokens without a call hook fall back to plain name matching. */
2676 tmp = next->call(ctx, next, src, len, result, size);
2678 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume the token exactly to be accepted. */
2679 if (tmp == -1 || tmp != len)
2687 /* Push subsequent tokens if any. */
2689 for (i = 0; token->next[i]; ++i) {
2690 if (ctx->next_num == RTE_DIM(ctx->next))
2692 ctx->next[ctx->next_num++] = token->next[i];
2694 /* Push arguments if any. */
2696 for (i = 0; token->args[i]; ++i) {
2697 if (ctx->args_num == RTE_DIM(ctx->args))
2699 ctx->args[ctx->args_num++] = token->args[i];
2704 /** Return number of completion entries (cmdline API). */
2706 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2708 struct context *ctx = &cmd_flow_context;
2709 const struct token *token = &token_list[ctx->curr];
2710 const enum index *list;
2714 /* Tell cmd_flow_parse() that context must be reinitialized. */
2716 /* Count number of tokens in current list. */
/* Use the list on top of the next-stack, or the current token's
 * default successor list when the stack is empty. */
2718 list = ctx->next[ctx->next_num - 1];
2720 list = token->next[0];
2721 for (i = 0; list[i]; ++i)
2726 * If there is a single token, use its completion callback, otherwise
2727 * return the number of entries.
2729 token = &token_list[list[0]];
2730 if (i == 1 && token->comp) {
2731 /* Save index for cmd_flow_get_help(). */
2732 ctx->prev = list[0];
/* comp() with a NULL buffer returns the number of entries. */
2733 return token->comp(ctx, token, 0, NULL, 0);
2738 /** Return a completion entry (cmdline API). */
2740 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2741 char *dst, unsigned int size)
2743 struct context *ctx = &cmd_flow_context;
2744 const struct token *token = &token_list[ctx->curr];
2745 const enum index *list;
2749 /* Tell cmd_flow_parse() that context must be reinitialized. */
2751 /* Count number of tokens in current list. */
/* Same list selection as cmd_flow_complete_get_nb(). */
2753 list = ctx->next[ctx->next_num - 1];
2755 list = token->next[0];
2756 for (i = 0; list[i]; ++i)
2760 /* If there is a single token, use its completion callback. */
2761 token = &token_list[list[0]];
2762 if (i == 1 && token->comp) {
2763 /* Save index for cmd_flow_get_help(). */
2764 ctx->prev = list[0];
/* Delegate entry rendering to the token's completion callback. */
2765 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2767 /* Otherwise make sure the index is valid and use defaults. */
2770 token = &token_list[list[index]];
2771 snprintf(dst, size, "%s", token->name);
2772 /* Save index for cmd_flow_get_help(). */
2773 ctx->prev = list[index];
2777 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) to describe the
 * token the user is currently completing. */
2779 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2781 struct context *ctx = &cmd_flow_context;
2782 const struct token *token = &token_list[ctx->prev];
2785 /* Tell cmd_flow_parse() that context must be reinitialized. */
2789 /* Set token type and update global help with details. */
2790 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Fall back to the token name when no help text is provided. */
2792 cmd_flow.help_str = token->help;
2794 cmd_flow.help_str = token->name;
2798 /** Token definition template (cmdline API). */
/* Every dynamic token handed to the cmdline library points at this one
 * header, routing all parsing/completion/help through the hooks above. */
2799 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2800 .ops = &(struct cmdline_token_ops){
2801 .parse = cmd_flow_parse,
2802 .complete_get_nb = cmd_flow_complete_get_nb,
2803 .complete_get_elt = cmd_flow_complete_get_elt,
2804 .get_help = cmd_flow_get_help,
2809 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library to obtain token headers one at a time.
 * Reinitializes the context before the first token and stops producing
 * tokens once the grammar allows the command to end.
 */
2811 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2812 cmdline_parse_token_hdr_t *(*hdrs)[])
2814 struct context *ctx = &cmd_flow_context;
2816 /* Always reinitialize context before requesting the first token. */
2818 cmd_flow_context_init(ctx);
2819 /* Return NULL when no more tokens are expected. */
2820 if (!ctx->next_num && ctx->curr) {
2824 /* Determine if command should end here. */
2825 if (ctx->eol && ctx->last && ctx->next_num) {
2826 const enum index *list = ctx->next[ctx->next_num - 1];
2829 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share the same template header. */
2836 *hdr = &cmd_flow_token_hdr;
2839 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command in @in to the corresponding port_flow_*
 * helper implemented elsewhere in testpmd. */
2841 cmd_flow_parsed(const struct buffer *in)
2843 switch (in->command) {
2845 port_flow_validate(in->port, &in->args.vc.attr,
2846 in->args.vc.pattern, in->args.vc.actions);
2849 port_flow_create(in->port, &in->args.vc.attr,
2850 in->args.vc.pattern, in->args.vc.actions);
2853 port_flow_destroy(in->port, in->args.destroy.rule_n,
2854 in->args.destroy.rule);
2857 port_flow_flush(in->port);
2860 port_flow_query(in->port, in->args.query.rule,
2861 in->args.query.action);
2864 port_flow_list(in->port, in->args.list.group_n,
2865 in->args.list.group);
2868 port_flow_isolate(in->port, in->args.isolate.set);
2875 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator (arg0 = hdr pointer) and, once a command is
 * fully parsed, as the dispatcher of the resulting buffer. */
2877 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2880 cmd_flow_tok(arg0, arg2);
2882 cmd_flow_parsed(arg0);
2885 /** Global parser instance (cmdline API). */
2886 cmdline_parse_inst_t cmd_flow = {
2888 .data = NULL, /**< Unused. */
2889 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2892 }, /**< Tokens are returned by cmd_flow_tok(). */