4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
85 /* Destroy arguments. */
88 /* Query arguments. */
94 /* Validate/create arguments. */
100 /* Validate/create pattern. */
165 ITEM_E_TAG_GRP_ECID_B,
175 /* Validate/create actions. */
199 /** Size of pattern[] field in struct rte_flow_item_raw. */
200 #define ITEM_RAW_PATTERN_SIZE 36
202 /** Storage size for struct rte_flow_item_raw including pattern. */
203 #define ITEM_RAW_SIZE \
204 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
206 /** Number of queue[] entries in struct rte_flow_action_rss. */
207 #define ACTION_RSS_NUM 32
209 /** Storage size for struct rte_flow_action_rss including queues. */
210 #define ACTION_RSS_SIZE \
211 (offsetof(struct rte_flow_action_rss, queue) + \
212 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
214 /** Maximum number of subsequent tokens and arguments on the stack. */
215 #define CTX_STACK_SIZE 16
217 /** Parser context. */
219 /** Stack of subsequent token lists to process. */
220 const enum index *next[CTX_STACK_SIZE];
221 /** Arguments for stacked tokens. */
222 const void *args[CTX_STACK_SIZE];
223 enum index curr; /**< Current token index. */
224 enum index prev; /**< Index of the last token seen. */
225 int next_num; /**< Number of entries in next[]. */
226 int args_num; /**< Number of entries in args[]. */
227 uint32_t eol:1; /**< EOL has been detected. */
228 uint32_t last:1; /**< No more arguments. */
229 uint16_t port; /**< Current port ID (for completions). */
230 uint32_t objdata; /**< Object-specific data. */
231 void *object; /**< Address of current object for relative offsets. */
232 void *objmask; /**< Object a full mask must be written to. */
235 /** Token argument. */
237 uint32_t hton:1; /**< Use network byte ordering. */
238 uint32_t sign:1; /**< Value is signed. */
239 uint32_t offset; /**< Relative offset from ctx->object. */
240 uint32_t size; /**< Field size. */
241 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
244 /** Parser token definition. */
246 /** Type displayed during completion (defaults to "TOKEN"). */
248 /** Help displayed during completion (defaults to token name). */
250 /** Private data used by parser functions. */
253 * Lists of subsequent tokens to push on the stack. Each call to the
254 * parser consumes the last entry of that stack.
256 const enum index *const *next;
257 /** Arguments stack for subsequent tokens that need them. */
258 const struct arg *const *args;
260 * Token-processing callback, returns -1 in case of error, the
261 * length of the matched string otherwise. If NULL, attempts to
262 * match the token name.
264 * If buf is not NULL, the result should be stored in it according
265 * to context. An error is returned if not large enough.
267 int (*call)(struct context *ctx, const struct token *token,
268 const char *str, unsigned int len,
269 void *buf, unsigned int size);
271 * Callback that provides possible values for this token, used for
272 * completion. Returns -1 in case of error, the number of possible
273 * values otherwise. If NULL, the token name is used.
275 * If buf is not NULL, entry index ent is written to buf and the
276 * full length of the entry is returned (same behavior as
279 int (*comp)(struct context *ctx, const struct token *token,
280 unsigned int ent, char *buf, unsigned int size);
281 /** Mandatory token name, no default value. */
285 /** Static initializer for the next field. */
286 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
288 /** Static initializer for a NEXT() entry. */
289 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
291 /** Static initializer for the args field. */
292 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
294 /** Static initializer for ARGS() to target a field. */
295 #define ARGS_ENTRY(s, f) \
296 (&(const struct arg){ \
297 .offset = offsetof(s, f), \
298 .size = sizeof(((s *)0)->f), \
301 /** Static initializer for ARGS() to target a bit-field. */
302 #define ARGS_ENTRY_BF(s, f, b) \
303 (&(const struct arg){ \
305 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
308 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
309 #define ARGS_ENTRY_MASK(s, f, m) \
310 (&(const struct arg){ \
311 .offset = offsetof(s, f), \
312 .size = sizeof(((s *)0)->f), \
313 .mask = (const void *)(m), \
316 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
317 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
318 (&(const struct arg){ \
320 .offset = offsetof(s, f), \
321 .size = sizeof(((s *)0)->f), \
322 .mask = (const void *)(m), \
325 /** Static initializer for ARGS() to target a pointer. */
326 #define ARGS_ENTRY_PTR(s, f) \
327 (&(const struct arg){ \
328 .size = sizeof(*((s *)0)->f), \
331 /** Static initializer for ARGS() with arbitrary size. */
332 #define ARGS_ENTRY_USZ(s, f, sz) \
333 (&(const struct arg){ \
334 .offset = offsetof(s, f), \
338 /** Same as ARGS_ENTRY() using network byte ordering. */
339 #define ARGS_ENTRY_HTON(s, f) \
340 (&(const struct arg){ \
342 .offset = offsetof(s, f), \
343 .size = sizeof(((s *)0)->f), \
346 /** Parser output buffer layout expected by cmd_flow_parsed(). */
348 enum index command; /**< Flow command. */
349 uint16_t port; /**< Affected port ID. */
352 struct rte_flow_attr attr;
353 struct rte_flow_item *pattern;
354 struct rte_flow_action *actions;
358 } vc; /**< Validate/create arguments. */
362 } destroy; /**< Destroy arguments. */
365 enum rte_flow_action_type action;
366 } query; /**< Query arguments. */
370 } list; /**< List arguments. */
373 } isolate; /**< Isolated mode arguments. */
374 } args; /**< Command arguments. */
377 /** Private data for pattern items. */
378 struct parse_item_priv {
379 enum rte_flow_item_type type; /**< Item type. */
380 uint32_t size; /**< Size of item specification structure. */
383 #define PRIV_ITEM(t, s) \
384 (&(const struct parse_item_priv){ \
385 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
389 /** Private data for actions. */
390 struct parse_action_priv {
391 enum rte_flow_action_type type; /**< Action type. */
392 uint32_t size; /**< Size of action configuration structure. */
395 #define PRIV_ACTION(t, s) \
396 (&(const struct parse_action_priv){ \
397 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
401 static const enum index next_vc_attr[] = {
410 static const enum index next_destroy_attr[] = {
416 static const enum index next_list_attr[] = {
422 static const enum index item_param[] = {
431 static const enum index next_item[] = {
457 static const enum index item_fuzzy[] = {
463 static const enum index item_any[] = {
469 static const enum index item_vf[] = {
475 static const enum index item_port[] = {
481 static const enum index item_raw[] = {
491 static const enum index item_eth[] = {
499 static const enum index item_vlan[] = {
509 static const enum index item_ipv4[] = {
519 static const enum index item_ipv6[] = {
530 static const enum index item_icmp[] = {
537 static const enum index item_udp[] = {
544 static const enum index item_tcp[] = {
552 static const enum index item_sctp[] = {
561 static const enum index item_vxlan[] = {
567 static const enum index item_e_tag[] = {
568 ITEM_E_TAG_GRP_ECID_B,
573 static const enum index item_nvgre[] = {
579 static const enum index item_mpls[] = {
585 static const enum index item_gre[] = {
591 static const enum index next_action[] = {
607 static const enum index action_mark[] = {
613 static const enum index action_queue[] = {
619 static const enum index action_dup[] = {
625 static const enum index action_rss[] = {
631 static const enum index action_vf[] = {
638 static int parse_init(struct context *, const struct token *,
639 const char *, unsigned int,
640 void *, unsigned int);
641 static int parse_vc(struct context *, const struct token *,
642 const char *, unsigned int,
643 void *, unsigned int);
644 static int parse_vc_spec(struct context *, const struct token *,
645 const char *, unsigned int, void *, unsigned int);
646 static int parse_vc_conf(struct context *, const struct token *,
647 const char *, unsigned int, void *, unsigned int);
648 static int parse_vc_action_rss_queue(struct context *, const struct token *,
649 const char *, unsigned int, void *,
651 static int parse_destroy(struct context *, const struct token *,
652 const char *, unsigned int,
653 void *, unsigned int);
654 static int parse_flush(struct context *, const struct token *,
655 const char *, unsigned int,
656 void *, unsigned int);
657 static int parse_query(struct context *, const struct token *,
658 const char *, unsigned int,
659 void *, unsigned int);
660 static int parse_action(struct context *, const struct token *,
661 const char *, unsigned int,
662 void *, unsigned int);
663 static int parse_list(struct context *, const struct token *,
664 const char *, unsigned int,
665 void *, unsigned int);
666 static int parse_isolate(struct context *, const struct token *,
667 const char *, unsigned int,
668 void *, unsigned int);
669 static int parse_int(struct context *, const struct token *,
670 const char *, unsigned int,
671 void *, unsigned int);
672 static int parse_prefix(struct context *, const struct token *,
673 const char *, unsigned int,
674 void *, unsigned int);
675 static int parse_boolean(struct context *, const struct token *,
676 const char *, unsigned int,
677 void *, unsigned int);
678 static int parse_string(struct context *, const struct token *,
679 const char *, unsigned int,
680 void *, unsigned int);
681 static int parse_mac_addr(struct context *, const struct token *,
682 const char *, unsigned int,
683 void *, unsigned int);
684 static int parse_ipv4_addr(struct context *, const struct token *,
685 const char *, unsigned int,
686 void *, unsigned int);
687 static int parse_ipv6_addr(struct context *, const struct token *,
688 const char *, unsigned int,
689 void *, unsigned int);
690 static int parse_port(struct context *, const struct token *,
691 const char *, unsigned int,
692 void *, unsigned int);
693 static int comp_none(struct context *, const struct token *,
694 unsigned int, char *, unsigned int);
695 static int comp_boolean(struct context *, const struct token *,
696 unsigned int, char *, unsigned int);
697 static int comp_action(struct context *, const struct token *,
698 unsigned int, char *, unsigned int);
699 static int comp_port(struct context *, const struct token *,
700 unsigned int, char *, unsigned int);
701 static int comp_rule_id(struct context *, const struct token *,
702 unsigned int, char *, unsigned int);
703 static int comp_vc_action_rss_queue(struct context *, const struct token *,
704 unsigned int, char *, unsigned int);
706 /** Token definitions. */
707 static const struct token token_list[] = {
708 /* Special tokens. */
711 .help = "null entry, abused as the entry point",
712 .next = NEXT(NEXT_ENTRY(FLOW)),
717 .help = "command may end here",
723 .help = "integer value",
728 .name = "{unsigned}",
730 .help = "unsigned integer value",
737 .help = "prefix length for bit-mask",
738 .call = parse_prefix,
744 .help = "any boolean value",
745 .call = parse_boolean,
746 .comp = comp_boolean,
751 .help = "fixed string",
752 .call = parse_string,
756 .name = "{MAC address}",
758 .help = "standard MAC address notation",
759 .call = parse_mac_addr,
763 .name = "{IPv4 address}",
764 .type = "IPV4 ADDRESS",
765 .help = "standard IPv4 address notation",
766 .call = parse_ipv4_addr,
770 .name = "{IPv6 address}",
771 .type = "IPV6 ADDRESS",
772 .help = "standard IPv6 address notation",
773 .call = parse_ipv6_addr,
779 .help = "rule identifier",
781 .comp = comp_rule_id,
786 .help = "port identifier",
791 .name = "{group_id}",
793 .help = "group identifier",
800 .help = "priority level",
804 /* Top-level command. */
807 .type = "{command} {port_id} [{arg} [...]]",
808 .help = "manage ingress/egress flow rules",
809 .next = NEXT(NEXT_ENTRY
819 /* Sub-level commands. */
822 .help = "check whether a flow rule can be created",
823 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
824 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
829 .help = "create a flow rule",
830 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
831 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
836 .help = "destroy specific flow rules",
837 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
838 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
839 .call = parse_destroy,
843 .help = "destroy all flow rules",
844 .next = NEXT(NEXT_ENTRY(PORT_ID)),
845 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
850 .help = "query an existing flow rule",
851 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
853 NEXT_ENTRY(PORT_ID)),
854 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
855 ARGS_ENTRY(struct buffer, args.query.rule),
856 ARGS_ENTRY(struct buffer, port)),
861 .help = "list existing flow rules",
862 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
863 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
868 .help = "restrict ingress traffic to the defined flow rules",
869 .next = NEXT(NEXT_ENTRY(BOOLEAN),
870 NEXT_ENTRY(PORT_ID)),
871 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
872 ARGS_ENTRY(struct buffer, port)),
873 .call = parse_isolate,
875 /* Destroy arguments. */
878 .help = "specify a rule identifier",
879 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
880 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
881 .call = parse_destroy,
883 /* Query arguments. */
887 .help = "action to query, must be part of the rule",
888 .call = parse_action,
891 /* List arguments. */
894 .help = "specify a group",
895 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
896 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
899 /* Validate/create attributes. */
902 .help = "specify a group",
903 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
904 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
909 .help = "specify a priority level",
910 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
911 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
916 .help = "affect rule to ingress",
917 .next = NEXT(next_vc_attr),
922 .help = "affect rule to egress",
923 .next = NEXT(next_vc_attr),
926 /* Validate/create pattern. */
929 .help = "submit a list of pattern items",
930 .next = NEXT(next_item),
935 .help = "match value perfectly (with full bit-mask)",
936 .call = parse_vc_spec,
938 [ITEM_PARAM_SPEC] = {
940 .help = "match value according to configured bit-mask",
941 .call = parse_vc_spec,
943 [ITEM_PARAM_LAST] = {
945 .help = "specify upper bound to establish a range",
946 .call = parse_vc_spec,
948 [ITEM_PARAM_MASK] = {
950 .help = "specify bit-mask with relevant bits set to one",
951 .call = parse_vc_spec,
953 [ITEM_PARAM_PREFIX] = {
955 .help = "generate bit-mask from a prefix length",
956 .call = parse_vc_spec,
960 .help = "specify next pattern item",
961 .next = NEXT(next_item),
965 .help = "end list of pattern items",
966 .priv = PRIV_ITEM(END, 0),
967 .next = NEXT(NEXT_ENTRY(ACTIONS)),
972 .help = "no-op pattern item",
973 .priv = PRIV_ITEM(VOID, 0),
974 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
979 .help = "perform actions when pattern does not match",
980 .priv = PRIV_ITEM(INVERT, 0),
981 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
986 .help = "match any protocol for the current layer",
987 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
988 .next = NEXT(item_any),
993 .help = "number of layers covered",
994 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
995 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
999 .help = "match packets addressed to the physical function",
1000 .priv = PRIV_ITEM(PF, 0),
1001 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1006 .help = "match packets addressed to a virtual function ID",
1007 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1008 .next = NEXT(item_vf),
1013 .help = "destination VF ID",
1014 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1015 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1019 .help = "device-specific physical port index to use",
1020 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1021 .next = NEXT(item_port),
1024 [ITEM_PORT_INDEX] = {
1026 .help = "physical port index",
1027 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1028 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1032 .help = "match an arbitrary byte string",
1033 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1034 .next = NEXT(item_raw),
1037 [ITEM_RAW_RELATIVE] = {
1039 .help = "look for pattern after the previous item",
1040 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1041 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1044 [ITEM_RAW_SEARCH] = {
1046 .help = "search pattern from offset (see also limit)",
1047 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1048 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1051 [ITEM_RAW_OFFSET] = {
1053 .help = "absolute or relative offset for pattern",
1054 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1055 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1057 [ITEM_RAW_LIMIT] = {
1059 .help = "search area limit for start of pattern",
1060 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1061 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1063 [ITEM_RAW_PATTERN] = {
1065 .help = "byte string to look for",
1066 .next = NEXT(item_raw,
1068 NEXT_ENTRY(ITEM_PARAM_IS,
1071 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1072 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1074 ITEM_RAW_PATTERN_SIZE)),
1078 .help = "match Ethernet header",
1079 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1080 .next = NEXT(item_eth),
1085 .help = "destination MAC",
1086 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1087 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1091 .help = "source MAC",
1092 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1093 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1097 .help = "EtherType",
1098 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1099 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1103 .help = "match 802.1Q/ad VLAN tag",
1104 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1105 .next = NEXT(item_vlan),
1108 [ITEM_VLAN_TPID] = {
1110 .help = "tag protocol identifier",
1111 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1112 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1116 .help = "tag control information",
1117 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1118 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1122 .help = "priority code point",
1123 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1124 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1129 .help = "drop eligible indicator",
1130 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1131 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1136 .help = "VLAN identifier",
1137 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1138 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1143 .help = "match IPv4 header",
1144 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1145 .next = NEXT(item_ipv4),
1150 .help = "type of service",
1151 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1152 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1153 hdr.type_of_service)),
1157 .help = "time to live",
1158 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1159 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1162 [ITEM_IPV4_PROTO] = {
1164 .help = "next protocol ID",
1165 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1166 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1167 hdr.next_proto_id)),
1171 .help = "source address",
1172 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1173 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1178 .help = "destination address",
1179 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1180 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1185 .help = "match IPv6 header",
1186 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1187 .next = NEXT(item_ipv6),
1192 .help = "traffic class",
1193 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1194 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1196 "\x0f\xf0\x00\x00")),
1198 [ITEM_IPV6_FLOW] = {
1200 .help = "flow label",
1201 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1202 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1204 "\x00\x0f\xff\xff")),
1206 [ITEM_IPV6_PROTO] = {
1208 .help = "protocol (next header)",
1209 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1210 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1215 .help = "hop limit",
1216 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1217 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1222 .help = "source address",
1223 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1224 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1229 .help = "destination address",
1230 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1231 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1236 .help = "match ICMP header",
1237 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1238 .next = NEXT(item_icmp),
1241 [ITEM_ICMP_TYPE] = {
1243 .help = "ICMP packet type",
1244 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1245 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1248 [ITEM_ICMP_CODE] = {
1250 .help = "ICMP packet code",
1251 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1257 .help = "match UDP header",
1258 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1259 .next = NEXT(item_udp),
1264 .help = "UDP source port",
1265 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1266 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1271 .help = "UDP destination port",
1272 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1273 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1278 .help = "match TCP header",
1279 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1280 .next = NEXT(item_tcp),
1285 .help = "TCP source port",
1286 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1287 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1292 .help = "TCP destination port",
1293 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1294 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1297 [ITEM_TCP_FLAGS] = {
1299 .help = "TCP flags",
1300 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1301 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1306 .help = "match SCTP header",
1307 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1308 .next = NEXT(item_sctp),
1313 .help = "SCTP source port",
1314 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1315 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1320 .help = "SCTP destination port",
1321 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1322 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1327 .help = "validation tag",
1328 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1329 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1332 [ITEM_SCTP_CKSUM] = {
1335 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1336 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1341 .help = "match VXLAN header",
1342 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1343 .next = NEXT(item_vxlan),
1346 [ITEM_VXLAN_VNI] = {
1348 .help = "VXLAN identifier",
1349 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1350 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1354 .help = "match E-Tag header",
1355 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1356 .next = NEXT(item_e_tag),
1359 [ITEM_E_TAG_GRP_ECID_B] = {
1360 .name = "grp_ecid_b",
1361 .help = "GRP and E-CID base",
1362 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1363 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1369 .help = "match NVGRE header",
1370 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1371 .next = NEXT(item_nvgre),
1374 [ITEM_NVGRE_TNI] = {
1376 .help = "virtual subnet ID",
1377 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1378 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1382 .help = "match MPLS header",
1383 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1384 .next = NEXT(item_mpls),
1387 [ITEM_MPLS_LABEL] = {
1389 .help = "MPLS label",
1390 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1391 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1397 .help = "match GRE header",
1398 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1399 .next = NEXT(item_gre),
1402 [ITEM_GRE_PROTO] = {
1404 .help = "GRE protocol type",
1405 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1406 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1411 .help = "fuzzy pattern match, expect faster than default",
1412 .priv = PRIV_ITEM(FUZZY,
1413 sizeof(struct rte_flow_item_fuzzy)),
1414 .next = NEXT(item_fuzzy),
1417 [ITEM_FUZZY_THRESH] = {
1419 .help = "match accuracy threshold",
1420 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1421 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1425 /* Validate/create actions. */
1428 .help = "submit a list of associated actions",
1429 .next = NEXT(next_action),
1434 .help = "specify next action",
1435 .next = NEXT(next_action),
1439 .help = "end list of actions",
1440 .priv = PRIV_ACTION(END, 0),
1445 .help = "no-op action",
1446 .priv = PRIV_ACTION(VOID, 0),
1447 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1450 [ACTION_PASSTHRU] = {
1452 .help = "let subsequent rule process matched packets",
1453 .priv = PRIV_ACTION(PASSTHRU, 0),
1454 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1459 .help = "attach 32 bit value to packets",
1460 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1461 .next = NEXT(action_mark),
1464 [ACTION_MARK_ID] = {
1466 .help = "32 bit value to return with packets",
1467 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1468 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1469 .call = parse_vc_conf,
1473 .help = "flag packets",
1474 .priv = PRIV_ACTION(FLAG, 0),
1475 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1480 .help = "assign packets to a given queue index",
1481 .priv = PRIV_ACTION(QUEUE,
1482 sizeof(struct rte_flow_action_queue)),
1483 .next = NEXT(action_queue),
1486 [ACTION_QUEUE_INDEX] = {
1488 .help = "queue index to use",
1489 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1490 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1491 .call = parse_vc_conf,
1495 .help = "drop packets (note: passthru has priority)",
1496 .priv = PRIV_ACTION(DROP, 0),
1497 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1502 .help = "enable counters for this rule",
1503 .priv = PRIV_ACTION(COUNT, 0),
1504 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1509 .help = "duplicate packets to a given queue index",
1510 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1511 .next = NEXT(action_dup),
1514 [ACTION_DUP_INDEX] = {
1516 .help = "queue index to duplicate packets to",
1517 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1518 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1519 .call = parse_vc_conf,
1523 .help = "spread packets among several queues",
1524 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1525 .next = NEXT(action_rss),
1528 [ACTION_RSS_QUEUES] = {
1530 .help = "queue indices to use",
1531 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1532 .call = parse_vc_conf,
1534 [ACTION_RSS_QUEUE] = {
1536 .help = "queue index",
1537 .call = parse_vc_action_rss_queue,
1538 .comp = comp_vc_action_rss_queue,
1542 .help = "redirect packets to physical device function",
1543 .priv = PRIV_ACTION(PF, 0),
1544 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1549 .help = "redirect packets to virtual device function",
1550 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1551 .next = NEXT(action_vf),
1554 [ACTION_VF_ORIGINAL] = {
1556 .help = "use original VF ID if possible",
1557 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1558 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1560 .call = parse_vc_conf,
1564 .help = "VF ID to redirect packets to",
1565 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1566 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1567 .call = parse_vc_conf,
1571 /** Remove and return last entry from argument stack. */
1572 static const struct arg *
1573 pop_args(struct context *ctx)
1575 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1578 /** Add entry on top of the argument stack. */
1580 push_args(struct context *ctx, const struct arg *arg)
1582 if (ctx->args_num == CTX_STACK_SIZE)
1584 ctx->args[ctx->args_num++] = arg;
1588 /** Spread value into buffer according to bit-mask. */
1590 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1592 uint32_t i = arg->size;
1600 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1609 unsigned int shift = 0;
1610 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1612 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1613 if (!(arg->mask[i] & (1 << shift)))
1618 *buf &= ~(1 << shift);
1619 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * Returns 0 when the first @p partial_len bytes match and @p full is not
 * longer than @p partial_len (i.e. the partial string covers all of
 * @p full). Otherwise returns a nonzero value: the strncmp() difference,
 * or the first unmatched character of @p full when the prefix matches
 * but @p full continues.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
1641 * Parse a prefix length and generate a bit-mask.
1643 * Last argument (ctx->args) is retrieved to determine mask size, storage
1644 * location and whether the result must use network byte ordering.
1647 parse_prefix(struct context *ctx, const struct token *token,
1648 const char *str, unsigned int len,
1649 void *buf, unsigned int size)
1651 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n most-significant bits set (partial mask). */
1652 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1659 /* Argument is expected. */
/* Parse the prefix length as an unsigned integer; entire token must be
 * consumed. */
1663 u = strtoumax(str, &end, 0);
1664 if (errno || (size_t)(end - str) != len)
/* Probe the bit-mask path first (NULL dst); non-zero means the argument
 * uses a bit-field mask rather than a plain byte range. */
1669 extra = arg_entry_bf_fill(NULL, 0, arg);
1678 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1679 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes wider than the storage. */
1686 if (bytes > size || bytes + !!extra > size)
1690 buf = (uint8_t *)ctx->object + arg->offset;
1691 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host little-endian: fill full 0xff bytes from the high end. */
1693 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1694 memset(buf, 0x00, size - bytes);
1696 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian / network order: fill 0xff bytes from the start. */
1700 memset(buf, 0xff, bytes);
1701 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1703 ((uint8_t *)buf)[bytes] = conv[extra];
/* objmask, when present, is fully set for this field. */
1706 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): push_args here appears to be the error path restoring the
 * popped argument — intermediate lines missing; confirm upstream. */
1709 push_args(ctx, arg);
1713 /** Default parsing function for token name matching. */
/* Accepts the token when str is a valid (possibly partial) spelling of
 * token->name; no output is produced. */
1715 parse_default(struct context *ctx, const struct token *token,
1716 const char *str, unsigned int len,
1717 void *buf, unsigned int size)
1722 if (strcmp_partial(token->name, str, len))
1727 /** Parse flow command, initialize output buffer for subsequent tokens. */
1729 parse_init(struct context *ctx, const struct token *token,
1730 const char *str, unsigned int len,
1731 void *buf, unsigned int size)
1733 struct buffer *out = buf;
1735 /* Token name must match. */
1736 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1738 /* Nothing else to do if there is no buffer. */
1741 /* Make sure buffer is large enough. */
1742 if (size < sizeof(*out))
1744 /* Initialize buffer. */
/* 0x22 poison beyond the header makes use of uninitialized trailing
 * storage easier to spot while debugging. */
1745 memset(out, 0x00, sizeof(*out));
1746 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1749 ctx->objmask = NULL;
1753 /** Parse tokens for validate/create commands. */
/* Handles the whole validate/create token stream: command keyword, flow
 * attributes (ingress/egress), then pattern items and actions, which are
 * appended to arrays grown upward from the buffer start while their
 * spec/mask/conf payloads grow downward from the buffer end. */
1755 parse_vc(struct context *ctx, const struct token *token,
1756 const char *str, unsigned int len,
1757 void *buf, unsigned int size)
1759 struct buffer *out = buf;
1763 /* Token name must match. */
1764 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1766 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE and set the
 * downward-growing data pointer to the end of the buffer. */
1769 if (!out->command) {
1770 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1772 if (sizeof(*out) > size)
1774 out->command = ctx->curr;
1777 ctx->objmask = NULL;
1778 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the rte_flow_attr. */
1782 ctx->object = &out->args.vc.attr;
1783 ctx->objmask = NULL;
1784 switch (ctx->curr) {
1789 out->args.vc.attr.ingress = 1;
1792 out->args.vc.attr.egress = 1;
/* "pattern" keyword: start the item array right after the header,
 * aligned. */
1795 out->args.vc.pattern =
1796 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1798 ctx->object = out->args.vc.pattern;
1799 ctx->objmask = NULL;
/* "actions" keyword: actions array begins after the last pattern item. */
1802 out->args.vc.actions =
1803 (void *)RTE_ALIGN_CEIL((uintptr_t)
1804 (out->args.vc.pattern +
1805 out->args.vc.pattern_n),
1807 ctx->object = out->args.vc.actions;
1808 ctx->objmask = NULL;
/* Pattern item token: reserve spec/last/mask storage from the top of the
 * buffer and append a new rte_flow_item. */
1815 if (!out->args.vc.actions) {
1816 const struct parse_item_priv *priv = token->priv;
1817 struct rte_flow_item *item =
1818 out->args.vc.pattern + out->args.vc.pattern_n;
1820 data_size = priv->size * 3; /* spec, last, mask */
1821 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1822 (out->args.vc.data - data_size),
/* Fail when the upward item array would collide with downward data. */
1824 if ((uint8_t *)item + sizeof(*item) > data)
1826 *item = (struct rte_flow_item){
1829 ++out->args.vc.pattern_n;
1831 ctx->objmask = NULL;
/* Otherwise this token is an action: reserve its configuration storage
 * and append a new rte_flow_action. */
1833 const struct parse_action_priv *priv = token->priv;
1834 struct rte_flow_action *action =
1835 out->args.vc.actions + out->args.vc.actions_n;
1837 data_size = priv->size; /* configuration */
1838 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1839 (out->args.vc.data - data_size),
1841 if ((uint8_t *)action + sizeof(*action) > data)
1843 *action = (struct rte_flow_action){
1846 ++out->args.vc.actions_n;
1847 ctx->object = action;
1848 ctx->objmask = NULL;
/* Commit the freshly reserved data region. */
1850 memset(data, 0, data_size);
1851 out->args.vc.data = data;
1852 ctx->objdata = data_size;
1856 /** Parse pattern item parameter type. */
/* Selects which of the three per-item buffers (spec/last/mask) subsequent
 * field tokens will write into, based on the spec/last/mask/prefix
 * keyword just parsed. */
1858 parse_vc_spec(struct context *ctx, const struct token *token,
1859 const char *str, unsigned int len,
1860 void *buf, unsigned int size)
1862 struct buffer *out = buf;
1863 struct rte_flow_item *item;
1869 /* Token name must match. */
1870 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1872 /* Parse parameter types. */
1873 switch (ctx->curr) {
1874 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1880 case ITEM_PARAM_SPEC:
1883 case ITEM_PARAM_LAST:
1886 case ITEM_PARAM_PREFIX:
1887 /* Modify next token to expect a prefix. */
1888 if (ctx->next_num < 2)
1890 ctx->next[ctx->next_num - 2] = prefix;
1892 case ITEM_PARAM_MASK:
1898 /* Nothing else to do if there is no buffer. */
1901 if (!out->args.vc.pattern_n)
/* Operate on the most recently appended pattern item. */
1903 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1904 data_size = ctx->objdata / 3; /* spec, last, mask */
1905 /* Point to selected object. */
1906 ctx->object = out->args.vc.data + (data_size * index);
1908 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1909 item->mask = ctx->objmask;
1911 ctx->objmask = NULL;
1912 /* Update relevant item pointer. */
1913 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1918 /** Parse action configuration field. */
/* Attaches the reserved data region as the conf of the most recently
 * appended action and points ctx->object at it for field tokens. */
1920 parse_vc_conf(struct context *ctx, const struct token *token,
1921 const char *str, unsigned int len,
1922 void *buf, unsigned int size)
1924 struct buffer *out = buf;
1925 struct rte_flow_action *action;
1928 /* Token name must match. */
1929 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1931 /* Nothing else to do if there is no buffer. */
1934 if (!out->args.vc.actions_n)
1936 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1937 /* Point to selected object. */
1938 ctx->object = out->args.vc.data;
1939 ctx->objmask = NULL;
1940 /* Update configuration pointer. */
1941 action->conf = ctx->object;
1946 * Parse queue field for RSS action.
1948 * Valid tokens are queue indices and the "end" token.
1951 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1952 const char *str, unsigned int len,
1953 void *buf, unsigned int size)
1955 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1962 if (ctx->curr != ACTION_RSS_QUEUE)
/* The running queue count is kept in the upper 16 bits of objdata. */
1964 i = ctx->objdata >> 16;
/* "end" terminates the queue list; clear the count from objdata. */
1965 if (!strcmp_partial("end", str, len)) {
1966 ctx->objdata &= 0xffff;
/* Reject more queues than the action structure can hold. */
1969 if (i >= ACTION_RSS_NUM)
/* Delegate the numeric parsing of queue[i] to parse_int(). */
1971 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1973 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the upper 16 bits. */
1979 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-arm this token so another queue index (or "end") is expected. */
1981 if (ctx->next_num == RTE_DIM(ctx->next))
1983 ctx->next[ctx->next_num++] = next;
1986 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1990 /** Parse tokens for destroy command. */
/* First call records the DESTROY command and sets up the rule-ID array
 * right after the header; subsequent calls append one rule ID each. */
1992 parse_destroy(struct context *ctx, const struct token *token,
1993 const char *str, unsigned int len,
1994 void *buf, unsigned int size)
1996 struct buffer *out = buf;
1998 /* Token name must match. */
1999 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2001 /* Nothing else to do if there is no buffer. */
2004 if (!out->command) {
2005 if (ctx->curr != DESTROY)
2007 if (sizeof(*out) > size)
2009 out->command = ctx->curr;
2012 ctx->objmask = NULL;
2013 out->args.destroy.rule =
2014 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next rule-ID slot must still fit inside the buffer. */
2018 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2019 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2022 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2023 ctx->objmask = NULL;
2027 /** Parse tokens for flush command. */
/* Flush takes no arguments beyond the port; only the command is recorded. */
2029 parse_flush(struct context *ctx, const struct token *token,
2030 const char *str, unsigned int len,
2031 void *buf, unsigned int size)
2033 struct buffer *out = buf;
2035 /* Token name must match. */
2036 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2038 /* Nothing else to do if there is no buffer. */
2041 if (!out->command) {
2042 if (ctx->curr != FLUSH)
2044 if (sizeof(*out) > size)
2046 out->command = ctx->curr;
2049 ctx->objmask = NULL;
2054 /** Parse tokens for query command. */
/* Records the QUERY command; rule ID and action name are filled in by
 * later tokens (see args.query usage in cmd_flow_parsed()). */
2056 parse_query(struct context *ctx, const struct token *token,
2057 const char *str, unsigned int len,
2058 void *buf, unsigned int size)
2060 struct buffer *out = buf;
2062 /* Token name must match. */
2063 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2065 /* Nothing else to do if there is no buffer. */
2068 if (!out->command) {
2069 if (ctx->curr != QUERY)
2071 if (sizeof(*out) > size)
2073 out->command = ctx->curr;
2076 ctx->objmask = NULL;
2081 /** Parse action names. */
/* Matches str against the names of all known actions (next_action list)
 * and stores the matched action's identifier through the popped argument
 * descriptor. */
2083 parse_action(struct context *ctx, const struct token *token,
2084 const char *str, unsigned int len,
2085 void *buf, unsigned int size)
2087 struct buffer *out = buf;
2088 const struct arg *arg = pop_args(ctx);
2092 /* Argument is expected. */
2095 /* Parse action name. */
2096 for (i = 0; next_action[i]; ++i) {
2097 const struct parse_action_priv *priv;
2099 token = &token_list[next_action[i]];
2100 if (strcmp_partial(token->name, str, len))
2106 memcpy((uint8_t *)ctx->object + arg->offset,
/* NOTE(review): this push_args appears to be the error path restoring the
 * popped argument — intermediate lines missing; confirm upstream. */
2112 push_args(ctx, arg);
2116 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): first call records LIST and anchors the group
 * array after the header; each later call appends one group ID. */
2118 parse_list(struct context *ctx, const struct token *token,
2119 const char *str, unsigned int len,
2120 void *buf, unsigned int size)
2122 struct buffer *out = buf;
2124 /* Token name must match. */
2125 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2127 /* Nothing else to do if there is no buffer. */
2130 if (!out->command) {
2131 if (ctx->curr != LIST)
2133 if (sizeof(*out) > size)
2135 out->command = ctx->curr;
2138 ctx->objmask = NULL;
2139 out->args.list.group =
2140 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next group slot must still fit inside the buffer. */
2144 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2145 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2148 ctx->object = out->args.list.group + out->args.list.group_n++;
2149 ctx->objmask = NULL;
2153 /** Parse tokens for isolate command. */
/* Records the ISOLATE command; the boolean set/unset value is parsed by a
 * later token (args.isolate.set, see cmd_flow_parsed()). */
2155 parse_isolate(struct context *ctx, const struct token *token,
2156 const char *str, unsigned int len,
2157 void *buf, unsigned int size)
2159 struct buffer *out = buf;
2161 /* Token name must match. */
2162 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2164 /* Nothing else to do if there is no buffer. */
2167 if (!out->command) {
2168 if (ctx->curr != ISOLATE)
2170 if (sizeof(*out) > size)
2172 out->command = ctx->curr;
2175 ctx->objmask = NULL;
2181 * Parse signed/unsigned integers 8 to 64-bit long.
2183 * Last argument (ctx->args) is retrieved to determine integer type and
2187 parse_int(struct context *ctx, const struct token *token,
2188 const char *str, unsigned int len,
2189 void *buf, unsigned int size)
2191 const struct arg *arg = pop_args(ctx);
2196 /* Argument is expected. */
/* Choose signed vs unsigned conversion; entire token must be consumed. */
2201 (uintmax_t)strtoimax(str, &end, 0) :
2202 strtoumax(str, &end, 0);
2203 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments go through the mask-driven filler; the objmask (if
 * any) is set to all-ones for the same bits. */
2208 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2209 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2213 buf = (uint8_t *)ctx->object + arg->offset;
/* Plain storage: dispatch on field width, honoring arg->hton for network
 * byte order. */
2217 case sizeof(uint8_t):
2218 *(uint8_t *)buf = u;
2220 case sizeof(uint16_t):
2221 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI-style IDs) are written byte by byte. */
2223 case sizeof(uint8_t [3]):
2224 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2226 ((uint8_t *)buf)[0] = u;
2227 ((uint8_t *)buf)[1] = u >> 8;
2228 ((uint8_t *)buf)[2] = u >> 16;
2232 ((uint8_t *)buf)[0] = u >> 16;
2233 ((uint8_t *)buf)[1] = u >> 8;
2234 ((uint8_t *)buf)[2] = u;
2236 case sizeof(uint32_t):
2237 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2239 case sizeof(uint64_t):
2240 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask (all-ones) when one is attached and it is
 * a distinct location. */
2245 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2247 buf = (uint8_t *)ctx->objmask + arg->offset;
/* NOTE(review): error path restoring the popped argument — intermediate
 * lines missing; confirm upstream. */
2252 push_args(ctx, arg);
2259 * Two arguments (ctx->args) are retrieved from the stack to store data and
2260 * its length (in that order).
2263 parse_string(struct context *ctx, const struct token *token,
2264 const char *str, unsigned int len,
2265 void *buf, unsigned int size)
2267 const struct arg *arg_data = pop_args(ctx);
2268 const struct arg *arg_len = pop_args(ctx);
2269 char tmp[16]; /* Ought to be enough. */
2272 /* Arguments are expected. */
/* arg_len was missing: restore arg_data before bailing out. */
2276 push_args(ctx, arg_data);
2279 size = arg_data->size;
2280 /* Bit-mask fill is not supported. */
2281 if (arg_data->mask || size < len)
2285 /* Let parse_int() fill length information first. */
/* Render the length as decimal text and feed it to parse_int() with the
 * length argument re-pushed. */
2286 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2289 push_args(ctx, arg_len);
2290 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2295 buf = (uint8_t *)ctx->object + arg_data->offset;
2296 /* Output buffer is not necessarily NUL-terminated. */
/* 0x55 poison pads the unused tail so stale bytes stand out. */
2297 memcpy(buf, str, len);
2298 memset((uint8_t *)buf + len, 0x55, size - len);
2300 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* NOTE(review): error path restoring both popped arguments in reverse
 * order — intermediate lines missing; confirm upstream. */
2303 push_args(ctx, arg_len);
2304 push_args(ctx, arg_data);
2309 * Parse a MAC address.
2311 * Last argument (ctx->args) is retrieved to determine storage size and
2315 parse_mac_addr(struct context *ctx, const struct token *token,
2316 const char *str, unsigned int len,
2317 void *buf, unsigned int size)
2319 const struct arg *arg = pop_args(ctx);
2320 struct ether_addr tmp;
2324 /* Argument is expected. */
2328 /* Bit-mask fill is not supported. */
2329 if (arg->mask || size != sizeof(tmp))
2331 /* Only network endian is supported. */
/* Delegate textual parsing to the cmdline library; the whole token must
 * be consumed. */
2334 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2335 if (ret < 0 || (unsigned int)ret != len)
2339 buf = (uint8_t *)ctx->object + arg->offset;
2340 memcpy(buf, &tmp, size);
/* objmask, when present, is fully set for this field. */
2342 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size)
/* NOTE(review): original line 2342 ends with ';' upstream; this extract may
 * have dropped trailing characters — verify against the source tree. */
2345 push_args(ctx, arg);
2350 * Parse an IPv4 address.
2352 * Last argument (ctx->args) is retrieved to determine storage size and
2356 parse_ipv4_addr(struct context *ctx, const struct token *token,
2357 const char *str, unsigned int len,
2358 void *buf, unsigned int size)
2360 const struct arg *arg = pop_args(ctx);
2365 /* Argument is expected. */
2369 /* Bit-mask fill is not supported. */
2370 if (arg->mask || size != sizeof(tmp))
2372 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2375 memcpy(str2, str, len);
2377 ret = inet_pton(AF_INET, str2, &tmp);
2379 /* Attempt integer parsing. */
/* Not dotted-quad notation: fall back to parse_int() with the argument
 * restored. */
2380 push_args(ctx, arg);
2381 return parse_int(ctx, token, str, len, buf, size);
2385 buf = (uint8_t *)ctx->object + arg->offset;
2386 memcpy(buf, &tmp, size);
/* objmask, when present, is fully set for this field. */
2388 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2391 push_args(ctx, arg);
2396 * Parse an IPv6 address.
2398 * Last argument (ctx->args) is retrieved to determine storage size and
2402 parse_ipv6_addr(struct context *ctx, const struct token *token,
2403 const char *str, unsigned int len,
2404 void *buf, unsigned int size)
2406 const struct arg *arg = pop_args(ctx);
2408 struct in6_addr tmp;
2412 /* Argument is expected. */
2416 /* Bit-mask fill is not supported. */
2417 if (arg->mask || size != sizeof(tmp))
2419 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(); unlike the
 * IPv4 variant there is no integer fallback here. */
2422 memcpy(str2, str, len);
2424 ret = inet_pton(AF_INET6, str2, &tmp);
2429 buf = (uint8_t *)ctx->object + arg->offset;
2430 memcpy(buf, &tmp, size);
/* objmask, when present, is fully set for this field. */
2432 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2435 push_args(ctx, arg);
2439 /** Boolean values (even indices stand for false). */
/* NOTE(review): the string entries and terminating NULL of this array are
 * missing from this extract — parse_boolean()/comp_boolean() iterate until
 * a NULL sentinel; confirm the table upstream. */
2440 static const char *const boolean_name[] = {
2449 * Parse a boolean value.
2451 * Last argument (ctx->args) is retrieved to determine storage size and
2455 parse_boolean(struct context *ctx, const struct token *token,
2456 const char *str, unsigned int len,
2457 void *buf, unsigned int size)
2459 const struct arg *arg = pop_args(ctx);
2463 /* Argument is expected. */
/* Look the token up in boolean_name[]; even indices mean false. */
2466 for (i = 0; boolean_name[i]; ++i)
2467 if (!strcmp_partial(boolean_name[i], str, len))
2469 /* Process token as integer. */
/* Matched names are rewritten to "0"/"1"; unmatched input falls through to
 * parse_int() as-is. */
2470 if (boolean_name[i])
2471 str = i & 1 ? "1" : "0";
2472 push_args(ctx, arg);
2473 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the ORIGINAL token length on success, not the rewritten one. */
2474 return ret > 0 ? (int)len : ret;
2477 /** Parse port and update context. */
/* Parses the port ID into a throwaway buffer and records it in ctx->port
 * so later completion callbacks (e.g. comp_rule_id) know which port the
 * command targets. */
2479 parse_port(struct context *ctx, const struct token *token,
2480 const char *str, unsigned int len,
2481 void *buf, unsigned int size)
2483 struct buffer *out = &(struct buffer){ .port = 0 };
2491 ctx->objmask = NULL;
2492 size = sizeof(*out);
2494 ret = parse_int(ctx, token, str, len, out, size);
2496 ctx->port = out->port;
2502 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest. */
2504 comp_none(struct context *ctx, const struct token *token,
2505 unsigned int ent, char *buf, unsigned int size)
2515 /** Complete boolean values. */
/* With buf set, returns entry #ent; with buf NULL (per the cmdline
 * completion convention), counts available entries. */
2517 comp_boolean(struct context *ctx, const struct token *token,
2518 unsigned int ent, char *buf, unsigned int size)
2524 for (i = 0; boolean_name[i]; ++i)
2525 if (buf && i == ent)
2526 return snprintf(buf, size, "%s", boolean_name[i]);
2532 /** Complete action names. */
/* Walks the next_action token list; same buf-NULL counting convention as
 * comp_boolean(). */
2534 comp_action(struct context *ctx, const struct token *token,
2535 unsigned int ent, char *buf, unsigned int size)
2541 for (i = 0; next_action[i]; ++i)
2542 if (buf && i == ent)
2543 return snprintf(buf, size, "%s",
2544 token_list[next_action[i]].name);
2550 /** Complete available ports. */
/* Enumerates attached Ethernet devices as completion candidates. */
2552 comp_port(struct context *ctx, const struct token *token,
2553 unsigned int ent, char *buf, unsigned int size)
2560 RTE_ETH_FOREACH_DEV(p) {
2561 if (buf && i == ent)
2562 return snprintf(buf, size, "%u", p);
2570 /** Complete available rule IDs. */
/* Walks the flow list of the port recorded earlier by parse_port();
 * bails out when the port is invalid or means "all ports". */
2572 comp_rule_id(struct context *ctx, const struct token *token,
2573 unsigned int ent, char *buf, unsigned int size)
2576 struct rte_port *port;
2577 struct port_flow *pf;
2580 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2581 ctx->port == (uint16_t)RTE_PORT_ALL)
2583 port = &ports[ctx->port];
2584 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2585 if (buf && i == ent)
2586 return snprintf(buf, size, "%u", pf->id);
2594 /** Complete queue field for RSS action. */
/* Only suggests the "end" terminator (index 1); the empty first entry
 * leaves room for free-form queue numbers. */
2596 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2597 unsigned int ent, char *buf, unsigned int size)
2599 static const char *const str[] = { "", "end", NULL };
2604 for (i = 0; str[i] != NULL; ++i)
2605 if (buf && i == ent)
2606 return snprintf(buf, size, "%s", str[i]);
2612 /** Internal context. */
/* Single shared parser state; the cmdline callbacks below all operate on
 * it, so parsing is not reentrant. */
2613 static struct context cmd_flow_context;
2615 /** Global parser instance (cmdline API). */
/* Forward declaration; defined near the end of the file and referenced by
 * cmd_flow_tok()/cmd_flow_get_help(). */
2616 cmdline_parse_inst_t cmd_flow;
2618 /** Initialize context. */
2620 cmd_flow_context_init(struct context *ctx)
2622 /* A full memset() is not necessary. */
/* NOTE(review): per-field resets between these lines are missing from this
 * extract; only objmask clearing is visible. */
2632 ctx->objmask = NULL;
2635 /** Parse a token (cmdline API). */
/* Entry point invoked by the cmdline library for each input token: trims
 * the token, detects end-of-line, then tries every candidate token in the
 * current "next" list until one accepts the input. */
2637 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2640 struct context *ctx = &cmd_flow_context;
2641 const struct token *token;
2642 const enum index *list;
2647 token = &token_list[ctx->curr];
2648 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2651 for (len = 0; src[len]; ++len)
2652 if (src[len] == '#' || isspace(src[len]))
2656 /* Last argument and EOL detection. */
2657 for (i = len; src[i]; ++i)
2658 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2660 else if (!isspace(src[i])) {
2665 if (src[i] == '\r' || src[i] == '\n') {
2669 /* Initialize context if necessary. */
2670 if (!ctx->next_num) {
2673 ctx->next[ctx->next_num++] = token->next[0];
2675 /* Process argument through candidates. */
2676 ctx->prev = ctx->curr;
2677 list = ctx->next[ctx->next_num - 1];
2678 for (i = 0; list[i]; ++i) {
2679 const struct token *next = &token_list[list[i]];
2682 ctx->curr = list[i];
/* Tokens with a custom call() parse themselves; others fall back to plain
 * name matching. */
2684 tmp = next->call(ctx, next, src, len, result, size);
2686 tmp = parse_default(ctx, next, src, len, result, size);
/* Candidate must consume the token exactly. */
2687 if (tmp == -1 || tmp != len)
2695 /* Push subsequent tokens if any. */
2697 for (i = 0; token->next[i]; ++i) {
2698 if (ctx->next_num == RTE_DIM(ctx->next))
2700 ctx->next[ctx->next_num++] = token->next[i];
2702 /* Push arguments if any. */
2704 for (i = 0; token->args[i]; ++i) {
2705 if (ctx->args_num == RTE_DIM(ctx->args))
2707 ctx->args[ctx->args_num++] = token->args[i];
2712 /** Return number of completion entries (cmdline API). */
2714 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2716 struct context *ctx = &cmd_flow_context;
2717 const struct token *token = &token_list[ctx->curr];
2718 const enum index *list;
2722 /* Count number of tokens in current list. */
2724 list = ctx->next[ctx->next_num - 1];
2726 list = token->next[0];
2727 for (i = 0; list[i]; ++i)
2732 * If there is a single token, use its completion callback, otherwise
2733 * return the number of entries.
2735 token = &token_list[list[0]];
2736 if (i == 1 && token->comp) {
2737 /* Save index for cmd_flow_get_help(). */
2738 ctx->prev = list[0];
/* comp() with a NULL buffer returns the entry count by convention. */
2739 return token->comp(ctx, token, 0, NULL, 0);
2744 /** Return a completion entry (cmdline API). */
/* Same token-list walk as cmd_flow_complete_get_nb(), but fills dst with
 * the requested entry's text. */
2746 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2747 char *dst, unsigned int size)
2749 struct context *ctx = &cmd_flow_context;
2750 const struct token *token = &token_list[ctx->curr];
2751 const enum index *list;
2755 /* Count number of tokens in current list. */
2757 list = ctx->next[ctx->next_num - 1];
2759 list = token->next[0];
2760 for (i = 0; list[i]; ++i)
2764 /* If there is a single token, use its completion callback. */
2765 token = &token_list[list[0]];
2766 if (i == 1 && token->comp) {
2767 /* Save index for cmd_flow_get_help(). */
2768 ctx->prev = list[0];
2769 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2771 /* Otherwise make sure the index is valid and use defaults. */
2774 token = &token_list[list[index]];
2775 snprintf(dst, size, "%s", token->name);
2776 /* Save index for cmd_flow_get_help(). */
2777 ctx->prev = list[index];
2781 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks above) so the help
 * refers to the token the user is actually completing. */
2783 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2785 struct context *ctx = &cmd_flow_context;
2786 const struct token *token = &token_list[ctx->prev];
2791 /* Set token type and update global help with details. */
2792 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help text; fall back to the token name. */
2794 cmd_flow.help_str = token->help;
2796 cmd_flow.help_str = token->name;
2800 /** Token definition template (cmdline API). */
/* One shared header serves every dynamically generated token: all four
 * callbacks dispatch through cmd_flow_context instead of per-token data. */
2801 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2802 .ops = &(struct cmdline_token_ops){
2803 .parse = cmd_flow_parse,
2804 .complete_get_nb = cmd_flow_complete_get_nb,
2805 .complete_get_elt = cmd_flow_complete_get_elt,
2806 .get_help = cmd_flow_get_help,
2811 /** Populate the next dynamic token. */
2813 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2814 cmdline_parse_token_hdr_t **hdr_inst)
2816 struct context *ctx = &cmd_flow_context;
2818 /* Always reinitialize context before requesting the first token. */
/* hdr_inst == cmd_flow.tokens identifies the first slot. */
2819 if (!(hdr_inst - cmd_flow.tokens))
2820 cmd_flow_context_init(ctx);
2821 /* Return NULL when no more tokens are expected. */
2822 if (!ctx->next_num && ctx->curr) {
2826 /* Determine if command should end here. */
2827 if (ctx->eol && ctx->last && ctx->next_num) {
2828 const enum index *list = ctx->next[ctx->next_num - 1];
2831 for (i = 0; list[i]; ++i) {
/* Hand the shared token header back to the cmdline library. */
2838 *hdr = &cmd_flow_token_hdr;
2841 /** Dispatch parsed buffer to function calls. */
/* Bridges the parsed command buffer to the port_flow_* backends; one case
 * per command recorded by the parse_* callbacks above. */
2843 cmd_flow_parsed(const struct buffer *in)
2845 switch (in->command) {
2847 port_flow_validate(in->port, &in->args.vc.attr,
2848 in->args.vc.pattern, in->args.vc.actions);
2851 port_flow_create(in->port, &in->args.vc.attr,
2852 in->args.vc.pattern, in->args.vc.actions);
2855 port_flow_destroy(in->port, in->args.destroy.rule_n,
2856 in->args.destroy.rule);
2859 port_flow_flush(in->port);
2862 port_flow_query(in->port, in->args.query.rule,
2863 in->args.query.action);
2866 port_flow_list(in->port, in->args.list.group_n,
2867 in->args.list.group);
2870 port_flow_isolate(in->port, in->args.isolate.set);
2877 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose callback: generates the next dynamic token during parsing
 * and dispatches the completed buffer once the command line is accepted. */
2879 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2882 cmd_flow_tok(arg0, arg2);
2884 cmd_flow_parsed(arg0);
2887 /** Global parser instance (cmdline API). */
2888 cmdline_parse_inst_t cmd_flow = {
2890 .data = NULL, /**< Unused. */
2891 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2894 }, /**< Tokens are returned by cmd_flow_tok(). */