4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
85 /* Destroy arguments. */
88 /* Query arguments. */
94 /* Validate/create arguments. */
100 /* Validate/create pattern. */
165 ITEM_E_TAG_GRP_ECID_B,
182 /* Validate/create actions. */
208 /** Size of pattern[] field in struct rte_flow_item_raw. */
209 #define ITEM_RAW_PATTERN_SIZE 36
211 /** Storage size for struct rte_flow_item_raw including pattern. */
212 #define ITEM_RAW_SIZE \
213 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
215 /** Number of queue[] entries in struct rte_flow_action_rss. */
216 #define ACTION_RSS_NUM 32
218 /** Storage size for struct rte_flow_action_rss including queues. */
219 #define ACTION_RSS_SIZE \
220 (offsetof(struct rte_flow_action_rss, queue) + \
221 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
223 /** Maximum number of subsequent tokens and arguments on the stack. */
224 #define CTX_STACK_SIZE 16
226 /** Parser context. */
228 /** Stack of subsequent token lists to process. */
229 const enum index *next[CTX_STACK_SIZE];
230 /** Arguments for stacked tokens. */
231 const void *args[CTX_STACK_SIZE];
232 enum index curr; /**< Current token index. */
233 enum index prev; /**< Index of the last token seen. */
234 int next_num; /**< Number of entries in next[]. */
235 int args_num; /**< Number of entries in args[]. */
236 uint32_t eol:1; /**< EOL has been detected. */
237 uint32_t last:1; /**< No more arguments. */
238 portid_t port; /**< Current port ID (for completions). */
239 uint32_t objdata; /**< Object-specific data. */
240 void *object; /**< Address of current object for relative offsets. */
241 void *objmask; /**< Object a full mask must be written to. */
244 /** Token argument. */
246 uint32_t hton:1; /**< Use network byte ordering. */
247 uint32_t sign:1; /**< Value is signed. */
248 uint32_t offset; /**< Relative offset from ctx->object. */
249 uint32_t size; /**< Field size. */
250 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
253 /** Parser token definition. */
255 /** Type displayed during completion (defaults to "TOKEN"). */
257 /** Help displayed during completion (defaults to token name). */
259 /** Private data used by parser functions. */
262 * Lists of subsequent tokens to push on the stack. Each call to the
263 * parser consumes the last entry of that stack.
265 const enum index *const *next;
266 /** Arguments stack for subsequent tokens that need them. */
267 const struct arg *const *args;
269 * Token-processing callback, returns -1 in case of error, the
270 * length of the matched string otherwise. If NULL, attempts to
271 * match the token name.
273 * If buf is not NULL, the result should be stored in it according
274 * to context. An error is returned if not large enough.
276 int (*call)(struct context *ctx, const struct token *token,
277 const char *str, unsigned int len,
278 void *buf, unsigned int size);
280 * Callback that provides possible values for this token, used for
281 * completion. Returns -1 in case of error, the number of possible
282 * values otherwise. If NULL, the token name is used.
284 * If buf is not NULL, entry index ent is written to buf and the
285 * full length of the entry is returned (same behavior as
288 int (*comp)(struct context *ctx, const struct token *token,
289 unsigned int ent, char *buf, unsigned int size);
290 /** Mandatory token name, no default value. */
294 /** Static initializer for the next field. */
295 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
297 /** Static initializer for a NEXT() entry. */
298 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
300 /** Static initializer for the args field. */
301 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
303 /** Static initializer for ARGS() to target a field. */
304 #define ARGS_ENTRY(s, f) \
305 (&(const struct arg){ \
306 .offset = offsetof(s, f), \
307 .size = sizeof(((s *)0)->f), \
310 /** Static initializer for ARGS() to target a bit-field. */
311 #define ARGS_ENTRY_BF(s, f, b) \
312 (&(const struct arg){ \
314 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
317 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
318 #define ARGS_ENTRY_MASK(s, f, m) \
319 (&(const struct arg){ \
320 .offset = offsetof(s, f), \
321 .size = sizeof(((s *)0)->f), \
322 .mask = (const void *)(m), \
325 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
326 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
327 (&(const struct arg){ \
329 .offset = offsetof(s, f), \
330 .size = sizeof(((s *)0)->f), \
331 .mask = (const void *)(m), \
334 /** Static initializer for ARGS() to target a pointer. */
335 #define ARGS_ENTRY_PTR(s, f) \
336 (&(const struct arg){ \
337 .size = sizeof(*((s *)0)->f), \
340 /** Static initializer for ARGS() with arbitrary size. */
341 #define ARGS_ENTRY_USZ(s, f, sz) \
342 (&(const struct arg){ \
343 .offset = offsetof(s, f), \
347 /** Same as ARGS_ENTRY() using network byte ordering. */
348 #define ARGS_ENTRY_HTON(s, f) \
349 (&(const struct arg){ \
351 .offset = offsetof(s, f), \
352 .size = sizeof(((s *)0)->f), \
355 /** Parser output buffer layout expected by cmd_flow_parsed(). */
357 enum index command; /**< Flow command. */
358 portid_t port; /**< Affected port ID. */
361 struct rte_flow_attr attr;
362 struct rte_flow_item *pattern;
363 struct rte_flow_action *actions;
367 } vc; /**< Validate/create arguments. */
371 } destroy; /**< Destroy arguments. */
374 enum rte_flow_action_type action;
375 } query; /**< Query arguments. */
379 } list; /**< List arguments. */
382 } isolate; /**< Isolated mode arguments. */
383 } args; /**< Command arguments. */
386 /** Private data for pattern items. */
387 struct parse_item_priv {
388 enum rte_flow_item_type type; /**< Item type. */
389 uint32_t size; /**< Size of item specification structure. */
392 #define PRIV_ITEM(t, s) \
393 (&(const struct parse_item_priv){ \
394 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
398 /** Private data for actions. */
399 struct parse_action_priv {
400 enum rte_flow_action_type type; /**< Action type. */
401 uint32_t size; /**< Size of action configuration structure. */
404 #define PRIV_ACTION(t, s) \
405 (&(const struct parse_action_priv){ \
406 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
410 static const enum index next_vc_attr[] = {
419 static const enum index next_destroy_attr[] = {
425 static const enum index next_list_attr[] = {
431 static const enum index item_param[] = {
440 static const enum index next_item[] = {
470 static const enum index item_fuzzy[] = {
476 static const enum index item_any[] = {
482 static const enum index item_vf[] = {
488 static const enum index item_port[] = {
494 static const enum index item_raw[] = {
504 static const enum index item_eth[] = {
512 static const enum index item_vlan[] = {
522 static const enum index item_ipv4[] = {
532 static const enum index item_ipv6[] = {
543 static const enum index item_icmp[] = {
550 static const enum index item_udp[] = {
557 static const enum index item_tcp[] = {
565 static const enum index item_sctp[] = {
574 static const enum index item_vxlan[] = {
580 static const enum index item_e_tag[] = {
581 ITEM_E_TAG_GRP_ECID_B,
586 static const enum index item_nvgre[] = {
592 static const enum index item_mpls[] = {
598 static const enum index item_gre[] = {
604 static const enum index item_gtp[] = {
610 static const enum index item_geneve[] = {
617 static const enum index next_action[] = {
634 static const enum index action_mark[] = {
640 static const enum index action_queue[] = {
646 static const enum index action_dup[] = {
652 static const enum index action_rss[] = {
658 static const enum index action_vf[] = {
665 static const enum index action_meter[] = {
671 static int parse_init(struct context *, const struct token *,
672 const char *, unsigned int,
673 void *, unsigned int);
674 static int parse_vc(struct context *, const struct token *,
675 const char *, unsigned int,
676 void *, unsigned int);
677 static int parse_vc_spec(struct context *, const struct token *,
678 const char *, unsigned int, void *, unsigned int);
679 static int parse_vc_conf(struct context *, const struct token *,
680 const char *, unsigned int, void *, unsigned int);
681 static int parse_vc_action_rss_queue(struct context *, const struct token *,
682 const char *, unsigned int, void *,
684 static int parse_destroy(struct context *, const struct token *,
685 const char *, unsigned int,
686 void *, unsigned int);
687 static int parse_flush(struct context *, const struct token *,
688 const char *, unsigned int,
689 void *, unsigned int);
690 static int parse_query(struct context *, const struct token *,
691 const char *, unsigned int,
692 void *, unsigned int);
693 static int parse_action(struct context *, const struct token *,
694 const char *, unsigned int,
695 void *, unsigned int);
696 static int parse_list(struct context *, const struct token *,
697 const char *, unsigned int,
698 void *, unsigned int);
699 static int parse_isolate(struct context *, const struct token *,
700 const char *, unsigned int,
701 void *, unsigned int);
702 static int parse_int(struct context *, const struct token *,
703 const char *, unsigned int,
704 void *, unsigned int);
705 static int parse_prefix(struct context *, const struct token *,
706 const char *, unsigned int,
707 void *, unsigned int);
708 static int parse_boolean(struct context *, const struct token *,
709 const char *, unsigned int,
710 void *, unsigned int);
711 static int parse_string(struct context *, const struct token *,
712 const char *, unsigned int,
713 void *, unsigned int);
714 static int parse_mac_addr(struct context *, const struct token *,
715 const char *, unsigned int,
716 void *, unsigned int);
717 static int parse_ipv4_addr(struct context *, const struct token *,
718 const char *, unsigned int,
719 void *, unsigned int);
720 static int parse_ipv6_addr(struct context *, const struct token *,
721 const char *, unsigned int,
722 void *, unsigned int);
723 static int parse_port(struct context *, const struct token *,
724 const char *, unsigned int,
725 void *, unsigned int);
726 static int comp_none(struct context *, const struct token *,
727 unsigned int, char *, unsigned int);
728 static int comp_boolean(struct context *, const struct token *,
729 unsigned int, char *, unsigned int);
730 static int comp_action(struct context *, const struct token *,
731 unsigned int, char *, unsigned int);
732 static int comp_port(struct context *, const struct token *,
733 unsigned int, char *, unsigned int);
734 static int comp_rule_id(struct context *, const struct token *,
735 unsigned int, char *, unsigned int);
736 static int comp_vc_action_rss_queue(struct context *, const struct token *,
737 unsigned int, char *, unsigned int);
739 /** Token definitions. */
740 static const struct token token_list[] = {
741 /* Special tokens. */
744 .help = "null entry, abused as the entry point",
745 .next = NEXT(NEXT_ENTRY(FLOW)),
750 .help = "command may end here",
756 .help = "integer value",
761 .name = "{unsigned}",
763 .help = "unsigned integer value",
770 .help = "prefix length for bit-mask",
771 .call = parse_prefix,
777 .help = "any boolean value",
778 .call = parse_boolean,
779 .comp = comp_boolean,
784 .help = "fixed string",
785 .call = parse_string,
789 .name = "{MAC address}",
791 .help = "standard MAC address notation",
792 .call = parse_mac_addr,
796 .name = "{IPv4 address}",
797 .type = "IPV4 ADDRESS",
798 .help = "standard IPv4 address notation",
799 .call = parse_ipv4_addr,
803 .name = "{IPv6 address}",
804 .type = "IPV6 ADDRESS",
805 .help = "standard IPv6 address notation",
806 .call = parse_ipv6_addr,
812 .help = "rule identifier",
814 .comp = comp_rule_id,
819 .help = "port identifier",
824 .name = "{group_id}",
826 .help = "group identifier",
833 .help = "priority level",
837 /* Top-level command. */
840 .type = "{command} {port_id} [{arg} [...]]",
841 .help = "manage ingress/egress flow rules",
842 .next = NEXT(NEXT_ENTRY
852 /* Sub-level commands. */
855 .help = "check whether a flow rule can be created",
856 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
857 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
862 .help = "create a flow rule",
863 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
864 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
869 .help = "destroy specific flow rules",
870 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
871 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
872 .call = parse_destroy,
876 .help = "destroy all flow rules",
877 .next = NEXT(NEXT_ENTRY(PORT_ID)),
878 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
883 .help = "query an existing flow rule",
884 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
886 NEXT_ENTRY(PORT_ID)),
887 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
888 ARGS_ENTRY(struct buffer, args.query.rule),
889 ARGS_ENTRY(struct buffer, port)),
894 .help = "list existing flow rules",
895 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
896 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
901 .help = "restrict ingress traffic to the defined flow rules",
902 .next = NEXT(NEXT_ENTRY(BOOLEAN),
903 NEXT_ENTRY(PORT_ID)),
904 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
905 ARGS_ENTRY(struct buffer, port)),
906 .call = parse_isolate,
908 /* Destroy arguments. */
911 .help = "specify a rule identifier",
912 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
913 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
914 .call = parse_destroy,
916 /* Query arguments. */
920 .help = "action to query, must be part of the rule",
921 .call = parse_action,
924 /* List arguments. */
927 .help = "specify a group",
928 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
929 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
932 /* Validate/create attributes. */
935 .help = "specify a group",
936 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
937 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
942 .help = "specify a priority level",
943 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
944 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
949 .help = "affect rule to ingress",
950 .next = NEXT(next_vc_attr),
955 .help = "affect rule to egress",
956 .next = NEXT(next_vc_attr),
959 /* Validate/create pattern. */
962 .help = "submit a list of pattern items",
963 .next = NEXT(next_item),
968 .help = "match value perfectly (with full bit-mask)",
969 .call = parse_vc_spec,
971 [ITEM_PARAM_SPEC] = {
973 .help = "match value according to configured bit-mask",
974 .call = parse_vc_spec,
976 [ITEM_PARAM_LAST] = {
978 .help = "specify upper bound to establish a range",
979 .call = parse_vc_spec,
981 [ITEM_PARAM_MASK] = {
983 .help = "specify bit-mask with relevant bits set to one",
984 .call = parse_vc_spec,
986 [ITEM_PARAM_PREFIX] = {
988 .help = "generate bit-mask from a prefix length",
989 .call = parse_vc_spec,
993 .help = "specify next pattern item",
994 .next = NEXT(next_item),
998 .help = "end list of pattern items",
999 .priv = PRIV_ITEM(END, 0),
1000 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1005 .help = "no-op pattern item",
1006 .priv = PRIV_ITEM(VOID, 0),
1007 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1012 .help = "perform actions when pattern does not match",
1013 .priv = PRIV_ITEM(INVERT, 0),
1014 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1019 .help = "match any protocol for the current layer",
1020 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1021 .next = NEXT(item_any),
1026 .help = "number of layers covered",
1027 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1028 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1032 .help = "match packets addressed to the physical function",
1033 .priv = PRIV_ITEM(PF, 0),
1034 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1039 .help = "match packets addressed to a virtual function ID",
1040 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1041 .next = NEXT(item_vf),
1046 .help = "destination VF ID",
1047 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1048 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1052 .help = "device-specific physical port index to use",
1053 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1054 .next = NEXT(item_port),
1057 [ITEM_PORT_INDEX] = {
1059 .help = "physical port index",
1060 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1061 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1065 .help = "match an arbitrary byte string",
1066 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1067 .next = NEXT(item_raw),
1070 [ITEM_RAW_RELATIVE] = {
1072 .help = "look for pattern after the previous item",
1073 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1074 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1077 [ITEM_RAW_SEARCH] = {
1079 .help = "search pattern from offset (see also limit)",
1080 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1081 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1084 [ITEM_RAW_OFFSET] = {
1086 .help = "absolute or relative offset for pattern",
1087 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1088 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1090 [ITEM_RAW_LIMIT] = {
1092 .help = "search area limit for start of pattern",
1093 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1094 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1096 [ITEM_RAW_PATTERN] = {
1098 .help = "byte string to look for",
1099 .next = NEXT(item_raw,
1101 NEXT_ENTRY(ITEM_PARAM_IS,
1104 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1105 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1107 ITEM_RAW_PATTERN_SIZE)),
1111 .help = "match Ethernet header",
1112 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1113 .next = NEXT(item_eth),
1118 .help = "destination MAC",
1119 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1120 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1124 .help = "source MAC",
1125 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1126 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1130 .help = "EtherType",
1131 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1132 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1136 .help = "match 802.1Q/ad VLAN tag",
1137 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1138 .next = NEXT(item_vlan),
1141 [ITEM_VLAN_TPID] = {
1143 .help = "tag protocol identifier",
1144 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1149 .help = "tag control information",
1150 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1151 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1155 .help = "priority code point",
1156 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1157 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1162 .help = "drop eligible indicator",
1163 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1164 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1169 .help = "VLAN identifier",
1170 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1171 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1176 .help = "match IPv4 header",
1177 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1178 .next = NEXT(item_ipv4),
1183 .help = "type of service",
1184 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1185 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1186 hdr.type_of_service)),
1190 .help = "time to live",
1191 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1192 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1195 [ITEM_IPV4_PROTO] = {
1197 .help = "next protocol ID",
1198 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1199 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1200 hdr.next_proto_id)),
1204 .help = "source address",
1205 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1206 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1211 .help = "destination address",
1212 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1213 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1218 .help = "match IPv6 header",
1219 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1220 .next = NEXT(item_ipv6),
1225 .help = "traffic class",
1226 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1227 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1229 "\x0f\xf0\x00\x00")),
1231 [ITEM_IPV6_FLOW] = {
1233 .help = "flow label",
1234 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1235 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1237 "\x00\x0f\xff\xff")),
1239 [ITEM_IPV6_PROTO] = {
1241 .help = "protocol (next header)",
1242 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1243 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1248 .help = "hop limit",
1249 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1250 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1255 .help = "source address",
1256 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1257 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1262 .help = "destination address",
1263 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1264 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1269 .help = "match ICMP header",
1270 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1271 .next = NEXT(item_icmp),
1274 [ITEM_ICMP_TYPE] = {
1276 .help = "ICMP packet type",
1277 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1278 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1281 [ITEM_ICMP_CODE] = {
1283 .help = "ICMP packet code",
1284 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1285 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1290 .help = "match UDP header",
1291 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1292 .next = NEXT(item_udp),
1297 .help = "UDP source port",
1298 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1299 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1304 .help = "UDP destination port",
1305 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1306 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1311 .help = "match TCP header",
1312 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1313 .next = NEXT(item_tcp),
1318 .help = "TCP source port",
1319 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1320 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1325 .help = "TCP destination port",
1326 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1327 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1330 [ITEM_TCP_FLAGS] = {
1332 .help = "TCP flags",
1333 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1334 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1339 .help = "match SCTP header",
1340 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1341 .next = NEXT(item_sctp),
1346 .help = "SCTP source port",
1347 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1348 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1353 .help = "SCTP destination port",
1354 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1355 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1360 .help = "validation tag",
1361 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1362 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1365 [ITEM_SCTP_CKSUM] = {
1368 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1369 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1374 .help = "match VXLAN header",
1375 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1376 .next = NEXT(item_vxlan),
1379 [ITEM_VXLAN_VNI] = {
1381 .help = "VXLAN identifier",
1382 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1387 .help = "match E-Tag header",
1388 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1389 .next = NEXT(item_e_tag),
1392 [ITEM_E_TAG_GRP_ECID_B] = {
1393 .name = "grp_ecid_b",
1394 .help = "GRP and E-CID base",
1395 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1396 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1402 .help = "match NVGRE header",
1403 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1404 .next = NEXT(item_nvgre),
1407 [ITEM_NVGRE_TNI] = {
1409 .help = "virtual subnet ID",
1410 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1411 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1415 .help = "match MPLS header",
1416 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1417 .next = NEXT(item_mpls),
1420 [ITEM_MPLS_LABEL] = {
1422 .help = "MPLS label",
1423 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1424 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1430 .help = "match GRE header",
1431 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1432 .next = NEXT(item_gre),
1435 [ITEM_GRE_PROTO] = {
1437 .help = "GRE protocol type",
1438 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1439 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1444 .help = "fuzzy pattern match, expect faster than default",
1445 .priv = PRIV_ITEM(FUZZY,
1446 sizeof(struct rte_flow_item_fuzzy)),
1447 .next = NEXT(item_fuzzy),
1450 [ITEM_FUZZY_THRESH] = {
1452 .help = "match accuracy threshold",
1453 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1454 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1459 .help = "match GTP header",
1460 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1461 .next = NEXT(item_gtp),
1466 .help = "tunnel endpoint identifier",
1467 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1468 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1472 .help = "match GTP header",
1473 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1474 .next = NEXT(item_gtp),
1479 .help = "match GTP header",
1480 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1481 .next = NEXT(item_gtp),
1486 .help = "match GENEVE header",
1487 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1488 .next = NEXT(item_geneve),
1491 [ITEM_GENEVE_VNI] = {
1493 .help = "virtual network identifier",
1494 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1495 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1497 [ITEM_GENEVE_PROTO] = {
1499 .help = "GENEVE protocol type",
1500 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1501 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1505 /* Validate/create actions. */
1508 .help = "submit a list of associated actions",
1509 .next = NEXT(next_action),
1514 .help = "specify next action",
1515 .next = NEXT(next_action),
1519 .help = "end list of actions",
1520 .priv = PRIV_ACTION(END, 0),
1525 .help = "no-op action",
1526 .priv = PRIV_ACTION(VOID, 0),
1527 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1530 [ACTION_PASSTHRU] = {
1532 .help = "let subsequent rule process matched packets",
1533 .priv = PRIV_ACTION(PASSTHRU, 0),
1534 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1539 .help = "attach 32 bit value to packets",
1540 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1541 .next = NEXT(action_mark),
1544 [ACTION_MARK_ID] = {
1546 .help = "32 bit value to return with packets",
1547 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1548 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1549 .call = parse_vc_conf,
1553 .help = "flag packets",
1554 .priv = PRIV_ACTION(FLAG, 0),
1555 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1560 .help = "assign packets to a given queue index",
1561 .priv = PRIV_ACTION(QUEUE,
1562 sizeof(struct rte_flow_action_queue)),
1563 .next = NEXT(action_queue),
1566 [ACTION_QUEUE_INDEX] = {
1568 .help = "queue index to use",
1569 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1570 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1571 .call = parse_vc_conf,
1575 .help = "drop packets (note: passthru has priority)",
1576 .priv = PRIV_ACTION(DROP, 0),
1577 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1582 .help = "enable counters for this rule",
1583 .priv = PRIV_ACTION(COUNT, 0),
1584 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1589 .help = "duplicate packets to a given queue index",
1590 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1591 .next = NEXT(action_dup),
1594 [ACTION_DUP_INDEX] = {
1596 .help = "queue index to duplicate packets to",
1597 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1598 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1599 .call = parse_vc_conf,
1603 .help = "spread packets among several queues",
1604 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1605 .next = NEXT(action_rss),
1608 [ACTION_RSS_QUEUES] = {
1610 .help = "queue indices to use",
1611 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1612 .call = parse_vc_conf,
1614 [ACTION_RSS_QUEUE] = {
1616 .help = "queue index",
1617 .call = parse_vc_action_rss_queue,
1618 .comp = comp_vc_action_rss_queue,
1622 .help = "redirect packets to physical device function",
1623 .priv = PRIV_ACTION(PF, 0),
1624 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1629 .help = "redirect packets to virtual device function",
1630 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1631 .next = NEXT(action_vf),
1634 [ACTION_VF_ORIGINAL] = {
1636 .help = "use original VF ID if possible",
1637 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1638 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1640 .call = parse_vc_conf,
1644 .help = "VF ID to redirect packets to",
1645 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1646 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1647 .call = parse_vc_conf,
1651 .help = "meter the directed packets at given id",
1652 .priv = PRIV_ACTION(METER,
1653 sizeof(struct rte_flow_action_meter)),
1654 .next = NEXT(action_meter),
1657 [ACTION_METER_ID] = {
1659 .help = "meter id to use",
1660 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1661 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1662 .call = parse_vc_conf,
1666 /** Remove and return last entry from argument stack. */
/* Returns NULL when ctx->args_num is 0, otherwise decrements the count
 * and returns the former top-of-stack entry.
 * NOTE(review): braces elided by extraction; code left byte-identical. */
1667 static const struct arg *
1668 pop_args(struct context *ctx)
1670 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1673 /** Add entry on top of the argument stack. */
/* Rejects the push when the stack is already full (args_num equals
 * CTX_STACK_SIZE); the elided line after the check presumably returns an
 * error — TODO confirm against full source. */
1675 push_args(struct context *ctx, const struct arg *arg)
1677 if (ctx->args_num == CTX_STACK_SIZE)
1679 ctx->args[ctx->args_num++] = arg;
1683 /** Spread value into buffer according to bit-mask. */
/* Writes the low-order bits of @val into @dst at the positions set in
 * arg->mask, walking the mask bytes from the end (little-endian branch
 * shown). Several loop-control lines are elided by extraction.
 * NOTE(review): appears callable with dst == NULL to only compute a size
 * (see parse_prefix) — confirm against full source. */
1685 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1687 uint32_t i = arg->size;
1695 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1704 unsigned int shift = 0;
1705 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1707 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip bit positions not covered by the mask byte. */
1708 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the next bit of val. */
1713 *buf &= ~(1 << shift);
1714 *buf |= (val & 1) << shift;
1722 /** Compare a string with a partial one of a given length. */
/* Like strncmp() limited to partial_len, but additionally treats the
 * match as exact only when @full is not longer than @partial: when the
 * prefix matches and @full continues, the first extra character is
 * returned (non-zero => mismatch). */
1724 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1726 int r = strncmp(full, partial, partial_len);
1730 if (strlen(full) <= partial_len)
1732 return full[partial_len];
1736 * Parse a prefix length and generate a bit-mask.
1738 * Last argument (ctx->args) is retrieved to determine mask size, storage
1739 * location and whether the result must use network byte ordering.
/* Converts a numeric prefix length in @str into a contiguous bit-mask
 * written at arg->offset inside ctx->object, and an all-ones mask in
 * ctx->objmask when present. conv[] maps a remainder bit count (0..8)
 * to its partial mask byte. Error-return lines are elided by extraction. */
1742 parse_prefix(struct context *ctx, const struct token *token,
1743 const char *str, unsigned int len,
1744 void *buf, unsigned int size)
1746 const struct arg *arg = pop_args(ctx);
1747 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1754 /* Argument is expected. */
/* Whole token must parse as an integer (base auto-detected). */
1758 u = strtoumax(str, &end, 0);
1759 if (errno || (size_t)(end - str) != len)
/* Bit-mask arguments go through arg_entry_bf_fill() instead. */
1764 extra = arg_entry_bf_fill(NULL, 0, arg);
1773 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1774 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes longer than the target field. */
1781 if (bytes > size || bytes + !!extra > size)
1785 buf = (uint8_t *)ctx->object + arg->offset;
1786 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host little-endian: most significant bytes are at the end. */
1788 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1789 memset(buf, 0x00, size - bytes);
1791 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian / network order: fill from the start. */
1795 memset(buf, 0xff, bytes);
1796 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1798 ((uint8_t *)buf)[bytes] = conv[extra];
1801 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure paths the argument is pushed back for a retry. */
1804 push_args(ctx, arg);
1808 /** Default parsing function for token name matching. */
/* Accepts the input only when @str is a prefix-exact match for the
 * token's name (see strcmp_partial()); buf/size are unused here. */
1810 parse_default(struct context *ctx, const struct token *token,
1811 const char *str, unsigned int len,
1812 void *buf, unsigned int size)
1817 if (strcmp_partial(token->name, str, len))
1822 /** Parse flow command, initialize output buffer for subsequent tokens. */
/* First token of every flow command: zeroes the struct buffer header and
 * poisons the remainder of the output area with 0x22 so stale data is
 * detectable, then resets the object mask. */
1824 parse_init(struct context *ctx, const struct token *token,
1825 const char *str, unsigned int len,
1826 void *buf, unsigned int size)
1828 struct buffer *out = buf;
1830 /* Token name must match. */
1831 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1833 /* Nothing else to do if there is no buffer. */
1836 /* Make sure buffer is large enough. */
1837 if (size < sizeof(*out))
1839 /* Initialize buffer. */
1840 memset(out, 0x00, sizeof(*out))
1841 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1844 ctx->objmask = NULL;
1848 /** Parse tokens for validate/create commands. */
/* Central state machine for "flow validate" / "flow create": records the
 * command, fills attributes (ingress/egress), then carves pattern items
 * and actions out of the output buffer — items/actions grow upward from
 * the header while their spec/last/mask (or conf) data grows downward
 * from the end (out->args.vc.data). Several case labels, returns and
 * closing braces are elided by extraction. */
1850 parse_vc(struct context *ctx, const struct token *token,
1851 const char *str, unsigned int len,
1852 void *buf, unsigned int size)
1854 struct buffer *out = buf;
1858 /* Token name must match. */
1859 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1861 /* Nothing else to do if there is no buffer. */
/* First call: record which command (VALIDATE or CREATE) this is. */
1864 if (!out->command) {
1865 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1867 if (sizeof(*out) > size)
1869 out->command = ctx->curr;
1872 ctx->objmask = NULL;
/* Variable data region starts at the end of the buffer. */
1873 out->args.vc.data = (uint8_t *)out + size;
1877 ctx->object = &out->args.vc.attr;
1878 ctx->objmask = NULL;
1879 switch (ctx->curr) {
1884 out->args.vc.attr.ingress = 1;
1887 out->args.vc.attr.egress = 1;
/* PATTERN token: pattern array begins right after the header. */
1890 out->args.vc.pattern =
1891 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1893 ctx->object = out->args.vc.pattern;
1894 ctx->objmask = NULL;
/* ACTIONS token: actions array begins after the last pattern item. */
1897 out->args.vc.actions =
1898 (void *)RTE_ALIGN_CEIL((uintptr_t)
1899 (out->args.vc.pattern +
1900 out->args.vc.pattern_n),
1902 ctx->object = out->args.vc.actions;
1903 ctx->objmask = NULL;
/* Still in the pattern phase: append a flow item. */
1910 if (!out->args.vc.actions) {
1911 const struct parse_item_priv *priv = token->priv;
1912 struct rte_flow_item *item =
1913 out->args.vc.pattern + out->args.vc.pattern_n;
1915 data_size = priv->size * 3; /* spec, last, mask */
1916 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1917 (out->args.vc.data - data_size),
/* Fail when the upward and downward regions would collide. */
1919 if ((uint8_t *)item + sizeof(*item) > data)
1921 *item = (struct rte_flow_item){
1924 ++out->args.vc.pattern_n;
1926 ctx->objmask = NULL;
/* Otherwise append an action. */
1928 const struct parse_action_priv *priv = token->priv;
1929 struct rte_flow_action *action =
1930 out->args.vc.actions + out->args.vc.actions_n;
1932 data_size = priv->size; /* configuration */
1933 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1934 (out->args.vc.data - data_size),
1936 if ((uint8_t *)action + sizeof(*action) > data)
1938 *action = (struct rte_flow_action){
1941 ++out->args.vc.actions_n;
1942 ctx->object = action;
1943 ctx->objmask = NULL;
/* Reserve and zero the per-entry data area. */
1945 memset(data, 0, data_size);
1946 out->args.vc.data = data;
1947 ctx->objdata = data_size;
1951 /** Parse pattern item parameter type. */
/* Handles the "is"/"spec"/"last"/"mask"/"prefix" qualifiers of a pattern
 * item: selects which third of the item's data area (spec, last or mask)
 * subsequent values are written into, and wires item->spec/last/mask
 * accordingly. The index assignments per case are elided by extraction. */
1953 parse_vc_spec(struct context *ctx, const struct token *token,
1954 const char *str, unsigned int len,
1955 void *buf, unsigned int size)
1957 struct buffer *out = buf;
1958 struct rte_flow_item *item;
1964 /* Token name must match. */
1965 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1967 /* Parse parameter types. */
1968 switch (ctx->curr) {
1969 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1975 case ITEM_PARAM_SPEC:
1978 case ITEM_PARAM_LAST:
1981 case ITEM_PARAM_PREFIX:
1982 /* Modify next token to expect a prefix. */
1983 if (ctx->next_num < 2)
1985 ctx->next[ctx->next_num - 2] = prefix;
1987 case ITEM_PARAM_MASK:
1993 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach parameters to. */
1996 if (!out->args.vc.pattern_n)
1998 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1999 data_size = ctx->objdata / 3; /* spec, last, mask */
2000 /* Point to selected object. */
2001 ctx->object = out->args.vc.data + (data_size * index);
2003 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2004 item->mask = ctx->objmask;
2006 ctx->objmask = NULL;
2007 /* Update relevant item pointer. */
2008 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2013 /** Parse action configuration field. */
/* Points ctx->object at the configuration area reserved for the most
 * recent action and publishes it through action->conf; actual field
 * values are parsed by the tokens that follow. */
2015 parse_vc_conf(struct context *ctx, const struct token *token,
2016 const char *str, unsigned int len,
2017 void *buf, unsigned int size)
2019 struct buffer *out = buf;
2020 struct rte_flow_action *action;
2023 /* Token name must match. */
2024 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2026 /* Nothing else to do if there is no buffer. */
/* An action must already exist to attach configuration to. */
2029 if (!out->args.vc.actions_n)
2031 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2032 /* Point to selected object. */
2033 ctx->object = out->args.vc.data;
2034 ctx->objmask = NULL;
2035 /* Update configuration pointer. */
2036 action->conf = ctx->object;
2041 * Parse queue field for RSS action.
2043 * Valid tokens are queue indices and the "end" token.
/* Accumulates queue indices into rte_flow_action_rss.queue[], keeping
 * the running count in the upper 16 bits of ctx->objdata; "end" stops
 * the list and stores the final count in ->num. */
2046 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2047 const char *str, unsigned int len,
2048 void *buf, unsigned int size)
2050 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2057 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count so far lives in the high 16 bits of objdata. */
2059 i = ctx->objdata >> 16;
2060 if (!strcmp_partial("end", str, len)) {
2061 ctx->objdata &= 0xffff;
/* Bounded by the fixed size of the queue[] array. */
2064 if (i >= ACTION_RSS_NUM)
2066 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
2068 ret = parse_int(ctx, token, str, len, NULL, 0);
2074 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Queue this same token again to accept more indices. */
2076 if (ctx->next_num == RTE_DIM(ctx->next))
2078 ctx->next[ctx->next_num++] = next;
2081 ((struct rte_flow_action_rss *)ctx->object)->num = i;
2085 /** Parse tokens for destroy command. */
/* First call records the DESTROY command and positions the rule-ID array
 * after the header; subsequent calls append one rule ID slot each,
 * bounds-checked against the output buffer. */
2087 parse_destroy(struct context *ctx, const struct token *token,
2088 const char *str, unsigned int len,
2089 void *buf, unsigned int size)
2091 struct buffer *out = buf;
2093 /* Token name must match. */
2094 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2096 /* Nothing else to do if there is no buffer. */
2099 if (!out->command) {
2100 if (ctx->curr != DESTROY)
2102 if (sizeof(*out) > size)
2104 out->command = ctx->curr;
2107 ctx->objmask = NULL;
2108 out->args.destroy.rule =
2109 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next rule ID still fits in the buffer. */
2113 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2114 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2117 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2118 ctx->objmask = NULL;
2122 /** Parse tokens for flush command. */
/* FLUSH takes no per-rule arguments: only records the command in the
 * output buffer on first call. */
2124 parse_flush(struct context *ctx, const struct token *token,
2125 const char *str, unsigned int len,
2126 void *buf, unsigned int size)
2128 struct buffer *out = buf;
2130 /* Token name must match. */
2131 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2133 /* Nothing else to do if there is no buffer. */
2136 if (!out->command) {
2137 if (ctx->curr != FLUSH)
2139 if (sizeof(*out) > size)
2141 out->command = ctx->curr;
2144 ctx->objmask = NULL;
2149 /** Parse tokens for query command. */
/* Records the QUERY command on first call; the rule ID and action name
 * arguments are filled by later tokens (lines elided by extraction). */
2151 parse_query(struct context *ctx, const struct token *token,
2152 const char *str, unsigned int len,
2153 void *buf, unsigned int size)
2155 struct buffer *out = buf;
2157 /* Token name must match. */
2158 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2160 /* Nothing else to do if there is no buffer. */
2163 if (!out->command) {
2164 if (ctx->curr != QUERY)
2166 if (sizeof(*out) > size)
2168 out->command = ctx->curr;
2171 ctx->objmask = NULL;
2176 /** Parse action names. */
/* Matches @str against the names of all known actions (next_action[]
 * table) and stores the matched action's type at arg->offset inside the
 * current object. The source of the memcpy is elided by extraction —
 * presumably &priv->type; confirm against full source. */
2178 parse_action(struct context *ctx, const struct token *token,
2179 const char *str, unsigned int len,
2180 void *buf, unsigned int size)
2182 struct buffer *out = buf;
2183 const struct arg *arg = pop_args(ctx);
2187 /* Argument is expected. */
2190 /* Parse action name. */
2191 for (i = 0; next_action[i]; ++i) {
2192 const struct parse_action_priv *priv;
2194 token = &token_list[next_action[i]];
2195 if (strcmp_partial(token->name, str, len))
2201 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument for error recovery. */
2207 push_args(ctx, arg);
2211 /** Parse tokens for list command. */
/* Same layout strategy as parse_destroy(): first call records LIST and
 * places the group ID array after the header; later calls append one
 * group ID slot each, bounds-checked against the buffer. */
2213 parse_list(struct context *ctx, const struct token *token,
2214 const char *str, unsigned int len,
2215 void *buf, unsigned int size)
2217 struct buffer *out = buf;
2219 /* Token name must match. */
2220 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2222 /* Nothing else to do if there is no buffer. */
2225 if (!out->command) {
2226 if (ctx->curr != LIST)
2228 if (sizeof(*out) > size)
2230 out->command = ctx->curr;
2233 ctx->objmask = NULL;
2234 out->args.list.group =
2235 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next group ID still fits in the buffer. */
2239 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2240 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2243 ctx->object = out->args.list.group + out->args.list.group_n++;
2244 ctx->objmask = NULL;
2248 /** Parse tokens for isolate command. */
/* Records the ISOLATE command on first call; the boolean set/unset
 * argument is handled by a later token (lines elided by extraction). */
2250 parse_isolate(struct context *ctx, const struct token *token,
2251 const char *str, unsigned int len,
2252 void *buf, unsigned int size)
2254 struct buffer *out = buf;
2256 /* Token name must match. */
2257 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2259 /* Nothing else to do if there is no buffer. */
2262 if (!out->command) {
2263 if (ctx->curr != ISOLATE)
2265 if (sizeof(*out) > size)
2267 out->command = ctx->curr;
2270 ctx->objmask = NULL;
2276 * Parse signed/unsigned integers 8 to 64-bit long.
2278 * Last argument (ctx->args) is retrieved to determine integer type and
/* Converts @str with strtoimax/strtoumax depending on arg->sign, then
 * stores the value at arg->offset with the width given by arg->size,
 * byte-swapping to network order when arg->hton is set. Bit-mask
 * arguments are delegated to arg_entry_bf_fill(). When an object mask
 * exists, the same write is repeated with all-ones (the goto-based
 * repeat lines are elided by extraction). */
2282 parse_int(struct context *ctx, const struct token *token,
2283 const char *str, unsigned int len,
2284 void *buf, unsigned int size)
2286 const struct arg *arg = pop_args(ctx);
2291 /* Argument is expected. */
2296 (uintmax_t)strtoimax(str, &end, 0) :
2297 strtoumax(str, &end, 0);
/* Whole token must be consumed for a valid number. */
2298 if (errno || (size_t)(end - str) != len)
2303 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2304 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2308 buf = (uint8_t *)ctx->object + arg->offset;
2312 case sizeof(uint8_t):
2313 *(uint8_t *)buf = u;
2315 case sizeof(uint16_t):
2316 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VXLAN VNI) are stored byte by byte. */
2318 case sizeof(uint8_t [3]):
2319 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2321 ((uint8_t *)buf)[0] = u;
2322 ((uint8_t *)buf)[1] = u >> 8;
2323 ((uint8_t *)buf)[2] = u >> 16;
2327 ((uint8_t *)buf)[0] = u >> 16;
2328 ((uint8_t *)buf)[1] = u >> 8;
2329 ((uint8_t *)buf)[2] = u;
2331 case sizeof(uint32_t):
2332 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2334 case sizeof(uint64_t):
2335 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the object mask with all bits set. */
2340 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2342 buf = (uint8_t *)ctx->objmask + arg->offset;
2347 push_args(ctx, arg);
2354 * Two arguments (ctx->args) are retrieved from the stack to store data and
2355 * its length (in that order).
/* Copies the raw token bytes into the data argument's storage and first
 * records its length through parse_int() using a temporary decimal
 * string; the unused tail of the buffer is poisoned with 0x55. On
 * failure both arguments are pushed back. */
2358 parse_string(struct context *ctx, const struct token *token,
2359 const char *str, unsigned int len,
2360 void *buf, unsigned int size)
2362 const struct arg *arg_data = pop_args(ctx);
2363 const struct arg *arg_len = pop_args(ctx);
2364 char tmp[16]; /* Ought to be enough. */
2367 /* Arguments are expected. */
/* arg_len was missing: restore arg_data before bailing out. */
2371 push_args(ctx, arg_data);
2374 size = arg_data->size;
2375 /* Bit-mask fill is not supported. */
2376 if (arg_data->mask || size < len)
2380 /* Let parse_int() fill length information first. */
2381 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2384 push_args(ctx, arg_len);
2385 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2390 buf = (uint8_t *)ctx->object + arg_data->offset;
2391 /* Output buffer is not necessarily NUL-terminated. */
2392 memcpy(buf, str, len);
2393 memset((uint8_t *)buf + len, 0x55, size - len);
2395 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Error path: restore both arguments in original order. */
2398 push_args(ctx, arg_len);
2399 push_args(ctx, arg_data);
2404 * Parse a MAC address.
2406 * Last argument (ctx->args) is retrieved to determine storage size and
/* Uses the cmdline library's Ethernet address parser into a local
 * temporary, then copies it to arg->offset; the whole token must be
 * consumed. Only network byte order is accepted. */
2410 parse_mac_addr(struct context *ctx, const struct token *token,
2411 const char *str, unsigned int len,
2412 void *buf, unsigned int size)
2414 const struct arg *arg = pop_args(ctx);
2415 struct ether_addr tmp;
2419 /* Argument is expected. */
2423 /* Bit-mask fill is not supported. */
2424 if (arg->mask || size != sizeof(tmp))
2426 /* Only network endian is supported. */
2429 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2430 if (ret < 0 || (unsigned int)ret != len)
2434 buf = (uint8_t *)ctx->object + arg->offset;
2435 memcpy(buf, &tmp, size);
2437 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2440 push_args(ctx, arg);
2445 * Parse an IPv4 address.
2447 * Last argument (ctx->args) is retrieved to determine storage size and
/* Copies the token into a NUL-terminated scratch string (str2, declared
 * on an elided line) and converts it with inet_pton(AF_INET); when that
 * fails the token is retried as a plain integer via parse_int(). */
2451 parse_ipv4_addr(struct context *ctx, const struct token *token,
2452 const char *str, unsigned int len,
2453 void *buf, unsigned int size)
2455 const struct arg *arg = pop_args(ctx);
2460 /* Argument is expected. */
2464 /* Bit-mask fill is not supported. */
2465 if (arg->mask || size != sizeof(tmp))
2467 /* Only network endian is supported. */
2470 memcpy(str2, str, len);
2472 ret = inet_pton(AF_INET, str2, &tmp);
2474 /* Attempt integer parsing. */
2475 push_args(ctx, arg);
2476 return parse_int(ctx, token, str, len, buf, size);
2480 buf = (uint8_t *)ctx->object + arg->offset;
2481 memcpy(buf, &tmp, size);
2483 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2486 push_args(ctx, arg);
2491 * Parse an IPv6 address.
2493 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same approach as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback; scratch string str2 is declared on an elided line. */
2497 parse_ipv6_addr(struct context *ctx, const struct token *token,
2498 const char *str, unsigned int len,
2499 void *buf, unsigned int size)
2501 const struct arg *arg = pop_args(ctx);
2503 struct in6_addr tmp;
2507 /* Argument is expected. */
2511 /* Bit-mask fill is not supported. */
2512 if (arg->mask || size != sizeof(tmp))
2514 /* Only network endian is supported. */
2517 memcpy(str2, str, len);
2519 ret = inet_pton(AF_INET6, str2, &tmp);
2524 buf = (uint8_t *)ctx->object + arg->offset;
2525 memcpy(buf, &tmp, size);
2527 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2530 push_args(ctx, arg);
2534 /** Boolean values (even indices stand for false). */
/* NULL-terminated table of accepted boolean spellings; entries elided by
 * extraction — presumably pairs like "0"/"1", "false"/"true", etc. */
2535 static const char *const boolean_name[] = {
2544 * Parse a boolean value.
2546 * Last argument (ctx->args) is retrieved to determine storage size and
/* Maps a named boolean (see boolean_name[]) to "0" or "1" using the
 * even-index-is-false convention, then delegates storage to parse_int().
 * Unrecognized names fall through to parse_int() as plain integers. */
2550 parse_boolean(struct context *ctx, const struct token *token,
2551 const char *str, unsigned int len,
2552 void *buf, unsigned int size)
2554 const struct arg *arg = pop_args(ctx);
2558 /* Argument is expected. */
2561 for (i = 0; boolean_name[i]; ++i)
2562 if (!strcmp_partial(boolean_name[i], str, len))
2564 /* Process token as integer. */
2565 if (boolean_name[i])
2566 str = i & 1 ? "1" : "0";
2567 push_args(ctx, arg);
2568 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the original token, not the substitute. */
2569 return ret > 0 ? (int)len : ret;
2572 /** Parse port and update context. */
/* Parses a port ID through parse_int() into a scratch buffer (a real
 * one when @buf is NULL) and caches the result in ctx->port for
 * completion helpers. */
2574 parse_port(struct context *ctx, const struct token *token,
2575 const char *str, unsigned int len,
2576 void *buf, unsigned int size)
2578 struct buffer *out = &(struct buffer){ .port = 0 };
2586 ctx->objmask = NULL;
2587 size = sizeof(*out);
2589 ret = parse_int(ctx, token, str, len, out, size);
2591 ctx->port = out->port;
2597 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest; body
 * (return value) elided by extraction. */
2599 comp_none(struct context *ctx, const struct token *token,
2600 unsigned int ent, char *buf, unsigned int size)
2610 /** Complete boolean values. */
/* With @buf set, writes the @ent-th boolean name; otherwise (elided
 * tail) returns the number of entries. */
2612 comp_boolean(struct context *ctx, const struct token *token,
2613 unsigned int ent, char *buf, unsigned int size)
2619 for (i = 0; boolean_name[i]; ++i)
2620 if (buf && i == ent)
2621 return snprintf(buf, size, "%s", boolean_name[i]);
2627 /** Complete action names. */
/* With @buf set, writes the @ent-th action name from next_action[];
 * otherwise (elided tail) returns the count. */
2629 comp_action(struct context *ctx, const struct token *token,
2630 unsigned int ent, char *buf, unsigned int size)
2636 for (i = 0; next_action[i]; ++i)
2637 if (buf && i == ent)
2638 return snprintf(buf, size, "%s",
2639 token_list[next_action[i]].name);
2645 /** Complete available ports. */
/* Iterates over currently attached Ethernet devices; with @buf set,
 * writes the @ent-th port ID, otherwise counts (elided tail). */
2647 comp_port(struct context *ctx, const struct token *token,
2648 unsigned int ent, char *buf, unsigned int size)
2655 RTE_ETH_FOREACH_DEV(p) {
2656 if (buf && i == ent)
2657 return snprintf(buf, size, "%u", p);
2665 /** Complete available rule IDs. */
/* Walks the flow list of the port cached by parse_port(); with @buf
 * set, writes the @ent-th rule ID, otherwise counts (elided tail).
 * Bails out when the cached port is invalid or RTE_PORT_ALL. */
2667 comp_rule_id(struct context *ctx, const struct token *token,
2668 unsigned int ent, char *buf, unsigned int size)
2671 struct rte_port *port;
2672 struct port_flow *pf;
2675 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2676 ctx->port == (portid_t)RTE_PORT_ALL)
2678 port = &ports[ctx->port];
2679 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2680 if (buf && i == ent)
2681 return snprintf(buf, size, "%u", pf->id);
2689 /** Complete queue field for RSS action. */
/* Suggests an empty placeholder and "end"; with @buf set, writes the
 * @ent-th entry, otherwise counts (elided tail). */
2691 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2692 unsigned int ent, char *buf, unsigned int size)
2694 static const char *const str[] = { "", "end", NULL };
2699 for (i = 0; str[i] != NULL; ++i)
2700 if (buf && i == ent)
2701 return snprintf(buf, size, "%s", str[i]);
2707 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all
 * operate on this one instance (not thread-safe by design). */
2708 static struct context cmd_flow_context;
2710 /** Global parser instance (cmdline API). */
/* Forward declaration: defined at the bottom of the file; referenced by
 * cmd_flow_get_help() and cmd_flow_tok(). */
2711 cmdline_parse_inst_t cmd_flow;
2713 /** Initialize context. */
/* Resets the fields used between invocations; most assignments elided
 * by extraction. */
2715 cmd_flow_context_init(struct context *ctx)
2717 /* A full memset() is not necessary. */
2727 ctx->objmask = NULL;
2730 /** Parse a token (cmdline API). */
/* Entry point called by the cmdline library for each whitespace-
 * delimited token: measures the token, detects end-of-line, tries every
 * candidate token in the current "next" list (using its ->call hook or
 * parse_default()), then pushes the accepted token's follow-up tokens
 * and argument descriptors onto the context stacks. Error/return lines
 * are elided by extraction. */
2732 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2735 struct context *ctx = &cmd_flow_context;
2736 const struct token *token;
2737 const enum index *list;
2742 token = &token_list[ctx->curr];
2743 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2746 for (len = 0; src[len]; ++len)
2747 if (src[len] == '#' || isspace(src[len]))
2751 /* Last argument and EOL detection. */
2752 for (i = len; src[i]; ++i)
2753 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2755 else if (!isspace(src[i])) {
2760 if (src[i] == '\r' || src[i] == '\n') {
2764 /* Initialize context if necessary. */
2765 if (!ctx->next_num) {
2768 ctx->next[ctx->next_num++] = token->next[0];
2770 /* Process argument through candidates. */
2771 ctx->prev = ctx->curr;
2772 list = ctx->next[ctx->next_num - 1];
2773 for (i = 0; list[i]; ++i) {
2774 const struct token *next = &token_list[list[i]];
2777 ctx->curr = list[i];
/* Prefer the token's dedicated parser when it has one. */
2779 tmp = next->call(ctx, next, src, len, result, size);
2781 tmp = parse_default(ctx, next, src, len, result, size);
2782 if (tmp == -1 || tmp != len)
2790 /* Push subsequent tokens if any. */
2792 for (i = 0; token->next[i]; ++i) {
2793 if (ctx->next_num == RTE_DIM(ctx->next))
2795 ctx->next[ctx->next_num++] = token->next[i];
2797 /* Push arguments if any. */
2799 for (i = 0; token->args[i]; ++i) {
2800 if (ctx->args_num == RTE_DIM(ctx->args))
2802 ctx->args[ctx->args_num++] = token->args[i];
2807 /** Return number of completion entries (cmdline API). */
/* Counts candidates in the current "next" list; when exactly one token
 * remains and it has a completion callback, delegates the count to it
 * (e.g. comp_port()). */
2809 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2811 struct context *ctx = &cmd_flow_context;
2812 const struct token *token = &token_list[ctx->curr];
2813 const enum index *list;
2817 /* Count number of tokens in current list. */
2819 list = ctx->next[ctx->next_num - 1];
2821 list = token->next[0];
2822 for (i = 0; list[i]; ++i)
2827 * If there is a single token, use its completion callback, otherwise
2828 * return the number of entries.
2830 token = &token_list[list[0]];
2831 if (i == 1 && token->comp) {
2832 /* Save index for cmd_flow_get_help(). */
2833 ctx->prev = list[0];
/* A NULL buffer asks the callback for a count only. */
2834 return token->comp(ctx, token, 0, NULL, 0);
2839 /** Return a completion entry (cmdline API). */
/* Mirrors cmd_flow_complete_get_nb(): single-token lists delegate to the
 * token's completion callback, otherwise the @index-th token name in the
 * current list is copied into @dst. */
2841 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2842 char *dst, unsigned int size)
2844 struct context *ctx = &cmd_flow_context;
2845 const struct token *token = &token_list[ctx->curr];
2846 const enum index *list;
2850 /* Count number of tokens in current list. */
2852 list = ctx->next[ctx->next_num - 1];
2854 list = token->next[0];
2855 for (i = 0; list[i]; ++i)
2859 /* If there is a single token, use its completion callback. */
2860 token = &token_list[list[0]];
2861 if (i == 1 && token->comp) {
2862 /* Save index for cmd_flow_get_help(). */
2863 ctx->prev = list[0];
/* Normalize the callback's length result to 0/-1 for cmdline. */
2864 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2866 /* Otherwise make sure the index is valid and use defaults. */
2869 token = &token_list[list[index]];
2870 snprintf(dst, size, "%s", token->name);
2871 /* Save index for cmd_flow_get_help(). */
2872 ctx->prev = list[index];
2876 /** Populate help strings for current token (cmdline API). */
/* Writes the previously-selected token's type into @dst and points the
 * global cmd_flow.help_str at its help text (or name as fallback). */
2878 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2880 struct context *ctx = &cmd_flow_context;
2881 const struct token *token = &token_list[ctx->prev];
2886 /* Set token type and update global help with details. */
2887 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"))
2889 cmd_flow.help_str = token->help;
2891 cmd_flow.help_str = token->name;
2895 /** Token definition template (cmdline API). */
/* Every dynamic token handed to the cmdline library shares this single
 * ops vtable; closing braces elided by extraction. */
2896 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2897 .ops = &(struct cmdline_token_ops){
2898 .parse = cmd_flow_parse,
2899 .complete_get_nb = cmd_flow_complete_get_nb,
2900 .complete_get_elt = cmd_flow_complete_get_elt,
2901 .get_help = cmd_flow_get_help,
2906 /** Populate the next dynamic token. */
/* Called by the cmdline framework to fetch tokens one by one: resets the
 * context before the first token, stops when no more tokens are expected
 * (or on a terminating EOL), otherwise hands back the shared template
 * header. Several branch bodies are elided by extraction. */
2908 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2909 cmdline_parse_token_hdr_t **hdr_inst)
2911 struct context *ctx = &cmd_flow_context;
2913 /* Always reinitialize context before requesting the first token. */
2914 if (!(hdr_inst - cmd_flow.tokens))
2915 cmd_flow_context_init(ctx);
2916 /* Return NULL when no more tokens are expected. */
2917 if (!ctx->next_num && ctx->curr) {
2921 /* Determine if command should end here. */
2922 if (ctx->eol && ctx->last && ctx->next_num) {
2923 const enum index *list = ctx->next[ctx->next_num - 1];
2926 for (i = 0; list[i]; ++i) {
2933 *hdr = &cmd_flow_token_hdr;
2936 /** Dispatch parsed buffer to function calls. */
/* Final stage: routes the completed command buffer to the matching
 * port_flow_*() implementation in config.c; case labels elided by
 * extraction. */
2938 cmd_flow_parsed(const struct buffer *in)
2940 switch (in->command) {
2942 port_flow_validate(in->port, &in->args.vc.attr,
2943 in->args.vc.pattern, in->args.vc.actions);
2946 port_flow_create(in->port, &in->args.vc.attr,
2947 in->args.vc.pattern, in->args.vc.actions);
2950 port_flow_destroy(in->port, in->args.destroy.rule_n,
2951 in->args.destroy.rule);
2954 port_flow_flush(in->port);
2957 port_flow_query(in->port, in->args.query.rule,
2958 in->args.query.action);
2961 port_flow_list(in->port, in->args.list.group_n,
2962 in->args.list.group);
2965 port_flow_isolate(in->port, in->args.isolate.set);
2972 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator and result handler depending on how the
 * cmdline framework invokes it; the discriminating condition is elided
 * by extraction. */
2974 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2977 cmd_flow_tok(arg0, arg2);
2979 cmd_flow_parsed(arg0);
2982 /** Global parser instance (cmdline API). */
2983 cmdline_parse_inst_t cmd_flow = {
2985 .data = NULL, /**< Unused. */
2986 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2989 }, /**< Tokens are returned by cmd_flow_tok(). */