4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
43 #include <rte_common.h>
44 #include <rte_ethdev.h>
45 #include <rte_byteorder.h>
46 #include <cmdline_parse.h>
47 #include <cmdline_parse_etheraddr.h>
52 /** Parser token indices. */
72 /* Top-level command. */
75 /* Sub-level commands. */
83 /* Destroy arguments. */
86 /* Query arguments. */
92 /* Validate/create arguments. */
98 /* Validate/create pattern. */
150 /* Validate/create actions. */
/** Size of pattern[] field in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 36

/** Storage size for struct rte_flow_item_raw including pattern. */
/* offsetof() rather than sizeof() so the trailing pattern[] array is
 * counted exactly once, regardless of padding after the fixed fields. */
#define ITEM_RAW_SIZE \
	(offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)

/** Number of queue[] entries in struct rte_flow_action_rss. */
#define ACTION_RSS_NUM 32

/** Storage size for struct rte_flow_action_rss including queues. */
/* Same offsetof() trick: base struct plus ACTION_RSS_NUM trailing queue
 * entries, sized through a null-pointer member dereference in sizeof. */
#define ACTION_RSS_SIZE \
	(offsetof(struct rte_flow_action_rss, queue) + \
	 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)

/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
/** Parser context. */
/* NOTE(review): the enclosing "struct context {" / "};" lines appear to
 * have been dropped by extraction; the declarations below are its members. */
	/** Stack of subsequent token lists to process. */
	const enum index *next[CTX_STACK_SIZE];
	/** Arguments for stacked tokens. */
	const void *args[CTX_STACK_SIZE];
	enum index curr; /**< Current token index. */
	enum index prev; /**< Index of the last token seen. */
	int next_num; /**< Number of entries in next[]. */
	int args_num; /**< Number of entries in args[]. */
	uint32_t reparse:1; /**< Start over from the beginning. */
	uint32_t eol:1; /**< EOL has been detected. */
	uint32_t last:1; /**< No more arguments. */
	uint16_t port; /**< Current port ID (for completions). */
	uint32_t objdata; /**< Object-specific data. */
	void *object; /**< Address of current object for relative offsets. */
	void *objmask; /**< Object a full mask must be written to. */
/** Token argument. */
/* NOTE(review): member list of "struct arg"; the surrounding brace lines
 * were dropped by extraction. Describes where/how a parsed value is
 * stored relative to ctx->object (see parse_int/parse_prefix users). */
	uint32_t hton:1; /**< Use network byte ordering. */
	uint32_t sign:1; /**< Value is signed. */
	uint32_t offset; /**< Relative offset from ctx->object. */
	uint32_t size; /**< Field size. */
	const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
/** Parser token definition. */
/* NOTE(review): member list of "struct token"; the brace lines and the
 * declarations matching several of the doc comments below were dropped
 * by extraction. */
	/** Type displayed during completion (defaults to "TOKEN"). */
	/** Help displayed during completion (defaults to token name). */
	/** Private data used by parser functions. */
	/*
	 * Lists of subsequent tokens to push on the stack. Each call to the
	 * parser consumes the last entry of that stack.
	 */
	const enum index *const *next;
	/** Arguments stack for subsequent tokens that need them. */
	const struct arg *const *args;
	/*
	 * Token-processing callback, returns -1 in case of error, the
	 * length of the matched string otherwise. If NULL, attempts to
	 * match the token name.
	 *
	 * If buf is not NULL, the result should be stored in it according
	 * to context. An error is returned if not large enough.
	 */
	int (*call)(struct context *ctx, const struct token *token,
		    const char *str, unsigned int len,
		    void *buf, unsigned int size);
	/*
	 * Callback that provides possible values for this token, used for
	 * completion. Returns -1 in case of error, the number of possible
	 * values otherwise. If NULL, the token name is used.
	 *
	 * If buf is not NULL, entry index ent is written to buf and the
	 * full length of the entry is returned (same behavior as
	 * snprintf() — TODO confirm, continuation line lost in extraction).
	 */
	int (*comp)(struct context *ctx, const struct token *token,
		    unsigned int ent, char *buf, unsigned int size);
	/** Mandatory token name, no default value. */
/** Static initializer for the next field. */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/** Static initializer for a NEXT() entry. */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/** Static initializer for the args field. */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }

/** Static initializer for ARGS() to target a field. */
/* NOTE(review): the closing "})" continuation lines of each compound
 * literal below were dropped by extraction; only the initializers that
 * survived are shown. Each macro yields a pointer to an anonymous
 * const struct arg describing one storage target. */
#define ARGS_ENTRY(s, f) \
	(&(const struct arg){ \
		.offset = offsetof(s, f), \
		.size = sizeof(((s *)0)->f), \

/** Static initializer for ARGS() to target a bit-field. */
#define ARGS_ENTRY_BF(s, f, b) \
	(&(const struct arg){ \
		.mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \

/** Static initializer for ARGS() to target a pointer. */
#define ARGS_ENTRY_PTR(s, f) \
	(&(const struct arg){ \
		.size = sizeof(*((s *)0)->f), \

/** Static initializer for ARGS() with arbitrary size. */
#define ARGS_ENTRY_USZ(s, f, sz) \
	(&(const struct arg){ \
		.offset = offsetof(s, f), \

/** Same as ARGS_ENTRY() using network byte ordering. */
#define ARGS_ENTRY_HTON(s, f) \
	(&(const struct arg){ \
		.offset = offsetof(s, f), \
		.size = sizeof(((s *)0)->f), \
/** Parser output buffer layout expected by cmd_flow_parsed(). */
/* NOTE(review): member list of "struct buffer"; its outer braces and the
 * union/struct opener lines for the per-command argument variants were
 * dropped by extraction — only closers ("} vc;" etc.) remain. */
	enum index command; /**< Flow command. */
	uint16_t port; /**< Affected port ID. */
		/* Flow rule attributes, pattern and action lists. */
		struct rte_flow_attr attr;
		struct rte_flow_item *pattern;
		struct rte_flow_action *actions;
		} vc; /**< Validate/create arguments. */
		} destroy; /**< Destroy arguments. */
			enum rte_flow_action_type action;
		} query; /**< Query arguments. */
		} list; /**< List arguments. */
	} args; /**< Command arguments. */
/** Private data for pattern items. */
/* NOTE(review): "};" terminators and the ".size = (s)," initializer line
 * of the PRIV_* macros below were dropped by extraction. */
struct parse_item_priv {
	enum rte_flow_item_type type; /**< Item type. */
	uint32_t size; /**< Size of item specification structure. */

/* Anonymous compound literal tying a token to its rte_flow item type. */
#define PRIV_ITEM(t, s) \
	(&(const struct parse_item_priv){ \
		.type = RTE_FLOW_ITEM_TYPE_ ## t, \

/** Private data for actions. */
struct parse_action_priv {
	enum rte_flow_action_type type; /**< Action type. */
	uint32_t size; /**< Size of action configuration structure. */

/* Same idea for rte_flow action types. */
#define PRIV_ACTION(t, s) \
	(&(const struct parse_action_priv){ \
		.type = RTE_FLOW_ACTION_TYPE_ ## t, \
/* NOTE(review): each array below lists the token indices reachable after
 * a given token (terminated by ZERO); their contents and closing braces
 * were dropped by extraction — only the declarations remain. */
static const enum index next_vc_attr[] = {
static const enum index next_destroy_attr[] = {
static const enum index next_list_attr[] = {
/* Parameter qualifiers accepted by pattern item fields. */
static const enum index item_param[] = {
static const enum index next_item[] = {
static const enum index item_any[] = {
static const enum index item_vf[] = {
static const enum index item_port[] = {
static const enum index item_raw[] = {
static const enum index item_eth[] = {
static const enum index item_vlan[] = {
static const enum index item_ipv4[] = {
static const enum index item_ipv6[] = {
static const enum index item_icmp[] = {
static const enum index item_udp[] = {
static const enum index item_tcp[] = {
static const enum index item_sctp[] = {
static const enum index item_vxlan[] = {
static const enum index next_action[] = {
static const enum index action_mark[] = {
static const enum index action_queue[] = {
static const enum index action_dup[] = {
static const enum index action_rss[] = {
static const enum index action_vf[] = {
/* Forward declarations of token-processing callbacks (struct token::call
 * signature) and completion callbacks (struct token::comp signature). */
static int parse_init(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_vc(struct context *, const struct token *,
		    const char *, unsigned int,
		    void *, unsigned int);
static int parse_vc_spec(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
/* NOTE(review): the trailing "unsigned int);" of this prototype was
 * dropped by extraction. */
static int parse_vc_action_rss_queue(struct context *, const struct token *,
				     const char *, unsigned int, void *,
static int parse_destroy(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_flush(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_query(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_action(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_list(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_int(struct context *, const struct token *,
		     const char *, unsigned int,
		     void *, unsigned int);
static int parse_prefix(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_boolean(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_string(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_mac_addr(struct context *, const struct token *,
			  const char *, unsigned int,
			  void *, unsigned int);
static int parse_ipv4_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_ipv6_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_port(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
/* Completion helpers (struct token::comp). */
static int comp_none(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_boolean(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_action(struct context *, const struct token *,
		       unsigned int, char *, unsigned int);
static int comp_port(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_vc_action_rss_queue(struct context *, const struct token *,
				    unsigned int, char *, unsigned int);
/** Token definitions. */
/* NOTE(review): most "[INDEX] = {" entry headers, ".name" fields and
 * closing braces of this designated-initializer table were dropped by
 * extraction; only the surviving field initializers are listed below,
 * grouped under their original section comments. */
static const struct token token_list[] = {
	/* Special tokens. */
		.help = "null entry, abused as the entry point",
		.next = NEXT(NEXT_ENTRY(FLOW)),
		.help = "command may end here",
		.help = "integer value",
		.name = "{unsigned}",
		.help = "unsigned integer value",
		.help = "prefix length for bit-mask",
		.call = parse_prefix,
		.help = "any boolean value",
		.call = parse_boolean,
		.comp = comp_boolean,
		.help = "fixed string",
		.call = parse_string,
		.name = "{MAC address}",
		.help = "standard MAC address notation",
		.call = parse_mac_addr,
		.name = "{IPv4 address}",
		.type = "IPV4 ADDRESS",
		.help = "standard IPv4 address notation",
		.call = parse_ipv4_addr,
		.name = "{IPv6 address}",
		.type = "IPV6 ADDRESS",
		.help = "standard IPv6 address notation",
		.call = parse_ipv6_addr,
		.help = "rule identifier",
		.comp = comp_rule_id,
		.help = "port identifier",
		.name = "{group_id}",
		.help = "group identifier",
		.help = "priority level",
	/* Top-level command. */
		.type = "{command} {port_id} [{arg} [...]]",
		.help = "manage ingress/egress flow rules",
		.next = NEXT(NEXT_ENTRY
	/* Sub-level commands. */
		.help = "check whether a flow rule can be created",
		.next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
		.help = "create a flow rule",
		.next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
		.help = "destroy specific flow rules",
		.next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
		.call = parse_destroy,
		.help = "destroy all flow rules",
		.next = NEXT(NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
		.help = "query an existing flow rule",
		.next = NEXT(NEXT_ENTRY(QUERY_ACTION),
			     NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
			     ARGS_ENTRY(struct buffer, args.query.rule),
			     ARGS_ENTRY(struct buffer, port)),
		.help = "list existing flow rules",
		.next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
	/* Destroy arguments. */
		.help = "specify a rule identifier",
		.next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
		.args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
		.call = parse_destroy,
	/* Query arguments. */
		.help = "action to query, must be part of the rule",
		.call = parse_action,
	/* List arguments. */
		.help = "specify a group",
		.next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
		.args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
	/* Validate/create attributes. */
		.help = "specify a group",
		.next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
		.help = "specify a priority level",
		.next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
		.help = "affect rule to ingress",
		.next = NEXT(next_vc_attr),
		.help = "affect rule to egress",
		.next = NEXT(next_vc_attr),
	/* Validate/create pattern. */
		.help = "submit a list of pattern items",
		.next = NEXT(next_item),
		.help = "match value perfectly (with full bit-mask)",
		.call = parse_vc_spec,
	[ITEM_PARAM_SPEC] = {
		.help = "match value according to configured bit-mask",
		.call = parse_vc_spec,
	[ITEM_PARAM_LAST] = {
		.help = "specify upper bound to establish a range",
		.call = parse_vc_spec,
	[ITEM_PARAM_MASK] = {
		.help = "specify bit-mask with relevant bits set to one",
		.call = parse_vc_spec,
	[ITEM_PARAM_PREFIX] = {
		.help = "generate bit-mask from a prefix length",
		.call = parse_vc_spec,
		.help = "specify next pattern item",
		.next = NEXT(next_item),
		.help = "end list of pattern items",
		.priv = PRIV_ITEM(END, 0),
		.next = NEXT(NEXT_ENTRY(ACTIONS)),
		.help = "no-op pattern item",
		.priv = PRIV_ITEM(VOID, 0),
		.next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
		.help = "perform actions when pattern does not match",
		.priv = PRIV_ITEM(INVERT, 0),
		.next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
		.help = "match any protocol for the current layer",
		.priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
		.next = NEXT(item_any),
		.help = "number of layers covered",
		.next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
		.help = "match packets addressed to the physical function",
		.priv = PRIV_ITEM(PF, 0),
		.next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
		.help = "match packets addressed to a virtual function ID",
		.priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
		.next = NEXT(item_vf),
		.help = "destination VF ID",
		.next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
		.help = "device-specific physical port index to use",
		.priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
		.next = NEXT(item_port),
	[ITEM_PORT_INDEX] = {
		.help = "physical port index",
		.next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
		.help = "match an arbitrary byte string",
		.priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
		.next = NEXT(item_raw),
	[ITEM_RAW_RELATIVE] = {
		.help = "look for pattern after the previous item",
		.next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
		.args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
	[ITEM_RAW_SEARCH] = {
		.help = "search pattern from offset (see also limit)",
		.next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
		.args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
	[ITEM_RAW_OFFSET] = {
		.help = "absolute or relative offset for pattern",
		.next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
		.help = "search area limit for start of pattern",
		.next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
	[ITEM_RAW_PATTERN] = {
		.help = "byte string to look for",
		.next = NEXT(item_raw,
			     NEXT_ENTRY(ITEM_PARAM_IS,
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
			     ARGS_ENTRY_USZ(struct rte_flow_item_raw,
					    ITEM_RAW_PATTERN_SIZE)),
		.help = "match Ethernet header",
		.priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
		.next = NEXT(item_eth),
		.help = "destination MAC",
		.next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, dst)),
		.help = "source MAC",
		.next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_eth, src)),
		.next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
		.help = "match 802.1Q/ad VLAN tag",
		.priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
		.next = NEXT(item_vlan),
	[ITEM_VLAN_TPID] = {
		.help = "tag protocol identifier",
		.next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
		.help = "tag control information",
		.next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
		.help = "match IPv4 header",
		.priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
		.next = NEXT(item_ipv4),
		.help = "source address",
		.next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
		.help = "destination address",
		.next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
		.help = "match IPv6 header",
		.priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
		.next = NEXT(item_ipv6),
		.help = "source address",
		.next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
		.help = "destination address",
		.next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
		.help = "match ICMP header",
		.priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
		.next = NEXT(item_icmp),
	[ITEM_ICMP_TYPE] = {
		.help = "ICMP packet type",
		.next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
	[ITEM_ICMP_CODE] = {
		.help = "ICMP packet code",
		.next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
		.help = "match UDP header",
		.priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
		.next = NEXT(item_udp),
		.help = "UDP source port",
		.next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
		.help = "UDP destination port",
		.next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
		.help = "match TCP header",
		.priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
		.next = NEXT(item_tcp),
		.help = "TCP source port",
		.next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
		.help = "TCP destination port",
		.next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
		.help = "match SCTP header",
		.priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
		.next = NEXT(item_sctp),
		.help = "SCTP source port",
		.next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
		.help = "SCTP destination port",
		.next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
		.help = "match VXLAN header",
		.priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
		.next = NEXT(item_vxlan),
	[ITEM_VXLAN_VNI] = {
		.help = "VXLAN identifier",
		.next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
	/* Validate/create actions. */
		.help = "submit a list of associated actions",
		.next = NEXT(next_action),
		.help = "specify next action",
		.next = NEXT(next_action),
		.help = "end list of actions",
		.priv = PRIV_ACTION(END, 0),
		.help = "no-op action",
		.priv = PRIV_ACTION(VOID, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
	[ACTION_PASSTHRU] = {
		.help = "let subsequent rule process matched packets",
		.priv = PRIV_ACTION(PASSTHRU, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
		.help = "attach 32 bit value to packets",
		.priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
		.next = NEXT(action_mark),
	[ACTION_MARK_ID] = {
		.help = "32 bit value to return with packets",
		.next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
		.call = parse_vc_conf,
		.help = "flag packets",
		.priv = PRIV_ACTION(FLAG, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
		.help = "assign packets to a given queue index",
		.priv = PRIV_ACTION(QUEUE,
				    sizeof(struct rte_flow_action_queue)),
		.next = NEXT(action_queue),
	[ACTION_QUEUE_INDEX] = {
		.help = "queue index to use",
		.next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
		.call = parse_vc_conf,
		.help = "drop packets (note: passthru has priority)",
		.priv = PRIV_ACTION(DROP, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
		.help = "enable counters for this rule",
		.priv = PRIV_ACTION(COUNT, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
		.help = "duplicate packets to a given queue index",
		.priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
		.next = NEXT(action_dup),
	[ACTION_DUP_INDEX] = {
		.help = "queue index to duplicate packets to",
		.next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
		.call = parse_vc_conf,
		.help = "spread packets among several queues",
		.priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
		.next = NEXT(action_rss),
	[ACTION_RSS_QUEUES] = {
		.help = "queue indices to use",
		.next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
		.call = parse_vc_conf,
	[ACTION_RSS_QUEUE] = {
		.help = "queue index",
		.call = parse_vc_action_rss_queue,
		.comp = comp_vc_action_rss_queue,
		.help = "redirect packets to physical device function",
		.priv = PRIV_ACTION(PF, 0),
		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
		.help = "redirect packets to virtual device function",
		.priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
		.next = NEXT(action_vf),
	[ACTION_VF_ORIGINAL] = {
		.help = "use original VF ID if possible",
		.next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
		.args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
		.call = parse_vc_conf,
		.help = "VF ID to redirect packets to",
		.next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
		.args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
		.call = parse_vc_conf,
1300 /** Remove and return last entry from argument stack. */
1301 static const struct arg *
1302 pop_args(struct context *ctx)
1304 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1307 /** Add entry on top of the argument stack. */
1309 push_args(struct context *ctx, const struct arg *arg)
1311 if (ctx->args_num == CTX_STACK_SIZE)
1313 ctx->args[ctx->args_num++] = arg;
/** Spread value into buffer according to bit-mask. */
/* NOTE(review): the return type, loop framing, big-endian branch and
 * return-value computation of this function were dropped by extraction;
 * only the little-endian inner-loop fragments remain below. */
arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
	/* Walk the mask from its last byte toward the first. */
	uint32_t i = arg->size;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		unsigned int shift = 0;
		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
		/* Copy val one bit at a time into each set mask bit. */
		for (shift = 0; arg->mask[i] >> shift; ++shift) {
			if (!(arg->mask[i] & (1 << shift)))
			*buf &= ~(1 << shift);
			*buf |= (val & 1) << shift;
/*
 * Parse a prefix length and generate a bit-mask.
 *
 * Last argument (ctx->args) is retrieved to determine mask size, storage
 * location and whether the result must use network byte ordering.
 *
 * NOTE(review): the return type, local declarations (u, end, v, bytes,
 * extra), error-path returns and #else framing were dropped by
 * extraction; only the surviving statements are listed below.
 */
parse_prefix(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* conv[n] == a byte with its n most significant bits set. */
	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
	/* Argument is expected. */
	u = strtoumax(str, &end, 0);
	if (errno || (size_t)(end - str) != len)
	/* Bit-mask cases: delegate to arg_entry_bf_fill(). */
	extra = arg_entry_bf_fill(NULL, 0, arg);
	if (!arg_entry_bf_fill(ctx->object, v, arg) ||
	    !arg_entry_bf_fill(ctx->objmask, -1, arg))
	/* Plain byte-buffer case. */
	if (bytes > size || bytes + !!extra > size)
	buf = (uint8_t *)ctx->object + arg->offset;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/* Little endian: most significant bytes live at the end. */
	memset((uint8_t *)buf + size - bytes, 0xff, bytes);
	memset(buf, 0x00, size - bytes);
	((uint8_t *)buf)[size - bytes - 1] = conv[extra];
	/* Big endian: fill from the front. */
	memset(buf, 0xff, bytes);
	memset((uint8_t *)buf + bytes, 0x00, size - bytes);
	((uint8_t *)buf)[bytes] = conv[extra];
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Error path: restore the argument for a later retry. */
	push_args(ctx, arg);
1429 /** Default parsing function for token name matching. */
1431 parse_default(struct context *ctx, const struct token *token,
1432 const char *str, unsigned int len,
1433 void *buf, unsigned int size)
1438 if (strncmp(str, token->name, len))
/** Parse flow command, initialize output buffer for subsequent tokens. */
/* NOTE(review): the return type, error/early returns and the
 * ctx->objdata/ctx->object assignments were dropped by extraction. */
parse_init(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	/* 0x22 poison makes use of uninitialized tail space visible. */
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objmask = NULL;
/** Parse tokens for validate/create commands. */
/* NOTE(review): the return type, switch case labels, early returns,
 * alignment arguments and several closing braces were dropped by
 * extraction; the surviving statements are annotated below. */
parse_vc(struct context *ctx, const struct token *token,
	 const char *str, unsigned int len,
	 void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First invocation: record the command and aim data at buffer end. */
	if (!out->command) {
		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
		/* Variable-size data grows downward from the buffer end. */
		out->args.vc.data = (uint8_t *)out + size;
	ctx->object = &out->args.vc.attr;
	ctx->objmask = NULL;
	switch (ctx->curr) {
		out->args.vc.attr.ingress = 1;
		out->args.vc.attr.egress = 1;
		/* PATTERN: pattern items start right after the buffer header. */
		out->args.vc.pattern =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
		ctx->object = out->args.vc.pattern;
		ctx->objmask = NULL;
		/* ACTIONS: action array starts after the last pattern item. */
		out->args.vc.actions =
			(void *)RTE_ALIGN_CEIL((uintptr_t)
					       (out->args.vc.pattern +
						out->args.vc.pattern_n),
		ctx->object = out->args.vc.actions;
		ctx->objmask = NULL;
	/* Pattern item tokens land here until ACTIONS is seen. */
	if (!out->args.vc.actions) {
		const struct parse_item_priv *priv = token->priv;
		struct rte_flow_item *item =
			out->args.vc.pattern + out->args.vc.pattern_n;

		data_size = priv->size * 3; /* spec, last, mask */
		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
					       (out->args.vc.data - data_size),
		/* Fail when item array and data regions would collide. */
		if ((uint8_t *)item + sizeof(*item) > data)
		*item = (struct rte_flow_item){
		++out->args.vc.pattern_n;
		ctx->objmask = NULL;
		/* Otherwise this token is an action. */
		const struct parse_action_priv *priv = token->priv;
		struct rte_flow_action *action =
			out->args.vc.actions + out->args.vc.actions_n;

		data_size = priv->size; /* configuration */
		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
					       (out->args.vc.data - data_size),
		if ((uint8_t *)action + sizeof(*action) > data)
		*action = (struct rte_flow_action){
		++out->args.vc.actions_n;
		ctx->object = action;
		ctx->objmask = NULL;
	memset(data, 0, data_size);
	out->args.vc.data = data;
	ctx->objdata = data_size;
/** Parse pattern item parameter type. */
/* NOTE(review): the return type, "index" local and its per-case
 * assignments, early returns and closing braces were dropped by
 * extraction. The selector "index" apparently picks spec(0)/last(1)/
 * mask(2) within the per-item data area — confirm against upstream. */
parse_vc_spec(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_item *item;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Parse parameter types. */
	switch (ctx->curr) {
	case ITEM_PARAM_SPEC:
	case ITEM_PARAM_LAST:
	case ITEM_PARAM_PREFIX:
		/* Modify next token to expect a prefix. */
		if (ctx->next_num < 2)
		ctx->next[ctx->next_num - 2] = NEXT_ENTRY(PREFIX);
	case ITEM_PARAM_MASK:
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.pattern_n)
	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
	data_size = ctx->objdata / 3; /* spec, last, mask */
	/* Point to selected object. */
	ctx->object = out->args.vc.data + (data_size * index);
	ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
	item->mask = ctx->objmask;
	ctx->objmask = NULL;
	/* Update relevant item pointer. */
	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
/** Parse action configuration field. */
/* NOTE(review): the return type, early returns and closing braces were
 * dropped by extraction. */
parse_vc_conf(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* A configuration field can only follow at least one action. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Update configuration pointer. */
	action->conf = ctx->object;
1660 * Parse queue field for RSS action.
1662 * Valid tokens are queue indices and the "end" token.
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * The running queue count is kept in the upper 16 bits of ctx->objdata;
 * each accepted index re-queues ACTION_RSS_QUEUE so an arbitrary-length
 * list (bounded by ACTION_RSS_NUM) can be parsed. "end" stops the loop
 * and stores the final count in rte_flow_action_rss.num.
 */
1665 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1666 const char *str, unsigned int len,
1667 void *buf, unsigned int size)
1669 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1676 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count so far lives in the high half of objdata. */
1678 i = ctx->objdata >> 16;
1679 if (!strncmp(str, "end", len)) {
1680 ctx->objdata &= 0xffff;
1683 if (i >= ACTION_RSS_NUM)
/* Target the i-th queue slot of the RSS configuration. */
1685 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
1687 ret = parse_int(ctx, token, str, len, NULL, 0);
1693 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
1695 if (ctx->next_num == RTE_DIM(ctx->next))
/* Expect another queue index (or "end") next. */
1697 ctx->next[ctx->next_num++] = next;
1700 ((struct rte_flow_action_rss *)ctx->object)->num = i;
1704 /** Parse tokens for destroy command. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * First invocation records the command; subsequent invocations append
 * rule IDs into a properly aligned array placed directly after the
 * buffer header, with an explicit overflow check against `size`.
 */
1706 parse_destroy(struct context *ctx, const struct token *token,
1707 const char *str, unsigned int len,
1708 void *buf, unsigned int size)
1710 struct buffer *out = buf;
1712 /* Token name must match. */
1713 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1715 /* Nothing else to do if there is no buffer. */
1718 if (!out->command) {
1719 if (ctx->curr != DESTROY)
1721 if (sizeof(*out) > size)
1723 out->command = ctx->curr;
1726 ctx->objmask = NULL;
/* Rule ID array starts right after the buffer header, aligned. */
1727 out->args.destroy.rule =
1728 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject a new entry that would run past the caller's buffer. */
1732 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1733 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1736 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1737 ctx->objmask = NULL;
1741 /** Parse tokens for flush command. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Flush takes no arguments beyond the port: only the command ID is
 * recorded on the first matching token.
 */
1743 parse_flush(struct context *ctx, const struct token *token,
1744 const char *str, unsigned int len,
1745 void *buf, unsigned int size)
1747 struct buffer *out = buf;
1749 /* Token name must match. */
1750 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1752 /* Nothing else to do if there is no buffer. */
1755 if (!out->command) {
1756 if (ctx->curr != FLUSH)
1758 if (sizeof(*out) > size)
1760 out->command = ctx->curr;
1763 ctx->objmask = NULL;
1768 /** Parse tokens for query command. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Records the QUERY command; the rule ID and queried action are filled
 * by later tokens (storage setup lines not visible here).
 */
1770 parse_query(struct context *ctx, const struct token *token,
1771 const char *str, unsigned int len,
1772 void *buf, unsigned int size)
1774 struct buffer *out = buf;
1776 /* Token name must match. */
1777 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1779 /* Nothing else to do if there is no buffer. */
1782 if (!out->command) {
1783 if (ctx->curr != QUERY)
1785 if (sizeof(*out) > size)
1787 out->command = ctx->curr;
1790 ctx->objmask = NULL;
1795 /** Parse action names. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Matches `str` against the action-name tokens listed in next_action[]
 * and stores data derived from the matching token's private area into
 * the destination described by the popped argument. On failure the
 * argument is pushed back so the caller's stack stays balanced.
 */
1797 parse_action(struct context *ctx, const struct token *token,
1798 const char *str, unsigned int len,
1799 void *buf, unsigned int size)
1801 struct buffer *out = buf;
1802 const struct arg *arg = pop_args(ctx);
1806 /* Argument is expected. */
1809 /* Parse action name. */
1810 for (i = 0; next_action[i]; ++i) {
1811 const struct parse_action_priv *priv;
1813 token = &token_list[next_action[i]];
1814 if (strncmp(token->name, str, len))
1820 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure paths. */
1826 push_args(ctx, arg);
1830 /** Parse tokens for list command. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Mirrors parse_destroy(): first call records the LIST command, later
 * calls append group IDs into an aligned array following the buffer
 * header, with an overflow check against `size`.
 */
1832 parse_list(struct context *ctx, const struct token *token,
1833 const char *str, unsigned int len,
1834 void *buf, unsigned int size)
1836 struct buffer *out = buf;
1838 /* Token name must match. */
1839 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1841 /* Nothing else to do if there is no buffer. */
1844 if (!out->command) {
1845 if (ctx->curr != LIST)
1847 if (sizeof(*out) > size)
1849 out->command = ctx->curr;
1852 ctx->objmask = NULL;
/* Group ID array starts right after the buffer header, aligned. */
1853 out->args.list.group =
1854 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject a new entry that would run past the caller's buffer. */
1858 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
1859 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
1862 ctx->object = out->args.list.group + out->args.list.group_n++;
1863 ctx->objmask = NULL;
1868 * Parse signed/unsigned integers 8 to 64-bit long.
1870 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Converts `str` with strtoimax/strtoumax, then stores the value into
 * ctx->object (and all-ones into ctx->objmask when set) at the popped
 * argument's offset. Storage width is dispatched on arg->size, with an
 * explicit 24-bit (uint8_t[3]) case handled per byte order; arg->hton
 * requests network byte order. Bit-field destinations are delegated to
 * arg_entry_bf_fill().
 */
1874 parse_int(struct context *ctx, const struct token *token,
1875 const char *str, unsigned int len,
1876 void *buf, unsigned int size)
1878 const struct arg *arg = pop_args(ctx);
1883 /* Argument is expected. */
/* Signed vs. unsigned conversion — selector line not visible here. */
1888 (uintmax_t)strtoimax(str, &end, 0) :
1889 strtoumax(str, &end, 0);
/* Whole token must have been consumed. */
1890 if (errno || (size_t)(end - str) != len)
1895 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
1896 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1900 buf = (uint8_t *)ctx->object + arg->offset;
1904 case sizeof(uint8_t):
1905 *(uint8_t *)buf = u;
1907 case sizeof(uint16_t):
1908 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI-style values) stored byte by byte. */
1910 case sizeof(uint8_t [3]):
1911 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1913 ((uint8_t *)buf)[0] = u;
1914 ((uint8_t *)buf)[1] = u >> 8;
1915 ((uint8_t *)buf)[2] = u >> 16;
1919 ((uint8_t *)buf)[0] = u >> 16;
1920 ((uint8_t *)buf)[1] = u >> 8;
1921 ((uint8_t *)buf)[2] = u;
1923 case sizeof(uint32_t):
1924 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
1926 case sizeof(uint64_t):
1927 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is active. */
1932 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
1934 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore the argument on failure paths. */
1939 push_args(ctx, arg);
1946 * Two arguments (ctx->args) are retrieved from the stack to store data and
1947 * its length (in that order).
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Copies the raw token into the data field and first routes its length,
 * printed into `tmp`, through parse_int() so the length field is filled
 * before the data. Remaining bytes are poisoned with 0x55; mask bytes,
 * when a mask object is active, are set to 0xff over the copied length.
 */
1950 parse_string(struct context *ctx, const struct token *token,
1951 const char *str, unsigned int len,
1952 void *buf, unsigned int size)
1954 const struct arg *arg_data = pop_args(ctx);
1955 const struct arg *arg_len = pop_args(ctx);
1956 char tmp[16]; /* Ought to be enough. */
1959 /* Arguments are expected. */
/* arg_len pop failed: restore arg_data before bailing out. */
1963 push_args(ctx, arg_data);
1966 size = arg_data->size;
1967 /* Bit-mask fill is not supported. */
1968 if (arg_data->mask || size < len)
1972 /* Let parse_int() fill length information first. */
1973 ret = snprintf(tmp, sizeof(tmp), "%u", len);
1976 push_args(ctx, arg_len);
1977 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
1982 buf = (uint8_t *)ctx->object + arg_data->offset;
1983 /* Output buffer is not necessarily NUL-terminated. */
1984 memcpy(buf, str, len);
/* 0x55 fill makes stale trailing bytes easy to spot. */
1985 memset((uint8_t *)buf + len, 0x55, size - len);
1987 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Restore both arguments on failure paths (reverse order). */
1990 push_args(ctx, arg_len);
1991 push_args(ctx, arg_data);
1996 * Parse a MAC address.
1998 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Parses via cmdline_parse_etheraddr() into a stack temporary, then
 * copies it to the destination field; destination size must equal
 * sizeof(struct ether_addr). Mask bytes are set to 0xff when a mask
 * object is active.
 */
2002 parse_mac_addr(struct context *ctx, const struct token *token,
2003 const char *str, unsigned int len,
2004 void *buf, unsigned int size)
2006 const struct arg *arg = pop_args(ctx);
2007 struct ether_addr tmp;
2011 /* Argument is expected. */
2015 /* Bit-mask fill is not supported. */
2016 if (arg->mask || size != sizeof(tmp))
2018 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The parser must consume exactly the token length. */
2019 if (ret < 0 || (unsigned int)ret != len)
2023 buf = (uint8_t *)ctx->object + arg->offset;
2024 memcpy(buf, &tmp, size);
2026 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure paths. */
2029 push_args(ctx, arg);
2034 * Parse an IPv4 address.
2036 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Copies the token into a NUL-terminated local buffer (inet_pton needs
 * one) and converts it; the result is stored in network byte order. If
 * inet_pton() rejects the string, the token is re-tried as a plain
 * integer through parse_int().
 */
2040 parse_ipv4_addr(struct context *ctx, const struct token *token,
2041 const char *str, unsigned int len,
2042 void *buf, unsigned int size)
2044 const struct arg *arg = pop_args(ctx);
2049 /* Argument is expected. */
2053 /* Bit-mask fill is not supported. */
2054 if (arg->mask || size != sizeof(tmp))
2056 /* Only network endian is supported. */
2059 memcpy(str2, str, len);
2061 ret = inet_pton(AF_INET, str2, &tmp);
2063 /* Attempt integer parsing. */
2064 push_args(ctx, arg);
2065 return parse_int(ctx, token, str, len, buf, size);
2069 buf = (uint8_t *)ctx->object + arg->offset;
2070 memcpy(buf, &tmp, size);
2072 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure paths. */
2075 push_args(ctx, arg);
2080 * Parse an IPv6 address.
2082 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Same approach as parse_ipv4_addr() — NUL-terminated copy, inet_pton()
 * conversion, network-endian store — but with no integer fallback.
 */
2086 parse_ipv6_addr(struct context *ctx, const struct token *token,
2087 const char *str, unsigned int len,
2088 void *buf, unsigned int size)
2090 const struct arg *arg = pop_args(ctx);
2092 struct in6_addr tmp;
2096 /* Argument is expected. */
2100 /* Bit-mask fill is not supported. */
2101 if (arg->mask || size != sizeof(tmp))
2103 /* Only network endian is supported. */
2106 memcpy(str2, str, len);
2108 ret = inet_pton(AF_INET6, str2, &tmp);
2113 buf = (uint8_t *)ctx->object + arg->offset;
2114 memcpy(buf, &tmp, size);
2116 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the argument on failure paths. */
2119 push_args(ctx, arg);
2123 /** Boolean values (even indices stand for false). */
/* NOTE(review): array contents are missing from this extract — presumably
 * alternating false/true spellings ("0"/"1", "false"/"true", ...), with a
 * NULL terminator; verify against the full file. */
2124 static const char *const boolean_name[] = {
2133 * Parse a boolean value.
2135 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Looks the token up in boolean_name[]; a match at an odd index means
 * true, even means false (see array comment). The resulting "0"/"1" —
 * or the original token when unmatched — is then handed to parse_int().
 * On success the ORIGINAL token length is returned, not parse_int()'s.
 */
2139 parse_boolean(struct context *ctx, const struct token *token,
2140 const char *str, unsigned int len,
2141 void *buf, unsigned int size)
2143 const struct arg *arg = pop_args(ctx);
2147 /* Argument is expected. */
2150 for (i = 0; boolean_name[i]; ++i)
2151 if (!strncmp(str, boolean_name[i], len))
2153 /* Process token as integer. */
2154 if (boolean_name[i])
2155 str = i & 1 ? "1" : "0";
2156 push_args(ctx, arg);
2157 ret = parse_int(ctx, token, str, strlen(str), buf, size);
2158 return ret > 0 ? (int)len : ret;
2161 /** Parse port and update context. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Parses the port number into a throwaway compound-literal buffer via
 * parse_int(), then latches the result into ctx->port for subsequent
 * tokens (e.g. rule-ID completion).
 */
2163 parse_port(struct context *ctx, const struct token *token,
2164 const char *str, unsigned int len,
2165 void *buf, unsigned int size)
2167 struct buffer *out = &(struct buffer){ .port = 0 };
2175 ctx->objmask = NULL;
2176 size = sizeof(*out);
2178 ret = parse_int(ctx, token, str, len, out, size);
2180 ctx->port = out->port;
2186 /** No completion. */
/* NOTE(review): body lines are missing from this extract — presumably a
 * stub that reports zero completion entries; verify against full file. */
2188 comp_none(struct context *ctx, const struct token *token,
2189 unsigned int ent, char *buf, unsigned int size)
2199 /** Complete boolean values. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * With `buf` set, writes the ent-th boolean_name[] entry; the fall-through
 * (counting) path for a NULL `buf` is not visible here.
 */
2201 comp_boolean(struct context *ctx, const struct token *token,
2202 unsigned int ent, char *buf, unsigned int size)
2208 for (i = 0; boolean_name[i]; ++i)
2209 if (buf && i == ent)
2210 return snprintf(buf, size, "%s", boolean_name[i]);
2216 /** Complete action names. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * With `buf` set, writes the ent-th action token name from next_action[].
 */
2218 comp_action(struct context *ctx, const struct token *token,
2219 unsigned int ent, char *buf, unsigned int size)
2225 for (i = 0; next_action[i]; ++i)
2226 if (buf && i == ent)
2227 return snprintf(buf, size, "%s",
2228 token_list[next_action[i]].name);
2234 /** Complete available ports. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Iterates testpmd's active port set; with `buf` set, writes the ent-th
 * port ID as a decimal string.
 */
2236 comp_port(struct context *ctx, const struct token *token,
2237 unsigned int ent, char *buf, unsigned int size)
2244 FOREACH_PORT(p, ports) {
2245 if (buf && i == ent)
2246 return snprintf(buf, size, "%u", p);
2254 /** Complete available rule IDs. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Uses ctx->port (latched by parse_port()) to walk that port's flow
 * list; invalid or wildcard (RTE_PORT_ALL) ports yield no completions.
 */
2256 comp_rule_id(struct context *ctx, const struct token *token,
2257 unsigned int ent, char *buf, unsigned int size)
2260 struct rte_port *port;
2261 struct port_flow *pf;
2264 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2265 ctx->port == (uint16_t)RTE_PORT_ALL)
2267 port = &ports[ctx->port];
2268 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2269 if (buf && i == ent)
2270 return snprintf(buf, size, "%u", pf->id);
2278 /** Complete queue field for RSS action. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Offers a placeholder (empty string) and "end"; actual queue numbers
 * are typed by the user rather than completed.
 */
2280 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2281 unsigned int ent, char *buf, unsigned int size)
2283 static const char *const str[] = { "", "end", NULL };
2288 for (i = 0; str[i] != NULL; ++i)
2289 if (buf && i == ent)
2290 return snprintf(buf, size, "%s", str[i]);
2296 /** Internal context. */
/* Single shared parser state — the cmdline callbacks below all operate
 * on this one instance, so parsing is not reentrant. */
2297 static struct context cmd_flow_context;
2299 /** Global parser instance (cmdline API). */
/* Forward declaration: help_str is updated at runtime by
 * cmd_flow_get_help(); full initializer appears at end of file. */
2300 cmdline_parse_inst_t cmd_flow;
2302 /** Initialize context. */
/*
 * NOTE(review): most reset assignments are missing from this extract;
 * code kept byte-identical. Resets the shared parser context field by
 * field (a full memset() is deliberately avoided, per original comment).
 */
2304 cmd_flow_context_init(struct context *ctx)
2306 /* A full memset() is not necessary. */
2317 ctx->objmask = NULL;
2320 /** Parse a token (cmdline API). */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Core dispatch: isolates the next whitespace/'#'-delimited token from
 * `src`, detects end-of-line, then tries each candidate token on top of
 * the ctx->next stack. The first candidate whose call()/parse_default()
 * consumes exactly `len` bytes wins; its own next-token lists and
 * argument descriptors are then pushed for subsequent calls.
 */
2322 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2325 struct context *ctx = &cmd_flow_context;
2326 const struct token *token;
2327 const enum index *list;
2332 /* Restart as requested. */
2334 cmd_flow_context_init(ctx);
2335 token = &token_list[ctx->curr];
2336 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2339 for (len = 0; src[len]; ++len)
2340 if (src[len] == '#' || isspace(src[len]))
2344 /* Last argument and EOL detection. */
2345 for (i = len; src[i]; ++i)
2346 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2348 else if (!isspace(src[i])) {
2353 if (src[i] == '\r' || src[i] == '\n') {
2357 /* Initialize context if necessary. */
2358 if (!ctx->next_num) {
2361 ctx->next[ctx->next_num++] = token->next[0];
2363 /* Process argument through candidates. */
2364 ctx->prev = ctx->curr;
2365 list = ctx->next[ctx->next_num - 1];
2366 for (i = 0; list[i]; ++i) {
2367 const struct token *next = &token_list[list[i]];
2370 ctx->curr = list[i];
/* Tokens with a call() override generic name matching. */
2372 tmp = next->call(ctx, next, src, len, result, size);
2374 tmp = parse_default(ctx, next, src, len, result, size);
/* Candidate must consume the token exactly. */
2375 if (tmp == -1 || tmp != len)
2383 /* Push subsequent tokens if any. */
2385 for (i = 0; token->next[i]; ++i) {
2386 if (ctx->next_num == RTE_DIM(ctx->next))
2388 ctx->next[ctx->next_num++] = token->next[i];
2390 /* Push arguments if any. */
2392 for (i = 0; token->args[i]; ++i) {
2393 if (ctx->args_num == RTE_DIM(ctx->args))
2395 ctx->args[ctx->args_num++] = token->args[i];
2400 /** Return number of completion entries (cmdline API). */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Counts candidates on the current next-token list; when exactly one
 * candidate has a completion callback, defers to it for the count.
 */
2402 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2404 struct context *ctx = &cmd_flow_context;
2405 const struct token *token = &token_list[ctx->curr];
2406 const enum index *list;
2410 /* Tell cmd_flow_parse() that context must be reinitialized. */
2412 /* Count number of tokens in current list. */
2414 list = ctx->next[ctx->next_num - 1];
2416 list = token->next[0];
2417 for (i = 0; list[i]; ++i)
2422 * If there is a single token, use its completion callback, otherwise
2423 * return the number of entries.
2425 token = &token_list[list[0]];
2426 if (i == 1 && token->comp) {
2427 /* Save index for cmd_flow_get_help(). */
2428 ctx->prev = list[0];
2429 return token->comp(ctx, token, 0, NULL, 0);
2434 /** Return a completion entry (cmdline API). */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Mirrors cmd_flow_complete_get_nb(): for a single candidate with a
 * comp() callback, lets it fill `dst`; otherwise copies the index-th
 * candidate's name. ctx->prev is saved either way so cmd_flow_get_help()
 * can describe the selected token.
 */
2436 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2437 char *dst, unsigned int size)
2439 struct context *ctx = &cmd_flow_context;
2440 const struct token *token = &token_list[ctx->curr];
2441 const enum index *list;
2445 /* Tell cmd_flow_parse() that context must be reinitialized. */
2447 /* Count number of tokens in current list. */
2449 list = ctx->next[ctx->next_num - 1];
2451 list = token->next[0];
2452 for (i = 0; list[i]; ++i)
2456 /* If there is a single token, use its completion callback. */
2457 token = &token_list[list[0]];
2458 if (i == 1 && token->comp) {
2459 /* Save index for cmd_flow_get_help(). */
2460 ctx->prev = list[0];
2461 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2463 /* Otherwise make sure the index is valid and use defaults. */
2466 token = &token_list[list[index]];
2467 snprintf(dst, size, "%s", token->name);
2468 /* Save index for cmd_flow_get_help(). */
2469 ctx->prev = list[index];
2473 /** Populate help strings for current token (cmdline API). */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Writes the token type (or "TOKEN") into `dst` and points the global
 * cmd_flow.help_str at the token's help text, falling back to its name.
 */
2475 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2477 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
2478 const struct token *token = &token_list[ctx->prev];
2481 /* Tell cmd_flow_parse() that context must be reinitialized. */
2485 /* Set token type and update global help with details. */
2486 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
2488 cmd_flow.help_str = token->help;
2490 cmd_flow.help_str = token->name;
2494 /** Token definition template (cmdline API). */
/* Every dynamic token shares this single header: the four callbacks
 * consult cmd_flow_context to know which token is actually current. */
2495 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2496 .ops = &(struct cmdline_token_ops){
2497 .parse = cmd_flow_parse,
2498 .complete_get_nb = cmd_flow_complete_get_nb,
2499 .complete_get_elt = cmd_flow_complete_get_elt,
2500 .get_help = cmd_flow_get_help,
2505 /** Populate the next dynamic token. */
/*
 * NOTE(review): lines missing from this extract; code kept byte-identical.
 * Called by cmd_flow_cb() before each token: reinitializes the context
 * for the first token, reports NULL when nothing more is expected, and
 * scans the candidate list at EOL to decide whether the command may end
 * here. Otherwise hands back the shared cmd_flow_token_hdr template.
 */
2507 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2508 cmdline_parse_token_hdr_t *(*hdrs)[])
2510 struct context *ctx = &cmd_flow_context;
2512 /* Always reinitialize context before requesting the first token. */
2514 cmd_flow_context_init(ctx);
2515 /* Return NULL when no more tokens are expected. */
2516 if (!ctx->next_num && ctx->curr) {
2520 /* Determine if command should end here. */
2521 if (ctx->eol && ctx->last && ctx->next_num) {
2522 const enum index *list = ctx->next[ctx->next_num - 1];
2525 for (i = 0; list[i]; ++i) {
2532 *hdr = &cmd_flow_token_hdr;
2535 /** Dispatch parsed buffer to function calls. */
/*
 * NOTE(review): case labels and break statements are missing from this
 * extract; code kept byte-identical. Maps each completed command buffer
 * to the corresponding port_flow_*() backend call.
 */
2537 cmd_flow_parsed(const struct buffer *in)
2539 switch (in->command) {
2541 port_flow_validate(in->port, &in->args.vc.attr,
2542 in->args.vc.pattern, in->args.vc.actions);
2545 port_flow_create(in->port, &in->args.vc.attr,
2546 in->args.vc.pattern, in->args.vc.actions);
2549 port_flow_destroy(in->port, in->args.destroy.rule_n,
2550 in->args.destroy.rule);
2553 port_flow_flush(in->port);
2556 port_flow_query(in->port, in->args.query.rule,
2557 in->args.query.action);
2560 port_flow_list(in->port, in->args.list.group_n,
2561 in->args.list.group);
2568 /** Token generator and output processing callback (cmdline API). */
/*
 * NOTE(review): the branch selecting between the two calls is missing
 * from this extract; code kept byte-identical. Serves double duty for
 * the cmdline library: token generation via cmd_flow_tok() during
 * parsing, then final dispatch via cmd_flow_parsed().
 */
2570 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2573 cmd_flow_tok(arg0, arg2);
2575 cmd_flow_parsed(arg0);
2578 /** Global parser instance (cmdline API). */
2579 cmdline_parse_inst_t cmd_flow = {
2581 .data = NULL, /**< Unused. */
2582 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2585 }, /**< Tokens are returned by cmd_flow_tok(). */