1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
73 /* Validate/create pattern. */
106 ITEM_VLAN_INNER_TYPE,
138 ITEM_E_TAG_GRP_ECID_B,
155 /* Validate/create actions. */
171 ACTION_RSS_FUNC_DEFAULT,
172 ACTION_RSS_FUNC_TOEPLITZ,
173 ACTION_RSS_FUNC_SIMPLE_XOR,
188 /** Maximum size for pattern in struct rte_flow_item_raw. */
/* Caps the byte string stored after the raw item header; ITEM_RAW_SIZE
 * below adds this on top of sizeof(struct rte_flow_item_raw). */
189 #define ITEM_RAW_PATTERN_SIZE 40
191 /** Storage size for struct rte_flow_item_raw including pattern. */
/* Item structure plus ITEM_RAW_PATTERN_SIZE trailing pattern bytes;
 * used as the RAW item's private size (see PRIV_ITEM(RAW, ...)). */
192 #define ITEM_RAW_SIZE \
193 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
195 /** Maximum number of queue indices in struct rte_flow_action_rss. */
/* Bounds the queue[] array embedded in struct action_rss_data below. */
196 #define ACTION_RSS_QUEUE_NUM 32
198 /** Storage for struct rte_flow_action_rss including external data. */
199 struct action_rss_data {
200 struct rte_flow_action_rss conf;
201 uint8_t key[RSS_HASH_KEY_LENGTH];
202 uint16_t queue[ACTION_RSS_QUEUE_NUM];
205 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Depth of the next[] and args[] stacks in struct context; push_args()
 * rejects a push once args_num reaches this value. */
206 #define CTX_STACK_SIZE 16
208 /** Parser context. */
210 /** Stack of subsequent token lists to process. */
211 const enum index *next[CTX_STACK_SIZE];
212 /** Arguments for stacked tokens. */
213 const void *args[CTX_STACK_SIZE];
214 enum index curr; /**< Current token index. */
215 enum index prev; /**< Index of the last token seen. */
216 int next_num; /**< Number of entries in next[]. */
217 int args_num; /**< Number of entries in args[]. */
218 uint32_t eol:1; /**< EOL has been detected. */
219 uint32_t last:1; /**< No more arguments. */
220 portid_t port; /**< Current port ID (for completions). */
221 uint32_t objdata; /**< Object-specific data. */
222 void *object; /**< Address of current object for relative offsets. */
223 void *objmask; /**< Object a full mask must be written to. */
226 /** Token argument. */
228 uint32_t hton:1; /**< Use network byte ordering. */
229 uint32_t sign:1; /**< Value is signed. */
230 uint32_t bounded:1; /**< Value is bounded. */
231 uintmax_t min; /**< Minimum value if bounded. */
232 uintmax_t max; /**< Maximum value if bounded. */
233 uint32_t offset; /**< Relative offset from ctx->object. */
234 uint32_t size; /**< Field size. */
235 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
238 /** Parser token definition. */
240 /** Type displayed during completion (defaults to "TOKEN"). */
242 /** Help displayed during completion (defaults to token name). */
244 /** Private data used by parser functions. */
247 * Lists of subsequent tokens to push on the stack. Each call to the
248 * parser consumes the last entry of that stack.
250 const enum index *const *next;
251 /** Arguments stack for subsequent tokens that need them. */
252 const struct arg *const *args;
254 * Token-processing callback, returns -1 in case of error, the
255 * length of the matched string otherwise. If NULL, attempts to
256 * match the token name.
258 * If buf is not NULL, the result should be stored in it according
259 * to context. An error is returned if not large enough.
261 int (*call)(struct context *ctx, const struct token *token,
262 const char *str, unsigned int len,
263 void *buf, unsigned int size);
265 * Callback that provides possible values for this token, used for
266 * completion. Returns -1 in case of error, the number of possible
267 * values otherwise. If NULL, the token name is used.
269 * If buf is not NULL, entry index ent is written to buf and the
270 * full length of the entry is returned (same behavior as
273 int (*comp)(struct context *ctx, const struct token *token,
274 unsigned int ent, char *buf, unsigned int size);
275 /** Mandatory token name, no default value. */
279 /** Static initializer for the next field. */
/* Compound literal: NULL-terminated array of token-index lists, each list
 * typically produced by NEXT_ENTRY() or named as a next_*[] table. */
280 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
282 /** Static initializer for a NEXT() entry. */
/* Compound literal: single list of token indices terminated by ZERO. */
283 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
285 /** Static initializer for the args field. */
/* Compound literal: NULL-terminated array of argument descriptors, each
 * entry produced by one of the ARGS_ENTRY*() helpers below. */
286 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
288 /** Static initializer for ARGS() to target a field. */
289 #define ARGS_ENTRY(s, f) \
290 (&(const struct arg){ \
291 .offset = offsetof(s, f), \
292 .size = sizeof(((s *)0)->f), \
295 /** Static initializer for ARGS() to target a bit-field. */
296 #define ARGS_ENTRY_BF(s, f, b) \
297 (&(const struct arg){ \
299 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
302 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
303 #define ARGS_ENTRY_MASK(s, f, m) \
304 (&(const struct arg){ \
305 .offset = offsetof(s, f), \
306 .size = sizeof(((s *)0)->f), \
307 .mask = (const void *)(m), \
310 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
311 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
312 (&(const struct arg){ \
314 .offset = offsetof(s, f), \
315 .size = sizeof(((s *)0)->f), \
316 .mask = (const void *)(m), \
319 /** Static initializer for ARGS() to target a pointer. */
320 #define ARGS_ENTRY_PTR(s, f) \
321 (&(const struct arg){ \
322 .size = sizeof(*((s *)0)->f), \
325 /** Static initializer for ARGS() with arbitrary offset and size. */
326 #define ARGS_ENTRY_ARB(o, s) \
327 (&(const struct arg){ \
332 /** Same as ARGS_ENTRY_ARB() with bounded values. */
333 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
334 (&(const struct arg){ \
342 /** Same as ARGS_ENTRY() using network byte ordering. */
343 #define ARGS_ENTRY_HTON(s, f) \
344 (&(const struct arg){ \
346 .offset = offsetof(s, f), \
347 .size = sizeof(((s *)0)->f), \
350 /** Parser output buffer layout expected by cmd_flow_parsed(). */
352 enum index command; /**< Flow command. */
353 portid_t port; /**< Affected port ID. */
356 struct rte_flow_attr attr;
357 struct rte_flow_item *pattern;
358 struct rte_flow_action *actions;
362 } vc; /**< Validate/create arguments. */
366 } destroy; /**< Destroy arguments. */
369 enum rte_flow_action_type action;
370 } query; /**< Query arguments. */
374 } list; /**< List arguments. */
377 } isolate; /**< Isolated mode arguments. */
378 } args; /**< Command arguments. */
381 /** Private data for pattern items. */
382 struct parse_item_priv {
383 enum rte_flow_item_type type; /**< Item type. */
384 uint32_t size; /**< Size of item specification structure. */
387 #define PRIV_ITEM(t, s) \
388 (&(const struct parse_item_priv){ \
389 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
393 /** Private data for actions. */
394 struct parse_action_priv {
395 enum rte_flow_action_type type; /**< Action type. */
396 uint32_t size; /**< Size of action configuration structure. */
399 #define PRIV_ACTION(t, s) \
400 (&(const struct parse_action_priv){ \
401 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
405 static const enum index next_vc_attr[] = {
414 static const enum index next_destroy_attr[] = {
420 static const enum index next_list_attr[] = {
426 static const enum index item_param[] = {
435 static const enum index next_item[] = {
465 static const enum index item_fuzzy[] = {
471 static const enum index item_any[] = {
477 static const enum index item_vf[] = {
483 static const enum index item_port[] = {
489 static const enum index item_raw[] = {
499 static const enum index item_eth[] = {
507 static const enum index item_vlan[] = {
512 ITEM_VLAN_INNER_TYPE,
517 static const enum index item_ipv4[] = {
527 static const enum index item_ipv6[] = {
538 static const enum index item_icmp[] = {
545 static const enum index item_udp[] = {
552 static const enum index item_tcp[] = {
560 static const enum index item_sctp[] = {
569 static const enum index item_vxlan[] = {
575 static const enum index item_e_tag[] = {
576 ITEM_E_TAG_GRP_ECID_B,
581 static const enum index item_nvgre[] = {
587 static const enum index item_mpls[] = {
593 static const enum index item_gre[] = {
599 static const enum index item_gtp[] = {
605 static const enum index item_geneve[] = {
612 static const enum index next_action[] = {
628 static const enum index action_mark[] = {
634 static const enum index action_queue[] = {
640 static const enum index action_rss[] = {
651 static const enum index action_vf[] = {
658 static const enum index action_meter[] = {
664 static int parse_init(struct context *, const struct token *,
665 const char *, unsigned int,
666 void *, unsigned int);
667 static int parse_vc(struct context *, const struct token *,
668 const char *, unsigned int,
669 void *, unsigned int);
670 static int parse_vc_spec(struct context *, const struct token *,
671 const char *, unsigned int, void *, unsigned int);
672 static int parse_vc_conf(struct context *, const struct token *,
673 const char *, unsigned int, void *, unsigned int);
674 static int parse_vc_action_rss(struct context *, const struct token *,
675 const char *, unsigned int, void *,
677 static int parse_vc_action_rss_func(struct context *, const struct token *,
678 const char *, unsigned int, void *,
680 static int parse_vc_action_rss_type(struct context *, const struct token *,
681 const char *, unsigned int, void *,
683 static int parse_vc_action_rss_queue(struct context *, const struct token *,
684 const char *, unsigned int, void *,
686 static int parse_destroy(struct context *, const struct token *,
687 const char *, unsigned int,
688 void *, unsigned int);
689 static int parse_flush(struct context *, const struct token *,
690 const char *, unsigned int,
691 void *, unsigned int);
692 static int parse_query(struct context *, const struct token *,
693 const char *, unsigned int,
694 void *, unsigned int);
695 static int parse_action(struct context *, const struct token *,
696 const char *, unsigned int,
697 void *, unsigned int);
698 static int parse_list(struct context *, const struct token *,
699 const char *, unsigned int,
700 void *, unsigned int);
701 static int parse_isolate(struct context *, const struct token *,
702 const char *, unsigned int,
703 void *, unsigned int);
704 static int parse_int(struct context *, const struct token *,
705 const char *, unsigned int,
706 void *, unsigned int);
707 static int parse_prefix(struct context *, const struct token *,
708 const char *, unsigned int,
709 void *, unsigned int);
710 static int parse_boolean(struct context *, const struct token *,
711 const char *, unsigned int,
712 void *, unsigned int);
713 static int parse_string(struct context *, const struct token *,
714 const char *, unsigned int,
715 void *, unsigned int);
716 static int parse_mac_addr(struct context *, const struct token *,
717 const char *, unsigned int,
718 void *, unsigned int);
719 static int parse_ipv4_addr(struct context *, const struct token *,
720 const char *, unsigned int,
721 void *, unsigned int);
722 static int parse_ipv6_addr(struct context *, const struct token *,
723 const char *, unsigned int,
724 void *, unsigned int);
725 static int parse_port(struct context *, const struct token *,
726 const char *, unsigned int,
727 void *, unsigned int);
728 static int comp_none(struct context *, const struct token *,
729 unsigned int, char *, unsigned int);
730 static int comp_boolean(struct context *, const struct token *,
731 unsigned int, char *, unsigned int);
732 static int comp_action(struct context *, const struct token *,
733 unsigned int, char *, unsigned int);
734 static int comp_port(struct context *, const struct token *,
735 unsigned int, char *, unsigned int);
736 static int comp_rule_id(struct context *, const struct token *,
737 unsigned int, char *, unsigned int);
738 static int comp_vc_action_rss_type(struct context *, const struct token *,
739 unsigned int, char *, unsigned int);
740 static int comp_vc_action_rss_queue(struct context *, const struct token *,
741 unsigned int, char *, unsigned int);
743 /** Token definitions. */
744 static const struct token token_list[] = {
745 /* Special tokens. */
748 .help = "null entry, abused as the entry point",
749 .next = NEXT(NEXT_ENTRY(FLOW)),
754 .help = "command may end here",
760 .help = "integer value",
765 .name = "{unsigned}",
767 .help = "unsigned integer value",
774 .help = "prefix length for bit-mask",
775 .call = parse_prefix,
781 .help = "any boolean value",
782 .call = parse_boolean,
783 .comp = comp_boolean,
788 .help = "fixed string",
789 .call = parse_string,
793 .name = "{MAC address}",
795 .help = "standard MAC address notation",
796 .call = parse_mac_addr,
800 .name = "{IPv4 address}",
801 .type = "IPV4 ADDRESS",
802 .help = "standard IPv4 address notation",
803 .call = parse_ipv4_addr,
807 .name = "{IPv6 address}",
808 .type = "IPV6 ADDRESS",
809 .help = "standard IPv6 address notation",
810 .call = parse_ipv6_addr,
816 .help = "rule identifier",
818 .comp = comp_rule_id,
823 .help = "port identifier",
828 .name = "{group_id}",
830 .help = "group identifier",
837 .help = "priority level",
841 /* Top-level command. */
844 .type = "{command} {port_id} [{arg} [...]]",
845 .help = "manage ingress/egress flow rules",
846 .next = NEXT(NEXT_ENTRY
856 /* Sub-level commands. */
859 .help = "check whether a flow rule can be created",
860 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
861 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
866 .help = "create a flow rule",
867 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
868 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
873 .help = "destroy specific flow rules",
874 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
875 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
876 .call = parse_destroy,
880 .help = "destroy all flow rules",
881 .next = NEXT(NEXT_ENTRY(PORT_ID)),
882 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
887 .help = "query an existing flow rule",
888 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
890 NEXT_ENTRY(PORT_ID)),
891 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
892 ARGS_ENTRY(struct buffer, args.query.rule),
893 ARGS_ENTRY(struct buffer, port)),
898 .help = "list existing flow rules",
899 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
900 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
905 .help = "restrict ingress traffic to the defined flow rules",
906 .next = NEXT(NEXT_ENTRY(BOOLEAN),
907 NEXT_ENTRY(PORT_ID)),
908 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
909 ARGS_ENTRY(struct buffer, port)),
910 .call = parse_isolate,
912 /* Destroy arguments. */
915 .help = "specify a rule identifier",
916 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
917 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
918 .call = parse_destroy,
920 /* Query arguments. */
924 .help = "action to query, must be part of the rule",
925 .call = parse_action,
928 /* List arguments. */
931 .help = "specify a group",
932 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
933 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
936 /* Validate/create attributes. */
939 .help = "specify a group",
940 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
941 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
946 .help = "specify a priority level",
947 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
948 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
953 .help = "affect rule to ingress",
954 .next = NEXT(next_vc_attr),
959 .help = "affect rule to egress",
960 .next = NEXT(next_vc_attr),
963 /* Validate/create pattern. */
966 .help = "submit a list of pattern items",
967 .next = NEXT(next_item),
972 .help = "match value perfectly (with full bit-mask)",
973 .call = parse_vc_spec,
975 [ITEM_PARAM_SPEC] = {
977 .help = "match value according to configured bit-mask",
978 .call = parse_vc_spec,
980 [ITEM_PARAM_LAST] = {
982 .help = "specify upper bound to establish a range",
983 .call = parse_vc_spec,
985 [ITEM_PARAM_MASK] = {
987 .help = "specify bit-mask with relevant bits set to one",
988 .call = parse_vc_spec,
990 [ITEM_PARAM_PREFIX] = {
992 .help = "generate bit-mask from a prefix length",
993 .call = parse_vc_spec,
997 .help = "specify next pattern item",
998 .next = NEXT(next_item),
1002 .help = "end list of pattern items",
1003 .priv = PRIV_ITEM(END, 0),
1004 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1009 .help = "no-op pattern item",
1010 .priv = PRIV_ITEM(VOID, 0),
1011 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1016 .help = "perform actions when pattern does not match",
1017 .priv = PRIV_ITEM(INVERT, 0),
1018 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1023 .help = "match any protocol for the current layer",
1024 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1025 .next = NEXT(item_any),
1030 .help = "number of layers covered",
1031 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1032 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1036 .help = "match packets addressed to the physical function",
1037 .priv = PRIV_ITEM(PF, 0),
1038 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1043 .help = "match packets addressed to a virtual function ID",
1044 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1045 .next = NEXT(item_vf),
1050 .help = "destination VF ID",
1051 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1052 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1056 .help = "device-specific physical port index to use",
1057 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1058 .next = NEXT(item_port),
1061 [ITEM_PORT_INDEX] = {
1063 .help = "physical port index",
1064 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1065 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1069 .help = "match an arbitrary byte string",
1070 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1071 .next = NEXT(item_raw),
1074 [ITEM_RAW_RELATIVE] = {
1076 .help = "look for pattern after the previous item",
1077 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1078 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1081 [ITEM_RAW_SEARCH] = {
1083 .help = "search pattern from offset (see also limit)",
1084 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1085 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1088 [ITEM_RAW_OFFSET] = {
1090 .help = "absolute or relative offset for pattern",
1091 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1092 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1094 [ITEM_RAW_LIMIT] = {
1096 .help = "search area limit for start of pattern",
1097 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1098 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1100 [ITEM_RAW_PATTERN] = {
1102 .help = "byte string to look for",
1103 .next = NEXT(item_raw,
1105 NEXT_ENTRY(ITEM_PARAM_IS,
1108 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1109 ARGS_ENTRY(struct rte_flow_item_raw, length),
1110 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1111 ITEM_RAW_PATTERN_SIZE)),
1115 .help = "match Ethernet header",
1116 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1117 .next = NEXT(item_eth),
1122 .help = "destination MAC",
1123 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1124 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1128 .help = "source MAC",
1129 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1130 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1134 .help = "EtherType",
1135 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1136 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1140 .help = "match 802.1Q/ad VLAN tag",
1141 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1142 .next = NEXT(item_vlan),
1147 .help = "tag control information",
1148 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1149 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1153 .help = "priority code point",
1154 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1155 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1160 .help = "drop eligible indicator",
1161 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1162 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1167 .help = "VLAN identifier",
1168 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1169 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1172 [ITEM_VLAN_INNER_TYPE] = {
1173 .name = "inner_type",
1174 .help = "inner EtherType",
1175 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1176 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1181 .help = "match IPv4 header",
1182 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1183 .next = NEXT(item_ipv4),
1188 .help = "type of service",
1189 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1190 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1191 hdr.type_of_service)),
1195 .help = "time to live",
1196 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1197 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1200 [ITEM_IPV4_PROTO] = {
1202 .help = "next protocol ID",
1203 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1204 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1205 hdr.next_proto_id)),
1209 .help = "source address",
1210 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1211 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1216 .help = "destination address",
1217 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1218 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1223 .help = "match IPv6 header",
1224 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1225 .next = NEXT(item_ipv6),
1230 .help = "traffic class",
1231 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1232 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1234 "\x0f\xf0\x00\x00")),
1236 [ITEM_IPV6_FLOW] = {
1238 .help = "flow label",
1239 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1240 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1242 "\x00\x0f\xff\xff")),
1244 [ITEM_IPV6_PROTO] = {
1246 .help = "protocol (next header)",
1247 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1248 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1253 .help = "hop limit",
1254 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1255 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1260 .help = "source address",
1261 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1262 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1267 .help = "destination address",
1268 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1269 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1274 .help = "match ICMP header",
1275 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1276 .next = NEXT(item_icmp),
1279 [ITEM_ICMP_TYPE] = {
1281 .help = "ICMP packet type",
1282 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1283 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1286 [ITEM_ICMP_CODE] = {
1288 .help = "ICMP packet code",
1289 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1290 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1295 .help = "match UDP header",
1296 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1297 .next = NEXT(item_udp),
1302 .help = "UDP source port",
1303 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1304 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1309 .help = "UDP destination port",
1310 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1311 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1316 .help = "match TCP header",
1317 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1318 .next = NEXT(item_tcp),
1323 .help = "TCP source port",
1324 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1325 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1330 .help = "TCP destination port",
1331 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1332 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1335 [ITEM_TCP_FLAGS] = {
1337 .help = "TCP flags",
1338 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1339 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1344 .help = "match SCTP header",
1345 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1346 .next = NEXT(item_sctp),
1351 .help = "SCTP source port",
1352 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1353 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1358 .help = "SCTP destination port",
1359 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1360 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1365 .help = "validation tag",
1366 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1367 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1370 [ITEM_SCTP_CKSUM] = {
1373 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1374 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1379 .help = "match VXLAN header",
1380 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1381 .next = NEXT(item_vxlan),
1384 [ITEM_VXLAN_VNI] = {
1386 .help = "VXLAN identifier",
1387 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1388 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1392 .help = "match E-Tag header",
1393 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1394 .next = NEXT(item_e_tag),
1397 [ITEM_E_TAG_GRP_ECID_B] = {
1398 .name = "grp_ecid_b",
1399 .help = "GRP and E-CID base",
1400 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1401 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1407 .help = "match NVGRE header",
1408 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1409 .next = NEXT(item_nvgre),
1412 [ITEM_NVGRE_TNI] = {
1414 .help = "virtual subnet ID",
1415 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1416 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1420 .help = "match MPLS header",
1421 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1422 .next = NEXT(item_mpls),
1425 [ITEM_MPLS_LABEL] = {
1427 .help = "MPLS label",
1428 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1429 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1435 .help = "match GRE header",
1436 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1437 .next = NEXT(item_gre),
1440 [ITEM_GRE_PROTO] = {
1442 .help = "GRE protocol type",
1443 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1444 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1449 .help = "fuzzy pattern match, expect faster than default",
1450 .priv = PRIV_ITEM(FUZZY,
1451 sizeof(struct rte_flow_item_fuzzy)),
1452 .next = NEXT(item_fuzzy),
1455 [ITEM_FUZZY_THRESH] = {
1457 .help = "match accuracy threshold",
1458 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1459 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1464 .help = "match GTP header",
1465 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1466 .next = NEXT(item_gtp),
1471 .help = "tunnel endpoint identifier",
1472 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1473 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1477 .help = "match GTP header",
1478 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1479 .next = NEXT(item_gtp),
1484 .help = "match GTP header",
1485 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1486 .next = NEXT(item_gtp),
1491 .help = "match GENEVE header",
1492 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1493 .next = NEXT(item_geneve),
1496 [ITEM_GENEVE_VNI] = {
1498 .help = "virtual network identifier",
1499 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1500 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1502 [ITEM_GENEVE_PROTO] = {
1504 .help = "GENEVE protocol type",
1505 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1506 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1510 /* Validate/create actions. */
1513 .help = "submit a list of associated actions",
1514 .next = NEXT(next_action),
1519 .help = "specify next action",
1520 .next = NEXT(next_action),
1524 .help = "end list of actions",
1525 .priv = PRIV_ACTION(END, 0),
1530 .help = "no-op action",
1531 .priv = PRIV_ACTION(VOID, 0),
1532 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1535 [ACTION_PASSTHRU] = {
1537 .help = "let subsequent rule process matched packets",
1538 .priv = PRIV_ACTION(PASSTHRU, 0),
1539 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1544 .help = "attach 32 bit value to packets",
1545 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1546 .next = NEXT(action_mark),
1549 [ACTION_MARK_ID] = {
1551 .help = "32 bit value to return with packets",
1552 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1553 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1554 .call = parse_vc_conf,
1558 .help = "flag packets",
1559 .priv = PRIV_ACTION(FLAG, 0),
1560 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1565 .help = "assign packets to a given queue index",
1566 .priv = PRIV_ACTION(QUEUE,
1567 sizeof(struct rte_flow_action_queue)),
1568 .next = NEXT(action_queue),
1571 [ACTION_QUEUE_INDEX] = {
1573 .help = "queue index to use",
1574 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1575 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1576 .call = parse_vc_conf,
1580 .help = "drop packets (note: passthru has priority)",
1581 .priv = PRIV_ACTION(DROP, 0),
1582 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1587 .help = "enable counters for this rule",
1588 .priv = PRIV_ACTION(COUNT, 0),
1589 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1594 .help = "spread packets among several queues",
1595 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1596 .next = NEXT(action_rss),
1597 .call = parse_vc_action_rss,
1599 [ACTION_RSS_FUNC] = {
1601 .help = "RSS hash function to apply",
1602 .next = NEXT(action_rss,
1603 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1604 ACTION_RSS_FUNC_TOEPLITZ,
1605 ACTION_RSS_FUNC_SIMPLE_XOR)),
1607 [ACTION_RSS_FUNC_DEFAULT] = {
1609 .help = "default hash function",
1610 .call = parse_vc_action_rss_func,
1612 [ACTION_RSS_FUNC_TOEPLITZ] = {
1614 .help = "Toeplitz hash function",
1615 .call = parse_vc_action_rss_func,
1617 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1618 .name = "simple_xor",
1619 .help = "simple XOR hash function",
1620 .call = parse_vc_action_rss_func,
1622 [ACTION_RSS_LEVEL] = {
1624 .help = "encapsulation level for \"types\"",
1625 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1626 .args = ARGS(ARGS_ENTRY_ARB
1627 (offsetof(struct action_rss_data, conf) +
1628 offsetof(struct rte_flow_action_rss, level),
1629 sizeof(((struct rte_flow_action_rss *)0)->
1632 [ACTION_RSS_TYPES] = {
1634 .help = "specific RSS hash types",
1635 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1637 [ACTION_RSS_TYPE] = {
1639 .help = "RSS hash type",
1640 .call = parse_vc_action_rss_type,
1641 .comp = comp_vc_action_rss_type,
1643 [ACTION_RSS_KEY] = {
1645 .help = "RSS hash key",
1646 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1647 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1649 (offsetof(struct action_rss_data, conf) +
1650 offsetof(struct rte_flow_action_rss, key_len),
1651 sizeof(((struct rte_flow_action_rss *)0)->
1653 ARGS_ENTRY(struct action_rss_data, key)),
1655 [ACTION_RSS_KEY_LEN] = {
1657 .help = "RSS hash key length in bytes",
1658 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1659 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1660 (offsetof(struct action_rss_data, conf) +
1661 offsetof(struct rte_flow_action_rss, key_len),
1662 sizeof(((struct rte_flow_action_rss *)0)->
1665 RSS_HASH_KEY_LENGTH)),
1667 [ACTION_RSS_QUEUES] = {
1669 .help = "queue indices to use",
1670 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1671 .call = parse_vc_conf,
1673 [ACTION_RSS_QUEUE] = {
1675 .help = "queue index",
1676 .call = parse_vc_action_rss_queue,
1677 .comp = comp_vc_action_rss_queue,
1681 .help = "redirect packets to physical device function",
1682 .priv = PRIV_ACTION(PF, 0),
1683 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1688 .help = "redirect packets to virtual device function",
1689 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1690 .next = NEXT(action_vf),
1693 [ACTION_VF_ORIGINAL] = {
1695 .help = "use original VF ID if possible",
1696 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1697 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1699 .call = parse_vc_conf,
1703 .help = "VF ID to redirect packets to",
1704 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1705 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1706 .call = parse_vc_conf,
1710 .help = "meter the directed packets at given id",
1711 .priv = PRIV_ACTION(METER,
1712 sizeof(struct rte_flow_action_meter)),
1713 .next = NEXT(action_meter),
1716 [ACTION_METER_ID] = {
1718 .help = "meter id to use",
1719 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1720 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1721 .call = parse_vc_conf,
1725 /** Remove and return last entry from argument stack. */
1726 static const struct arg *
1727 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty; otherwise decrements args_num and
 * returns the most recently pushed argument descriptor. */
1729 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1732 /** Add entry on top of the argument stack. */
1734 push_args(struct context *ctx, const struct arg *arg)
/* Stack overflow check: when full, the push is rejected (error return is on
 * an elided line — presumably nonzero; TODO confirm against full source). */
1736 if (ctx->args_num == CTX_STACK_SIZE)
1738 ctx->args[ctx->args_num++] = arg;
1742 /** Spread value into buffer according to bit-mask. */
/* Scatters the low-order bits of `val` into only those bit positions set in
 * arg->mask, starting at dst + arg->offset. With dst == NULL it is used as a
 * dry run (see parse_prefix). Several interior lines are elided here; the
 * byte-order #if suggests LE hosts walk the mask from its last byte so the
 * value lands in network order — NOTE(review): confirm against full source. */
1744 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1746 uint32_t i = arg->size;
1754 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1763 unsigned int shift = 0;
1764 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For each bit set in the current mask byte, clear it then copy the next
 * bit of `val` into it; bits absent from the mask are skipped entirely. */
1766 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1767 if (!(arg->mask[i] & (1 << shift)))
1772 *buf &= ~(1 << shift);
1773 *buf |= (val & 1) << shift;
1781 /** Compare a string with a partial one of a given length. */
/* Like strncmp() but also treats `partial` as a strict prefix: when the
 * first partial_len characters match and `full` is longer, the result is
 * full[partial_len] (nonzero), so only an exact or full-prefix match wins. */
1783 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1785 int r = strncmp(full, partial, partial_len);
1789 if (strlen(full) <= partial_len)
1791 return full[partial_len];
1795 * Parse a prefix length and generate a bit-mask.
1797 * Last argument (ctx->args) is retrieved to determine mask size, storage
1798 * location and whether the result must use network byte ordering.
1801 parse_prefix(struct context *ctx, const struct token *token,
1802 const char *str, unsigned int len,
1803 void *buf, unsigned int size)
1805 const struct arg *arg = pop_args(ctx);
/* conv[k] is a byte whose top k bits are set (k = 0..8): the partial-byte
 * tail of a /N prefix mask. */
1806 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1813 /* Argument is expected. */
/* Whole token must be a valid unsigned integer (base auto-detected). */
1817 u = strtoumax(str, &end, 0);
1818 if (errno || (size_t)(end - str) != len)
/* Dry run detects bit-mask style arguments (see arg_entry_bf_fill). */
1823 extra = arg_entry_bf_fill(NULL, 0, arg);
1832 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1833 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes longer than the destination field. */
1840 if (bytes > size || bytes + !!extra > size)
1844 buf = (uint8_t *)ctx->object + arg->offset;
/* Build the mask in network byte order: full 0xff bytes, optional partial
 * byte from conv[], zero padding for the remainder. */
1845 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1847 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1848 memset(buf, 0x00, size - bytes);
1850 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1854 memset(buf, 0xff, bytes);
1855 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1857 ((uint8_t *)buf)[bytes] = conv[extra];
1860 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument so the caller's stack is unchanged. */
1863 push_args(ctx, arg);
1867 /** Default parsing function for token name matching. */
/* Accepts the input when it is a (possibly partial) match for token->name;
 * buf/size are unused by this default implementation. */
1869 parse_default(struct context *ctx, const struct token *token,
1870 const char *str, unsigned int len,
1871 void *buf, unsigned int size)
1876 if (strcmp_partial(token->name, str, len))
1881 /** Parse flow command, initialize output buffer for subsequent tokens. */
1883 parse_init(struct context *ctx, const struct token *token,
1884 const char *str, unsigned int len,
1885 void *buf, unsigned int size)
1887 struct buffer *out = buf;
1889 /* Token name must match. */
1890 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1892 /* Nothing else to do if there is no buffer. */
1895 /* Make sure buffer is large enough. */
1896 if (size < sizeof(*out))
1898 /* Initialize buffer. */
/* Header zeroed; spare tail filled with 0x22, presumably a poison pattern
 * to catch use of uninitialized space — NOTE(review): confirm intent. */
1899 memset(out, 0x00, sizeof(*out));
1900 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1903 ctx->objmask = NULL;
1907 /** Parse tokens for validate/create commands. */
/* Shared handler for the whole validate/create token family. It lays out a
 * single arena inside `out`: rte_flow_item / rte_flow_action arrays grow
 * upward after the header while per-entry spec/conf data grows downward
 * from out + size (out->args.vc.data), failing when the two meet. */
1909 parse_vc(struct context *ctx, const struct token *token,
1910 const char *str, unsigned int len,
1911 void *buf, unsigned int size)
1913 struct buffer *out = buf;
1917 /* Token name must match. */
1918 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1920 /* Nothing else to do if there is no buffer. */
/* First invocation: record which command (VALIDATE/CREATE) started this. */
1923 if (!out->command) {
1924 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1926 if (sizeof(*out) > size)
1928 out->command = ctx->curr;
1931 ctx->objmask = NULL;
1932 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write into the shared flow attr structure. */
1936 ctx->object = &out->args.vc.attr;
1937 ctx->objmask = NULL;
1938 switch (ctx->curr) {
1943 out->args.vc.attr.ingress = 1;
1946 out->args.vc.attr.egress = 1;
/* "pattern": item array starts right after the header, aligned. */
1949 out->args.vc.pattern =
1950 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1952 ctx->object = out->args.vc.pattern;
1953 ctx->objmask = NULL;
/* "actions": action array starts after the completed pattern array. */
1956 out->args.vc.actions =
1957 (void *)RTE_ALIGN_CEIL((uintptr_t)
1958 (out->args.vc.pattern +
1959 out->args.vc.pattern_n),
1961 ctx->object = out->args.vc.actions;
1962 ctx->objmask = NULL;
/* Still in the pattern section: append a flow item; its spec/last/mask
 * triple is carved out of the top-down data area. */
1969 if (!out->args.vc.actions) {
1970 const struct parse_item_priv *priv = token->priv;
1971 struct rte_flow_item *item =
1972 out->args.vc.pattern + out->args.vc.pattern_n;
1974 data_size = priv->size * 3; /* spec, last, mask */
1975 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1976 (out->args.vc.data - data_size),
/* Out of room: arrays and data area would overlap. */
1978 if ((uint8_t *)item + sizeof(*item) > data)
1980 *item = (struct rte_flow_item){
1983 ++out->args.vc.pattern_n;
1985 ctx->objmask = NULL;
/* Otherwise append a flow action; only one conf area is needed. */
1987 const struct parse_action_priv *priv = token->priv;
1988 struct rte_flow_action *action =
1989 out->args.vc.actions + out->args.vc.actions_n;
1991 data_size = priv->size; /* configuration */
1992 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1993 (out->args.vc.data - data_size),
1995 if ((uint8_t *)action + sizeof(*action) > data)
1997 *action = (struct rte_flow_action){
1999 .conf = data_size ? data : NULL,
2001 ++out->args.vc.actions_n;
2002 ctx->object = action;
2003 ctx->objmask = NULL;
/* Zero the freshly reserved data slot and move the watermark down. */
2005 memset(data, 0, data_size);
2006 out->args.vc.data = data;
2007 ctx->objdata = data_size;
2011 /** Parse pattern item parameter type. */
/* Handles the spec/last/prefix/mask keywords of a pattern item, selecting
 * which third of the item's data triple subsequent values are written to. */
2013 parse_vc_spec(struct context *ctx, const struct token *token,
2014 const char *str, unsigned int len,
2015 void *buf, unsigned int size)
2017 struct buffer *out = buf;
2018 struct rte_flow_item *item;
2024 /* Token name must match. */
2025 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2027 /* Parse parameter types. */
2028 switch (ctx->curr) {
2029 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2035 case ITEM_PARAM_SPEC:
2038 case ITEM_PARAM_LAST:
2041 case ITEM_PARAM_PREFIX:
2042 /* Modify next token to expect a prefix. */
2043 if (ctx->next_num < 2)
2045 ctx->next[ctx->next_num - 2] = prefix;
2047 case ITEM_PARAM_MASK:
2053 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach parameters to. */
2056 if (!out->args.vc.pattern_n)
2058 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2059 data_size = ctx->objdata / 3; /* spec, last, mask */
2060 /* Point to selected object. */
2061 ctx->object = out->args.vc.data + (data_size * index);
2063 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2064 item->mask = ctx->objmask;
2066 ctx->objmask = NULL;
2067 /* Update relevant item pointer. */
/* Array-of-pointers trick: index 0/1/2 selects spec/last/mask. */
2068 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2073 /** Parse action configuration field. */
/* Generic .call handler for action sub-tokens: redirects ctx->object at the
 * current action's configuration area so value tokens fill it in. */
2075 parse_vc_conf(struct context *ctx, const struct token *token,
2076 const char *str, unsigned int len,
2077 void *buf, unsigned int size)
2079 struct buffer *out = buf;
2082 /* Token name must match. */
2083 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2085 /* Nothing else to do if there is no buffer. */
2088 /* Point to selected object. */
2089 ctx->object = out->args.vc.data;
2090 ctx->objmask = NULL;
2094 /** Parse RSS action. */
/* Delegates action bookkeeping to parse_vc(), then fills the freshly
 * reserved struct action_rss_data with testpmd defaults: default hash
 * function, default key, and one queue entry per configured Rx queue
 * (capped at ACTION_RSS_QUEUE_NUM). */
2096 parse_vc_action_rss(struct context *ctx, const struct token *token,
2097 const char *str, unsigned int len,
2098 void *buf, unsigned int size)
2100 struct buffer *out = buf;
2101 struct rte_flow_action *action;
2102 struct action_rss_data *action_rss_data;
2106 ret = parse_vc(ctx, token, str, len, buf, size);
2109 /* Nothing else to do if there is no buffer. */
2112 if (!out->args.vc.actions_n)
2114 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2115 /* Point to selected object. */
2116 ctx->object = out->args.vc.data;
2117 ctx->objmask = NULL;
2118 /* Set up default configuration. */
2119 action_rss_data = ctx->object;
2120 *action_rss_data = (struct action_rss_data){
2121 .conf = (struct rte_flow_action_rss){
2122 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2125 .key_len = sizeof(action_rss_data->key),
2126 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
/* conf.key/queue alias the trailing storage of action_rss_data itself. */
2127 .key = action_rss_data->key,
2128 .queue = action_rss_data->queue,
2130 .key = "testpmd's default RSS hash key",
/* Identity queue mapping 0..queue_num-1 by default. */
2133 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2134 action_rss_data->queue[i] = i;
/* Shrink key_len to the device's reported hash key size when a specific
 * valid port is selected. */
2135 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2136 ctx->port != (portid_t)RTE_PORT_ALL) {
2137 struct rte_eth_dev_info info;
2139 rte_eth_dev_info_get(ctx->port, &info);
2140 action_rss_data->conf.key_len =
2141 RTE_MIN(sizeof(action_rss_data->key),
2142 info.hash_key_size);
2144 action->conf = &action_rss_data->conf;
2149 * Parse func field for RSS action.
2151 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2152 * ACTION_RSS_FUNC_* index that called this function.
2155 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2156 const char *str, unsigned int len,
2157 void *buf, unsigned int size)
2159 struct action_rss_data *action_rss_data;
2160 enum rte_eth_hash_function func;
2164 /* Token name must match. */
2165 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token index to the corresponding hash function enum. */
2167 switch (ctx->curr) {
2168 case ACTION_RSS_FUNC_DEFAULT:
2169 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2171 case ACTION_RSS_FUNC_TOEPLITZ:
2172 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2174 case ACTION_RSS_FUNC_SIMPLE_XOR:
2175 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object still points at the RSS data set up by parse_vc_action_rss. */
2182 action_rss_data = ctx->object;
2183 action_rss_data->conf.func = func;
2188 * Parse type field for RSS action.
2190 * Valid tokens are type field names and the "end" token.
2193 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2194 const char *str, unsigned int len,
2195 void *buf, unsigned int size)
2197 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2198 struct action_rss_data *action_rss_data;
2204 if (ctx->curr != ACTION_RSS_TYPE)
/* objdata's high 16 bits flag "list already started"; on the first type
 * token, reset the accumulated hash-type bitmask. */
2206 if (!(ctx->objdata >> 16) && ctx->object) {
2207 action_rss_data = ctx->object;
2208 action_rss_data->conf.types = 0;
/* "end" terminates the list: clear the started flag and stop chaining. */
2210 if (!strcmp_partial("end", str, len)) {
2211 ctx->objdata &= 0xffff;
/* Look the name up in the global RSS type table. */
2214 for (i = 0; rss_type_table[i].str; ++i)
2215 if (!strcmp_partial(rss_type_table[i].str, str, len))
2217 if (!rss_type_table[i].str)
2219 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat token: push ourselves back so more type names can follow. */
2221 if (ctx->next_num == RTE_DIM(ctx->next))
2223 ctx->next[ctx->next_num++] = next;
2226 action_rss_data = ctx->object;
2227 action_rss_data->conf.types |= rss_type_table[i].rss_type;
2232 * Parse queue field for RSS action.
2234 * Valid tokens are queue indices and the "end" token.
2237 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2238 const char *str, unsigned int len,
2239 void *buf, unsigned int size)
2241 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2242 struct action_rss_data *action_rss_data;
2249 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count so far is kept in objdata's high 16 bits. */
2251 i = ctx->objdata >> 16;
2252 if (!strcmp_partial("end", str, len)) {
2253 ctx->objdata &= 0xffff;
2256 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an ad-hoc arg aimed at queue[i], then let parse_int() store it. */
2259 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2260 i * sizeof(action_rss_data->queue[i]),
2261 sizeof(action_rss_data->queue[i]))))
2263 ret = parse_int(ctx, token, str, len, NULL, 0);
2269 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-arm this token so further queue indices can follow. */
2271 if (ctx->next_num == RTE_DIM(ctx->next))
2273 ctx->next[ctx->next_num++] = next;
2276 action_rss_data = ctx->object;
2277 action_rss_data->conf.queue_num = i;
/* Empty list ("queues end") leaves conf.queue NULL. */
2278 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2282 /** Parse tokens for destroy command. */
/* First call records the command and places the rule-ID array right after
 * the header; each subsequent rule token appends one ID, bounds-checked
 * against the end of the output buffer. */
2284 parse_destroy(struct context *ctx, const struct token *token,
2285 const char *str, unsigned int len,
2286 void *buf, unsigned int size)
2288 struct buffer *out = buf;
2290 /* Token name must match. */
2291 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2293 /* Nothing else to do if there is no buffer. */
2296 if (!out->command) {
2297 if (ctx->curr != DESTROY)
2299 if (sizeof(*out) > size)
2301 out->command = ctx->curr;
2304 ctx->objmask = NULL;
2305 out->args.destroy.rule =
2306 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next rule ID would overflow the buffer. */
2310 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2311 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2314 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2315 ctx->objmask = NULL;
2319 /** Parse tokens for flush command. */
/* Flush takes no further arguments: only record the command once. */
2321 parse_flush(struct context *ctx, const struct token *token,
2322 const char *str, unsigned int len,
2323 void *buf, unsigned int size)
2325 struct buffer *out = buf;
2327 /* Token name must match. */
2328 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2330 /* Nothing else to do if there is no buffer. */
2333 if (!out->command) {
2334 if (ctx->curr != FLUSH)
2336 if (sizeof(*out) > size)
2338 out->command = ctx->curr;
2341 ctx->objmask = NULL;
2346 /** Parse tokens for query command. */
/* Same pattern as parse_flush(): record QUERY once; rule ID and action name
 * are handled by other tokens (see args.query in the dispatch). */
2348 parse_query(struct context *ctx, const struct token *token,
2349 const char *str, unsigned int len,
2350 void *buf, unsigned int size)
2352 struct buffer *out = buf;
2354 /* Token name must match. */
2355 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2357 /* Nothing else to do if there is no buffer. */
2360 if (!out->command) {
2361 if (ctx->curr != QUERY)
2363 if (sizeof(*out) > size)
2365 out->command = ctx->curr;
2368 ctx->objmask = NULL;
2373 /** Parse action names. */
/* Matches `str` against the names of all top-level action tokens
 * (next_action[]) and stores a value derived from the matching token's
 * priv data at the popped argument's offset (exact stored field is on an
 * elided line — NOTE(review): confirm against full source). */
2375 parse_action(struct context *ctx, const struct token *token,
2376 const char *str, unsigned int len,
2377 void *buf, unsigned int size)
2379 struct buffer *out = buf;
2380 const struct arg *arg = pop_args(ctx);
2384 /* Argument is expected. */
2387 /* Parse action name. */
2388 for (i = 0; next_action[i]; ++i) {
2389 const struct parse_action_priv *priv;
2391 token = &token_list[next_action[i]];
2392 if (strcmp_partial(token->name, str, len))
2398 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument before failing. */
2404 push_args(ctx, arg);
2408 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): group IDs accumulate in an array placed after
 * the header, each append bounds-checked against the buffer end. */
2410 parse_list(struct context *ctx, const struct token *token,
2411 const char *str, unsigned int len,
2412 void *buf, unsigned int size)
2414 struct buffer *out = buf;
2416 /* Token name must match. */
2417 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2419 /* Nothing else to do if there is no buffer. */
2422 if (!out->command) {
2423 if (ctx->curr != LIST)
2425 if (sizeof(*out) > size)
2427 out->command = ctx->curr;
2430 ctx->objmask = NULL;
2431 out->args.list.group =
2432 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2436 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2437 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2440 ctx->object = out->args.list.group + out->args.list.group_n++;
2441 ctx->objmask = NULL;
2445 /** Parse tokens for isolate command. */
/* Record the ISOLATE command once; the boolean set/unset argument is
 * handled by a follow-up token (args.isolate.set in the dispatch). */
2447 parse_isolate(struct context *ctx, const struct token *token,
2448 const char *str, unsigned int len,
2449 void *buf, unsigned int size)
2451 struct buffer *out = buf;
2453 /* Token name must match. */
2454 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2456 /* Nothing else to do if there is no buffer. */
2459 if (!out->command) {
2460 if (ctx->curr != ISOLATE)
2462 if (sizeof(*out) > size)
2464 out->command = ctx->curr;
2467 ctx->objmask = NULL;
2473 * Parse signed/unsigned integers 8 to 64-bit long.
2475 * Last argument (ctx->args) is retrieved to determine integer type and
/* Central numeric parser: converts the token, range-checks it against the
 * popped argument's min/max, then stores it at arg->offset in ctx->object
 * (and mirrors an all-ones value into ctx->objmask when present), honoring
 * the argument's size, byte order (hton) and bit-mask form. */
2479 parse_int(struct context *ctx, const struct token *token,
2480 const char *str, unsigned int len,
2481 void *buf, unsigned int size)
2483 const struct arg *arg = pop_args(ctx);
2488 /* Argument is expected. */
/* Signed arguments go through strtoimax so negatives parse correctly. */
2493 (uintmax_t)strtoimax(str, &end, 0) :
2494 strtoumax(str, &end, 0);
2495 if (errno || (size_t)(end - str) != len)
2498 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2499 (intmax_t)u > (intmax_t)arg->max)) ||
2500 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-mask arguments are scattered rather than stored whole. */
2505 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2506 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2510 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination width; hton selects network byte order. */
2514 case sizeof(uint8_t):
2515 *(uint8_t *)buf = u;
2517 case sizeof(uint16_t):
2518 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are written byte by byte in the right order. */
2520 case sizeof(uint8_t [3]):
2521 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2523 ((uint8_t *)buf)[0] = u;
2524 ((uint8_t *)buf)[1] = u >> 8;
2525 ((uint8_t *)buf)[2] = u >> 16;
2529 ((uint8_t *)buf)[0] = u >> 16;
2530 ((uint8_t *)buf)[1] = u >> 8;
2531 ((uint8_t *)buf)[2] = u;
2533 case sizeof(uint32_t):
2534 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2536 case sizeof(uint64_t):
2537 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Second pass writes the mask object unless it aliases the value. */
2542 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2544 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument for the caller. */
2549 push_args(ctx, arg);
2556 * Three arguments (ctx->args) are retrieved from the stack to store data,
2557 * its actual length and address (in that order).
/* Copies the raw token bytes into the data argument's storage, records the
 * byte count via parse_int() through the length argument, and optionally
 * stores a pointer to the data when the address argument has a size. */
2560 parse_string(struct context *ctx, const struct token *token,
2561 const char *str, unsigned int len,
2562 void *buf, unsigned int size)
2564 const struct arg *arg_data = pop_args(ctx);
2565 const struct arg *arg_len = pop_args(ctx);
2566 const struct arg *arg_addr = pop_args(ctx);
2567 char tmp[16]; /* Ought to be enough. */
2570 /* Arguments are expected. */
/* Partial pops on early failure are undone in reverse order. */
2574 push_args(ctx, arg_data);
2578 push_args(ctx, arg_len);
2579 push_args(ctx, arg_data);
2582 size = arg_data->size;
2583 /* Bit-mask fill is not supported. */
2584 if (arg_data->mask || size < len)
2588 /* Let parse_int() fill length information first. */
2589 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2592 push_args(ctx, arg_len);
2593 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2598 buf = (uint8_t *)ctx->object + arg_data->offset;
2599 /* Output buffer is not necessarily NUL-terminated. */
2600 memcpy(buf, str, len);
2601 memset((uint8_t *)buf + len, 0x00, size - len);
2603 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2604 /* Save address if requested. */
2605 if (arg_addr->size) {
2606 memcpy((uint8_t *)ctx->object + arg_addr->offset,
2608 (uint8_t *)ctx->object + arg_data->offset
2612 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2614 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in original stack order. */
2620 push_args(ctx, arg_addr);
2621 push_args(ctx, arg_len);
2622 push_args(ctx, arg_data);
2627 * Parse a MAC address.
2629 * Last argument (ctx->args) is retrieved to determine storage size and
/* Delegates the textual parse to cmdline_parse_etheraddr() into a local
 * temporary, then copies the result into place and mirrors 0xff into the
 * mask object when one is active. */
2633 parse_mac_addr(struct context *ctx, const struct token *token,
2634 const char *str, unsigned int len,
2635 void *buf, unsigned int size)
2637 const struct arg *arg = pop_args(ctx);
2638 struct ether_addr tmp;
2642 /* Argument is expected. */
2646 /* Bit-mask fill is not supported. */
2647 if (arg->mask || size != sizeof(tmp))
2649 /* Only network endian is supported. */
/* The whole token must have been consumed by the address parser. */
2652 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2653 if (ret < 0 || (unsigned int)ret != len)
2657 buf = (uint8_t *)ctx->object + arg->offset;
2658 memcpy(buf, &tmp, size);
2660 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2663 push_args(ctx, arg);
2668 * Parse an IPv4 address.
2670 * Last argument (ctx->args) is retrieved to determine storage size and
/* Tries inet_pton() on a NUL-terminated copy of the token; when that fails
 * the token is re-interpreted as a plain integer via parse_int(). */
2674 parse_ipv4_addr(struct context *ctx, const struct token *token,
2675 const char *str, unsigned int len,
2676 void *buf, unsigned int size)
2678 const struct arg *arg = pop_args(ctx);
2683 /* Argument is expected. */
2687 /* Bit-mask fill is not supported. */
2688 if (arg->mask || size != sizeof(tmp))
2690 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; the token is not. */
2693 memcpy(str2, str, len);
2695 ret = inet_pton(AF_INET, str2, &tmp);
2697 /* Attempt integer parsing. */
2698 push_args(ctx, arg);
2699 return parse_int(ctx, token, str, len, buf, size);
2703 buf = (uint8_t *)ctx->object + arg->offset;
2704 memcpy(buf, &tmp, size);
2706 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2709 push_args(ctx, arg);
2714 * Parse an IPv6 address.
2716 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same shape as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback visible in this view. */
2720 parse_ipv6_addr(struct context *ctx, const struct token *token,
2721 const char *str, unsigned int len,
2722 void *buf, unsigned int size)
2724 const struct arg *arg = pop_args(ctx);
2726 struct in6_addr tmp;
2730 /* Argument is expected. */
2734 /* Bit-mask fill is not supported. */
2735 if (arg->mask || size != sizeof(tmp))
2737 /* Only network endian is supported. */
/* NUL-terminated copy for inet_pton(). */
2740 memcpy(str2, str, len);
2742 ret = inet_pton(AF_INET6, str2, &tmp);
2747 buf = (uint8_t *)ctx->object + arg->offset;
2748 memcpy(buf, &tmp, size);
2750 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
2753 push_args(ctx, arg);
2757 /** Boolean values (even indices stand for false). */
/* NULL-terminated table consumed by parse_boolean()/comp_boolean(); the
 * even/odd position of a matched entry (i & 1) yields the boolean value.
 * Entry list is elided from this view. */
2758 static const char *const boolean_name[] = {
2768 * Parse a boolean value.
2770 * Last argument (ctx->args) is retrieved to determine storage size and
/* Accepts any name from boolean_name[] (odd index => true) or, failing
 * that, lets parse_int() interpret the token numerically. */
2774 parse_boolean(struct context *ctx, const struct token *token,
2775 const char *str, unsigned int len,
2776 void *buf, unsigned int size)
2778 const struct arg *arg = pop_args(ctx);
2782 /* Argument is expected. */
2785 for (i = 0; boolean_name[i]; ++i)
2786 if (!strcmp_partial(boolean_name[i], str, len))
2788 /* Process token as integer. */
/* Matched names are canonicalized to "1"/"0" before delegating. */
2789 if (boolean_name[i])
2790 str = i & 1 ? "1" : "0";
2791 push_args(ctx, arg);
2792 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not the canonical one. */
2793 return ret > 0 ? (int)len : ret;
2796 /** Parse port and update context. */
/* Parses the port ID into a throwaway compound-literal buffer and records
 * it in ctx->port so later completions/defaults are port-aware. */
2798 parse_port(struct context *ctx, const struct token *token,
2799 const char *str, unsigned int len,
2800 void *buf, unsigned int size)
2802 struct buffer *out = &(struct buffer){ .port = 0 };
2810 ctx->objmask = NULL;
2811 size = sizeof(*out);
2813 ret = parse_int(ctx, token, str, len, out, size);
2815 ctx->port = out->port;
2821 /** No completion. */
/* Placeholder completion callback for tokens with free-form values. */
2823 comp_none(struct context *ctx, const struct token *token,
2824 unsigned int ent, char *buf, unsigned int size)
2834 /** Complete boolean values. */
/* With buf set, returns entry `ent` from boolean_name[]; the count-only
 * mode (buf == NULL) return is on an elided line. */
2836 comp_boolean(struct context *ctx, const struct token *token,
2837 unsigned int ent, char *buf, unsigned int size)
2843 for (i = 0; boolean_name[i]; ++i)
2844 if (buf && i == ent)
2845 return snprintf(buf, size, "%s", boolean_name[i]);
2851 /** Complete action names. */
/* Enumerates the names of all tokens reachable from next_action[]. */
2853 comp_action(struct context *ctx, const struct token *token,
2854 unsigned int ent, char *buf, unsigned int size)
2860 for (i = 0; next_action[i]; ++i)
2861 if (buf && i == ent)
2862 return snprintf(buf, size, "%s",
2863 token_list[next_action[i]].name);
2869 /** Complete available ports. */
/* Enumerates currently attached ethdev port IDs. */
2871 comp_port(struct context *ctx, const struct token *token,
2872 unsigned int ent, char *buf, unsigned int size)
2879 RTE_ETH_FOREACH_DEV(p) {
2880 if (buf && i == ent)
2881 return snprintf(buf, size, "%u", p);
2889 /** Complete available rule IDs. */
/* Walks the flow list of the port recorded in ctx->port; no completions
 * are offered when no specific valid port is selected. */
2891 comp_rule_id(struct context *ctx, const struct token *token,
2892 unsigned int ent, char *buf, unsigned int size)
2895 struct rte_port *port;
2896 struct port_flow *pf;
2899 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2900 ctx->port == (portid_t)RTE_PORT_ALL)
2902 port = &ports[ctx->port];
2903 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2904 if (buf && i == ent)
2905 return snprintf(buf, size, "%u", pf->id);
2913 /** Complete type field for RSS action. */
/* Offers every rss_type_table[] name plus a trailing "end" terminator. */
2915 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
2916 unsigned int ent, char *buf, unsigned int size)
2922 for (i = 0; rss_type_table[i].str; ++i)
2927 return snprintf(buf, size, "%s", rss_type_table[ent].str)
2929 return snprintf(buf, size, "end");
2933 /** Complete queue field for RSS action. */
/* Offers queue indices as plain numbers plus the "end" terminator. */
2935 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2936 unsigned int ent, char *buf, unsigned int size)
2943 return snprintf(buf, size, "%u", ent);
2945 return snprintf(buf, size, "end");
2949 /** Internal context. */
/* Single shared parser state: the cmdline API gives callbacks no user
 * pointer, so state lives in this file-scope object. */
2950 static struct context cmd_flow_context;
2952 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of the file. */
2953 cmdline_parse_inst_t cmd_flow;
2955 /** Initialize context. */
/* Resets the fields a fresh parse depends on (full field list elided). */
2957 cmd_flow_context_init(struct context *ctx)
2959 /* A full memset() is not necessary. */
2969 ctx->objmask = NULL;
2972 /** Parse a token (cmdline API). */
/* Entry point invoked once per input token: trims the token, detects EOL
 * and "last argument", then tries each candidate token from the top of the
 * next[] stack until one's parser accepts the full token. On acceptance,
 * that token's own next[]/args[] are pushed for the following call. */
2974 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2977 struct context *ctx = &cmd_flow_context;
2978 const struct token *token;
2979 const enum index *list;
2984 token = &token_list[ctx->curr];
2985 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2988 for (len = 0; src[len]; ++len)
2989 if (src[len] == '#' || isspace(src[len]))
2993 /* Last argument and EOL detection. */
2994 for (i = len; src[i]; ++i)
2995 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2997 else if (!isspace(src[i])) {
3002 if (src[i] == '\r' || src[i] == '\n') {
3006 /* Initialize context if necessary. */
3007 if (!ctx->next_num) {
3010 ctx->next[ctx->next_num++] = token->next[0];
3012 /* Process argument through candidates. */
3013 ctx->prev = ctx->curr;
3014 list = ctx->next[ctx->next_num - 1];
3015 for (i = 0; list[i]; ++i) {
3016 const struct token *next = &token_list[list[i]];
3019 ctx->curr = list[i];
/* Tokens without a .call fall back to plain name matching. */
3021 tmp = next->call(ctx, next, src, len, result, size);
3023 tmp = parse_default(ctx, next, src, len, result, size);
/* A parser must consume the token exactly; partial consumption fails. */
3024 if (tmp == -1 || tmp != len)
3032 /* Push subsequent tokens if any. */
3034 for (i = 0; token->next[i]; ++i) {
3035 if (ctx->next_num == RTE_DIM(ctx->next))
3037 ctx->next[ctx->next_num++] = token->next[i];
3039 /* Push arguments if any. */
3041 for (i = 0; token->args[i]; ++i) {
3042 if (ctx->args_num == RTE_DIM(ctx->args))
3044 ctx->args[ctx->args_num++] = token->args[i];
3049 /** Return number of completion entries (cmdline API). */
3051 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
3053 struct context *ctx = &cmd_flow_context;
3054 const struct token *token = &token_list[ctx->curr];
3055 const enum index *list;
3059 /* Count number of tokens in current list. */
/* Use the pending next-token list if one exists, else the current
 * token's first successor list. */
3061 list = ctx->next[ctx->next_num - 1];
3063 list = token->next[0];
3064 for (i = 0; list[i]; ++i)
3069 * If there is a single token, use its completion callback, otherwise
3070 * return the number of entries.
3072 token = &token_list[list[0]];
3073 if (i == 1 && token->comp) {
3074 /* Save index for cmd_flow_get_help(). */
3075 ctx->prev = list[0];
3076 return token->comp(ctx, token, 0, NULL, 0);
3081 /** Return a completion entry (cmdline API). */
/* Mirrors cmd_flow_complete_get_nb(): single-token lists delegate to that
 * token's comp callback; multi-token lists complete by token name. */
3083 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3084 char *dst, unsigned int size)
3086 struct context *ctx = &cmd_flow_context;
3087 const struct token *token = &token_list[ctx->curr];
3088 const enum index *list;
3092 /* Count number of tokens in current list. */
3094 list = ctx->next[ctx->next_num - 1];
3096 list = token->next[0];
3097 for (i = 0; list[i]; ++i)
3101 /* If there is a single token, use its completion callback. */
3102 token = &token_list[list[0]];
3103 if (i == 1 && token->comp) {
3104 /* Save index for cmd_flow_get_help(). */
3105 ctx->prev = list[0];
/* Normalize the callback's result to the cmdline API's 0/-1 contract. */
3106 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3108 /* Otherwise make sure the index is valid and use defaults. */
3111 token = &token_list[list[index]];
3112 snprintf(dst, size, "%s", token->name);
3113 /* Save index for cmd_flow_get_help(). */
3114 ctx->prev = list[index];
3118 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) so help reflects the
 * token the user is being offered, not the one last parsed. */
3120 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3122 struct context *ctx = &cmd_flow_context;
3123 const struct token *token = &token_list[ctx->prev];
3128 /* Set token type and update global help with details. */
3129 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help text; fall back to the token's name. */
3131 cmd_flow.help_str = token->help;
3133 cmd_flow.help_str = token->name;
3137 /** Token definition template (cmdline API). */
/* Every dynamic token returned by cmd_flow_tok() shares this single header;
 * per-token behavior comes from cmd_flow_context, not from the header. */
3138 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3139 .ops = &(struct cmdline_token_ops){
3140 .parse = cmd_flow_parse,
3141 .complete_get_nb = cmd_flow_complete_get_nb,
3142 .complete_get_elt = cmd_flow_complete_get_elt,
3143 .get_help = cmd_flow_get_help,
3148 /** Populate the next dynamic token. */
/* Called by the cmdline library for each token slot; resets the context
 * when asked for the first slot and signals end-of-command by the elided
 * return paths when no further tokens are expected. */
3150 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3151 cmdline_parse_token_hdr_t **hdr_inst)
3153 struct context *ctx = &cmd_flow_context;
3155 /* Always reinitialize context before requesting the first token. */
3156 if (!(hdr_inst - cmd_flow.tokens))
3157 cmd_flow_context_init(ctx);
3158 /* Return NULL when no more tokens are expected. */
3159 if (!ctx->next_num && ctx->curr) {
3163 /* Determine if command should end here. */
3164 if (ctx->eol && ctx->last && ctx->next_num) {
3165 const enum index *list = ctx->next[ctx->next_num - 1];
3168 for (i = 0; list[i]; ++i) {
/* All remaining slots share the same template header. */
3175 *hdr = &cmd_flow_token_hdr;
3178 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command to its port_flow_*() implementation in
 * config.c; the parsed arguments travel inside the buffer's union. */
3180 cmd_flow_parsed(const struct buffer *in)
3182 switch (in->command) {
3184 port_flow_validate(in->port, &in->args.vc.attr,
3185 in->args.vc.pattern, in->args.vc.actions);
3188 port_flow_create(in->port, &in->args.vc.attr,
3189 in->args.vc.pattern, in->args.vc.actions);
3192 port_flow_destroy(in->port, in->args.destroy.rule_n,
3193 in->args.destroy.rule);
3196 port_flow_flush(in->port);
3199 port_flow_query(in->port, in->args.query.rule,
3200 in->args.query.action);
3203 port_flow_list(in->port, in->args.list.group_n,
3204 in->args.list.group);
3207 port_flow_isolate(in->port, in->args.isolate.set);
3214 /** Token generator and output processing callback (cmdline API). */
/* Double duty depending on how the cmdline library invokes it: token
 * generation (arg0/arg2 as header pointers) vs. dispatching the finished
 * buffer (arg0 as the parse result); the selector test is elided here. */
3216 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3219 cmd_flow_tok(arg0, arg2);
3221 cmd_flow_parsed(arg0);
3224 /** Global parser instance (cmdline API). */
3225 cmdline_parse_inst_t cmd_flow = {
3227 .data = NULL, /**< Unused. */
3228 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3231 }, /**< Tokens are returned by cmd_flow_tok(). */