1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
107 ITEM_VLAN_INNER_TYPE,
139 ITEM_E_TAG_GRP_ECID_B,
156 /* Validate/create actions. */
172 ACTION_RSS_FUNC_DEFAULT,
173 ACTION_RSS_FUNC_TOEPLITZ,
174 ACTION_RSS_FUNC_SIMPLE_XOR,
/**
 * Maximum size in bytes for the pattern field of struct rte_flow_item_raw.
 *
 * Pattern bytes are stored in trailing storage reserved after the
 * structure itself (see ITEM_RAW_SIZE).
 */
#define ITEM_RAW_PATTERN_SIZE 40
/**
 * Storage size for struct rte_flow_item_raw including pattern.
 *
 * The raw item's pattern bytes are laid out immediately after the
 * structure, hence the extra ITEM_RAW_PATTERN_SIZE bytes on top of
 * sizeof(struct rte_flow_item_raw).
 */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
/**
 * Maximum number of queue indices in struct rte_flow_action_rss.
 *
 * Bounds the queue[] array embedded in struct action_rss_data.
 */
#define ACTION_RSS_QUEUE_NUM 32
199 /** Storage for struct rte_flow_action_rss including external data. */
200 struct action_rss_data {
201 struct rte_flow_action_rss conf;
202 uint8_t key[RSS_HASH_KEY_LENGTH];
203 uint16_t queue[ACTION_RSS_QUEUE_NUM];
/**
 * Maximum number of subsequent tokens and arguments on the stack.
 *
 * Sizes the next[] and args[] stacks of the parser context; exceeding
 * it makes push operations fail rather than overflow.
 */
#define CTX_STACK_SIZE 16
209 /** Parser context. */
211 /** Stack of subsequent token lists to process. */
212 const enum index *next[CTX_STACK_SIZE];
213 /** Arguments for stacked tokens. */
214 const void *args[CTX_STACK_SIZE];
215 enum index curr; /**< Current token index. */
216 enum index prev; /**< Index of the last token seen. */
217 int next_num; /**< Number of entries in next[]. */
218 int args_num; /**< Number of entries in args[]. */
219 uint32_t eol:1; /**< EOL has been detected. */
220 uint32_t last:1; /**< No more arguments. */
221 portid_t port; /**< Current port ID (for completions). */
222 uint32_t objdata; /**< Object-specific data. */
223 void *object; /**< Address of current object for relative offsets. */
224 void *objmask; /**< Object a full mask must be written to. */
227 /** Token argument. */
229 uint32_t hton:1; /**< Use network byte ordering. */
230 uint32_t sign:1; /**< Value is signed. */
231 uint32_t bounded:1; /**< Value is bounded. */
232 uintmax_t min; /**< Minimum value if bounded. */
233 uintmax_t max; /**< Maximum value if bounded. */
234 uint32_t offset; /**< Relative offset from ctx->object. */
235 uint32_t size; /**< Field size. */
236 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
239 /** Parser token definition. */
241 /** Type displayed during completion (defaults to "TOKEN"). */
243 /** Help displayed during completion (defaults to token name). */
245 /** Private data used by parser functions. */
248 * Lists of subsequent tokens to push on the stack. Each call to the
249 * parser consumes the last entry of that stack.
251 const enum index *const *next;
252 /** Arguments stack for subsequent tokens that need them. */
253 const struct arg *const *args;
255 * Token-processing callback, returns -1 in case of error, the
256 * length of the matched string otherwise. If NULL, attempts to
257 * match the token name.
259 * If buf is not NULL, the result should be stored in it according
260 * to context. An error is returned if not large enough.
262 int (*call)(struct context *ctx, const struct token *token,
263 const char *str, unsigned int len,
264 void *buf, unsigned int size);
266 * Callback that provides possible values for this token, used for
267 * completion. Returns -1 in case of error, the number of possible
268 * values otherwise. If NULL, the token name is used.
270 * If buf is not NULL, entry index ent is written to buf and the
271 * full length of the entry is returned (same behavior as
274 int (*comp)(struct context *ctx, const struct token *token,
275 unsigned int ent, char *buf, unsigned int size);
276 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field.
 *
 * Expands to a NULL-terminated compound-literal array of token-index
 * lists; each list is typically produced by NEXT_ENTRY().
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
/**
 * Static initializer for a NEXT() entry.
 *
 * Expands to a compound-literal list of token indices terminated by
 * ZERO, the null token used as an end-of-list marker.
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
/**
 * Static initializer for the args field.
 *
 * Expands to a NULL-terminated compound-literal array of argument
 * descriptors; each entry is typically produced by one of the
 * ARGS_ENTRY*() helpers.
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
289 /** Static initializer for ARGS() to target a field. */
290 #define ARGS_ENTRY(s, f) \
291 (&(const struct arg){ \
292 .offset = offsetof(s, f), \
293 .size = sizeof(((s *)0)->f), \
296 /** Static initializer for ARGS() to target a bit-field. */
297 #define ARGS_ENTRY_BF(s, f, b) \
298 (&(const struct arg){ \
300 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
303 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
304 #define ARGS_ENTRY_MASK(s, f, m) \
305 (&(const struct arg){ \
306 .offset = offsetof(s, f), \
307 .size = sizeof(((s *)0)->f), \
308 .mask = (const void *)(m), \
311 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
312 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
313 (&(const struct arg){ \
315 .offset = offsetof(s, f), \
316 .size = sizeof(((s *)0)->f), \
317 .mask = (const void *)(m), \
320 /** Static initializer for ARGS() to target a pointer. */
321 #define ARGS_ENTRY_PTR(s, f) \
322 (&(const struct arg){ \
323 .size = sizeof(*((s *)0)->f), \
326 /** Static initializer for ARGS() with arbitrary offset and size. */
327 #define ARGS_ENTRY_ARB(o, s) \
328 (&(const struct arg){ \
333 /** Same as ARGS_ENTRY_ARB() with bounded values. */
334 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
335 (&(const struct arg){ \
343 /** Same as ARGS_ENTRY() using network byte ordering. */
344 #define ARGS_ENTRY_HTON(s, f) \
345 (&(const struct arg){ \
347 .offset = offsetof(s, f), \
348 .size = sizeof(((s *)0)->f), \
351 /** Parser output buffer layout expected by cmd_flow_parsed(). */
353 enum index command; /**< Flow command. */
354 portid_t port; /**< Affected port ID. */
357 struct rte_flow_attr attr;
358 struct rte_flow_item *pattern;
359 struct rte_flow_action *actions;
363 } vc; /**< Validate/create arguments. */
367 } destroy; /**< Destroy arguments. */
370 enum rte_flow_action_type action;
371 } query; /**< Query arguments. */
375 } list; /**< List arguments. */
378 } isolate; /**< Isolated mode arguments. */
379 } args; /**< Command arguments. */
382 /** Private data for pattern items. */
383 struct parse_item_priv {
384 enum rte_flow_item_type type; /**< Item type. */
385 uint32_t size; /**< Size of item specification structure. */
388 #define PRIV_ITEM(t, s) \
389 (&(const struct parse_item_priv){ \
390 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
394 /** Private data for actions. */
395 struct parse_action_priv {
396 enum rte_flow_action_type type; /**< Action type. */
397 uint32_t size; /**< Size of action configuration structure. */
400 #define PRIV_ACTION(t, s) \
401 (&(const struct parse_action_priv){ \
402 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
406 static const enum index next_vc_attr[] = {
416 static const enum index next_destroy_attr[] = {
422 static const enum index next_list_attr[] = {
428 static const enum index item_param[] = {
437 static const enum index next_item[] = {
467 static const enum index item_fuzzy[] = {
473 static const enum index item_any[] = {
479 static const enum index item_vf[] = {
485 static const enum index item_phy_port[] = {
491 static const enum index item_raw[] = {
501 static const enum index item_eth[] = {
509 static const enum index item_vlan[] = {
514 ITEM_VLAN_INNER_TYPE,
519 static const enum index item_ipv4[] = {
529 static const enum index item_ipv6[] = {
540 static const enum index item_icmp[] = {
547 static const enum index item_udp[] = {
554 static const enum index item_tcp[] = {
562 static const enum index item_sctp[] = {
571 static const enum index item_vxlan[] = {
577 static const enum index item_e_tag[] = {
578 ITEM_E_TAG_GRP_ECID_B,
583 static const enum index item_nvgre[] = {
589 static const enum index item_mpls[] = {
595 static const enum index item_gre[] = {
601 static const enum index item_gtp[] = {
607 static const enum index item_geneve[] = {
614 static const enum index next_action[] = {
630 static const enum index action_mark[] = {
636 static const enum index action_queue[] = {
642 static const enum index action_rss[] = {
653 static const enum index action_vf[] = {
660 static const enum index action_meter[] = {
666 static int parse_init(struct context *, const struct token *,
667 const char *, unsigned int,
668 void *, unsigned int);
669 static int parse_vc(struct context *, const struct token *,
670 const char *, unsigned int,
671 void *, unsigned int);
672 static int parse_vc_spec(struct context *, const struct token *,
673 const char *, unsigned int, void *, unsigned int);
674 static int parse_vc_conf(struct context *, const struct token *,
675 const char *, unsigned int, void *, unsigned int);
676 static int parse_vc_action_rss(struct context *, const struct token *,
677 const char *, unsigned int, void *,
679 static int parse_vc_action_rss_func(struct context *, const struct token *,
680 const char *, unsigned int, void *,
682 static int parse_vc_action_rss_type(struct context *, const struct token *,
683 const char *, unsigned int, void *,
685 static int parse_vc_action_rss_queue(struct context *, const struct token *,
686 const char *, unsigned int, void *,
688 static int parse_destroy(struct context *, const struct token *,
689 const char *, unsigned int,
690 void *, unsigned int);
691 static int parse_flush(struct context *, const struct token *,
692 const char *, unsigned int,
693 void *, unsigned int);
694 static int parse_query(struct context *, const struct token *,
695 const char *, unsigned int,
696 void *, unsigned int);
697 static int parse_action(struct context *, const struct token *,
698 const char *, unsigned int,
699 void *, unsigned int);
700 static int parse_list(struct context *, const struct token *,
701 const char *, unsigned int,
702 void *, unsigned int);
703 static int parse_isolate(struct context *, const struct token *,
704 const char *, unsigned int,
705 void *, unsigned int);
706 static int parse_int(struct context *, const struct token *,
707 const char *, unsigned int,
708 void *, unsigned int);
709 static int parse_prefix(struct context *, const struct token *,
710 const char *, unsigned int,
711 void *, unsigned int);
712 static int parse_boolean(struct context *, const struct token *,
713 const char *, unsigned int,
714 void *, unsigned int);
715 static int parse_string(struct context *, const struct token *,
716 const char *, unsigned int,
717 void *, unsigned int);
718 static int parse_mac_addr(struct context *, const struct token *,
719 const char *, unsigned int,
720 void *, unsigned int);
721 static int parse_ipv4_addr(struct context *, const struct token *,
722 const char *, unsigned int,
723 void *, unsigned int);
724 static int parse_ipv6_addr(struct context *, const struct token *,
725 const char *, unsigned int,
726 void *, unsigned int);
727 static int parse_port(struct context *, const struct token *,
728 const char *, unsigned int,
729 void *, unsigned int);
730 static int comp_none(struct context *, const struct token *,
731 unsigned int, char *, unsigned int);
732 static int comp_boolean(struct context *, const struct token *,
733 unsigned int, char *, unsigned int);
734 static int comp_action(struct context *, const struct token *,
735 unsigned int, char *, unsigned int);
736 static int comp_port(struct context *, const struct token *,
737 unsigned int, char *, unsigned int);
738 static int comp_rule_id(struct context *, const struct token *,
739 unsigned int, char *, unsigned int);
740 static int comp_vc_action_rss_type(struct context *, const struct token *,
741 unsigned int, char *, unsigned int);
742 static int comp_vc_action_rss_queue(struct context *, const struct token *,
743 unsigned int, char *, unsigned int);
745 /** Token definitions. */
746 static const struct token token_list[] = {
747 /* Special tokens. */
750 .help = "null entry, abused as the entry point",
751 .next = NEXT(NEXT_ENTRY(FLOW)),
756 .help = "command may end here",
762 .help = "integer value",
767 .name = "{unsigned}",
769 .help = "unsigned integer value",
776 .help = "prefix length for bit-mask",
777 .call = parse_prefix,
783 .help = "any boolean value",
784 .call = parse_boolean,
785 .comp = comp_boolean,
790 .help = "fixed string",
791 .call = parse_string,
795 .name = "{MAC address}",
797 .help = "standard MAC address notation",
798 .call = parse_mac_addr,
802 .name = "{IPv4 address}",
803 .type = "IPV4 ADDRESS",
804 .help = "standard IPv4 address notation",
805 .call = parse_ipv4_addr,
809 .name = "{IPv6 address}",
810 .type = "IPV6 ADDRESS",
811 .help = "standard IPv6 address notation",
812 .call = parse_ipv6_addr,
818 .help = "rule identifier",
820 .comp = comp_rule_id,
825 .help = "port identifier",
830 .name = "{group_id}",
832 .help = "group identifier",
839 .help = "priority level",
843 /* Top-level command. */
846 .type = "{command} {port_id} [{arg} [...]]",
847 .help = "manage ingress/egress flow rules",
848 .next = NEXT(NEXT_ENTRY
858 /* Sub-level commands. */
861 .help = "check whether a flow rule can be created",
862 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
863 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
868 .help = "create a flow rule",
869 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
870 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
875 .help = "destroy specific flow rules",
876 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
877 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
878 .call = parse_destroy,
882 .help = "destroy all flow rules",
883 .next = NEXT(NEXT_ENTRY(PORT_ID)),
884 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
889 .help = "query an existing flow rule",
890 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
892 NEXT_ENTRY(PORT_ID)),
893 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
894 ARGS_ENTRY(struct buffer, args.query.rule),
895 ARGS_ENTRY(struct buffer, port)),
900 .help = "list existing flow rules",
901 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
902 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
907 .help = "restrict ingress traffic to the defined flow rules",
908 .next = NEXT(NEXT_ENTRY(BOOLEAN),
909 NEXT_ENTRY(PORT_ID)),
910 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
911 ARGS_ENTRY(struct buffer, port)),
912 .call = parse_isolate,
914 /* Destroy arguments. */
917 .help = "specify a rule identifier",
918 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
919 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
920 .call = parse_destroy,
922 /* Query arguments. */
926 .help = "action to query, must be part of the rule",
927 .call = parse_action,
930 /* List arguments. */
933 .help = "specify a group",
934 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
935 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
938 /* Validate/create attributes. */
941 .help = "specify a group",
942 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
943 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
948 .help = "specify a priority level",
949 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
950 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
955 .help = "affect rule to ingress",
956 .next = NEXT(next_vc_attr),
961 .help = "affect rule to egress",
962 .next = NEXT(next_vc_attr),
967 .help = "apply rule directly to endpoints found in pattern",
968 .next = NEXT(next_vc_attr),
971 /* Validate/create pattern. */
974 .help = "submit a list of pattern items",
975 .next = NEXT(next_item),
980 .help = "match value perfectly (with full bit-mask)",
981 .call = parse_vc_spec,
983 [ITEM_PARAM_SPEC] = {
985 .help = "match value according to configured bit-mask",
986 .call = parse_vc_spec,
988 [ITEM_PARAM_LAST] = {
990 .help = "specify upper bound to establish a range",
991 .call = parse_vc_spec,
993 [ITEM_PARAM_MASK] = {
995 .help = "specify bit-mask with relevant bits set to one",
996 .call = parse_vc_spec,
998 [ITEM_PARAM_PREFIX] = {
1000 .help = "generate bit-mask from a prefix length",
1001 .call = parse_vc_spec,
1005 .help = "specify next pattern item",
1006 .next = NEXT(next_item),
1010 .help = "end list of pattern items",
1011 .priv = PRIV_ITEM(END, 0),
1012 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1017 .help = "no-op pattern item",
1018 .priv = PRIV_ITEM(VOID, 0),
1019 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1024 .help = "perform actions when pattern does not match",
1025 .priv = PRIV_ITEM(INVERT, 0),
1026 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1031 .help = "match any protocol for the current layer",
1032 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1033 .next = NEXT(item_any),
1038 .help = "number of layers covered",
1039 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1040 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1044 .help = "match traffic from/to the physical function",
1045 .priv = PRIV_ITEM(PF, 0),
1046 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1051 .help = "match traffic from/to a virtual function ID",
1052 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1053 .next = NEXT(item_vf),
1059 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1060 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1064 .help = "match traffic from/to a specific physical port",
1065 .priv = PRIV_ITEM(PHY_PORT,
1066 sizeof(struct rte_flow_item_phy_port)),
1067 .next = NEXT(item_phy_port),
1070 [ITEM_PHY_PORT_INDEX] = {
1072 .help = "physical port index",
1073 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1074 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1078 .help = "match an arbitrary byte string",
1079 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1080 .next = NEXT(item_raw),
1083 [ITEM_RAW_RELATIVE] = {
1085 .help = "look for pattern after the previous item",
1086 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1087 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1090 [ITEM_RAW_SEARCH] = {
1092 .help = "search pattern from offset (see also limit)",
1093 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1094 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1097 [ITEM_RAW_OFFSET] = {
1099 .help = "absolute or relative offset for pattern",
1100 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1101 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1103 [ITEM_RAW_LIMIT] = {
1105 .help = "search area limit for start of pattern",
1106 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1107 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1109 [ITEM_RAW_PATTERN] = {
1111 .help = "byte string to look for",
1112 .next = NEXT(item_raw,
1114 NEXT_ENTRY(ITEM_PARAM_IS,
1117 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1118 ARGS_ENTRY(struct rte_flow_item_raw, length),
1119 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1120 ITEM_RAW_PATTERN_SIZE)),
1124 .help = "match Ethernet header",
1125 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1126 .next = NEXT(item_eth),
1131 .help = "destination MAC",
1132 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1133 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1137 .help = "source MAC",
1138 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1143 .help = "EtherType",
1144 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1149 .help = "match 802.1Q/ad VLAN tag",
1150 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1151 .next = NEXT(item_vlan),
1156 .help = "tag control information",
1157 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1162 .help = "priority code point",
1163 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1164 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1169 .help = "drop eligible indicator",
1170 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1171 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1176 .help = "VLAN identifier",
1177 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1178 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1181 [ITEM_VLAN_INNER_TYPE] = {
1182 .name = "inner_type",
1183 .help = "inner EtherType",
1184 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1185 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1190 .help = "match IPv4 header",
1191 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1192 .next = NEXT(item_ipv4),
1197 .help = "type of service",
1198 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1199 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1200 hdr.type_of_service)),
1204 .help = "time to live",
1205 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1206 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1209 [ITEM_IPV4_PROTO] = {
1211 .help = "next protocol ID",
1212 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1213 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1214 hdr.next_proto_id)),
1218 .help = "source address",
1219 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1220 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1225 .help = "destination address",
1226 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1227 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1232 .help = "match IPv6 header",
1233 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1234 .next = NEXT(item_ipv6),
1239 .help = "traffic class",
1240 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1241 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1243 "\x0f\xf0\x00\x00")),
1245 [ITEM_IPV6_FLOW] = {
1247 .help = "flow label",
1248 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1249 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1251 "\x00\x0f\xff\xff")),
1253 [ITEM_IPV6_PROTO] = {
1255 .help = "protocol (next header)",
1256 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1257 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1262 .help = "hop limit",
1263 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1264 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1269 .help = "source address",
1270 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1271 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1276 .help = "destination address",
1277 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1278 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1283 .help = "match ICMP header",
1284 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1285 .next = NEXT(item_icmp),
1288 [ITEM_ICMP_TYPE] = {
1290 .help = "ICMP packet type",
1291 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1292 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1295 [ITEM_ICMP_CODE] = {
1297 .help = "ICMP packet code",
1298 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1299 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1304 .help = "match UDP header",
1305 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1306 .next = NEXT(item_udp),
1311 .help = "UDP source port",
1312 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1313 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1318 .help = "UDP destination port",
1319 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1320 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1325 .help = "match TCP header",
1326 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1327 .next = NEXT(item_tcp),
1332 .help = "TCP source port",
1333 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1334 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1339 .help = "TCP destination port",
1340 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1341 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1344 [ITEM_TCP_FLAGS] = {
1346 .help = "TCP flags",
1347 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1348 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1353 .help = "match SCTP header",
1354 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1355 .next = NEXT(item_sctp),
1360 .help = "SCTP source port",
1361 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1362 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1367 .help = "SCTP destination port",
1368 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1369 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1374 .help = "validation tag",
1375 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1376 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1379 [ITEM_SCTP_CKSUM] = {
1382 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1388 .help = "match VXLAN header",
1389 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1390 .next = NEXT(item_vxlan),
1393 [ITEM_VXLAN_VNI] = {
1395 .help = "VXLAN identifier",
1396 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1397 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1401 .help = "match E-Tag header",
1402 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1403 .next = NEXT(item_e_tag),
1406 [ITEM_E_TAG_GRP_ECID_B] = {
1407 .name = "grp_ecid_b",
1408 .help = "GRP and E-CID base",
1409 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1410 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1416 .help = "match NVGRE header",
1417 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1418 .next = NEXT(item_nvgre),
1421 [ITEM_NVGRE_TNI] = {
1423 .help = "virtual subnet ID",
1424 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1425 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1429 .help = "match MPLS header",
1430 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1431 .next = NEXT(item_mpls),
1434 [ITEM_MPLS_LABEL] = {
1436 .help = "MPLS label",
1437 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1438 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1444 .help = "match GRE header",
1445 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1446 .next = NEXT(item_gre),
1449 [ITEM_GRE_PROTO] = {
1451 .help = "GRE protocol type",
1452 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1453 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1458 .help = "fuzzy pattern match, expect faster than default",
1459 .priv = PRIV_ITEM(FUZZY,
1460 sizeof(struct rte_flow_item_fuzzy)),
1461 .next = NEXT(item_fuzzy),
1464 [ITEM_FUZZY_THRESH] = {
1466 .help = "match accuracy threshold",
1467 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1468 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1473 .help = "match GTP header",
1474 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1475 .next = NEXT(item_gtp),
1480 .help = "tunnel endpoint identifier",
1481 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1482 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1486 .help = "match GTP header",
1487 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1488 .next = NEXT(item_gtp),
1493 .help = "match GTP header",
1494 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1495 .next = NEXT(item_gtp),
1500 .help = "match GENEVE header",
1501 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1502 .next = NEXT(item_geneve),
1505 [ITEM_GENEVE_VNI] = {
1507 .help = "virtual network identifier",
1508 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1509 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1511 [ITEM_GENEVE_PROTO] = {
1513 .help = "GENEVE protocol type",
1514 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1515 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1519 /* Validate/create actions. */
1522 .help = "submit a list of associated actions",
1523 .next = NEXT(next_action),
1528 .help = "specify next action",
1529 .next = NEXT(next_action),
1533 .help = "end list of actions",
1534 .priv = PRIV_ACTION(END, 0),
1539 .help = "no-op action",
1540 .priv = PRIV_ACTION(VOID, 0),
1541 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1544 [ACTION_PASSTHRU] = {
1546 .help = "let subsequent rule process matched packets",
1547 .priv = PRIV_ACTION(PASSTHRU, 0),
1548 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1553 .help = "attach 32 bit value to packets",
1554 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1555 .next = NEXT(action_mark),
1558 [ACTION_MARK_ID] = {
1560 .help = "32 bit value to return with packets",
1561 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1562 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1563 .call = parse_vc_conf,
1567 .help = "flag packets",
1568 .priv = PRIV_ACTION(FLAG, 0),
1569 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1574 .help = "assign packets to a given queue index",
1575 .priv = PRIV_ACTION(QUEUE,
1576 sizeof(struct rte_flow_action_queue)),
1577 .next = NEXT(action_queue),
1580 [ACTION_QUEUE_INDEX] = {
1582 .help = "queue index to use",
1583 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1584 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1585 .call = parse_vc_conf,
1589 .help = "drop packets (note: passthru has priority)",
1590 .priv = PRIV_ACTION(DROP, 0),
1591 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1596 .help = "enable counters for this rule",
1597 .priv = PRIV_ACTION(COUNT, 0),
1598 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1603 .help = "spread packets among several queues",
1604 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1605 .next = NEXT(action_rss),
1606 .call = parse_vc_action_rss,
1608 [ACTION_RSS_FUNC] = {
1610 .help = "RSS hash function to apply",
1611 .next = NEXT(action_rss,
1612 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1613 ACTION_RSS_FUNC_TOEPLITZ,
1614 ACTION_RSS_FUNC_SIMPLE_XOR)),
1616 [ACTION_RSS_FUNC_DEFAULT] = {
1618 .help = "default hash function",
1619 .call = parse_vc_action_rss_func,
1621 [ACTION_RSS_FUNC_TOEPLITZ] = {
1623 .help = "Toeplitz hash function",
1624 .call = parse_vc_action_rss_func,
1626 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1627 .name = "simple_xor",
1628 .help = "simple XOR hash function",
1629 .call = parse_vc_action_rss_func,
1631 [ACTION_RSS_LEVEL] = {
1633 .help = "encapsulation level for \"types\"",
1634 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1635 .args = ARGS(ARGS_ENTRY_ARB
1636 (offsetof(struct action_rss_data, conf) +
1637 offsetof(struct rte_flow_action_rss, level),
1638 sizeof(((struct rte_flow_action_rss *)0)->
1641 [ACTION_RSS_TYPES] = {
1643 .help = "specific RSS hash types",
1644 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1646 [ACTION_RSS_TYPE] = {
1648 .help = "RSS hash type",
1649 .call = parse_vc_action_rss_type,
1650 .comp = comp_vc_action_rss_type,
1652 [ACTION_RSS_KEY] = {
1654 .help = "RSS hash key",
1655 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1656 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1658 (offsetof(struct action_rss_data, conf) +
1659 offsetof(struct rte_flow_action_rss, key_len),
1660 sizeof(((struct rte_flow_action_rss *)0)->
1662 ARGS_ENTRY(struct action_rss_data, key)),
1664 [ACTION_RSS_KEY_LEN] = {
1666 .help = "RSS hash key length in bytes",
1667 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1668 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1669 (offsetof(struct action_rss_data, conf) +
1670 offsetof(struct rte_flow_action_rss, key_len),
1671 sizeof(((struct rte_flow_action_rss *)0)->
1674 RSS_HASH_KEY_LENGTH)),
1676 [ACTION_RSS_QUEUES] = {
1678 .help = "queue indices to use",
1679 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1680 .call = parse_vc_conf,
1682 [ACTION_RSS_QUEUE] = {
1684 .help = "queue index",
1685 .call = parse_vc_action_rss_queue,
1686 .comp = comp_vc_action_rss_queue,
1690 .help = "direct traffic to physical function",
1691 .priv = PRIV_ACTION(PF, 0),
1692 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1697 .help = "direct traffic to a virtual function ID",
1698 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1699 .next = NEXT(action_vf),
1702 [ACTION_VF_ORIGINAL] = {
1704 .help = "use original VF ID if possible",
1705 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1706 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1708 .call = parse_vc_conf,
1713 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1714 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1715 .call = parse_vc_conf,
1719 .help = "meter the directed packets at given id",
1720 .priv = PRIV_ACTION(METER,
1721 sizeof(struct rte_flow_action_meter)),
1722 .next = NEXT(action_meter),
1725 [ACTION_METER_ID] = {
1727 .help = "meter id to use",
1728 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1729 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1730 .call = parse_vc_conf,
1734 /** Remove and return last entry from argument stack. */
/* Pops the most recently pushed argument descriptor from ctx->args[];
 * yields NULL when the stack is empty. */
1735 static const struct arg *
1736 pop_args(struct context *ctx)
1738 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1741 /** Add entry on top of the argument stack. */
/* Rejects the push when the stack already holds CTX_STACK_SIZE entries
 * (presumably returning an error in that branch -- TODO confirm);
 * otherwise stores the descriptor and advances args_num. */
1743 push_args(struct context *ctx, const struct arg *arg)
1745 if (ctx->args_num == CTX_STACK_SIZE)
1747 ctx->args[ctx->args_num++] = arg;
1751 /** Spread value into buffer according to bit-mask. */
/* Copies the low-order bits of "val" into the destination buffer at the
 * positions where arg->mask has 1-bits.  dst may be NULL: callers such as
 * parse_prefix() use that mode to measure the mask without writing.
 * NOTE(review): statement order in this bit walk is significant; only
 * the little-endian branch is visible here. */
1753 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1755 uint32_t i = arg->size;
/* On little-endian hosts the mask bytes are walked from the end. */
1763 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1772 unsigned int shift = 0;
1773 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For every 1-bit of this mask byte: clear the target bit, then copy in
 * the next low bit of "val". */
1775 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1776 if (!(arg->mask[i] & (1 << shift)))
1781 *buf &= ~(1 << shift);
1782 *buf |= (val & 1) << shift;
1790 /** Compare a string with a partial one of a given length. */
/* Prefix comparison: first compares the leading partial_len bytes; when
 * they match and "full" is no longer than the partial string, the strings
 * are considered equal (presumably returns 0 there -- TODO confirm).
 * Otherwise the first unmatched character of "full" is returned, so a
 * non-zero result means "no match". */
1792 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1794 int r = strncmp(full, partial, partial_len);
1798 if (strlen(full) <= partial_len)
1800 return full[partial_len];
1804 * Parse a prefix length and generate a bit-mask.
1806 * Last argument (ctx->args) is retrieved to determine mask size, storage
1807 * location and whether the result must use network byte ordering.
1810 parse_prefix(struct context *ctx, const struct token *token,
1811 const char *str, unsigned int len,
1812 void *buf, unsigned int size)
1814 const struct arg *arg = pop_args(ctx);
/* conv[k] is a byte with its k most-significant bits set; used for the
 * final partial byte of the generated mask (k in 0..8). */
1815 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1822 /* Argument is expected. */
/* Whole token must be a valid unsigned number. */
1826 u = strtoumax(str, &end, 0);
1827 if (errno || (size_t)(end - str) != len)
/* Probe call (dst == NULL) to size the bit-field mask. */
1832 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Bit-field target: spread value into object, all-ones into objmask. */
1841 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1842 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix must fit in the destination field. */
1849 if (bytes > size || bytes + !!extra > size)
1853 buf = (uint8_t *)ctx->object + arg->offset;
/* Byte-order dependent layout of the generated network-style mask. */
1854 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1856 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1857 memset(buf, 0x00, size - bytes);
1859 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1863 memset(buf, 0xff, bytes);
1864 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1866 ((uint8_t *)buf)[bytes] = conv[extra];
/* objmask is fully significant for a prefix. */
1869 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the next candidate token. */
1872 push_args(ctx, arg);
1876 /** Default parsing function for token name matching. */
/* Accepts the input only when str is a prefix match for token->name
 * (strcmp_partial() returning 0). */
1878 parse_default(struct context *ctx, const struct token *token,
1879 const char *str, unsigned int len,
1880 void *buf, unsigned int size)
1885 if (strcmp_partial(token->name, str, len))
1890 /** Parse flow command, initialize output buffer for subsequent tokens. */
1892 parse_init(struct context *ctx, const struct token *token,
1893 const char *str, unsigned int len,
1894 void *buf, unsigned int size)
1896 struct buffer *out = buf;
1898 /* Token name must match. */
1899 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1901 /* Nothing else to do if there is no buffer. */
1904 /* Make sure buffer is large enough. */
1905 if (size < sizeof(*out))
1907 /* Initialize buffer. */
/* Zero the structured header; poison the remainder with 0x22 so stale
 * reads of uninitialized trailing space are easy to spot. */
1908 memset(out, 0x00, sizeof(*out));
1909 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1912 ctx->objmask = NULL;
1916 /** Parse tokens for validate/create commands. */
/* Drives the whole validate/create grammar: records the command, fills
 * flow attributes, then appends pattern items and actions into "out".
 * Items/actions grow upward from the header while their spec/conf data
 * grows downward from out->args.vc.data; the two cursors must never
 * cross. */
1918 parse_vc(struct context *ctx, const struct token *token,
1919 const char *str, unsigned int len,
1920 void *buf, unsigned int size)
1922 struct buffer *out = buf;
1926 /* Token name must match. */
1927 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1929 /* Nothing else to do if there is no buffer. */
/* First call: only VALIDATE/CREATE may open the command. */
1932 if (!out->command) {
1933 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1935 if (sizeof(*out) > size)
1937 out->command = ctx->curr;
1940 ctx->objmask = NULL;
/* Data cursor starts at the end of the caller's buffer. */
1941 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the attr structure. */
1945 ctx->object = &out->args.vc.attr;
1946 ctx->objmask = NULL;
1947 switch (ctx->curr) {
1952 out->args.vc.attr.ingress = 1;
1955 out->args.vc.attr.egress = 1;
1958 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: place the item array right after the header. */
1961 out->args.vc.pattern =
1962 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1964 ctx->object = out->args.vc.pattern;
1965 ctx->objmask = NULL;
/* "actions" keyword: action array follows the last pattern item. */
1968 out->args.vc.actions =
1969 (void *)RTE_ALIGN_CEIL((uintptr_t)
1970 (out->args.vc.pattern +
1971 out->args.vc.pattern_n),
1973 ctx->object = out->args.vc.actions;
1974 ctx->objmask = NULL;
/* Still in the pattern phase: append one rte_flow_item. */
1981 if (!out->args.vc.actions) {
1982 const struct parse_item_priv *priv = token->priv;
1983 struct rte_flow_item *item =
1984 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve room for spec, last and mask copies of the item data. */
1986 data_size = priv->size * 3; /* spec, last, mask */
1987 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1988 (out->args.vc.data - data_size),
/* Fail if the item array would collide with the data area. */
1990 if ((uint8_t *)item + sizeof(*item) > data)
1992 *item = (struct rte_flow_item){
1995 ++out->args.vc.pattern_n;
1997 ctx->objmask = NULL;
/* Action phase: append one rte_flow_action plus its conf storage. */
1999 const struct parse_action_priv *priv = token->priv;
2000 struct rte_flow_action *action =
2001 out->args.vc.actions + out->args.vc.actions_n;
2003 data_size = priv->size; /* configuration */
2004 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2005 (out->args.vc.data - data_size),
2007 if ((uint8_t *)action + sizeof(*action) > data)
2009 *action = (struct rte_flow_action){
2011 .conf = data_size ? data : NULL,
2013 ++out->args.vc.actions_n;
2014 ctx->object = action;
2015 ctx->objmask = NULL;
/* Zero the freshly reserved data and move the cursor down. */
2017 memset(data, 0, data_size);
2018 out->args.vc.data = data;
2019 ctx->objdata = data_size;
2023 /** Parse pattern item parameter type. */
/* Handles the spec/last/mask/prefix qualifiers of the current pattern
 * item and points ctx->object at the matching third of the item's data
 * area reserved by parse_vc(). */
2025 parse_vc_spec(struct context *ctx, const struct token *token,
2026 const char *str, unsigned int len,
2027 void *buf, unsigned int size)
2029 struct buffer *out = buf;
2030 struct rte_flow_item *item;
2036 /* Token name must match. */
2037 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2039 /* Parse parameter types. */
2040 switch (ctx->curr) {
2041 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2047 case ITEM_PARAM_SPEC:
2050 case ITEM_PARAM_LAST:
2053 case ITEM_PARAM_PREFIX:
2054 /* Modify next token to expect a prefix. */
2055 if (ctx->next_num < 2)
2057 ctx->next[ctx->next_num - 2] = prefix;
2059 case ITEM_PARAM_MASK:
2065 /* Nothing else to do if there is no buffer. */
/* A pattern item must already be in progress. */
2068 if (!out->args.vc.pattern_n)
2070 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* parse_vc() reserved objdata = 3 * item size (spec, last, mask). */
2071 data_size = ctx->objdata / 3; /* spec, last, mask */
2072 /* Point to selected object. */
2073 ctx->object = out->args.vc.data + (data_size * index);
/* Third slot doubles as the mask the item points at. */
2075 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2076 item->mask = ctx->objmask;
2078 ctx->objmask = NULL;
2079 /* Update relevant item pointer. */
2080 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2085 /** Parse action configuration field. */
/* Redirects subsequent argument writes into the configuration storage of
 * the action most recently appended by parse_vc(). */
2087 parse_vc_conf(struct context *ctx, const struct token *token,
2088 const char *str, unsigned int len,
2089 void *buf, unsigned int size)
2091 struct buffer *out = buf;
2094 /* Token name must match. */
2095 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2097 /* Nothing else to do if there is no buffer. */
2100 /* Point to selected object. */
2101 ctx->object = out->args.vc.data;
/* Action configuration has no separate mask object. */
2102 ctx->objmask = NULL;
2106 /** Parse RSS action. */
/* Registers the action through parse_vc(), then fills its
 * action_rss_data storage with testpmd defaults: default hash function,
 * built-in key, and a round-robin list over the configured RX queues
 * (capped at ACTION_RSS_QUEUE_NUM).  The key length is clamped to the
 * device's reported hash_key_size when a specific port is targeted. */
2108 parse_vc_action_rss(struct context *ctx, const struct token *token,
2109 const char *str, unsigned int len,
2110 void *buf, unsigned int size)
2112 struct buffer *out = buf;
2113 struct rte_flow_action *action;
2114 struct action_rss_data *action_rss_data;
/* Generic action registration first. */
2118 ret = parse_vc(ctx, token, str, len, buf, size);
2121 /* Nothing else to do if there is no buffer. */
2124 if (!out->args.vc.actions_n)
2126 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2127 /* Point to selected object. */
2128 ctx->object = out->args.vc.data;
2129 ctx->objmask = NULL;
2130 /* Set up default configuration. */
2131 action_rss_data = ctx->object;
2132 *action_rss_data = (struct action_rss_data){
2133 .conf = (struct rte_flow_action_rss){
2134 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2137 .key_len = sizeof(action_rss_data->key),
2138 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
/* conf points into this same action_rss_data block. */
2139 .key = action_rss_data->key,
2140 .queue = action_rss_data->queue,
2142 .key = "testpmd's default RSS hash key",
/* Default queue list: identity mapping 0..queue_num-1. */
2145 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2146 action_rss_data->queue[i] = i;
/* Clamp key length to the device capability when port is known. */
2147 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2148 ctx->port != (portid_t)RTE_PORT_ALL) {
2149 struct rte_eth_dev_info info;
2151 rte_eth_dev_info_get(ctx->port, &info);
2152 action_rss_data->conf.key_len =
2153 RTE_MIN(sizeof(action_rss_data->key),
2154 info.hash_key_size);
2156 action->conf = &action_rss_data->conf;
2161 * Parse func field for RSS action.
2163 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2164 * ACTION_RSS_FUNC_* index that called this function.
2167 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2168 const char *str, unsigned int len,
2169 void *buf, unsigned int size)
2171 struct action_rss_data *action_rss_data;
2172 enum rte_eth_hash_function func;
2176 /* Token name must match. */
2177 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the grammar token onto the corresponding hash function enum. */
2179 switch (ctx->curr) {
2180 case ACTION_RSS_FUNC_DEFAULT:
2181 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2183 case ACTION_RSS_FUNC_TOEPLITZ:
2184 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2186 case ACTION_RSS_FUNC_SIMPLE_XOR:
2187 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object was set to the RSS action data by parse_vc_action_rss(). */
2194 action_rss_data = ctx->object;
2195 action_rss_data->conf.func = func;
2200 * Parse type field for RSS action.
2202 * Valid tokens are type field names and the "end" token.
/* ORs the matched rss_type_table[] flag into conf.types.  The high half
 * of ctx->objdata tracks whether the list has started: on the first type
 * token the default types are cleared, and the parser re-queues itself so
 * an arbitrary number of types can follow until "end". */
2205 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2206 const char *str, unsigned int len,
2207 void *buf, unsigned int size)
2209 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2210 struct action_rss_data *action_rss_data;
2216 if (ctx->curr != ACTION_RSS_TYPE)
/* First type token: reset the defaults before accumulating. */
2218 if (!(ctx->objdata >> 16) && ctx->object) {
2219 action_rss_data = ctx->object;
2220 action_rss_data->conf.types = 0;
/* "end" terminates the list and clears the started flag. */
2222 if (!strcmp_partial("end", str, len)) {
2223 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
2226 for (i = 0; rss_type_table[i].str; ++i)
2227 if (!strcmp_partial(rss_type_table[i].str, str, len))
2229 if (!rss_type_table[i].str)
/* Mark the list as started in the upper 16 bits. */
2231 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Expect another ACTION_RSS_TYPE token next. */
2233 if (ctx->next_num == RTE_DIM(ctx->next))
2235 ctx->next[ctx->next_num++] = next;
2238 action_rss_data = ctx->object;
2239 action_rss_data->conf.types |= rss_type_table[i].rss_type;
2244 * Parse queue field for RSS action.
2246 * Valid tokens are queue indices and the "end" token.
/* Appends one queue index per invocation into action_rss_data->queue[],
 * delegating the numeric parse to parse_int() through a synthetic ARB
 * argument.  The running element count lives in the upper 16 bits of
 * ctx->objdata; "end" finalizes queue_num. */
2249 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2250 const char *str, unsigned int len,
2251 void *buf, unsigned int size)
2253 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2254 struct action_rss_data *action_rss_data;
2261 if (ctx->curr != ACTION_RSS_QUEUE)
2263 i = ctx->objdata >> 16;
2264 if (!strcmp_partial("end", str, len)) {
2265 ctx->objdata &= 0xffff;
/* Hard cap from the storage reserved in struct action_rss_data. */
2268 if (i >= ACTION_RSS_QUEUE_NUM)
/* Target queue[i] via an arbitrary-offset argument descriptor. */
2271 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2272 i * sizeof(action_rss_data->queue[i]),
2273 sizeof(action_rss_data->queue[i]))))
2275 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the new count and expect another queue token. */
2281 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
2283 if (ctx->next_num == RTE_DIM(ctx->next))
2285 ctx->next[ctx->next_num++] = next;
2288 action_rss_data = ctx->object;
2289 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
2290 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2294 /** Parse tokens for destroy command. */
/* First invocation records the command and places the rule-ID array past
 * the buffer header; each later invocation bounds-checks and appends one
 * rule ID slot for parse_int() to fill. */
2296 parse_destroy(struct context *ctx, const struct token *token,
2297 const char *str, unsigned int len,
2298 void *buf, unsigned int size)
2300 struct buffer *out = buf;
2302 /* Token name must match. */
2303 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2305 /* Nothing else to do if there is no buffer. */
2308 if (!out->command) {
2309 if (ctx->curr != DESTROY)
2311 if (sizeof(*out) > size)
2313 out->command = ctx->curr;
2316 ctx->objmask = NULL;
2317 out->args.destroy.rule =
2318 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse IDs that would overflow the caller's buffer. */
2322 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2323 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2326 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2327 ctx->objmask = NULL;
2331 /** Parse tokens for flush command. */
/* Flush takes no extra arguments: only the command itself is recorded. */
2333 parse_flush(struct context *ctx, const struct token *token,
2334 const char *str, unsigned int len,
2335 void *buf, unsigned int size)
2337 struct buffer *out = buf;
2339 /* Token name must match. */
2340 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2342 /* Nothing else to do if there is no buffer. */
2345 if (!out->command) {
2346 if (ctx->curr != FLUSH)
2348 if (sizeof(*out) > size)
2350 out->command = ctx->curr;
2353 ctx->objmask = NULL;
2358 /** Parse tokens for query command. */
/* Records the command; the rule ID and action arguments are filled by
 * later tokens through the argument stack. */
2360 parse_query(struct context *ctx, const struct token *token,
2361 const char *str, unsigned int len,
2362 void *buf, unsigned int size)
2364 struct buffer *out = buf;
2366 /* Token name must match. */
2367 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2369 /* Nothing else to do if there is no buffer. */
2372 if (!out->command) {
2373 if (ctx->curr != QUERY)
2375 if (sizeof(*out) > size)
2377 out->command = ctx->curr;
2380 ctx->objmask = NULL;
2385 /** Parse action names. */
/* Resolves an action name against the next_action[] token list and
 * stores the matched action's type (from its priv data) into the object
 * field described by the popped argument.  On failure the argument is
 * pushed back for other candidate tokens. */
2387 parse_action(struct context *ctx, const struct token *token,
2388 const char *str, unsigned int len,
2389 void *buf, unsigned int size)
2391 struct buffer *out = buf;
2392 const struct arg *arg = pop_args(ctx);
2396 /* Argument is expected. */
2399 /* Parse action name. */
2400 for (i = 0; next_action[i]; ++i) {
2401 const struct parse_action_priv *priv;
2403 token = &token_list[next_action[i]];
2404 if (strcmp_partial(token->name, str, len))
2410 memcpy((uint8_t *)ctx->object + arg->offset,
2416 push_args(ctx, arg);
2420 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): the first call records the command and places
 * the group-ID array after the header; later calls append one bounded
 * slot per group argument. */
2422 parse_list(struct context *ctx, const struct token *token,
2423 const char *str, unsigned int len,
2424 void *buf, unsigned int size)
2426 struct buffer *out = buf;
2428 /* Token name must match. */
2429 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2431 /* Nothing else to do if there is no buffer. */
2434 if (!out->command) {
2435 if (ctx->curr != LIST)
2437 if (sizeof(*out) > size)
2439 out->command = ctx->curr;
2442 ctx->objmask = NULL;
2443 out->args.list.group =
2444 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check before handing out the next group slot. */
2448 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2449 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2452 ctx->object = out->args.list.group + out->args.list.group_n++;
2453 ctx->objmask = NULL;
2457 /** Parse tokens for isolate command. */
/* Records the command; the boolean "set" argument is handled by a
 * subsequent token. */
2459 parse_isolate(struct context *ctx, const struct token *token,
2460 const char *str, unsigned int len,
2461 void *buf, unsigned int size)
2463 struct buffer *out = buf;
2465 /* Token name must match. */
2466 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2468 /* Nothing else to do if there is no buffer. */
2471 if (!out->command) {
2472 if (ctx->curr != ISOLATE)
2474 if (sizeof(*out) > size)
2476 out->command = ctx->curr;
2479 ctx->objmask = NULL;
2485 * Parse signed/unsigned integers 8 to 64-bit long.
2487 * Last argument (ctx->args) is retrieved to determine integer type and
/* Converts the token with strtoimax/strtoumax depending on arg->sign,
 * range-checks against arg->min/max, then stores the value at the
 * argument's offset in ctx->object -- honoring bit-field masks, a
 * 24-bit (3-byte) special case, and optional network byte order via
 * arg->hton.  The same write is replayed into ctx->objmask with all
 * bits set when a mask object is active. */
2491 parse_int(struct context *ctx, const struct token *token,
2492 const char *str, unsigned int len,
2493 void *buf, unsigned int size)
2495 const struct arg *arg = pop_args(ctx);
2500 /* Argument is expected. */
2505 (uintmax_t)strtoimax(str, &end, 0) :
2506 strtoumax(str, &end, 0);
/* Whole token must have been consumed and no conversion error raised. */
2507 if (errno || (size_t)(end - str) != len)
/* Bounds check in the signedness requested by the descriptor. */
2510 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2511 (intmax_t)u > (intmax_t)arg->max)) ||
2512 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field target: spread value/mask bits instead of a plain store. */
2517 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2518 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2522 buf = (uint8_t *)ctx->object + arg->offset;
/* Store using the field's exact width. */
2526 case sizeof(uint8_t):
2527 *(uint8_t *)buf = u;
2529 case sizeof(uint16_t):
2530 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 3-byte fields (e.g. 24-bit IDs) are stored byte by byte. */
2532 case sizeof(uint8_t [3]):
2533 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2535 ((uint8_t *)buf)[0] = u;
2536 ((uint8_t *)buf)[1] = u >> 8;
2537 ((uint8_t *)buf)[2] = u >> 16;
2541 ((uint8_t *)buf)[0] = u >> 16;
2542 ((uint8_t *)buf)[1] = u >> 8;
2543 ((uint8_t *)buf)[2] = u;
2545 case sizeof(uint32_t):
2546 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2548 case sizeof(uint64_t):
2549 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat for the mask object unless it aliases the value we just wrote. */
2554 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2556 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument for other candidate tokens. */
2561 push_args(ctx, arg);
2568 * Three arguments (ctx->args) are retrieved from the stack to store data,
2569 * its actual length and address (in that order).
/* Copies the raw token bytes into the data field, zero-pads the rest,
 * records the length through a recursive parse_int() call, and finally
 * stores a pointer to the data when the address argument is non-empty.
 * All three argument descriptors are restored on failure. */
2572 parse_string(struct context *ctx, const struct token *token,
2573 const char *str, unsigned int len,
2574 void *buf, unsigned int size)
2576 const struct arg *arg_data = pop_args(ctx);
2577 const struct arg *arg_len = pop_args(ctx);
2578 const struct arg *arg_addr = pop_args(ctx);
/* Scratch space to format "len" as decimal for parse_int(). */
2579 char tmp[16]; /* Ought to be enough. */
2582 /* Arguments are expected. */
/* Partial pops must be undone before bailing out. */
2586 push_args(ctx, arg_data);
2590 push_args(ctx, arg_len);
2591 push_args(ctx, arg_data);
2594 size = arg_data->size;
2595 /* Bit-mask fill is not supported. */
2596 if (arg_data->mask || size < len)
2600 /* Let parse_int() fill length information first. */
2601 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2604 push_args(ctx, arg_len);
2605 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2610 buf = (uint8_t *)ctx->object + arg_data->offset;
2611 /* Output buffer is not necessarily NUL-terminated. */
2612 memcpy(buf, str, len);
2613 memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask covers exactly the copied bytes. */
2615 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2616 /* Save address if requested. */
2617 if (arg_addr->size) {
2618 memcpy((uint8_t *)ctx->object + arg_addr->offset,
2620 (uint8_t *)ctx->object + arg_data->offset
2624 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2626 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in original stack order. */
2632 push_args(ctx, arg_addr);
2633 push_args(ctx, arg_len);
2634 push_args(ctx, arg_data);
2639 * Parse a MAC address.
2641 * Last argument (ctx->args) is retrieved to determine storage size and
/* Delegates the textual parse to cmdline_parse_etheraddr() and copies the
 * result (always network byte order) into the object; the mask object, if
 * any, is filled with 0xff over the whole field. */
2645 parse_mac_addr(struct context *ctx, const struct token *token,
2646 const char *str, unsigned int len,
2647 void *buf, unsigned int size)
2649 const struct arg *arg = pop_args(ctx);
2650 struct ether_addr tmp;
2654 /* Argument is expected. */
2658 /* Bit-mask fill is not supported. */
2659 if (arg->mask || size != sizeof(tmp))
2661 /* Only network endian is supported. */
2664 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must have been consumed. */
2665 if (ret < 0 || (unsigned int)ret != len)
2669 buf = (uint8_t *)ctx->object + arg->offset;
2670 memcpy(buf, &tmp, size);
2672 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for other candidate tokens. */
2675 push_args(ctx, arg);
2680 * Parse an IPv4 address.
2682 * Last argument (ctx->args) is retrieved to determine storage size and
/* Converts dotted-quad notation with inet_pton(); when that fails the
 * token is retried as a plain integer via parse_int() so raw numeric
 * addresses remain accepted.  Result is stored in network byte order. */
2686 parse_ipv4_addr(struct context *ctx, const struct token *token,
2687 const char *str, unsigned int len,
2688 void *buf, unsigned int size)
2690 const struct arg *arg = pop_args(ctx);
2695 /* Argument is expected. */
2699 /* Bit-mask fill is not supported. */
2700 if (arg->mask || size != sizeof(tmp))
2702 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2705 memcpy(str2, str, len);
2707 ret = inet_pton(AF_INET, str2, &tmp);
2709 /* Attempt integer parsing. */
2710 push_args(ctx, arg);
2711 return parse_int(ctx, token, str, len, buf, size);
2715 buf = (uint8_t *)ctx->object + arg->offset;
2716 memcpy(buf, &tmp, size);
2718 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for other candidate tokens. */
2721 push_args(ctx, arg);
2726 * Parse an IPv6 address.
2728 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same pattern as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback visible here; result is stored in network byte order. */
2732 parse_ipv6_addr(struct context *ctx, const struct token *token,
2733 const char *str, unsigned int len,
2734 void *buf, unsigned int size)
2736 const struct arg *arg = pop_args(ctx);
2738 struct in6_addr tmp;
2742 /* Argument is expected. */
2746 /* Bit-mask fill is not supported. */
2747 if (arg->mask || size != sizeof(tmp))
2749 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2752 memcpy(str2, str, len);
2754 ret = inet_pton(AF_INET6, str2, &tmp);
2759 buf = (uint8_t *)ctx->object + arg->offset;
2760 memcpy(buf, &tmp, size);
2762 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for other candidate tokens. */
2765 push_args(ctx, arg);
2769 /** Boolean values (even indices stand for false). */
/* NULL-terminated table of accepted boolean spellings; parse_boolean()
 * maps a match at index i to "i & 1" (odd entries are true). */
2770 static const char *const boolean_name[] = {
2780 * Parse a boolean value.
2782 * Last argument (ctx->args) is retrieved to determine storage size and
/* Looks the token up in boolean_name[]; a match is rewritten to "1" or
 * "0" (odd index == true) and then processed as an integer so storage
 * width and byte order are handled by parse_int(). */
2786 parse_boolean(struct context *ctx, const struct token *token,
2787 const char *str, unsigned int len,
2788 void *buf, unsigned int size)
2790 const struct arg *arg = pop_args(ctx);
2794 /* Argument is expected. */
2797 for (i = 0; boolean_name[i]; ++i)
2798 if (!strcmp_partial(boolean_name[i], str, len))
2800 /* Process token as integer. */
2801 if (boolean_name[i])
2802 str = i & 1 ? "1" : "0";
2803 push_args(ctx, arg);
2804 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the ORIGINAL token, not the rewritten one. */
2805 return ret > 0 ? (int)len : ret;
2808 /** Parse port and update context. */
/* Parses a port ID through parse_int() into a scratch buffer, then
 * caches the result in ctx->port so completion callbacks can query the
 * right device. */
2810 parse_port(struct context *ctx, const struct token *token,
2811 const char *str, unsigned int len,
2812 void *buf, unsigned int size)
/* Scratch buffer used when the caller did not provide one. */
2814 struct buffer *out = &(struct buffer){ .port = 0 };
2822 ctx->objmask = NULL;
2823 size = sizeof(*out);
2825 ret = parse_int(ctx, token, str, len, out, size);
2827 ctx->port = out->port;
2833 /** No completion. */
/* Completion stub for tokens with free-form input (no suggestions). */
2835 comp_none(struct context *ctx, const struct token *token,
2836 unsigned int ent, char *buf, unsigned int size)
2846 /** Complete boolean values. */
/* With buf set, writes entry "ent" of boolean_name[]; otherwise the loop
 * falls through so the entry count can be returned. */
2848 comp_boolean(struct context *ctx, const struct token *token,
2849 unsigned int ent, char *buf, unsigned int size)
2855 for (i = 0; boolean_name[i]; ++i)
2856 if (buf && i == ent)
2857 return snprintf(buf, size, "%s", boolean_name[i]);
2863 /** Complete action names. */
/* Same counting/emitting pattern as comp_boolean(), over next_action[]. */
2865 comp_action(struct context *ctx, const struct token *token,
2866 unsigned int ent, char *buf, unsigned int size)
2872 for (i = 0; next_action[i]; ++i)
2873 if (buf && i == ent)
2874 return snprintf(buf, size, "%s",
2875 token_list[next_action[i]].name);
2881 /** Complete available ports. */
/* Iterates attached ethdev ports, emitting the "ent"-th one when a
 * destination buffer is provided. */
2883 comp_port(struct context *ctx, const struct token *token,
2884 unsigned int ent, char *buf, unsigned int size)
2891 RTE_ETH_FOREACH_DEV(p) {
2892 if (buf && i == ent)
2893 return snprintf(buf, size, "%u", p);
2901 /** Complete available rule IDs. */
/* Walks the flow list of the port cached in ctx->port; requires a valid,
 * specific port (RTE_PORT_ALL is rejected since there is no single list
 * to walk). */
2903 comp_rule_id(struct context *ctx, const struct token *token,
2904 unsigned int ent, char *buf, unsigned int size)
2907 struct rte_port *port;
2908 struct port_flow *pf;
2911 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2912 ctx->port == (portid_t)RTE_PORT_ALL)
2914 port = &ports[ctx->port];
2915 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2916 if (buf && i == ent)
2917 return snprintf(buf, size, "%u", pf->id);
2925 /** Complete type field for RSS action. */
/* Entries 0..N-1 come from rss_type_table[]; one extra trailing entry
 * completes the list-terminating "end" keyword. */
2927 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
2928 unsigned int ent, char *buf, unsigned int size)
2934 for (i = 0; rss_type_table[i].str; ++i)
2939 return snprintf(buf, size, "%s", rss_type_table[ent].str);
2941 return snprintf(buf, size, "end");
2945 /** Complete queue field for RSS action. */
/* Suggests queue indices numerically, plus the terminating "end". */
2947 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2948 unsigned int ent, char *buf, unsigned int size)
2955 return snprintf(buf, size, "%u", ent);
2957 return snprintf(buf, size, "end");
2961 /** Internal context. */
/* Single shared parser state: the cmdline callbacks below all operate on
 * this one instance, so parsing is not reentrant. */
2962 static struct context cmd_flow_context;
2964 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of the file. */
2965 cmdline_parse_inst_t cmd_flow;
2967 /** Initialize context. */
/* Resets per-command parser state before a new command line is parsed. */
2969 cmd_flow_context_init(struct context *ctx)
2971 /* A full memset() is not necessary. */
2981 ctx->objmask = NULL;
2984 /** Parse a token (cmdline API). */
/* Splits one whitespace/'#'-delimited token out of "src", tries every
 * candidate from the top of the ctx->next[] stack until one accepts it,
 * then pushes that token's own follow-up token lists and argument
 * descriptors for the next call. */
2986 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2989 struct context *ctx = &cmd_flow_context;
2990 const struct token *token;
2991 const enum index *list;
2996 token = &token_list[ctx->curr];
2997 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
3000 for (len = 0; src[len]; ++len)
3001 if (src[len] == '#' || isspace(src[len]))
3005 /* Last argument and EOL detection. */
3006 for (i = len; src[i]; ++i)
3007 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
3009 else if (!isspace(src[i])) {
3014 if (src[i] == '\r' || src[i] == '\n') {
3018 /* Initialize context if necessary. */
3019 if (!ctx->next_num) {
3022 ctx->next[ctx->next_num++] = token->next[0];
3024 /* Process argument through candidates. */
3025 ctx->prev = ctx->curr;
3026 list = ctx->next[ctx->next_num - 1];
3027 for (i = 0; list[i]; ++i) {
3028 const struct token *next = &token_list[list[i]];
3031 ctx->curr = list[i];
/* Tokens without a custom callback fall back to name matching. */
3033 tmp = next->call(ctx, next, src, len, result, size);
3035 tmp = parse_default(ctx, next, src, len, result, size);
/* A partial consume counts as rejection; try the next candidate. */
3036 if (tmp == -1 || tmp != len)
3044 /* Push subsequent tokens if any. */
3046 for (i = 0; token->next[i]; ++i) {
3047 if (ctx->next_num == RTE_DIM(ctx->next))
3049 ctx->next[ctx->next_num++] = token->next[i];
3051 /* Push arguments if any. */
3053 for (i = 0; token->args[i]; ++i) {
3054 if (ctx->args_num == RTE_DIM(ctx->args))
3056 ctx->args[ctx->args_num++] = token->args[i];
3061 /** Return number of completion entries (cmdline API). */
/* Counts the candidate tokens on top of the next[] stack; when exactly
 * one candidate exists and it has its own completion callback, delegates
 * the count to that callback (queried with a NULL buffer). */
3063 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
3065 struct context *ctx = &cmd_flow_context;
3066 const struct token *token = &token_list[ctx->curr];
3067 const enum index *list;
3071 /* Count number of tokens in current list. */
3073 list = ctx->next[ctx->next_num - 1];
3075 list = token->next[0];
3076 for (i = 0; list[i]; ++i)
3081 * If there is a single token, use its completion callback, otherwise
3082 * return the number of entries.
3084 token = &token_list[list[0]];
3085 if (i == 1 && token->comp) {
3086 /* Save index for cmd_flow_get_help(). */
3087 ctx->prev = list[0];
3088 return token->comp(ctx, token, 0, NULL, 0);
3093 /** Return a completion entry (cmdline API). */
/* Companion to cmd_flow_complete_get_nb(): fetches the "index"-th
 * completion string, via the lone candidate's callback when it has one,
 * otherwise by copying the candidate token's name. */
3095 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3096 char *dst, unsigned int size)
3098 struct context *ctx = &cmd_flow_context;
3099 const struct token *token = &token_list[ctx->curr];
3100 const enum index *list;
3104 /* Count number of tokens in current list. */
3106 list = ctx->next[ctx->next_num - 1];
3108 list = token->next[0];
3109 for (i = 0; list[i]; ++i)
3113 /* If there is a single token, use its completion callback. */
3114 token = &token_list[list[0]];
3115 if (i == 1 && token->comp) {
3116 /* Save index for cmd_flow_get_help(). */
3117 ctx->prev = list[0];
3118 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3120 /* Otherwise make sure the index is valid and use defaults. */
3123 token = &token_list[list[index]];
3124 snprintf(dst, size, "%s", token->name);
3125 /* Save index for cmd_flow_get_help(). */
3126 ctx->prev = list[index];
3130 /** Populate help strings for current token (cmdline API). */
/* Writes the token's type label into "dst" and points the global
 * cmd_flow.help_str at the token's help text (or its name as fallback). */
3132 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3134 struct context *ctx = &cmd_flow_context;
3135 const struct token *token = &token_list[ctx->prev];
3140 /* Set token type and update global help with details. */
3141 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
3143 cmd_flow.help_str = token->help;
3145 cmd_flow.help_str = token->name;
3149 /** Token definition template (cmdline API). */
/* Every dynamic token handed back by cmd_flow_tok() shares this single
 * ops vtable, routing all cmdline callbacks into this parser. */
3150 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3151 .ops = &(struct cmdline_token_ops){
3152 .parse = cmd_flow_parse,
3153 .complete_get_nb = cmd_flow_complete_get_nb,
3154 .complete_get_elt = cmd_flow_complete_get_elt,
3155 .get_help = cmd_flow_get_help,
3160 /** Populate the next dynamic token. */
/* The cmdline library calls this once per token slot; the context is
 * reset on the first slot, NULL-terminated when no tokens remain, and
 * otherwise each slot is backed by the shared cmd_flow_token_hdr. */
3162 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3163 cmdline_parse_token_hdr_t **hdr_inst)
3165 struct context *ctx = &cmd_flow_context;
3167 /* Always reinitialize context before requesting the first token. */
3168 if (!(hdr_inst - cmd_flow.tokens))
3169 cmd_flow_context_init(ctx);
3170 /* Return NULL when no more tokens are expected. */
3171 if (!ctx->next_num && ctx->curr) {
3175 /* Determine if command should end here. */
3176 if (ctx->eol && ctx->last && ctx->next_num) {
3177 const enum index *list = ctx->next[ctx->next_num - 1];
3180 for (i = 0; list[i]; ++i) {
3187 *hdr = &cmd_flow_token_hdr;
3190 /** Dispatch parsed buffer to function calls. */
/* Final step after a complete command line: hand the accumulated buffer
 * to the matching port_flow_*() implementation in config.c. */
3192 cmd_flow_parsed(const struct buffer *in)
3194 switch (in->command) {
3196 port_flow_validate(in->port, &in->args.vc.attr,
3197 in->args.vc.pattern, in->args.vc.actions);
3200 port_flow_create(in->port, &in->args.vc.attr,
3201 in->args.vc.pattern, in->args.vc.actions);
3204 port_flow_destroy(in->port, in->args.destroy.rule_n,
3205 in->args.destroy.rule);
3208 port_flow_flush(in->port);
3211 port_flow_query(in->port, in->args.query.rule,
3212 in->args.query.action);
3215 port_flow_list(in->port, in->args.list.group_n,
3216 in->args.list.group);
3219 port_flow_isolate(in->port, in->args.isolate.set);
3226 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose callback: generates tokens via cmd_flow_tok() during
 * parsing and dispatches the finished buffer via cmd_flow_parsed(). */
3228 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3231 cmd_flow_tok(arg0, arg2);
3233 cmd_flow_parsed(arg0);
3236 /** Global parser instance (cmdline API). */
3237 cmdline_parse_inst_t cmd_flow = {
3239 .data = NULL, /**< Unused. */
3240 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3243 }, /**< Tokens are returned by cmd_flow_tok(). */