1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
73 /* Validate/create pattern. */
138 ITEM_E_TAG_GRP_ECID_B,
155 /* Validate/create actions. */
170 ACTION_RSS_FUNC_DEFAULT,
171 ACTION_RSS_FUNC_TOEPLITZ,
172 ACTION_RSS_FUNC_SIMPLE_XOR,
187 /** Maximum size for pattern in struct rte_flow_item_raw. */
188 #define ITEM_RAW_PATTERN_SIZE 40
190 /** Storage size for struct rte_flow_item_raw including pattern. */
191 #define ITEM_RAW_SIZE \
192 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
194 /** Maximum number of queue indices in struct rte_flow_action_rss. */
195 #define ACTION_RSS_QUEUE_NUM 32
197 /** Storage for struct rte_flow_action_rss including external data. */
198 struct action_rss_data {
199 struct rte_flow_action_rss conf;
200 uint8_t key[RSS_HASH_KEY_LENGTH];
201 uint16_t queue[ACTION_RSS_QUEUE_NUM];
204 /** Maximum number of subsequent tokens and arguments on the stack. */
205 #define CTX_STACK_SIZE 16
207 /** Parser context. */
209 /** Stack of subsequent token lists to process. */
210 const enum index *next[CTX_STACK_SIZE];
211 /** Arguments for stacked tokens. */
212 const void *args[CTX_STACK_SIZE];
213 enum index curr; /**< Current token index. */
214 enum index prev; /**< Index of the last token seen. */
215 int next_num; /**< Number of entries in next[]. */
216 int args_num; /**< Number of entries in args[]. */
217 uint32_t eol:1; /**< EOL has been detected. */
218 uint32_t last:1; /**< No more arguments. */
219 portid_t port; /**< Current port ID (for completions). */
220 uint32_t objdata; /**< Object-specific data. */
221 void *object; /**< Address of current object for relative offsets. */
222 void *objmask; /**< Object a full mask must be written to. */
225 /** Token argument. */
227 uint32_t hton:1; /**< Use network byte ordering. */
228 uint32_t sign:1; /**< Value is signed. */
229 uint32_t bounded:1; /**< Value is bounded. */
230 uintmax_t min; /**< Minimum value if bounded. */
231 uintmax_t max; /**< Maximum value if bounded. */
232 uint32_t offset; /**< Relative offset from ctx->object. */
233 uint32_t size; /**< Field size. */
234 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
237 /** Parser token definition. */
239 /** Type displayed during completion (defaults to "TOKEN"). */
241 /** Help displayed during completion (defaults to token name). */
243 /** Private data used by parser functions. */
246 * Lists of subsequent tokens to push on the stack. Each call to the
247 * parser consumes the last entry of that stack.
249 const enum index *const *next;
250 /** Arguments stack for subsequent tokens that need them. */
251 const struct arg *const *args;
253 * Token-processing callback, returns -1 in case of error, the
254 * length of the matched string otherwise. If NULL, attempts to
255 * match the token name.
257 * If buf is not NULL, the result should be stored in it according
258 * to context. An error is returned if not large enough.
260 int (*call)(struct context *ctx, const struct token *token,
261 const char *str, unsigned int len,
262 void *buf, unsigned int size);
264 * Callback that provides possible values for this token, used for
265 * completion. Returns -1 in case of error, the number of possible
266 * values otherwise. If NULL, the token name is used.
268 * If buf is not NULL, entry index ent is written to buf and the
269 * full length of the entry is returned (same behavior as
272 int (*comp)(struct context *ctx, const struct token *token,
273 unsigned int ent, char *buf, unsigned int size);
274 /** Mandatory token name, no default value. */
278 /** Static initializer for the next field. */
279 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
281 /** Static initializer for a NEXT() entry. */
282 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
284 /** Static initializer for the args field. */
285 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
287 /** Static initializer for ARGS() to target a field. */
288 #define ARGS_ENTRY(s, f) \
289 (&(const struct arg){ \
290 .offset = offsetof(s, f), \
291 .size = sizeof(((s *)0)->f), \
294 /** Static initializer for ARGS() to target a bit-field. */
295 #define ARGS_ENTRY_BF(s, f, b) \
296 (&(const struct arg){ \
298 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
301 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
302 #define ARGS_ENTRY_MASK(s, f, m) \
303 (&(const struct arg){ \
304 .offset = offsetof(s, f), \
305 .size = sizeof(((s *)0)->f), \
306 .mask = (const void *)(m), \
309 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
310 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
311 (&(const struct arg){ \
313 .offset = offsetof(s, f), \
314 .size = sizeof(((s *)0)->f), \
315 .mask = (const void *)(m), \
318 /** Static initializer for ARGS() to target a pointer. */
319 #define ARGS_ENTRY_PTR(s, f) \
320 (&(const struct arg){ \
321 .size = sizeof(*((s *)0)->f), \
324 /** Static initializer for ARGS() with arbitrary offset and size. */
325 #define ARGS_ENTRY_ARB(o, s) \
326 (&(const struct arg){ \
331 /** Same as ARGS_ENTRY_ARB() with bounded values. */
332 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
333 (&(const struct arg){ \
341 /** Same as ARGS_ENTRY() using network byte ordering. */
342 #define ARGS_ENTRY_HTON(s, f) \
343 (&(const struct arg){ \
345 .offset = offsetof(s, f), \
346 .size = sizeof(((s *)0)->f), \
349 /** Parser output buffer layout expected by cmd_flow_parsed(). */
351 enum index command; /**< Flow command. */
352 portid_t port; /**< Affected port ID. */
355 struct rte_flow_attr attr;
356 struct rte_flow_item *pattern;
357 struct rte_flow_action *actions;
361 } vc; /**< Validate/create arguments. */
365 } destroy; /**< Destroy arguments. */
368 enum rte_flow_action_type action;
369 } query; /**< Query arguments. */
373 } list; /**< List arguments. */
376 } isolate; /**< Isolated mode arguments. */
377 } args; /**< Command arguments. */
380 /** Private data for pattern items. */
381 struct parse_item_priv {
382 enum rte_flow_item_type type; /**< Item type. */
383 uint32_t size; /**< Size of item specification structure. */
386 #define PRIV_ITEM(t, s) \
387 (&(const struct parse_item_priv){ \
388 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
392 /** Private data for actions. */
393 struct parse_action_priv {
394 enum rte_flow_action_type type; /**< Action type. */
395 uint32_t size; /**< Size of action configuration structure. */
398 #define PRIV_ACTION(t, s) \
399 (&(const struct parse_action_priv){ \
400 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
404 static const enum index next_vc_attr[] = {
413 static const enum index next_destroy_attr[] = {
419 static const enum index next_list_attr[] = {
425 static const enum index item_param[] = {
434 static const enum index next_item[] = {
464 static const enum index item_fuzzy[] = {
470 static const enum index item_any[] = {
476 static const enum index item_vf[] = {
482 static const enum index item_port[] = {
488 static const enum index item_raw[] = {
498 static const enum index item_eth[] = {
506 static const enum index item_vlan[] = {
516 static const enum index item_ipv4[] = {
526 static const enum index item_ipv6[] = {
537 static const enum index item_icmp[] = {
544 static const enum index item_udp[] = {
551 static const enum index item_tcp[] = {
559 static const enum index item_sctp[] = {
568 static const enum index item_vxlan[] = {
574 static const enum index item_e_tag[] = {
575 ITEM_E_TAG_GRP_ECID_B,
580 static const enum index item_nvgre[] = {
586 static const enum index item_mpls[] = {
592 static const enum index item_gre[] = {
598 static const enum index item_gtp[] = {
604 static const enum index item_geneve[] = {
611 static const enum index next_action[] = {
627 static const enum index action_mark[] = {
633 static const enum index action_queue[] = {
639 static const enum index action_rss[] = {
649 static const enum index action_vf[] = {
656 static const enum index action_meter[] = {
662 static int parse_init(struct context *, const struct token *,
663 const char *, unsigned int,
664 void *, unsigned int);
665 static int parse_vc(struct context *, const struct token *,
666 const char *, unsigned int,
667 void *, unsigned int);
668 static int parse_vc_spec(struct context *, const struct token *,
669 const char *, unsigned int, void *, unsigned int);
670 static int parse_vc_conf(struct context *, const struct token *,
671 const char *, unsigned int, void *, unsigned int);
672 static int parse_vc_action_rss(struct context *, const struct token *,
673 const char *, unsigned int, void *,
675 static int parse_vc_action_rss_func(struct context *, const struct token *,
676 const char *, unsigned int, void *,
678 static int parse_vc_action_rss_type(struct context *, const struct token *,
679 const char *, unsigned int, void *,
681 static int parse_vc_action_rss_queue(struct context *, const struct token *,
682 const char *, unsigned int, void *,
684 static int parse_destroy(struct context *, const struct token *,
685 const char *, unsigned int,
686 void *, unsigned int);
687 static int parse_flush(struct context *, const struct token *,
688 const char *, unsigned int,
689 void *, unsigned int);
690 static int parse_query(struct context *, const struct token *,
691 const char *, unsigned int,
692 void *, unsigned int);
693 static int parse_action(struct context *, const struct token *,
694 const char *, unsigned int,
695 void *, unsigned int);
696 static int parse_list(struct context *, const struct token *,
697 const char *, unsigned int,
698 void *, unsigned int);
699 static int parse_isolate(struct context *, const struct token *,
700 const char *, unsigned int,
701 void *, unsigned int);
702 static int parse_int(struct context *, const struct token *,
703 const char *, unsigned int,
704 void *, unsigned int);
705 static int parse_prefix(struct context *, const struct token *,
706 const char *, unsigned int,
707 void *, unsigned int);
708 static int parse_boolean(struct context *, const struct token *,
709 const char *, unsigned int,
710 void *, unsigned int);
711 static int parse_string(struct context *, const struct token *,
712 const char *, unsigned int,
713 void *, unsigned int);
714 static int parse_mac_addr(struct context *, const struct token *,
715 const char *, unsigned int,
716 void *, unsigned int);
717 static int parse_ipv4_addr(struct context *, const struct token *,
718 const char *, unsigned int,
719 void *, unsigned int);
720 static int parse_ipv6_addr(struct context *, const struct token *,
721 const char *, unsigned int,
722 void *, unsigned int);
723 static int parse_port(struct context *, const struct token *,
724 const char *, unsigned int,
725 void *, unsigned int);
726 static int comp_none(struct context *, const struct token *,
727 unsigned int, char *, unsigned int);
728 static int comp_boolean(struct context *, const struct token *,
729 unsigned int, char *, unsigned int);
730 static int comp_action(struct context *, const struct token *,
731 unsigned int, char *, unsigned int);
732 static int comp_port(struct context *, const struct token *,
733 unsigned int, char *, unsigned int);
734 static int comp_rule_id(struct context *, const struct token *,
735 unsigned int, char *, unsigned int);
736 static int comp_vc_action_rss_type(struct context *, const struct token *,
737 unsigned int, char *, unsigned int);
738 static int comp_vc_action_rss_queue(struct context *, const struct token *,
739 unsigned int, char *, unsigned int);
741 /** Token definitions. */
742 static const struct token token_list[] = {
743 /* Special tokens. */
746 .help = "null entry, abused as the entry point",
747 .next = NEXT(NEXT_ENTRY(FLOW)),
752 .help = "command may end here",
758 .help = "integer value",
763 .name = "{unsigned}",
765 .help = "unsigned integer value",
772 .help = "prefix length for bit-mask",
773 .call = parse_prefix,
779 .help = "any boolean value",
780 .call = parse_boolean,
781 .comp = comp_boolean,
786 .help = "fixed string",
787 .call = parse_string,
791 .name = "{MAC address}",
793 .help = "standard MAC address notation",
794 .call = parse_mac_addr,
798 .name = "{IPv4 address}",
799 .type = "IPV4 ADDRESS",
800 .help = "standard IPv4 address notation",
801 .call = parse_ipv4_addr,
805 .name = "{IPv6 address}",
806 .type = "IPV6 ADDRESS",
807 .help = "standard IPv6 address notation",
808 .call = parse_ipv6_addr,
814 .help = "rule identifier",
816 .comp = comp_rule_id,
821 .help = "port identifier",
826 .name = "{group_id}",
828 .help = "group identifier",
835 .help = "priority level",
839 /* Top-level command. */
842 .type = "{command} {port_id} [{arg} [...]]",
843 .help = "manage ingress/egress flow rules",
844 .next = NEXT(NEXT_ENTRY
854 /* Sub-level commands. */
857 .help = "check whether a flow rule can be created",
858 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
859 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
864 .help = "create a flow rule",
865 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
866 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
871 .help = "destroy specific flow rules",
872 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
873 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
874 .call = parse_destroy,
878 .help = "destroy all flow rules",
879 .next = NEXT(NEXT_ENTRY(PORT_ID)),
880 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
885 .help = "query an existing flow rule",
886 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
888 NEXT_ENTRY(PORT_ID)),
889 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
890 ARGS_ENTRY(struct buffer, args.query.rule),
891 ARGS_ENTRY(struct buffer, port)),
896 .help = "list existing flow rules",
897 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
898 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
903 .help = "restrict ingress traffic to the defined flow rules",
904 .next = NEXT(NEXT_ENTRY(BOOLEAN),
905 NEXT_ENTRY(PORT_ID)),
906 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
907 ARGS_ENTRY(struct buffer, port)),
908 .call = parse_isolate,
910 /* Destroy arguments. */
913 .help = "specify a rule identifier",
914 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
915 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
916 .call = parse_destroy,
918 /* Query arguments. */
922 .help = "action to query, must be part of the rule",
923 .call = parse_action,
926 /* List arguments. */
929 .help = "specify a group",
930 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
931 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
934 /* Validate/create attributes. */
937 .help = "specify a group",
938 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
939 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
944 .help = "specify a priority level",
945 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
946 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
951 .help = "affect rule to ingress",
952 .next = NEXT(next_vc_attr),
957 .help = "affect rule to egress",
958 .next = NEXT(next_vc_attr),
961 /* Validate/create pattern. */
964 .help = "submit a list of pattern items",
965 .next = NEXT(next_item),
970 .help = "match value perfectly (with full bit-mask)",
971 .call = parse_vc_spec,
973 [ITEM_PARAM_SPEC] = {
975 .help = "match value according to configured bit-mask",
976 .call = parse_vc_spec,
978 [ITEM_PARAM_LAST] = {
980 .help = "specify upper bound to establish a range",
981 .call = parse_vc_spec,
983 [ITEM_PARAM_MASK] = {
985 .help = "specify bit-mask with relevant bits set to one",
986 .call = parse_vc_spec,
988 [ITEM_PARAM_PREFIX] = {
990 .help = "generate bit-mask from a prefix length",
991 .call = parse_vc_spec,
995 .help = "specify next pattern item",
996 .next = NEXT(next_item),
1000 .help = "end list of pattern items",
1001 .priv = PRIV_ITEM(END, 0),
1002 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1007 .help = "no-op pattern item",
1008 .priv = PRIV_ITEM(VOID, 0),
1009 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1014 .help = "perform actions when pattern does not match",
1015 .priv = PRIV_ITEM(INVERT, 0),
1016 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1021 .help = "match any protocol for the current layer",
1022 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1023 .next = NEXT(item_any),
1028 .help = "number of layers covered",
1029 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1030 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1034 .help = "match packets addressed to the physical function",
1035 .priv = PRIV_ITEM(PF, 0),
1036 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1041 .help = "match packets addressed to a virtual function ID",
1042 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1043 .next = NEXT(item_vf),
1048 .help = "destination VF ID",
1049 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1050 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1054 .help = "device-specific physical port index to use",
1055 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1056 .next = NEXT(item_port),
1059 [ITEM_PORT_INDEX] = {
1061 .help = "physical port index",
1062 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1063 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1067 .help = "match an arbitrary byte string",
1068 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1069 .next = NEXT(item_raw),
1072 [ITEM_RAW_RELATIVE] = {
1074 .help = "look for pattern after the previous item",
1075 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1076 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1079 [ITEM_RAW_SEARCH] = {
1081 .help = "search pattern from offset (see also limit)",
1082 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1083 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1086 [ITEM_RAW_OFFSET] = {
1088 .help = "absolute or relative offset for pattern",
1089 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1090 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1092 [ITEM_RAW_LIMIT] = {
1094 .help = "search area limit for start of pattern",
1095 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1096 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1098 [ITEM_RAW_PATTERN] = {
1100 .help = "byte string to look for",
1101 .next = NEXT(item_raw,
1103 NEXT_ENTRY(ITEM_PARAM_IS,
1106 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1107 ARGS_ENTRY(struct rte_flow_item_raw, length),
1108 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1109 ITEM_RAW_PATTERN_SIZE)),
1113 .help = "match Ethernet header",
1114 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1115 .next = NEXT(item_eth),
1120 .help = "destination MAC",
1121 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1122 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1126 .help = "source MAC",
1127 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1128 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1132 .help = "EtherType",
1133 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1134 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1138 .help = "match 802.1Q/ad VLAN tag",
1139 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1140 .next = NEXT(item_vlan),
1143 [ITEM_VLAN_TPID] = {
1145 .help = "tag protocol identifier",
1146 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1147 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1151 .help = "tag control information",
1152 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1153 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1157 .help = "priority code point",
1158 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1159 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1164 .help = "drop eligible indicator",
1165 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1166 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1171 .help = "VLAN identifier",
1172 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1173 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1178 .help = "match IPv4 header",
1179 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1180 .next = NEXT(item_ipv4),
1185 .help = "type of service",
1186 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1187 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1188 hdr.type_of_service)),
1192 .help = "time to live",
1193 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1194 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1197 [ITEM_IPV4_PROTO] = {
1199 .help = "next protocol ID",
1200 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1201 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1202 hdr.next_proto_id)),
1206 .help = "source address",
1207 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1208 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1213 .help = "destination address",
1214 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1215 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1220 .help = "match IPv6 header",
1221 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1222 .next = NEXT(item_ipv6),
1227 .help = "traffic class",
1228 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1229 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1231 "\x0f\xf0\x00\x00")),
1233 [ITEM_IPV6_FLOW] = {
1235 .help = "flow label",
1236 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1237 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1239 "\x00\x0f\xff\xff")),
1241 [ITEM_IPV6_PROTO] = {
1243 .help = "protocol (next header)",
1244 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1245 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1250 .help = "hop limit",
1251 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1257 .help = "source address",
1258 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1259 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1264 .help = "destination address",
1265 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1266 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1271 .help = "match ICMP header",
1272 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1273 .next = NEXT(item_icmp),
1276 [ITEM_ICMP_TYPE] = {
1278 .help = "ICMP packet type",
1279 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1280 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1283 [ITEM_ICMP_CODE] = {
1285 .help = "ICMP packet code",
1286 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1287 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1292 .help = "match UDP header",
1293 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1294 .next = NEXT(item_udp),
1299 .help = "UDP source port",
1300 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1301 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1306 .help = "UDP destination port",
1307 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1308 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1313 .help = "match TCP header",
1314 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1315 .next = NEXT(item_tcp),
1320 .help = "TCP source port",
1321 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1322 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1327 .help = "TCP destination port",
1328 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1329 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1332 [ITEM_TCP_FLAGS] = {
1334 .help = "TCP flags",
1335 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1336 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1341 .help = "match SCTP header",
1342 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1343 .next = NEXT(item_sctp),
1348 .help = "SCTP source port",
1349 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1350 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1355 .help = "SCTP destination port",
1356 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1357 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1362 .help = "validation tag",
1363 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1364 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1367 [ITEM_SCTP_CKSUM] = {
1370 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1371 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1376 .help = "match VXLAN header",
1377 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1378 .next = NEXT(item_vxlan),
1381 [ITEM_VXLAN_VNI] = {
1383 .help = "VXLAN identifier",
1384 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1385 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1389 .help = "match E-Tag header",
1390 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1391 .next = NEXT(item_e_tag),
1394 [ITEM_E_TAG_GRP_ECID_B] = {
1395 .name = "grp_ecid_b",
1396 .help = "GRP and E-CID base",
1397 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1398 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1404 .help = "match NVGRE header",
1405 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1406 .next = NEXT(item_nvgre),
1409 [ITEM_NVGRE_TNI] = {
1411 .help = "virtual subnet ID",
1412 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1413 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1417 .help = "match MPLS header",
1418 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1419 .next = NEXT(item_mpls),
1422 [ITEM_MPLS_LABEL] = {
1424 .help = "MPLS label",
1425 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1426 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1432 .help = "match GRE header",
1433 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1434 .next = NEXT(item_gre),
1437 [ITEM_GRE_PROTO] = {
1439 .help = "GRE protocol type",
1440 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1441 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1446 .help = "fuzzy pattern match, expect faster than default",
1447 .priv = PRIV_ITEM(FUZZY,
1448 sizeof(struct rte_flow_item_fuzzy)),
1449 .next = NEXT(item_fuzzy),
1452 [ITEM_FUZZY_THRESH] = {
1454 .help = "match accuracy threshold",
1455 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1456 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1461 .help = "match GTP header",
1462 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1463 .next = NEXT(item_gtp),
1468 .help = "tunnel endpoint identifier",
1469 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1470 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1474 .help = "match GTP header",
1475 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1476 .next = NEXT(item_gtp),
1481 .help = "match GTP header",
1482 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1483 .next = NEXT(item_gtp),
1488 .help = "match GENEVE header",
1489 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1490 .next = NEXT(item_geneve),
1493 [ITEM_GENEVE_VNI] = {
1495 .help = "virtual network identifier",
1496 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1497 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1499 [ITEM_GENEVE_PROTO] = {
1501 .help = "GENEVE protocol type",
1502 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1503 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1507 /* Validate/create actions. */
1510 .help = "submit a list of associated actions",
1511 .next = NEXT(next_action),
1516 .help = "specify next action",
1517 .next = NEXT(next_action),
1521 .help = "end list of actions",
1522 .priv = PRIV_ACTION(END, 0),
1527 .help = "no-op action",
1528 .priv = PRIV_ACTION(VOID, 0),
1529 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1532 [ACTION_PASSTHRU] = {
1534 .help = "let subsequent rule process matched packets",
1535 .priv = PRIV_ACTION(PASSTHRU, 0),
1536 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1541 .help = "attach 32 bit value to packets",
1542 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1543 .next = NEXT(action_mark),
1546 [ACTION_MARK_ID] = {
1548 .help = "32 bit value to return with packets",
1549 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1550 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1551 .call = parse_vc_conf,
1555 .help = "flag packets",
1556 .priv = PRIV_ACTION(FLAG, 0),
1557 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1562 .help = "assign packets to a given queue index",
1563 .priv = PRIV_ACTION(QUEUE,
1564 sizeof(struct rte_flow_action_queue)),
1565 .next = NEXT(action_queue),
1568 [ACTION_QUEUE_INDEX] = {
1570 .help = "queue index to use",
1571 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1572 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1573 .call = parse_vc_conf,
1577 .help = "drop packets (note: passthru has priority)",
1578 .priv = PRIV_ACTION(DROP, 0),
1579 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1584 .help = "enable counters for this rule",
1585 .priv = PRIV_ACTION(COUNT, 0),
1586 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1591 .help = "spread packets among several queues",
1592 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1593 .next = NEXT(action_rss),
1594 .call = parse_vc_action_rss,
1596 [ACTION_RSS_FUNC] = {
1598 .help = "RSS hash function to apply",
1599 .next = NEXT(action_rss,
1600 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1601 ACTION_RSS_FUNC_TOEPLITZ,
1602 ACTION_RSS_FUNC_SIMPLE_XOR)),
1604 [ACTION_RSS_FUNC_DEFAULT] = {
1606 .help = "default hash function",
1607 .call = parse_vc_action_rss_func,
1609 [ACTION_RSS_FUNC_TOEPLITZ] = {
1611 .help = "Toeplitz hash function",
1612 .call = parse_vc_action_rss_func,
1614 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1615 .name = "simple_xor",
1616 .help = "simple XOR hash function",
1617 .call = parse_vc_action_rss_func,
1619 [ACTION_RSS_TYPES] = {
1621 .help = "specific RSS hash types",
1622 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1624 [ACTION_RSS_TYPE] = {
1626 .help = "RSS hash type",
1627 .call = parse_vc_action_rss_type,
1628 .comp = comp_vc_action_rss_type,
1630 [ACTION_RSS_KEY] = {
1632 .help = "RSS hash key",
1633 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1634 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1636 (offsetof(struct action_rss_data, conf) +
1637 offsetof(struct rte_flow_action_rss, key_len),
1638 sizeof(((struct rte_flow_action_rss *)0)->
1640 ARGS_ENTRY(struct action_rss_data, key)),
1642 [ACTION_RSS_KEY_LEN] = {
1644 .help = "RSS hash key length in bytes",
1645 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1646 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1647 (offsetof(struct action_rss_data, conf) +
1648 offsetof(struct rte_flow_action_rss, key_len),
1649 sizeof(((struct rte_flow_action_rss *)0)->
1652 RSS_HASH_KEY_LENGTH)),
1654 [ACTION_RSS_QUEUES] = {
1656 .help = "queue indices to use",
1657 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1658 .call = parse_vc_conf,
1660 [ACTION_RSS_QUEUE] = {
1662 .help = "queue index",
1663 .call = parse_vc_action_rss_queue,
1664 .comp = comp_vc_action_rss_queue,
1668 .help = "redirect packets to physical device function",
1669 .priv = PRIV_ACTION(PF, 0),
1670 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1675 .help = "redirect packets to virtual device function",
1676 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1677 .next = NEXT(action_vf),
1680 [ACTION_VF_ORIGINAL] = {
1682 .help = "use original VF ID if possible",
1683 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1684 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1686 .call = parse_vc_conf,
1690 .help = "VF ID to redirect packets to",
1691 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1692 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1693 .call = parse_vc_conf,
1697 .help = "meter the directed packets at given id",
1698 .priv = PRIV_ACTION(METER,
1699 sizeof(struct rte_flow_action_meter)),
1700 .next = NEXT(action_meter),
1703 [ACTION_METER_ID] = {
1705 .help = "meter id to use",
1706 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1707 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1708 .call = parse_vc_conf,
/** Remove and return last entry from argument stack. */
static const struct arg *
pop_args(struct context *ctx)
	/* Returns NULL when the stack is empty (args_num == 0). */
	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
/** Add entry on top of the argument stack. */
static int
push_args(struct context *ctx, const struct arg *arg)
	/* Reject when the fixed-size stack (CTX_STACK_SIZE entries) is full. */
	if (ctx->args_num == CTX_STACK_SIZE)
	ctx->args[ctx->args_num++] = arg;
/** Spread value into buffer according to bit-mask. */
arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
	uint32_t i = arg->size;
	/* On little-endian hosts the mask bytes are walked from the most
	 * significant end downward. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	unsigned int shift = 0;
	uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
	/* Scatter val bit-by-bit into the positions set in mask[i];
	 * bits outside the mask are left untouched in *buf. */
	for (shift = 0; arg->mask[i] >> shift; ++shift) {
		if (!(arg->mask[i] & (1 << shift)))
		*buf &= ~(1 << shift);
		*buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * @param full
 *   Complete NUL-terminated token name.
 * @param partial
 *   Possibly-unterminated user input of @p partial_len bytes.
 * @param partial_len
 *   Number of bytes of @p partial to compare.
 *
 * @return
 *   0 when @p partial matches @p full exactly (same length), nonzero
 *   otherwise. When @p partial is a strict prefix of @p full, the first
 *   unmatched character of @p full is returned (nonzero, usable as a
 *   comparison result).
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	/* Prefix compared equal; an exact match requires identical length. */
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
 * Parse a prefix length and generate a bit-mask.
 * Last argument (ctx->args) is retrieved to determine mask size, storage
 * location and whether the result must use network byte ordering.
parse_prefix(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* conv[n] is a byte whose n most significant bits are set. */
	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
	/* Argument is expected. */
	u = strtoumax(str, &end, 0);
	/* Whole token must be a valid number. */
	if (errno || (size_t)(end - str) != len)
	/* Bit-field storage: delegate mask generation to arg_entry_bf_fill(). */
	extra = arg_entry_bf_fill(NULL, 0, arg);
	if (!arg_entry_bf_fill(ctx->object, v, arg) ||
	    !arg_entry_bf_fill(ctx->objmask, -1, arg))
	/* Prefix must fit in the destination field. */
	if (bytes > size || bytes + !!extra > size)
	buf = (uint8_t *)ctx->object + arg->offset;
	/* Network byte order: fill most significant bytes first. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	memset((uint8_t *)buf + size - bytes, 0xff, bytes);
	memset(buf, 0x00, size - bytes);
	((uint8_t *)buf)[size - bytes - 1] = conv[extra];
	memset(buf, 0xff, bytes);
	memset((uint8_t *)buf + bytes, 0x00, size - bytes);
	((uint8_t *)buf)[bytes] = conv[extra];
	/* When an object mask exists it is fully set for this field. */
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* NOTE(review): arg pushed back here — presumably an error path so
	 * the caller's argument stack stays intact; confirm. */
	push_args(ctx, arg);
/** Default parsing function for token name matching. */
parse_default(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	/* Fails when the first len bytes of str do not match token->name. */
	if (strcmp_partial(token->name, str, len))
/** Parse flow command, initialize output buffer for subsequent tokens. */
parse_init(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	/* 0x22 poisons the spare area past the structure for debugging. */
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objmask = NULL;
/** Parse tokens for validate/create commands. */
parse_vc(struct context *ctx, const struct token *token,
	 const char *str, unsigned int len,
	 void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First call for this command: record it and set up the data area,
	 * which grows downward from the end of the buffer. */
	if (!out->command) {
		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
		out->args.vc.data = (uint8_t *)out + size;
	ctx->object = &out->args.vc.attr;
	ctx->objmask = NULL;
	/* Dispatch on the current sub-token. */
	switch (ctx->curr) {
		out->args.vc.attr.ingress = 1;
		out->args.vc.attr.egress = 1;
		/* Pattern list starts right after the buffer structure,
		 * aligned upward. */
		out->args.vc.pattern =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
		ctx->object = out->args.vc.pattern;
		ctx->objmask = NULL;
		/* Action list starts after the last pattern entry. */
		out->args.vc.actions =
			(void *)RTE_ALIGN_CEIL((uintptr_t)
					       (out->args.vc.pattern +
						out->args.vc.pattern_n),
		ctx->object = out->args.vc.actions;
		ctx->objmask = NULL;
	/* Pattern item: reserve spec/last/mask storage from the data area. */
	if (!out->args.vc.actions) {
		const struct parse_item_priv *priv = token->priv;
		struct rte_flow_item *item =
			out->args.vc.pattern + out->args.vc.pattern_n;
		data_size = priv->size * 3; /* spec, last, mask */
		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
					       (out->args.vc.data - data_size),
		/* Lists growing up and data growing down must not collide. */
		if ((uint8_t *)item + sizeof(*item) > data)
		*item = (struct rte_flow_item){
		++out->args.vc.pattern_n;
		ctx->objmask = NULL;
		/* Action entry: reserve configuration storage. */
		const struct parse_action_priv *priv = token->priv;
		struct rte_flow_action *action =
			out->args.vc.actions + out->args.vc.actions_n;
		data_size = priv->size; /* configuration */
		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
					       (out->args.vc.data - data_size),
		if ((uint8_t *)action + sizeof(*action) > data)
		*action = (struct rte_flow_action){
			.conf = data_size ? data : NULL,
		++out->args.vc.actions_n;
		ctx->object = action;
		ctx->objmask = NULL;
		memset(data, 0, data_size);
		out->args.vc.data = data;
		ctx->objdata = data_size;
/** Parse pattern item parameter type. */
parse_vc_spec(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_item *item;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Parse parameter types. */
	switch (ctx->curr) {
		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
	case ITEM_PARAM_SPEC:
	case ITEM_PARAM_LAST:
	case ITEM_PARAM_PREFIX:
		/* Modify next token to expect a prefix. */
		if (ctx->next_num < 2)
		ctx->next[ctx->next_num - 2] = prefix;
	case ITEM_PARAM_MASK:
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.pattern_n)
	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
	/* Data area holds three equal-sized objects per item. */
	data_size = ctx->objdata / 3; /* spec, last, mask */
	/* Point to selected object. */
	ctx->object = out->args.vc.data + (data_size * index);
	ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
	item->mask = ctx->objmask;
	ctx->objmask = NULL;
	/* Update relevant item pointer. */
	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
/** Parse action configuration field. */
parse_vc_conf(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Point to selected object (current action's configuration). */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
/** Parse RSS action. */
parse_vc_action_rss(struct context *ctx, const struct token *token,
		    const char *str, unsigned int len,
		    void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_rss_data *action_rss_data;
	/* Generic action handling first (allocates the action entry). */
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Set up default configuration. */
	action_rss_data = ctx->object;
	*action_rss_data = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.key_len = sizeof(action_rss_data->key),
			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
			.key = action_rss_data->key,
			.queue = action_rss_data->queue,
		.key = "testpmd's default RSS hash key",
	/* Default queue list covers all configured Rx queues in order. */
	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
		action_rss_data->queue[i] = i;
	/* Clamp key length to the device's supported hash key size. */
	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
	    ctx->port != (portid_t)RTE_PORT_ALL) {
		struct rte_eth_dev_info info;
		rte_eth_dev_info_get(ctx->port, &info);
		action_rss_data->conf.key_len =
			RTE_MIN(sizeof(action_rss_data->key),
				info.hash_key_size);
	action->conf = &action_rss_data->conf;
 * Parse func field for RSS action.
 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
 * ACTION_RSS_FUNC_* index that called this function.
parse_vc_action_rss_func(struct context *ctx, const struct token *token,
			 const char *str, unsigned int len,
			 void *buf, unsigned int size)
	struct action_rss_data *action_rss_data;
	enum rte_eth_hash_function func;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Map the calling token index to a hash function value. */
	switch (ctx->curr) {
	case ACTION_RSS_FUNC_DEFAULT:
		func = RTE_ETH_HASH_FUNCTION_DEFAULT;
	case ACTION_RSS_FUNC_TOEPLITZ:
		func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	case ACTION_RSS_FUNC_SIMPLE_XOR:
		func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
	action_rss_data = ctx->object;
	action_rss_data->conf.func = func;
 * Parse type field for RSS action.
 * Valid tokens are type field names and the "end" token.
parse_vc_action_rss_type(struct context *ctx, const struct token *token,
			 const char *str, unsigned int len,
			 void *buf, unsigned int size)
	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
	struct action_rss_data *action_rss_data;
	if (ctx->curr != ACTION_RSS_TYPE)
	/* The high 16 bits of objdata flag that parsing already started;
	 * reset accumulated types on the first call only. */
	if (!(ctx->objdata >> 16) && ctx->object) {
		action_rss_data = ctx->object;
		action_rss_data->conf.types = 0;
	if (!strcmp_partial("end", str, len)) {
		ctx->objdata &= 0xffff;
	/* Look the token up in the global RSS type table. */
	for (i = 0; rss_type_table[i].str; ++i)
		if (!strcmp_partial(rss_type_table[i].str, str, len))
	if (!rss_type_table[i].str)
	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
	/* Repost this token so more types can follow. */
	if (ctx->next_num == RTE_DIM(ctx->next))
	ctx->next[ctx->next_num++] = next;
	action_rss_data = ctx->object;
	action_rss_data->conf.types |= rss_type_table[i].rss_type;
 * Parse queue field for RSS action.
 * Valid tokens are queue indices and the "end" token.
parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len,
			  void *buf, unsigned int size)
	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
	struct action_rss_data *action_rss_data;
	if (ctx->curr != ACTION_RSS_QUEUE)
	/* High 16 bits of objdata carry the running queue count. */
	i = ctx->objdata >> 16;
	if (!strcmp_partial("end", str, len)) {
		ctx->objdata &= 0xffff;
	if (i >= ACTION_RSS_QUEUE_NUM)
	/* Push an argument describing queue[i] so parse_int() fills it. */
	ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
		       i * sizeof(action_rss_data->queue[i]),
		       sizeof(action_rss_data->queue[i]))))
	ret = parse_int(ctx, token, str, len, NULL, 0);
	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
	/* Repost this token so more queue indices can follow. */
	if (ctx->next_num == RTE_DIM(ctx->next))
	ctx->next[ctx->next_num++] = next;
	action_rss_data = ctx->object;
	action_rss_data->conf.queue_num = i;
	/* An empty list yields a NULL queue pointer. */
	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
/** Parse tokens for destroy command. */
parse_destroy(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First call: record command and place rule ID array after *out. */
	if (!out->command) {
		if (ctx->curr != DESTROY)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
		out->args.destroy.rule =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Subsequent calls append one rule ID; check remaining space. */
	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
	ctx->objmask = NULL;
/** Parse tokens for flush command. */
parse_flush(struct context *ctx, const struct token *token,
	    const char *str, unsigned int len,
	    void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Flush takes no extra arguments beyond the port. */
	if (!out->command) {
		if (ctx->curr != FLUSH)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
/** Parse tokens for query command. */
parse_query(struct context *ctx, const struct token *token,
	    const char *str, unsigned int len,
	    void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First call: record the command for later dispatch. */
	if (!out->command) {
		if (ctx->curr != QUERY)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
/** Parse action names. */
parse_action(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	struct buffer *out = buf;
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Parse action name: scan the next_action token list. */
	for (i = 0; next_action[i]; ++i) {
		const struct parse_action_priv *priv;
		token = &token_list[next_action[i]];
		if (strcmp_partial(token->name, str, len))
		/* Store the matched action type through the argument. */
		memcpy((uint8_t *)ctx->object + arg->offset,
	/* NOTE(review): arg pushed back — presumably the failure path. */
	push_args(ctx, arg);
/** Parse tokens for list command. */
parse_list(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First call: record command and place group ID array after *out. */
	if (!out->command) {
		if (ctx->curr != LIST)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
		out->args.list.group =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Subsequent calls append one group ID; check remaining space. */
	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
	ctx->object = out->args.list.group + out->args.list.group_n++;
	ctx->objmask = NULL;
/** Parse tokens for isolate command. */
parse_isolate(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* First call: record the command for later dispatch. */
	if (!out->command) {
		if (ctx->curr != ISOLATE)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		ctx->objmask = NULL;
 * Parse signed/unsigned integers 8 to 64-bit long.
 * Last argument (ctx->args) is retrieved to determine integer type and
parse_int(struct context *ctx, const struct token *token,
	  const char *str, unsigned int len,
	  void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Signed arguments go through strtoimax(), unsigned through
	 * strtoumax(); both accept 0x/0 prefixes (base 0). */
	(uintmax_t)strtoimax(str, &end, 0) :
	strtoumax(str, &end, 0);
	if (errno || (size_t)(end - str) != len)
	/* Range check honors the argument's signedness. */
	((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
			(intmax_t)u > (intmax_t)arg->max)) ||
	 (!arg->sign && (u < arg->min || u > arg->max))))
	/* Bit-field destinations are handled by arg_entry_bf_fill(). */
	if (!arg_entry_bf_fill(ctx->object, u, arg) ||
	    !arg_entry_bf_fill(ctx->objmask, -1, arg))
	buf = (uint8_t *)ctx->object + arg->offset;
	/* Dispatch on destination width; hton converts to big endian. */
	case sizeof(uint8_t):
		*(uint8_t *)buf = u;
	case sizeof(uint16_t):
		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
	/* 24-bit fields are written byte-wise in the right order. */
	case sizeof(uint8_t [3]):
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		((uint8_t *)buf)[0] = u;
		((uint8_t *)buf)[1] = u >> 8;
		((uint8_t *)buf)[2] = u >> 16;
		((uint8_t *)buf)[0] = u >> 16;
		((uint8_t *)buf)[1] = u >> 8;
		((uint8_t *)buf)[2] = u;
	case sizeof(uint32_t):
		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
	case sizeof(uint64_t):
		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
	/* Repeat the store into the object mask when one is present. */
	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
		buf = (uint8_t *)ctx->objmask + arg->offset;
	/* NOTE(review): arg pushed back — presumably the failure path. */
	push_args(ctx, arg);
 * Three arguments (ctx->args) are retrieved from the stack to store data,
 * its actual length and address (in that order).
parse_string(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	/* Arguments are expected. */
	/* On partial pops, restore what was taken before bailing out. */
	push_args(ctx, arg_data);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	if (arg_data->mask || size < len)
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", len);
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, str, len);
	/* Zero the unused tail of the destination field. */
	memset((uint8_t *)buf + len, 0x00, size - len);
	memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (uint8_t *)ctx->object + arg_data->offset
		memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
		       (uint8_t *)ctx->objmask + arg_data->offset
	/* NOTE(review): all three args pushed back — presumably the
	 * failure path restoring caller state. */
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
 * Parse a MAC address.
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_mac_addr(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	struct ether_addr tmp;
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
	/* The whole token must have been consumed by the parser. */
	if (ret < 0 || (unsigned int)ret != len)
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* NOTE(review): arg pushed back — presumably the failure path. */
	push_args(ctx, arg);
 * Parse an IPv4 address.
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_ipv4_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	/* inet_pton() needs a NUL-terminated copy of the token. */
	memcpy(str2, str, len);
	ret = inet_pton(AF_INET, str2, &tmp);
	/* Not dotted-quad notation: attempt integer parsing. */
	push_args(ctx, arg);
	return parse_int(ctx, token, str, len, buf, size);
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* NOTE(review): arg pushed back — presumably the failure path. */
	push_args(ctx, arg);
 * Parse an IPv6 address.
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_ipv6_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	struct in6_addr tmp;
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	/* inet_pton() needs a NUL-terminated copy of the token. */
	memcpy(str2, str, len);
	ret = inet_pton(AF_INET6, str2, &tmp);
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* NOTE(review): arg pushed back — presumably the failure path. */
	push_args(ctx, arg);
/**
 * Boolean values (even indices stand for false).
 *
 * Accepted spellings come in false/true pairs so that (index & 1) yields
 * the boolean value of a matched entry; the list is NULL-terminated.
 */
static const char *const boolean_name[] = {
	"0", "1",
	"false", "true",
	"off", "on",
	"no", "yes",
	NULL,
};
 * Parse a boolean value.
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_boolean(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Scan known boolean spellings. */
	for (i = 0; boolean_name[i]; ++i)
		if (!strcmp_partial(boolean_name[i], str, len))
	/* Process token as integer. */
	if (boolean_name[i])
		/* Even indices are false, odd are true. */
		str = i & 1 ? "1" : "0";
	push_args(ctx, arg);
	ret = parse_int(ctx, token, str, strlen(str), buf, size);
	/* Report the original token length on success. */
	return ret > 0 ? (int)len : ret;
/** Parse port and update context. */
parse_port(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	/* A scratch buffer is used when the caller did not provide one. */
	struct buffer *out = &(struct buffer){ .port = 0 };
	ctx->objmask = NULL;
	size = sizeof(*out);
	ret = parse_int(ctx, token, str, len, out, size);
	/* Remember the port ID for later completions. */
	ctx->port = out->port;
/** No completion. */
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
	/* NOTE(review): body not shown here — presumably ignores all
	 * parameters and reports zero entries; confirm. */
/** Complete boolean values. */
comp_boolean(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
	/* With a buffer, emit entry #ent; otherwise count entries. */
	for (i = 0; boolean_name[i]; ++i)
		if (buf && i == ent)
			return snprintf(buf, size, "%s", boolean_name[i]);
/** Complete action names. */
comp_action(struct context *ctx, const struct token *token,
	    unsigned int ent, char *buf, unsigned int size)
	/* With a buffer, emit entry #ent; otherwise count entries. */
	for (i = 0; next_action[i]; ++i)
		if (buf && i == ent)
			return snprintf(buf, size, "%s",
					token_list[next_action[i]].name);
/** Complete available ports. */
comp_port(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
	/* Iterate over attached Ethernet devices. */
	RTE_ETH_FOREACH_DEV(p) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", p);
/** Complete available rule IDs. */
comp_rule_id(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
	struct rte_port *port;
	struct port_flow *pf;
	/* Rule IDs exist only for a single valid port. */
	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
	    ctx->port == (portid_t)RTE_PORT_ALL)
	port = &ports[ctx->port];
	/* Walk the port's flow rule list. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", pf->id);
/** Complete type field for RSS action. */
comp_vc_action_rss_type(struct context *ctx, const struct token *token,
			unsigned int ent, char *buf, unsigned int size)
	/* Count entries in the global RSS type table. */
	for (i = 0; rss_type_table[i].str; ++i)
	return snprintf(buf, size, "%s", rss_type_table[ent].str);
	/* The list terminator token. */
	return snprintf(buf, size, "end");
/** Complete queue field for RSS action. */
comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
			 unsigned int ent, char *buf, unsigned int size)
	/* Queue indices are numeric; the list ends with "end". */
	return snprintf(buf, size, "%u", ent);
	return snprintf(buf, size, "end");
/** Internal context. */
static struct context cmd_flow_context;

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_flow;
/** Initialize context. */
cmd_flow_context_init(struct context *ctx)
	/* A full memset() is not necessary. */
	ctx->objmask = NULL;
/** Parse a token (cmdline API). */
cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
	struct context *ctx = &cmd_flow_context;
	const struct token *token;
	const enum index *list;
	token = &token_list[ctx->curr];
	/* Check argument length. */
	/* Token ends at whitespace or a '#' comment marker. */
	for (len = 0; src[len]; ++len)
		if (src[len] == '#' || isspace(src[len]))
	/* Last argument and EOL detection. */
	for (i = len; src[i]; ++i)
		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
		else if (!isspace(src[i])) {
	if (src[i] == '\r' || src[i] == '\n') {
	/* Initialize context if necessary. */
	if (!ctx->next_num) {
		ctx->next[ctx->next_num++] = token->next[0];
	/* Process argument through candidates. */
	ctx->prev = ctx->curr;
	list = ctx->next[ctx->next_num - 1];
	/* Try each candidate token's parser (or the default matcher). */
	for (i = 0; list[i]; ++i) {
		const struct token *next = &token_list[list[i]];
		ctx->curr = list[i];
		tmp = next->call(ctx, next, src, len, result, size);
		tmp = parse_default(ctx, next, src, len, result, size);
		/* The whole token must be consumed for a match. */
		if (tmp == -1 || tmp != len)
	/* Push subsequent tokens if any. */
	for (i = 0; token->next[i]; ++i) {
		if (ctx->next_num == RTE_DIM(ctx->next))
		ctx->next[ctx->next_num++] = token->next[i];
	/* Push arguments if any. */
	for (i = 0; token->args[i]; ++i) {
		if (ctx->args_num == RTE_DIM(ctx->args))
		ctx->args[ctx->args_num++] = token->args[i];
/** Return number of completion entries (cmdline API). */
cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	/* Count number of tokens in current list. */
	list = ctx->next[ctx->next_num - 1];
	list = token->next[0];
	for (i = 0; list[i]; ++i)
	/*
	 * If there is a single token, use its completion callback, otherwise
	 * return the number of entries.
	 */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, 0, NULL, 0);
/** Return a completion entry (cmdline API). */
cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
			  char *dst, unsigned int size)
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	/* Count number of tokens in current list. */
	list = ctx->next[ctx->next_num - 1];
	list = token->next[0];
	for (i = 0; list[i]; ++i)
	/* If there is a single token, use its completion callback. */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
	/* Otherwise make sure the index is valid and use defaults. */
	token = &token_list[list[index]];
	snprintf(dst, size, "%s", token->name);
	/* Save index for cmd_flow_get_help(). */
	ctx->prev = list[index];
/** Populate help strings for current token (cmdline API). */
cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];
	/* Set token type and update global help with details. */
	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	/* Prefer the token's help text; fall back on its name. */
	cmd_flow.help_str = token->help;
	cmd_flow.help_str = token->name;
/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_flow_token_hdr = {
	/* All dynamic tokens share the same callback set. */
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_flow_get_help,
/** Populate the next dynamic token. */
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
	     cmdline_parse_token_hdr_t **hdr_inst)
	struct context *ctx = &cmd_flow_context;
	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_flow.tokens))
		cmd_flow_context_init(ctx);
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && ctx->curr) {
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		for (i = 0; list[i]; ++i) {
	*hdr = &cmd_flow_token_hdr;
/** Dispatch parsed buffer to function calls. */
cmd_flow_parsed(const struct buffer *in)
	/* Route each command to its port_flow_*() implementation. */
	switch (in->command) {
		port_flow_validate(in->port, &in->args.vc.attr,
				   in->args.vc.pattern, in->args.vc.actions);
		port_flow_create(in->port, &in->args.vc.attr,
				 in->args.vc.pattern, in->args.vc.actions);
		port_flow_destroy(in->port, in->args.destroy.rule_n,
				  in->args.destroy.rule);
		port_flow_flush(in->port);
		port_flow_query(in->port, in->args.query.rule,
				in->args.query.action);
		port_flow_list(in->port, in->args.list.group_n,
			       in->args.list.group);
		port_flow_isolate(in->port, in->args.isolate.set);
/** Token generator and output processing callback (cmdline API). */
cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
	/* Generate the next token, then process a completed buffer. */
	cmd_flow_tok(arg0, arg2);
	cmd_flow_parsed(arg0);
3210 /** Global parser instance (cmdline API). */
3211 cmdline_parse_inst_t cmd_flow = {
3213 .data = NULL, /**< Unused. */
3214 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3217 }, /**< Tokens are returned by cmd_flow_tok(). */