1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_ethdev.h>
18 #include <rte_byteorder.h>
19 #include <cmdline_parse.h>
20 #include <cmdline_parse_etheraddr.h>
25 /** Parser token indices. */
45 /* Top-level command. */
48 /* Sub-level commands. */
57 /* Destroy arguments. */
60 /* Query arguments. */
66 /* Validate/create arguments. */
72 /* Validate/create pattern. */
137 ITEM_E_TAG_GRP_ECID_B,
154 /* Validate/create actions. */
184 /** Size in bytes of the pattern[] byte-string field appended to struct rte_flow_item_raw (see ITEM_RAW_SIZE). */
185 #define ITEM_RAW_PATTERN_SIZE 36
187 /** Total storage size for struct rte_flow_item_raw including the trailing pattern[] buffer: header bytes up to pattern plus ITEM_RAW_PATTERN_SIZE. */
188 #define ITEM_RAW_SIZE \
189 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
191 /** Maximum number of queue indices in struct rte_flow_action_rss; sizes the queue[] array inside union action_rss_data. */
192 #define ACTION_RSS_QUEUE_NUM 32
194 /** Storage for struct rte_flow_action_rss including external data. */
195 union action_rss_data {
196 struct rte_flow_action_rss conf;
198 uint8_t conf_data[offsetof(struct rte_flow_action_rss, queue)];
199 uint16_t queue[ACTION_RSS_QUEUE_NUM];
200 struct rte_eth_rss_conf rss_conf;
201 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
205 /** Maximum number of subsequent tokens and arguments on the stack; capacity of the next[] and args[] arrays in struct context. */
206 #define CTX_STACK_SIZE 16
208 /** Parser context. */
210 /** Stack of subsequent token lists to process. */
211 const enum index *next[CTX_STACK_SIZE];
212 /** Arguments for stacked tokens. */
213 const void *args[CTX_STACK_SIZE];
214 enum index curr; /**< Current token index. */
215 enum index prev; /**< Index of the last token seen. */
216 int next_num; /**< Number of entries in next[]. */
217 int args_num; /**< Number of entries in args[]. */
218 uint32_t eol:1; /**< EOL has been detected. */
219 uint32_t last:1; /**< No more arguments. */
220 portid_t port; /**< Current port ID (for completions). */
221 uint32_t objdata; /**< Object-specific data. */
222 void *object; /**< Address of current object for relative offsets. */
223 void *objmask; /**< Object a full mask must be written to. */
226 /** Token argument. */
228 uint32_t hton:1; /**< Use network byte ordering. */
229 uint32_t sign:1; /**< Value is signed. */
230 uint32_t bounded:1; /**< Value is bounded. */
231 uintmax_t min; /**< Minimum value if bounded. */
232 uintmax_t max; /**< Maximum value if bounded. */
233 uint32_t offset; /**< Relative offset from ctx->object. */
234 uint32_t size; /**< Field size. */
235 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
238 /** Parser token definition. */
240 /** Type displayed during completion (defaults to "TOKEN"). */
242 /** Help displayed during completion (defaults to token name). */
244 /** Private data used by parser functions. */
247 * Lists of subsequent tokens to push on the stack. Each call to the
248 * parser consumes the last entry of that stack.
250 const enum index *const *next;
251 /** Arguments stack for subsequent tokens that need them. */
252 const struct arg *const *args;
254 * Token-processing callback, returns -1 in case of error, the
255 * length of the matched string otherwise. If NULL, attempts to
256 * match the token name.
258 * If buf is not NULL, the result should be stored in it according
259 * to context. An error is returned if not large enough.
261 int (*call)(struct context *ctx, const struct token *token,
262 const char *str, unsigned int len,
263 void *buf, unsigned int size);
265 * Callback that provides possible values for this token, used for
266 * completion. Returns -1 in case of error, the number of possible
267 * values otherwise. If NULL, the token name is used.
269 * If buf is not NULL, entry index ent is written to buf and the
270 * full length of the entry is returned (same behavior as
273 int (*comp)(struct context *ctx, const struct token *token,
274 unsigned int ent, char *buf, unsigned int size);
275 /** Mandatory token name, no default value. */
279 /** Static initializer for the next field: compound literal yielding a NULL-terminated array of token-index lists. */
280 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
282 /** Static initializer for a NEXT() entry: compound literal yielding a ZERO-terminated list of token indices. */
283 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
285 /** Static initializer for the args field: compound literal yielding a NULL-terminated array of argument descriptors. */
286 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
288 /** Static initializer for ARGS() to target a field. */
289 #define ARGS_ENTRY(s, f) \
290 (&(const struct arg){ \
291 .offset = offsetof(s, f), \
292 .size = sizeof(((s *)0)->f), \
295 /** Static initializer for ARGS() to target a bit-field. */
296 #define ARGS_ENTRY_BF(s, f, b) \
297 (&(const struct arg){ \
299 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
302 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
303 #define ARGS_ENTRY_MASK(s, f, m) \
304 (&(const struct arg){ \
305 .offset = offsetof(s, f), \
306 .size = sizeof(((s *)0)->f), \
307 .mask = (const void *)(m), \
310 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
311 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
312 (&(const struct arg){ \
314 .offset = offsetof(s, f), \
315 .size = sizeof(((s *)0)->f), \
316 .mask = (const void *)(m), \
319 /** Static initializer for ARGS() to target a pointer. */
320 #define ARGS_ENTRY_PTR(s, f) \
321 (&(const struct arg){ \
322 .size = sizeof(*((s *)0)->f), \
325 /** Static initializer for ARGS() with arbitrary size. */
326 #define ARGS_ENTRY_USZ(s, f, sz) \
327 (&(const struct arg){ \
328 .offset = offsetof(s, f), \
332 /** Static initializer for ARGS() with arbitrary offset and size. */
333 #define ARGS_ENTRY_ARB(o, s) \
334 (&(const struct arg){ \
339 /** Same as ARGS_ENTRY_ARB() with bounded values. */
340 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
341 (&(const struct arg){ \
349 /** Same as ARGS_ENTRY() using network byte ordering. */
350 #define ARGS_ENTRY_HTON(s, f) \
351 (&(const struct arg){ \
353 .offset = offsetof(s, f), \
354 .size = sizeof(((s *)0)->f), \
357 /** Parser output buffer layout expected by cmd_flow_parsed(). */
359 enum index command; /**< Flow command. */
360 portid_t port; /**< Affected port ID. */
363 struct rte_flow_attr attr;
364 struct rte_flow_item *pattern;
365 struct rte_flow_action *actions;
369 } vc; /**< Validate/create arguments. */
373 } destroy; /**< Destroy arguments. */
376 enum rte_flow_action_type action;
377 } query; /**< Query arguments. */
381 } list; /**< List arguments. */
384 } isolate; /**< Isolated mode arguments. */
385 } args; /**< Command arguments. */
388 /** Private data for pattern items. */
389 struct parse_item_priv {
390 enum rte_flow_item_type type; /**< Item type. */
391 uint32_t size; /**< Size of item specification structure. */
394 #define PRIV_ITEM(t, s) \
395 (&(const struct parse_item_priv){ \
396 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
400 /** Private data for actions. */
401 struct parse_action_priv {
402 enum rte_flow_action_type type; /**< Action type. */
403 uint32_t size; /**< Size of action configuration structure. */
406 #define PRIV_ACTION(t, s) \
407 (&(const struct parse_action_priv){ \
408 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
412 static const enum index next_vc_attr[] = {
421 static const enum index next_destroy_attr[] = {
427 static const enum index next_list_attr[] = {
433 static const enum index item_param[] = {
442 static const enum index next_item[] = {
472 static const enum index item_fuzzy[] = {
478 static const enum index item_any[] = {
484 static const enum index item_vf[] = {
490 static const enum index item_port[] = {
496 static const enum index item_raw[] = {
506 static const enum index item_eth[] = {
514 static const enum index item_vlan[] = {
524 static const enum index item_ipv4[] = {
534 static const enum index item_ipv6[] = {
545 static const enum index item_icmp[] = {
552 static const enum index item_udp[] = {
559 static const enum index item_tcp[] = {
567 static const enum index item_sctp[] = {
576 static const enum index item_vxlan[] = {
582 static const enum index item_e_tag[] = {
583 ITEM_E_TAG_GRP_ECID_B,
588 static const enum index item_nvgre[] = {
594 static const enum index item_mpls[] = {
600 static const enum index item_gre[] = {
606 static const enum index item_gtp[] = {
612 static const enum index item_geneve[] = {
619 static const enum index next_action[] = {
636 static const enum index action_mark[] = {
642 static const enum index action_queue[] = {
648 static const enum index action_dup[] = {
654 static const enum index action_rss[] = {
663 static const enum index action_vf[] = {
670 static const enum index action_meter[] = {
676 static int parse_init(struct context *, const struct token *,
677 const char *, unsigned int,
678 void *, unsigned int);
679 static int parse_vc(struct context *, const struct token *,
680 const char *, unsigned int,
681 void *, unsigned int);
682 static int parse_vc_spec(struct context *, const struct token *,
683 const char *, unsigned int, void *, unsigned int);
684 static int parse_vc_conf(struct context *, const struct token *,
685 const char *, unsigned int, void *, unsigned int);
686 static int parse_vc_action_rss(struct context *, const struct token *,
687 const char *, unsigned int, void *,
689 static int parse_vc_action_rss_type(struct context *, const struct token *,
690 const char *, unsigned int, void *,
692 static int parse_vc_action_rss_queue(struct context *, const struct token *,
693 const char *, unsigned int, void *,
695 static int parse_destroy(struct context *, const struct token *,
696 const char *, unsigned int,
697 void *, unsigned int);
698 static int parse_flush(struct context *, const struct token *,
699 const char *, unsigned int,
700 void *, unsigned int);
701 static int parse_query(struct context *, const struct token *,
702 const char *, unsigned int,
703 void *, unsigned int);
704 static int parse_action(struct context *, const struct token *,
705 const char *, unsigned int,
706 void *, unsigned int);
707 static int parse_list(struct context *, const struct token *,
708 const char *, unsigned int,
709 void *, unsigned int);
710 static int parse_isolate(struct context *, const struct token *,
711 const char *, unsigned int,
712 void *, unsigned int);
713 static int parse_int(struct context *, const struct token *,
714 const char *, unsigned int,
715 void *, unsigned int);
716 static int parse_prefix(struct context *, const struct token *,
717 const char *, unsigned int,
718 void *, unsigned int);
719 static int parse_boolean(struct context *, const struct token *,
720 const char *, unsigned int,
721 void *, unsigned int);
722 static int parse_string(struct context *, const struct token *,
723 const char *, unsigned int,
724 void *, unsigned int);
725 static int parse_mac_addr(struct context *, const struct token *,
726 const char *, unsigned int,
727 void *, unsigned int);
728 static int parse_ipv4_addr(struct context *, const struct token *,
729 const char *, unsigned int,
730 void *, unsigned int);
731 static int parse_ipv6_addr(struct context *, const struct token *,
732 const char *, unsigned int,
733 void *, unsigned int);
734 static int parse_port(struct context *, const struct token *,
735 const char *, unsigned int,
736 void *, unsigned int);
737 static int comp_none(struct context *, const struct token *,
738 unsigned int, char *, unsigned int);
739 static int comp_boolean(struct context *, const struct token *,
740 unsigned int, char *, unsigned int);
741 static int comp_action(struct context *, const struct token *,
742 unsigned int, char *, unsigned int);
743 static int comp_port(struct context *, const struct token *,
744 unsigned int, char *, unsigned int);
745 static int comp_rule_id(struct context *, const struct token *,
746 unsigned int, char *, unsigned int);
747 static int comp_vc_action_rss_type(struct context *, const struct token *,
748 unsigned int, char *, unsigned int);
749 static int comp_vc_action_rss_queue(struct context *, const struct token *,
750 unsigned int, char *, unsigned int);
752 /** Token definitions. */
753 static const struct token token_list[] = {
754 /* Special tokens. */
757 .help = "null entry, abused as the entry point",
758 .next = NEXT(NEXT_ENTRY(FLOW)),
763 .help = "command may end here",
769 .help = "integer value",
774 .name = "{unsigned}",
776 .help = "unsigned integer value",
783 .help = "prefix length for bit-mask",
784 .call = parse_prefix,
790 .help = "any boolean value",
791 .call = parse_boolean,
792 .comp = comp_boolean,
797 .help = "fixed string",
798 .call = parse_string,
802 .name = "{MAC address}",
804 .help = "standard MAC address notation",
805 .call = parse_mac_addr,
809 .name = "{IPv4 address}",
810 .type = "IPV4 ADDRESS",
811 .help = "standard IPv4 address notation",
812 .call = parse_ipv4_addr,
816 .name = "{IPv6 address}",
817 .type = "IPV6 ADDRESS",
818 .help = "standard IPv6 address notation",
819 .call = parse_ipv6_addr,
825 .help = "rule identifier",
827 .comp = comp_rule_id,
832 .help = "port identifier",
837 .name = "{group_id}",
839 .help = "group identifier",
846 .help = "priority level",
850 /* Top-level command. */
853 .type = "{command} {port_id} [{arg} [...]]",
854 .help = "manage ingress/egress flow rules",
855 .next = NEXT(NEXT_ENTRY
865 /* Sub-level commands. */
868 .help = "check whether a flow rule can be created",
869 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
870 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
875 .help = "create a flow rule",
876 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
877 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
882 .help = "destroy specific flow rules",
883 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
884 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
885 .call = parse_destroy,
889 .help = "destroy all flow rules",
890 .next = NEXT(NEXT_ENTRY(PORT_ID)),
891 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
896 .help = "query an existing flow rule",
897 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
899 NEXT_ENTRY(PORT_ID)),
900 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
901 ARGS_ENTRY(struct buffer, args.query.rule),
902 ARGS_ENTRY(struct buffer, port)),
907 .help = "list existing flow rules",
908 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
909 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
914 .help = "restrict ingress traffic to the defined flow rules",
915 .next = NEXT(NEXT_ENTRY(BOOLEAN),
916 NEXT_ENTRY(PORT_ID)),
917 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
918 ARGS_ENTRY(struct buffer, port)),
919 .call = parse_isolate,
921 /* Destroy arguments. */
924 .help = "specify a rule identifier",
925 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
926 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
927 .call = parse_destroy,
929 /* Query arguments. */
933 .help = "action to query, must be part of the rule",
934 .call = parse_action,
937 /* List arguments. */
940 .help = "specify a group",
941 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
942 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
945 /* Validate/create attributes. */
948 .help = "specify a group",
949 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
950 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
955 .help = "specify a priority level",
956 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
957 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
962 .help = "affect rule to ingress",
963 .next = NEXT(next_vc_attr),
968 .help = "affect rule to egress",
969 .next = NEXT(next_vc_attr),
972 /* Validate/create pattern. */
975 .help = "submit a list of pattern items",
976 .next = NEXT(next_item),
981 .help = "match value perfectly (with full bit-mask)",
982 .call = parse_vc_spec,
984 [ITEM_PARAM_SPEC] = {
986 .help = "match value according to configured bit-mask",
987 .call = parse_vc_spec,
989 [ITEM_PARAM_LAST] = {
991 .help = "specify upper bound to establish a range",
992 .call = parse_vc_spec,
994 [ITEM_PARAM_MASK] = {
996 .help = "specify bit-mask with relevant bits set to one",
997 .call = parse_vc_spec,
999 [ITEM_PARAM_PREFIX] = {
1001 .help = "generate bit-mask from a prefix length",
1002 .call = parse_vc_spec,
1006 .help = "specify next pattern item",
1007 .next = NEXT(next_item),
1011 .help = "end list of pattern items",
1012 .priv = PRIV_ITEM(END, 0),
1013 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1018 .help = "no-op pattern item",
1019 .priv = PRIV_ITEM(VOID, 0),
1020 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1025 .help = "perform actions when pattern does not match",
1026 .priv = PRIV_ITEM(INVERT, 0),
1027 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1032 .help = "match any protocol for the current layer",
1033 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1034 .next = NEXT(item_any),
1039 .help = "number of layers covered",
1040 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1041 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1045 .help = "match packets addressed to the physical function",
1046 .priv = PRIV_ITEM(PF, 0),
1047 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1052 .help = "match packets addressed to a virtual function ID",
1053 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1054 .next = NEXT(item_vf),
1059 .help = "destination VF ID",
1060 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1061 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1065 .help = "device-specific physical port index to use",
1066 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1067 .next = NEXT(item_port),
1070 [ITEM_PORT_INDEX] = {
1072 .help = "physical port index",
1073 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1074 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1078 .help = "match an arbitrary byte string",
1079 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1080 .next = NEXT(item_raw),
1083 [ITEM_RAW_RELATIVE] = {
1085 .help = "look for pattern after the previous item",
1086 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1087 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1090 [ITEM_RAW_SEARCH] = {
1092 .help = "search pattern from offset (see also limit)",
1093 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1094 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1097 [ITEM_RAW_OFFSET] = {
1099 .help = "absolute or relative offset for pattern",
1100 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1101 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1103 [ITEM_RAW_LIMIT] = {
1105 .help = "search area limit for start of pattern",
1106 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1107 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1109 [ITEM_RAW_PATTERN] = {
1111 .help = "byte string to look for",
1112 .next = NEXT(item_raw,
1114 NEXT_ENTRY(ITEM_PARAM_IS,
1117 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1118 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1120 ITEM_RAW_PATTERN_SIZE)),
1124 .help = "match Ethernet header",
1125 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1126 .next = NEXT(item_eth),
1131 .help = "destination MAC",
1132 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1133 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1137 .help = "source MAC",
1138 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1143 .help = "EtherType",
1144 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1149 .help = "match 802.1Q/ad VLAN tag",
1150 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1151 .next = NEXT(item_vlan),
1154 [ITEM_VLAN_TPID] = {
1156 .help = "tag protocol identifier",
1157 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1162 .help = "tag control information",
1163 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1164 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1168 .help = "priority code point",
1169 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1170 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1175 .help = "drop eligible indicator",
1176 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1177 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1182 .help = "VLAN identifier",
1183 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1184 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1189 .help = "match IPv4 header",
1190 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1191 .next = NEXT(item_ipv4),
1196 .help = "type of service",
1197 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1198 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1199 hdr.type_of_service)),
1203 .help = "time to live",
1204 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1205 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1208 [ITEM_IPV4_PROTO] = {
1210 .help = "next protocol ID",
1211 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1212 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1213 hdr.next_proto_id)),
1217 .help = "source address",
1218 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1219 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1224 .help = "destination address",
1225 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1226 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1231 .help = "match IPv6 header",
1232 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1233 .next = NEXT(item_ipv6),
1238 .help = "traffic class",
1239 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1240 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1242 "\x0f\xf0\x00\x00")),
1244 [ITEM_IPV6_FLOW] = {
1246 .help = "flow label",
1247 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1248 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1250 "\x00\x0f\xff\xff")),
1252 [ITEM_IPV6_PROTO] = {
1254 .help = "protocol (next header)",
1255 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1256 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1261 .help = "hop limit",
1262 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1263 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1268 .help = "source address",
1269 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1270 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1275 .help = "destination address",
1276 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1277 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1282 .help = "match ICMP header",
1283 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1284 .next = NEXT(item_icmp),
1287 [ITEM_ICMP_TYPE] = {
1289 .help = "ICMP packet type",
1290 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1291 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1294 [ITEM_ICMP_CODE] = {
1296 .help = "ICMP packet code",
1297 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1298 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1303 .help = "match UDP header",
1304 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1305 .next = NEXT(item_udp),
1310 .help = "UDP source port",
1311 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1312 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1317 .help = "UDP destination port",
1318 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1319 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1324 .help = "match TCP header",
1325 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1326 .next = NEXT(item_tcp),
1331 .help = "TCP source port",
1332 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1333 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1338 .help = "TCP destination port",
1339 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1340 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1343 [ITEM_TCP_FLAGS] = {
1345 .help = "TCP flags",
1346 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1347 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1352 .help = "match SCTP header",
1353 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1354 .next = NEXT(item_sctp),
1359 .help = "SCTP source port",
1360 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1361 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1366 .help = "SCTP destination port",
1367 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1368 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1373 .help = "validation tag",
1374 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1375 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1378 [ITEM_SCTP_CKSUM] = {
1381 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1382 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1387 .help = "match VXLAN header",
1388 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1389 .next = NEXT(item_vxlan),
1392 [ITEM_VXLAN_VNI] = {
1394 .help = "VXLAN identifier",
1395 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1396 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1400 .help = "match E-Tag header",
1401 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1402 .next = NEXT(item_e_tag),
1405 [ITEM_E_TAG_GRP_ECID_B] = {
1406 .name = "grp_ecid_b",
1407 .help = "GRP and E-CID base",
1408 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1409 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1415 .help = "match NVGRE header",
1416 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1417 .next = NEXT(item_nvgre),
1420 [ITEM_NVGRE_TNI] = {
1422 .help = "virtual subnet ID",
1423 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1424 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1428 .help = "match MPLS header",
1429 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1430 .next = NEXT(item_mpls),
1433 [ITEM_MPLS_LABEL] = {
1435 .help = "MPLS label",
1436 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1437 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1443 .help = "match GRE header",
1444 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1445 .next = NEXT(item_gre),
1448 [ITEM_GRE_PROTO] = {
1450 .help = "GRE protocol type",
1451 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1452 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1457 .help = "fuzzy pattern match, expect faster than default",
1458 .priv = PRIV_ITEM(FUZZY,
1459 sizeof(struct rte_flow_item_fuzzy)),
1460 .next = NEXT(item_fuzzy),
1463 [ITEM_FUZZY_THRESH] = {
1465 .help = "match accuracy threshold",
1466 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1467 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1472 .help = "match GTP header",
1473 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1474 .next = NEXT(item_gtp),
1479 .help = "tunnel endpoint identifier",
1480 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1481 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1485 .help = "match GTP header",
1486 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1487 .next = NEXT(item_gtp),
1492 .help = "match GTP header",
1493 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1494 .next = NEXT(item_gtp),
1499 .help = "match GENEVE header",
1500 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1501 .next = NEXT(item_geneve),
1504 [ITEM_GENEVE_VNI] = {
1506 .help = "virtual network identifier",
1507 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1508 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1510 [ITEM_GENEVE_PROTO] = {
1512 .help = "GENEVE protocol type",
1513 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1514 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1518 /* Validate/create actions. */
1521 .help = "submit a list of associated actions",
1522 .next = NEXT(next_action),
1527 .help = "specify next action",
1528 .next = NEXT(next_action),
1532 .help = "end list of actions",
1533 .priv = PRIV_ACTION(END, 0),
1538 .help = "no-op action",
1539 .priv = PRIV_ACTION(VOID, 0),
1540 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1543 [ACTION_PASSTHRU] = {
1545 .help = "let subsequent rule process matched packets",
1546 .priv = PRIV_ACTION(PASSTHRU, 0),
1547 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1552 .help = "attach 32 bit value to packets",
1553 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1554 .next = NEXT(action_mark),
1557 [ACTION_MARK_ID] = {
1559 .help = "32 bit value to return with packets",
1560 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1561 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1562 .call = parse_vc_conf,
1566 .help = "flag packets",
1567 .priv = PRIV_ACTION(FLAG, 0),
1568 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1573 .help = "assign packets to a given queue index",
1574 .priv = PRIV_ACTION(QUEUE,
1575 sizeof(struct rte_flow_action_queue)),
1576 .next = NEXT(action_queue),
1579 [ACTION_QUEUE_INDEX] = {
1581 .help = "queue index to use",
1582 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1583 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1584 .call = parse_vc_conf,
1588 .help = "drop packets (note: passthru has priority)",
1589 .priv = PRIV_ACTION(DROP, 0),
1590 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1595 .help = "enable counters for this rule",
1596 .priv = PRIV_ACTION(COUNT, 0),
1597 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1602 .help = "duplicate packets to a given queue index",
1603 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1604 .next = NEXT(action_dup),
1607 [ACTION_DUP_INDEX] = {
1609 .help = "queue index to duplicate packets to",
1610 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1611 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1612 .call = parse_vc_conf,
1616 .help = "spread packets among several queues",
1617 .priv = PRIV_ACTION(RSS, sizeof(union action_rss_data)),
1618 .next = NEXT(action_rss),
1619 .call = parse_vc_action_rss,
1621 [ACTION_RSS_TYPES] = {
1623 .help = "RSS hash types",
1624 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1626 [ACTION_RSS_TYPE] = {
1628 .help = "RSS hash type",
1629 .call = parse_vc_action_rss_type,
1630 .comp = comp_vc_action_rss_type,
1632 [ACTION_RSS_KEY] = {
1634 .help = "RSS hash key",
1635 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1636 .args = ARGS(ARGS_ENTRY_ARB
1637 (((uintptr_t)&((union action_rss_data *)0)->
1638 s.rss_conf.rss_key_len),
1639 sizeof(((struct rte_eth_rss_conf *)0)->
1642 (((uintptr_t)((union action_rss_data *)0)->
1644 RSS_HASH_KEY_LENGTH)),
1646 [ACTION_RSS_KEY_LEN] = {
1648 .help = "RSS hash key length in bytes",
1649 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1650 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1651 (((uintptr_t)&((union action_rss_data *)0)->
1652 s.rss_conf.rss_key_len),
1653 sizeof(((struct rte_eth_rss_conf *)0)->
1656 RSS_HASH_KEY_LENGTH)),
1658 [ACTION_RSS_QUEUES] = {
1660 .help = "queue indices to use",
1661 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1662 .call = parse_vc_conf,
1664 [ACTION_RSS_QUEUE] = {
1666 .help = "queue index",
1667 .call = parse_vc_action_rss_queue,
1668 .comp = comp_vc_action_rss_queue,
1672 .help = "redirect packets to physical device function",
1673 .priv = PRIV_ACTION(PF, 0),
1674 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1679 .help = "redirect packets to virtual device function",
1680 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1681 .next = NEXT(action_vf),
1684 [ACTION_VF_ORIGINAL] = {
1686 .help = "use original VF ID if possible",
1687 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1688 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1690 .call = parse_vc_conf,
1694 .help = "VF ID to redirect packets to",
1695 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1696 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1697 .call = parse_vc_conf,
1701 .help = "meter the directed packets at given id",
1702 .priv = PRIV_ACTION(METER,
1703 sizeof(struct rte_flow_action_meter)),
1704 .next = NEXT(action_meter),
1707 [ACTION_METER_ID] = {
1709 .help = "meter id to use",
1710 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1711 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1712 .call = parse_vc_conf,
1716 /** Remove and return last entry from argument stack. */
1717 static const struct arg *
/* Returns NULL when the stack is empty; otherwise decrements args_num. */
1718 pop_args(struct context *ctx)
1720 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1723 /** Add entry on top of the argument stack. */
/* Fails (lines elided in this extract) when CTX_STACK_SIZE is reached. */
1725 push_args(struct context *ctx, const struct arg *arg)
1727 	if (ctx->args_num == CTX_STACK_SIZE)
1729 	ctx->args[ctx->args_num++] = arg;
1733 /** Spread value into buffer according to bit-mask. */
/*
 * Walks arg->mask starting from the last byte (arg->size) and copies the
 * low bits of val into the destination wherever a mask bit is set.
 * NOTE(review): loop structure and return value are partly elided in this
 * extract — confirm against the full source before relying on details.
 */
1735 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1737 	uint32_t i = arg->size;
1745 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1754 		unsigned int shift = 0;
1755 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
		/* Scan each set bit of the current mask byte. */
1757 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
1758 			if (!(arg->mask[i] & (1 << shift)))
			/* Clear target bit, then copy the next bit of val. */
1763 			*buf &= ~(1 << shift);
1764 			*buf |= (val & 1) << shift;
1772 /** Compare a string with a partial one of a given length. */
/*
 * Returns 0 when partial is an exact prefix of full AND full is no longer
 * than partial_len; otherwise returns a nonzero value (the first unmatched
 * character of full when the prefix matched but full is longer).
 */
1774 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1776 	int r = strncmp(full, partial, partial_len);
1780 	if (strlen(full) <= partial_len)
1782 	return full[partial_len];
1786  * Parse a prefix length and generate a bit-mask.
1788  * Last argument (ctx->args) is retrieved to determine mask size, storage
1789  * location and whether the result must use network byte ordering.
1792 parse_prefix(struct context *ctx, const struct token *token,
1793 	     const char *str, unsigned int len,
1794 	     void *buf, unsigned int size)
1796 	const struct arg *arg = pop_args(ctx);
	/* conv[n] is a byte with its n most significant bits set (0..8). */
1797 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1804 	/* Argument is expected. */
	/* Whole token must be a valid unsigned integer. */
1808 	u = strtoumax(str, &end, 0);
1809 	if (errno || (size_t)(end - str) != len)
	/* Bit-mask arguments use arg_entry_bf_fill() instead of memset(). */
1814 		extra = arg_entry_bf_fill(NULL, 0, arg);
1823 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1824 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
1831 	if (bytes > size || bytes + !!extra > size)
1835 	buf = (uint8_t *)ctx->object + arg->offset;
1836 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/* Little-endian: set bytes grow from the end of the field. */
1838 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1839 		memset(buf, 0x00, size - bytes);
1841 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
	/* Big-endian: set bytes grow from the start of the field. */
1845 		memset(buf, 0xff, bytes);
1846 		memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1848 			((uint8_t *)buf)[bytes] = conv[extra];
	/* Object mask (when present) is fully set for this field. */
1851 	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore argument on error paths (elided here) for re-parsing. */
1854 	push_args(ctx, arg);
1858 /** Default parsing function for token name matching. */
/* Accepts a token iff str is a valid partial/full match of token->name. */
1860 parse_default(struct context *ctx, const struct token *token,
1861 	      const char *str, unsigned int len,
1862 	      void *buf, unsigned int size)
1867 	if (strcmp_partial(token->name, str, len))
1872 /** Parse flow command, initialize output buffer for subsequent tokens. */
1874 parse_init(struct context *ctx, const struct token *token,
1875 	   const char *str, unsigned int len,
1876 	   void *buf, unsigned int size)
1878 	struct buffer *out = buf;
1880 	/* Token name must match. */
1881 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1883 	/* Nothing else to do if there is no buffer. */
1886 	/* Make sure buffer is large enough. */
1887 	if (size < sizeof(*out))
1889 	/* Initialize buffer. */
1890 	memset(out, 0x00, sizeof(*out));
	/* Poison the spare area past the header to catch stray reads. */
1891 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1894 	ctx->objmask = NULL;
1898 /** Parse tokens for validate/create commands. */
/*
 * Builds the flow rule incrementally in *out: attributes first, then the
 * pattern item array growing upward from just after *out, then the action
 * array, while per-entry configuration data grows DOWNWARD from the end of
 * the buffer (out->args.vc.data). Overflow is detected when the two meet.
 */
1900 parse_vc(struct context *ctx, const struct token *token,
1901 	 const char *str, unsigned int len,
1902 	 void *buf, unsigned int size)
1904 	struct buffer *out = buf;
1908 	/* Token name must match. */
1909 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1911 	/* Nothing else to do if there is no buffer. */
	/* First call: record command and anchor the downward data pointer. */
1914 	if (!out->command) {
1915 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1917 		if (sizeof(*out) > size)
1919 		out->command = ctx->curr;
1922 		ctx->objmask = NULL;
1923 		out->args.vc.data = (uint8_t *)out + size;
1927 	ctx->object = &out->args.vc.attr;
1928 	ctx->objmask = NULL;
1929 	switch (ctx->curr) {
1934 		out->args.vc.attr.ingress = 1;
1937 		out->args.vc.attr.egress = 1;
		/* "pattern" token: start the item array after the header. */
1940 		out->args.vc.pattern =
1941 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1943 		ctx->object = out->args.vc.pattern;
1944 		ctx->objmask = NULL;
		/* "actions" token: action array follows the item array. */
1947 		out->args.vc.actions =
1948 			(void *)RTE_ALIGN_CEIL((uintptr_t)
1949 					       (out->args.vc.pattern +
1950 						out->args.vc.pattern_n),
1952 		ctx->object = out->args.vc.actions;
1953 		ctx->objmask = NULL;
	/* Before "actions" is seen, tokens with priv data are pattern items. */
1960 	if (!out->args.vc.actions) {
1961 		const struct parse_item_priv *priv = token->priv;
1962 		struct rte_flow_item *item =
1963 			out->args.vc.pattern + out->args.vc.pattern_n;
1965 		data_size = priv->size * 3; /* spec, last, mask */
1966 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1967 					       (out->args.vc.data - data_size),
		/* Arrays growing up must not collide with data growing down. */
1969 		if ((uint8_t *)item + sizeof(*item) > data)
1971 		*item = (struct rte_flow_item){
1974 		++out->args.vc.pattern_n;
1976 		ctx->objmask = NULL;
	/* Otherwise this token is an action. */
1978 		const struct parse_action_priv *priv = token->priv;
1979 		struct rte_flow_action *action =
1980 			out->args.vc.actions + out->args.vc.actions_n;
1982 		data_size = priv->size; /* configuration */
1983 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1984 					       (out->args.vc.data - data_size),
1986 		if ((uint8_t *)action + sizeof(*action) > data)
1988 		*action = (struct rte_flow_action){
1990 			.conf = data_size ? data : NULL,
1992 		++out->args.vc.actions_n;
1993 		ctx->object = action;
1994 		ctx->objmask = NULL;
	/* Reserve and zero the per-entry data slot. */
1996 	memset(data, 0, data_size);
1997 	out->args.vc.data = data;
1998 	ctx->objdata = data_size;
2002 /** Parse pattern item parameter type. */
/*
 * Selects which third of the item's data area (spec/last/mask — see the
 * "* 3" in parse_vc()) subsequent field tokens write into, and wires the
 * corresponding rte_flow_item pointer.
 */
2004 parse_vc_spec(struct context *ctx, const struct token *token,
2005 	      const char *str, unsigned int len,
2006 	      void *buf, unsigned int size)
2008 	struct buffer *out = buf;
2009 	struct rte_flow_item *item;
2015 	/* Token name must match. */
2016 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2018 	/* Parse parameter types. */
2019 	switch (ctx->curr) {
2020 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2026 	case ITEM_PARAM_SPEC:
2029 	case ITEM_PARAM_LAST:
2032 	case ITEM_PARAM_PREFIX:
2033 		/* Modify next token to expect a prefix. */
2034 		if (ctx->next_num < 2)
2036 		ctx->next[ctx->next_num - 2] = prefix;
2038 	case ITEM_PARAM_MASK:
2044 	/* Nothing else to do if there is no buffer. */
2047 	if (!out->args.vc.pattern_n)
	/* Operate on the most recently pushed pattern item. */
2049 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2050 	data_size = ctx->objdata / 3; /* spec, last, mask */
2051 	/* Point to selected object. */
2052 	ctx->object = out->args.vc.data + (data_size * index);
2054 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2055 		item->mask = ctx->objmask;
2057 		ctx->objmask = NULL;
2058 	/* Update relevant item pointer. */
2059 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2064 /** Parse action configuration field. */
/* Points ctx->object at the action's data slot; no mask for actions. */
2066 parse_vc_conf(struct context *ctx, const struct token *token,
2067 	      const char *str, unsigned int len,
2068 	      void *buf, unsigned int size)
2070 	struct buffer *out = buf;
2073 	/* Token name must match. */
2074 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2076 	/* Nothing else to do if there is no buffer. */
2079 	/* Point to selected object. */
2080 	ctx->object = out->args.vc.data;
2081 	ctx->objmask = NULL;
2085 /** Parse RSS action. */
/*
 * Delegates token handling to parse_vc(), then fills the freshly reserved
 * union action_rss_data with defaults: testpmd's hash key, all queues up
 * to RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM), and a key length capped by the
 * device's reported hash_key_size when a specific port is selected.
 */
2087 parse_vc_action_rss(struct context *ctx, const struct token *token,
2088 		    const char *str, unsigned int len,
2089 		    void *buf, unsigned int size)
2091 	struct buffer *out = buf;
2092 	struct rte_flow_action *action;
2093 	union action_rss_data *action_rss_data;
2097 	ret = parse_vc(ctx, token, str, len, buf, size);
2100 	/* Nothing else to do if there is no buffer. */
2103 	if (!out->args.vc.actions_n)
2105 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2106 	/* Point to selected object. */
2107 	ctx->object = out->args.vc.data;
2108 	ctx->objmask = NULL;
2109 	/* Set up default configuration. */
2110 	action_rss_data = ctx->object;
2111 	*action_rss_data = (union action_rss_data){
2112 		.conf = (struct rte_flow_action_rss){
2113 			.rss_conf = &action_rss_data->s.rss_conf,
2114 			.num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2117 	action_rss_data->s.rss_conf = (struct rte_eth_rss_conf){
2118 		.rss_key = action_rss_data->s.rss_key,
2119 		.rss_key_len = sizeof(action_rss_data->s.rss_key),
	/* strncpy is deliberate: the key is a fixed-size byte buffer. */
2122 	strncpy((void *)action_rss_data->s.rss_key,
2123 		"testpmd's default RSS hash key",
2124 		sizeof(action_rss_data->s.rss_key));
2125 	for (i = 0; i < action_rss_data->conf.num; ++i)
2126 		action_rss_data->conf.queue[i] = i;
2127 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2128 	    ctx->port != (portid_t)RTE_PORT_ALL) {
2129 		struct rte_eth_dev_info info;
2131 		rte_eth_dev_info_get(ctx->port, &info);
		/* Clamp key length to what the device supports. */
2132 		action_rss_data->s.rss_conf.rss_key_len =
2133 			RTE_MIN(sizeof(action_rss_data->s.rss_key),
2134 				info.hash_key_size);
2136 	action->conf = &action_rss_data->conf;
2141  * Parse type field for RSS action.
2143  * Valid tokens are type field names and the "end" token.
2146 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2147 			 const char *str, unsigned int len,
2148 			 void *buf, unsigned int size)
2150 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2151 	union action_rss_data *action_rss_data;
2157 	if (ctx->curr != ACTION_RSS_TYPE)
	/* objdata bit 16 marks "list already started": clear rss_hf once. */
2159 	if (!(ctx->objdata >> 16) && ctx->object) {
2160 		action_rss_data = ctx->object;
2161 		action_rss_data->s.rss_conf.rss_hf = 0;
	/* "end" terminates the type list. */
2163 	if (!strcmp_partial("end", str, len)) {
2164 		ctx->objdata &= 0xffff;
	/* Look the token up in the global RSS type table. */
2167 	for (i = 0; rss_type_table[i].str; ++i)
2168 		if (!strcmp_partial(rss_type_table[i].str, str, len))
2170 	if (!rss_type_table[i].str)
2172 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
	/* Re-queue this token so more types can follow. */
2174 	if (ctx->next_num == RTE_DIM(ctx->next))
2176 	ctx->next[ctx->next_num++] = next;
2179 	action_rss_data = ctx->object;
2180 	action_rss_data->s.rss_conf.rss_hf |= rss_type_table[i].rss_type;
2185  * Parse queue field for RSS action.
2187  * Valid tokens are queue indices and the "end" token.
2190 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2191 			  const char *str, unsigned int len,
2192 			  void *buf, unsigned int size)
2194 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2195 	union action_rss_data *action_rss_data;
2202 	if (ctx->curr != ACTION_RSS_QUEUE)
	/* Upper 16 bits of objdata track how many queues were parsed. */
2204 	i = ctx->objdata >> 16;
2205 	if (!strcmp_partial("end", str, len)) {
2206 		ctx->objdata &= 0xffff;
2209 	if (i >= ACTION_RSS_QUEUE_NUM)
	/* Push an arbitrary-offset arg so parse_int() writes queue[i]. */
2212 	    ARGS_ENTRY_ARB(offsetof(struct rte_flow_action_rss,
2214 			   i * sizeof(action_rss_data->s.queue[i]),
2215 			   sizeof(action_rss_data->s.queue[i]))))
2217 	ret = parse_int(ctx, token, str, len, NULL, 0);
2223 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
	/* Re-queue this token so more queue indices can follow. */
2225 	if (ctx->next_num == RTE_DIM(ctx->next))
2227 	ctx->next[ctx->next_num++] = next;
2230 	action_rss_data = ctx->object;
2231 	action_rss_data->conf.num = i;
2235 /** Parse tokens for destroy command. */
/* First call records the command; later calls append rule IDs in-buffer. */
2237 parse_destroy(struct context *ctx, const struct token *token,
2238 	      const char *str, unsigned int len,
2239 	      void *buf, unsigned int size)
2241 	struct buffer *out = buf;
2243 	/* Token name must match. */
2244 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2246 	/* Nothing else to do if there is no buffer. */
2249 	if (!out->command) {
2250 		if (ctx->curr != DESTROY)
2252 		if (sizeof(*out) > size)
2254 		out->command = ctx->curr;
2257 		ctx->objmask = NULL;
		/* Rule ID array starts right after the buffer header. */
2258 		out->args.destroy.rule =
2259 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Bounds check before appending the next rule ID slot. */
2263 	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2264 	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2267 	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2268 	ctx->objmask = NULL;
2272 /** Parse tokens for flush command. */
/* Flush takes no extra arguments beyond the port; just record the command. */
2274 parse_flush(struct context *ctx, const struct token *token,
2275 	    const char *str, unsigned int len,
2276 	    void *buf, unsigned int size)
2278 	struct buffer *out = buf;
2280 	/* Token name must match. */
2281 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2283 	/* Nothing else to do if there is no buffer. */
2286 	if (!out->command) {
2287 		if (ctx->curr != FLUSH)
2289 		if (sizeof(*out) > size)
2291 		out->command = ctx->curr;
2294 		ctx->objmask = NULL;
2299 /** Parse tokens for query command. */
/* Records the command; rule ID and action args are parsed by later tokens. */
2301 parse_query(struct context *ctx, const struct token *token,
2302 	    const char *str, unsigned int len,
2303 	    void *buf, unsigned int size)
2305 	struct buffer *out = buf;
2307 	/* Token name must match. */
2308 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2310 	/* Nothing else to do if there is no buffer. */
2313 	if (!out->command) {
2314 		if (ctx->curr != QUERY)
2316 		if (sizeof(*out) > size)
2318 		out->command = ctx->curr;
2321 		ctx->objmask = NULL;
2326 /** Parse action names. */
/*
 * Matches str against the names of all tokens listed in next_action[] and
 * stores the match (elided here) into the field described by the popped
 * argument entry.
 */
2328 parse_action(struct context *ctx, const struct token *token,
2329 	     const char *str, unsigned int len,
2330 	     void *buf, unsigned int size)
2332 	struct buffer *out = buf;
2333 	const struct arg *arg = pop_args(ctx);
2337 	/* Argument is expected. */
2340 	/* Parse action name. */
2341 	for (i = 0; next_action[i]; ++i) {
2342 		const struct parse_action_priv *priv;
2344 		token = &token_list[next_action[i]];
2345 		if (strcmp_partial(token->name, str, len))
2351 		memcpy((uint8_t *)ctx->object + arg->offset,
	/* Restore argument on failure (error path elided in this extract). */
2357 	push_args(ctx, arg);
2361 /** Parse tokens for list command. */
/* First call records the command; later calls append group IDs in-buffer. */
2363 parse_list(struct context *ctx, const struct token *token,
2364 	   const char *str, unsigned int len,
2365 	   void *buf, unsigned int size)
2367 	struct buffer *out = buf;
2369 	/* Token name must match. */
2370 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2372 	/* Nothing else to do if there is no buffer. */
2375 	if (!out->command) {
2376 		if (ctx->curr != LIST)
2378 		if (sizeof(*out) > size)
2380 		out->command = ctx->curr;
2383 		ctx->objmask = NULL;
		/* Group ID array starts right after the buffer header. */
2384 		out->args.list.group =
2385 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Bounds check before appending the next group ID slot. */
2389 	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2390 	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2393 	ctx->object = out->args.list.group + out->args.list.group_n++;
2394 	ctx->objmask = NULL;
2398 /** Parse tokens for isolate command. */
/* Records the command; the enable/disable flag is parsed by a later token. */
2400 parse_isolate(struct context *ctx, const struct token *token,
2401 	      const char *str, unsigned int len,
2402 	      void *buf, unsigned int size)
2404 	struct buffer *out = buf;
2406 	/* Token name must match. */
2407 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2409 	/* Nothing else to do if there is no buffer. */
2412 	if (!out->command) {
2413 		if (ctx->curr != ISOLATE)
2415 		if (sizeof(*out) > size)
2417 		out->command = ctx->curr;
2420 		ctx->objmask = NULL;
2426  * Parse signed/unsigned integers 8 to 64-bit long.
2428  * Last argument (ctx->args) is retrieved to determine integer type and
2432 parse_int(struct context *ctx, const struct token *token,
2433 	  const char *str, unsigned int len,
2434 	  void *buf, unsigned int size)
2436 	const struct arg *arg = pop_args(ctx);
2441 	/* Argument is expected. */
	/* Choose signed/unsigned conversion based on arg->sign. */
2446 		(uintmax_t)strtoimax(str, &end, 0) :
2447 		strtoumax(str, &end, 0);
2448 	if (errno || (size_t)(end - str) != len)
	/* Range check against arg->min/arg->max in the proper signedness. */
2451 	    ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2452 			    (intmax_t)u > (intmax_t)arg->max)) ||
2453 	     (!arg->sign && (u < arg->min || u > arg->max))))
	/* Bit-mask fields go through arg_entry_bf_fill(). */
2458 		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2459 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
2463 	buf = (uint8_t *)ctx->object + arg->offset;
	/* Store at native width; arg->hton selects network byte order. */
2467 	case sizeof(uint8_t):
2468 		*(uint8_t *)buf = u;
2470 	case sizeof(uint16_t):
2471 		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
	/* 24-bit fields are stored byte by byte. */
2473 	case sizeof(uint8_t [3]):
2474 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2476 			((uint8_t *)buf)[0] = u;
2477 			((uint8_t *)buf)[1] = u >> 8;
2478 			((uint8_t *)buf)[2] = u >> 16;
2482 			((uint8_t *)buf)[0] = u >> 16;
2483 			((uint8_t *)buf)[1] = u >> 8;
2484 			((uint8_t *)buf)[2] = u;
2486 	case sizeof(uint32_t):
2487 		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2489 	case sizeof(uint64_t):
2490 		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
	/* Repeat the store into the mask object when one is selected. */
2495 	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2497 		buf = (uint8_t *)ctx->objmask + arg->offset;
	/* Restore argument on error paths (elided here) for re-parsing. */
2502 	push_args(ctx, arg);
2509  * Two arguments (ctx->args) are retrieved from the stack to store data and
2510  * its length (in that order).
2513 parse_string(struct context *ctx, const struct token *token,
2514 	     const char *str, unsigned int len,
2515 	     void *buf, unsigned int size)
2517 	const struct arg *arg_data = pop_args(ctx);
2518 	const struct arg *arg_len = pop_args(ctx);
2519 	char tmp[16]; /* Ought to be enough. */
2522 	/* Arguments are expected. */
	/* Restore arg_data when arg_len is missing (error path elided). */
2526 		push_args(ctx, arg_data);
2529 	size = arg_data->size;
2530 	/* Bit-mask fill is not supported. */
2531 	if (arg_data->mask || size < len)
2535 	/* Let parse_int() fill length information first. */
2536 	ret = snprintf(tmp, sizeof(tmp), "%u", len);
2539 	push_args(ctx, arg_len);
2540 	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2545 	buf = (uint8_t *)ctx->object + arg_data->offset;
2546 	/* Output buffer is not necessarily NUL-terminated. */
2547 	memcpy(buf, str, len);
2548 	memset((uint8_t *)buf + len, 0x00, size - len);
	/* Mask covers only the bytes actually written. */
2550 		memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
	/* Restore both arguments on failure, in pop order. */
2553 	push_args(ctx, arg_len);
2554 	push_args(ctx, arg_data);
2559  * Parse a MAC address.
2561  * Last argument (ctx->args) is retrieved to determine storage size and
2565 parse_mac_addr(struct context *ctx, const struct token *token,
2566 	       const char *str, unsigned int len,
2567 	       void *buf, unsigned int size)
2569 	const struct arg *arg = pop_args(ctx);
2570 	struct ether_addr tmp;
2574 	/* Argument is expected. */
2578 	/* Bit-mask fill is not supported. */
2579 	if (arg->mask || size != sizeof(tmp))
2581 	/* Only network endian is supported. */
	/* Must consume exactly len characters to be a valid match. */
2584 	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2585 	if (ret < 0 || (unsigned int)ret != len)
2589 	buf = (uint8_t *)ctx->object + arg->offset;
2590 	memcpy(buf, &tmp, size);
2592 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore argument on error paths (elided here) for re-parsing. */
2595 	push_args(ctx, arg);
2600  * Parse an IPv4 address.
2602  * Last argument (ctx->args) is retrieved to determine storage size and
2606 parse_ipv4_addr(struct context *ctx, const struct token *token,
2607 		const char *str, unsigned int len,
2608 		void *buf, unsigned int size)
2610 	const struct arg *arg = pop_args(ctx);
2615 	/* Argument is expected. */
2619 	/* Bit-mask fill is not supported. */
2620 	if (arg->mask || size != sizeof(tmp))
2622 	/* Only network endian is supported. */
	/* Copy token into a NUL-terminated scratch buffer for inet_pton(). */
2625 	memcpy(str2, str, len);
2627 	ret = inet_pton(AF_INET, str2, &tmp);
2629 		/* Attempt integer parsing. */
2630 		push_args(ctx, arg);
2631 		return parse_int(ctx, token, str, len, buf, size);
2635 	buf = (uint8_t *)ctx->object + arg->offset;
2636 	memcpy(buf, &tmp, size);
2638 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore argument on error paths (elided here) for re-parsing. */
2641 	push_args(ctx, arg);
2646  * Parse an IPv6 address.
2648  * Last argument (ctx->args) is retrieved to determine storage size and
2652 parse_ipv6_addr(struct context *ctx, const struct token *token,
2653 		const char *str, unsigned int len,
2654 		void *buf, unsigned int size)
2656 	const struct arg *arg = pop_args(ctx);
2658 	struct in6_addr tmp;
2662 	/* Argument is expected. */
2666 	/* Bit-mask fill is not supported. */
2667 	if (arg->mask || size != sizeof(tmp))
2669 	/* Only network endian is supported. */
	/* Copy token into a NUL-terminated scratch buffer for inet_pton(). */
2672 	memcpy(str2, str, len);
2674 	ret = inet_pton(AF_INET6, str2, &tmp);
2679 	buf = (uint8_t *)ctx->object + arg->offset;
2680 	memcpy(buf, &tmp, size);
2682 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore argument on error paths (elided here) for re-parsing. */
2685 	push_args(ctx, arg);
2689 /** Boolean values (even indices stand for false). */
/* NOTE(review): entries elided in this extract; list is NULL-terminated
 * per its use in parse_boolean()/comp_boolean() below — confirm contents
 * against the full source. */
2690 static const char *const boolean_name[] = {
2699  * Parse a boolean value.
2701  * Last argument (ctx->args) is retrieved to determine storage size and
2705 parse_boolean(struct context *ctx, const struct token *token,
2706 	      const char *str, unsigned int len,
2707 	      void *buf, unsigned int size)
2709 	const struct arg *arg = pop_args(ctx);
2713 	/* Argument is expected. */
	/* Try symbolic names first; table is NULL-terminated. */
2716 	for (i = 0; boolean_name[i]; ++i)
2717 		if (!strcmp_partial(boolean_name[i], str, len))
2719 	/* Process token as integer. */
	/* Even table indices mean false, odd mean true. */
2720 	if (boolean_name[i])
2721 		str = i & 1 ? "1" : "0";
2722 	push_args(ctx, arg);
2723 	ret = parse_int(ctx, token, str, strlen(str), buf, size);
	/* Report the original token length, not that of "0"/"1". */
2724 	return ret > 0 ? (int)len : ret;
2727 /** Parse port and update context. */
/* Parses the port ID into a throwaway buffer, then caches it in ctx->port
 * so later completions (queues, rule IDs) can query the right device. */
2729 parse_port(struct context *ctx, const struct token *token,
2730 	   const char *str, unsigned int len,
2731 	   void *buf, unsigned int size)
2733 	struct buffer *out = &(struct buffer){ .port = 0 };
2741 	ctx->objmask = NULL;
2742 	size = sizeof(*out);
2744 	ret = parse_int(ctx, token, str, len, out, size);
2746 		ctx->port = out->port;
2752 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest. */
2754 comp_none(struct context *ctx, const struct token *token,
2755 	  unsigned int ent, char *buf, unsigned int size)
2765 /** Complete boolean values. */
/* With buf: writes entry #ent; without buf: counts entries (tail elided). */
2767 comp_boolean(struct context *ctx, const struct token *token,
2768 	     unsigned int ent, char *buf, unsigned int size)
2774 	for (i = 0; boolean_name[i]; ++i)
2775 		if (buf && i == ent)
2776 			return snprintf(buf, size, "%s", boolean_name[i]);
2782 /** Complete action names. */
/* Iterates next_action[]; with buf writes entry #ent, else counts. */
2784 comp_action(struct context *ctx, const struct token *token,
2785 	    unsigned int ent, char *buf, unsigned int size)
2791 	for (i = 0; next_action[i]; ++i)
2792 		if (buf && i == ent)
2793 			return snprintf(buf, size, "%s",
2794 					token_list[next_action[i]].name);
2800 /** Complete available ports. */
/* Enumerates attached ethdev ports; with buf writes entry #ent. */
2802 comp_port(struct context *ctx, const struct token *token,
2803 	  unsigned int ent, char *buf, unsigned int size)
2810 	RTE_ETH_FOREACH_DEV(p) {
2811 		if (buf && i == ent)
2812 			return snprintf(buf, size, "%u", p);
2820 /** Complete available rule IDs. */
/* Walks the flow list of the port cached in ctx->port by parse_port(). */
2822 comp_rule_id(struct context *ctx, const struct token *token,
2823 	     unsigned int ent, char *buf, unsigned int size)
2826 	struct rte_port *port;
2827 	struct port_flow *pf;
	/* No suggestions without a specific, valid port. */
2830 	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2831 	    ctx->port == (portid_t)RTE_PORT_ALL)
2833 	port = &ports[ctx->port];
2834 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2835 		if (buf && i == ent)
2836 			return snprintf(buf, size, "%u", pf->id);
2844 /** Complete type field for RSS action. */
/* Suggests each rss_type_table[] name, then the "end" terminator. */
2846 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
2847 			unsigned int ent, char *buf, unsigned int size)
2853 	for (i = 0; rss_type_table[i].str; ++i)
2858 		return snprintf(buf, size, "%s", rss_type_table[ent].str);
2860 		return snprintf(buf, size, "end");
2864 /** Complete queue field for RSS action. */
/* Suggests numeric queue indices, then the "end" terminator. */
2866 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2867 			 unsigned int ent, char *buf, unsigned int size)
2874 		return snprintf(buf, size, "%u", ent);
2876 		return snprintf(buf, size, "end");
2880 /** Internal context. */
/* Single shared parser state: the cmdline API callbacks below all operate
 * on this one instance, so parsing is not reentrant. */
2881 static struct context cmd_flow_context;
2883 /** Global parser instance (cmdline API). */
/* Forward declaration; the definition appears at the end of the file. */
2884 cmdline_parse_inst_t cmd_flow;
2886 /** Initialize context. */
/* Resets the fields cmd_flow_parse() reads (field stores elided here). */
2888 cmd_flow_context_init(struct context *ctx)
2890 	/* A full memset() is not necessary. */
2900 	ctx->objmask = NULL;
2903 /** Parse a token (cmdline API). */
/*
 * Splits the next whitespace/'#'-delimited token out of src, detects EOL
 * and last-argument conditions, then tries each candidate token on top of
 * ctx->next[] until one's parser accepts the input. On success the matched
 * token's follow-up token lists and argument entries are pushed.
 */
2905 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2908 	struct context *ctx = &cmd_flow_context;
2909 	const struct token *token;
2910 	const enum index *list;
2915 	token = &token_list[ctx->curr];
2916 	/* Check argument length. */
2919 	for (len = 0; src[len]; ++len)
2920 		if (src[len] == '#' || isspace(src[len]))
2924 	/* Last argument and EOL detection. */
2925 	for (i = len; src[i]; ++i)
2926 		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2928 		else if (!isspace(src[i])) {
2933 	if (src[i] == '\r' || src[i] == '\n') {
2937 	/* Initialize context if necessary. */
2938 	if (!ctx->next_num) {
2941 		ctx->next[ctx->next_num++] = token->next[0];
2943 	/* Process argument through candidates. */
2944 	ctx->prev = ctx->curr;
2945 	list = ctx->next[ctx->next_num - 1];
2946 	for (i = 0; list[i]; ++i) {
2947 		const struct token *next = &token_list[list[i]];
2950 		ctx->curr = list[i];
		/* Tokens without a call handler fall back to name matching. */
2952 			tmp = next->call(ctx, next, src, len, result, size);
2954 			tmp = parse_default(ctx, next, src, len, result, size);
		/* Candidate must consume the whole token to be accepted. */
2955 		if (tmp == -1 || tmp != len)
2963 	/* Push subsequent tokens if any. */
2965 	for (i = 0; token->next[i]; ++i) {
2966 		if (ctx->next_num == RTE_DIM(ctx->next))
2968 		ctx->next[ctx->next_num++] = token->next[i];
2970 	/* Push arguments if any. */
2972 	for (i = 0; token->args[i]; ++i) {
2973 		if (ctx->args_num == RTE_DIM(ctx->args))
2975 		ctx->args[ctx->args_num++] = token->args[i];
2980 /** Return number of completion entries (cmdline API). */
2982 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2984 	struct context *ctx = &cmd_flow_context;
2985 	const struct token *token = &token_list[ctx->curr];
2986 	const enum index *list;
2990 	/* Count number of tokens in current list. */
	/* Use the stacked list when present, else the token's first list. */
2992 		list = ctx->next[ctx->next_num - 1];
2994 		list = token->next[0];
2995 	for (i = 0; list[i]; ++i)
3000 	 * If there is a single token, use its completion callback, otherwise
3001 	 * return the number of entries.
3003 	token = &token_list[list[0]];
3004 	if (i == 1 && token->comp) {
3005 		/* Save index for cmd_flow_get_help(). */
3006 		ctx->prev = list[0];
		/* NULL buf asks the callback for its entry count. */
3007 		return token->comp(ctx, token, 0, NULL, 0);
3012 /** Return a completion entry (cmdline API). */
3014 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3015 			  char *dst, unsigned int size)
3017 	struct context *ctx = &cmd_flow_context;
3018 	const struct token *token = &token_list[ctx->curr];
3019 	const enum index *list;
3023 	/* Count number of tokens in current list. */
	/* Use the stacked list when present, else the token's first list. */
3025 		list = ctx->next[ctx->next_num - 1];
3027 		list = token->next[0];
3028 	for (i = 0; list[i]; ++i)
3032 	/* If there is a single token, use its completion callback. */
3033 	token = &token_list[list[0]];
3034 	if (i == 1 && token->comp) {
3035 		/* Save index for cmd_flow_get_help(). */
3036 		ctx->prev = list[0];
3037 		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3039 	/* Otherwise make sure the index is valid and use defaults. */
3042 	token = &token_list[list[index]];
3043 	snprintf(dst, size, "%s", token->name);
3044 	/* Save index for cmd_flow_get_help(). */
3045 	ctx->prev = list[index];
3049 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks above) so the help
 * matches the token the user was last offered. */
3051 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3053 	struct context *ctx = &cmd_flow_context;
3054 	const struct token *token = &token_list[ctx->prev];
3059 	/* Set token type and update global help with details. */
3060 	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	/* Prefer the detailed help text; fall back to the token name. */
3062 		cmd_flow.help_str = token->help;
3064 		cmd_flow.help_str = token->name;
3068 /** Token definition template (cmdline API). */
/* Single shared header: every dynamic token returned by cmd_flow_tok()
 * points here, routing all cmdline callbacks through the functions above. */
3069 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3070 	.ops = &(struct cmdline_token_ops){
3071 		.parse = cmd_flow_parse,
3072 		.complete_get_nb = cmd_flow_complete_get_nb,
3073 		.complete_get_elt = cmd_flow_complete_get_elt,
3074 		.get_help = cmd_flow_get_help,
3079 /** Populate the next dynamic token. */
3081 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3082 	     cmdline_parse_token_hdr_t **hdr_inst)
3084 	struct context *ctx = &cmd_flow_context;
3086 	/* Always reinitialize context before requesting the first token. */
3087 	if (!(hdr_inst - cmd_flow.tokens))
3088 		cmd_flow_context_init(ctx);
3089 	/* Return NULL when no more tokens are expected. */
3090 	if (!ctx->next_num && ctx->curr) {
3094 	/* Determine if command should end here. */
3095 	if (ctx->eol && ctx->last && ctx->next_num) {
3096 		const enum index *list = ctx->next[ctx->next_num - 1];
3099 		for (i = 0; list[i]; ++i) {
	/* Hand back the shared template header for the next token. */
3106 	*hdr = &cmd_flow_token_hdr;
3109 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command buffer to the matching port_flow_*() API. */
3111 cmd_flow_parsed(const struct buffer *in)
3113 	switch (in->command) {
3115 		port_flow_validate(in->port, &in->args.vc.attr,
3116 				   in->args.vc.pattern, in->args.vc.actions);
3119 		port_flow_create(in->port, &in->args.vc.attr,
3120 				 in->args.vc.pattern, in->args.vc.actions);
3123 		port_flow_destroy(in->port, in->args.destroy.rule_n,
3124 				  in->args.destroy.rule);
3127 		port_flow_flush(in->port);
3130 		port_flow_query(in->port, in->args.query.rule,
3131 				in->args.query.action);
3134 		port_flow_list(in->port, in->args.list.group_n,
3135 			       in->args.list.group);
3138 		port_flow_isolate(in->port, in->args.isolate.set);
3145 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as token storage and the parsed buffer — NOTE(review):
 * the branch selecting between the two calls is elided in this extract. */
3147 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3150 		cmd_flow_tok(arg0, arg2);
3152 		cmd_flow_parsed(arg0);
3155 /** Global parser instance (cmdline API). */
3156 cmdline_parse_inst_t cmd_flow = {
3158 .data = NULL, /**< Unused. */
3159 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3162 }, /**< Tokens are returned by cmd_flow_tok(). */