1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_ethdev.h>
18 #include <rte_byteorder.h>
19 #include <cmdline_parse.h>
20 #include <cmdline_parse_etheraddr.h>
25 /** Parser token indices. */
45 /* Top-level command. */
48 /* Sub-level commands. */
57 /* Destroy arguments. */
60 /* Query arguments. */
66 /* Validate/create arguments. */
72 /* Validate/create pattern. */
137 ITEM_E_TAG_GRP_ECID_B,
154 /* Validate/create actions. */
182 /** Maximum size for pattern in struct rte_flow_item_raw. */
183 #define ITEM_RAW_PATTERN_SIZE 40
185 /** Storage size for struct rte_flow_item_raw including pattern. */
186 #define ITEM_RAW_SIZE \
187 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
189 /** Maximum number of queue indices in struct rte_flow_action_rss. */
190 #define ACTION_RSS_QUEUE_NUM 32
192 /** Storage for struct rte_flow_action_rss including external data. */
193 struct action_rss_data {
194 struct rte_flow_action_rss conf; /**< RSS action configuration. */
195 uint8_t key[RSS_HASH_KEY_LENGTH]; /**< Hash key storage; presumably pointed to by conf — confirm against full source. */
196 uint16_t queue[ACTION_RSS_QUEUE_NUM]; /**< Target queue indices storage (up to ACTION_RSS_QUEUE_NUM entries). */
199 /** Maximum number of subsequent tokens and arguments on the stack. */
200 #define CTX_STACK_SIZE 16
202 /** Parser context. */
204 /** Stack of subsequent token lists to process. */
205 const enum index *next[CTX_STACK_SIZE];
206 /** Arguments for stacked tokens. */
207 const void *args[CTX_STACK_SIZE];
208 enum index curr; /**< Current token index. */
209 enum index prev; /**< Index of the last token seen. */
210 int next_num; /**< Number of entries in next[]. */
211 int args_num; /**< Number of entries in args[]. */
212 uint32_t eol:1; /**< EOL has been detected. */
213 uint32_t last:1; /**< No more arguments. */
214 portid_t port; /**< Current port ID (for completions). */
215 uint32_t objdata; /**< Object-specific data. */
216 void *object; /**< Address of current object for relative offsets. */
217 void *objmask; /**< Object a full mask must be written to. */
220 /** Token argument. */
222 uint32_t hton:1; /**< Use network byte ordering. */
223 uint32_t sign:1; /**< Value is signed. */
224 uint32_t bounded:1; /**< Value is bounded. */
225 uintmax_t min; /**< Minimum value if bounded. */
226 uintmax_t max; /**< Maximum value if bounded. */
227 uint32_t offset; /**< Relative offset from ctx->object. */
228 uint32_t size; /**< Field size. */
229 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
232 /** Parser token definition. */
234 /** Type displayed during completion (defaults to "TOKEN"). */
236 /** Help displayed during completion (defaults to token name). */
238 /** Private data used by parser functions. */
241 * Lists of subsequent tokens to push on the stack. Each call to the
242 * parser consumes the last entry of that stack.
244 const enum index *const *next;
245 /** Arguments stack for subsequent tokens that need them. */
246 const struct arg *const *args;
248 * Token-processing callback, returns -1 in case of error, the
249 * length of the matched string otherwise. If NULL, attempts to
250 * match the token name.
252 * If buf is not NULL, the result should be stored in it according
253 * to context. An error is returned if not large enough.
255 int (*call)(struct context *ctx, const struct token *token,
256 const char *str, unsigned int len,
257 void *buf, unsigned int size);
259 * Callback that provides possible values for this token, used for
260 * completion. Returns -1 in case of error, the number of possible
261 * values otherwise. If NULL, the token name is used.
263 * If buf is not NULL, entry index ent is written to buf and the
264 * full length of the entry is returned (same behavior as
267 int (*comp)(struct context *ctx, const struct token *token,
268 unsigned int ent, char *buf, unsigned int size);
269 /** Mandatory token name, no default value. */
273 /** Static initializer for the next field. */
274 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
276 /** Static initializer for a NEXT() entry. */
277 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
279 /** Static initializer for the args field. */
280 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
282 /** Static initializer for ARGS() to target a field. */
283 #define ARGS_ENTRY(s, f) \
284 (&(const struct arg){ \
285 .offset = offsetof(s, f), \
286 .size = sizeof(((s *)0)->f), \
289 /** Static initializer for ARGS() to target a bit-field. */
290 #define ARGS_ENTRY_BF(s, f, b) \
291 (&(const struct arg){ \
293 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
296 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
297 #define ARGS_ENTRY_MASK(s, f, m) \
298 (&(const struct arg){ \
299 .offset = offsetof(s, f), \
300 .size = sizeof(((s *)0)->f), \
301 .mask = (const void *)(m), \
304 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
305 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
306 (&(const struct arg){ \
308 .offset = offsetof(s, f), \
309 .size = sizeof(((s *)0)->f), \
310 .mask = (const void *)(m), \
313 /** Static initializer for ARGS() to target a pointer. */
314 #define ARGS_ENTRY_PTR(s, f) \
315 (&(const struct arg){ \
316 .size = sizeof(*((s *)0)->f), \
319 /** Static initializer for ARGS() with arbitrary offset and size. */
320 #define ARGS_ENTRY_ARB(o, s) \
321 (&(const struct arg){ \
326 /** Same as ARGS_ENTRY_ARB() with bounded values. */
327 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
328 (&(const struct arg){ \
336 /** Same as ARGS_ENTRY() using network byte ordering. */
337 #define ARGS_ENTRY_HTON(s, f) \
338 (&(const struct arg){ \
340 .offset = offsetof(s, f), \
341 .size = sizeof(((s *)0)->f), \
344 /** Parser output buffer layout expected by cmd_flow_parsed(). */
346 enum index command; /**< Flow command. */
347 portid_t port; /**< Affected port ID. */
350 struct rte_flow_attr attr;
351 struct rte_flow_item *pattern;
352 struct rte_flow_action *actions;
356 } vc; /**< Validate/create arguments. */
360 } destroy; /**< Destroy arguments. */
363 enum rte_flow_action_type action;
364 } query; /**< Query arguments. */
368 } list; /**< List arguments. */
371 } isolate; /**< Isolated mode arguments. */
372 } args; /**< Command arguments. */
375 /** Private data for pattern items. */
376 struct parse_item_priv {
377 enum rte_flow_item_type type; /**< Item type. */
378 uint32_t size; /**< Size of item specification structure. */
381 #define PRIV_ITEM(t, s) \
382 (&(const struct parse_item_priv){ \
383 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
387 /** Private data for actions. */
388 struct parse_action_priv {
389 enum rte_flow_action_type type; /**< Action type. */
390 uint32_t size; /**< Size of action configuration structure. */
393 #define PRIV_ACTION(t, s) \
394 (&(const struct parse_action_priv){ \
395 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
399 static const enum index next_vc_attr[] = {
408 static const enum index next_destroy_attr[] = {
414 static const enum index next_list_attr[] = {
420 static const enum index item_param[] = {
429 static const enum index next_item[] = {
459 static const enum index item_fuzzy[] = {
465 static const enum index item_any[] = {
471 static const enum index item_vf[] = {
477 static const enum index item_port[] = {
483 static const enum index item_raw[] = {
493 static const enum index item_eth[] = {
501 static const enum index item_vlan[] = {
511 static const enum index item_ipv4[] = {
521 static const enum index item_ipv6[] = {
532 static const enum index item_icmp[] = {
539 static const enum index item_udp[] = {
546 static const enum index item_tcp[] = {
554 static const enum index item_sctp[] = {
563 static const enum index item_vxlan[] = {
569 static const enum index item_e_tag[] = {
570 ITEM_E_TAG_GRP_ECID_B,
575 static const enum index item_nvgre[] = {
581 static const enum index item_mpls[] = {
587 static const enum index item_gre[] = {
593 static const enum index item_gtp[] = {
599 static const enum index item_geneve[] = {
606 static const enum index next_action[] = {
622 static const enum index action_mark[] = {
628 static const enum index action_queue[] = {
634 static const enum index action_rss[] = {
643 static const enum index action_vf[] = {
650 static const enum index action_meter[] = {
656 static int parse_init(struct context *, const struct token *,
657 const char *, unsigned int,
658 void *, unsigned int);
659 static int parse_vc(struct context *, const struct token *,
660 const char *, unsigned int,
661 void *, unsigned int);
662 static int parse_vc_spec(struct context *, const struct token *,
663 const char *, unsigned int, void *, unsigned int);
664 static int parse_vc_conf(struct context *, const struct token *,
665 const char *, unsigned int, void *, unsigned int);
666 static int parse_vc_action_rss(struct context *, const struct token *,
667 const char *, unsigned int, void *,
669 static int parse_vc_action_rss_type(struct context *, const struct token *,
670 const char *, unsigned int, void *,
672 static int parse_vc_action_rss_queue(struct context *, const struct token *,
673 const char *, unsigned int, void *,
675 static int parse_destroy(struct context *, const struct token *,
676 const char *, unsigned int,
677 void *, unsigned int);
678 static int parse_flush(struct context *, const struct token *,
679 const char *, unsigned int,
680 void *, unsigned int);
681 static int parse_query(struct context *, const struct token *,
682 const char *, unsigned int,
683 void *, unsigned int);
684 static int parse_action(struct context *, const struct token *,
685 const char *, unsigned int,
686 void *, unsigned int);
687 static int parse_list(struct context *, const struct token *,
688 const char *, unsigned int,
689 void *, unsigned int);
690 static int parse_isolate(struct context *, const struct token *,
691 const char *, unsigned int,
692 void *, unsigned int);
693 static int parse_int(struct context *, const struct token *,
694 const char *, unsigned int,
695 void *, unsigned int);
696 static int parse_prefix(struct context *, const struct token *,
697 const char *, unsigned int,
698 void *, unsigned int);
699 static int parse_boolean(struct context *, const struct token *,
700 const char *, unsigned int,
701 void *, unsigned int);
702 static int parse_string(struct context *, const struct token *,
703 const char *, unsigned int,
704 void *, unsigned int);
705 static int parse_mac_addr(struct context *, const struct token *,
706 const char *, unsigned int,
707 void *, unsigned int);
708 static int parse_ipv4_addr(struct context *, const struct token *,
709 const char *, unsigned int,
710 void *, unsigned int);
711 static int parse_ipv6_addr(struct context *, const struct token *,
712 const char *, unsigned int,
713 void *, unsigned int);
714 static int parse_port(struct context *, const struct token *,
715 const char *, unsigned int,
716 void *, unsigned int);
717 static int comp_none(struct context *, const struct token *,
718 unsigned int, char *, unsigned int);
719 static int comp_boolean(struct context *, const struct token *,
720 unsigned int, char *, unsigned int);
721 static int comp_action(struct context *, const struct token *,
722 unsigned int, char *, unsigned int);
723 static int comp_port(struct context *, const struct token *,
724 unsigned int, char *, unsigned int);
725 static int comp_rule_id(struct context *, const struct token *,
726 unsigned int, char *, unsigned int);
727 static int comp_vc_action_rss_type(struct context *, const struct token *,
728 unsigned int, char *, unsigned int);
729 static int comp_vc_action_rss_queue(struct context *, const struct token *,
730 unsigned int, char *, unsigned int);
732 /** Token definitions. */
733 static const struct token token_list[] = {
734 /* Special tokens. */
737 .help = "null entry, abused as the entry point",
738 .next = NEXT(NEXT_ENTRY(FLOW)),
743 .help = "command may end here",
749 .help = "integer value",
754 .name = "{unsigned}",
756 .help = "unsigned integer value",
763 .help = "prefix length for bit-mask",
764 .call = parse_prefix,
770 .help = "any boolean value",
771 .call = parse_boolean,
772 .comp = comp_boolean,
777 .help = "fixed string",
778 .call = parse_string,
782 .name = "{MAC address}",
784 .help = "standard MAC address notation",
785 .call = parse_mac_addr,
789 .name = "{IPv4 address}",
790 .type = "IPV4 ADDRESS",
791 .help = "standard IPv4 address notation",
792 .call = parse_ipv4_addr,
796 .name = "{IPv6 address}",
797 .type = "IPV6 ADDRESS",
798 .help = "standard IPv6 address notation",
799 .call = parse_ipv6_addr,
805 .help = "rule identifier",
807 .comp = comp_rule_id,
812 .help = "port identifier",
817 .name = "{group_id}",
819 .help = "group identifier",
826 .help = "priority level",
830 /* Top-level command. */
833 .type = "{command} {port_id} [{arg} [...]]",
834 .help = "manage ingress/egress flow rules",
835 .next = NEXT(NEXT_ENTRY
845 /* Sub-level commands. */
848 .help = "check whether a flow rule can be created",
849 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
850 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
855 .help = "create a flow rule",
856 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
857 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
862 .help = "destroy specific flow rules",
863 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
864 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
865 .call = parse_destroy,
869 .help = "destroy all flow rules",
870 .next = NEXT(NEXT_ENTRY(PORT_ID)),
871 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
876 .help = "query an existing flow rule",
877 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
879 NEXT_ENTRY(PORT_ID)),
880 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
881 ARGS_ENTRY(struct buffer, args.query.rule),
882 ARGS_ENTRY(struct buffer, port)),
887 .help = "list existing flow rules",
888 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
889 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
894 .help = "restrict ingress traffic to the defined flow rules",
895 .next = NEXT(NEXT_ENTRY(BOOLEAN),
896 NEXT_ENTRY(PORT_ID)),
897 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
898 ARGS_ENTRY(struct buffer, port)),
899 .call = parse_isolate,
901 /* Destroy arguments. */
904 .help = "specify a rule identifier",
905 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
906 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
907 .call = parse_destroy,
909 /* Query arguments. */
913 .help = "action to query, must be part of the rule",
914 .call = parse_action,
917 /* List arguments. */
920 .help = "specify a group",
921 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
922 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
925 /* Validate/create attributes. */
928 .help = "specify a group",
929 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
930 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
935 .help = "specify a priority level",
936 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
937 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
942 .help = "affect rule to ingress",
943 .next = NEXT(next_vc_attr),
948 .help = "affect rule to egress",
949 .next = NEXT(next_vc_attr),
952 /* Validate/create pattern. */
955 .help = "submit a list of pattern items",
956 .next = NEXT(next_item),
961 .help = "match value perfectly (with full bit-mask)",
962 .call = parse_vc_spec,
964 [ITEM_PARAM_SPEC] = {
966 .help = "match value according to configured bit-mask",
967 .call = parse_vc_spec,
969 [ITEM_PARAM_LAST] = {
971 .help = "specify upper bound to establish a range",
972 .call = parse_vc_spec,
974 [ITEM_PARAM_MASK] = {
976 .help = "specify bit-mask with relevant bits set to one",
977 .call = parse_vc_spec,
979 [ITEM_PARAM_PREFIX] = {
981 .help = "generate bit-mask from a prefix length",
982 .call = parse_vc_spec,
986 .help = "specify next pattern item",
987 .next = NEXT(next_item),
991 .help = "end list of pattern items",
992 .priv = PRIV_ITEM(END, 0),
993 .next = NEXT(NEXT_ENTRY(ACTIONS)),
998 .help = "no-op pattern item",
999 .priv = PRIV_ITEM(VOID, 0),
1000 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1005 .help = "perform actions when pattern does not match",
1006 .priv = PRIV_ITEM(INVERT, 0),
1007 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1012 .help = "match any protocol for the current layer",
1013 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1014 .next = NEXT(item_any),
1019 .help = "number of layers covered",
1020 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1021 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1025 .help = "match packets addressed to the physical function",
1026 .priv = PRIV_ITEM(PF, 0),
1027 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1032 .help = "match packets addressed to a virtual function ID",
1033 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1034 .next = NEXT(item_vf),
1039 .help = "destination VF ID",
1040 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1041 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1045 .help = "device-specific physical port index to use",
1046 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1047 .next = NEXT(item_port),
1050 [ITEM_PORT_INDEX] = {
1052 .help = "physical port index",
1053 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1054 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1058 .help = "match an arbitrary byte string",
1059 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1060 .next = NEXT(item_raw),
1063 [ITEM_RAW_RELATIVE] = {
1065 .help = "look for pattern after the previous item",
1066 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1067 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1070 [ITEM_RAW_SEARCH] = {
1072 .help = "search pattern from offset (see also limit)",
1073 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1074 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1077 [ITEM_RAW_OFFSET] = {
1079 .help = "absolute or relative offset for pattern",
1080 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1081 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1083 [ITEM_RAW_LIMIT] = {
1085 .help = "search area limit for start of pattern",
1086 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1087 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1089 [ITEM_RAW_PATTERN] = {
1091 .help = "byte string to look for",
1092 .next = NEXT(item_raw,
1094 NEXT_ENTRY(ITEM_PARAM_IS,
1097 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1098 ARGS_ENTRY(struct rte_flow_item_raw, length),
1099 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1100 ITEM_RAW_PATTERN_SIZE)),
1104 .help = "match Ethernet header",
1105 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1106 .next = NEXT(item_eth),
1111 .help = "destination MAC",
1112 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1113 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1117 .help = "source MAC",
1118 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1119 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1123 .help = "EtherType",
1124 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1125 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1129 .help = "match 802.1Q/ad VLAN tag",
1130 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1131 .next = NEXT(item_vlan),
1134 [ITEM_VLAN_TPID] = {
1136 .help = "tag protocol identifier",
1137 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1138 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1142 .help = "tag control information",
1143 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1144 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1148 .help = "priority code point",
1149 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1150 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1155 .help = "drop eligible indicator",
1156 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1157 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1162 .help = "VLAN identifier",
1163 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1164 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1169 .help = "match IPv4 header",
1170 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1171 .next = NEXT(item_ipv4),
1176 .help = "type of service",
1177 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1178 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1179 hdr.type_of_service)),
1183 .help = "time to live",
1184 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1185 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1188 [ITEM_IPV4_PROTO] = {
1190 .help = "next protocol ID",
1191 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1192 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1193 hdr.next_proto_id)),
1197 .help = "source address",
1198 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1199 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1204 .help = "destination address",
1205 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1206 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1211 .help = "match IPv6 header",
1212 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1213 .next = NEXT(item_ipv6),
1218 .help = "traffic class",
1219 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1220 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1222 "\x0f\xf0\x00\x00")),
1224 [ITEM_IPV6_FLOW] = {
1226 .help = "flow label",
1227 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1228 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1230 "\x00\x0f\xff\xff")),
1232 [ITEM_IPV6_PROTO] = {
1234 .help = "protocol (next header)",
1235 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1236 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1241 .help = "hop limit",
1242 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1243 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1248 .help = "source address",
1249 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1250 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1255 .help = "destination address",
1256 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1257 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1262 .help = "match ICMP header",
1263 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1264 .next = NEXT(item_icmp),
1267 [ITEM_ICMP_TYPE] = {
1269 .help = "ICMP packet type",
1270 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1271 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1274 [ITEM_ICMP_CODE] = {
1276 .help = "ICMP packet code",
1277 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1278 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1283 .help = "match UDP header",
1284 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1285 .next = NEXT(item_udp),
1290 .help = "UDP source port",
1291 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1292 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1297 .help = "UDP destination port",
1298 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1299 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1304 .help = "match TCP header",
1305 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1306 .next = NEXT(item_tcp),
1311 .help = "TCP source port",
1312 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1313 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1318 .help = "TCP destination port",
1319 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1320 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1323 [ITEM_TCP_FLAGS] = {
1325 .help = "TCP flags",
1326 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1327 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1332 .help = "match SCTP header",
1333 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1334 .next = NEXT(item_sctp),
1339 .help = "SCTP source port",
1340 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1341 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1346 .help = "SCTP destination port",
1347 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1348 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1353 .help = "validation tag",
1354 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1355 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1358 [ITEM_SCTP_CKSUM] = {
1361 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1362 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1367 .help = "match VXLAN header",
1368 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1369 .next = NEXT(item_vxlan),
1372 [ITEM_VXLAN_VNI] = {
1374 .help = "VXLAN identifier",
1375 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1376 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1380 .help = "match E-Tag header",
1381 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1382 .next = NEXT(item_e_tag),
1385 [ITEM_E_TAG_GRP_ECID_B] = {
1386 .name = "grp_ecid_b",
1387 .help = "GRP and E-CID base",
1388 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1389 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1395 .help = "match NVGRE header",
1396 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1397 .next = NEXT(item_nvgre),
1400 [ITEM_NVGRE_TNI] = {
1402 .help = "virtual subnet ID",
1403 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1404 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1408 .help = "match MPLS header",
1409 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1410 .next = NEXT(item_mpls),
1413 [ITEM_MPLS_LABEL] = {
1415 .help = "MPLS label",
1416 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1417 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1423 .help = "match GRE header",
1424 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1425 .next = NEXT(item_gre),
1428 [ITEM_GRE_PROTO] = {
1430 .help = "GRE protocol type",
1431 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1432 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1437 .help = "fuzzy pattern match, expect faster than default",
1438 .priv = PRIV_ITEM(FUZZY,
1439 sizeof(struct rte_flow_item_fuzzy)),
1440 .next = NEXT(item_fuzzy),
1443 [ITEM_FUZZY_THRESH] = {
1445 .help = "match accuracy threshold",
1446 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1447 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1452 .help = "match GTP header",
1453 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1454 .next = NEXT(item_gtp),
1459 .help = "tunnel endpoint identifier",
1460 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1461 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1465 .help = "match GTP header",
1466 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1467 .next = NEXT(item_gtp),
1472 .help = "match GTP header",
1473 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1474 .next = NEXT(item_gtp),
1479 .help = "match GENEVE header",
1480 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1481 .next = NEXT(item_geneve),
1484 [ITEM_GENEVE_VNI] = {
1486 .help = "virtual network identifier",
1487 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1488 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1490 [ITEM_GENEVE_PROTO] = {
1492 .help = "GENEVE protocol type",
1493 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1494 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1498 /* Validate/create actions. */
1501 .help = "submit a list of associated actions",
1502 .next = NEXT(next_action),
1507 .help = "specify next action",
1508 .next = NEXT(next_action),
1512 .help = "end list of actions",
1513 .priv = PRIV_ACTION(END, 0),
1518 .help = "no-op action",
1519 .priv = PRIV_ACTION(VOID, 0),
1520 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1523 [ACTION_PASSTHRU] = {
1525 .help = "let subsequent rule process matched packets",
1526 .priv = PRIV_ACTION(PASSTHRU, 0),
1527 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1532 .help = "attach 32 bit value to packets",
1533 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1534 .next = NEXT(action_mark),
1537 [ACTION_MARK_ID] = {
1539 .help = "32 bit value to return with packets",
1540 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1541 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1542 .call = parse_vc_conf,
1546 .help = "flag packets",
1547 .priv = PRIV_ACTION(FLAG, 0),
1548 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1553 .help = "assign packets to a given queue index",
1554 .priv = PRIV_ACTION(QUEUE,
1555 sizeof(struct rte_flow_action_queue)),
1556 .next = NEXT(action_queue),
1559 [ACTION_QUEUE_INDEX] = {
1561 .help = "queue index to use",
1562 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1563 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1564 .call = parse_vc_conf,
1568 .help = "drop packets (note: passthru has priority)",
1569 .priv = PRIV_ACTION(DROP, 0),
1570 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1575 .help = "enable counters for this rule",
1576 .priv = PRIV_ACTION(COUNT, 0),
1577 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1582 .help = "spread packets among several queues",
1583 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1584 .next = NEXT(action_rss),
1585 .call = parse_vc_action_rss,
1587 [ACTION_RSS_TYPES] = {
1589 .help = "specific RSS hash types",
1590 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1592 [ACTION_RSS_TYPE] = {
1594 .help = "RSS hash type",
1595 .call = parse_vc_action_rss_type,
1596 .comp = comp_vc_action_rss_type,
1598 [ACTION_RSS_KEY] = {
1600 .help = "RSS hash key",
1601 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1602 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1604 (offsetof(struct action_rss_data, conf) +
1605 offsetof(struct rte_flow_action_rss, key_len),
1606 sizeof(((struct rte_flow_action_rss *)0)->
1608 ARGS_ENTRY(struct action_rss_data, key)),
1610 [ACTION_RSS_KEY_LEN] = {
1612 .help = "RSS hash key length in bytes",
1613 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1614 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1615 (offsetof(struct action_rss_data, conf) +
1616 offsetof(struct rte_flow_action_rss, key_len),
1617 sizeof(((struct rte_flow_action_rss *)0)->
1620 RSS_HASH_KEY_LENGTH)),
1622 [ACTION_RSS_QUEUES] = {
1624 .help = "queue indices to use",
1625 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1626 .call = parse_vc_conf,
1628 [ACTION_RSS_QUEUE] = {
1630 .help = "queue index",
1631 .call = parse_vc_action_rss_queue,
1632 .comp = comp_vc_action_rss_queue,
1636 .help = "redirect packets to physical device function",
1637 .priv = PRIV_ACTION(PF, 0),
1638 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1643 .help = "redirect packets to virtual device function",
1644 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1645 .next = NEXT(action_vf),
1648 [ACTION_VF_ORIGINAL] = {
1650 .help = "use original VF ID if possible",
1651 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1652 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1654 .call = parse_vc_conf,
1658 .help = "VF ID to redirect packets to",
1659 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1660 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1661 .call = parse_vc_conf,
1665 .help = "meter the directed packets at given id",
1666 .priv = PRIV_ACTION(METER,
1667 sizeof(struct rte_flow_action_meter)),
1668 .next = NEXT(action_meter),
1671 [ACTION_METER_ID] = {
1673 .help = "meter id to use",
1674 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1675 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1676 .call = parse_vc_conf,
1680 /** Remove and return last entry from argument stack. */
1681 static const struct arg *
1682 pop_args(struct context *ctx)
/* Empty stack yields NULL; otherwise the most recently pushed
 * descriptor is returned and the stack counter decremented. */
1684 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1687 /** Add entry on top of the argument stack. */
1689 push_args(struct context *ctx, const struct arg *arg)
/* Rejects the push when the fixed-size stack (CTX_STACK_SIZE) is
 * already full — error return value on the elided line; TODO confirm. */
1691 	if (ctx->args_num == CTX_STACK_SIZE)
1693 	ctx->args[ctx->args_num++] = arg;
1697 /** Spread value into buffer according to bit-mask. */
/* Writes @val bit-by-bit into the bytes of @dst selected by
 * arg->mask, walking from arg->size downward. With dst == NULL the
 * callers use it only to compute a size (see parse_prefix()) —
 * skip-write branch is on elided lines; TODO confirm. */
1699 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1701 	uint32_t i = arg->size;
1709 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1718 		unsigned int shift = 0;
1719 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Iterate over the bits set in this mask byte only. */
1721 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
1722 			if (!(arg->mask[i] & (1 << shift)))
/* Clear then set the destination bit from the LSB of val. */
1727 				*buf &= ~(1 << shift);
1728 				*buf |= (val & 1) << shift;
1736 /** Compare a string with a partial one of a given length. */
/* Zero means @partial is a prefix of @full of exactly partial_len
 * bytes and @full is no longer than that; otherwise a non-zero
 * difference (strncmp result or the first unmatched byte of @full). */
1738 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1740 	int r = strncmp(full, partial, partial_len);
1744 	if (strlen(full) <= partial_len)
1746 	return full[partial_len];
1750  * Parse a prefix length and generate a bit-mask.
1752  * Last argument (ctx->args) is retrieved to determine mask size, storage
1753  * location and whether the result must use network byte ordering.
1756 parse_prefix(struct context *ctx, const struct token *token,
1757 	     const char *str, unsigned int len,
1758 	     void *buf, unsigned int size)
1760 	const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the top n bits set; used for the partial
 * byte of a prefix mask that is not a whole number of bytes. */
1761 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1768 	/* Argument is expected. */
1772 	u = strtoumax(str, &end, 0);
/* The whole token must have been consumed as a number. */
1773 	if (errno || (size_t)(end - str) != len)
/* Bit-mask case: arg_entry_bf_fill(NULL, ...) returns the needed size. */
1778 		extra = arg_entry_bf_fill(NULL, 0, arg);
1787 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1788 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
1795 	if (bytes > size || bytes + !!extra > size)
1799 	buf = (uint8_t *)ctx->object + arg->offset;
1800 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host storing a big-endian-style mask: 0xff bytes at
 * the high end of the field, zeroes below, partial byte in between. */
1802 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1803 		memset(buf, 0x00, size - bytes);
1805 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian host: mask bytes lead, partial byte follows. */
1809 	memset(buf, 0xff, bytes);
1810 	memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1812 		((uint8_t *)buf)[bytes] = conv[extra];
/* The object mask for this field is fully significant. */
1815 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for a retry by another parser —
 * elided lines between the cases above; TODO confirm control flow. */
1818 	push_args(ctx, arg);
1822 /** Default parsing function for token name matching. */
/* Accepts the token when @str is a prefix-complete match of the
 * token's name; buf/size are unused here. */
1824 parse_default(struct context *ctx, const struct token *token,
1825 	      const char *str, unsigned int len,
1826 	      void *buf, unsigned int size)
1831 	if (strcmp_partial(token->name, str, len))
1836 /** Parse flow command, initialize output buffer for subsequent tokens. */
1838 parse_init(struct context *ctx, const struct token *token,
1839 	   const char *str, unsigned int len,
1840 	   void *buf, unsigned int size)
1842 	struct buffer *out = buf;
1844 	/* Token name must match. */
1845 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1847 	/* Nothing else to do if there is no buffer. */
1850 	/* Make sure buffer is large enough. */
1851 	if (size < sizeof(*out))
1853 	/* Initialize buffer. */
/* 0x22 poison fills the spare area so stale data is recognizable. */
1854 	memset(out, 0x00, sizeof(*out));
1855 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1858 	ctx->objmask = NULL;
1862 /** Parse tokens for validate/create commands. */
/* Handles the "flow validate|create" token stream: records the
 * command, flow attributes, then appends pattern items and actions.
 * Items/actions grow upward from the buffer start while their spec
 * data grows downward from out->args.vc.data (end of buffer). */
1864 parse_vc(struct context *ctx, const struct token *token,
1865 	 const char *str, unsigned int len,
1866 	 void *buf, unsigned int size)
1868 	struct buffer *out = buf;
1872 	/* Token name must match. */
1873 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1875 	/* Nothing else to do if there is no buffer. */
/* First token of the command: initialize the output header. */
1878 	if (!out->command) {
1879 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1881 		if (sizeof(*out) > size)
1883 		out->command = ctx->curr;
1886 		ctx->objmask = NULL;
/* Spec data pool starts at the end of the buffer and grows down. */
1887 		out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens (group/priority/ingress/egress/...) target attr. */
1891 	ctx->object = &out->args.vc.attr;
1892 	ctx->objmask = NULL;
1893 	switch (ctx->curr) {
1898 		out->args.vc.attr.ingress = 1;
1901 		out->args.vc.attr.egress = 1;
/* "pattern" keyword: place the item array right after the header. */
1904 		out->args.vc.pattern =
1905 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1907 		ctx->object = out->args.vc.pattern;
1908 		ctx->objmask = NULL;
/* "actions" keyword: action array follows the last pattern item. */
1911 		out->args.vc.actions =
1912 			(void *)RTE_ALIGN_CEIL((uintptr_t)
1913 					       (out->args.vc.pattern +
1914 						out->args.vc.pattern_n),
1916 		ctx->object = out->args.vc.actions;
1917 		ctx->objmask = NULL;
/* A pattern item token: reserve spec/last/mask storage for it. */
1924 	if (!out->args.vc.actions) {
1925 		const struct parse_item_priv *priv = token->priv;
1926 		struct rte_flow_item *item =
1927 			out->args.vc.pattern + out->args.vc.pattern_n;
1929 		data_size = priv->size * 3; /* spec, last, mask */
1930 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1931 					       (out->args.vc.data - data_size),
/* Overlap check: items growing up must not meet data growing down. */
1933 		if ((uint8_t *)item + sizeof(*item) > data)
1935 		*item = (struct rte_flow_item){
1938 		++out->args.vc.pattern_n;
1940 		ctx->objmask = NULL;
/* Otherwise an action token: reserve its configuration storage. */
1942 		const struct parse_action_priv *priv = token->priv;
1943 		struct rte_flow_action *action =
1944 			out->args.vc.actions + out->args.vc.actions_n;
1946 		data_size = priv->size; /* configuration */
1947 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1948 					       (out->args.vc.data - data_size),
1950 		if ((uint8_t *)action + sizeof(*action) > data)
1952 		*action = (struct rte_flow_action){
1954 			.conf = data_size ? data : NULL,
1956 		++out->args.vc.actions_n;
1957 		ctx->object = action;
1958 		ctx->objmask = NULL;
1960 	memset(data, 0, data_size);
1961 	out->args.vc.data = data;
1962 	ctx->objdata = data_size;
1966 /** Parse pattern item parameter type. */
/* Handles "spec"/"last"/"mask"/"prefix" after a pattern item,
 * selecting which third of the item's reserved data area subsequent
 * field tokens will write into (index: 0 spec, 1 last, 2 mask —
 * index assignments are on elided lines; TODO confirm). */
1968 parse_vc_spec(struct context *ctx, const struct token *token,
1969 	      const char *str, unsigned int len,
1970 	      void *buf, unsigned int size)
1972 	struct buffer *out = buf;
1973 	struct rte_flow_item *item;
1979 	/* Token name must match. */
1980 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1982 	/* Parse parameter types. */
1983 	switch (ctx->curr) {
1984 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1990 	case ITEM_PARAM_SPEC:
1993 	case ITEM_PARAM_LAST:
1996 	case ITEM_PARAM_PREFIX:
1997 		/* Modify next token to expect a prefix. */
1998 		if (ctx->next_num < 2)
2000 		ctx->next[ctx->next_num - 2] = prefix;
2002 	case ITEM_PARAM_MASK:
2008 	/* Nothing else to do if there is no buffer. */
2011 	if (!out->args.vc.pattern_n)
2013 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2014 	data_size = ctx->objdata / 3; /* spec, last, mask */
2015 	/* Point to selected object. */
2016 	ctx->object = out->args.vc.data + (data_size * index);
2018 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2019 		item->mask = ctx->objmask;
2021 		ctx->objmask = NULL;
2022 	/* Update relevant item pointer. */
2023 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2028 /** Parse action configuration field. */
/* Generic handler for action sub-tokens: points ctx->object at the
 * action's configuration storage so ARGS() entries land there. */
2030 parse_vc_conf(struct context *ctx, const struct token *token,
2031 	      const char *str, unsigned int len,
2032 	      void *buf, unsigned int size)
2034 	struct buffer *out = buf;
2037 	/* Token name must match. */
2038 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2040 	/* Nothing else to do if there is no buffer. */
2043 	/* Point to selected object. */
2044 	ctx->object = out->args.vc.data;
2045 	ctx->objmask = NULL;
2049 /** Parse RSS action. */
/* Delegates token handling to parse_vc(), then fills the freshly
 * reserved action_rss_data with testpmd defaults: default hash key,
 * all configured Rx queues (capped at ACTION_RSS_QUEUE_NUM), and a
 * key length trimmed to the device's reported hash_key_size. */
2051 parse_vc_action_rss(struct context *ctx, const struct token *token,
2052 		    const char *str, unsigned int len,
2053 		    void *buf, unsigned int size)
2055 	struct buffer *out = buf;
2056 	struct rte_flow_action *action;
2057 	struct action_rss_data *action_rss_data;
2061 	ret = parse_vc(ctx, token, str, len, buf, size);
2064 	/* Nothing else to do if there is no buffer. */
2067 	if (!out->args.vc.actions_n)
2069 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2070 	/* Point to selected object. */
2071 	ctx->object = out->args.vc.data;
2072 	ctx->objmask = NULL;
2073 	/* Set up default configuration. */
2074 	action_rss_data = ctx->object;
2075 	*action_rss_data = (struct action_rss_data){
2076 		.conf = (struct rte_flow_action_rss){
2078 			.key_len = sizeof(action_rss_data->key),
2079 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2080 			.key = action_rss_data->key,
2081 			.queue = action_rss_data->queue,
2083 		.key = "testpmd's default RSS hash key",
/* Default queue list is simply 0..queue_num-1. */
2086 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2087 		action_rss_data->queue[i] = i;
2088 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2089 	    ctx->port != (portid_t)RTE_PORT_ALL) {
2090 		struct rte_eth_dev_info info;
2092 		rte_eth_dev_info_get(ctx->port, &info);
2093 		action_rss_data->conf.key_len =
2094 			RTE_MIN(sizeof(action_rss_data->key),
2095 				info.hash_key_size);
2097 	action->conf = &action_rss_data->conf;
2102  * Parse type field for RSS action.
2104  * Valid tokens are type field names and the "end" token.
/* The high 16 bits of ctx->objdata flag "list already started"; the
 * first type token clears conf.types before OR-ing values in. */
2107 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2108 			 const char *str, unsigned int len,
2109 			 void *buf, unsigned int size)
2111 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2112 	struct action_rss_data *action_rss_data;
2118 	if (ctx->curr != ACTION_RSS_TYPE)
2120 	if (!(ctx->objdata >> 16) && ctx->object) {
2121 		action_rss_data = ctx->object;
2122 		action_rss_data->conf.types = 0;
/* "end" terminates the list: reset the started flag and stop. */
2124 	if (!strcmp_partial("end", str, len)) {
2125 		ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
2128 	for (i = 0; rss_type_table[i].str; ++i)
2129 		if (!strcmp_partial(rss_type_table[i].str, str, len))
2131 	if (!rss_type_table[i].str)
2133 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this same token so more types can follow. */
2135 	if (ctx->next_num == RTE_DIM(ctx->next))
2137 	ctx->next[ctx->next_num++] = next;
2140 	action_rss_data = ctx->object;
2141 	action_rss_data->conf.types |= rss_type_table[i].rss_type;
2146  * Parse queue field for RSS action.
2148  * Valid tokens are queue indices and the "end" token.
/* The high 16 bits of ctx->objdata count queues parsed so far; each
 * index is stored via parse_int() into queue[i] through an ad-hoc
 * ARGS_ENTRY_ARB pushed on the argument stack. */
2151 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2152 			  const char *str, unsigned int len,
2153 			  void *buf, unsigned int size)
2155 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2156 	struct action_rss_data *action_rss_data;
2163 	if (ctx->curr != ACTION_RSS_QUEUE)
2165 	i = ctx->objdata >> 16;
2166 	if (!strcmp_partial("end", str, len)) {
2167 		ctx->objdata &= 0xffff;
/* Capacity check against the fixed queue[] storage. */
2170 	if (i >= ACTION_RSS_QUEUE_NUM)
2173 		     ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2174 				    i * sizeof(action_rss_data->queue[i]),
2175 				    sizeof(action_rss_data->queue[i]))))
2177 	ret = parse_int(ctx, token, str, len, NULL, 0);
2183 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so further indices can follow. */
2185 	if (ctx->next_num == RTE_DIM(ctx->next))
2187 	ctx->next[ctx->next_num++] = next;
2190 	action_rss_data = ctx->object;
2191 	action_rss_data->conf.queue_num = i;
/* An empty list stores a NULL queue pointer. */
2192 	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2196 /** Parse tokens for destroy command. */
/* First invocation records the command and places the rule-ID array
 * after the header; later invocations append one rule ID each,
 * bounds-checked against the buffer end. */
2198 parse_destroy(struct context *ctx, const struct token *token,
2199 	      const char *str, unsigned int len,
2200 	      void *buf, unsigned int size)
2202 	struct buffer *out = buf;
2204 	/* Token name must match. */
2205 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2207 	/* Nothing else to do if there is no buffer. */
2210 	if (!out->command) {
2211 		if (ctx->curr != DESTROY)
2213 		if (sizeof(*out) > size)
2215 		out->command = ctx->curr;
2218 		ctx->objmask = NULL;
2219 		out->args.destroy.rule =
2220 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next rule ID still fits inside the buffer. */
2224 	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2225 	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2228 	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2229 	ctx->objmask = NULL;
2233 /** Parse tokens for flush command. */
/* Flush takes no extra arguments: only record the command. */
2235 parse_flush(struct context *ctx, const struct token *token,
2236 	    const char *str, unsigned int len,
2237 	    void *buf, unsigned int size)
2239 	struct buffer *out = buf;
2241 	/* Token name must match. */
2242 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2244 	/* Nothing else to do if there is no buffer. */
2247 	if (!out->command) {
2248 		if (ctx->curr != FLUSH)
2250 		if (sizeof(*out) > size)
2252 		out->command = ctx->curr;
2255 		ctx->objmask = NULL;
2260 /** Parse tokens for query command. */
/* Records the command; rule ID and action name are handled by later
 * tokens via args (elided here). */
2262 parse_query(struct context *ctx, const struct token *token,
2263 	    const char *str, unsigned int len,
2264 	    void *buf, unsigned int size)
2266 	struct buffer *out = buf;
2268 	/* Token name must match. */
2269 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2271 	/* Nothing else to do if there is no buffer. */
2274 	if (!out->command) {
2275 		if (ctx->curr != QUERY)
2277 		if (sizeof(*out) > size)
2279 		out->command = ctx->curr;
2282 		ctx->objmask = NULL;
2287 /** Parse action names. */
/* Resolves an action name from next_action[] and stores the matching
 * action type into the field described by the popped argument. */
2289 parse_action(struct context *ctx, const struct token *token,
2290 	     const char *str, unsigned int len,
2291 	     void *buf, unsigned int size)
2293 	struct buffer *out = buf;
2294 	const struct arg *arg = pop_args(ctx);
2298 	/* Argument is expected. */
2301 	/* Parse action name. */
2302 	for (i = 0; next_action[i]; ++i) {
2303 		const struct parse_action_priv *priv;
2305 		token = &token_list[next_action[i]];
2306 		if (strcmp_partial(token->name, str, len))
/* Match found: copy the action type into the target field. */
2312 		memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument before failing. */
2318 	push_args(ctx, arg);
2322 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): first call records the command and places
 * the group-ID array after the header; later calls append one group
 * ID each with a bounds check. */
2324 parse_list(struct context *ctx, const struct token *token,
2325 	   const char *str, unsigned int len,
2326 	   void *buf, unsigned int size)
2328 	struct buffer *out = buf;
2330 	/* Token name must match. */
2331 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2333 	/* Nothing else to do if there is no buffer. */
2336 	if (!out->command) {
2337 		if (ctx->curr != LIST)
2339 		if (sizeof(*out) > size)
2341 		out->command = ctx->curr;
2344 		ctx->objmask = NULL;
2345 		out->args.list.group =
2346 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next group ID still fits inside the buffer. */
2350 	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2351 	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2354 	ctx->object = out->args.list.group + out->args.list.group_n++;
2355 	ctx->objmask = NULL;
2359 /** Parse tokens for isolate command. */
/* Records the command; the boolean "set" argument is filled by a
 * later token (elided here). */
2361 parse_isolate(struct context *ctx, const struct token *token,
2362 	      const char *str, unsigned int len,
2363 	      void *buf, unsigned int size)
2365 	struct buffer *out = buf;
2367 	/* Token name must match. */
2368 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2370 	/* Nothing else to do if there is no buffer. */
2373 	if (!out->command) {
2374 		if (ctx->curr != ISOLATE)
2376 		if (sizeof(*out) > size)
2378 		out->command = ctx->curr;
2381 		ctx->objmask = NULL;
2387  * Parse signed/unsigned integers 8 to 64-bit long.
2389  * Last argument (ctx->args) is retrieved to determine integer type and
/* Stores into the destination field with the width given by
 * arg->size, honoring network byte order (arg->hton) and supporting
 * 24-bit (3-byte) fields; the same value is then mirrored into the
 * object mask when one is active. */
2393 parse_int(struct context *ctx, const struct token *token,
2394 	  const char *str, unsigned int len,
2395 	  void *buf, unsigned int size)
2397 	const struct arg *arg = pop_args(ctx);
2402 	/* Argument is expected. */
/* Signed fields go through strtoimax(), unsigned through strtoumax(). */
2407 		(uintmax_t)strtoimax(str, &end, 0) :
2408 		strtoumax(str, &end, 0);
2409 	if (errno || (size_t)(end - str) != len)
/* Range check against the argument's declared min/max bounds. */
2412 	    ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2413 			    (intmax_t)u > (intmax_t)arg->max)) ||
2414 	     (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-mask fields are spread bit-by-bit instead of stored whole. */
2419 		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2420 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
2424 	buf = (uint8_t *)ctx->object + arg->offset;
2428 	case sizeof(uint8_t):
2429 		*(uint8_t *)buf = u;
2431 	case sizeof(uint16_t):
2432 		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
2434 	case sizeof(uint8_t [3]):
2435 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* 24-bit little-endian store, low byte first. */
2437 			((uint8_t *)buf)[0] = u;
2438 			((uint8_t *)buf)[1] = u >> 8;
2439 			((uint8_t *)buf)[2] = u >> 16;
/* 24-bit big-endian store, high byte first. */
2443 		((uint8_t *)buf)[0] = u >> 16;
2444 		((uint8_t *)buf)[1] = u >> 8;
2445 		((uint8_t *)buf)[2] = u;
2447 	case sizeof(uint32_t):
2448 		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2450 	case sizeof(uint64_t):
2451 		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask unless it aliases the object. */
2456 	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2458 		buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument before failing. */
2463 	push_args(ctx, arg);
2470  * Three arguments (ctx->args) are retrieved from the stack to store data,
2471  * its actual length and address (in that order).
/* Copies the raw token into the data field (zero-padding the rest),
 * records its length through parse_int(), and optionally saves the
 * data field's address when arg_addr->size is non-zero. */
2474 parse_string(struct context *ctx, const struct token *token,
2475 	     const char *str, unsigned int len,
2476 	     void *buf, unsigned int size)
2478 	const struct arg *arg_data = pop_args(ctx);
2479 	const struct arg *arg_len = pop_args(ctx);
2480 	const struct arg *arg_addr = pop_args(ctx);
2481 	char tmp[16]; /* Ought to be enough. */
2484 	/* Arguments are expected. */
/* Partial pops are undone in reverse order before failing. */
2488 		push_args(ctx, arg_data);
2492 		push_args(ctx, arg_len);
2493 		push_args(ctx, arg_data);
2496 	size = arg_data->size;
2497 	/* Bit-mask fill is not supported. */
2498 	if (arg_data->mask || size < len)
2502 	/* Let parse_int() fill length information first. */
2503 	ret = snprintf(tmp, sizeof(tmp), "%u", len);
2506 	push_args(ctx, arg_len);
2507 	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2512 	buf = (uint8_t *)ctx->object + arg_data->offset;
2513 	/* Output buffer is not necessarily NUL-terminated. */
2514 	memcpy(buf, str, len);
2515 	memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask marks exactly the copied bytes as significant. */
2517 		memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2518 	/* Save address if requested. */
2519 	if (arg_addr->size) {
2520 		memcpy((uint8_t *)ctx->object + arg_addr->offset,
2522 			(uint8_t *)ctx->object + arg_data->offset
2526 			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2528 				(uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
2534 	push_args(ctx, arg_addr);
2535 	push_args(ctx, arg_len);
2536 	push_args(ctx, arg_data);
2541  * Parse a MAC address.
2543  * Last argument (ctx->args) is retrieved to determine storage size and
/* Parses via cmdline_parse_etheraddr() into a temporary, then copies
 * into the destination field; mask (when active) is set all-ones. */
2547 parse_mac_addr(struct context *ctx, const struct token *token,
2548 	       const char *str, unsigned int len,
2549 	       void *buf, unsigned int size)
2551 	const struct arg *arg = pop_args(ctx);
2552 	struct ether_addr tmp;
2556 	/* Argument is expected. */
2560 	/* Bit-mask fill is not supported. */
2561 	if (arg->mask || size != sizeof(tmp))
2563 	/* Only network endian is supported. */
2566 	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The parser must have consumed exactly the token's length. */
2567 	if (ret < 0 || (unsigned int)ret != len)
2571 	buf = (uint8_t *)ctx->object + arg->offset;
2572 	memcpy(buf, &tmp, size);
2574 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument before failing. */
2577 	push_args(ctx, arg);
2582  * Parse an IPv4 address.
2584  * Last argument (ctx->args) is retrieved to determine storage size and
/* Copies the token into a NUL-terminated scratch buffer for
 * inet_pton(); falls back to plain integer parsing when the token is
 * not dotted-quad notation. */
2588 parse_ipv4_addr(struct context *ctx, const struct token *token,
2589 		const char *str, unsigned int len,
2590 		void *buf, unsigned int size)
2592 	const struct arg *arg = pop_args(ctx);
2597 	/* Argument is expected. */
2601 	/* Bit-mask fill is not supported. */
2602 	if (arg->mask || size != sizeof(tmp))
2604 	/* Only network endian is supported. */
2607 	memcpy(str2, str, len);
2609 	ret = inet_pton(AF_INET, str2, &tmp);
2611 		/* Attempt integer parsing. */
2612 		push_args(ctx, arg);
2613 		return parse_int(ctx, token, str, len, buf, size);
2617 	buf = (uint8_t *)ctx->object + arg->offset;
2618 	memcpy(buf, &tmp, size);
2620 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument before failing. */
2623 	push_args(ctx, arg);
2628  * Parse an IPv6 address.
2630  * Last argument (ctx->args) is retrieved to determine storage size and
/* Same scheme as parse_ipv4_addr() but without the integer fallback:
 * NUL-terminated copy, inet_pton(AF_INET6), store, all-ones mask. */
2634 parse_ipv6_addr(struct context *ctx, const struct token *token,
2635 		const char *str, unsigned int len,
2636 		void *buf, unsigned int size)
2638 	const struct arg *arg = pop_args(ctx);
2640 	struct in6_addr tmp;
2644 	/* Argument is expected. */
2648 	/* Bit-mask fill is not supported. */
2649 	if (arg->mask || size != sizeof(tmp))
2651 	/* Only network endian is supported. */
2654 	memcpy(str2, str, len);
2656 	ret = inet_pton(AF_INET6, str2, &tmp);
2661 	buf = (uint8_t *)ctx->object + arg->offset;
2662 	memcpy(buf, &tmp, size);
2664 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument before failing. */
2667 	push_args(ctx, arg);
2671 /** Boolean values (even indices stand for false). */
/* NOTE(review): the entries and terminator are on elided lines;
 * parse_boolean() iterates until a NULL sentinel. */
2672 static const char *const boolean_name[] = {
2682  * Parse a boolean value.
2684  * Last argument (ctx->args) is retrieved to determine storage size and
/* Accepts named booleans (even table indices are false, odd true),
 * translating them to "0"/"1" before delegating to parse_int();
 * unnamed tokens are parsed as integers directly. */
2688 parse_boolean(struct context *ctx, const struct token *token,
2689 	      const char *str, unsigned int len,
2690 	      void *buf, unsigned int size)
2692 	const struct arg *arg = pop_args(ctx);
2696 	/* Argument is expected. */
2699 	for (i = 0; boolean_name[i]; ++i)
2700 		if (!strcmp_partial(boolean_name[i], str, len))
2702 	/* Process token as integer. */
2703 	if (boolean_name[i])
2704 		str = i & 1 ? "1" : "0";
2705 	push_args(ctx, arg);
2706 	ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen("0"/"1"). */
2707 	return ret > 0 ? (int)len : ret;
2710 /** Parse port and update context. */
/* Parses the port ID into a scratch buffer when the caller supplied
 * none, then caches it in ctx->port for later completions. */
2712 parse_port(struct context *ctx, const struct token *token,
2713 	   const char *str, unsigned int len,
2714 	   void *buf, unsigned int size)
2716 	struct buffer *out = &(struct buffer){ .port = 0 };
2724 		ctx->objmask = NULL;
2725 		size = sizeof(*out);
2727 	ret = parse_int(ctx, token, str, len, out, size);
2729 		ctx->port = out->port;
2735 /** No completion. */
/* Completion callback stub for tokens that cannot be completed;
 * body (elided) produces no entries. */
2737 comp_none(struct context *ctx, const struct token *token,
2738 	  unsigned int ent, char *buf, unsigned int size)
2748 /** Complete boolean values. */
/* With buf == NULL, counts entries; otherwise writes entry #ent. */
2750 comp_boolean(struct context *ctx, const struct token *token,
2751 	     unsigned int ent, char *buf, unsigned int size)
2757 	for (i = 0; boolean_name[i]; ++i)
2758 		if (buf && i == ent)
2759 			return snprintf(buf, size, "%s", boolean_name[i]);
2765 /** Complete action names. */
/* With buf == NULL, counts entries; otherwise writes entry #ent. */
2767 comp_action(struct context *ctx, const struct token *token,
2768 	    unsigned int ent, char *buf, unsigned int size)
2774 	for (i = 0; next_action[i]; ++i)
2775 		if (buf && i == ent)
2776 			return snprintf(buf, size, "%s",
2777 					token_list[next_action[i]].name);
2783 /** Complete available ports. */
/* Enumerates currently attached ethdev ports; with buf == NULL the
 * elided tail returns the count. */
2785 comp_port(struct context *ctx, const struct token *token,
2786 	  unsigned int ent, char *buf, unsigned int size)
2793 	RTE_ETH_FOREACH_DEV(p) {
2794 		if (buf && i == ent)
2795 			return snprintf(buf, size, "%u", p);
2803 /** Complete available rule IDs. */
/* Walks the current port's flow list; requires a valid, specific
 * port in the context (not RTE_PORT_ALL). */
2805 comp_rule_id(struct context *ctx, const struct token *token,
2806 	     unsigned int ent, char *buf, unsigned int size)
2809 	struct rte_port *port;
2810 	struct port_flow *pf;
2813 	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2814 	    ctx->port == (portid_t)RTE_PORT_ALL)
2816 	port = &ports[ctx->port];
2817 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2818 		if (buf && i == ent)
2819 			return snprintf(buf, size, "%u", pf->id);
2827 /** Complete type field for RSS action. */
/* Offers every RSS type name plus a final "end" terminator entry. */
2829 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
2830 			unsigned int ent, char *buf, unsigned int size)
2836 	for (i = 0; rss_type_table[i].str; ++i)
2841 			return snprintf(buf, size, "%s", rss_type_table[ent].str);
2843 			return snprintf(buf, size, "end");
2847 /** Complete queue field for RSS action. */
/* Offers queue indices as plain numbers plus a final "end" entry. */
2849 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2850 			 unsigned int ent, char *buf, unsigned int size)
2857 			return snprintf(buf, size, "%u", ent);
2859 			return snprintf(buf, size, "end");
2863 /** Internal context. */
/* Single shared parser state — the cmdline API gives callbacks no
 * user pointer, hence this file-scope instance. */
2864 static struct context cmd_flow_context;
2866 /** Global parser instance (cmdline API). */
/* Forward declaration; the definition appears at the end of the file. */
2867 cmdline_parse_inst_t cmd_flow;
2869 /** Initialize context. */
/* Resets the fields cmd_flow_parse() reads; elided lines clear the
 * counters and indices individually. */
2871 cmd_flow_context_init(struct context *ctx)
2873 	/* A full memset() is not necessary. */
2883 	ctx->objmask = NULL;
2886 /** Parse a token (cmdline API). */
/* Splits the next whitespace/'#'-delimited token out of @src, detects
 * last-argument/EOL conditions, tries it against every candidate in
 * the top next[] list, then pushes the accepted token's own next
 * lists and argument descriptors for subsequent calls. */
2888 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2891 	struct context *ctx = &cmd_flow_context;
2892 	const struct token *token;
2893 	const enum index *list;
2898 	token = &token_list[ctx->curr];
2899 	/* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2902 	for (len = 0; src[len]; ++len)
2903 		if (src[len] == '#' || isspace(src[len]))
2907 	/* Last argument and EOL detection. */
2908 	for (i = len; src[i]; ++i)
2909 		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2911 		else if (!isspace(src[i])) {
2916 	if (src[i] == '\r' || src[i] == '\n') {
2920 	/* Initialize context if necessary. */
2921 	if (!ctx->next_num) {
2924 		ctx->next[ctx->next_num++] = token->next[0];
2926 	/* Process argument through candidates. */
2927 	ctx->prev = ctx->curr;
2928 	list = ctx->next[ctx->next_num - 1];
2929 	for (i = 0; list[i]; ++i) {
2930 		const struct token *next = &token_list[list[i]];
2933 		ctx->curr = list[i];
/* Tokens with a custom call() parse themselves; the rest only need
 * a name match via parse_default(). */
2935 			tmp = next->call(ctx, next, src, len, result, size);
2937 			tmp = parse_default(ctx, next, src, len, result, size);
/* The candidate must consume exactly the token's length. */
2938 		if (tmp == -1 || tmp != len)
2946 	/* Push subsequent tokens if any. */
2948 	for (i = 0; token->next[i]; ++i) {
2949 		if (ctx->next_num == RTE_DIM(ctx->next))
2951 		ctx->next[ctx->next_num++] = token->next[i];
2953 	/* Push arguments if any. */
2955 	for (i = 0; token->args[i]; ++i) {
2956 		if (ctx->args_num == RTE_DIM(ctx->args))
2958 		ctx->args[ctx->args_num++] = token->args[i];
2963 /** Return number of completion entries (cmdline API). */
2965 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2967 	struct context *ctx = &cmd_flow_context;
2968 	const struct token *token = &token_list[ctx->curr];
2969 	const enum index *list;
2973 	/* Count number of tokens in current list. */
2975 		list = ctx->next[ctx->next_num - 1];
2977 		list = token->next[0];
2978 	for (i = 0; list[i]; ++i)
2983 	 * If there is a single token, use its completion callback, otherwise
2984 	 * return the number of entries.
2986 	token = &token_list[list[0]];
2987 	if (i == 1 && token->comp) {
2988 		/* Save index for cmd_flow_get_help(). */
2989 		ctx->prev = list[0];
/* comp(ctx, token, 0, NULL, 0) returns the entry count by contract. */
2990 		return token->comp(ctx, token, 0, NULL, 0);
2995 /** Return a completion entry (cmdline API). */
/* Mirrors cmd_flow_complete_get_nb() but writes entry #index into
 * @dst instead of counting. */
2997 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2998 			  char *dst, unsigned int size)
3000 	struct context *ctx = &cmd_flow_context;
3001 	const struct token *token = &token_list[ctx->curr];
3002 	const enum index *list;
3006 	/* Count number of tokens in current list. */
3008 		list = ctx->next[ctx->next_num - 1];
3010 		list = token->next[0];
3011 	for (i = 0; list[i]; ++i)
3015 	/* If there is a single token, use its completion callback. */
3016 	token = &token_list[list[0]];
3017 	if (i == 1 && token->comp) {
3018 		/* Save index for cmd_flow_get_help(). */
3019 		ctx->prev = list[0];
3020 		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3022 	/* Otherwise make sure the index is valid and use defaults. */
3025 	token = &token_list[list[index]];
3026 	snprintf(dst, size, "%s", token->name);
3027 	/* Save index for cmd_flow_get_help(). */
3028 	ctx->prev = list[index];
3032 /** Populate help strings for current token (cmdline API). */
/* Writes the token's type into @dst and points the global command
 * help at the token's own help text (or name as a fallback). */
3034 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3036 	struct context *ctx = &cmd_flow_context;
3037 	const struct token *token = &token_list[ctx->prev];
3042 	/* Set token type and update global help with details. */
3043 	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
3045 		cmd_flow.help_str = token->help;
3047 		cmd_flow.help_str = token->name;
3051 /** Token definition template (cmdline API). */
/* Single shared token header: every dynamic token returned by
 * cmd_flow_tok() points at this ops vtable. */
3052 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3053 	.ops = &(struct cmdline_token_ops){
3054 		.parse = cmd_flow_parse,
3055 		.complete_get_nb = cmd_flow_complete_get_nb,
3056 		.complete_get_elt = cmd_flow_complete_get_elt,
3057 		.get_help = cmd_flow_get_help,
3062 /** Populate the next dynamic token. */
/* Called by the cmdline library for each successive token slot;
 * decides whether the command may still continue and hands back the
 * shared template header. */
3064 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3065 	     cmdline_parse_token_hdr_t **hdr_inst)
3067 	struct context *ctx = &cmd_flow_context;
3069 	/* Always reinitialize context before requesting the first token. */
3070 	if (!(hdr_inst - cmd_flow.tokens))
3071 		cmd_flow_context_init(ctx);
3072 	/* Return NULL when no more tokens are expected. */
3073 	if (!ctx->next_num && ctx->curr) {
3077 	/* Determine if command should end here. */
3078 	if (ctx->eol && ctx->last && ctx->next_num) {
3079 		const enum index *list = ctx->next[ctx->next_num - 1];
3082 		for (i = 0; list[i]; ++i) {
3089 	*hdr = &cmd_flow_token_hdr;
3092 /** Dispatch parsed buffer to function calls. */
/* Final step after a complete command: route the assembled buffer to
 * the matching port_flow_*() implementation in config.c. */
3094 cmd_flow_parsed(const struct buffer *in)
3096 	switch (in->command) {
3098 		port_flow_validate(in->port, &in->args.vc.attr,
3099 				   in->args.vc.pattern, in->args.vc.actions);
3102 		port_flow_create(in->port, &in->args.vc.attr,
3103 				 in->args.vc.pattern, in->args.vc.actions);
3106 		port_flow_destroy(in->port, in->args.destroy.rule_n,
3107 				  in->args.destroy.rule);
3110 		port_flow_flush(in->port);
3113 		port_flow_query(in->port, in->args.query.rule,
3114 				in->args.query.action);
3117 		port_flow_list(in->port, in->args.list.group_n,
3118 			       in->args.list.group);
3121 		port_flow_isolate(in->port, in->args.isolate.set);
3128 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose entry point: invoked both to generate tokens
 * (arg0 = hdr, arg2 = hdr_inst) and to dispatch a fully parsed
 * buffer (arg0 = result) — branch condition is on elided lines. */
3130 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3133 		cmd_flow_tok(arg0, arg2);
3135 	cmd_flow_parsed(arg0);
3138 /** Global parser instance (cmdline API). */
3139 cmdline_parse_inst_t cmd_flow = {
3141 .data = NULL, /**< Unused. */
3142 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3145 }, /**< Tokens are returned by cmd_flow_tok(). */