1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_ethdev.h>
18 #include <rte_byteorder.h>
19 #include <cmdline_parse.h>
20 #include <cmdline_parse_etheraddr.h>
25 /** Parser token indices. */
45 /* Top-level command. */
48 /* Sub-level commands. */
57 /* Destroy arguments. */
60 /* Query arguments. */
66 /* Validate/create arguments. */
72 /* Validate/create pattern. */
137 ITEM_E_TAG_GRP_ECID_B,
154 /* Validate/create actions. */
182 /** Maximum size for pattern in struct rte_flow_item_raw. */
183 #define ITEM_RAW_PATTERN_SIZE 40
185 /** Storage size for struct rte_flow_item_raw including pattern. */
186 #define ITEM_RAW_SIZE \
187 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
189 /** Maximum number of queue indices in struct rte_flow_action_rss. */
190 #define ACTION_RSS_QUEUE_NUM 32
192 /** Storage for struct rte_flow_action_rss including external data. */
193 struct action_rss_data {
194 struct rte_flow_action_rss conf;
195 uint16_t queue[ACTION_RSS_QUEUE_NUM];
196 struct rte_eth_rss_conf rss_conf;
197 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
200 /** Maximum number of subsequent tokens and arguments on the stack. */
201 #define CTX_STACK_SIZE 16
203 /** Parser context. */
205 /** Stack of subsequent token lists to process. */
206 const enum index *next[CTX_STACK_SIZE];
207 /** Arguments for stacked tokens. */
208 const void *args[CTX_STACK_SIZE];
209 enum index curr; /**< Current token index. */
210 enum index prev; /**< Index of the last token seen. */
211 int next_num; /**< Number of entries in next[]. */
212 int args_num; /**< Number of entries in args[]. */
213 uint32_t eol:1; /**< EOL has been detected. */
214 uint32_t last:1; /**< No more arguments. */
215 portid_t port; /**< Current port ID (for completions). */
216 uint32_t objdata; /**< Object-specific data. */
217 void *object; /**< Address of current object for relative offsets. */
218 void *objmask; /**< Object a full mask must be written to. */
221 /** Token argument. */
223 uint32_t hton:1; /**< Use network byte ordering. */
224 uint32_t sign:1; /**< Value is signed. */
225 uint32_t bounded:1; /**< Value is bounded. */
226 uintmax_t min; /**< Minimum value if bounded. */
227 uintmax_t max; /**< Maximum value if bounded. */
228 uint32_t offset; /**< Relative offset from ctx->object. */
229 uint32_t size; /**< Field size. */
230 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
233 /** Parser token definition. */
235 /** Type displayed during completion (defaults to "TOKEN"). */
237 /** Help displayed during completion (defaults to token name). */
239 /** Private data used by parser functions. */
242 * Lists of subsequent tokens to push on the stack. Each call to the
243 * parser consumes the last entry of that stack.
245 const enum index *const *next;
246 /** Arguments stack for subsequent tokens that need them. */
247 const struct arg *const *args;
249 * Token-processing callback, returns -1 in case of error, the
250 * length of the matched string otherwise. If NULL, attempts to
251 * match the token name.
253 * If buf is not NULL, the result should be stored in it according
254 * to context. An error is returned if not large enough.
256 int (*call)(struct context *ctx, const struct token *token,
257 const char *str, unsigned int len,
258 void *buf, unsigned int size);
260 * Callback that provides possible values for this token, used for
261 * completion. Returns -1 in case of error, the number of possible
262 * values otherwise. If NULL, the token name is used.
264 * If buf is not NULL, entry index ent is written to buf and the
265 * full length of the entry is returned (same behavior as
268 int (*comp)(struct context *ctx, const struct token *token,
269 unsigned int ent, char *buf, unsigned int size);
270 /** Mandatory token name, no default value. */
274 /** Static initializer for the next field. */
275 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
277 /** Static initializer for a NEXT() entry. */
278 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
280 /** Static initializer for the args field. */
281 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
283 /** Static initializer for ARGS() to target a field. */
284 #define ARGS_ENTRY(s, f) \
285 (&(const struct arg){ \
286 .offset = offsetof(s, f), \
287 .size = sizeof(((s *)0)->f), \
290 /** Static initializer for ARGS() to target a bit-field. */
291 #define ARGS_ENTRY_BF(s, f, b) \
292 (&(const struct arg){ \
294 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
297 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
298 #define ARGS_ENTRY_MASK(s, f, m) \
299 (&(const struct arg){ \
300 .offset = offsetof(s, f), \
301 .size = sizeof(((s *)0)->f), \
302 .mask = (const void *)(m), \
305 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
306 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
307 (&(const struct arg){ \
309 .offset = offsetof(s, f), \
310 .size = sizeof(((s *)0)->f), \
311 .mask = (const void *)(m), \
314 /** Static initializer for ARGS() to target a pointer. */
315 #define ARGS_ENTRY_PTR(s, f) \
316 (&(const struct arg){ \
317 .size = sizeof(*((s *)0)->f), \
320 /** Static initializer for ARGS() with arbitrary offset and size. */
321 #define ARGS_ENTRY_ARB(o, s) \
322 (&(const struct arg){ \
327 /** Same as ARGS_ENTRY_ARB() with bounded values. */
328 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
329 (&(const struct arg){ \
337 /** Same as ARGS_ENTRY() using network byte ordering. */
338 #define ARGS_ENTRY_HTON(s, f) \
339 (&(const struct arg){ \
341 .offset = offsetof(s, f), \
342 .size = sizeof(((s *)0)->f), \
345 /** Parser output buffer layout expected by cmd_flow_parsed(). */
347 enum index command; /**< Flow command. */
348 portid_t port; /**< Affected port ID. */
351 struct rte_flow_attr attr;
352 struct rte_flow_item *pattern;
353 struct rte_flow_action *actions;
357 } vc; /**< Validate/create arguments. */
361 } destroy; /**< Destroy arguments. */
364 enum rte_flow_action_type action;
365 } query; /**< Query arguments. */
369 } list; /**< List arguments. */
372 } isolate; /**< Isolated mode arguments. */
373 } args; /**< Command arguments. */
376 /** Private data for pattern items. */
377 struct parse_item_priv {
378 enum rte_flow_item_type type; /**< Item type. */
379 uint32_t size; /**< Size of item specification structure. */
382 #define PRIV_ITEM(t, s) \
383 (&(const struct parse_item_priv){ \
384 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
388 /** Private data for actions. */
389 struct parse_action_priv {
390 enum rte_flow_action_type type; /**< Action type. */
391 uint32_t size; /**< Size of action configuration structure. */
394 #define PRIV_ACTION(t, s) \
395 (&(const struct parse_action_priv){ \
396 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
400 static const enum index next_vc_attr[] = {
409 static const enum index next_destroy_attr[] = {
415 static const enum index next_list_attr[] = {
421 static const enum index item_param[] = {
430 static const enum index next_item[] = {
460 static const enum index item_fuzzy[] = {
466 static const enum index item_any[] = {
472 static const enum index item_vf[] = {
478 static const enum index item_port[] = {
484 static const enum index item_raw[] = {
494 static const enum index item_eth[] = {
502 static const enum index item_vlan[] = {
512 static const enum index item_ipv4[] = {
522 static const enum index item_ipv6[] = {
533 static const enum index item_icmp[] = {
540 static const enum index item_udp[] = {
547 static const enum index item_tcp[] = {
555 static const enum index item_sctp[] = {
564 static const enum index item_vxlan[] = {
570 static const enum index item_e_tag[] = {
571 ITEM_E_TAG_GRP_ECID_B,
576 static const enum index item_nvgre[] = {
582 static const enum index item_mpls[] = {
588 static const enum index item_gre[] = {
594 static const enum index item_gtp[] = {
600 static const enum index item_geneve[] = {
607 static const enum index next_action[] = {
623 static const enum index action_mark[] = {
629 static const enum index action_queue[] = {
635 static const enum index action_rss[] = {
644 static const enum index action_vf[] = {
651 static const enum index action_meter[] = {
657 static int parse_init(struct context *, const struct token *,
658 const char *, unsigned int,
659 void *, unsigned int);
660 static int parse_vc(struct context *, const struct token *,
661 const char *, unsigned int,
662 void *, unsigned int);
663 static int parse_vc_spec(struct context *, const struct token *,
664 const char *, unsigned int, void *, unsigned int);
665 static int parse_vc_conf(struct context *, const struct token *,
666 const char *, unsigned int, void *, unsigned int);
667 static int parse_vc_action_rss(struct context *, const struct token *,
668 const char *, unsigned int, void *,
670 static int parse_vc_action_rss_type(struct context *, const struct token *,
671 const char *, unsigned int, void *,
673 static int parse_vc_action_rss_queue(struct context *, const struct token *,
674 const char *, unsigned int, void *,
676 static int parse_destroy(struct context *, const struct token *,
677 const char *, unsigned int,
678 void *, unsigned int);
679 static int parse_flush(struct context *, const struct token *,
680 const char *, unsigned int,
681 void *, unsigned int);
682 static int parse_query(struct context *, const struct token *,
683 const char *, unsigned int,
684 void *, unsigned int);
685 static int parse_action(struct context *, const struct token *,
686 const char *, unsigned int,
687 void *, unsigned int);
688 static int parse_list(struct context *, const struct token *,
689 const char *, unsigned int,
690 void *, unsigned int);
691 static int parse_isolate(struct context *, const struct token *,
692 const char *, unsigned int,
693 void *, unsigned int);
694 static int parse_int(struct context *, const struct token *,
695 const char *, unsigned int,
696 void *, unsigned int);
697 static int parse_prefix(struct context *, const struct token *,
698 const char *, unsigned int,
699 void *, unsigned int);
700 static int parse_boolean(struct context *, const struct token *,
701 const char *, unsigned int,
702 void *, unsigned int);
703 static int parse_string(struct context *, const struct token *,
704 const char *, unsigned int,
705 void *, unsigned int);
706 static int parse_mac_addr(struct context *, const struct token *,
707 const char *, unsigned int,
708 void *, unsigned int);
709 static int parse_ipv4_addr(struct context *, const struct token *,
710 const char *, unsigned int,
711 void *, unsigned int);
712 static int parse_ipv6_addr(struct context *, const struct token *,
713 const char *, unsigned int,
714 void *, unsigned int);
715 static int parse_port(struct context *, const struct token *,
716 const char *, unsigned int,
717 void *, unsigned int);
718 static int comp_none(struct context *, const struct token *,
719 unsigned int, char *, unsigned int);
720 static int comp_boolean(struct context *, const struct token *,
721 unsigned int, char *, unsigned int);
722 static int comp_action(struct context *, const struct token *,
723 unsigned int, char *, unsigned int);
724 static int comp_port(struct context *, const struct token *,
725 unsigned int, char *, unsigned int);
726 static int comp_rule_id(struct context *, const struct token *,
727 unsigned int, char *, unsigned int);
728 static int comp_vc_action_rss_type(struct context *, const struct token *,
729 unsigned int, char *, unsigned int);
730 static int comp_vc_action_rss_queue(struct context *, const struct token *,
731 unsigned int, char *, unsigned int);
733 /** Token definitions. */
734 static const struct token token_list[] = {
735 /* Special tokens. */
738 .help = "null entry, abused as the entry point",
739 .next = NEXT(NEXT_ENTRY(FLOW)),
744 .help = "command may end here",
750 .help = "integer value",
755 .name = "{unsigned}",
757 .help = "unsigned integer value",
764 .help = "prefix length for bit-mask",
765 .call = parse_prefix,
771 .help = "any boolean value",
772 .call = parse_boolean,
773 .comp = comp_boolean,
778 .help = "fixed string",
779 .call = parse_string,
783 .name = "{MAC address}",
785 .help = "standard MAC address notation",
786 .call = parse_mac_addr,
790 .name = "{IPv4 address}",
791 .type = "IPV4 ADDRESS",
792 .help = "standard IPv4 address notation",
793 .call = parse_ipv4_addr,
797 .name = "{IPv6 address}",
798 .type = "IPV6 ADDRESS",
799 .help = "standard IPv6 address notation",
800 .call = parse_ipv6_addr,
806 .help = "rule identifier",
808 .comp = comp_rule_id,
813 .help = "port identifier",
818 .name = "{group_id}",
820 .help = "group identifier",
827 .help = "priority level",
831 /* Top-level command. */
834 .type = "{command} {port_id} [{arg} [...]]",
835 .help = "manage ingress/egress flow rules",
836 .next = NEXT(NEXT_ENTRY
846 /* Sub-level commands. */
849 .help = "check whether a flow rule can be created",
850 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
851 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
856 .help = "create a flow rule",
857 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
858 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
863 .help = "destroy specific flow rules",
864 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
865 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
866 .call = parse_destroy,
870 .help = "destroy all flow rules",
871 .next = NEXT(NEXT_ENTRY(PORT_ID)),
872 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
877 .help = "query an existing flow rule",
878 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
880 NEXT_ENTRY(PORT_ID)),
881 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
882 ARGS_ENTRY(struct buffer, args.query.rule),
883 ARGS_ENTRY(struct buffer, port)),
888 .help = "list existing flow rules",
889 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
890 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
895 .help = "restrict ingress traffic to the defined flow rules",
896 .next = NEXT(NEXT_ENTRY(BOOLEAN),
897 NEXT_ENTRY(PORT_ID)),
898 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
899 ARGS_ENTRY(struct buffer, port)),
900 .call = parse_isolate,
902 /* Destroy arguments. */
905 .help = "specify a rule identifier",
906 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
907 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
908 .call = parse_destroy,
910 /* Query arguments. */
914 .help = "action to query, must be part of the rule",
915 .call = parse_action,
918 /* List arguments. */
921 .help = "specify a group",
922 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
923 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
926 /* Validate/create attributes. */
929 .help = "specify a group",
930 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
931 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
936 .help = "specify a priority level",
937 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
938 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
943 .help = "affect rule to ingress",
944 .next = NEXT(next_vc_attr),
949 .help = "affect rule to egress",
950 .next = NEXT(next_vc_attr),
953 /* Validate/create pattern. */
956 .help = "submit a list of pattern items",
957 .next = NEXT(next_item),
962 .help = "match value perfectly (with full bit-mask)",
963 .call = parse_vc_spec,
965 [ITEM_PARAM_SPEC] = {
967 .help = "match value according to configured bit-mask",
968 .call = parse_vc_spec,
970 [ITEM_PARAM_LAST] = {
972 .help = "specify upper bound to establish a range",
973 .call = parse_vc_spec,
975 [ITEM_PARAM_MASK] = {
977 .help = "specify bit-mask with relevant bits set to one",
978 .call = parse_vc_spec,
980 [ITEM_PARAM_PREFIX] = {
982 .help = "generate bit-mask from a prefix length",
983 .call = parse_vc_spec,
987 .help = "specify next pattern item",
988 .next = NEXT(next_item),
992 .help = "end list of pattern items",
993 .priv = PRIV_ITEM(END, 0),
994 .next = NEXT(NEXT_ENTRY(ACTIONS)),
999 .help = "no-op pattern item",
1000 .priv = PRIV_ITEM(VOID, 0),
1001 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1006 .help = "perform actions when pattern does not match",
1007 .priv = PRIV_ITEM(INVERT, 0),
1008 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1013 .help = "match any protocol for the current layer",
1014 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1015 .next = NEXT(item_any),
1020 .help = "number of layers covered",
1021 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1022 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1026 .help = "match packets addressed to the physical function",
1027 .priv = PRIV_ITEM(PF, 0),
1028 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1033 .help = "match packets addressed to a virtual function ID",
1034 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1035 .next = NEXT(item_vf),
1040 .help = "destination VF ID",
1041 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1042 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1046 .help = "device-specific physical port index to use",
1047 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1048 .next = NEXT(item_port),
1051 [ITEM_PORT_INDEX] = {
1053 .help = "physical port index",
1054 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1055 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1059 .help = "match an arbitrary byte string",
1060 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1061 .next = NEXT(item_raw),
1064 [ITEM_RAW_RELATIVE] = {
1066 .help = "look for pattern after the previous item",
1067 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1068 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1071 [ITEM_RAW_SEARCH] = {
1073 .help = "search pattern from offset (see also limit)",
1074 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1075 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1078 [ITEM_RAW_OFFSET] = {
1080 .help = "absolute or relative offset for pattern",
1081 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1082 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1084 [ITEM_RAW_LIMIT] = {
1086 .help = "search area limit for start of pattern",
1087 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1088 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1090 [ITEM_RAW_PATTERN] = {
1092 .help = "byte string to look for",
1093 .next = NEXT(item_raw,
1095 NEXT_ENTRY(ITEM_PARAM_IS,
1098 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1099 ARGS_ENTRY(struct rte_flow_item_raw, length),
1100 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1101 ITEM_RAW_PATTERN_SIZE)),
1105 .help = "match Ethernet header",
1106 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1107 .next = NEXT(item_eth),
1112 .help = "destination MAC",
1113 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1114 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1118 .help = "source MAC",
1119 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1120 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1124 .help = "EtherType",
1125 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1126 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1130 .help = "match 802.1Q/ad VLAN tag",
1131 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1132 .next = NEXT(item_vlan),
1135 [ITEM_VLAN_TPID] = {
1137 .help = "tag protocol identifier",
1138 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1143 .help = "tag control information",
1144 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1149 .help = "priority code point",
1150 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1151 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1156 .help = "drop eligible indicator",
1157 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1158 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1163 .help = "VLAN identifier",
1164 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1165 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1170 .help = "match IPv4 header",
1171 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1172 .next = NEXT(item_ipv4),
1177 .help = "type of service",
1178 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1179 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1180 hdr.type_of_service)),
1184 .help = "time to live",
1185 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1186 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1189 [ITEM_IPV4_PROTO] = {
1191 .help = "next protocol ID",
1192 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1193 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1194 hdr.next_proto_id)),
1198 .help = "source address",
1199 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1200 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1205 .help = "destination address",
1206 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1207 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1212 .help = "match IPv6 header",
1213 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1214 .next = NEXT(item_ipv6),
1219 .help = "traffic class",
1220 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1221 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1223 "\x0f\xf0\x00\x00")),
1225 [ITEM_IPV6_FLOW] = {
1227 .help = "flow label",
1228 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1229 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1231 "\x00\x0f\xff\xff")),
1233 [ITEM_IPV6_PROTO] = {
1235 .help = "protocol (next header)",
1236 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1237 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1242 .help = "hop limit",
1243 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1249 .help = "source address",
1250 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1251 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1256 .help = "destination address",
1257 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1263 .help = "match ICMP header",
1264 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1265 .next = NEXT(item_icmp),
1268 [ITEM_ICMP_TYPE] = {
1270 .help = "ICMP packet type",
1271 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1272 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1275 [ITEM_ICMP_CODE] = {
1277 .help = "ICMP packet code",
1278 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1284 .help = "match UDP header",
1285 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1286 .next = NEXT(item_udp),
1291 .help = "UDP source port",
1292 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1293 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1298 .help = "UDP destination port",
1299 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1300 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1305 .help = "match TCP header",
1306 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1307 .next = NEXT(item_tcp),
1312 .help = "TCP source port",
1313 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1314 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1319 .help = "TCP destination port",
1320 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1321 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1324 [ITEM_TCP_FLAGS] = {
1326 .help = "TCP flags",
1327 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1333 .help = "match SCTP header",
1334 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1335 .next = NEXT(item_sctp),
1340 .help = "SCTP source port",
1341 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1342 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1347 .help = "SCTP destination port",
1348 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1349 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1354 .help = "validation tag",
1355 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1356 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1359 [ITEM_SCTP_CKSUM] = {
1362 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1363 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1368 .help = "match VXLAN header",
1369 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1370 .next = NEXT(item_vxlan),
1373 [ITEM_VXLAN_VNI] = {
1375 .help = "VXLAN identifier",
1376 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1377 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1381 .help = "match E-Tag header",
1382 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1383 .next = NEXT(item_e_tag),
1386 [ITEM_E_TAG_GRP_ECID_B] = {
1387 .name = "grp_ecid_b",
1388 .help = "GRP and E-CID base",
1389 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1390 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1396 .help = "match NVGRE header",
1397 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1398 .next = NEXT(item_nvgre),
1401 [ITEM_NVGRE_TNI] = {
1403 .help = "virtual subnet ID",
1404 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1405 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1409 .help = "match MPLS header",
1410 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1411 .next = NEXT(item_mpls),
1414 [ITEM_MPLS_LABEL] = {
1416 .help = "MPLS label",
1417 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1418 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1424 .help = "match GRE header",
1425 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1426 .next = NEXT(item_gre),
1429 [ITEM_GRE_PROTO] = {
1431 .help = "GRE protocol type",
1432 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1433 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1438 .help = "fuzzy pattern match, expect faster than default",
1439 .priv = PRIV_ITEM(FUZZY,
1440 sizeof(struct rte_flow_item_fuzzy)),
1441 .next = NEXT(item_fuzzy),
1444 [ITEM_FUZZY_THRESH] = {
1446 .help = "match accuracy threshold",
1447 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1448 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1453 .help = "match GTP header",
1454 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1455 .next = NEXT(item_gtp),
1460 .help = "tunnel endpoint identifier",
1461 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1462 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1466 .help = "match GTP header",
1467 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1468 .next = NEXT(item_gtp),
1473 .help = "match GTP header",
1474 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1475 .next = NEXT(item_gtp),
1480 .help = "match GENEVE header",
1481 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1482 .next = NEXT(item_geneve),
1485 [ITEM_GENEVE_VNI] = {
1487 .help = "virtual network identifier",
1488 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1489 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1491 [ITEM_GENEVE_PROTO] = {
1493 .help = "GENEVE protocol type",
1494 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1495 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1499 /* Validate/create actions. */
1502 .help = "submit a list of associated actions",
1503 .next = NEXT(next_action),
1508 .help = "specify next action",
1509 .next = NEXT(next_action),
1513 .help = "end list of actions",
1514 .priv = PRIV_ACTION(END, 0),
1519 .help = "no-op action",
1520 .priv = PRIV_ACTION(VOID, 0),
1521 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1524 [ACTION_PASSTHRU] = {
1526 .help = "let subsequent rule process matched packets",
1527 .priv = PRIV_ACTION(PASSTHRU, 0),
1528 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1533 .help = "attach 32 bit value to packets",
1534 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1535 .next = NEXT(action_mark),
1538 [ACTION_MARK_ID] = {
1540 .help = "32 bit value to return with packets",
1541 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1542 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1543 .call = parse_vc_conf,
1547 .help = "flag packets",
1548 .priv = PRIV_ACTION(FLAG, 0),
1549 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1554 .help = "assign packets to a given queue index",
1555 .priv = PRIV_ACTION(QUEUE,
1556 sizeof(struct rte_flow_action_queue)),
1557 .next = NEXT(action_queue),
1560 [ACTION_QUEUE_INDEX] = {
1562 .help = "queue index to use",
1563 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1564 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1565 .call = parse_vc_conf,
1569 .help = "drop packets (note: passthru has priority)",
1570 .priv = PRIV_ACTION(DROP, 0),
1571 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1576 .help = "enable counters for this rule",
1577 .priv = PRIV_ACTION(COUNT, 0),
1578 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1583 .help = "spread packets among several queues",
1584 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1585 .next = NEXT(action_rss),
1586 .call = parse_vc_action_rss,
1588 [ACTION_RSS_TYPES] = {
1590 .help = "RSS hash types",
1591 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1593 [ACTION_RSS_TYPE] = {
1595 .help = "RSS hash type",
1596 .call = parse_vc_action_rss_type,
1597 .comp = comp_vc_action_rss_type,
1599 [ACTION_RSS_KEY] = {
1601 .help = "RSS hash key",
1602 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1603 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1605 (offsetof(struct action_rss_data, rss_conf) +
1606 offsetof(struct rte_eth_rss_conf, rss_key_len),
1607 sizeof(((struct rte_eth_rss_conf *)0)->
1609 ARGS_ENTRY(struct action_rss_data, rss_key)),
1611 [ACTION_RSS_KEY_LEN] = {
1613 .help = "RSS hash key length in bytes",
1614 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1615 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1616 (offsetof(struct action_rss_data, rss_conf) +
1617 offsetof(struct rte_eth_rss_conf, rss_key_len),
1618 sizeof(((struct rte_eth_rss_conf *)0)->
1621 RSS_HASH_KEY_LENGTH)),
1623 [ACTION_RSS_QUEUES] = {
1625 .help = "queue indices to use",
1626 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1627 .call = parse_vc_conf,
1629 [ACTION_RSS_QUEUE] = {
1631 .help = "queue index",
1632 .call = parse_vc_action_rss_queue,
1633 .comp = comp_vc_action_rss_queue,
1637 .help = "redirect packets to physical device function",
1638 .priv = PRIV_ACTION(PF, 0),
1639 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1644 .help = "redirect packets to virtual device function",
1645 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1646 .next = NEXT(action_vf),
1649 [ACTION_VF_ORIGINAL] = {
1651 .help = "use original VF ID if possible",
1652 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1653 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1655 .call = parse_vc_conf,
1659 .help = "VF ID to redirect packets to",
1660 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1661 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1662 .call = parse_vc_conf,
1666 .help = "meter the directed packets at given id",
1667 .priv = PRIV_ACTION(METER,
1668 sizeof(struct rte_flow_action_meter)),
1669 .next = NEXT(action_meter),
1672 [ACTION_METER_ID] = {
1674 .help = "meter id to use",
1675 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1676 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1677 .call = parse_vc_conf,
1681 /** Remove and return last entry from argument stack. */
1682 static const struct arg *
1683 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty; otherwise pops and returns the
 * most recently pushed argument descriptor. */
1685 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1688 /** Add entry on top of the argument stack. */
/* Rejects the push when the stack already holds CTX_STACK_SIZE entries
 * (the bounds check below); otherwise stores @arg at the top. */
1690 push_args(struct context *ctx, const struct arg *arg)
1692 if (ctx->args_num == CTX_STACK_SIZE)
1694 ctx->args[ctx->args_num++] = arg;
1698 /** Spread value into buffer according to bit-mask. */
/* Writes @val bit by bit into @dst at the positions set in arg->mask,
 * walking the mask byte-wise. Passing dst == NULL appears to be used by
 * callers (see parse_prefix) as a dry-run to size the mask —
 * NOTE(review): the dry-run/return path is not visible in this excerpt. */
1700 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1702 uint32_t i = arg->size;
/* Byte order of the walk differs on little-endian hosts. */
1710 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1719 unsigned int shift = 0;
1720 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For each bit set in the current mask byte, clear it then copy in the
 * next low-order bit of val. */
1722 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1723 if (!(arg->mask[i] & (1 << shift)))
1728 *buf &= ~(1 << shift);
1729 *buf |= (val & 1) << shift;
1737 /** Compare a string with a partial one of a given length. */
/* Returns 0 only when @partial matches a prefix of @full AND @full is not
 * longer than partial_len; otherwise returns a nonzero value (the strncmp
 * result, or the first unmatched character of @full). */
1739 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1741 int r = strncmp(full, partial, partial_len);
1745 if (strlen(full) <= partial_len)
1747 return full[partial_len];
1751 * Parse a prefix length and generate a bit-mask.
1753 * Last argument (ctx->args) is retrieved to determine mask size, storage
1754 * location and whether the result must use network byte ordering.
1757 parse_prefix(struct context *ctx, const struct token *token,
1758 const char *str, unsigned int len,
1759 void *buf, unsigned int size)
1761 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n highest bits set, used for the partial
 * (non-byte-aligned) tail of the generated mask. */
1762 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1769 /* Argument is expected. */
/* Whole token must be a valid unsigned integer. */
1773 u = strtoumax(str, &end, 0);
1774 if (errno || (size_t)(end - str) != len)
/* Bit-mask arguments delegate to arg_entry_bf_fill(); a NULL dst call
 * precedes the real fills. */
1779 extra = arg_entry_bf_fill(NULL, 0, arg);
1788 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1789 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix must fit in the destination field. */
1796 if (bytes > size || bytes + !!extra > size)
1800 buf = (uint8_t *)ctx->object + arg->offset;
1801 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host: set the high-order end of the buffer. */
1803 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1804 memset(buf, 0x00, size - bytes);
1806 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian host: mask starts at the beginning of the buffer. */
1810 memset(buf, 0xff, bytes);
1811 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1813 ((uint8_t *)buf)[bytes] = conv[extra];
/* Spec-mask (objmask) is fully set for this field when present. */
1816 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure paths the argument is pushed back for the next attempt. */
1819 push_args(ctx, arg);
1823 /** Default parsing function for token name matching. */
/* Accepts the token iff @str is a prefix-match for token->name
 * (strcmp_partial); no output buffer is written. */
1825 parse_default(struct context *ctx, const struct token *token,
1826 const char *str, unsigned int len,
1827 void *buf, unsigned int size)
1832 if (strcmp_partial(token->name, str, len))
1837 /** Parse flow command, initialize output buffer for subsequent tokens. */
1839 parse_init(struct context *ctx, const struct token *token,
1840 const char *str, unsigned int len,
1841 void *buf, unsigned int size)
1843 struct buffer *out = buf;
1845 /* Token name must match. */
1846 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1848 /* Nothing else to do if there is no buffer. */
1851 /* Make sure buffer is large enough. */
1852 if (size < sizeof(*out))
1854 /* Initialize buffer. */
/* 0x22 poison fills the spare area beyond the header so stale data is
 * recognizable while debugging. */
1855 memset(out, 0x00, sizeof(*out));
1856 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1859 ctx->objmask = NULL;
1863 /** Parse tokens for validate/create commands. */
/* Handles the whole validate/create token family: command start, attr
 * flags, "pattern"/"actions" section markers, and individual item/action
 * tokens, appending items/actions and carving their spec/conf storage out
 * of the tail of the output buffer (out->args.vc.data grows downward). */
1865 parse_vc(struct context *ctx, const struct token *token,
1866 const char *str, unsigned int len,
1867 void *buf, unsigned int size)
1869 struct buffer *out = buf;
1873 /* Token name must match. */
1874 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1876 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE and anchor the
 * downward-growing data area at the end of the buffer. */
1879 if (!out->command) {
1880 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1882 if (sizeof(*out) > size)
1884 out->command = ctx->curr;
1887 ctx->objmask = NULL;
1888 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens (ingress/egress/...) write into vc.attr. */
1892 ctx->object = &out->args.vc.attr;
1893 ctx->objmask = NULL;
1894 switch (ctx->curr) {
1899 out->args.vc.attr.ingress = 1;
1902 out->args.vc.attr.egress = 1;
/* "pattern" keyword: items array starts right after the header,
 * suitably aligned. */
1905 out->args.vc.pattern =
1906 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1908 ctx->object = out->args.vc.pattern;
1909 ctx->objmask = NULL;
/* "actions" keyword: actions array starts after the last item. */
1912 out->args.vc.actions =
1913 (void *)RTE_ALIGN_CEIL((uintptr_t)
1914 (out->args.vc.pattern +
1915 out->args.vc.pattern_n),
1917 ctx->object = out->args.vc.actions;
1918 ctx->objmask = NULL;
/* Pattern item token: reserve spec/last/mask storage (3x priv->size)
 * from the top of the data area and append the item. */
1925 if (!out->args.vc.actions) {
1926 const struct parse_item_priv *priv = token->priv;
1927 struct rte_flow_item *item =
1928 out->args.vc.pattern + out->args.vc.pattern_n;
1930 data_size = priv->size * 3; /* spec, last, mask */
1931 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1932 (out->args.vc.data - data_size),
/* Fail when the item array and data area would collide. */
1934 if ((uint8_t *)item + sizeof(*item) > data)
1936 *item = (struct rte_flow_item){
1939 ++out->args.vc.pattern_n;
1941 ctx->objmask = NULL;
/* Action token: reserve conf storage and append the action. */
1943 const struct parse_action_priv *priv = token->priv;
1944 struct rte_flow_action *action =
1945 out->args.vc.actions + out->args.vc.actions_n;
1947 data_size = priv->size; /* configuration */
1948 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1949 (out->args.vc.data - data_size),
1951 if ((uint8_t *)action + sizeof(*action) > data)
1953 *action = (struct rte_flow_action){
1955 .conf = data_size ? data : NULL,
1957 ++out->args.vc.actions_n;
1958 ctx->object = action;
1959 ctx->objmask = NULL;
/* Zero the freshly carved storage; objdata tracks its size for the
 * spec/last/mask split in parse_vc_spec(). */
1961 memset(data, 0, data_size);
1962 out->args.vc.data = data;
1963 ctx->objdata = data_size;
1967 /** Parse pattern item parameter type. */
/* Selects which of spec/last/mask/prefix the following values apply to
 * for the current pattern item and repoints ctx->object/objmask into the
 * item's per-field storage (carved by parse_vc()). */
1969 parse_vc_spec(struct context *ctx, const struct token *token,
1970 const char *str, unsigned int len,
1971 void *buf, unsigned int size)
1973 struct buffer *out = buf;
1974 struct rte_flow_item *item;
1980 /* Token name must match. */
1981 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1983 /* Parse parameter types. */
1984 switch (ctx->curr) {
1985 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1991 case ITEM_PARAM_SPEC:
1994 case ITEM_PARAM_LAST:
1997 case ITEM_PARAM_PREFIX:
1998 /* Modify next token to expect a prefix. */
1999 if (ctx->next_num < 2)
2001 ctx->next[ctx->next_num - 2] = prefix;
2003 case ITEM_PARAM_MASK:
2009 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist. */
2012 if (!out->args.vc.pattern_n)
2014 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2015 data_size = ctx->objdata / 3; /* spec, last, mask */
2016 /* Point to selected object. */
2017 ctx->object = out->args.vc.data + (data_size * index);
2019 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2020 item->mask = ctx->objmask;
2022 ctx->objmask = NULL;
2023 /* Update relevant item pointer. */
2024 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2029 /** Parse action configuration field. */
/* Generic handler for action sub-tokens: points ctx->object at the
 * current action's configuration storage so the following argument
 * parsers write into it. */
2031 parse_vc_conf(struct context *ctx, const struct token *token,
2032 const char *str, unsigned int len,
2033 void *buf, unsigned int size)
2035 struct buffer *out = buf;
2038 /* Token name must match. */
2039 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2041 /* Nothing else to do if there is no buffer. */
2044 /* Point to selected object. */
2045 ctx->object = out->args.vc.data;
2046 ctx->objmask = NULL;
2050 /** Parse RSS action. */
/* Delegates token handling to parse_vc(), then fills the freshly carved
 * struct action_rss_data with defaults: all rx queues (capped at
 * ACTION_RSS_QUEUE_NUM), testpmd's default hash key, and a key length
 * clamped to the device's reported hash_key_size when a port is known. */
2052 parse_vc_action_rss(struct context *ctx, const struct token *token,
2053 const char *str, unsigned int len,
2054 void *buf, unsigned int size)
2056 struct buffer *out = buf;
2057 struct rte_flow_action *action;
2058 struct action_rss_data *action_rss_data;
2062 ret = parse_vc(ctx, token, str, len, buf, size);
2065 /* Nothing else to do if there is no buffer. */
2068 if (!out->args.vc.actions_n)
2070 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2071 /* Point to selected object. */
2072 ctx->object = out->args.vc.data;
2073 ctx->objmask = NULL;
2074 /* Set up default configuration. */
2075 action_rss_data = ctx->object;
2076 *action_rss_data = (struct action_rss_data){
2077 .conf = (struct rte_flow_action_rss){
2078 .rss_conf = &action_rss_data->rss_conf,
2079 .num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2080 .queue = action_rss_data->queue,
2083 .rss_conf = (struct rte_eth_rss_conf){
2084 .rss_key = action_rss_data->rss_key,
2085 .rss_key_len = sizeof(action_rss_data->rss_key),
2088 .rss_key = "testpmd's default RSS hash key",
/* Default queue list: identity mapping 0..num-1. */
2090 for (i = 0; i < action_rss_data->conf.num; ++i)
2091 action_rss_data->queue[i] = i;
2092 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2093 ctx->port != (portid_t)RTE_PORT_ALL) {
2094 struct rte_eth_dev_info info;
2096 rte_eth_dev_info_get(ctx->port, &info);
2097 action_rss_data->rss_conf.rss_key_len =
2098 RTE_MIN(sizeof(action_rss_data->rss_key),
2099 info.hash_key_size);
/* Hook the populated configuration into the action. */
2101 action->conf = &action_rss_data->conf;
2106 * Parse type field for RSS action.
2108 * Valid tokens are type field names and the "end" token.
/* Uses bit 16 of ctx->objdata as a "list started" flag: on the first
 * type token rss_hf is reset, then each recognized name ORs its bits in.
 * Re-queues ACTION_RSS_TYPE so an arbitrary number of names can follow. */
2111 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2112 const char *str, unsigned int len,
2113 void *buf, unsigned int size)
2115 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2116 struct action_rss_data *action_rss_data;
2122 if (ctx->curr != ACTION_RSS_TYPE)
2124 if (!(ctx->objdata >> 16) && ctx->object) {
2125 action_rss_data = ctx->object;
2126 action_rss_data->rss_conf.rss_hf = 0;
/* "end" terminates the list and clears the started flag. */
2128 if (!strcmp_partial("end", str, len)) {
2129 ctx->objdata &= 0xffff;
/* Look the name up in the global rss_type_table. */
2132 for (i = 0; rss_type_table[i].str; ++i)
2133 if (!strcmp_partial(rss_type_table[i].str, str, len))
2135 if (!rss_type_table[i].str)
2137 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
2139 if (ctx->next_num == RTE_DIM(ctx->next))
2141 ctx->next[ctx->next_num++] = next;
2144 action_rss_data = ctx->object;
2145 action_rss_data->rss_conf.rss_hf |= rss_type_table[i].rss_type;
2150 * Parse queue field for RSS action.
2152 * Valid tokens are queue indices and the "end" token.
/* The running queue count is kept in the upper 16 bits of ctx->objdata;
 * each index token is parsed by parse_int() into queue[i] via a
 * dynamically built ARGS_ENTRY_ARB, then ACTION_RSS_QUEUE is re-queued. */
2155 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2156 const char *str, unsigned int len,
2157 void *buf, unsigned int size)
2159 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2160 struct action_rss_data *action_rss_data;
2167 if (ctx->curr != ACTION_RSS_QUEUE)
2169 i = ctx->objdata >> 16;
2170 if (!strcmp_partial("end", str, len)) {
2171 ctx->objdata &= 0xffff;
/* Cap the list at ACTION_RSS_QUEUE_NUM entries. */
2174 if (i >= ACTION_RSS_QUEUE_NUM)
2177 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2178 i * sizeof(action_rss_data->queue[i]),
2179 sizeof(action_rss_data->queue[i]))))
2181 ret = parse_int(ctx, token, str, len, NULL, 0);
2187 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
2189 if (ctx->next_num == RTE_DIM(ctx->next))
2191 ctx->next[ctx->next_num++] = next;
/* On "end": publish the final count; an empty list gets queue = NULL. */
2194 action_rss_data = ctx->object;
2195 action_rss_data->conf.num = i;
2196 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2200 /** Parse tokens for destroy command. */
/* First call records the DESTROY command and anchors the rule-ID array
 * after the header; each later call bounds-checks and appends one slot
 * for parse_int() to fill. */
2202 parse_destroy(struct context *ctx, const struct token *token,
2203 const char *str, unsigned int len,
2204 void *buf, unsigned int size)
2206 struct buffer *out = buf;
2208 /* Token name must match. */
2209 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2211 /* Nothing else to do if there is no buffer. */
2214 if (!out->command) {
2215 if (ctx->curr != DESTROY)
2217 if (sizeof(*out) > size)
2219 out->command = ctx->curr;
2222 ctx->objmask = NULL;
2223 out->args.destroy.rule =
2224 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse a new entry that would overrun the output buffer. */
2228 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2229 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2232 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2233 ctx->objmask = NULL;
2237 /** Parse tokens for flush command. */
/* FLUSH takes no extra arguments beyond the port; only the command
 * header is recorded. */
2239 parse_flush(struct context *ctx, const struct token *token,
2240 const char *str, unsigned int len,
2241 void *buf, unsigned int size)
2243 struct buffer *out = buf;
2245 /* Token name must match. */
2246 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2248 /* Nothing else to do if there is no buffer. */
2251 if (!out->command) {
2252 if (ctx->curr != FLUSH)
2254 if (sizeof(*out) > size)
2256 out->command = ctx->curr;
2259 ctx->objmask = NULL;
2264 /** Parse tokens for query command. */
/* Records the QUERY command header; the rule ID and action arguments are
 * filled by the generic argument parsers that follow. */
2266 parse_query(struct context *ctx, const struct token *token,
2267 const char *str, unsigned int len,
2268 void *buf, unsigned int size)
2270 struct buffer *out = buf;
2272 /* Token name must match. */
2273 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2275 /* Nothing else to do if there is no buffer. */
2278 if (!out->command) {
2279 if (ctx->curr != QUERY)
2281 if (sizeof(*out) > size)
2283 out->command = ctx->curr;
2286 ctx->objmask = NULL;
2291 /** Parse action names. */
/* Matches @str against the names in next_action[] and stores the matched
 * action's identifier (from its priv data) into the object field
 * described by the popped argument. */
2293 parse_action(struct context *ctx, const struct token *token,
2294 const char *str, unsigned int len,
2295 void *buf, unsigned int size)
2297 struct buffer *out = buf;
2298 const struct arg *arg = pop_args(ctx);
2302 /* Argument is expected. */
2305 /* Parse action name. */
2306 for (i = 0; next_action[i]; ++i) {
2307 const struct parse_action_priv *priv;
2309 token = &token_list[next_action[i]];
2310 if (strcmp_partial(token->name, str, len))
2316 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: push the argument back for error recovery. */
2322 push_args(ctx, arg);
2326 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): first call records LIST and anchors the group
 * array after the header, later calls append bounds-checked slots. */
2328 parse_list(struct context *ctx, const struct token *token,
2329 const char *str, unsigned int len,
2330 void *buf, unsigned int size)
2332 struct buffer *out = buf;
2334 /* Token name must match. */
2335 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2337 /* Nothing else to do if there is no buffer. */
2340 if (!out->command) {
2341 if (ctx->curr != LIST)
2343 if (sizeof(*out) > size)
2345 out->command = ctx->curr;
2348 ctx->objmask = NULL;
2349 out->args.list.group =
2350 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse a new group entry that would overrun the buffer. */
2354 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2355 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2358 ctx->object = out->args.list.group + out->args.list.group_n++;
2359 ctx->objmask = NULL;
2363 /** Parse tokens for isolate command. */
/* Records the ISOLATE command header; the boolean "set" argument is
 * handled by the generic parsers. */
2365 parse_isolate(struct context *ctx, const struct token *token,
2366 const char *str, unsigned int len,
2367 void *buf, unsigned int size)
2369 struct buffer *out = buf;
2371 /* Token name must match. */
2372 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2374 /* Nothing else to do if there is no buffer. */
2377 if (!out->command) {
2378 if (ctx->curr != ISOLATE)
2380 if (sizeof(*out) > size)
2382 out->command = ctx->curr;
2385 ctx->objmask = NULL;
2391 * Parse signed/unsigned integers 8 to 64-bit long.
2393 * Last argument (ctx->args) is retrieved to determine integer type and
/* Converts @str with strtoimax/strtoumax per arg->sign, range-checks it
 * against arg->min/arg->max, then stores it at arg->offset in
 * ctx->object (and an all-ones mask in ctx->objmask when present),
 * honoring arg->hton for network byte order and arg->mask for
 * bit-field targets. */
2397 parse_int(struct context *ctx, const struct token *token,
2398 const char *str, unsigned int len,
2399 void *buf, unsigned int size)
2401 const struct arg *arg = pop_args(ctx);
2406 /* Argument is expected. */
2411 (uintmax_t)strtoimax(str, &end, 0) :
2412 strtoumax(str, &end, 0);
/* The entire token must have been consumed. */
2413 if (errno || (size_t)(end - str) != len)
2416 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2417 (intmax_t)u > (intmax_t)arg->max)) ||
2418 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field targets go through arg_entry_bf_fill(). */
2423 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2424 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2428 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination width; 24-bit fields are stored byte-wise. */
2432 case sizeof(uint8_t):
2433 *(uint8_t *)buf = u;
2435 case sizeof(uint16_t):
2436 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
2438 case sizeof(uint8_t [3]):
2439 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2441 ((uint8_t *)buf)[0] = u;
2442 ((uint8_t *)buf)[1] = u >> 8;
2443 ((uint8_t *)buf)[2] = u >> 16;
2447 ((uint8_t *)buf)[0] = u >> 16;
2448 ((uint8_t *)buf)[1] = u >> 8;
2449 ((uint8_t *)buf)[2] = u;
2451 case sizeof(uint32_t):
2452 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2454 case sizeof(uint64_t):
2455 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask when it is distinct from object. */
2460 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2462 buf = (uint8_t *)ctx->objmask + arg->offset;
2467 push_args(ctx, arg);
2474 * Three arguments (ctx->args) are retrieved from the stack to store data,
2475 * its actual length and address (in that order).
/* Copies the token into the data field (NUL-padded to its full size),
 * writes its length through parse_int() using the length argument, and
 * optionally stores the data field's address into the address argument. */
2478 parse_string(struct context *ctx, const struct token *token,
2479 const char *str, unsigned int len,
2480 void *buf, unsigned int size)
2482 const struct arg *arg_data = pop_args(ctx);
2483 const struct arg *arg_len = pop_args(ctx);
2484 const struct arg *arg_addr = pop_args(ctx);
2485 char tmp[16]; /* Ought to be enough. */
2488 /* Arguments are expected. */
/* Partial pops are undone so the stack stays consistent on error. */
2492 push_args(ctx, arg_data);
2496 push_args(ctx, arg_len);
2497 push_args(ctx, arg_data);
2500 size = arg_data->size;
2501 /* Bit-mask fill is not supported. */
2502 if (arg_data->mask || size < len)
2506 /* Let parse_int() fill length information first. */
2507 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2510 push_args(ctx, arg_len);
2511 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2516 buf = (uint8_t *)ctx->object + arg_data->offset;
2517 /* Output buffer is not necessarily NUL-terminated. */
2518 memcpy(buf, str, len);
2519 memset((uint8_t *)buf + len, 0x00, size - len);
2521 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2522 /* Save address if requested. */
2523 if (arg_addr->size) {
2524 memcpy((uint8_t *)ctx->object + arg_addr->offset,
2526 (uint8_t *)ctx->object + arg_data->offset
2530 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2532 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments. */
2538 push_args(ctx, arg_addr);
2539 push_args(ctx, arg_len);
2540 push_args(ctx, arg_data);
2545 * Parse a MAC address.
2547 * Last argument (ctx->args) is retrieved to determine storage size and
/* Uses cmdline_parse_etheraddr() into a temporary, then copies the six
 * bytes into ctx->object at arg->offset; objmask (when set) gets 0xff. */
2551 parse_mac_addr(struct context *ctx, const struct token *token,
2552 const char *str, unsigned int len,
2553 void *buf, unsigned int size)
2555 const struct arg *arg = pop_args(ctx);
2556 struct ether_addr tmp;
2560 /* Argument is expected. */
2564 /* Bit-mask fill is not supported. */
2565 if (arg->mask || size != sizeof(tmp))
2567 /* Only network endian is supported. */
2570 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2571 if (ret < 0 || (unsigned int)ret != len)
2575 buf = (uint8_t *)ctx->object + arg->offset;
2576 memcpy(buf, &tmp, size);
2578 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2581 push_args(ctx, arg);
2586 * Parse an IPv4 address.
2588 * Last argument (ctx->args) is retrieved to determine storage size and
/* Tries inet_pton(AF_INET) on a NUL-terminated copy of the token; on
 * failure falls back to parse_int() so a raw integer is also accepted. */
2592 parse_ipv4_addr(struct context *ctx, const struct token *token,
2593 const char *str, unsigned int len,
2594 void *buf, unsigned int size)
2596 const struct arg *arg = pop_args(ctx);
2601 /* Argument is expected. */
2605 /* Bit-mask fill is not supported. */
2606 if (arg->mask || size != sizeof(tmp))
2608 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; the token is not. */
2611 memcpy(str2, str, len);
2613 ret = inet_pton(AF_INET, str2, &tmp);
2615 /* Attempt integer parsing. */
2616 push_args(ctx, arg);
2617 return parse_int(ctx, token, str, len, buf, size);
2621 buf = (uint8_t *)ctx->object + arg->offset;
2622 memcpy(buf, &tmp, size);
2624 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2627 push_args(ctx, arg);
2632 * Parse an IPv6 address.
2634 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same shape as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback visible in this excerpt. */
2638 parse_ipv6_addr(struct context *ctx, const struct token *token,
2639 const char *str, unsigned int len,
2640 void *buf, unsigned int size)
2642 const struct arg *arg = pop_args(ctx);
2644 struct in6_addr tmp;
2648 /* Argument is expected. */
2652 /* Bit-mask fill is not supported. */
2653 if (arg->mask || size != sizeof(tmp))
2655 /* Only network endian is supported. */
2658 memcpy(str2, str, len);
2660 ret = inet_pton(AF_INET6, str2, &tmp);
2665 buf = (uint8_t *)ctx->object + arg->offset;
2666 memcpy(buf, &tmp, size);
2668 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2671 push_args(ctx, arg);
2675 /** Boolean values (even indices stand for false). */
/* NULL-terminated table of accepted boolean spellings; parse_boolean()
 * maps index parity to 0/1. The entries themselves are elided here. */
2676 static const char *const boolean_name[] = {
2686 * Parse a boolean value.
2688 * Last argument (ctx->args) is retrieved to determine storage size and
/* Maps a recognized boolean word to "1"/"0" by its parity in
 * boolean_name[], then defers storage to parse_int(); unrecognized
 * tokens are passed through as integers. */
2692 parse_boolean(struct context *ctx, const struct token *token,
2693 const char *str, unsigned int len,
2694 void *buf, unsigned int size)
2696 const struct arg *arg = pop_args(ctx);
2700 /* Argument is expected. */
2703 for (i = 0; boolean_name[i]; ++i)
2704 if (!strcmp_partial(boolean_name[i], str, len))
2706 /* Process token as integer. */
2708 str = i & 1 ? "1" : "0";
2709 push_args(ctx, arg);
2710 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen(str). */
2711 return ret > 0 ? (int)len : ret;
2714 /** Parse port and update context. */
/* Parses the port number into a scratch buffer via parse_int() and
 * records it in ctx->port so later completions can query the device. */
2716 parse_port(struct context *ctx, const struct token *token,
2717 const char *str, unsigned int len,
2718 void *buf, unsigned int size)
2720 struct buffer *out = &(struct buffer){ .port = 0 };
2728 ctx->objmask = NULL;
2729 size = sizeof(*out);
2731 ret = parse_int(ctx, token, str, len, out, size);
2733 ctx->port = out->port;
2739 /** No completion. */
/* Completion stub for tokens with free-form values. */
2741 comp_none(struct context *ctx, const struct token *token,
2742 unsigned int ent, char *buf, unsigned int size)
2752 /** Complete boolean values. */
/* With buf set, writes entry @ent from boolean_name[]; otherwise the
 * loop counts entries (cmdline completion protocol). */
2754 comp_boolean(struct context *ctx, const struct token *token,
2755 unsigned int ent, char *buf, unsigned int size)
2761 for (i = 0; boolean_name[i]; ++i)
2762 if (buf && i == ent)
2763 return snprintf(buf, size, "%s", boolean_name[i]);
2769 /** Complete action names. */
/* Same count-or-emit protocol as comp_boolean(), over next_action[]. */
2771 comp_action(struct context *ctx, const struct token *token,
2772 unsigned int ent, char *buf, unsigned int size)
2778 for (i = 0; next_action[i]; ++i)
2779 if (buf && i == ent)
2780 return snprintf(buf, size, "%s",
2781 token_list[next_action[i]].name);
2787 /** Complete available ports. */
/* Iterates attached ethdev ports; emits entry @ent or counts them. */
2789 comp_port(struct context *ctx, const struct token *token,
2790 unsigned int ent, char *buf, unsigned int size)
2797 RTE_ETH_FOREACH_DEV(p) {
2798 if (buf && i == ent)
2799 return snprintf(buf, size, "%u", p);
2807 /** Complete available rule IDs. */
/* Walks the flow list of the port recorded in ctx->port; requires a
 * valid, specific port (not RTE_PORT_ALL). */
2809 comp_rule_id(struct context *ctx, const struct token *token,
2810 unsigned int ent, char *buf, unsigned int size)
2813 struct rte_port *port;
2814 struct port_flow *pf;
2817 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2818 ctx->port == (portid_t)RTE_PORT_ALL)
2820 port = &ports[ctx->port];
2821 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2822 if (buf && i == ent)
2823 return snprintf(buf, size, "%u", pf->id);
2831 /** Complete type field for RSS action. */
/* Offers every rss_type_table[] name plus a trailing "end" entry. */
2833 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
2834 unsigned int ent, char *buf, unsigned int size)
2840 for (i = 0; rss_type_table[i].str; ++i)
2845 return snprintf(buf, size, "%s", rss_type_table[ent].str);
2847 return snprintf(buf, size, "end");
2851 /** Complete queue field for RSS action. */
/* Offers numeric queue indices plus a trailing "end" entry. */
2853 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2854 unsigned int ent, char *buf, unsigned int size)
2861 return snprintf(buf, size, "%u", ent);
2863 return snprintf(buf, size, "end");
2867 /** Internal context. */
/* Single shared parser state — the cmdline API gives callbacks no user
 * pointer, hence this file-scope instance. */
2868 static struct context cmd_flow_context;
2870 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the bottom of the file. */
2871 cmdline_parse_inst_t cmd_flow;
2873 /** Initialize context. */
/* Resets the fields a fresh parse depends on (stacks, indices, masks). */
2875 cmd_flow_context_init(struct context *ctx)
2877 /* A full memset() is not necessary. */
2887 ctx->objmask = NULL;
2890 /** Parse a token (cmdline API). */
/* Splits one whitespace/'#'-delimited token out of @src, matches it
 * against the candidate list on top of ctx->next[], invokes the winning
 * token's call() (or parse_default()), then pushes that token's own
 * next-token lists and argument descriptors for subsequent calls. */
2892 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2895 struct context *ctx = &cmd_flow_context;
2896 const struct token *token;
2897 const enum index *list;
2902 token = &token_list[ctx->curr];
2903 /* Check argument length. */
2906 for (len = 0; src[len]; ++len)
2907 if (src[len] == '#' || isspace(src[len]))
2911 /* Last argument and EOL detection. */
2912 for (i = len; src[i]; ++i)
2913 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2915 else if (!isspace(src[i])) {
2920 if (src[i] == '\r' || src[i] == '\n') {
2924 /* Initialize context if necessary. */
2925 if (!ctx->next_num) {
2928 ctx->next[ctx->next_num++] = token->next[0];
2930 /* Process argument through candidates. */
2931 ctx->prev = ctx->curr;
2932 list = ctx->next[ctx->next_num - 1];
2933 for (i = 0; list[i]; ++i) {
2934 const struct token *next = &token_list[list[i]];
2937 ctx->curr = list[i];
/* call() handles both matching and side effects; parse_default()
 * is the fallback for tokens without one. */
2939 tmp = next->call(ctx, next, src, len, result, size);
2941 tmp = parse_default(ctx, next, src, len, result, size);
2942 if (tmp == -1 || tmp != len)
2950 /* Push subsequent tokens if any. */
2952 for (i = 0; token->next[i]; ++i) {
2953 if (ctx->next_num == RTE_DIM(ctx->next))
2955 ctx->next[ctx->next_num++] = token->next[i];
2957 /* Push arguments if any. */
2959 for (i = 0; token->args[i]; ++i) {
2960 if (ctx->args_num == RTE_DIM(ctx->args))
2962 ctx->args[ctx->args_num++] = token->args[i];
2967 /** Return number of completion entries (cmdline API). */
/* When the current candidate list holds exactly one token with its own
 * completion callback, delegate to it; otherwise the list length is the
 * entry count. */
2969 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2971 struct context *ctx = &cmd_flow_context;
2972 const struct token *token = &token_list[ctx->curr];
2973 const enum index *list;
2977 /* Count number of tokens in current list. */
2979 list = ctx->next[ctx->next_num - 1];
2981 list = token->next[0];
2982 for (i = 0; list[i]; ++i)
2987 * If there is a single token, use its completion callback, otherwise
2988 * return the number of entries.
2990 token = &token_list[list[0]];
2991 if (i == 1 && token->comp) {
2992 /* Save index for cmd_flow_get_help(). */
2993 ctx->prev = list[0];
2994 return token->comp(ctx, token, 0, NULL, 0);
2999 /** Return a completion entry (cmdline API). */
/* Same single-token delegation as cmd_flow_complete_get_nb(); for plain
 * lists, entry @index is the candidate token's name. */
3001 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3002 char *dst, unsigned int size)
3004 struct context *ctx = &cmd_flow_context;
3005 const struct token *token = &token_list[ctx->curr];
3006 const enum index *list;
3010 /* Count number of tokens in current list. */
3012 list = ctx->next[ctx->next_num - 1];
3014 list = token->next[0];
3015 for (i = 0; list[i]; ++i)
3019 /* If there is a single token, use its completion callback. */
3020 token = &token_list[list[0]];
3021 if (i == 1 && token->comp) {
3022 /* Save index for cmd_flow_get_help(). */
3023 ctx->prev = list[0];
3024 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3026 /* Otherwise make sure the index is valid and use defaults. */
3029 token = &token_list[list[index]];
3030 snprintf(dst, size, "%s", token->name);
3031 /* Save index for cmd_flow_get_help(). */
3032 ctx->prev = list[index];
3038 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) so the help text
 * describes the token the user is actually completing. */
3040 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3042 struct context *ctx = &cmd_flow_context;
3043 const struct token *token = &token_list[ctx->prev];
3046 /* Set token type and update global help with details. */
3047 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
3049 cmd_flow.help_str = token->help;
3051 cmd_flow.help_str = token->name;
3055 /** Token definition template (cmdline API). */
/* Every dynamic token returned by cmd_flow_tok() shares this header, so
 * all parsing funnels through the four callbacks below. */
3056 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3057 .ops = &(struct cmdline_token_ops){
3058 .parse = cmd_flow_parse,
3059 .complete_get_nb = cmd_flow_complete_get_nb,
3060 .complete_get_elt = cmd_flow_complete_get_elt,
3061 .get_help = cmd_flow_get_help,
3066 /** Populate the next dynamic token. */
/* Called by the cmdline library for each token slot; resets the shared
 * context on the first slot and stops producing tokens when no
 * candidates remain or the command may legitimately end at EOL. */
3068 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3069 cmdline_parse_token_hdr_t **hdr_inst)
3071 struct context *ctx = &cmd_flow_context;
3073 /* Always reinitialize context before requesting the first token. */
3074 if (!(hdr_inst - cmd_flow.tokens))
3075 cmd_flow_context_init(ctx);
3076 /* Return NULL when no more tokens are expected. */
3077 if (!ctx->next_num && ctx->curr) {
3081 /* Determine if command should end here. */
3082 if (ctx->eol && ctx->last && ctx->next_num) {
3083 const enum index *list = ctx->next[ctx->next_num - 1];
3086 for (i = 0; list[i]; ++i) {
3093 *hdr = &cmd_flow_token_hdr;
3096 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command in @in to its port_flow_*() backend. */
3098 cmd_flow_parsed(const struct buffer *in)
3100 switch (in->command) {
3102 port_flow_validate(in->port, &in->args.vc.attr,
3103 in->args.vc.pattern, in->args.vc.actions);
3106 port_flow_create(in->port, &in->args.vc.attr,
3107 in->args.vc.pattern, in->args.vc.actions);
3110 port_flow_destroy(in->port, in->args.destroy.rule_n,
3111 in->args.destroy.rule);
3114 port_flow_flush(in->port);
3117 port_flow_query(in->port, in->args.query.rule,
3118 in->args.query.action);
3121 port_flow_list(in->port, in->args.list.group_n,
3122 in->args.list.group);
3125 port_flow_isolate(in->port, in->args.isolate.set);
3132 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as the token-header slot during generation and as the
 * parsed buffer once the command completes. */
3134 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3137 cmd_flow_tok(arg0, arg2);
3139 cmd_flow_parsed(arg0);
3142 /** Global parser instance (cmdline API). */
3143 cmdline_parse_inst_t cmd_flow = {
3145 .data = NULL, /**< Unused. */
3146 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3149 }, /**< Tokens are returned by cmd_flow_tok(). */