1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
109 ITEM_VLAN_INNER_TYPE,
141 ITEM_E_TAG_GRP_ECID_B,
158 /* Validate/create actions. */
174 ACTION_RSS_FUNC_DEFAULT,
175 ACTION_RSS_FUNC_TOEPLITZ,
176 ACTION_RSS_FUNC_SIMPLE_XOR,
188 ACTION_PHY_PORT_ORIGINAL,
189 ACTION_PHY_PORT_INDEX,
191 ACTION_PORT_ID_ORIGINAL,
197 /** Maximum size for pattern in struct rte_flow_item_raw. */
198 #define ITEM_RAW_PATTERN_SIZE 40
200 /** Storage size for struct rte_flow_item_raw including pattern. */
201 #define ITEM_RAW_SIZE \
202 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
204 /** Maximum number of queue indices in struct rte_flow_action_rss. */
205 #define ACTION_RSS_QUEUE_NUM 32
207 /** Storage for struct rte_flow_action_rss including external data. */
208 struct action_rss_data {
209 struct rte_flow_action_rss conf;
210 uint8_t key[RSS_HASH_KEY_LENGTH];
211 uint16_t queue[ACTION_RSS_QUEUE_NUM];
214 /** Maximum number of subsequent tokens and arguments on the stack. */
215 #define CTX_STACK_SIZE 16
217 /** Parser context. */
219 /** Stack of subsequent token lists to process. */
220 const enum index *next[CTX_STACK_SIZE];
221 /** Arguments for stacked tokens. */
222 const void *args[CTX_STACK_SIZE];
223 enum index curr; /**< Current token index. */
224 enum index prev; /**< Index of the last token seen. */
225 int next_num; /**< Number of entries in next[]. */
226 int args_num; /**< Number of entries in args[]. */
227 uint32_t eol:1; /**< EOL has been detected. */
228 uint32_t last:1; /**< No more arguments. */
229 portid_t port; /**< Current port ID (for completions). */
230 uint32_t objdata; /**< Object-specific data. */
231 void *object; /**< Address of current object for relative offsets. */
232 void *objmask; /**< Object a full mask must be written to. */
235 /** Token argument. */
237 uint32_t hton:1; /**< Use network byte ordering. */
238 uint32_t sign:1; /**< Value is signed. */
239 uint32_t bounded:1; /**< Value is bounded. */
240 uintmax_t min; /**< Minimum value if bounded. */
241 uintmax_t max; /**< Maximum value if bounded. */
242 uint32_t offset; /**< Relative offset from ctx->object. */
243 uint32_t size; /**< Field size. */
244 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
247 /** Parser token definition. */
249 /** Type displayed during completion (defaults to "TOKEN"). */
251 /** Help displayed during completion (defaults to token name). */
253 /** Private data used by parser functions. */
256 * Lists of subsequent tokens to push on the stack. Each call to the
257 * parser consumes the last entry of that stack.
259 const enum index *const *next;
260 /** Arguments stack for subsequent tokens that need them. */
261 const struct arg *const *args;
263 * Token-processing callback, returns -1 in case of error, the
264 * length of the matched string otherwise. If NULL, attempts to
265 * match the token name.
267 * If buf is not NULL, the result should be stored in it according
268 * to context. An error is returned if not large enough.
270 int (*call)(struct context *ctx, const struct token *token,
271 const char *str, unsigned int len,
272 void *buf, unsigned int size);
274 * Callback that provides possible values for this token, used for
275 * completion. Returns -1 in case of error, the number of possible
276 * values otherwise. If NULL, the token name is used.
278 * If buf is not NULL, entry index ent is written to buf and the
279 * full length of the entry is returned (same behavior as
282 int (*comp)(struct context *ctx, const struct token *token,
283 unsigned int ent, char *buf, unsigned int size);
284 /** Mandatory token name, no default value. */
288 /** Static initializer for the next field. */
289 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
291 /** Static initializer for a NEXT() entry. */
292 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
294 /** Static initializer for the args field. */
295 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
297 /** Static initializer for ARGS() to target a field. */
298 #define ARGS_ENTRY(s, f) \
299 (&(const struct arg){ \
300 .offset = offsetof(s, f), \
301 .size = sizeof(((s *)0)->f), \
304 /** Static initializer for ARGS() to target a bit-field. */
305 #define ARGS_ENTRY_BF(s, f, b) \
306 (&(const struct arg){ \
308 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
311 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
312 #define ARGS_ENTRY_MASK(s, f, m) \
313 (&(const struct arg){ \
314 .offset = offsetof(s, f), \
315 .size = sizeof(((s *)0)->f), \
316 .mask = (const void *)(m), \
319 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
320 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
321 (&(const struct arg){ \
323 .offset = offsetof(s, f), \
324 .size = sizeof(((s *)0)->f), \
325 .mask = (const void *)(m), \
328 /** Static initializer for ARGS() to target a pointer. */
329 #define ARGS_ENTRY_PTR(s, f) \
330 (&(const struct arg){ \
331 .size = sizeof(*((s *)0)->f), \
334 /** Static initializer for ARGS() with arbitrary offset and size. */
335 #define ARGS_ENTRY_ARB(o, s) \
336 (&(const struct arg){ \
341 /** Same as ARGS_ENTRY_ARB() with bounded values. */
342 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
343 (&(const struct arg){ \
351 /** Same as ARGS_ENTRY() using network byte ordering. */
352 #define ARGS_ENTRY_HTON(s, f) \
353 (&(const struct arg){ \
355 .offset = offsetof(s, f), \
356 .size = sizeof(((s *)0)->f), \
359 /** Parser output buffer layout expected by cmd_flow_parsed(). */
361 enum index command; /**< Flow command. */
362 portid_t port; /**< Affected port ID. */
365 struct rte_flow_attr attr;
366 struct rte_flow_item *pattern;
367 struct rte_flow_action *actions;
371 } vc; /**< Validate/create arguments. */
375 } destroy; /**< Destroy arguments. */
378 enum rte_flow_action_type action;
379 } query; /**< Query arguments. */
383 } list; /**< List arguments. */
386 } isolate; /**< Isolated mode arguments. */
387 } args; /**< Command arguments. */
390 /** Private data for pattern items. */
391 struct parse_item_priv {
392 enum rte_flow_item_type type; /**< Item type. */
393 uint32_t size; /**< Size of item specification structure. */
396 #define PRIV_ITEM(t, s) \
397 (&(const struct parse_item_priv){ \
398 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
402 /** Private data for actions. */
403 struct parse_action_priv {
404 enum rte_flow_action_type type; /**< Action type. */
405 uint32_t size; /**< Size of action configuration structure. */
408 #define PRIV_ACTION(t, s) \
409 (&(const struct parse_action_priv){ \
410 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
414 static const enum index next_vc_attr[] = {
424 static const enum index next_destroy_attr[] = {
430 static const enum index next_list_attr[] = {
436 static const enum index item_param[] = {
445 static const enum index next_item[] = {
476 static const enum index item_fuzzy[] = {
482 static const enum index item_any[] = {
488 static const enum index item_vf[] = {
494 static const enum index item_phy_port[] = {
500 static const enum index item_port_id[] = {
506 static const enum index item_raw[] = {
516 static const enum index item_eth[] = {
524 static const enum index item_vlan[] = {
529 ITEM_VLAN_INNER_TYPE,
534 static const enum index item_ipv4[] = {
544 static const enum index item_ipv6[] = {
555 static const enum index item_icmp[] = {
562 static const enum index item_udp[] = {
569 static const enum index item_tcp[] = {
577 static const enum index item_sctp[] = {
586 static const enum index item_vxlan[] = {
592 static const enum index item_e_tag[] = {
593 ITEM_E_TAG_GRP_ECID_B,
598 static const enum index item_nvgre[] = {
604 static const enum index item_mpls[] = {
610 static const enum index item_gre[] = {
616 static const enum index item_gtp[] = {
622 static const enum index item_geneve[] = {
629 static const enum index next_action[] = {
647 static const enum index action_mark[] = {
653 static const enum index action_queue[] = {
659 static const enum index action_rss[] = {
670 static const enum index action_vf[] = {
677 static const enum index action_phy_port[] = {
678 ACTION_PHY_PORT_ORIGINAL,
679 ACTION_PHY_PORT_INDEX,
684 static const enum index action_port_id[] = {
685 ACTION_PORT_ID_ORIGINAL,
691 static const enum index action_meter[] = {
697 static int parse_init(struct context *, const struct token *,
698 const char *, unsigned int,
699 void *, unsigned int);
700 static int parse_vc(struct context *, const struct token *,
701 const char *, unsigned int,
702 void *, unsigned int);
703 static int parse_vc_spec(struct context *, const struct token *,
704 const char *, unsigned int, void *, unsigned int);
705 static int parse_vc_conf(struct context *, const struct token *,
706 const char *, unsigned int, void *, unsigned int);
707 static int parse_vc_action_rss(struct context *, const struct token *,
708 const char *, unsigned int, void *,
710 static int parse_vc_action_rss_func(struct context *, const struct token *,
711 const char *, unsigned int, void *,
713 static int parse_vc_action_rss_type(struct context *, const struct token *,
714 const char *, unsigned int, void *,
716 static int parse_vc_action_rss_queue(struct context *, const struct token *,
717 const char *, unsigned int, void *,
719 static int parse_destroy(struct context *, const struct token *,
720 const char *, unsigned int,
721 void *, unsigned int);
722 static int parse_flush(struct context *, const struct token *,
723 const char *, unsigned int,
724 void *, unsigned int);
725 static int parse_query(struct context *, const struct token *,
726 const char *, unsigned int,
727 void *, unsigned int);
728 static int parse_action(struct context *, const struct token *,
729 const char *, unsigned int,
730 void *, unsigned int);
731 static int parse_list(struct context *, const struct token *,
732 const char *, unsigned int,
733 void *, unsigned int);
734 static int parse_isolate(struct context *, const struct token *,
735 const char *, unsigned int,
736 void *, unsigned int);
737 static int parse_int(struct context *, const struct token *,
738 const char *, unsigned int,
739 void *, unsigned int);
740 static int parse_prefix(struct context *, const struct token *,
741 const char *, unsigned int,
742 void *, unsigned int);
743 static int parse_boolean(struct context *, const struct token *,
744 const char *, unsigned int,
745 void *, unsigned int);
746 static int parse_string(struct context *, const struct token *,
747 const char *, unsigned int,
748 void *, unsigned int);
749 static int parse_mac_addr(struct context *, const struct token *,
750 const char *, unsigned int,
751 void *, unsigned int);
752 static int parse_ipv4_addr(struct context *, const struct token *,
753 const char *, unsigned int,
754 void *, unsigned int);
755 static int parse_ipv6_addr(struct context *, const struct token *,
756 const char *, unsigned int,
757 void *, unsigned int);
758 static int parse_port(struct context *, const struct token *,
759 const char *, unsigned int,
760 void *, unsigned int);
761 static int comp_none(struct context *, const struct token *,
762 unsigned int, char *, unsigned int);
763 static int comp_boolean(struct context *, const struct token *,
764 unsigned int, char *, unsigned int);
765 static int comp_action(struct context *, const struct token *,
766 unsigned int, char *, unsigned int);
767 static int comp_port(struct context *, const struct token *,
768 unsigned int, char *, unsigned int);
769 static int comp_rule_id(struct context *, const struct token *,
770 unsigned int, char *, unsigned int);
771 static int comp_vc_action_rss_type(struct context *, const struct token *,
772 unsigned int, char *, unsigned int);
773 static int comp_vc_action_rss_queue(struct context *, const struct token *,
774 unsigned int, char *, unsigned int);
776 /** Token definitions. */
777 static const struct token token_list[] = {
778 /* Special tokens. */
781 .help = "null entry, abused as the entry point",
782 .next = NEXT(NEXT_ENTRY(FLOW)),
787 .help = "command may end here",
793 .help = "integer value",
798 .name = "{unsigned}",
800 .help = "unsigned integer value",
807 .help = "prefix length for bit-mask",
808 .call = parse_prefix,
814 .help = "any boolean value",
815 .call = parse_boolean,
816 .comp = comp_boolean,
821 .help = "fixed string",
822 .call = parse_string,
826 .name = "{MAC address}",
828 .help = "standard MAC address notation",
829 .call = parse_mac_addr,
833 .name = "{IPv4 address}",
834 .type = "IPV4 ADDRESS",
835 .help = "standard IPv4 address notation",
836 .call = parse_ipv4_addr,
840 .name = "{IPv6 address}",
841 .type = "IPV6 ADDRESS",
842 .help = "standard IPv6 address notation",
843 .call = parse_ipv6_addr,
849 .help = "rule identifier",
851 .comp = comp_rule_id,
856 .help = "port identifier",
861 .name = "{group_id}",
863 .help = "group identifier",
870 .help = "priority level",
874 /* Top-level command. */
877 .type = "{command} {port_id} [{arg} [...]]",
878 .help = "manage ingress/egress flow rules",
879 .next = NEXT(NEXT_ENTRY
889 /* Sub-level commands. */
892 .help = "check whether a flow rule can be created",
893 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
894 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
899 .help = "create a flow rule",
900 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
901 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
906 .help = "destroy specific flow rules",
907 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
908 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
909 .call = parse_destroy,
913 .help = "destroy all flow rules",
914 .next = NEXT(NEXT_ENTRY(PORT_ID)),
915 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
920 .help = "query an existing flow rule",
921 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
923 NEXT_ENTRY(PORT_ID)),
924 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
925 ARGS_ENTRY(struct buffer, args.query.rule),
926 ARGS_ENTRY(struct buffer, port)),
931 .help = "list existing flow rules",
932 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
933 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
938 .help = "restrict ingress traffic to the defined flow rules",
939 .next = NEXT(NEXT_ENTRY(BOOLEAN),
940 NEXT_ENTRY(PORT_ID)),
941 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
942 ARGS_ENTRY(struct buffer, port)),
943 .call = parse_isolate,
945 /* Destroy arguments. */
948 .help = "specify a rule identifier",
949 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
950 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
951 .call = parse_destroy,
953 /* Query arguments. */
957 .help = "action to query, must be part of the rule",
958 .call = parse_action,
961 /* List arguments. */
964 .help = "specify a group",
965 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
966 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
969 /* Validate/create attributes. */
972 .help = "specify a group",
973 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
974 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
979 .help = "specify a priority level",
980 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
981 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
986 .help = "affect rule to ingress",
987 .next = NEXT(next_vc_attr),
992 .help = "affect rule to egress",
993 .next = NEXT(next_vc_attr),
998 .help = "apply rule directly to endpoints found in pattern",
999 .next = NEXT(next_vc_attr),
1002 /* Validate/create pattern. */
1005 .help = "submit a list of pattern items",
1006 .next = NEXT(next_item),
1011 .help = "match value perfectly (with full bit-mask)",
1012 .call = parse_vc_spec,
1014 [ITEM_PARAM_SPEC] = {
1016 .help = "match value according to configured bit-mask",
1017 .call = parse_vc_spec,
1019 [ITEM_PARAM_LAST] = {
1021 .help = "specify upper bound to establish a range",
1022 .call = parse_vc_spec,
1024 [ITEM_PARAM_MASK] = {
1026 .help = "specify bit-mask with relevant bits set to one",
1027 .call = parse_vc_spec,
1029 [ITEM_PARAM_PREFIX] = {
1031 .help = "generate bit-mask from a prefix length",
1032 .call = parse_vc_spec,
1036 .help = "specify next pattern item",
1037 .next = NEXT(next_item),
1041 .help = "end list of pattern items",
1042 .priv = PRIV_ITEM(END, 0),
1043 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1048 .help = "no-op pattern item",
1049 .priv = PRIV_ITEM(VOID, 0),
1050 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1055 .help = "perform actions when pattern does not match",
1056 .priv = PRIV_ITEM(INVERT, 0),
1057 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1062 .help = "match any protocol for the current layer",
1063 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1064 .next = NEXT(item_any),
1069 .help = "number of layers covered",
1070 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1071 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1075 .help = "match traffic from/to the physical function",
1076 .priv = PRIV_ITEM(PF, 0),
1077 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1082 .help = "match traffic from/to a virtual function ID",
1083 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1084 .next = NEXT(item_vf),
1090 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1091 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1095 .help = "match traffic from/to a specific physical port",
1096 .priv = PRIV_ITEM(PHY_PORT,
1097 sizeof(struct rte_flow_item_phy_port)),
1098 .next = NEXT(item_phy_port),
1101 [ITEM_PHY_PORT_INDEX] = {
1103 .help = "physical port index",
1104 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1105 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1109 .help = "match traffic from/to a given DPDK port ID",
1110 .priv = PRIV_ITEM(PORT_ID,
1111 sizeof(struct rte_flow_item_port_id)),
1112 .next = NEXT(item_port_id),
1115 [ITEM_PORT_ID_ID] = {
1117 .help = "DPDK port ID",
1118 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1119 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1123 .help = "match an arbitrary byte string",
1124 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1125 .next = NEXT(item_raw),
1128 [ITEM_RAW_RELATIVE] = {
1130 .help = "look for pattern after the previous item",
1131 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1132 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1135 [ITEM_RAW_SEARCH] = {
1137 .help = "search pattern from offset (see also limit)",
1138 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1139 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1142 [ITEM_RAW_OFFSET] = {
1144 .help = "absolute or relative offset for pattern",
1145 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1146 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1148 [ITEM_RAW_LIMIT] = {
1150 .help = "search area limit for start of pattern",
1151 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1152 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1154 [ITEM_RAW_PATTERN] = {
1156 .help = "byte string to look for",
1157 .next = NEXT(item_raw,
1159 NEXT_ENTRY(ITEM_PARAM_IS,
1162 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1163 ARGS_ENTRY(struct rte_flow_item_raw, length),
1164 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1165 ITEM_RAW_PATTERN_SIZE)),
1169 .help = "match Ethernet header",
1170 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1171 .next = NEXT(item_eth),
1176 .help = "destination MAC",
1177 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1178 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1182 .help = "source MAC",
1183 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1184 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1188 .help = "EtherType",
1189 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1190 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1194 .help = "match 802.1Q/ad VLAN tag",
1195 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1196 .next = NEXT(item_vlan),
1201 .help = "tag control information",
1202 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1203 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1207 .help = "priority code point",
1208 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1209 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1214 .help = "drop eligible indicator",
1215 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1216 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1221 .help = "VLAN identifier",
1222 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1223 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1226 [ITEM_VLAN_INNER_TYPE] = {
1227 .name = "inner_type",
1228 .help = "inner EtherType",
1229 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1235 .help = "match IPv4 header",
1236 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1237 .next = NEXT(item_ipv4),
1242 .help = "type of service",
1243 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1245 hdr.type_of_service)),
1249 .help = "time to live",
1250 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1251 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1254 [ITEM_IPV4_PROTO] = {
1256 .help = "next protocol ID",
1257 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1259 hdr.next_proto_id)),
1263 .help = "source address",
1264 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1265 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1270 .help = "destination address",
1271 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1272 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1277 .help = "match IPv6 header",
1278 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1279 .next = NEXT(item_ipv6),
1284 .help = "traffic class",
1285 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1286 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1288 "\x0f\xf0\x00\x00")),
1290 [ITEM_IPV6_FLOW] = {
1292 .help = "flow label",
1293 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1294 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1296 "\x00\x0f\xff\xff")),
1298 [ITEM_IPV6_PROTO] = {
1300 .help = "protocol (next header)",
1301 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1302 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1307 .help = "hop limit",
1308 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1309 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1314 .help = "source address",
1315 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1316 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1321 .help = "destination address",
1322 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1323 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1328 .help = "match ICMP header",
1329 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1330 .next = NEXT(item_icmp),
1333 [ITEM_ICMP_TYPE] = {
1335 .help = "ICMP packet type",
1336 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1337 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1340 [ITEM_ICMP_CODE] = {
1342 .help = "ICMP packet code",
1343 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1344 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1349 .help = "match UDP header",
1350 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1351 .next = NEXT(item_udp),
1356 .help = "UDP source port",
1357 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1358 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1363 .help = "UDP destination port",
1364 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1365 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1370 .help = "match TCP header",
1371 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1372 .next = NEXT(item_tcp),
1377 .help = "TCP source port",
1378 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1379 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1384 .help = "TCP destination port",
1385 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1386 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1389 [ITEM_TCP_FLAGS] = {
1391 .help = "TCP flags",
1392 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1393 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1398 .help = "match SCTP header",
1399 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1400 .next = NEXT(item_sctp),
1405 .help = "SCTP source port",
1406 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1407 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1412 .help = "SCTP destination port",
1413 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1414 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1419 .help = "validation tag",
1420 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1421 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1424 [ITEM_SCTP_CKSUM] = {
1427 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1428 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1433 .help = "match VXLAN header",
1434 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1435 .next = NEXT(item_vxlan),
1438 [ITEM_VXLAN_VNI] = {
1440 .help = "VXLAN identifier",
1441 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1442 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1446 .help = "match E-Tag header",
1447 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1448 .next = NEXT(item_e_tag),
1451 [ITEM_E_TAG_GRP_ECID_B] = {
1452 .name = "grp_ecid_b",
1453 .help = "GRP and E-CID base",
1454 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1455 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1461 .help = "match NVGRE header",
1462 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1463 .next = NEXT(item_nvgre),
1466 [ITEM_NVGRE_TNI] = {
1468 .help = "virtual subnet ID",
1469 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1470 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1474 .help = "match MPLS header",
1475 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1476 .next = NEXT(item_mpls),
1479 [ITEM_MPLS_LABEL] = {
1481 .help = "MPLS label",
1482 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1483 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1489 .help = "match GRE header",
1490 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1491 .next = NEXT(item_gre),
1494 [ITEM_GRE_PROTO] = {
1496 .help = "GRE protocol type",
1497 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1498 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1503 .help = "fuzzy pattern match, expect faster than default",
1504 .priv = PRIV_ITEM(FUZZY,
1505 sizeof(struct rte_flow_item_fuzzy)),
1506 .next = NEXT(item_fuzzy),
1509 [ITEM_FUZZY_THRESH] = {
1511 .help = "match accuracy threshold",
1512 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1513 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1518 .help = "match GTP header",
1519 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1520 .next = NEXT(item_gtp),
1525 .help = "tunnel endpoint identifier",
1526 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1527 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1531 .help = "match GTP header",
1532 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1533 .next = NEXT(item_gtp),
1538 .help = "match GTP header",
1539 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1540 .next = NEXT(item_gtp),
1545 .help = "match GENEVE header",
1546 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1547 .next = NEXT(item_geneve),
1550 [ITEM_GENEVE_VNI] = {
1552 .help = "virtual network identifier",
1553 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1554 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1556 [ITEM_GENEVE_PROTO] = {
1558 .help = "GENEVE protocol type",
1559 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1560 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1564 /* Validate/create actions. */
1567 .help = "submit a list of associated actions",
1568 .next = NEXT(next_action),
1573 .help = "specify next action",
1574 .next = NEXT(next_action),
1578 .help = "end list of actions",
1579 .priv = PRIV_ACTION(END, 0),
1584 .help = "no-op action",
1585 .priv = PRIV_ACTION(VOID, 0),
1586 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1589 [ACTION_PASSTHRU] = {
1591 .help = "let subsequent rule process matched packets",
1592 .priv = PRIV_ACTION(PASSTHRU, 0),
1593 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1598 .help = "attach 32 bit value to packets",
1599 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1600 .next = NEXT(action_mark),
1603 [ACTION_MARK_ID] = {
1605 .help = "32 bit value to return with packets",
1606 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1607 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1608 .call = parse_vc_conf,
1612 .help = "flag packets",
1613 .priv = PRIV_ACTION(FLAG, 0),
1614 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1619 .help = "assign packets to a given queue index",
1620 .priv = PRIV_ACTION(QUEUE,
1621 sizeof(struct rte_flow_action_queue)),
1622 .next = NEXT(action_queue),
1625 [ACTION_QUEUE_INDEX] = {
1627 .help = "queue index to use",
1628 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1629 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1630 .call = parse_vc_conf,
1634 .help = "drop packets (note: passthru has priority)",
1635 .priv = PRIV_ACTION(DROP, 0),
1636 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1641 .help = "enable counters for this rule",
1642 .priv = PRIV_ACTION(COUNT, 0),
1643 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1648 .help = "spread packets among several queues",
1649 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1650 .next = NEXT(action_rss),
1651 .call = parse_vc_action_rss,
1653 [ACTION_RSS_FUNC] = {
1655 .help = "RSS hash function to apply",
1656 .next = NEXT(action_rss,
1657 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1658 ACTION_RSS_FUNC_TOEPLITZ,
1659 ACTION_RSS_FUNC_SIMPLE_XOR)),
1661 [ACTION_RSS_FUNC_DEFAULT] = {
1663 .help = "default hash function",
1664 .call = parse_vc_action_rss_func,
1666 [ACTION_RSS_FUNC_TOEPLITZ] = {
1668 .help = "Toeplitz hash function",
1669 .call = parse_vc_action_rss_func,
1671 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1672 .name = "simple_xor",
1673 .help = "simple XOR hash function",
1674 .call = parse_vc_action_rss_func,
1676 [ACTION_RSS_LEVEL] = {
1678 .help = "encapsulation level for \"types\"",
1679 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1680 .args = ARGS(ARGS_ENTRY_ARB
1681 (offsetof(struct action_rss_data, conf) +
1682 offsetof(struct rte_flow_action_rss, level),
1683 sizeof(((struct rte_flow_action_rss *)0)->
1686 [ACTION_RSS_TYPES] = {
1688 .help = "specific RSS hash types",
1689 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1691 [ACTION_RSS_TYPE] = {
1693 .help = "RSS hash type",
1694 .call = parse_vc_action_rss_type,
1695 .comp = comp_vc_action_rss_type,
1697 [ACTION_RSS_KEY] = {
1699 .help = "RSS hash key",
1700 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1701 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1703 (offsetof(struct action_rss_data, conf) +
1704 offsetof(struct rte_flow_action_rss, key_len),
1705 sizeof(((struct rte_flow_action_rss *)0)->
1707 ARGS_ENTRY(struct action_rss_data, key)),
1709 [ACTION_RSS_KEY_LEN] = {
1711 .help = "RSS hash key length in bytes",
1712 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1713 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1714 (offsetof(struct action_rss_data, conf) +
1715 offsetof(struct rte_flow_action_rss, key_len),
1716 sizeof(((struct rte_flow_action_rss *)0)->
1719 RSS_HASH_KEY_LENGTH)),
1721 [ACTION_RSS_QUEUES] = {
1723 .help = "queue indices to use",
1724 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1725 .call = parse_vc_conf,
1727 [ACTION_RSS_QUEUE] = {
1729 .help = "queue index",
1730 .call = parse_vc_action_rss_queue,
1731 .comp = comp_vc_action_rss_queue,
1735 .help = "direct traffic to physical function",
1736 .priv = PRIV_ACTION(PF, 0),
1737 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1742 .help = "direct traffic to a virtual function ID",
1743 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1744 .next = NEXT(action_vf),
1747 [ACTION_VF_ORIGINAL] = {
1749 .help = "use original VF ID if possible",
1750 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1751 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1753 .call = parse_vc_conf,
1758 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1759 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1760 .call = parse_vc_conf,
1762 [ACTION_PHY_PORT] = {
1764 .help = "direct packets to physical port index",
1765 .priv = PRIV_ACTION(PHY_PORT,
1766 sizeof(struct rte_flow_action_phy_port)),
1767 .next = NEXT(action_phy_port),
1770 [ACTION_PHY_PORT_ORIGINAL] = {
1772 .help = "use original port index if possible",
1773 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
1774 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
1776 .call = parse_vc_conf,
1778 [ACTION_PHY_PORT_INDEX] = {
1780 .help = "physical port index",
1781 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
1782 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
1784 .call = parse_vc_conf,
1786 [ACTION_PORT_ID] = {
1788 .help = "direct matching traffic to a given DPDK port ID",
1789 .priv = PRIV_ACTION(PORT_ID,
1790 sizeof(struct rte_flow_action_port_id)),
1791 .next = NEXT(action_port_id),
1794 [ACTION_PORT_ID_ORIGINAL] = {
1796 .help = "use original DPDK port ID if possible",
1797 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
1798 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
1800 .call = parse_vc_conf,
1802 [ACTION_PORT_ID_ID] = {
1804 .help = "DPDK port ID",
1805 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
1806 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
1807 .call = parse_vc_conf,
1811 .help = "meter the directed packets at given id",
1812 .priv = PRIV_ACTION(METER,
1813 sizeof(struct rte_flow_action_meter)),
1814 .next = NEXT(action_meter),
1817 [ACTION_METER_ID] = {
1819 .help = "meter id to use",
1820 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1821 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1822 .call = parse_vc_conf,
1826 /** Remove and return last entry from argument stack. */
1827 static const struct arg *
1828 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty; otherwise decrements args_num
 * and hands back the popped entry. */
1830 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1833 /** Add entry on top of the argument stack. */
1835 push_args(struct context *ctx, const struct arg *arg)
/* Rejects the push when the stack already holds CTX_STACK_SIZE entries.
 * NOTE(review): the return statements are elided in this chunk; confirm
 * the error/success return values against the full source. */
1837 if (ctx->args_num == CTX_STACK_SIZE)
1839 ctx->args[ctx->args_num++] = arg;
1843 /** Spread value into buffer according to bit-mask. */
/* Writes the low-order bits of "val" into the bit positions that are set
 * in arg->mask, leaving masked-out bits untouched. When dst is NULL the
 * visible callers (parse_prefix/parse_int) use it purely to measure the
 * mask, so no memory is written in that case — confirm in full source. */
1845 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1847 uint32_t i = arg->size;
/* Byte-walk direction depends on host endianness. */
1855 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1864 unsigned int shift = 0;
1865 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* For every set bit of the current mask byte, clear the destination bit
 * and substitute the next bit of val. */
1867 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1868 if (!(arg->mask[i] & (1 << shift)))
1873 *buf &= ~(1 << shift);
1874 *buf |= (val & 1) << shift;
1882 /** Compare a string with a partial one of a given length. */
/* Returns 0 only when "partial" matches a full prefix of "full" AND
 * "full" is no longer than partial_len; otherwise returns a non-zero
 * value usable like strcmp()'s. */
1884 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1886 int r = strncmp(full, partial, partial_len);
1890 if (strlen(full) <= partial_len)
/* Prefix matched but "full" is longer: report the first unmatched
 * character (always non-zero here). */
1892 return full[partial_len];
1896 * Parse a prefix length and generate a bit-mask.
1898 * Last argument (ctx->args) is retrieved to determine mask size, storage
1899 * location and whether the result must use network byte ordering.
1902 parse_prefix(struct context *ctx, const struct token *token,
1903 const char *str, unsigned int len,
1904 void *buf, unsigned int size)
1906 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set; used to fill
 * the partial byte of the generated mask. */
1907 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1914 /* Argument is expected. */
1918 u = strtoumax(str, &end, 0);
/* Reject trailing garbage and out-of-range values. */
1919 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments take a dedicated fill path. */
1924 extra = arg_entry_bf_fill(NULL, 0, arg);
1933 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1934 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1941 if (bytes > size || bytes + !!extra > size)
1945 buf = (uint8_t *)ctx->object + arg->offset;
/* Fill whole bytes with 0xff and the remainder with zeros; the byte
 * holding the partial prefix gets the conv[] pattern. Layout depends on
 * byte order. */
1946 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1948 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1949 memset(buf, 0x00, size - bytes);
1951 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1955 memset(buf, 0xff, bytes);
1956 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1958 ((uint8_t *)buf)[bytes] = conv[extra];
1961 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
1964 push_args(ctx, arg);
1968 /** Default parsing function for token name matching. */
/* Matches "str" (len bytes, possibly a prefix typed so far) against the
 * token's name; buf/size are unused here. */
1970 parse_default(struct context *ctx, const struct token *token,
1971 const char *str, unsigned int len,
1972 void *buf, unsigned int size)
1977 if (strcmp_partial(token->name, str, len))
1982 /** Parse flow command, initialize output buffer for subsequent tokens. */
1984 parse_init(struct context *ctx, const struct token *token,
1985 const char *str, unsigned int len,
1986 void *buf, unsigned int size)
1988 struct buffer *out = buf;
1990 /* Token name must match. */
1991 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1993 /* Nothing else to do if there is no buffer. */
1996 /* Make sure buffer is large enough. */
1997 if (size < sizeof(*out))
1999 /* Initialize buffer. */
2000 memset(out, 0x00, sizeof(*out))
/* 0x22 poison pattern makes use of uninitialized scratch space easy to
 * spot while debugging. */;
2001 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2004 ctx->objmask = NULL;
2008 /** Parse tokens for validate/create commands. */
/* Shared handler for the "validate"/"create" keywords, flow attributes,
 * the "pattern"/"actions" keywords and every item/action token. Pattern
 * items and actions grow upward from the buffer header while their
 * spec/last/mask (or action configuration) storage grows downward from
 * the end of the buffer (out->args.vc.data). */
2010 parse_vc(struct context *ctx, const struct token *token,
2011 const char *str, unsigned int len,
2012 void *buf, unsigned int size)
2014 struct buffer *out = buf;
2018 /* Token name must match. */
2019 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2021 /* Nothing else to do if there is no buffer. */
2024 if (!out->command) {
/* First token: record the command and seed the downward data pointer. */
2025 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2027 if (sizeof(*out) > size)
2029 out->command = ctx->curr;
2032 ctx->objmask = NULL;
2033 out->args.vc.data = (uint8_t *)out + size;
2037 ctx->object = &out->args.vc.attr;
2038 ctx->objmask = NULL;
2039 switch (ctx->curr) {
/* Attribute tokens just raise the corresponding flag. */
2044 out->args.vc.attr.ingress = 1;
2047 out->args.vc.attr.egress = 1;
2050 out->args.vc.attr.transfer = 1;
/* PATTERN: item array starts right after the buffer header. */
2053 out->args.vc.pattern =
2054 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2056 ctx->object = out->args.vc.pattern;
2057 ctx->objmask = NULL;
/* ACTIONS: action array starts after the parsed pattern items. */
2060 out->args.vc.actions =
2061 (void *)RTE_ALIGN_CEIL((uintptr_t)
2062 (out->args.vc.pattern +
2063 out->args.vc.pattern_n),
2065 ctx->object = out->args.vc.actions;
2066 ctx->objmask = NULL;
2073 if (!out->args.vc.actions) {
/* Still in the pattern section: reserve 3x item size for
 * spec/last/mask from the top of the buffer. */
2074 const struct parse_item_priv *priv = token->priv;
2075 struct rte_flow_item *item =
2076 out->args.vc.pattern + out->args.vc.pattern_n;
2078 data_size = priv->size * 3; /* spec, last, mask */
2079 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2080 (out->args.vc.data - data_size),
/* Fail if the upward item array would collide with the
 * downward-growing data region. */
2082 if ((uint8_t *)item + sizeof(*item) > data)
2084 *item = (struct rte_flow_item){
2087 ++out->args.vc.pattern_n;
2089 ctx->objmask = NULL;
/* Otherwise this token is an action: reserve its configuration. */
2091 const struct parse_action_priv *priv = token->priv;
2092 struct rte_flow_action *action =
2093 out->args.vc.actions + out->args.vc.actions_n;
2095 data_size = priv->size; /* configuration */
2096 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2097 (out->args.vc.data - data_size),
2099 if ((uint8_t *)action + sizeof(*action) > data)
2101 *action = (struct rte_flow_action){
2103 .conf = data_size ? data : NULL,
2105 ++out->args.vc.actions_n;
2106 ctx->object = action;
2107 ctx->objmask = NULL;
/* Zero the freshly reserved storage and move the data pointer down. */
2109 memset(data, 0, data_size);
2110 out->args.vc.data = data;
2111 ctx->objdata = data_size;
2115 /** Parse pattern item parameter type. */
/* Handles the is/spec/last/mask/prefix qualifiers of the current pattern
 * item and points ctx->object at the corresponding third of the storage
 * reserved by parse_vc() (objdata holds 3x the item size). */
2117 parse_vc_spec(struct context *ctx, const struct token *token,
2118 const char *str, unsigned int len,
2119 void *buf, unsigned int size)
2121 struct buffer *out = buf;
2122 struct rte_flow_item *item;
2128 /* Token name must match. */
2129 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2131 /* Parse parameter types. */
2132 switch (ctx->curr) {
2133 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2139 case ITEM_PARAM_SPEC:
2142 case ITEM_PARAM_LAST:
2145 case ITEM_PARAM_PREFIX:
2146 /* Modify next token to expect a prefix. */
2147 if (ctx->next_num < 2)
2149 ctx->next[ctx->next_num - 2] = prefix;
2151 case ITEM_PARAM_MASK:
2157 /* Nothing else to do if there is no buffer. */
2160 if (!out->args.vc.pattern_n)
2162 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2163 data_size = ctx->objdata / 3; /* spec, last, mask */
2164 /* Point to selected object: index selects spec (0), last (1) or
 * mask (2) within the reserved area. */
2165 ctx->object = out->args.vc.data + (data_size * index);
2167 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2168 item->mask = ctx->objmask;
2170 ctx->objmask = NULL;
2171 /* Update relevant item pointer. */
2172 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2177 /** Parse action configuration field. */
/* Generic handler for action sub-tokens: redirects ctx->object to the
 * configuration storage reserved for the current action. */
2179 parse_vc_conf(struct context *ctx, const struct token *token,
2180 const char *str, unsigned int len,
2181 void *buf, unsigned int size)
2183 struct buffer *out = buf;
2186 /* Token name must match. */
2187 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2189 /* Nothing else to do if there is no buffer. */
2192 /* Point to selected object. */
2193 ctx->object = out->args.vc.data;
2194 ctx->objmask = NULL;
2198 /** Parse RSS action. */
/* Delegates token handling to parse_vc(), then fills the freshly
 * reserved action_rss_data with defaults (testpmd key, identity queue
 * map, default hash function) before sub-tokens override them. */
2200 parse_vc_action_rss(struct context *ctx, const struct token *token,
2201 const char *str, unsigned int len,
2202 void *buf, unsigned int size)
2204 struct buffer *out = buf;
2205 struct rte_flow_action *action;
2206 struct action_rss_data *action_rss_data;
2210 ret = parse_vc(ctx, token, str, len, buf, size);
2213 /* Nothing else to do if there is no buffer. */
2216 if (!out->args.vc.actions_n)
2218 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2219 /* Point to selected object. */
2220 ctx->object = out->args.vc.data;
2221 ctx->objmask = NULL;
2222 /* Set up default configuration. */
2223 action_rss_data = ctx->object;
2224 *action_rss_data = (struct action_rss_data){
2225 .conf = (struct rte_flow_action_rss){
2226 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2229 .key_len = sizeof(action_rss_data->key),
2230 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2231 .key = action_rss_data->key,
2232 .queue = action_rss_data->queue,
2234 .key = "testpmd's default RSS hash key",
/* Default queue map is the identity: 0, 1, 2, ... */
2237 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2238 action_rss_data->queue[i] = i;
2239 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2240 ctx->port != (portid_t)RTE_PORT_ALL) {
2241 struct rte_eth_dev_info info;
/* Clamp key length to what the device actually supports. */
2243 rte_eth_dev_info_get(ctx->port, &info);
2244 action_rss_data->conf.key_len =
2245 RTE_MIN(sizeof(action_rss_data->key),
2246 info.hash_key_size);
2248 action->conf = &action_rss_data->conf;
2253 * Parse func field for RSS action.
2255 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2256 * ACTION_RSS_FUNC_* index that called this function.
2259 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2260 const char *str, unsigned int len,
2261 void *buf, unsigned int size)
2263 struct action_rss_data *action_rss_data;
2264 enum rte_eth_hash_function func;
2268 /* Token name must match. */
2269 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token to the corresponding hash function enum. */
2271 switch (ctx->curr) {
2272 case ACTION_RSS_FUNC_DEFAULT:
2273 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2275 case ACTION_RSS_FUNC_TOEPLITZ:
2276 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2278 case ACTION_RSS_FUNC_SIMPLE_XOR:
2279 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object points at the action_rss_data set up by
 * parse_vc_action_rss(). */
2286 action_rss_data = ctx->object;
2287 action_rss_data->conf.func = func;
2292 * Parse type field for RSS action.
2294 * Valid tokens are type field names and the "end" token.
2297 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2298 const char *str, unsigned int len,
2299 void *buf, unsigned int size)
2301 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2302 struct action_rss_data *action_rss_data;
2308 if (ctx->curr != ACTION_RSS_TYPE)
/* The upper 16 bits of objdata flag whether a type list has already
 * been started; on the first type token, reset accumulated types. */
2310 if (!(ctx->objdata >> 16) && ctx->object) {
2311 action_rss_data = ctx->object;
2312 action_rss_data->conf.types = 0;
2314 if (!strcmp_partial("end", str, len)) {
2315 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
2318 for (i = 0; rss_type_table[i].str; ++i)
2319 if (!strcmp_partial(rss_type_table[i].str, str, len))
2321 if (!rss_type_table[i].str)
2323 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so additional types can follow. */
2325 if (ctx->next_num == RTE_DIM(ctx->next))
2327 ctx->next[ctx->next_num++] = next;
2330 action_rss_data = ctx->object;
2331 action_rss_data->conf.types |= rss_type_table[i].rss_type;
2336 * Parse queue field for RSS action.
2338 * Valid tokens are queue indices and the "end" token.
2341 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2342 const char *str, unsigned int len,
2343 void *buf, unsigned int size)
2345 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2346 struct action_rss_data *action_rss_data;
2353 if (ctx->curr != ACTION_RSS_QUEUE)
/* Upper 16 bits of objdata count the queues parsed so far. */
2355 i = ctx->objdata >> 16;
2356 if (!strcmp_partial("end", str, len)) {
2357 ctx->objdata &= 0xffff;
2360 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an argument describing slot i of the queue[] array, then let
 * parse_int() fill it. */
2363 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2364 i * sizeof(action_rss_data->queue[i]),
2365 sizeof(action_rss_data->queue[i]))))
2367 ret = parse_int(ctx, token, str, len, NULL, 0);
2373 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so more queue indices can follow. */
2375 if (ctx->next_num == RTE_DIM(ctx->next))
2377 ctx->next[ctx->next_num++] = next;
2380 action_rss_data = ctx->object;
2381 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
2382 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2386 /** Parse tokens for destroy command. */
/* First call records the command and places the rule-ID array after the
 * buffer header; subsequent calls append one rule ID slot each. */
2388 parse_destroy(struct context *ctx, const struct token *token,
2389 const char *str, unsigned int len,
2390 void *buf, unsigned int size)
2392 struct buffer *out = buf;
2394 /* Token name must match. */
2395 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2397 /* Nothing else to do if there is no buffer. */
2400 if (!out->command) {
2401 if (ctx->curr != DESTROY)
2403 if (sizeof(*out) > size)
2405 out->command = ctx->curr;
2408 ctx->objmask = NULL;
2409 out->args.destroy.rule =
2410 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next rule ID still fits inside the buffer. */
2414 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2415 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2418 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2419 ctx->objmask = NULL;
2423 /** Parse tokens for flush command. */
/* Flush takes no extra arguments beyond the port; only the command word
 * itself is recorded here. */
2425 parse_flush(struct context *ctx, const struct token *token,
2426 const char *str, unsigned int len,
2427 void *buf, unsigned int size)
2429 struct buffer *out = buf;
2431 /* Token name must match. */
2432 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2434 /* Nothing else to do if there is no buffer. */
2437 if (!out->command) {
2438 if (ctx->curr != FLUSH)
2440 if (sizeof(*out) > size)
2442 out->command = ctx->curr;
2445 ctx->objmask = NULL;
2450 /** Parse tokens for query command. */
/* Records the command word; the rule ID and action name sub-tokens are
 * handled by parse_int()/parse_action() via the token table. */
2452 parse_query(struct context *ctx, const struct token *token,
2453 const char *str, unsigned int len,
2454 void *buf, unsigned int size)
2456 struct buffer *out = buf;
2458 /* Token name must match. */
2459 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2461 /* Nothing else to do if there is no buffer. */
2464 if (!out->command) {
2465 if (ctx->curr != QUERY)
2467 if (sizeof(*out) > size)
2469 out->command = ctx->curr;
2472 ctx->objmask = NULL;
2477 /** Parse action names. */
/* Resolves an action name (e.g. for "query") against next_action[] and
 * stores the matching action type through the popped argument
 * descriptor. */
2479 parse_action(struct context *ctx, const struct token *token,
2480 const char *str, unsigned int len,
2481 void *buf, unsigned int size)
2483 struct buffer *out = buf;
2484 const struct arg *arg = pop_args(ctx);
2488 /* Argument is expected. */
2491 /* Parse action name. */
2492 for (i = 0; next_action[i]; ++i) {
2493 const struct parse_action_priv *priv;
2495 token = &token_list[next_action[i]];
2496 if (strcmp_partial(token->name, str, len))
2502 memcpy((uint8_t *)ctx->object + arg->offset,
/* Error path: restore the argument for the caller. */
2508 push_args(ctx, arg);
2512 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): first call records the command and places the
 * group-ID array after the header, later calls append one group each. */
2514 parse_list(struct context *ctx, const struct token *token,
2515 const char *str, unsigned int len,
2516 void *buf, unsigned int size)
2518 struct buffer *out = buf;
2520 /* Token name must match. */
2521 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2523 /* Nothing else to do if there is no buffer. */
2526 if (!out->command) {
2527 if (ctx->curr != LIST)
2529 if (sizeof(*out) > size)
2531 out->command = ctx->curr;
2534 ctx->objmask = NULL;
2535 out->args.list.group =
2536 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure the next group ID still fits inside the buffer. */
2540 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2541 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2544 ctx->object = out->args.list.group + out->args.list.group_n++;
2545 ctx->objmask = NULL;
2549 /** Parse tokens for isolate command. */
/* Records the command word; the boolean "set" argument is filled in by
 * the token table via parse_boolean(). */
2551 parse_isolate(struct context *ctx, const struct token *token,
2552 const char *str, unsigned int len,
2553 void *buf, unsigned int size)
2555 struct buffer *out = buf;
2557 /* Token name must match. */
2558 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2560 /* Nothing else to do if there is no buffer. */
2563 if (!out->command) {
2564 if (ctx->curr != ISOLATE)
2566 if (sizeof(*out) > size)
2568 out->command = ctx->curr;
2571 ctx->objmask = NULL;
2577 * Parse signed/unsigned integers 8 to 64-bit long.
2579 * Last argument (ctx->args) is retrieved to determine integer type and
2583 parse_int(struct context *ctx, const struct token *token,
2584 const char *str, unsigned int len,
2585 void *buf, unsigned int size)
2587 const struct arg *arg = pop_args(ctx);
2592 /* Argument is expected. */
/* Parse as signed or unsigned depending on the argument descriptor. */
2597 (uintmax_t)strtoimax(str, &end, 0) :
2598 strtoumax(str, &end, 0);
2599 if (errno || (size_t)(end - str) != len)
/* Range check against arg->min/max, honoring signedness. */
2602 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2603 (intmax_t)u > (intmax_t)arg->max)) ||
2604 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments: spread value/all-ones mask through the mask. */
2609 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2610 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2614 buf = (uint8_t *)ctx->object + arg->offset;
/* Store with the width given by arg->size, byte-swapping when the
 * argument requests network order (arg->hton). */
2618 case sizeof(uint8_t):
2619 *(uint8_t *)buf = u;
2621 case sizeof(uint16_t):
2622 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VXLAN VNI) are stored byte by byte. */
2624 case sizeof(uint8_t [3]):
2625 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2627 ((uint8_t *)buf)[0] = u;
2628 ((uint8_t *)buf)[1] = u >> 8;
2629 ((uint8_t *)buf)[2] = u >> 16;
2633 ((uint8_t *)buf)[0] = u >> 16;
2634 ((uint8_t *)buf)[1] = u >> 8;
2635 ((uint8_t *)buf)[2] = u;
2637 case sizeof(uint32_t):
2638 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2640 case sizeof(uint64_t):
2641 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask when one is active and distinct. */
2646 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2648 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument for the caller. */
2653 push_args(ctx, arg);
2660 * Three arguments (ctx->args) are retrieved from the stack to store data,
2661 * its actual length and address (in that order).
2664 parse_string(struct context *ctx, const struct token *token,
2665 const char *str, unsigned int len,
2666 void *buf, unsigned int size)
2668 const struct arg *arg_data = pop_args(ctx);
2669 const struct arg *arg_len = pop_args(ctx);
2670 const struct arg *arg_addr = pop_args(ctx);
2671 char tmp[16]; /* Ought to be enough. */
2674 /* Arguments are expected. */
/* Partial pops: put back what was taken before bailing out. */
2678 push_args(ctx, arg_data);
2682 push_args(ctx, arg_len);
2683 push_args(ctx, arg_data);
2686 size = arg_data->size;
2687 /* Bit-mask fill is not supported. */
2688 if (arg_data->mask || size < len)
2692 /* Let parse_int() fill length information first. */
2693 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2696 push_args(ctx, arg_len);
2697 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
/* Copy the string and zero the remainder of the field. */
2702 buf = (uint8_t *)ctx->object + arg_data->offset;
2703 /* Output buffer is not necessarily NUL-terminated. */
2704 memcpy(buf, str, len);
2705 memset((uint8_t *)buf + len, 0x00, size - len);
2707 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2708 /* Save address if requested. */
2709 if (arg_addr->size) {
2710 memcpy((uint8_t *)ctx->object + arg_addr->offset,
2712 (uint8_t *)ctx->object + arg_data->offset
2716 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2718 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
2724 push_args(ctx, arg_addr);
2725 push_args(ctx, arg_len);
2726 push_args(ctx, arg_data);
2731 * Parse a MAC address.
2733 * Last argument (ctx->args) is retrieved to determine storage size and
2737 parse_mac_addr(struct context *ctx, const struct token *token,
2738 const char *str, unsigned int len,
2739 void *buf, unsigned int size)
2741 const struct arg *arg = pop_args(ctx);
2742 struct ether_addr tmp;
2746 /* Argument is expected. */
2750 /* Bit-mask fill is not supported. */
2751 if (arg->mask || size != sizeof(tmp))
2753 /* Only network endian is supported. */
/* cmdline helper accepts the usual xx:xx:xx:xx:xx:xx formats. */
2756 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2757 if (ret < 0 || (unsigned int)ret != len)
2761 buf = (uint8_t *)ctx->object + arg->offset;
2762 memcpy(buf, &tmp, size);
2764 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2767 push_args(ctx, arg);
2772 * Parse an IPv4 address.
2774 * Last argument (ctx->args) is retrieved to determine storage size and
2778 parse_ipv4_addr(struct context *ctx, const struct token *token,
2779 const char *str, unsigned int len,
2780 void *buf, unsigned int size)
2782 const struct arg *arg = pop_args(ctx);
2787 /* Argument is expected. */
2791 /* Bit-mask fill is not supported. */
2792 if (arg->mask || size != sizeof(tmp))
2794 /* Only network endian is supported. */
/* Copy to a NUL-terminated scratch buffer for inet_pton(). */
2797 memcpy(str2, str, len);
2799 ret = inet_pton(AF_INET, str2, &tmp);
2801 /* Attempt integer parsing. */
2802 push_args(ctx, arg);
2803 return parse_int(ctx, token, str, len, buf, size);
2807 buf = (uint8_t *)ctx->object + arg->offset;
2808 memcpy(buf, &tmp, size);
2810 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2813 push_args(ctx, arg);
2818 * Parse an IPv6 address.
2820 * Last argument (ctx->args) is retrieved to determine storage size and
2824 parse_ipv6_addr(struct context *ctx, const struct token *token,
2825 const char *str, unsigned int len,
2826 void *buf, unsigned int size)
2828 const struct arg *arg = pop_args(ctx);
2830 struct in6_addr tmp;
2834 /* Argument is expected. */
2838 /* Bit-mask fill is not supported. */
2839 if (arg->mask || size != sizeof(tmp))
2841 /* Only network endian is supported. */
/* Copy to a NUL-terminated scratch buffer for inet_pton(). */
2844 memcpy(str2, str, len);
2846 ret = inet_pton(AF_INET6, str2, &tmp);
2851 buf = (uint8_t *)ctx->object + arg->offset;
2852 memcpy(buf, &tmp, size);
2854 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument for the caller. */
2857 push_args(ctx, arg);
2861 /** Boolean values (even indices stand for false). */
2862 static const char *const boolean_name[] = {
/* NOTE(review): initializer entries are not visible in this chunk.
 * parse_boolean()/comp_boolean() iterate until a NULL entry and rely on
 * the even-false/odd-true index convention stated above — confirm the
 * list against the full source. */
2872 * Parse a boolean value.
2874 * Last argument (ctx->args) is retrieved to determine storage size and
2878 parse_boolean(struct context *ctx, const struct token *token,
2879 const char *str, unsigned int len,
2880 void *buf, unsigned int size)
2882 const struct arg *arg = pop_args(ctx);
2886 /* Argument is expected. */
/* Look the word up in boolean_name[]; even index => false. */
2889 for (i = 0; boolean_name[i]; ++i)
2890 if (!strcmp_partial(boolean_name[i], str, len))
2892 /* Process token as integer. */
2893 if (boolean_name[i])
2894 str = i & 1 ? "1" : "0";
2895 push_args(ctx, arg);
2896 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the original token, not of "0"/"1". */
2897 return ret > 0 ? (int)len : ret;
2900 /** Parse port and update context. */
/* Parses the port ID via parse_int() and caches it in ctx->port so
 * later tokens (e.g. RSS defaults, completions) can query the device.
 * A stack-allocated compound-literal buffer is used when the caller did
 * not supply one. */
2902 parse_port(struct context *ctx, const struct token *token,
2903 const char *str, unsigned int len,
2904 void *buf, unsigned int size)
2906 struct buffer *out = &(struct buffer){ .port = 0 };
2914 ctx->objmask = NULL;
2915 size = sizeof(*out);
2917 ret = parse_int(ctx, token, str, len, out, size);
2919 ctx->port = out->port;
2925 /** No completion. */
/* Placeholder completion callback for tokens with free-form input. */
2927 comp_none(struct context *ctx, const struct token *token,
2928 unsigned int ent, char *buf, unsigned int size)
2938 /** Complete boolean values. */
/* With buf == NULL, counts entries; otherwise writes entry "ent". */
2940 comp_boolean(struct context *ctx, const struct token *token,
2941 unsigned int ent, char *buf, unsigned int size)
2947 for (i = 0; boolean_name[i]; ++i)
2948 if (buf && i == ent)
2949 return snprintf(buf, size, "%s", boolean_name[i]);
2955 /** Complete action names. */
/* With buf == NULL, counts entries; otherwise writes entry "ent". */
2957 comp_action(struct context *ctx, const struct token *token,
2958 unsigned int ent, char *buf, unsigned int size)
2964 for (i = 0; next_action[i]; ++i)
2965 if (buf && i == ent)
2966 return snprintf(buf, size, "%s",
2967 token_list[next_action[i]].name);
2973 /** Complete available ports. */
/* Enumerates attached ethdev ports; with buf == NULL, counts them. */
2975 comp_port(struct context *ctx, const struct token *token,
2976 unsigned int ent, char *buf, unsigned int size)
2983 RTE_ETH_FOREACH_DEV(p) {
2984 if (buf && i == ent)
2985 return snprintf(buf, size, "%u", p);
2993 /** Complete available rule IDs. */
/* Walks the current port's flow list; requires a specific valid port
 * (RTE_PORT_ALL has no single rule list). */
2995 comp_rule_id(struct context *ctx, const struct token *token,
2996 unsigned int ent, char *buf, unsigned int size)
2999 struct rte_port *port;
3000 struct port_flow *pf;
3003 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
3004 ctx->port == (portid_t)RTE_PORT_ALL)
3006 port = &ports[ctx->port];
3007 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3008 if (buf && i == ent)
3009 return snprintf(buf, size, "%u", pf->id);
3017 /** Complete type field for RSS action. */
/* Offers every name in rss_type_table[] plus a trailing "end". */
3019 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
3020 unsigned int ent, char *buf, unsigned int size)
3026 for (i = 0; rss_type_table[i].str; ++i)
3031 return snprintf(buf, size, "%s", rss_type_table[ent].str);
3033 return snprintf(buf, size, "end");
3037 /** Complete queue field for RSS action. */
/* Offers plain queue indices plus a trailing "end". */
3039 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
3040 unsigned int ent, char *buf, unsigned int size)
3047 return snprintf(buf, size, "%u", ent);
3049 return snprintf(buf, size, "end");
3053 /** Internal context. */
/* Single shared parser state; the cmdline callbacks below all operate
 * on this instance. */
3054 static struct context cmd_flow_context;
3056 /** Global parser instance (cmdline API). */
/* Forward declaration; the definition appears at the end of the file. */
3057 cmdline_parse_inst_t cmd_flow;
3059 /** Initialize context. */
/* Resets only the fields the parser actually reads between commands. */
3061 cmd_flow_context_init(struct context *ctx)
3063 /* A full memset() is not necessary. */
3073 ctx->objmask = NULL;
3076 /** Parse a token (cmdline API). */
/* Entry point called by librte_cmdline for each whitespace-delimited
 * token: tokenizes "src", tries every candidate on top of the next[]
 * stack, and on a match pushes the matched token's own next-lists and
 * argument descriptors. */
3078 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
3081 struct context *ctx = &cmd_flow_context;
3082 const struct token *token;
3083 const enum index *list;
3088 token = &token_list[ctx->curr];
3089 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
3092 for (len = 0; src[len]; ++len)
3093 if (src[len] == '#' || isspace(src[len]))
3097 /* Last argument and EOL detection. */
3098 for (i = len; src[i]; ++i)
3099 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
3101 else if (!isspace(src[i])) {
3106 if (src[i] == '\r' || src[i] == '\n') {
3110 /* Initialize context if necessary. */
3111 if (!ctx->next_num) {
3114 ctx->next[ctx->next_num++] = token->next[0];
3116 /* Process argument through candidates. */
3117 ctx->prev = ctx->curr;
3118 list = ctx->next[ctx->next_num - 1];
3119 for (i = 0; list[i]; ++i) {
3120 const struct token *next = &token_list[list[i]];
3123 ctx->curr = list[i];
/* Tokens with a dedicated call() parse themselves; the rest fall
 * back to plain name matching. */
3125 tmp = next->call(ctx, next, src, len, result, size);
3127 tmp = parse_default(ctx, next, src, len, result, size);
3128 if (tmp == -1 || tmp != len)
3136 /* Push subsequent tokens if any. */
3138 for (i = 0; token->next[i]; ++i) {
3139 if (ctx->next_num == RTE_DIM(ctx->next))
3141 ctx->next[ctx->next_num++] = token->next[i];
3143 /* Push arguments if any. */
3145 for (i = 0; token->args[i]; ++i) {
3146 if (ctx->args_num == RTE_DIM(ctx->args))
3148 ctx->args[ctx->args_num++] = token->args[i];
3153 /** Return number of completion entries (cmdline API). */
3155 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
3157 struct context *ctx = &cmd_flow_context;
3158 const struct token *token = &token_list[ctx->curr];
3159 const enum index *list;
3163 /* Count number of tokens in current list. */
3165 list = ctx->next[ctx->next_num - 1];
3167 list = token->next[0];
3168 for (i = 0; list[i]; ++i)
3173 * If there is a single token, use its completion callback, otherwise
3174 * return the number of entries.
3176 token = &token_list[list[0]];
3177 if (i == 1 && token->comp) {
3178 /* Save index for cmd_flow_get_help(). */
3179 ctx->prev = list[0];
/* comp() with a NULL buffer returns the entry count. */
3180 return token->comp(ctx, token, 0, NULL, 0);
3185 /** Return a completion entry (cmdline API). */
/* Writes completion candidate "index" into dst; mirrors the counting
 * logic of cmd_flow_complete_get_nb(). */
3187 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3188 char *dst, unsigned int size)
3190 struct context *ctx = &cmd_flow_context;
3191 const struct token *token = &token_list[ctx->curr];
3192 const enum index *list;
3196 /* Count number of tokens in current list. */
3198 list = ctx->next[ctx->next_num - 1];
3200 list = token->next[0];
3201 for (i = 0; list[i]; ++i)
3205 /* If there is a single token, use its completion callback. */
3206 token = &token_list[list[0]];
3207 if (i == 1 && token->comp) {
3208 /* Save index for cmd_flow_get_help(). */
3209 ctx->prev = list[0];
3210 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3212 /* Otherwise make sure the index is valid and use defaults. */
3215 token = &token_list[list[index]];
3216 snprintf(dst, size, "%s", token->name);
3217 /* Save index for cmd_flow_get_help(). */
3218 ctx->prev = list[index];
3222 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) so that help
 * matches the entry the user is looking at, not the current parse
 * position. */
3224 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3226 struct context *ctx = &cmd_flow_context;
3227 const struct token *token = &token_list[ctx->prev];
3232 /* Set token type and update global help with details. */
3233 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
3235 cmd_flow.help_str = token->help;
3237 cmd_flow.help_str = token->name;
3241 /** Token definition template (cmdline API). */
/* Every dynamic token handed out by cmd_flow_tok() points at this
 * single header, so all parsing goes through the callbacks below. */
3242 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3243 .ops = &(struct cmdline_token_ops){
3244 .parse = cmd_flow_parse,
3245 .complete_get_nb = cmd_flow_complete_get_nb,
3246 .complete_get_elt = cmd_flow_complete_get_elt,
3247 .get_help = cmd_flow_get_help,
3252 /** Populate the next dynamic token. */
/* Called by cmd_flow_cb() once per token slot; hands back the shared
 * cmd_flow_token_hdr until the grammar says the command is complete. */
3254 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3255 cmdline_parse_token_hdr_t **hdr_inst)
3257 struct context *ctx = &cmd_flow_context;
3259 /* Always reinitialize context before requesting the first token. */
3260 if (!(hdr_inst - cmd_flow.tokens))
3261 cmd_flow_context_init(ctx);
3262 /* Return NULL when no more tokens are expected. */
3263 if (!ctx->next_num && ctx->curr) {
3267 /* Determine if command should end here. */
3268 if (ctx->eol && ctx->last && ctx->next_num) {
3269 const enum index *list = ctx->next[ctx->next_num - 1];
3272 for (i = 0; list[i]; ++i) {
3279 *hdr = &cmd_flow_token_hdr;
3282 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command in "in" to the matching port_flow_*()
 * helper implemented elsewhere in testpmd. */
3284 cmd_flow_parsed(const struct buffer *in)
3286 switch (in->command) {
3288 port_flow_validate(in->port, &in->args.vc.attr,
3289 in->args.vc.pattern, in->args.vc.actions);
3292 port_flow_create(in->port, &in->args.vc.attr,
3293 in->args.vc.pattern, in->args.vc.actions);
3296 port_flow_destroy(in->port, in->args.destroy.rule_n,
3297 in->args.destroy.rule);
3300 port_flow_flush(in->port);
3303 port_flow_query(in->port, in->args.query.rule,
3304 in->args.query.action);
3307 port_flow_list(in->port, in->args.list.group_n,
3308 in->args.list.group);
3311 port_flow_isolate(in->port, in->args.isolate.set);
3318 /** Token generator and output processing callback (cmdline API). */
/* Double-duty callback: arg0 is either the token slot (generation
 * phase) or the parsed buffer (execution phase). NOTE(review): the
 * branch selecting between the two calls is elided in this chunk —
 * confirm against the full source. */
3320 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3323 cmd_flow_tok(arg0, arg2);
3325 cmd_flow_parsed(arg0);
3328 /** Global parser instance (cmdline API). */
3329 cmdline_parse_inst_t cmd_flow = {
3331 .data = NULL, /**< Unused. */
3332 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3335 }, /**< Tokens are returned by cmd_flow_tok(). */