1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_ethdev.h>
18 #include <rte_byteorder.h>
19 #include <cmdline_parse.h>
20 #include <cmdline_parse_etheraddr.h>
25 /** Parser token indices. */
45 /* Top-level command. */
48 /* Sub-level commands. */
57 /* Destroy arguments. */
60 /* Query arguments. */
66 /* Validate/create arguments. */
72 /* Validate/create pattern. */
137 ITEM_E_TAG_GRP_ECID_B,
154 /* Validate/create actions. */
180 /** Size in bytes of the pattern[] field in struct rte_flow_item_raw accepted by this parser. */
181 #define ITEM_RAW_PATTERN_SIZE 36
183 /** Storage size for struct rte_flow_item_raw including pattern: header bytes up to pattern[] plus ITEM_RAW_PATTERN_SIZE bytes of pattern data. */
184 #define ITEM_RAW_SIZE \
185 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
187 /** Number of queue[] entries in struct rte_flow_action_rss; bounds how many queue indices the RSS action can store. */
188 #define ACTION_RSS_NUM 32
190 /** Storage size for struct rte_flow_action_rss including queues: header bytes up to queue[] plus room for ACTION_RSS_NUM queue entries. */
191 #define ACTION_RSS_SIZE \
192 (offsetof(struct rte_flow_action_rss, queue) + \
193 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
195 /** Maximum number of subsequent tokens and arguments on the parser context stacks (next[]/args[]); pushing beyond this depth fails. */
196 #define CTX_STACK_SIZE 16
198 /** Parser context. */
200 /** Stack of subsequent token lists to process. */
201 const enum index *next[CTX_STACK_SIZE];
202 /** Arguments for stacked tokens. */
203 const void *args[CTX_STACK_SIZE];
204 enum index curr; /**< Current token index. */
205 enum index prev; /**< Index of the last token seen. */
206 int next_num; /**< Number of entries in next[]. */
207 int args_num; /**< Number of entries in args[]. */
208 uint32_t eol:1; /**< EOL has been detected. */
209 uint32_t last:1; /**< No more arguments. */
210 portid_t port; /**< Current port ID (for completions). */
211 uint32_t objdata; /**< Object-specific data. */
212 void *object; /**< Address of current object for relative offsets. */
213 void *objmask; /**< Object a full mask must be written to. */
216 /** Token argument. */
218 uint32_t hton:1; /**< Use network byte ordering. */
219 uint32_t sign:1; /**< Value is signed. */
220 uint32_t offset; /**< Relative offset from ctx->object. */
221 uint32_t size; /**< Field size. */
222 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
225 /** Parser token definition. */
227 /** Type displayed during completion (defaults to "TOKEN"). */
229 /** Help displayed during completion (defaults to token name). */
231 /** Private data used by parser functions. */
234 * Lists of subsequent tokens to push on the stack. Each call to the
235 * parser consumes the last entry of that stack.
237 const enum index *const *next;
238 /** Arguments stack for subsequent tokens that need them. */
239 const struct arg *const *args;
241 * Token-processing callback, returns -1 in case of error, the
242 * length of the matched string otherwise. If NULL, attempts to
243 * match the token name.
245 * If buf is not NULL, the result should be stored in it according
246 * to context. An error is returned if not large enough.
248 int (*call)(struct context *ctx, const struct token *token,
249 const char *str, unsigned int len,
250 void *buf, unsigned int size);
252 * Callback that provides possible values for this token, used for
253 * completion. Returns -1 in case of error, the number of possible
254 * values otherwise. If NULL, the token name is used.
256 * If buf is not NULL, entry index ent is written to buf and the
257 * full length of the entry is returned (same behavior as
260 int (*comp)(struct context *ctx, const struct token *token,
261 unsigned int ent, char *buf, unsigned int size);
262 /** Mandatory token name, no default value. */
266 /** Static initializer for the next field: a NULL-terminated compound-literal array of NEXT_ENTRY() token lists. */
267 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
269 /** Static initializer for a NEXT() entry: a compound-literal token-index list terminated by ZERO. */
270 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
272 /** Static initializer for the args field: a NULL-terminated compound-literal array of struct arg pointers (see ARGS_ENTRY*()). */
273 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
275 /** Static initializer for ARGS() to target a field. */
276 #define ARGS_ENTRY(s, f) \
277 (&(const struct arg){ \
278 .offset = offsetof(s, f), \
279 .size = sizeof(((s *)0)->f), \
282 /** Static initializer for ARGS() to target a bit-field. */
283 #define ARGS_ENTRY_BF(s, f, b) \
284 (&(const struct arg){ \
286 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
289 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
290 #define ARGS_ENTRY_MASK(s, f, m) \
291 (&(const struct arg){ \
292 .offset = offsetof(s, f), \
293 .size = sizeof(((s *)0)->f), \
294 .mask = (const void *)(m), \
297 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
298 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
299 (&(const struct arg){ \
301 .offset = offsetof(s, f), \
302 .size = sizeof(((s *)0)->f), \
303 .mask = (const void *)(m), \
306 /** Static initializer for ARGS() to target a pointer. */
307 #define ARGS_ENTRY_PTR(s, f) \
308 (&(const struct arg){ \
309 .size = sizeof(*((s *)0)->f), \
312 /** Static initializer for ARGS() with arbitrary size. */
313 #define ARGS_ENTRY_USZ(s, f, sz) \
314 (&(const struct arg){ \
315 .offset = offsetof(s, f), \
319 /** Same as ARGS_ENTRY() using network byte ordering. */
320 #define ARGS_ENTRY_HTON(s, f) \
321 (&(const struct arg){ \
323 .offset = offsetof(s, f), \
324 .size = sizeof(((s *)0)->f), \
327 /** Parser output buffer layout expected by cmd_flow_parsed(). */
329 enum index command; /**< Flow command. */
330 portid_t port; /**< Affected port ID. */
333 struct rte_flow_attr attr;
334 struct rte_flow_item *pattern;
335 struct rte_flow_action *actions;
339 } vc; /**< Validate/create arguments. */
343 } destroy; /**< Destroy arguments. */
346 enum rte_flow_action_type action;
347 } query; /**< Query arguments. */
351 } list; /**< List arguments. */
354 } isolate; /**< Isolated mode arguments. */
355 } args; /**< Command arguments. */
358 /** Private data for pattern items. */
359 struct parse_item_priv {
360 enum rte_flow_item_type type; /**< Item type. */
361 uint32_t size; /**< Size of item specification structure. */
364 #define PRIV_ITEM(t, s) \
365 (&(const struct parse_item_priv){ \
366 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
370 /** Private data for actions. */
371 struct parse_action_priv {
372 enum rte_flow_action_type type; /**< Action type. */
373 uint32_t size; /**< Size of action configuration structure. */
376 #define PRIV_ACTION(t, s) \
377 (&(const struct parse_action_priv){ \
378 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
382 static const enum index next_vc_attr[] = {
391 static const enum index next_destroy_attr[] = {
397 static const enum index next_list_attr[] = {
403 static const enum index item_param[] = {
412 static const enum index next_item[] = {
442 static const enum index item_fuzzy[] = {
448 static const enum index item_any[] = {
454 static const enum index item_vf[] = {
460 static const enum index item_port[] = {
466 static const enum index item_raw[] = {
476 static const enum index item_eth[] = {
484 static const enum index item_vlan[] = {
494 static const enum index item_ipv4[] = {
504 static const enum index item_ipv6[] = {
515 static const enum index item_icmp[] = {
522 static const enum index item_udp[] = {
529 static const enum index item_tcp[] = {
537 static const enum index item_sctp[] = {
546 static const enum index item_vxlan[] = {
552 static const enum index item_e_tag[] = {
553 ITEM_E_TAG_GRP_ECID_B,
558 static const enum index item_nvgre[] = {
564 static const enum index item_mpls[] = {
570 static const enum index item_gre[] = {
576 static const enum index item_gtp[] = {
582 static const enum index item_geneve[] = {
589 static const enum index next_action[] = {
606 static const enum index action_mark[] = {
612 static const enum index action_queue[] = {
618 static const enum index action_dup[] = {
624 static const enum index action_rss[] = {
630 static const enum index action_vf[] = {
637 static const enum index action_meter[] = {
643 static int parse_init(struct context *, const struct token *,
644 const char *, unsigned int,
645 void *, unsigned int);
646 static int parse_vc(struct context *, const struct token *,
647 const char *, unsigned int,
648 void *, unsigned int);
649 static int parse_vc_spec(struct context *, const struct token *,
650 const char *, unsigned int, void *, unsigned int);
651 static int parse_vc_conf(struct context *, const struct token *,
652 const char *, unsigned int, void *, unsigned int);
653 static int parse_vc_action_rss_queue(struct context *, const struct token *,
654 const char *, unsigned int, void *,
656 static int parse_destroy(struct context *, const struct token *,
657 const char *, unsigned int,
658 void *, unsigned int);
659 static int parse_flush(struct context *, const struct token *,
660 const char *, unsigned int,
661 void *, unsigned int);
662 static int parse_query(struct context *, const struct token *,
663 const char *, unsigned int,
664 void *, unsigned int);
665 static int parse_action(struct context *, const struct token *,
666 const char *, unsigned int,
667 void *, unsigned int);
668 static int parse_list(struct context *, const struct token *,
669 const char *, unsigned int,
670 void *, unsigned int);
671 static int parse_isolate(struct context *, const struct token *,
672 const char *, unsigned int,
673 void *, unsigned int);
674 static int parse_int(struct context *, const struct token *,
675 const char *, unsigned int,
676 void *, unsigned int);
677 static int parse_prefix(struct context *, const struct token *,
678 const char *, unsigned int,
679 void *, unsigned int);
680 static int parse_boolean(struct context *, const struct token *,
681 const char *, unsigned int,
682 void *, unsigned int);
683 static int parse_string(struct context *, const struct token *,
684 const char *, unsigned int,
685 void *, unsigned int);
686 static int parse_mac_addr(struct context *, const struct token *,
687 const char *, unsigned int,
688 void *, unsigned int);
689 static int parse_ipv4_addr(struct context *, const struct token *,
690 const char *, unsigned int,
691 void *, unsigned int);
692 static int parse_ipv6_addr(struct context *, const struct token *,
693 const char *, unsigned int,
694 void *, unsigned int);
695 static int parse_port(struct context *, const struct token *,
696 const char *, unsigned int,
697 void *, unsigned int);
698 static int comp_none(struct context *, const struct token *,
699 unsigned int, char *, unsigned int);
700 static int comp_boolean(struct context *, const struct token *,
701 unsigned int, char *, unsigned int);
702 static int comp_action(struct context *, const struct token *,
703 unsigned int, char *, unsigned int);
704 static int comp_port(struct context *, const struct token *,
705 unsigned int, char *, unsigned int);
706 static int comp_rule_id(struct context *, const struct token *,
707 unsigned int, char *, unsigned int);
708 static int comp_vc_action_rss_queue(struct context *, const struct token *,
709 unsigned int, char *, unsigned int);
711 /** Token definitions. */
712 static const struct token token_list[] = {
713 /* Special tokens. */
716 .help = "null entry, abused as the entry point",
717 .next = NEXT(NEXT_ENTRY(FLOW)),
722 .help = "command may end here",
728 .help = "integer value",
733 .name = "{unsigned}",
735 .help = "unsigned integer value",
742 .help = "prefix length for bit-mask",
743 .call = parse_prefix,
749 .help = "any boolean value",
750 .call = parse_boolean,
751 .comp = comp_boolean,
756 .help = "fixed string",
757 .call = parse_string,
761 .name = "{MAC address}",
763 .help = "standard MAC address notation",
764 .call = parse_mac_addr,
768 .name = "{IPv4 address}",
769 .type = "IPV4 ADDRESS",
770 .help = "standard IPv4 address notation",
771 .call = parse_ipv4_addr,
775 .name = "{IPv6 address}",
776 .type = "IPV6 ADDRESS",
777 .help = "standard IPv6 address notation",
778 .call = parse_ipv6_addr,
784 .help = "rule identifier",
786 .comp = comp_rule_id,
791 .help = "port identifier",
796 .name = "{group_id}",
798 .help = "group identifier",
805 .help = "priority level",
809 /* Top-level command. */
812 .type = "{command} {port_id} [{arg} [...]]",
813 .help = "manage ingress/egress flow rules",
814 .next = NEXT(NEXT_ENTRY
824 /* Sub-level commands. */
827 .help = "check whether a flow rule can be created",
828 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
829 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
834 .help = "create a flow rule",
835 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
836 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
841 .help = "destroy specific flow rules",
842 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
843 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
844 .call = parse_destroy,
848 .help = "destroy all flow rules",
849 .next = NEXT(NEXT_ENTRY(PORT_ID)),
850 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
855 .help = "query an existing flow rule",
856 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
858 NEXT_ENTRY(PORT_ID)),
859 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
860 ARGS_ENTRY(struct buffer, args.query.rule),
861 ARGS_ENTRY(struct buffer, port)),
866 .help = "list existing flow rules",
867 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
868 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
873 .help = "restrict ingress traffic to the defined flow rules",
874 .next = NEXT(NEXT_ENTRY(BOOLEAN),
875 NEXT_ENTRY(PORT_ID)),
876 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
877 ARGS_ENTRY(struct buffer, port)),
878 .call = parse_isolate,
880 /* Destroy arguments. */
883 .help = "specify a rule identifier",
884 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
885 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
886 .call = parse_destroy,
888 /* Query arguments. */
892 .help = "action to query, must be part of the rule",
893 .call = parse_action,
896 /* List arguments. */
899 .help = "specify a group",
900 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
901 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
904 /* Validate/create attributes. */
907 .help = "specify a group",
908 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
909 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
914 .help = "specify a priority level",
915 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
916 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
921 .help = "affect rule to ingress",
922 .next = NEXT(next_vc_attr),
927 .help = "affect rule to egress",
928 .next = NEXT(next_vc_attr),
931 /* Validate/create pattern. */
934 .help = "submit a list of pattern items",
935 .next = NEXT(next_item),
940 .help = "match value perfectly (with full bit-mask)",
941 .call = parse_vc_spec,
943 [ITEM_PARAM_SPEC] = {
945 .help = "match value according to configured bit-mask",
946 .call = parse_vc_spec,
948 [ITEM_PARAM_LAST] = {
950 .help = "specify upper bound to establish a range",
951 .call = parse_vc_spec,
953 [ITEM_PARAM_MASK] = {
955 .help = "specify bit-mask with relevant bits set to one",
956 .call = parse_vc_spec,
958 [ITEM_PARAM_PREFIX] = {
960 .help = "generate bit-mask from a prefix length",
961 .call = parse_vc_spec,
965 .help = "specify next pattern item",
966 .next = NEXT(next_item),
970 .help = "end list of pattern items",
971 .priv = PRIV_ITEM(END, 0),
972 .next = NEXT(NEXT_ENTRY(ACTIONS)),
977 .help = "no-op pattern item",
978 .priv = PRIV_ITEM(VOID, 0),
979 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
984 .help = "perform actions when pattern does not match",
985 .priv = PRIV_ITEM(INVERT, 0),
986 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
991 .help = "match any protocol for the current layer",
992 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
993 .next = NEXT(item_any),
998 .help = "number of layers covered",
999 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1000 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1004 .help = "match packets addressed to the physical function",
1005 .priv = PRIV_ITEM(PF, 0),
1006 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1011 .help = "match packets addressed to a virtual function ID",
1012 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1013 .next = NEXT(item_vf),
1018 .help = "destination VF ID",
1019 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1020 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1024 .help = "device-specific physical port index to use",
1025 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1026 .next = NEXT(item_port),
1029 [ITEM_PORT_INDEX] = {
1031 .help = "physical port index",
1032 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1033 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1037 .help = "match an arbitrary byte string",
1038 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1039 .next = NEXT(item_raw),
1042 [ITEM_RAW_RELATIVE] = {
1044 .help = "look for pattern after the previous item",
1045 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1046 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1049 [ITEM_RAW_SEARCH] = {
1051 .help = "search pattern from offset (see also limit)",
1052 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1053 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1056 [ITEM_RAW_OFFSET] = {
1058 .help = "absolute or relative offset for pattern",
1059 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1060 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1062 [ITEM_RAW_LIMIT] = {
1064 .help = "search area limit for start of pattern",
1065 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1066 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1068 [ITEM_RAW_PATTERN] = {
1070 .help = "byte string to look for",
1071 .next = NEXT(item_raw,
1073 NEXT_ENTRY(ITEM_PARAM_IS,
1076 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1077 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1079 ITEM_RAW_PATTERN_SIZE)),
1083 .help = "match Ethernet header",
1084 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1085 .next = NEXT(item_eth),
1090 .help = "destination MAC",
1091 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1092 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1096 .help = "source MAC",
1097 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1098 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1102 .help = "EtherType",
1103 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1104 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1108 .help = "match 802.1Q/ad VLAN tag",
1109 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1110 .next = NEXT(item_vlan),
1113 [ITEM_VLAN_TPID] = {
1115 .help = "tag protocol identifier",
1116 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1117 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1121 .help = "tag control information",
1122 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1123 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1127 .help = "priority code point",
1128 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1129 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1134 .help = "drop eligible indicator",
1135 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1136 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1141 .help = "VLAN identifier",
1142 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1143 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1148 .help = "match IPv4 header",
1149 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1150 .next = NEXT(item_ipv4),
1155 .help = "type of service",
1156 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1157 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1158 hdr.type_of_service)),
1162 .help = "time to live",
1163 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1164 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1167 [ITEM_IPV4_PROTO] = {
1169 .help = "next protocol ID",
1170 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1171 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1172 hdr.next_proto_id)),
1176 .help = "source address",
1177 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1178 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1183 .help = "destination address",
1184 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1185 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1190 .help = "match IPv6 header",
1191 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1192 .next = NEXT(item_ipv6),
1197 .help = "traffic class",
1198 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1199 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1201 "\x0f\xf0\x00\x00")),
1203 [ITEM_IPV6_FLOW] = {
1205 .help = "flow label",
1206 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1207 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1209 "\x00\x0f\xff\xff")),
1211 [ITEM_IPV6_PROTO] = {
1213 .help = "protocol (next header)",
1214 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1215 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1220 .help = "hop limit",
1221 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1222 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1227 .help = "source address",
1228 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1229 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1234 .help = "destination address",
1235 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1236 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1241 .help = "match ICMP header",
1242 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1243 .next = NEXT(item_icmp),
1246 [ITEM_ICMP_TYPE] = {
1248 .help = "ICMP packet type",
1249 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1250 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1253 [ITEM_ICMP_CODE] = {
1255 .help = "ICMP packet code",
1256 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1257 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1262 .help = "match UDP header",
1263 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1264 .next = NEXT(item_udp),
1269 .help = "UDP source port",
1270 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1271 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1276 .help = "UDP destination port",
1277 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1278 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1283 .help = "match TCP header",
1284 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1285 .next = NEXT(item_tcp),
1290 .help = "TCP source port",
1291 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1292 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1297 .help = "TCP destination port",
1298 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1299 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1302 [ITEM_TCP_FLAGS] = {
1304 .help = "TCP flags",
1305 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1306 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1311 .help = "match SCTP header",
1312 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1313 .next = NEXT(item_sctp),
1318 .help = "SCTP source port",
1319 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1320 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1325 .help = "SCTP destination port",
1326 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1327 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1332 .help = "validation tag",
1333 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1334 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1337 [ITEM_SCTP_CKSUM] = {
1340 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1341 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1346 .help = "match VXLAN header",
1347 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1348 .next = NEXT(item_vxlan),
1351 [ITEM_VXLAN_VNI] = {
1353 .help = "VXLAN identifier",
1354 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1355 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1359 .help = "match E-Tag header",
1360 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1361 .next = NEXT(item_e_tag),
1364 [ITEM_E_TAG_GRP_ECID_B] = {
1365 .name = "grp_ecid_b",
1366 .help = "GRP and E-CID base",
1367 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1368 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1374 .help = "match NVGRE header",
1375 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1376 .next = NEXT(item_nvgre),
1379 [ITEM_NVGRE_TNI] = {
1381 .help = "virtual subnet ID",
1382 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1387 .help = "match MPLS header",
1388 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1389 .next = NEXT(item_mpls),
1392 [ITEM_MPLS_LABEL] = {
1394 .help = "MPLS label",
1395 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1396 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1402 .help = "match GRE header",
1403 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1404 .next = NEXT(item_gre),
1407 [ITEM_GRE_PROTO] = {
1409 .help = "GRE protocol type",
1410 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1411 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1416 .help = "fuzzy pattern match, expect faster than default",
1417 .priv = PRIV_ITEM(FUZZY,
1418 sizeof(struct rte_flow_item_fuzzy)),
1419 .next = NEXT(item_fuzzy),
1422 [ITEM_FUZZY_THRESH] = {
1424 .help = "match accuracy threshold",
1425 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1426 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1431 .help = "match GTP header",
1432 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1433 .next = NEXT(item_gtp),
1438 .help = "tunnel endpoint identifier",
1439 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1440 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1444 .help = "match GTP header",
1445 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1446 .next = NEXT(item_gtp),
1451 .help = "match GTP header",
1452 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1453 .next = NEXT(item_gtp),
1458 .help = "match GENEVE header",
1459 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1460 .next = NEXT(item_geneve),
1463 [ITEM_GENEVE_VNI] = {
1465 .help = "virtual network identifier",
1466 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1467 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1469 [ITEM_GENEVE_PROTO] = {
1471 .help = "GENEVE protocol type",
1472 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1473 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1477 /* Validate/create actions. */
1480 .help = "submit a list of associated actions",
1481 .next = NEXT(next_action),
1486 .help = "specify next action",
1487 .next = NEXT(next_action),
1491 .help = "end list of actions",
1492 .priv = PRIV_ACTION(END, 0),
1497 .help = "no-op action",
1498 .priv = PRIV_ACTION(VOID, 0),
1499 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1502 [ACTION_PASSTHRU] = {
1504 .help = "let subsequent rule process matched packets",
1505 .priv = PRIV_ACTION(PASSTHRU, 0),
1506 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1511 .help = "attach 32 bit value to packets",
1512 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1513 .next = NEXT(action_mark),
1516 [ACTION_MARK_ID] = {
1518 .help = "32 bit value to return with packets",
1519 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1520 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1521 .call = parse_vc_conf,
1525 .help = "flag packets",
1526 .priv = PRIV_ACTION(FLAG, 0),
1527 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1532 .help = "assign packets to a given queue index",
1533 .priv = PRIV_ACTION(QUEUE,
1534 sizeof(struct rte_flow_action_queue)),
1535 .next = NEXT(action_queue),
1538 [ACTION_QUEUE_INDEX] = {
1540 .help = "queue index to use",
1541 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1542 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1543 .call = parse_vc_conf,
1547 .help = "drop packets (note: passthru has priority)",
1548 .priv = PRIV_ACTION(DROP, 0),
1549 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1554 .help = "enable counters for this rule",
1555 .priv = PRIV_ACTION(COUNT, 0),
1556 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1561 .help = "duplicate packets to a given queue index",
1562 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1563 .next = NEXT(action_dup),
1566 [ACTION_DUP_INDEX] = {
1568 .help = "queue index to duplicate packets to",
1569 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1570 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1571 .call = parse_vc_conf,
1575 .help = "spread packets among several queues",
1576 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1577 .next = NEXT(action_rss),
1580 [ACTION_RSS_QUEUES] = {
1582 .help = "queue indices to use",
1583 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1584 .call = parse_vc_conf,
1586 [ACTION_RSS_QUEUE] = {
1588 .help = "queue index",
1589 .call = parse_vc_action_rss_queue,
1590 .comp = comp_vc_action_rss_queue,
1594 .help = "redirect packets to physical device function",
1595 .priv = PRIV_ACTION(PF, 0),
1596 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1601 .help = "redirect packets to virtual device function",
1602 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1603 .next = NEXT(action_vf),
1606 [ACTION_VF_ORIGINAL] = {
1608 .help = "use original VF ID if possible",
1609 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1610 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1612 .call = parse_vc_conf,
1616 .help = "VF ID to redirect packets to",
1617 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1618 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1619 .call = parse_vc_conf,
1623 .help = "meter the directed packets at given id",
1624 .priv = PRIV_ACTION(METER,
1625 sizeof(struct rte_flow_action_meter)),
1626 .next = NEXT(action_meter),
1629 [ACTION_METER_ID] = {
1631 .help = "meter id to use",
1632 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1633 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1634 .call = parse_vc_conf,
1638 /** Remove and return last entry from argument stack. */
1639 static const struct arg *
1640 pop_args(struct context *ctx)
1642 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1645 /** Add entry on top of the argument stack. */
1647 push_args(struct context *ctx, const struct arg *arg)
1649 if (ctx->args_num == CTX_STACK_SIZE)
1651 ctx->args[ctx->args_num++] = arg;
1655 /** Spread value into buffer according to bit-mask. */
1657 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1659 uint32_t i = arg->size;
1667 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1676 unsigned int shift = 0;
1677 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
1679 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1680 if (!(arg->mask[i] & (1 << shift)))
1685 *buf &= ~(1 << shift);
1686 *buf |= (val & 1) << shift;
1694 /** Compare a string with a partial one of a given length. */
1696 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1698 int r = strncmp(full, partial, partial_len);
1702 if (strlen(full) <= partial_len)
1704 return full[partial_len];
1708 * Parse a prefix length and generate a bit-mask.
1710 * Last argument (ctx->args) is retrieved to determine mask size, storage
1711 * location and whether the result must use network byte ordering.
1714 parse_prefix(struct context *ctx, const struct token *token,
1715 const char *str, unsigned int len,
1716 void *buf, unsigned int size)
1718 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n most-significant bits set (0..8). */
1719 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1726 /* Argument is expected. */
/* Parse the numeric prefix length; it must consume the whole token. */
1730 u = strtoumax(str, &end, 0);
1731 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments: probe fill size with a NULL destination. */
1736 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Spread value into object and all-ones into its mask counterpart. */
1745 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1746 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain byte arguments: the prefix must fit the storage size. */
1753 if (bytes > size || bytes + !!extra > size)
1757 buf = (uint8_t *)ctx->object + arg->offset;
1758 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little endian: full 0xff bytes go at the end of the field. */
1760 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1761 memset(buf, 0x00, size - bytes);
/* Partial byte (prefix not a multiple of 8) from the conv table. */
1763 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big endian: full 0xff bytes go at the start of the field. */
1767 memset(buf, 0xff, bytes);
1768 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1770 ((uint8_t *)buf)[bytes] = conv[extra];
/* Mark the whole field as relevant in the object mask. */
1773 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure paths the argument is pushed back for the next parser. */
1776 push_args(ctx, arg);
1780 /** Default parsing function for token name matching. */
1782 parse_default(struct context *ctx, const struct token *token,
1783 const char *str, unsigned int len,
1784 void *buf, unsigned int size)
/* Accept iff str is a prefix-match for the token's name. */
1789 if (strcmp_partial(token->name, str, len))
1794 /** Parse flow command, initialize output buffer for subsequent tokens. */
1796 parse_init(struct context *ctx, const struct token *token,
1797 const char *str, unsigned int len,
1798 void *buf, unsigned int size)
1800 struct buffer *out = buf;
1802 /* Token name must match. */
1803 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1805 /* Nothing else to do if there is no buffer. */
1808 /* Make sure buffer is large enough. */
1809 if (size < sizeof(*out))
1811 /* Initialize buffer. */
1812 memset(out, 0x00, sizeof(*out));
/* 0x22 poison makes use of uninitialized trailing space detectable. */
1813 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1816 ctx->objmask = NULL;
1820 /** Parse tokens for validate/create commands. */
/*
 * Handles the whole validate/create token stream: command header,
 * attributes (ingress/egress/...), then "pattern" and "actions" lists.
 * Items and actions grow upward from just past *out while their
 * spec/last/mask or configuration data grows downward from the end of
 * the buffer (out->args.vc.data).
 */
1822 parse_vc(struct context *ctx, const struct token *token,
1823 const char *str, unsigned int len,
1824 void *buf, unsigned int size)
1826 struct buffer *out = buf;
1830 /* Token name must match. */
1831 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1833 /* Nothing else to do if there is no buffer. */
/* First token of the command: record it and set up the data cursor. */
1836 if (!out->command) {
1837 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1839 if (sizeof(*out) > size)
1841 out->command = ctx->curr;
1844 ctx->objmask = NULL;
/* Data area starts at the end of the buffer and grows downward. */
1845 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens update the shared rte_flow_attr in place. */
1849 ctx->object = &out->args.vc.attr;
1850 ctx->objmask = NULL;
1851 switch (ctx->curr) {
1856 out->args.vc.attr.ingress = 1;
1859 out->args.vc.attr.egress = 1;
/* "pattern" keyword: place the item array right after *out. */
1862 out->args.vc.pattern =
1863 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1865 ctx->object = out->args.vc.pattern;
1866 ctx->objmask = NULL;
/* "actions" keyword: action array follows the parsed items. */
1869 out->args.vc.actions =
1870 (void *)RTE_ALIGN_CEIL((uintptr_t)
1871 (out->args.vc.pattern +
1872 out->args.vc.pattern_n),
1874 ctx->object = out->args.vc.actions;
1875 ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action name. */
1882 if (!out->args.vc.actions) {
1883 const struct parse_item_priv *priv = token->priv;
1884 struct rte_flow_item *item =
1885 out->args.vc.pattern + out->args.vc.pattern_n;
/* Each item reserves three objects: spec, last and mask. */
1887 data_size = priv->size * 3; /* spec, last, mask */
1888 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1889 (out->args.vc.data - data_size),
/* Fail when the two growing regions would collide. */
1891 if ((uint8_t *)item + sizeof(*item) > data)
1893 *item = (struct rte_flow_item){
1896 ++out->args.vc.pattern_n;
1898 ctx->objmask = NULL;
1900 const struct parse_action_priv *priv = token->priv;
1901 struct rte_flow_action *action =
1902 out->args.vc.actions + out->args.vc.actions_n;
/* Actions only need one configuration object. */
1904 data_size = priv->size; /* configuration */
1905 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1906 (out->args.vc.data - data_size),
1908 if ((uint8_t *)action + sizeof(*action) > data)
1910 *action = (struct rte_flow_action){
1913 ++out->args.vc.actions_n;
1914 ctx->object = action;
1915 ctx->objmask = NULL;
/* Commit the freshly reserved, zeroed data region. */
1917 memset(data, 0, data_size);
1918 out->args.vc.data = data;
1919 ctx->objdata = data_size;
1923 /** Parse pattern item parameter type. */
/*
 * Handles the "is"/"spec"/"last"/"mask"/"prefix" qualifiers and points
 * ctx->object at the right third of the item's reserved data region.
 */
1925 parse_vc_spec(struct context *ctx, const struct token *token,
1926 const char *str, unsigned int len,
1927 void *buf, unsigned int size)
1929 struct buffer *out = buf;
1930 struct rte_flow_item *item;
1936 /* Token name must match. */
1937 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1939 /* Parse parameter types. */
1940 switch (ctx->curr) {
1941 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1947 case ITEM_PARAM_SPEC:
1950 case ITEM_PARAM_LAST:
1953 case ITEM_PARAM_PREFIX:
1954 /* Modify next token to expect a prefix. */
1955 if (ctx->next_num < 2)
1957 ctx->next[ctx->next_num - 2] = prefix;
1959 case ITEM_PARAM_MASK:
1965 /* Nothing else to do if there is no buffer. */
1968 if (!out->args.vc.pattern_n)
/* Operate on the most recently added pattern item. */
1970 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1971 data_size = ctx->objdata / 3; /* spec, last, mask */
1972 /* Point to selected object. */
1973 ctx->object = out->args.vc.data + (data_size * index);
1975 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1976 item->mask = ctx->objmask;
1978 ctx->objmask = NULL;
1979 /* Update relevant item pointer. */
1980 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1985 /** Parse action configuration field. */
/*
 * Redirects ctx->object to the configuration region of the most
 * recently added action and links it through action->conf.
 */
1987 parse_vc_conf(struct context *ctx, const struct token *token,
1988 const char *str, unsigned int len,
1989 void *buf, unsigned int size)
1991 struct buffer *out = buf;
1992 struct rte_flow_action *action;
1995 /* Token name must match. */
1996 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1998 /* Nothing else to do if there is no buffer. */
2001 if (!out->args.vc.actions_n)
2003 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2004 /* Point to selected object. */
2005 ctx->object = out->args.vc.data;
/* Actions carry no mask counterpart. */
2006 ctx->objmask = NULL;
2007 /* Update configuration pointer. */
2008 action->conf = ctx->object;
2013 * Parse queue field for RSS action.
2015 * Valid tokens are queue indices and the "end" token.
2018 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2019 const char *str, unsigned int len,
2020 void *buf, unsigned int size)
2022 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2029 if (ctx->curr != ACTION_RSS_QUEUE)
/* Queue count so far lives in the upper 16 bits of objdata. */
2031 i = ctx->objdata >> 16;
2032 if (!strcmp_partial("end", str, len)) {
2033 ctx->objdata &= 0xffff;
/* Cap at the queue[] storage reserved by ACTION_RSS_SIZE. */
2036 if (i >= ACTION_RSS_NUM)
/* Delegate the numeric parsing of queue[i] to parse_int(). */
2038 if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
2040 ret = parse_int(ctx, token, str, len, NULL, 0)<
2046 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Queue a repeat of this token so more indices can follow. */
2048 if (ctx->next_num == RTE_DIM(ctx->next))
2050 ctx->next[ctx->next_num++] = next;
/* On "end", record the final queue count in the action itself. */
2053 ((struct rte_flow_action_rss *)ctx->object)->num = i;
2057 /** Parse tokens for destroy command. */
/*
 * First call records the command and places the rule-ID array after
 * *out; each subsequent token appends one rule ID slot.
 */
2059 parse_destroy(struct context *ctx, const struct token *token,
2060 const char *str, unsigned int len,
2061 void *buf, unsigned int size)
2063 struct buffer *out = buf;
2065 /* Token name must match. */
2066 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2068 /* Nothing else to do if there is no buffer. */
2071 if (!out->command) {
2072 if (ctx->curr != DESTROY)
2074 if (sizeof(*out) > size)
2076 out->command = ctx->curr;
2079 ctx->objmask = NULL;
2080 out->args.destroy.rule =
2081 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse IDs that would overflow the output buffer. */
2085 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2086 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2089 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2090 ctx->objmask = NULL;
2094 /** Parse tokens for flush command. */
2096 parse_flush(struct context *ctx, const struct token *token,
2097 const char *str, unsigned int len,
2098 void *buf, unsigned int size)
2100 struct buffer *out = buf;
2102 /* Token name must match. */
2103 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2105 /* Nothing else to do if there is no buffer. */
/* Only the initial FLUSH token carries any state to record. */
2108 if (!out->command) {
2109 if (ctx->curr != FLUSH)
2111 if (sizeof(*out) > size)
2113 out->command = ctx->curr;
2116 ctx->objmask = NULL;
2121 /** Parse tokens for query command. */
2123 parse_query(struct context *ctx, const struct token *token,
2124 const char *str, unsigned int len,
2125 void *buf, unsigned int size)
2127 struct buffer *out = buf;
2129 /* Token name must match. */
2130 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2132 /* Nothing else to do if there is no buffer. */
/* Record the command on its first token only. */
2135 if (!out->command) {
2136 if (ctx->curr != QUERY)
2138 if (sizeof(*out) > size)
2140 out->command = ctx->curr;
2143 ctx->objmask = NULL;
2148 /** Parse action names. */
/*
 * Matches str against the names in next_action[] and stores the
 * matching action's value into the popped argument's field.
 */
2150 parse_action(struct context *ctx, const struct token *token,
2151 const char *str, unsigned int len,
2152 void *buf, unsigned int size)
2154 struct buffer *out = buf;
2155 const struct arg *arg = pop_args(ctx);
2159 /* Argument is expected. */
2162 /* Parse action name. */
2163 for (i = 0; next_action[i]; ++i) {
2164 const struct parse_action_priv *priv;
2166 token = &token_list[next_action[i]];
2167 if (strcmp_partial(token->name, str, len))
2173 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: return the argument so another parser may consume it. */
2179 push_args(ctx, arg);
2183 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): group IDs accumulate in an array placed
 * right after *out.
 */
2185 parse_list(struct context *ctx, const struct token *token,
2186 const char *str, unsigned int len,
2187 void *buf, unsigned int size)
2189 struct buffer *out = buf;
2191 /* Token name must match. */
2192 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2194 /* Nothing else to do if there is no buffer. */
2197 if (!out->command) {
2198 if (ctx->curr != LIST)
2200 if (sizeof(*out) > size)
2202 out->command = ctx->curr;
2205 ctx->objmask = NULL;
2206 out->args.list.group =
2207 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse group IDs that would overflow the output buffer. */
2211 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2212 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2215 ctx->object = out->args.list.group + out->args.list.group_n++;
2216 ctx->objmask = NULL;
2220 /** Parse tokens for isolate command. */
2222 parse_isolate(struct context *ctx, const struct token *token,
2223 const char *str, unsigned int len,
2224 void *buf, unsigned int size)
2226 struct buffer *out = buf;
2228 /* Token name must match. */
2229 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2231 /* Nothing else to do if there is no buffer. */
/* Record the command on its first token only. */
2234 if (!out->command) {
2235 if (ctx->curr != ISOLATE)
2237 if (sizeof(*out) > size)
2239 out->command = ctx->curr;
2242 ctx->objmask = NULL;
2248 * Parse signed/unsigned integers 8 to 64-bit long.
2250 * Last argument (ctx->args) is retrieved to determine integer type and
2254 parse_int(struct context *ctx, const struct token *token,
2255 const char *str, unsigned int len,
2256 void *buf, unsigned int size)
2258 const struct arg *arg = pop_args(ctx);
2263 /* Argument is expected. */
/* Pick signed or unsigned conversion; token must be fully consumed. */
2268 (uintmax_t)strtoimax(str, &end, 0) :
2269 strtoumax(str, &end, 0);
2270 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments go through the mask-spreading helper. */
2275 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2276 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2280 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on storage width; arg->hton requests network byte order. */
2284 case sizeof(uint8_t):
2285 *(uint8_t *)buf = u;
2287 case sizeof(uint16_t):
2288 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNIs) are stored byte by byte. */
2290 case sizeof(uint8_t [3]):
2291 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2293 ((uint8_t *)buf)[0] = u;
2294 ((uint8_t *)buf)[1] = u >> 8;
2295 ((uint8_t *)buf)[2] = u >> 16;
2299 ((uint8_t *)buf)[0] = u >> 16;
2300 ((uint8_t *)buf)[1] = u >> 8;
2301 ((uint8_t *)buf)[2] = u;
2303 case sizeof(uint32_t):
2304 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2306 case sizeof(uint64_t):
2307 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is active. */
2312 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2314 buf = (uint8_t *)ctx->objmask + arg->offset;
/* On error, hand the argument back for any fallback parser. */
2319 push_args(ctx, arg);
2326 * Two arguments (ctx->args) are retrieved from the stack to store data and
2327 * its length (in that order).
2330 parse_string(struct context *ctx, const struct token *token,
2331 const char *str, unsigned int len,
2332 void *buf, unsigned int size)
2334 const struct arg *arg_data = pop_args(ctx);
2335 const struct arg *arg_len = pop_args(ctx);
/* Scratch space for rendering the decimal length below. */
2336 char tmp[16]; /* Ought to be enough. */
2339 /* Arguments are expected. */
/* arg_len missing: restore arg_data before bailing out. */
2343 push_args(ctx, arg_data);
2346 size = arg_data->size;
2347 /* Bit-mask fill is not supported. */
2348 if (arg_data->mask || size < len)
2352 /* Let parse_int() fill length information first. */
2353 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2356 push_args(ctx, arg_len);
2357 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2362 buf = (uint8_t *)ctx->object + arg_data->offset;
2363 /* Output buffer is not necessarily NUL-terminated. */
2364 memcpy(buf, str, len);
/* 0x55 poison marks the unused tail of the field. */
2365 memset((uint8_t *)buf + len, 0x55, size - len);
2367 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* Error path: restore both arguments in original stack order. */
2370 push_args(ctx, arg_len);
2371 push_args(ctx, arg_data);
2376 * Parse a MAC address.
2378 * Last argument (ctx->args) is retrieved to determine storage size and
2382 parse_mac_addr(struct context *ctx, const struct token *token,
2383 const char *str, unsigned int len,
2384 void *buf, unsigned int size)
2386 const struct arg *arg = pop_args(ctx);
2387 struct ether_addr tmp;
2391 /* Argument is expected. */
2395 /* Bit-mask fill is not supported. */
2396 if (arg->mask || size != sizeof(tmp))
2398 /* Only network endian is supported. */
/* Reuse the cmdline library's MAC parser; it must consume all of str. */
2401 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2402 if (ret < 0 || (unsigned int)ret != len)
2406 buf = (uint8_t *)ctx->object + arg->offset;
2407 memcpy(buf, &tmp, size);
2409 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2412 push_args(ctx, arg);
2417 * Parse an IPv4 address.
2419 * Last argument (ctx->args) is retrieved to determine storage size and
2423 parse_ipv4_addr(struct context *ctx, const struct token *token,
2424 const char *str, unsigned int len,
2425 void *buf, unsigned int size)
2427 const struct arg *arg = pop_args(ctx);
2432 /* Argument is expected. */
2436 /* Bit-mask fill is not supported. */
2437 if (arg->mask || size != sizeof(tmp))
2439 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2442 memcpy(str2, str, len);
2444 ret = inet_pton(AF_INET, str2, &tmp);
2446 /* Attempt integer parsing. */
/* Not dotted-quad: fall back to parse_int() with the same argument. */
2447 push_args(ctx, arg);
2448 return parse_int(ctx, token, str, len, buf, size);
2452 buf = (uint8_t *)ctx->object + arg->offset;
2453 memcpy(buf, &tmp, size);
2455 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2458 push_args(ctx, arg);
2463 * Parse an IPv6 address.
2465 * Last argument (ctx->args) is retrieved to determine storage size and
2469 parse_ipv6_addr(struct context *ctx, const struct token *token,
2470 const char *str, unsigned int len,
2471 void *buf, unsigned int size)
2473 const struct arg *arg = pop_args(ctx);
2475 struct in6_addr tmp;
2479 /* Argument is expected. */
2483 /* Bit-mask fill is not supported. */
2484 if (arg->mask || size != sizeof(tmp))
2486 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
2489 memcpy(str2, str, len)<
2491 ret = inet_pton(AF_INET6, str2, &tmp);
2496 buf = (uint8_t *)ctx->object + arg->offset;
2497 memcpy(buf, &tmp, size);
2499 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2502 push_args(ctx, arg);
2506 /** Boolean values (even indices stand for false). */
/* NOTE(review): array entries are elided in this excerpt; presumably
 * false/true name pairs terminated by NULL — confirm in full source. */
2507 static const char *const boolean_name[] = {
2516 * Parse a boolean value.
2518 * Last argument (ctx->args) is retrieved to determine storage size and
2522 parse_boolean(struct context *ctx, const struct token *token,
2523 const char *str, unsigned int len,
2524 void *buf, unsigned int size)
2526 const struct arg *arg = pop_args(ctx);
2530 /* Argument is expected. */
/* Look the token up among the known boolean spellings. */
2533 for (i = 0; boolean_name[i]; ++i)
2534 if (!strcmp_partial(boolean_name[i], str, len))
2536 /* Process token as integer. */
/* Matched name: odd indices mean true, even mean false. */
2537 if (boolean_name[i])
2538 str = i & 1 ? "1" : "0";
2539 push_args(ctx, arg);
2540 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not that of "0"/"1". */
2541 return ret > 0 ? (int)len : ret;
2544 /** Parse port and update context. */
/*
 * Parses the port ID into a scratch buffer when none is supplied and
 * caches the result in ctx->port for later completions.
 */
2546 parse_port(struct context *ctx, const struct token *token,
2547 const char *str, unsigned int len,
2548 void *buf, unsigned int size)
2550 struct buffer *out = &(struct buffer){ .port = 0 };
2558 ctx->objmask = NULL;
2559 size = sizeof(*out);
2561 ret = parse_int(ctx, token, str, len, out, size);
2563 ctx->port = out->port;
2569 /** No completion. */
/* Placeholder completion callback for tokens with free-form values. */
2571 comp_none(struct context *ctx, const struct token *token,
2572 unsigned int ent, char *buf, unsigned int size)
2582 /** Complete boolean values. */
/* With buf set, writes entry #ent; otherwise counts entries. */
2584 comp_boolean(struct context *ctx, const struct token *token,
2585 unsigned int ent, char *buf, unsigned int size)
2591 for (i = 0; boolean_name[i]; ++i)
2592 if (buf && i == ent)
2593 return snprintf(buf, size, "%s", boolean_name[i]);
2599 /** Complete action names. */
/* With buf set, writes entry #ent; otherwise counts entries. */
2601 comp_action(struct context *ctx, const struct token *token,
2602 unsigned int ent, char *buf, unsigned int size)
2608 for (i = 0; next_action[i]; ++i)
2609 if (buf && i == ent)
2610 return snprintf(buf, size, "%s",
2611 token_list[next_action[i]].name);
2617 /** Complete available ports. */
/* Enumerates attached ethdev ports; same count/emit convention. */
2619 comp_port(struct context *ctx, const struct token *token,
2620 unsigned int ent, char *buf, unsigned int size)
2627 RTE_ETH_FOREACH_DEV(p) {
2628 if (buf && i == ent)
2629 return snprintf(buf, size, "%u", p);
2637 /** Complete available rule IDs. */
/* Walks the flow list of the port cached in ctx->port by parse_port(). */
2639 comp_rule_id(struct context *ctx, const struct token *token,
2640 unsigned int ent, char *buf, unsigned int size)
2643 struct rte_port *port;
2644 struct port_flow *pf;
/* No completions without a valid, specific port. */
2647 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2648 ctx->port == (portid_t)RTE_PORT_ALL)
2650 port = &ports[ctx->port];
2651 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2652 if (buf && i == ent)
2653 return snprintf(buf, size, "%u", pf->id);
2661 /** Complete queue field for RSS action. */
/* Only suggests the "end" terminator; queue indices are free-form. */
2663 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2664 unsigned int ent, char *buf, unsigned int size)
2666 static const char *const str[] = { "", "end", NULL };
2671 for (i = 0; str[i] != NULL; ++i)
2672 if (buf && i == ent)
2673 return snprintf(buf, size, "%s", str[i]);
2679 /** Internal context. */
/* Single shared parser state: the cmdline API gives callbacks no
 * user data pointer, hence this file-scope instance. */
2680 static struct context cmd_flow_context;
2682 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file so the token
 * callbacks above can update its help_str. */
2683 cmdline_parse_inst_t cmd_flow;
2685 /** Initialize context. */
/* Resets only the fields the parser reads; cheaper than memset(). */
2687 cmd_flow_context_init(struct context *ctx)
2689 /* A full memset() is not necessary. */
2699 ctx->objmask = NULL;
2702 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by librte_cmdline for each input token. Measures
 * the token, detects EOL/comments, then tries every candidate token on
 * top of the ctx->next stack until one of their parsers accepts it.
 */
2704 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2707 struct context *ctx = &cmd_flow_context;
2708 const struct token *token;
2709 const enum index *list;
2714 token = &token_list[ctx->curr];
2715 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2718 for (len = 0; src[len]; ++len)
2719 if (src[len] == '#' || isspace(src[len]))
2723 /* Last argument and EOL detection. */
2724 for (i = len; src[i]; ++i)
2725 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2727 else if (!isspace(src[i])) {
2732 if (src[i] == '\r' || src[i] == '\n') {
2736 /* Initialize context if necessary. */
2737 if (!ctx->next_num) {
2740 ctx->next[ctx->next_num++] = token->next[0];
2742 /* Process argument through candidates. */
2743 ctx->prev = ctx->curr;
2744 list = ctx->next[ctx->next_num - 1];
2745 for (i = 0; list[i]; ++i) {
2746 const struct token *next = &token_list[list[i]];
2749 ctx->curr = list[i];
/* Tokens with a custom parser take precedence over name matching. */
2751 tmp = next->call(ctx, next, src, len, result, size);
2753 tmp = parse_default(ctx, next, src, len, result, size);
/* A parser must consume the whole token to claim it. */
2754 if (tmp == -1 || tmp != len)
2762 /* Push subsequent tokens if any. */
2764 for (i = 0; token->next[i]; ++i) {
2765 if (ctx->next_num == RTE_DIM(ctx->next))
2767 ctx->next[ctx->next_num++] = token->next[i];
2769 /* Push arguments if any. */
2771 for (i = 0; token->args[i]; ++i) {
2772 if (ctx->args_num == RTE_DIM(ctx->args))
2774 ctx->args[ctx->args_num++] = token->args[i];
2779 /** Return number of completion entries (cmdline API). */
2781 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2783 struct context *ctx = &cmd_flow_context;
2784 const struct token *token = &token_list[ctx->curr];
2785 const enum index *list;
2789 /* Count number of tokens in current list. */
2791 list = ctx->next[ctx->next_num - 1];
2793 list = token->next[0];
2794 for (i = 0; list[i]; ++i)
2799 * If there is a single token, use its completion callback, otherwise
2800 * return the number of entries.
2802 token = &token_list[list[0]];
2803 if (i == 1 && token->comp) {
2804 /* Save index for cmd_flow_get_help(). */
2805 ctx->prev = list[0];
/* NULL buffer asks the callback for its entry count only. */
2806 return token->comp(ctx, token, 0, NULL, 0);
2811 /** Return a completion entry (cmdline API). */
2813 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2814 char *dst, unsigned int size)
2816 struct context *ctx = &cmd_flow_context;
2817 const struct token *token = &token_list[ctx->curr];
2818 const enum index *list;
2822 /* Count number of tokens in current list. */
2824 list = ctx->next[ctx->next_num - 1];
2826 list = token->next[0];
2827 for (i = 0; list[i]; ++i)
2831 /* If there is a single token, use its completion callback. */
2832 token = &token_list[list[0]];
2833 if (i == 1 && token->comp) {
2834 /* Save index for cmd_flow_get_help(). */
2835 ctx->prev = list[0];
2836 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2838 /* Otherwise make sure the index is valid and use defaults. */
2841 token = &token_list[list[index]];
2842 snprintf(dst, size, "%s", token->name);
2843 /* Save index for cmd_flow_get_help(). */
2844 ctx->prev = list[index];
2848 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) to describe the
 * token the user is currently completing. */
2850 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2852 struct context *ctx = &cmd_flow_context;
2853 const struct token *token = &token_list[ctx->prev];
2858 /* Set token type and update global help with details. */
2859 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Fall back to the token's name when it has no help text. */
2861 cmd_flow.help_str = token->help;
2863 cmd_flow.help_str = token->name;
2867 /** Token definition template (cmdline API). */
/* Every dynamic token shares this header; all dispatch goes through
 * the four callbacks below, driven by cmd_flow_context. */
2868 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2869 .ops = &(struct cmdline_token_ops){
2870 .parse = cmd_flow_parse,
2871 .complete_get_nb = cmd_flow_complete_get_nb,
2872 .complete_get_elt = cmd_flow_complete_get_elt,
2873 .get_help = cmd_flow_get_help,
2878 /** Populate the next dynamic token. */
2880 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2881 cmdline_parse_token_hdr_t **hdr_inst)
2883 struct context *ctx = &cmd_flow_context;
2885 /* Always reinitialize context before requesting the first token. */
2886 if (!(hdr_inst - cmd_flow.tokens))
2887 cmd_flow_context_init(ctx);
2888 /* Return NULL when no more tokens are expected. */
2889 if (!ctx->next_num && ctx->curr) {
2893 /* Determine if command should end here. */
2894 if (ctx->eol && ctx->last && ctx->next_num) {
2895 const enum index *list = ctx->next[ctx->next_num - 1];
2898 for (i = 0; list[i]; ++i) {
/* All tokens share the same static header template. */
2905 *hdr = &cmd_flow_token_hdr;
2908 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command to its port_flow_*() implementation. */
2910 cmd_flow_parsed(const struct buffer *in)
2912 switch (in->command) {
2914 port_flow_validate(in->port, &in->args.vc.attr,
2915 in->args.vc.pattern, in->args.vc.actions);
2918 port_flow_create(in->port, &in->args.vc.attr,
2919 in->args.vc.pattern, in->args.vc.actions);
2922 port_flow_destroy(in->port, in->args.destroy.rule_n,
2923 in->args.destroy.rule);
2926 port_flow_flush(in->port);
2929 port_flow_query(in->port, in->args.query.rule,
2930 in->args.query.action);
2933 port_flow_list(in->port, in->args.list.group_n,
2934 in->args.list.group);
2937 port_flow_isolate(in->port, in->args.isolate.set);
2944 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as the token-header slot during generation and as the
 * parsed buffer once the command is complete. */
2946 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2949 cmd_flow_tok(arg0, arg2);
2951 cmd_flow_parsed(arg0);
2954 /** Global parser instance (cmdline API). */
2955 cmdline_parse_inst_t cmd_flow = {
2957 .data = NULL, /**< Unused. */
2958 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2961 }, /**< Tokens are returned by cmd_flow_tok(). */