1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_ethdev.h>
18 #include <rte_byteorder.h>
19 #include <cmdline_parse.h>
20 #include <cmdline_parse_etheraddr.h>
25 /** Parser token indices. */
45 /* Top-level command. */
48 /* Sub-level commands. */
57 /* Destroy arguments. */
60 /* Query arguments. */
66 /* Validate/create arguments. */
72 /* Validate/create pattern. */
137 ITEM_E_TAG_GRP_ECID_B,
154 /* Validate/create actions. */
180 /** Size of pattern[] field in struct rte_flow_item_raw. */
181 #define ITEM_RAW_PATTERN_SIZE 36
183 /** Storage size for struct rte_flow_item_raw including pattern. */
184 #define ITEM_RAW_SIZE \
185 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
187 /** Maximum number of queue indices in struct rte_flow_action_rss. */
188 #define ACTION_RSS_QUEUE_NUM 32
190 /** Storage for struct rte_flow_action_rss including external data. */
191 union action_rss_data {
192 struct rte_flow_action_rss conf;
/* Raw bytes of conf up to (but excluding) its "queue" member. */
194 uint8_t conf_data[offsetof(struct rte_flow_action_rss, queue)];
/* Storage for up to ACTION_RSS_QUEUE_NUM queue indices (see above). */
195 uint16_t queue[ACTION_RSS_QUEUE_NUM];
196 struct rte_eth_rss_conf rss_conf;
/* NOTE(review): sampled view -- enclosing struct/union lines are
 * missing here; presumably these members back the pointers inside
 * conf (key/queues). Verify against the full file. */
197 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
201 /** Maximum number of subsequent tokens and arguments on the stack. */
202 #define CTX_STACK_SIZE 16
204 /** Parser context. */
206 /** Stack of subsequent token lists to process. */
207 const enum index *next[CTX_STACK_SIZE];
208 /** Arguments for stacked tokens. */
209 const void *args[CTX_STACK_SIZE];
210 enum index curr; /**< Current token index. */
211 enum index prev; /**< Index of the last token seen. */
212 int next_num; /**< Number of entries in next[]. */
213 int args_num; /**< Number of entries in args[]. */
214 uint32_t eol:1; /**< EOL has been detected. */
215 uint32_t last:1; /**< No more arguments. */
216 portid_t port; /**< Current port ID (for completions). */
217 uint32_t objdata; /**< Object-specific data. */
218 void *object; /**< Address of current object for relative offsets. */
219 void *objmask; /**< Object a full mask must be written to. */
222 /** Token argument. */
224 uint32_t hton:1; /**< Use network byte ordering. */
225 uint32_t sign:1; /**< Value is signed. */
226 uint32_t offset; /**< Relative offset from ctx->object. */
227 uint32_t size; /**< Field size. */
228 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
231 /** Parser token definition. */
233 /** Type displayed during completion (defaults to "TOKEN"). */
235 /** Help displayed during completion (defaults to token name). */
237 /** Private data used by parser functions. */
240 * Lists of subsequent tokens to push on the stack. Each call to the
241 * parser consumes the last entry of that stack.
243 const enum index *const *next;
244 /** Arguments stack for subsequent tokens that need them. */
245 const struct arg *const *args;
247 * Token-processing callback, returns -1 in case of error, the
248 * length of the matched string otherwise. If NULL, attempts to
249 * match the token name.
251 * If buf is not NULL, the result should be stored in it according
252 * to context. An error is returned if not large enough.
254 int (*call)(struct context *ctx, const struct token *token,
255 const char *str, unsigned int len,
256 void *buf, unsigned int size);
258 * Callback that provides possible values for this token, used for
259 * completion. Returns -1 in case of error, the number of possible
260 * values otherwise. If NULL, the token name is used.
262 * If buf is not NULL, entry index ent is written to buf and the
263 * full length of the entry is returned (same behavior as
266 int (*comp)(struct context *ctx, const struct token *token,
267 unsigned int ent, char *buf, unsigned int size);
268 /** Mandatory token name, no default value. */
272 /** Static initializer for the next field. */
273 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
275 /** Static initializer for a NEXT() entry. */
276 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
278 /** Static initializer for the args field. */
279 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
281 /** Static initializer for ARGS() to target a field. */
282 #define ARGS_ENTRY(s, f) \
283 (&(const struct arg){ \
284 .offset = offsetof(s, f), \
285 .size = sizeof(((s *)0)->f), \
288 /** Static initializer for ARGS() to target a bit-field. */
289 #define ARGS_ENTRY_BF(s, f, b) \
290 (&(const struct arg){ \
292 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
295 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
296 #define ARGS_ENTRY_MASK(s, f, m) \
297 (&(const struct arg){ \
298 .offset = offsetof(s, f), \
299 .size = sizeof(((s *)0)->f), \
300 .mask = (const void *)(m), \
303 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
304 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
305 (&(const struct arg){ \
307 .offset = offsetof(s, f), \
308 .size = sizeof(((s *)0)->f), \
309 .mask = (const void *)(m), \
312 /** Static initializer for ARGS() to target a pointer. */
313 #define ARGS_ENTRY_PTR(s, f) \
314 (&(const struct arg){ \
315 .size = sizeof(*((s *)0)->f), \
318 /** Static initializer for ARGS() with arbitrary size. */
319 #define ARGS_ENTRY_USZ(s, f, sz) \
320 (&(const struct arg){ \
321 .offset = offsetof(s, f), \
325 /** Static initializer for ARGS() with arbitrary offset and size. */
326 #define ARGS_ENTRY_ARB(o, s) \
327 (&(const struct arg){ \
332 /** Same as ARGS_ENTRY() using network byte ordering. */
333 #define ARGS_ENTRY_HTON(s, f) \
334 (&(const struct arg){ \
336 .offset = offsetof(s, f), \
337 .size = sizeof(((s *)0)->f), \
340 /** Parser output buffer layout expected by cmd_flow_parsed(). */
342 enum index command; /**< Flow command. */
343 portid_t port; /**< Affected port ID. */
346 struct rte_flow_attr attr;
347 struct rte_flow_item *pattern;
348 struct rte_flow_action *actions;
352 } vc; /**< Validate/create arguments. */
356 } destroy; /**< Destroy arguments. */
359 enum rte_flow_action_type action;
360 } query; /**< Query arguments. */
364 } list; /**< List arguments. */
367 } isolate; /**< Isolated mode arguments. */
368 } args; /**< Command arguments. */
371 /** Private data for pattern items. */
372 struct parse_item_priv {
373 enum rte_flow_item_type type; /**< Item type. */
374 uint32_t size; /**< Size of item specification structure. */
377 #define PRIV_ITEM(t, s) \
378 (&(const struct parse_item_priv){ \
379 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
383 /** Private data for actions. */
384 struct parse_action_priv {
385 enum rte_flow_action_type type; /**< Action type. */
386 uint32_t size; /**< Size of action configuration structure. */
389 #define PRIV_ACTION(t, s) \
390 (&(const struct parse_action_priv){ \
391 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
395 static const enum index next_vc_attr[] = {
404 static const enum index next_destroy_attr[] = {
410 static const enum index next_list_attr[] = {
416 static const enum index item_param[] = {
425 static const enum index next_item[] = {
455 static const enum index item_fuzzy[] = {
461 static const enum index item_any[] = {
467 static const enum index item_vf[] = {
473 static const enum index item_port[] = {
479 static const enum index item_raw[] = {
489 static const enum index item_eth[] = {
497 static const enum index item_vlan[] = {
507 static const enum index item_ipv4[] = {
517 static const enum index item_ipv6[] = {
528 static const enum index item_icmp[] = {
535 static const enum index item_udp[] = {
542 static const enum index item_tcp[] = {
550 static const enum index item_sctp[] = {
559 static const enum index item_vxlan[] = {
565 static const enum index item_e_tag[] = {
566 ITEM_E_TAG_GRP_ECID_B,
571 static const enum index item_nvgre[] = {
577 static const enum index item_mpls[] = {
583 static const enum index item_gre[] = {
589 static const enum index item_gtp[] = {
595 static const enum index item_geneve[] = {
602 static const enum index next_action[] = {
619 static const enum index action_mark[] = {
625 static const enum index action_queue[] = {
631 static const enum index action_dup[] = {
637 static const enum index action_rss[] = {
643 static const enum index action_vf[] = {
650 static const enum index action_meter[] = {
/* Forward declarations: token-processing callbacks matching the
 * struct token.call signature (ctx, token, str, len, buf, size). */
656 static int parse_init(struct context *, const struct token *,
657 const char *, unsigned int,
658 void *, unsigned int);
659 static int parse_vc(struct context *, const struct token *,
660 const char *, unsigned int,
661 void *, unsigned int);
662 static int parse_vc_spec(struct context *, const struct token *,
663 const char *, unsigned int, void *, unsigned int);
664 static int parse_vc_conf(struct context *, const struct token *,
665 const char *, unsigned int, void *, unsigned int);
666 static int parse_vc_action_rss(struct context *, const struct token *,
667 const char *, unsigned int, void *,
669 static int parse_vc_action_rss_queue(struct context *, const struct token *,
670 const char *, unsigned int, void *,
672 static int parse_destroy(struct context *, const struct token *,
673 const char *, unsigned int,
674 void *, unsigned int);
675 static int parse_flush(struct context *, const struct token *,
676 const char *, unsigned int,
677 void *, unsigned int);
678 static int parse_query(struct context *, const struct token *,
679 const char *, unsigned int,
680 void *, unsigned int);
681 static int parse_action(struct context *, const struct token *,
682 const char *, unsigned int,
683 void *, unsigned int);
684 static int parse_list(struct context *, const struct token *,
685 const char *, unsigned int,
686 void *, unsigned int);
687 static int parse_isolate(struct context *, const struct token *,
688 const char *, unsigned int,
689 void *, unsigned int);
690 static int parse_int(struct context *, const struct token *,
691 const char *, unsigned int,
692 void *, unsigned int);
693 static int parse_prefix(struct context *, const struct token *,
694 const char *, unsigned int,
695 void *, unsigned int);
696 static int parse_boolean(struct context *, const struct token *,
697 const char *, unsigned int,
698 void *, unsigned int);
699 static int parse_string(struct context *, const struct token *,
700 const char *, unsigned int,
701 void *, unsigned int);
702 static int parse_mac_addr(struct context *, const struct token *,
703 const char *, unsigned int,
704 void *, unsigned int);
705 static int parse_ipv4_addr(struct context *, const struct token *,
706 const char *, unsigned int,
707 void *, unsigned int);
708 static int parse_ipv6_addr(struct context *, const struct token *,
709 const char *, unsigned int,
710 void *, unsigned int);
711 static int parse_port(struct context *, const struct token *,
712 const char *, unsigned int,
713 void *, unsigned int);
/* Forward declarations: completion callbacks matching the
 * struct token.comp signature (ctx, token, ent, buf, size). */
714 static int comp_none(struct context *, const struct token *,
715 unsigned int, char *, unsigned int);
716 static int comp_boolean(struct context *, const struct token *,
717 unsigned int, char *, unsigned int);
718 static int comp_action(struct context *, const struct token *,
719 unsigned int, char *, unsigned int);
720 static int comp_port(struct context *, const struct token *,
721 unsigned int, char *, unsigned int);
722 static int comp_rule_id(struct context *, const struct token *,
723 unsigned int, char *, unsigned int);
724 static int comp_vc_action_rss_queue(struct context *, const struct token *,
725 unsigned int, char *, unsigned int);
727 /** Token definitions. */
728 static const struct token token_list[] = {
729 /* Special tokens. */
732 .help = "null entry, abused as the entry point",
733 .next = NEXT(NEXT_ENTRY(FLOW)),
738 .help = "command may end here",
744 .help = "integer value",
749 .name = "{unsigned}",
751 .help = "unsigned integer value",
758 .help = "prefix length for bit-mask",
759 .call = parse_prefix,
765 .help = "any boolean value",
766 .call = parse_boolean,
767 .comp = comp_boolean,
772 .help = "fixed string",
773 .call = parse_string,
777 .name = "{MAC address}",
779 .help = "standard MAC address notation",
780 .call = parse_mac_addr,
784 .name = "{IPv4 address}",
785 .type = "IPV4 ADDRESS",
786 .help = "standard IPv4 address notation",
787 .call = parse_ipv4_addr,
791 .name = "{IPv6 address}",
792 .type = "IPV6 ADDRESS",
793 .help = "standard IPv6 address notation",
794 .call = parse_ipv6_addr,
800 .help = "rule identifier",
802 .comp = comp_rule_id,
807 .help = "port identifier",
812 .name = "{group_id}",
814 .help = "group identifier",
821 .help = "priority level",
825 /* Top-level command. */
828 .type = "{command} {port_id} [{arg} [...]]",
829 .help = "manage ingress/egress flow rules",
830 .next = NEXT(NEXT_ENTRY
840 /* Sub-level commands. */
843 .help = "check whether a flow rule can be created",
844 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
845 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
850 .help = "create a flow rule",
851 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
852 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
857 .help = "destroy specific flow rules",
858 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
859 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
860 .call = parse_destroy,
864 .help = "destroy all flow rules",
865 .next = NEXT(NEXT_ENTRY(PORT_ID)),
866 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
871 .help = "query an existing flow rule",
872 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
874 NEXT_ENTRY(PORT_ID)),
875 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
876 ARGS_ENTRY(struct buffer, args.query.rule),
877 ARGS_ENTRY(struct buffer, port)),
882 .help = "list existing flow rules",
883 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
884 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
889 .help = "restrict ingress traffic to the defined flow rules",
890 .next = NEXT(NEXT_ENTRY(BOOLEAN),
891 NEXT_ENTRY(PORT_ID)),
892 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
893 ARGS_ENTRY(struct buffer, port)),
894 .call = parse_isolate,
896 /* Destroy arguments. */
899 .help = "specify a rule identifier",
900 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
901 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
902 .call = parse_destroy,
904 /* Query arguments. */
908 .help = "action to query, must be part of the rule",
909 .call = parse_action,
912 /* List arguments. */
915 .help = "specify a group",
916 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
917 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
920 /* Validate/create attributes. */
923 .help = "specify a group",
924 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
925 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
930 .help = "specify a priority level",
931 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
932 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
937 .help = "affect rule to ingress",
938 .next = NEXT(next_vc_attr),
943 .help = "affect rule to egress",
944 .next = NEXT(next_vc_attr),
947 /* Validate/create pattern. */
950 .help = "submit a list of pattern items",
951 .next = NEXT(next_item),
956 .help = "match value perfectly (with full bit-mask)",
957 .call = parse_vc_spec,
959 [ITEM_PARAM_SPEC] = {
961 .help = "match value according to configured bit-mask",
962 .call = parse_vc_spec,
964 [ITEM_PARAM_LAST] = {
966 .help = "specify upper bound to establish a range",
967 .call = parse_vc_spec,
969 [ITEM_PARAM_MASK] = {
971 .help = "specify bit-mask with relevant bits set to one",
972 .call = parse_vc_spec,
974 [ITEM_PARAM_PREFIX] = {
976 .help = "generate bit-mask from a prefix length",
977 .call = parse_vc_spec,
981 .help = "specify next pattern item",
982 .next = NEXT(next_item),
986 .help = "end list of pattern items",
987 .priv = PRIV_ITEM(END, 0),
988 .next = NEXT(NEXT_ENTRY(ACTIONS)),
993 .help = "no-op pattern item",
994 .priv = PRIV_ITEM(VOID, 0),
995 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1000 .help = "perform actions when pattern does not match",
1001 .priv = PRIV_ITEM(INVERT, 0),
1002 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1007 .help = "match any protocol for the current layer",
1008 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1009 .next = NEXT(item_any),
1014 .help = "number of layers covered",
1015 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1016 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1020 .help = "match packets addressed to the physical function",
1021 .priv = PRIV_ITEM(PF, 0),
1022 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1027 .help = "match packets addressed to a virtual function ID",
1028 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1029 .next = NEXT(item_vf),
1034 .help = "destination VF ID",
1035 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1036 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1040 .help = "device-specific physical port index to use",
1041 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
1042 .next = NEXT(item_port),
1045 [ITEM_PORT_INDEX] = {
1047 .help = "physical port index",
1048 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1049 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1053 .help = "match an arbitrary byte string",
1054 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1055 .next = NEXT(item_raw),
1058 [ITEM_RAW_RELATIVE] = {
1060 .help = "look for pattern after the previous item",
1061 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1062 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1065 [ITEM_RAW_SEARCH] = {
1067 .help = "search pattern from offset (see also limit)",
1068 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1069 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1072 [ITEM_RAW_OFFSET] = {
1074 .help = "absolute or relative offset for pattern",
1075 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1076 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1078 [ITEM_RAW_LIMIT] = {
1080 .help = "search area limit for start of pattern",
1081 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1082 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1084 [ITEM_RAW_PATTERN] = {
1086 .help = "byte string to look for",
1087 .next = NEXT(item_raw,
1089 NEXT_ENTRY(ITEM_PARAM_IS,
1092 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1093 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1095 ITEM_RAW_PATTERN_SIZE)),
1099 .help = "match Ethernet header",
1100 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1101 .next = NEXT(item_eth),
1106 .help = "destination MAC",
1107 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1108 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1112 .help = "source MAC",
1113 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1114 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1118 .help = "EtherType",
1119 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1120 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1124 .help = "match 802.1Q/ad VLAN tag",
1125 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1126 .next = NEXT(item_vlan),
1129 [ITEM_VLAN_TPID] = {
1131 .help = "tag protocol identifier",
1132 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1133 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1137 .help = "tag control information",
1138 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1143 .help = "priority code point",
1144 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1145 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1150 .help = "drop eligible indicator",
1151 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1152 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1157 .help = "VLAN identifier",
1158 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1159 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1164 .help = "match IPv4 header",
1165 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1166 .next = NEXT(item_ipv4),
1171 .help = "type of service",
1172 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1173 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1174 hdr.type_of_service)),
1178 .help = "time to live",
1179 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1180 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1183 [ITEM_IPV4_PROTO] = {
1185 .help = "next protocol ID",
1186 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1187 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1188 hdr.next_proto_id)),
1192 .help = "source address",
1193 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1194 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1199 .help = "destination address",
1200 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1201 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1206 .help = "match IPv6 header",
1207 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1208 .next = NEXT(item_ipv6),
1213 .help = "traffic class",
1214 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1215 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1217 "\x0f\xf0\x00\x00")),
1219 [ITEM_IPV6_FLOW] = {
1221 .help = "flow label",
1222 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1223 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1225 "\x00\x0f\xff\xff")),
1227 [ITEM_IPV6_PROTO] = {
1229 .help = "protocol (next header)",
1230 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1231 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1236 .help = "hop limit",
1237 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1238 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1243 .help = "source address",
1244 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1245 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1250 .help = "destination address",
1251 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1257 .help = "match ICMP header",
1258 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1259 .next = NEXT(item_icmp),
1262 [ITEM_ICMP_TYPE] = {
1264 .help = "ICMP packet type",
1265 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1266 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1269 [ITEM_ICMP_CODE] = {
1271 .help = "ICMP packet code",
1272 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1273 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1278 .help = "match UDP header",
1279 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1280 .next = NEXT(item_udp),
1285 .help = "UDP source port",
1286 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1287 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1292 .help = "UDP destination port",
1293 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1294 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1299 .help = "match TCP header",
1300 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1301 .next = NEXT(item_tcp),
1306 .help = "TCP source port",
1307 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1308 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1313 .help = "TCP destination port",
1314 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1315 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1318 [ITEM_TCP_FLAGS] = {
1320 .help = "TCP flags",
1321 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1322 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1327 .help = "match SCTP header",
1328 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1329 .next = NEXT(item_sctp),
1334 .help = "SCTP source port",
1335 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1336 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1341 .help = "SCTP destination port",
1342 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1343 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1348 .help = "validation tag",
1349 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1350 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1353 [ITEM_SCTP_CKSUM] = {
1356 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1357 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1362 .help = "match VXLAN header",
1363 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1364 .next = NEXT(item_vxlan),
1367 [ITEM_VXLAN_VNI] = {
1369 .help = "VXLAN identifier",
1370 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1371 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1375 .help = "match E-Tag header",
1376 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1377 .next = NEXT(item_e_tag),
1380 [ITEM_E_TAG_GRP_ECID_B] = {
1381 .name = "grp_ecid_b",
1382 .help = "GRP and E-CID base",
1383 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1384 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1390 .help = "match NVGRE header",
1391 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1392 .next = NEXT(item_nvgre),
1395 [ITEM_NVGRE_TNI] = {
1397 .help = "virtual subnet ID",
1398 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1399 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1403 .help = "match MPLS header",
1404 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1405 .next = NEXT(item_mpls),
1408 [ITEM_MPLS_LABEL] = {
1410 .help = "MPLS label",
1411 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1412 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1418 .help = "match GRE header",
1419 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1420 .next = NEXT(item_gre),
1423 [ITEM_GRE_PROTO] = {
1425 .help = "GRE protocol type",
1426 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1427 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1432 .help = "fuzzy pattern match, expect faster than default",
1433 .priv = PRIV_ITEM(FUZZY,
1434 sizeof(struct rte_flow_item_fuzzy)),
1435 .next = NEXT(item_fuzzy),
1438 [ITEM_FUZZY_THRESH] = {
1440 .help = "match accuracy threshold",
1441 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1442 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1447 .help = "match GTP header",
1448 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1449 .next = NEXT(item_gtp),
1454 .help = "tunnel endpoint identifier",
1455 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1456 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1460 .help = "match GTP header",
1461 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1462 .next = NEXT(item_gtp),
1467 .help = "match GTP header",
1468 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1469 .next = NEXT(item_gtp),
1474 .help = "match GENEVE header",
1475 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1476 .next = NEXT(item_geneve),
1479 [ITEM_GENEVE_VNI] = {
1481 .help = "virtual network identifier",
1482 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1483 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1485 [ITEM_GENEVE_PROTO] = {
1487 .help = "GENEVE protocol type",
1488 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1489 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1493 /* Validate/create actions. */
1496 .help = "submit a list of associated actions",
1497 .next = NEXT(next_action),
1502 .help = "specify next action",
1503 .next = NEXT(next_action),
1507 .help = "end list of actions",
1508 .priv = PRIV_ACTION(END, 0),
1513 .help = "no-op action",
1514 .priv = PRIV_ACTION(VOID, 0),
1515 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1518 [ACTION_PASSTHRU] = {
1520 .help = "let subsequent rule process matched packets",
1521 .priv = PRIV_ACTION(PASSTHRU, 0),
1522 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1527 .help = "attach 32 bit value to packets",
1528 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1529 .next = NEXT(action_mark),
1532 [ACTION_MARK_ID] = {
1534 .help = "32 bit value to return with packets",
1535 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1536 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1537 .call = parse_vc_conf,
1541 .help = "flag packets",
1542 .priv = PRIV_ACTION(FLAG, 0),
1543 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1548 .help = "assign packets to a given queue index",
1549 .priv = PRIV_ACTION(QUEUE,
1550 sizeof(struct rte_flow_action_queue)),
1551 .next = NEXT(action_queue),
1554 [ACTION_QUEUE_INDEX] = {
1556 .help = "queue index to use",
1557 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1558 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1559 .call = parse_vc_conf,
1563 .help = "drop packets (note: passthru has priority)",
1564 .priv = PRIV_ACTION(DROP, 0),
1565 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1570 .help = "enable counters for this rule",
1571 .priv = PRIV_ACTION(COUNT, 0),
1572 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1577 .help = "duplicate packets to a given queue index",
1578 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1579 .next = NEXT(action_dup),
1582 [ACTION_DUP_INDEX] = {
1584 .help = "queue index to duplicate packets to",
1585 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1586 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1587 .call = parse_vc_conf,
1591 .help = "spread packets among several queues",
1592 .priv = PRIV_ACTION(RSS, sizeof(union action_rss_data)),
1593 .next = NEXT(action_rss),
1594 .call = parse_vc_action_rss,
1596 [ACTION_RSS_QUEUES] = {
1598 .help = "queue indices to use",
1599 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1600 .call = parse_vc_conf,
1602 [ACTION_RSS_QUEUE] = {
1604 .help = "queue index",
1605 .call = parse_vc_action_rss_queue,
1606 .comp = comp_vc_action_rss_queue,
1610 .help = "redirect packets to physical device function",
1611 .priv = PRIV_ACTION(PF, 0),
1612 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1617 .help = "redirect packets to virtual device function",
1618 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1619 .next = NEXT(action_vf),
1622 [ACTION_VF_ORIGINAL] = {
1624 .help = "use original VF ID if possible",
1625 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1626 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1628 .call = parse_vc_conf,
1632 .help = "VF ID to redirect packets to",
1633 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1634 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1635 .call = parse_vc_conf,
1639 .help = "meter the directed packets at given id",
1640 .priv = PRIV_ACTION(METER,
1641 sizeof(struct rte_flow_action_meter)),
1642 .next = NEXT(action_meter),
1645 [ACTION_METER_ID] = {
1647 .help = "meter id to use",
1648 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1649 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1650 .call = parse_vc_conf,
1654 /** Remove and return last entry from argument stack. */
1655 static const struct arg *
1656 pop_args(struct context *ctx)
/* NULL when the stack is empty; otherwise pop and return the top entry. */
1658 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1661 /** Add entry on top of the argument stack. */
1663 push_args(struct context *ctx, const struct arg *arg)
/* Reject when the fixed-size stack is full (CTX_STACK_SIZE entries);
 * the failure return statement is not visible in this sampled view. */
1665 if (ctx->args_num == CTX_STACK_SIZE)
1667 ctx->args[ctx->args_num++] = arg;
1671 /** Spread value into buffer according to bit-mask. */
/* NOTE(review): sampled fragment -- the outer loop setup, the big-endian
 * branch and several bookkeeping lines are missing from this view. */
1673 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1675 uint32_t i = arg->size;
1683 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1692 unsigned int shift = 0;
/* Target byte inside dst: arg->offset plus the current mask index. */
1693 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Walk the set bits of this mask byte, depositing val one bit at a
 * time; clear mask bits are skipped. */
1695 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1696 if (!(arg->mask[i] & (1 << shift)))
1701 *buf &= ~(1 << shift);
1702 *buf |= (val & 1) << shift;
1710 /** Compare a string with a partial one of a given length. */
1712 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1714 int r = strncmp(full, partial, partial_len);
/* Prefix matched: 0 (equal) when full is no longer than partial_len... */
1718 if (strlen(full) <= partial_len)
/* ...otherwise the first unmatched character of full (non-zero). */
1720 return full[partial_len];
1724 * Parse a prefix length and generate a bit-mask.
1726 * Last argument (ctx->args) is retrieved to determine mask size, storage
1727 * location and whether the result must use network byte ordering.
1730 parse_prefix(struct context *ctx, const struct token *token,
1731 const char *str, unsigned int len,
1732 void *buf, unsigned int size)
1734 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set (partial byte). */
1735 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1742 /* Argument is expected. */
/* Whole token must be consumed by strtoumax() for the input to be valid. */
1746 u = strtoumax(str, &end, 0);
1747 if (errno || (size_t)(end - str) != len)
1752 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Bit-mask arguments: fill object and objmask through the bit-field helper. */
1761 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1762 !arg_entry_bf_fill(ctx->objmask, -1, arg))
1769 if (bytes > size || bytes + !!extra > size)
1773 buf = (uint8_t *)ctx->object + arg->offset;
/* Byte layout differs by host endianness; both set `bytes` full 0xff bytes
 * plus one partial byte from conv[] when `extra` bits remain. */
1774 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1776 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1777 memset(buf, 0x00, size - bytes);
1779 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
1783 memset(buf, 0xff, bytes);
1784 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1786 ((uint8_t *)buf)[bytes] = conv[extra];
1789 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): presumably the error path — restore popped arg for caller. */
1792 push_args(ctx, arg);
1796 /** Default parsing function for token name matching. */
1798 parse_default(struct context *ctx, const struct token *token,
1799 const char *str, unsigned int len,
1800 void *buf, unsigned int size)
/* Accepts str iff it is a (possibly partial) match of the token name. */
1805 if (strcmp_partial(token->name, str, len))
1810 /** Parse flow command, initialize output buffer for subsequent tokens. */
1812 parse_init(struct context *ctx, const struct token *token,
1813 const char *str, unsigned int len,
1814 void *buf, unsigned int size)
1816 struct buffer *out = buf;
1818 /* Token name must match. */
1819 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1821 /* Nothing else to do if there is no buffer. */
1824 /* Make sure buffer is large enough. */
1825 if (size < sizeof(*out))
1827 /* Initialize buffer. */
/* Tail is poisoned with 0x22 so stale data cannot masquerade as valid. */
1828 memset(out, 0x00, sizeof(*out));
1829 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1832 ctx->objmask = NULL;
1836 /** Parse tokens for validate/create commands. */
/* NOTE(review): partial listing — switch case labels and returns omitted. */
1838 parse_vc(struct context *ctx, const struct token *token,
1839 const char *str, unsigned int len,
1840 void *buf, unsigned int size)
1842 struct buffer *out = buf;
1846 /* Token name must match. */
1847 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1849 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE and set up the
 * data area growing downward from the end of the output buffer. */
1852 if (!out->command) {
1853 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1855 if (sizeof(*out) > size)
1857 out->command = ctx->curr;
1860 ctx->objmask = NULL;
1861 out->args.vc.data = (uint8_t *)out + size;
1865 ctx->object = &out->args.vc.attr;
1866 ctx->objmask = NULL;
1867 switch (ctx->curr) {
/* Attribute tokens simply flip flow-attribute flags. */
1872 out->args.vc.attr.ingress = 1;
1875 out->args.vc.attr.egress = 1;
/* Pattern list starts right after the buffer header, aligned. */
1878 out->args.vc.pattern =
1879 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1881 ctx->object = out->args.vc.pattern;
1882 ctx->objmask = NULL;
/* Action list starts after the last pattern item, aligned. */
1885 out->args.vc.actions =
1886 (void *)RTE_ALIGN_CEIL((uintptr_t)
1887 (out->args.vc.pattern +
1888 out->args.vc.pattern_n),
1890 ctx->object = out->args.vc.actions;
1891 ctx->objmask = NULL;
/* While actions are not reached yet, tokens are pattern items:
 * reserve spec/last/mask storage carved from the top of the data area. */
1898 if (!out->args.vc.actions) {
1899 const struct parse_item_priv *priv = token->priv;
1900 struct rte_flow_item *item =
1901 out->args.vc.pattern + out->args.vc.pattern_n;
1903 data_size = priv->size * 3; /* spec, last, mask */
1904 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1905 (out->args.vc.data - data_size),
/* Fail when the item array and the data area would collide. */
1907 if ((uint8_t *)item + sizeof(*item) > data)
1909 *item = (struct rte_flow_item){
1912 ++out->args.vc.pattern_n;
1914 ctx->objmask = NULL;
/* Otherwise the token is an action; reserve its configuration storage. */
1916 const struct parse_action_priv *priv = token->priv;
1917 struct rte_flow_action *action =
1918 out->args.vc.actions + out->args.vc.actions_n;
1920 data_size = priv->size; /* configuration */
1921 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1922 (out->args.vc.data - data_size),
1924 if ((uint8_t *)action + sizeof(*action) > data)
1926 *action = (struct rte_flow_action){
1928 .conf = data_size ? data : NULL,
1930 ++out->args.vc.actions_n;
1931 ctx->object = action;
1932 ctx->objmask = NULL;
/* Zero the freshly reserved storage and shrink the data area. */
1934 memset(data, 0, data_size);
1935 out->args.vc.data = data;
1936 ctx->objdata = data_size;
1940 /** Parse pattern item parameter type. */
/* NOTE(review): partial listing — index assignments per case are omitted. */
1942 parse_vc_spec(struct context *ctx, const struct token *token,
1943 const char *str, unsigned int len,
1944 void *buf, unsigned int size)
1946 struct buffer *out = buf;
1947 struct rte_flow_item *item;
1953 /* Token name must match. */
1954 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1956 /* Parse parameter types. */
1957 switch (ctx->curr) {
1958 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1964 case ITEM_PARAM_SPEC:
1967 case ITEM_PARAM_LAST:
1970 case ITEM_PARAM_PREFIX:
1971 /* Modify next token to expect a prefix. */
1972 if (ctx->next_num < 2)
1974 ctx->next[ctx->next_num - 2] = prefix;
1976 case ITEM_PARAM_MASK:
1982 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach spec/last/mask to. */
1985 if (!out->args.vc.pattern_n)
1987 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
1988 data_size = ctx->objdata / 3; /* spec, last, mask */
1989 /* Point to selected object. */
1990 ctx->object = out->args.vc.data + (data_size * index);
1992 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1993 item->mask = ctx->objmask;
1995 ctx->objmask = NULL;
1996 /* Update relevant item pointer. */
1997 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2002 /** Parse action configuration field. */
2004 parse_vc_conf(struct context *ctx, const struct token *token,
2005 const char *str, unsigned int len,
2006 void *buf, unsigned int size)
2008 struct buffer *out = buf;
2011 /* Token name must match. */
2012 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2014 /* Nothing else to do if there is no buffer. */
2017 /* Point to selected object. */
/* Subsequent argument tokens write into the current action's config area. */
2018 ctx->object = out->args.vc.data;
2019 ctx->objmask = NULL;
2023 /** Parse RSS action. */
/* NOTE(review): partial listing — some initializers and returns omitted. */
2025 parse_vc_action_rss(struct context *ctx, const struct token *token,
2026 const char *str, unsigned int len,
2027 void *buf, unsigned int size)
2029 struct buffer *out = buf;
2030 struct rte_flow_action *action;
2031 union action_rss_data *action_rss_data;
/* Reuse generic action parsing to reserve conf storage first. */
2035 ret = parse_vc(ctx, token, str, len, buf, size);
2038 /* Nothing else to do if there is no buffer. */
2041 if (!out->args.vc.actions_n)
2043 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2044 /* Point to selected object. */
2045 ctx->object = out->args.vc.data;
2046 ctx->objmask = NULL;
2047 /* Set up default configuration. */
2048 action_rss_data = ctx->object;
2049 *action_rss_data = (union action_rss_data){
2050 .conf = (struct rte_flow_action_rss){
2051 .rss_conf = &action_rss_data->s.rss_conf,
2052 .num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2055 action_rss_data->s.rss_conf = (struct rte_eth_rss_conf){
2056 .rss_key = action_rss_data->s.rss_key,
2057 .rss_key_len = sizeof(action_rss_data->s.rss_key),
/* Source string is shorter than the key buffer; strncpy zero-pads the
 * remainder, so the key is fully initialized. */
2060 strncpy((void *)action_rss_data->s.rss_key,
2061 "testpmd's default RSS hash key",
2062 sizeof(action_rss_data->s.rss_key));
/* Default queue list: identity mapping 0..num-1. */
2063 for (i = 0; i < action_rss_data->conf.num; ++i)
2064 action_rss_data->conf.queue[i] = i;
/* Clamp key length to the device capability when a port is selected. */
2065 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2066 ctx->port != (portid_t)RTE_PORT_ALL) {
2067 struct rte_eth_dev_info info;
2069 rte_eth_dev_info_get(ctx->port, &info);
2070 action_rss_data->s.rss_conf.rss_key_len =
2071 RTE_MIN(sizeof(action_rss_data->s.rss_key),
2072 info.hash_key_size);
2074 action->conf = &action_rss_data->conf;
2079 * Parse queue field for RSS action.
2081 * Valid tokens are queue indices and the "end" token.
2084 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2085 const char *str, unsigned int len,
2086 void *buf, unsigned int size)
2088 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2089 union action_rss_data *action_rss_data;
2096 if (ctx->curr != ACTION_RSS_QUEUE)
/* Upper 16 bits of objdata hold the running queue count. */
2098 i = ctx->objdata >> 16;
2099 if (!strcmp_partial("end", str, len)) {
2100 ctx->objdata &= 0xffff;
2103 if (i >= ACTION_RSS_QUEUE_NUM)
/* Arbitrary-offset arg entry targets queue[i] inside the RSS conf. */
2106 ARGS_ENTRY_ARB(offsetof(struct rte_flow_action_rss,
2108 i * sizeof(action_rss_data->s.queue[i]),
2109 sizeof(action_rss_data->s.queue[i]))))
2111 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the upper 16 bits. */
2117 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Chain another ACTION_RSS_QUEUE token to accept more indices. */
2119 if (ctx->next_num == RTE_DIM(ctx->next))
2121 ctx->next[ctx->next_num++] = next;
/* On "end": finalize the number of queues collected so far. */
2124 action_rss_data = ctx->object;
2125 action_rss_data->conf.num = i;
2129 /** Parse tokens for destroy command. */
2131 parse_destroy(struct context *ctx, const struct token *token,
2132 const char *str, unsigned int len,
2133 void *buf, unsigned int size)
2135 struct buffer *out = buf;
2137 /* Token name must match. */
2138 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2140 /* Nothing else to do if there is no buffer. */
/* First token: record the command and start the rule-ID array after out. */
2143 if (!out->command) {
2144 if (ctx->curr != DESTROY)
2146 if (sizeof(*out) > size)
2148 out->command = ctx->curr;
2151 ctx->objmask = NULL;
2152 out->args.destroy.rule =
2153 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next rule-ID slot must fit inside the output buffer. */
2157 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2158 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2161 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2162 ctx->objmask = NULL;
2166 /** Parse tokens for flush command. */
2168 parse_flush(struct context *ctx, const struct token *token,
2169 const char *str, unsigned int len,
2170 void *buf, unsigned int size)
2172 struct buffer *out = buf;
2174 /* Token name must match. */
2175 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2177 /* Nothing else to do if there is no buffer. */
/* Flush takes no extra arguments; only record the command once. */
2180 if (!out->command) {
2181 if (ctx->curr != FLUSH)
2183 if (sizeof(*out) > size)
2185 out->command = ctx->curr;
2188 ctx->objmask = NULL;
2193 /** Parse tokens for query command. */
2195 parse_query(struct context *ctx, const struct token *token,
2196 const char *str, unsigned int len,
2197 void *buf, unsigned int size)
2199 struct buffer *out = buf;
2201 /* Token name must match. */
2202 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2204 /* Nothing else to do if there is no buffer. */
/* Record the command on the first matching token. */
2207 if (!out->command) {
2208 if (ctx->curr != QUERY)
2210 if (sizeof(*out) > size)
2212 out->command = ctx->curr;
2215 ctx->objmask = NULL;
2220 /** Parse action names. */
/* NOTE(review): partial listing — match/store logic between lines omitted. */
2222 parse_action(struct context *ctx, const struct token *token,
2223 const char *str, unsigned int len,
2224 void *buf, unsigned int size)
2226 struct buffer *out = buf;
2227 const struct arg *arg = pop_args(ctx);
2231 /* Argument is expected. */
2234 /* Parse action name. */
/* Scan the global action token list for a (partial) name match. */
2235 for (i = 0; next_action[i]; ++i) {
2236 const struct parse_action_priv *priv;
2238 token = &token_list[next_action[i]];
2239 if (strcmp_partial(token->name, str, len))
2245 memcpy((uint8_t *)ctx->object + arg->offset,
/* NOTE(review): presumably the error path — restore popped arg. */
2251 push_args(ctx, arg);
2255 /** Parse tokens for list command. */
2257 parse_list(struct context *ctx, const struct token *token,
2258 const char *str, unsigned int len,
2259 void *buf, unsigned int size)
2261 struct buffer *out = buf;
2263 /* Token name must match. */
2264 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2266 /* Nothing else to do if there is no buffer. */
/* First token: record the command and start the group array after out. */
2269 if (!out->command) {
2270 if (ctx->curr != LIST)
2272 if (sizeof(*out) > size)
2274 out->command = ctx->curr;
2277 ctx->objmask = NULL;
2278 out->args.list.group =
2279 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Bounds check: next group slot must fit inside the output buffer. */
2283 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2284 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2287 ctx->object = out->args.list.group + out->args.list.group_n++;
2288 ctx->objmask = NULL;
2292 /** Parse tokens for isolate command. */
2294 parse_isolate(struct context *ctx, const struct token *token,
2295 const char *str, unsigned int len,
2296 void *buf, unsigned int size)
2298 struct buffer *out = buf;
2300 /* Token name must match. */
2301 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2303 /* Nothing else to do if there is no buffer. */
/* Record the command on the first matching token. */
2306 if (!out->command) {
2307 if (ctx->curr != ISOLATE)
2309 if (sizeof(*out) > size)
2311 out->command = ctx->curr;
2314 ctx->objmask = NULL;
2320 * Parse signed/unsigned integers 8 to 64-bit long.
2322 * Last argument (ctx->args) is retrieved to determine integer type and
2326 parse_int(struct context *ctx, const struct token *token,
2327 const char *str, unsigned int len,
2328 void *buf, unsigned int size)
2330 const struct arg *arg = pop_args(ctx);
2335 /* Argument is expected. */
/* Signed vs unsigned parse selected by the argument descriptor. */
2340 (uintmax_t)strtoimax(str, &end, 0) :
2341 strtoumax(str, &end, 0);
2342 if (errno || (size_t)(end - str) != len)
/* Bit-mask arguments go through the bit-field spread helper instead. */
2347 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2348 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2352 buf = (uint8_t *)ctx->object + arg->offset;
/* Store according to field width; hton converts to network order. */
2356 case sizeof(uint8_t):
2357 *(uint8_t *)buf = u;
2359 case sizeof(uint16_t):
2360 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are stored byte by byte to honor endianness. */
2362 case sizeof(uint8_t [3]):
2363 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2365 ((uint8_t *)buf)[0] = u;
2366 ((uint8_t *)buf)[1] = u >> 8;
2367 ((uint8_t *)buf)[2] = u >> 16;
2371 ((uint8_t *)buf)[0] = u >> 16;
2372 ((uint8_t *)buf)[1] = u >> 8;
2373 ((uint8_t *)buf)[2] = u;
2375 case sizeof(uint32_t):
2376 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2378 case sizeof(uint64_t):
2379 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store for objmask unless it aliases the object. */
2384 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2386 buf = (uint8_t *)ctx->objmask + arg->offset;
/* NOTE(review): presumably the error path — restore popped arg. */
2391 push_args(ctx, arg);
2398 * Two arguments (ctx->args) are retrieved from the stack to store data and
2399 * its length (in that order).
2402 parse_string(struct context *ctx, const struct token *token,
2403 const char *str, unsigned int len,
2404 void *buf, unsigned int size)
2406 const struct arg *arg_data = pop_args(ctx);
2407 const struct arg *arg_len = pop_args(ctx);
2408 char tmp[16]; /* Ought to be enough. */
2411 /* Arguments are expected. */
/* arg_len missing: undo the first pop before bailing out. */
2415 push_args(ctx, arg_data);
2418 size = arg_data->size;
2419 /* Bit-mask fill is not supported. */
2420 if (arg_data->mask || size < len)
2424 /* Let parse_int() fill length information first. */
2425 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2428 push_args(ctx, arg_len);
2429 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2434 buf = (uint8_t *)ctx->object + arg_data->offset;
2435 /* Output buffer is not necessarily NUL-terminated. */
/* Unused tail is poisoned with 0x55 rather than zeroed. */
2436 memcpy(buf, str, len);
2437 memset((uint8_t *)buf + len, 0x55, size - len);
2439 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* NOTE(review): presumably error path — restore both popped args in order. */
2442 push_args(ctx, arg_len);
2443 push_args(ctx, arg_data);
2448 * Parse a MAC address.
2450 * Last argument (ctx->args) is retrieved to determine storage size and
2454 parse_mac_addr(struct context *ctx, const struct token *token,
2455 const char *str, unsigned int len,
2456 void *buf, unsigned int size)
2458 const struct arg *arg = pop_args(ctx);
2459 struct ether_addr tmp;
2463 /* Argument is expected. */
2467 /* Bit-mask fill is not supported. */
2468 if (arg->mask || size != sizeof(tmp))
2470 /* Only network endian is supported. */
/* cmdline helper must consume exactly the token length. */
2473 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2474 if (ret < 0 || (unsigned int)ret != len)
2478 buf = (uint8_t *)ctx->object + arg->offset;
2479 memcpy(buf, &tmp, size);
2481 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): presumably the error path — restore popped arg. */
2484 push_args(ctx, arg);
2489 * Parse an IPv4 address.
2491 * Last argument (ctx->args) is retrieved to determine storage size and
2495 parse_ipv4_addr(struct context *ctx, const struct token *token,
2496 const char *str, unsigned int len,
2497 void *buf, unsigned int size)
2499 const struct arg *arg = pop_args(ctx);
2504 /* Argument is expected. */
2508 /* Bit-mask fill is not supported. */
2509 if (arg->mask || size != sizeof(tmp))
2511 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2514 memcpy(str2, str, len)
2516 ret = inet_pton(AF_INET, str2, &tmp);
2518 /* Attempt integer parsing. */
/* Fallback: a plain integer is also accepted as an address. */
2519 push_args(ctx, arg);
2520 return parse_int(ctx, token, str, len, buf, size);
2524 buf = (uint8_t *)ctx->object + arg->offset;
2525 memcpy(buf, &tmp, size);
2527 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2530 push_args(ctx, arg);
2535 * Parse an IPv6 address.
2537 * Last argument (ctx->args) is retrieved to determine storage size and
2541 parse_ipv6_addr(struct context *ctx, const struct token *token,
2542 const char *str, unsigned int len,
2543 void *buf, unsigned int size)
2545 const struct arg *arg = pop_args(ctx);
2547 struct in6_addr tmp;
2551 /* Argument is expected. */
2555 /* Bit-mask fill is not supported. */
2556 if (arg->mask || size != sizeof(tmp))
2558 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
2561 memcpy(str2, str, len);
2563 ret = inet_pton(AF_INET6, str2, &tmp);
2568 buf = (uint8_t *)ctx->object + arg->offset;
2569 memcpy(buf, &tmp, size);
2571 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): presumably the error path — restore popped arg. */
2574 push_args(ctx, arg);
2578 /** Boolean values (even indices stand for false). */
/* NOTE(review): array entries omitted from this partial listing. */
2579 static const char *const boolean_name[] = {
2588 * Parse a boolean value.
2590 * Last argument (ctx->args) is retrieved to determine storage size and
2594 parse_boolean(struct context *ctx, const struct token *token,
2595 const char *str, unsigned int len,
2596 void *buf, unsigned int size)
2598 const struct arg *arg = pop_args(ctx);
2602 /* Argument is expected. */
/* Look the token up in boolean_name[]; even indices mean false. */
2605 for (i = 0; boolean_name[i]; ++i)
2606 if (!strcmp_partial(boolean_name[i], str, len))
2608 /* Process token as integer. */
2609 if (boolean_name[i])
2610 str = i & 1 ? "1" : "0";
/* Delegate the actual store to parse_int() with "0"/"1" or raw input. */
2611 push_args(ctx, arg);
2612 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen(str). */
2613 return ret > 0 ? (int)len : ret;
2616 /** Parse port and update context. */
2618 parse_port(struct context *ctx, const struct token *token,
2619 const char *str, unsigned int len,
2620 void *buf, unsigned int size)
/* Scratch buffer used when the caller did not provide one. */
2622 struct buffer *out = &(struct buffer){ .port = 0 };
2630 ctx->objmask = NULL;
2631 size = sizeof(*out);
2633 ret = parse_int(ctx, token, str, len, out, size);
/* Remember the port for later completions (queues, rule IDs). */
2635 ctx->port = out->port;
2641 /** No completion. */
/* Stub completion callback for tokens with free-form values. */
2643 comp_none(struct context *ctx, const struct token *token,
2644 unsigned int ent, char *buf, unsigned int size)
2654 /** Complete boolean values. */
2656 comp_boolean(struct context *ctx, const struct token *token,
2657 unsigned int ent, char *buf, unsigned int size)
/* With a buffer: write entry `ent`; without: fall through to count. */
2663 for (i = 0; boolean_name[i]; ++i)
2664 if (buf && i == ent)
2665 return snprintf(buf, size, "%s", boolean_name[i]);
2671 /** Complete action names. */
2673 comp_action(struct context *ctx, const struct token *token,
2674 unsigned int ent, char *buf, unsigned int size)
/* With a buffer: write entry `ent`; without: fall through to count. */
2680 for (i = 0; next_action[i]; ++i)
2681 if (buf && i == ent)
2682 return snprintf(buf, size, "%s",
2683 token_list[next_action[i]].name);
2689 /** Complete available ports. */
2691 comp_port(struct context *ctx, const struct token *token,
2692 unsigned int ent, char *buf, unsigned int size)
/* Enumerate attached ethdev ports; write entry `ent` when buf is set. */
2699 RTE_ETH_FOREACH_DEV(p) {
2700 if (buf && i == ent)
2701 return snprintf(buf, size, "%u", p);
2709 /** Complete available rule IDs. */
2711 comp_rule_id(struct context *ctx, const struct token *token,
2712 unsigned int ent, char *buf, unsigned int size)
2715 struct rte_port *port;
2716 struct port_flow *pf;
/* Rule IDs only exist for a valid, specific port selection. */
2719 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2720 ctx->port == (portid_t)RTE_PORT_ALL)
2722 port = &ports[ctx->port];
/* Walk the port's flow list; write entry `ent` when buf is set. */
2723 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2724 if (buf && i == ent)
2725 return snprintf(buf, size, "%u", pf->id);
2733 /** Complete queue field for RSS action. */
2735 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2736 unsigned int ent, char *buf, unsigned int size)
/* Suggest queue indices first, then the terminating "end" keyword. */
2743 return snprintf(buf, size, "%u", ent);
2745 return snprintf(buf, size, "end");
2749 /** Internal context. */
2750 static struct context cmd_flow_context;
2752 /** Global parser instance (cmdline API). */
/* Forward declaration; the instance itself is defined at the end of the file. */
2753 cmdline_parse_inst_t cmd_flow;
2755 /** Initialize context. */
2757 cmd_flow_context_init(struct context *ctx)
2759 /* A full memset() is not necessary. */
/* NOTE(review): partial listing — other field resets omitted here. */
2769 ctx->objmask = NULL;
2772 /** Parse a token (cmdline API). */
/* NOTE(review): partial listing — several statements/returns omitted. */
2774 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2777 struct context *ctx = &cmd_flow_context;
2778 const struct token *token;
2779 const enum index *list;
2784 token = &token_list[ctx->curr];
2785 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2788 for (len = 0; src[len]; ++len)
2789 if (src[len] == '#' || isspace(src[len]))
2793 /* Last argument and EOL detection. */
2794 for (i = len; src[i]; ++i)
2795 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2797 else if (!isspace(src[i])) {
2802 if (src[i] == '\r' || src[i] == '\n') {
2806 /* Initialize context if necessary. */
2807 if (!ctx->next_num) {
2810 ctx->next[ctx->next_num++] = token->next[0];
2812 /* Process argument through candidates. */
/* Try each candidate token from the top of the next-list stack. */
2813 ctx->prev = ctx->curr;
2814 list = ctx->next[ctx->next_num - 1];
2815 for (i = 0; list[i]; ++i) {
2816 const struct token *next = &token_list[list[i]];
2819 ctx->curr = list[i];
/* Tokens with a call() hook parse themselves; others use defaults. */
2821 tmp = next->call(ctx, next, src, len, result, size);
2823 tmp = parse_default(ctx, next, src, len, result, size);
2824 if (tmp == -1 || tmp != len)
2832 /* Push subsequent tokens if any. */
2834 for (i = 0; token->next[i]; ++i) {
2835 if (ctx->next_num == RTE_DIM(ctx->next))
2837 ctx->next[ctx->next_num++] = token->next[i];
2839 /* Push arguments if any. */
2841 for (i = 0; token->args[i]; ++i) {
2842 if (ctx->args_num == RTE_DIM(ctx->args))
2844 ctx->args[ctx->args_num++] = token->args[i];
2849 /** Return number of completion entries (cmdline API). */
2851 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2853 struct context *ctx = &cmd_flow_context;
2854 const struct token *token = &token_list[ctx->curr];
2855 const enum index *list;
2859 /* Count number of tokens in current list. */
/* Use the stacked next-list when present, else the token's own list. */
2861 list = ctx->next[ctx->next_num - 1];
2863 list = token->next[0];
2864 for (i = 0; list[i]; ++i)
2869 * If there is a single token, use its completion callback, otherwise
2870 * return the number of entries.
2872 token = &token_list[list[0]];
2873 if (i == 1 && token->comp) {
2874 /* Save index for cmd_flow_get_help(). */
2875 ctx->prev = list[0];
2876 return token->comp(ctx, token, 0, NULL, 0);
2881 /** Return a completion entry (cmdline API). */
2883 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2884 char *dst, unsigned int size)
2886 struct context *ctx = &cmd_flow_context;
2887 const struct token *token = &token_list[ctx->curr];
2888 const enum index *list;
2892 /* Count number of tokens in current list. */
/* Use the stacked next-list when present, else the token's own list. */
2894 list = ctx->next[ctx->next_num - 1];
2896 list = token->next[0];
2897 for (i = 0; list[i]; ++i)
2901 /* If there is a single token, use its completion callback. */
2902 token = &token_list[list[0]];
2903 if (i == 1 && token->comp) {
2904 /* Save index for cmd_flow_get_help(). */
2905 ctx->prev = list[0];
2906 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2908 /* Otherwise make sure the index is valid and use defaults. */
2911 token = &token_list[list[index]];
2912 snprintf(dst, size, "%s", token->name);
2913 /* Save index for cmd_flow_get_help(). */
2914 ctx->prev = list[index];
2918 /** Populate help strings for current token (cmdline API). */
2920 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2922 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks just before this call. */
2923 const struct token *token = &token_list[ctx->prev];
2928 /* Set token type and update global help with details. */
2929 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help string, fall back to the token name. */
2931 cmd_flow.help_str = token->help;
2933 cmd_flow.help_str = token->name;
2937 /** Token definition template (cmdline API). */
/* Single shared header: all dynamic tokens route through these callbacks. */
2938 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2939 .ops = &(struct cmdline_token_ops){
2940 .parse = cmd_flow_parse,
2941 .complete_get_nb = cmd_flow_complete_get_nb,
2942 .complete_get_elt = cmd_flow_complete_get_elt,
2943 .get_help = cmd_flow_get_help,
2948 /** Populate the next dynamic token. */
/* NOTE(review): partial listing — loop body and early returns omitted. */
2950 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2951 cmdline_parse_token_hdr_t **hdr_inst)
2953 struct context *ctx = &cmd_flow_context;
2955 /* Always reinitialize context before requesting the first token. */
2956 if (!(hdr_inst - cmd_flow.tokens))
2957 cmd_flow_context_init(ctx);
2958 /* Return NULL when no more tokens are expected. */
2959 if (!ctx->next_num && ctx->curr) {
2963 /* Determine if command should end here. */
2964 if (ctx->eol && ctx->last && ctx->next_num) {
2965 const enum index *list = ctx->next[ctx->next_num - 1];
2968 for (i = 0; list[i]; ++i) {
/* Hand back the shared token header for the next position. */
2975 *hdr = &cmd_flow_token_hdr;
2978 /** Dispatch parsed buffer to function calls. */
/* NOTE(review): partial listing — case labels and breaks omitted. */
2980 cmd_flow_parsed(const struct buffer *in)
2982 switch (in->command) {
2984 port_flow_validate(in->port, &in->args.vc.attr,
2985 in->args.vc.pattern, in->args.vc.actions);
2988 port_flow_create(in->port, &in->args.vc.attr,
2989 in->args.vc.pattern, in->args.vc.actions);
2992 port_flow_destroy(in->port, in->args.destroy.rule_n,
2993 in->args.destroy.rule);
2996 port_flow_flush(in->port);
2999 port_flow_query(in->port, in->args.query.rule,
3000 in->args.query.action);
3003 port_flow_list(in->port, in->args.list.group_n,
3004 in->args.list.group);
3007 port_flow_isolate(in->port, in->args.isolate.set);
3014 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose: generates tokens during parsing, dispatches on completion. */
3016 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3019 cmd_flow_tok(arg0, arg2);
3021 cmd_flow_parsed(arg0);
3024 /** Global parser instance (cmdline API). */
3025 cmdline_parse_inst_t cmd_flow = {
3027 .data = NULL, /**< Unused. */
3028 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3031 }, /**< Tokens are returned by cmd_flow_tok(). */