1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
109 ITEM_VLAN_INNER_TYPE,
141 ITEM_E_TAG_GRP_ECID_B,
160 /* Validate/create actions. */
176 ACTION_RSS_FUNC_DEFAULT,
177 ACTION_RSS_FUNC_TOEPLITZ,
178 ACTION_RSS_FUNC_SIMPLE_XOR,
190 ACTION_PHY_PORT_ORIGINAL,
191 ACTION_PHY_PORT_INDEX,
193 ACTION_PORT_ID_ORIGINAL,
/**
 * Maximum size for pattern in struct rte_flow_item_raw.
 *
 * Number of extra bytes reserved past the fixed structure to hold the
 * variable-length byte string of a RAW pattern item.
 */
#define ITEM_RAW_PATTERN_SIZE 40
/**
 * Storage size for struct rte_flow_item_raw including pattern.
 *
 * RAW item specifications are stored as one chunk: the fixed structure
 * immediately followed by ITEM_RAW_PATTERN_SIZE bytes of pattern data.
 */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
/**
 * Maximum number of queue indices in struct rte_flow_action_rss.
 *
 * Bounds the queue[] array embedded in struct action_rss_data.
 */
#define ACTION_RSS_QUEUE_NUM 32
209 /** Storage for struct rte_flow_action_rss including external data. */
210 struct action_rss_data {
211 struct rte_flow_action_rss conf;
212 uint8_t key[RSS_HASH_KEY_LENGTH];
213 uint16_t queue[ACTION_RSS_QUEUE_NUM];
/**
 * Maximum number of subsequent tokens and arguments on the stack.
 *
 * Bounds the next[] and args[] stacks of the parser context; it caps how
 * deeply token lists may be nested while parsing a single command line.
 */
#define CTX_STACK_SIZE 16
219 /** Parser context. */
221 /** Stack of subsequent token lists to process. */
222 const enum index *next[CTX_STACK_SIZE];
223 /** Arguments for stacked tokens. */
224 const void *args[CTX_STACK_SIZE];
225 enum index curr; /**< Current token index. */
226 enum index prev; /**< Index of the last token seen. */
227 int next_num; /**< Number of entries in next[]. */
228 int args_num; /**< Number of entries in args[]. */
229 uint32_t eol:1; /**< EOL has been detected. */
230 uint32_t last:1; /**< No more arguments. */
231 portid_t port; /**< Current port ID (for completions). */
232 uint32_t objdata; /**< Object-specific data. */
233 void *object; /**< Address of current object for relative offsets. */
234 void *objmask; /**< Object a full mask must be written to. */
237 /** Token argument. */
239 uint32_t hton:1; /**< Use network byte ordering. */
240 uint32_t sign:1; /**< Value is signed. */
241 uint32_t bounded:1; /**< Value is bounded. */
242 uintmax_t min; /**< Minimum value if bounded. */
243 uintmax_t max; /**< Maximum value if bounded. */
244 uint32_t offset; /**< Relative offset from ctx->object. */
245 uint32_t size; /**< Field size. */
246 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
249 /** Parser token definition. */
251 /** Type displayed during completion (defaults to "TOKEN"). */
253 /** Help displayed during completion (defaults to token name). */
255 /** Private data used by parser functions. */
258 * Lists of subsequent tokens to push on the stack. Each call to the
259 * parser consumes the last entry of that stack.
261 const enum index *const *next;
262 /** Arguments stack for subsequent tokens that need them. */
263 const struct arg *const *args;
265 * Token-processing callback, returns -1 in case of error, the
266 * length of the matched string otherwise. If NULL, attempts to
267 * match the token name.
269 * If buf is not NULL, the result should be stored in it according
270 * to context. An error is returned if not large enough.
272 int (*call)(struct context *ctx, const struct token *token,
273 const char *str, unsigned int len,
274 void *buf, unsigned int size);
276 * Callback that provides possible values for this token, used for
277 * completion. Returns -1 in case of error, the number of possible
278 * values otherwise. If NULL, the token name is used.
280 * If buf is not NULL, entry index ent is written to buf and the
281 * full length of the entry is returned (same behavior as
284 int (*comp)(struct context *ctx, const struct token *token,
285 unsigned int ent, char *buf, unsigned int size);
286 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field.
 *
 * Expands to an anonymous, NULL-terminated array of token index lists;
 * each list is typically built with NEXT_ENTRY().
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
/**
 * Static initializer for a NEXT() entry.
 *
 * Expands to an anonymous array of token indices terminated by ZERO.
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
/**
 * Static initializer for the args field.
 *
 * Expands to an anonymous, NULL-terminated array of pointers to
 * struct arg, one per token that consumes an argument.
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
299 /** Static initializer for ARGS() to target a field. */
300 #define ARGS_ENTRY(s, f) \
301 (&(const struct arg){ \
302 .offset = offsetof(s, f), \
303 .size = sizeof(((s *)0)->f), \
306 /** Static initializer for ARGS() to target a bit-field. */
307 #define ARGS_ENTRY_BF(s, f, b) \
308 (&(const struct arg){ \
310 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
313 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
314 #define ARGS_ENTRY_MASK(s, f, m) \
315 (&(const struct arg){ \
316 .offset = offsetof(s, f), \
317 .size = sizeof(((s *)0)->f), \
318 .mask = (const void *)(m), \
321 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
322 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
323 (&(const struct arg){ \
325 .offset = offsetof(s, f), \
326 .size = sizeof(((s *)0)->f), \
327 .mask = (const void *)(m), \
330 /** Static initializer for ARGS() to target a pointer. */
331 #define ARGS_ENTRY_PTR(s, f) \
332 (&(const struct arg){ \
333 .size = sizeof(*((s *)0)->f), \
336 /** Static initializer for ARGS() with arbitrary offset and size. */
337 #define ARGS_ENTRY_ARB(o, s) \
338 (&(const struct arg){ \
343 /** Same as ARGS_ENTRY_ARB() with bounded values. */
344 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
345 (&(const struct arg){ \
353 /** Same as ARGS_ENTRY() using network byte ordering. */
354 #define ARGS_ENTRY_HTON(s, f) \
355 (&(const struct arg){ \
357 .offset = offsetof(s, f), \
358 .size = sizeof(((s *)0)->f), \
361 /** Parser output buffer layout expected by cmd_flow_parsed(). */
363 enum index command; /**< Flow command. */
364 portid_t port; /**< Affected port ID. */
367 struct rte_flow_attr attr;
368 struct rte_flow_item *pattern;
369 struct rte_flow_action *actions;
373 } vc; /**< Validate/create arguments. */
377 } destroy; /**< Destroy arguments. */
380 enum rte_flow_action_type action;
381 } query; /**< Query arguments. */
385 } list; /**< List arguments. */
388 } isolate; /**< Isolated mode arguments. */
389 } args; /**< Command arguments. */
392 /** Private data for pattern items. */
393 struct parse_item_priv {
394 enum rte_flow_item_type type; /**< Item type. */
395 uint32_t size; /**< Size of item specification structure. */
398 #define PRIV_ITEM(t, s) \
399 (&(const struct parse_item_priv){ \
400 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
404 /** Private data for actions. */
405 struct parse_action_priv {
406 enum rte_flow_action_type type; /**< Action type. */
407 uint32_t size; /**< Size of action configuration structure. */
410 #define PRIV_ACTION(t, s) \
411 (&(const struct parse_action_priv){ \
412 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
416 static const enum index next_vc_attr[] = {
426 static const enum index next_destroy_attr[] = {
432 static const enum index next_list_attr[] = {
438 static const enum index item_param[] = {
447 static const enum index next_item[] = {
479 static const enum index item_fuzzy[] = {
485 static const enum index item_any[] = {
491 static const enum index item_vf[] = {
497 static const enum index item_phy_port[] = {
503 static const enum index item_port_id[] = {
509 static const enum index item_raw[] = {
519 static const enum index item_eth[] = {
527 static const enum index item_vlan[] = {
532 ITEM_VLAN_INNER_TYPE,
537 static const enum index item_ipv4[] = {
547 static const enum index item_ipv6[] = {
558 static const enum index item_icmp[] = {
565 static const enum index item_udp[] = {
572 static const enum index item_tcp[] = {
580 static const enum index item_sctp[] = {
589 static const enum index item_vxlan[] = {
595 static const enum index item_e_tag[] = {
596 ITEM_E_TAG_GRP_ECID_B,
601 static const enum index item_nvgre[] = {
607 static const enum index item_mpls[] = {
613 static const enum index item_gre[] = {
619 static const enum index item_gtp[] = {
625 static const enum index item_geneve[] = {
632 static const enum index item_vxlan_gpe[] = {
638 static const enum index next_action[] = {
656 static const enum index action_mark[] = {
662 static const enum index action_queue[] = {
668 static const enum index action_rss[] = {
679 static const enum index action_vf[] = {
686 static const enum index action_phy_port[] = {
687 ACTION_PHY_PORT_ORIGINAL,
688 ACTION_PHY_PORT_INDEX,
693 static const enum index action_port_id[] = {
694 ACTION_PORT_ID_ORIGINAL,
700 static const enum index action_meter[] = {
/*
 * Forward declarations for token-processing callbacks. All follow the
 * struct token.call signature: they return -1 on error or the length of
 * the matched string, writing the parsed result to the output buffer
 * when one is provided.
 */

/* Initializer callback bound to the entry-point token. */
static int parse_init(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
/* Parser for validate/create ("vc") commands and their tokens. */
static int parse_vc(struct context *, const struct token *,
		    const char *, unsigned int,
		    void *, unsigned int);
/* Parser for pattern item modifiers (is/spec/last/mask/prefix). */
static int parse_vc_spec(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
/* Parser for action configuration fields (e.g. mark id, queue index). */
static int parse_vc_conf(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
716 static int parse_vc_action_rss(struct context *, const struct token *,
717 const char *, unsigned int, void *,
719 static int parse_vc_action_rss_func(struct context *, const struct token *,
720 const char *, unsigned int, void *,
722 static int parse_vc_action_rss_type(struct context *, const struct token *,
723 const char *, unsigned int, void *,
725 static int parse_vc_action_rss_queue(struct context *, const struct token *,
726 const char *, unsigned int, void *,
/* Parser for the "destroy" command and its rule identifier list. */
static int parse_destroy(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
/* Parser for the "flush" command (destroy all rules on a port). */
static int parse_flush(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
/* Parser for the "query" command. */
static int parse_query(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
/* Parser for the action name argument of "query". */
static int parse_action(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
/* Parser for the "list" command and its group filters. */
static int parse_list(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
/* Parser for the "isolate" command. */
static int parse_isolate(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
/* Parser for signed/unsigned integer token values. */
static int parse_int(struct context *, const struct token *,
		     const char *, unsigned int,
		     void *, unsigned int);
/* Parser turning a prefix length into a bit-mask. */
static int parse_prefix(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
/* Parser for boolean token values. */
static int parse_boolean(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
/* Parser for fixed string token values. */
static int parse_string(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
/* Parser for standard MAC address notation. */
static int parse_mac_addr(struct context *, const struct token *,
			  const char *, unsigned int,
			  void *, unsigned int);
/* Parser for standard IPv4 address notation. */
static int parse_ipv4_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
/* Parser for standard IPv6 address notation. */
static int parse_ipv6_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
/* Parser for port identifier token values. */
static int parse_port(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
/*
 * Forward declarations for completion callbacks. All follow the
 * struct token.comp signature: they return -1 on error or the number of
 * possible values, writing entry "ent" to the buffer when one is
 * provided.
 */

/* No completion available for this token. */
static int comp_none(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
/* Complete boolean values. */
static int comp_boolean(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
/* Complete action names. */
static int comp_action(struct context *, const struct token *,
		       unsigned int, char *, unsigned int);
/* Complete available port IDs. */
static int comp_port(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
/* Complete available rule IDs. */
static int comp_rule_id(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
/* Complete RSS hash type names for the rss action. */
static int comp_vc_action_rss_type(struct context *, const struct token *,
				   unsigned int, char *, unsigned int);
/* Complete queue indices for the rss action. */
static int comp_vc_action_rss_queue(struct context *, const struct token *,
				    unsigned int, char *, unsigned int);
785 /** Token definitions. */
786 static const struct token token_list[] = {
787 /* Special tokens. */
790 .help = "null entry, abused as the entry point",
791 .next = NEXT(NEXT_ENTRY(FLOW)),
796 .help = "command may end here",
802 .help = "integer value",
807 .name = "{unsigned}",
809 .help = "unsigned integer value",
816 .help = "prefix length for bit-mask",
817 .call = parse_prefix,
823 .help = "any boolean value",
824 .call = parse_boolean,
825 .comp = comp_boolean,
830 .help = "fixed string",
831 .call = parse_string,
835 .name = "{MAC address}",
837 .help = "standard MAC address notation",
838 .call = parse_mac_addr,
842 .name = "{IPv4 address}",
843 .type = "IPV4 ADDRESS",
844 .help = "standard IPv4 address notation",
845 .call = parse_ipv4_addr,
849 .name = "{IPv6 address}",
850 .type = "IPV6 ADDRESS",
851 .help = "standard IPv6 address notation",
852 .call = parse_ipv6_addr,
858 .help = "rule identifier",
860 .comp = comp_rule_id,
865 .help = "port identifier",
870 .name = "{group_id}",
872 .help = "group identifier",
879 .help = "priority level",
883 /* Top-level command. */
886 .type = "{command} {port_id} [{arg} [...]]",
887 .help = "manage ingress/egress flow rules",
888 .next = NEXT(NEXT_ENTRY
898 /* Sub-level commands. */
901 .help = "check whether a flow rule can be created",
902 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
903 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
908 .help = "create a flow rule",
909 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
910 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
915 .help = "destroy specific flow rules",
916 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
917 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
918 .call = parse_destroy,
922 .help = "destroy all flow rules",
923 .next = NEXT(NEXT_ENTRY(PORT_ID)),
924 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
929 .help = "query an existing flow rule",
930 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
932 NEXT_ENTRY(PORT_ID)),
933 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
934 ARGS_ENTRY(struct buffer, args.query.rule),
935 ARGS_ENTRY(struct buffer, port)),
940 .help = "list existing flow rules",
941 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
942 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
947 .help = "restrict ingress traffic to the defined flow rules",
948 .next = NEXT(NEXT_ENTRY(BOOLEAN),
949 NEXT_ENTRY(PORT_ID)),
950 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
951 ARGS_ENTRY(struct buffer, port)),
952 .call = parse_isolate,
954 /* Destroy arguments. */
957 .help = "specify a rule identifier",
958 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
959 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
960 .call = parse_destroy,
962 /* Query arguments. */
966 .help = "action to query, must be part of the rule",
967 .call = parse_action,
970 /* List arguments. */
973 .help = "specify a group",
974 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
975 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
978 /* Validate/create attributes. */
981 .help = "specify a group",
982 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
983 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
988 .help = "specify a priority level",
989 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
990 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
995 .help = "affect rule to ingress",
996 .next = NEXT(next_vc_attr),
1001 .help = "affect rule to egress",
1002 .next = NEXT(next_vc_attr),
1007 .help = "apply rule directly to endpoints found in pattern",
1008 .next = NEXT(next_vc_attr),
1011 /* Validate/create pattern. */
1014 .help = "submit a list of pattern items",
1015 .next = NEXT(next_item),
1020 .help = "match value perfectly (with full bit-mask)",
1021 .call = parse_vc_spec,
1023 [ITEM_PARAM_SPEC] = {
1025 .help = "match value according to configured bit-mask",
1026 .call = parse_vc_spec,
1028 [ITEM_PARAM_LAST] = {
1030 .help = "specify upper bound to establish a range",
1031 .call = parse_vc_spec,
1033 [ITEM_PARAM_MASK] = {
1035 .help = "specify bit-mask with relevant bits set to one",
1036 .call = parse_vc_spec,
1038 [ITEM_PARAM_PREFIX] = {
1040 .help = "generate bit-mask from a prefix length",
1041 .call = parse_vc_spec,
1045 .help = "specify next pattern item",
1046 .next = NEXT(next_item),
1050 .help = "end list of pattern items",
1051 .priv = PRIV_ITEM(END, 0),
1052 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1057 .help = "no-op pattern item",
1058 .priv = PRIV_ITEM(VOID, 0),
1059 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1064 .help = "perform actions when pattern does not match",
1065 .priv = PRIV_ITEM(INVERT, 0),
1066 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1071 .help = "match any protocol for the current layer",
1072 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1073 .next = NEXT(item_any),
1078 .help = "number of layers covered",
1079 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1080 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1084 .help = "match traffic from/to the physical function",
1085 .priv = PRIV_ITEM(PF, 0),
1086 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1091 .help = "match traffic from/to a virtual function ID",
1092 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1093 .next = NEXT(item_vf),
1099 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1100 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1104 .help = "match traffic from/to a specific physical port",
1105 .priv = PRIV_ITEM(PHY_PORT,
1106 sizeof(struct rte_flow_item_phy_port)),
1107 .next = NEXT(item_phy_port),
1110 [ITEM_PHY_PORT_INDEX] = {
1112 .help = "physical port index",
1113 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1114 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1118 .help = "match traffic from/to a given DPDK port ID",
1119 .priv = PRIV_ITEM(PORT_ID,
1120 sizeof(struct rte_flow_item_port_id)),
1121 .next = NEXT(item_port_id),
1124 [ITEM_PORT_ID_ID] = {
1126 .help = "DPDK port ID",
1127 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1128 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1132 .help = "match an arbitrary byte string",
1133 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1134 .next = NEXT(item_raw),
1137 [ITEM_RAW_RELATIVE] = {
1139 .help = "look for pattern after the previous item",
1140 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1141 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1144 [ITEM_RAW_SEARCH] = {
1146 .help = "search pattern from offset (see also limit)",
1147 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1148 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1151 [ITEM_RAW_OFFSET] = {
1153 .help = "absolute or relative offset for pattern",
1154 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1155 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1157 [ITEM_RAW_LIMIT] = {
1159 .help = "search area limit for start of pattern",
1160 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1161 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1163 [ITEM_RAW_PATTERN] = {
1165 .help = "byte string to look for",
1166 .next = NEXT(item_raw,
1168 NEXT_ENTRY(ITEM_PARAM_IS,
1171 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1172 ARGS_ENTRY(struct rte_flow_item_raw, length),
1173 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1174 ITEM_RAW_PATTERN_SIZE)),
1178 .help = "match Ethernet header",
1179 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1180 .next = NEXT(item_eth),
1185 .help = "destination MAC",
1186 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1187 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1191 .help = "source MAC",
1192 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1193 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1197 .help = "EtherType",
1198 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1199 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1203 .help = "match 802.1Q/ad VLAN tag",
1204 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1205 .next = NEXT(item_vlan),
1210 .help = "tag control information",
1211 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1212 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1216 .help = "priority code point",
1217 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1218 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1223 .help = "drop eligible indicator",
1224 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1225 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1230 .help = "VLAN identifier",
1231 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1232 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1235 [ITEM_VLAN_INNER_TYPE] = {
1236 .name = "inner_type",
1237 .help = "inner EtherType",
1238 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1239 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1244 .help = "match IPv4 header",
1245 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1246 .next = NEXT(item_ipv4),
1251 .help = "type of service",
1252 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1253 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1254 hdr.type_of_service)),
1258 .help = "time to live",
1259 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1260 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1263 [ITEM_IPV4_PROTO] = {
1265 .help = "next protocol ID",
1266 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1268 hdr.next_proto_id)),
1272 .help = "source address",
1273 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1274 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1279 .help = "destination address",
1280 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1281 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1286 .help = "match IPv6 header",
1287 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1288 .next = NEXT(item_ipv6),
1293 .help = "traffic class",
1294 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1295 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1297 "\x0f\xf0\x00\x00")),
1299 [ITEM_IPV6_FLOW] = {
1301 .help = "flow label",
1302 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1303 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1305 "\x00\x0f\xff\xff")),
1307 [ITEM_IPV6_PROTO] = {
1309 .help = "protocol (next header)",
1310 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1311 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1316 .help = "hop limit",
1317 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1318 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1323 .help = "source address",
1324 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1325 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1330 .help = "destination address",
1331 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1332 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1337 .help = "match ICMP header",
1338 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1339 .next = NEXT(item_icmp),
1342 [ITEM_ICMP_TYPE] = {
1344 .help = "ICMP packet type",
1345 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1346 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1349 [ITEM_ICMP_CODE] = {
1351 .help = "ICMP packet code",
1352 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1353 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1358 .help = "match UDP header",
1359 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1360 .next = NEXT(item_udp),
1365 .help = "UDP source port",
1366 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1367 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1372 .help = "UDP destination port",
1373 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1374 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1379 .help = "match TCP header",
1380 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1381 .next = NEXT(item_tcp),
1386 .help = "TCP source port",
1387 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1388 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1393 .help = "TCP destination port",
1394 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1395 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1398 [ITEM_TCP_FLAGS] = {
1400 .help = "TCP flags",
1401 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1402 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1407 .help = "match SCTP header",
1408 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1409 .next = NEXT(item_sctp),
1414 .help = "SCTP source port",
1415 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1416 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1421 .help = "SCTP destination port",
1422 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1423 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1428 .help = "validation tag",
1429 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1430 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1433 [ITEM_SCTP_CKSUM] = {
1436 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1437 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1442 .help = "match VXLAN header",
1443 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1444 .next = NEXT(item_vxlan),
1447 [ITEM_VXLAN_VNI] = {
1449 .help = "VXLAN identifier",
1450 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1451 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1455 .help = "match E-Tag header",
1456 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1457 .next = NEXT(item_e_tag),
1460 [ITEM_E_TAG_GRP_ECID_B] = {
1461 .name = "grp_ecid_b",
1462 .help = "GRP and E-CID base",
1463 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1464 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1470 .help = "match NVGRE header",
1471 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1472 .next = NEXT(item_nvgre),
1475 [ITEM_NVGRE_TNI] = {
1477 .help = "virtual subnet ID",
1478 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1479 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1483 .help = "match MPLS header",
1484 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1485 .next = NEXT(item_mpls),
1488 [ITEM_MPLS_LABEL] = {
1490 .help = "MPLS label",
1491 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1492 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1498 .help = "match GRE header",
1499 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1500 .next = NEXT(item_gre),
1503 [ITEM_GRE_PROTO] = {
1505 .help = "GRE protocol type",
1506 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1507 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1512 .help = "fuzzy pattern match, expect faster than default",
1513 .priv = PRIV_ITEM(FUZZY,
1514 sizeof(struct rte_flow_item_fuzzy)),
1515 .next = NEXT(item_fuzzy),
1518 [ITEM_FUZZY_THRESH] = {
1520 .help = "match accuracy threshold",
1521 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1522 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1527 .help = "match GTP header",
1528 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1529 .next = NEXT(item_gtp),
1534 .help = "tunnel endpoint identifier",
1535 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1536 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1540 .help = "match GTP header",
1541 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1542 .next = NEXT(item_gtp),
1547 .help = "match GTP header",
1548 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1549 .next = NEXT(item_gtp),
1554 .help = "match GENEVE header",
1555 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1556 .next = NEXT(item_geneve),
1559 [ITEM_GENEVE_VNI] = {
1561 .help = "virtual network identifier",
1562 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1563 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1565 [ITEM_GENEVE_PROTO] = {
1567 .help = "GENEVE protocol type",
1568 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1569 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1572 [ITEM_VXLAN_GPE] = {
1573 .name = "vxlan-gpe",
1574 .help = "match VXLAN-GPE header",
1575 .priv = PRIV_ITEM(VXLAN_GPE,
1576 sizeof(struct rte_flow_item_vxlan_gpe)),
1577 .next = NEXT(item_vxlan_gpe),
1580 [ITEM_VXLAN_GPE_VNI] = {
1582 .help = "VXLAN-GPE identifier",
1583 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1584 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1588 /* Validate/create actions. */
1591 .help = "submit a list of associated actions",
1592 .next = NEXT(next_action),
1597 .help = "specify next action",
1598 .next = NEXT(next_action),
1602 .help = "end list of actions",
1603 .priv = PRIV_ACTION(END, 0),
1608 .help = "no-op action",
1609 .priv = PRIV_ACTION(VOID, 0),
1610 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1613 [ACTION_PASSTHRU] = {
1615 .help = "let subsequent rule process matched packets",
1616 .priv = PRIV_ACTION(PASSTHRU, 0),
1617 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1622 .help = "attach 32 bit value to packets",
1623 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1624 .next = NEXT(action_mark),
1627 [ACTION_MARK_ID] = {
1629 .help = "32 bit value to return with packets",
1630 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1631 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1632 .call = parse_vc_conf,
1636 .help = "flag packets",
1637 .priv = PRIV_ACTION(FLAG, 0),
1638 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1643 .help = "assign packets to a given queue index",
1644 .priv = PRIV_ACTION(QUEUE,
1645 sizeof(struct rte_flow_action_queue)),
1646 .next = NEXT(action_queue),
1649 [ACTION_QUEUE_INDEX] = {
1651 .help = "queue index to use",
1652 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1653 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1654 .call = parse_vc_conf,
1658 .help = "drop packets (note: passthru has priority)",
1659 .priv = PRIV_ACTION(DROP, 0),
1660 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1665 .help = "enable counters for this rule",
1666 .priv = PRIV_ACTION(COUNT, 0),
1667 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1672 .help = "spread packets among several queues",
1673 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1674 .next = NEXT(action_rss),
1675 .call = parse_vc_action_rss,
1677 [ACTION_RSS_FUNC] = {
1679 .help = "RSS hash function to apply",
1680 .next = NEXT(action_rss,
1681 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1682 ACTION_RSS_FUNC_TOEPLITZ,
1683 ACTION_RSS_FUNC_SIMPLE_XOR)),
1685 [ACTION_RSS_FUNC_DEFAULT] = {
1687 .help = "default hash function",
1688 .call = parse_vc_action_rss_func,
1690 [ACTION_RSS_FUNC_TOEPLITZ] = {
1692 .help = "Toeplitz hash function",
1693 .call = parse_vc_action_rss_func,
1695 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1696 .name = "simple_xor",
1697 .help = "simple XOR hash function",
1698 .call = parse_vc_action_rss_func,
1700 [ACTION_RSS_LEVEL] = {
1702 .help = "encapsulation level for \"types\"",
1703 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1704 .args = ARGS(ARGS_ENTRY_ARB
1705 (offsetof(struct action_rss_data, conf) +
1706 offsetof(struct rte_flow_action_rss, level),
1707 sizeof(((struct rte_flow_action_rss *)0)->
1710 [ACTION_RSS_TYPES] = {
1712 .help = "specific RSS hash types",
1713 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1715 [ACTION_RSS_TYPE] = {
1717 .help = "RSS hash type",
1718 .call = parse_vc_action_rss_type,
1719 .comp = comp_vc_action_rss_type,
1721 [ACTION_RSS_KEY] = {
1723 .help = "RSS hash key",
1724 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1725 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1727 (offsetof(struct action_rss_data, conf) +
1728 offsetof(struct rte_flow_action_rss, key_len),
1729 sizeof(((struct rte_flow_action_rss *)0)->
1731 ARGS_ENTRY(struct action_rss_data, key)),
1733 [ACTION_RSS_KEY_LEN] = {
1735 .help = "RSS hash key length in bytes",
1736 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1737 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1738 (offsetof(struct action_rss_data, conf) +
1739 offsetof(struct rte_flow_action_rss, key_len),
1740 sizeof(((struct rte_flow_action_rss *)0)->
1743 RSS_HASH_KEY_LENGTH)),
1745 [ACTION_RSS_QUEUES] = {
1747 .help = "queue indices to use",
1748 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1749 .call = parse_vc_conf,
1751 [ACTION_RSS_QUEUE] = {
1753 .help = "queue index",
1754 .call = parse_vc_action_rss_queue,
1755 .comp = comp_vc_action_rss_queue,
1759 .help = "direct traffic to physical function",
1760 .priv = PRIV_ACTION(PF, 0),
1761 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1766 .help = "direct traffic to a virtual function ID",
1767 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1768 .next = NEXT(action_vf),
1771 [ACTION_VF_ORIGINAL] = {
1773 .help = "use original VF ID if possible",
1774 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1775 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1777 .call = parse_vc_conf,
1782 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1783 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1784 .call = parse_vc_conf,
1786 [ACTION_PHY_PORT] = {
1788 .help = "direct packets to physical port index",
1789 .priv = PRIV_ACTION(PHY_PORT,
1790 sizeof(struct rte_flow_action_phy_port)),
1791 .next = NEXT(action_phy_port),
1794 [ACTION_PHY_PORT_ORIGINAL] = {
1796 .help = "use original port index if possible",
1797 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
1798 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
1800 .call = parse_vc_conf,
1802 [ACTION_PHY_PORT_INDEX] = {
1804 .help = "physical port index",
1805 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
1806 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
1808 .call = parse_vc_conf,
1810 [ACTION_PORT_ID] = {
1812 .help = "direct matching traffic to a given DPDK port ID",
1813 .priv = PRIV_ACTION(PORT_ID,
1814 sizeof(struct rte_flow_action_port_id)),
1815 .next = NEXT(action_port_id),
1818 [ACTION_PORT_ID_ORIGINAL] = {
1820 .help = "use original DPDK port ID if possible",
1821 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
1822 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
1824 .call = parse_vc_conf,
1826 [ACTION_PORT_ID_ID] = {
1828 .help = "DPDK port ID",
1829 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
1830 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
1831 .call = parse_vc_conf,
1835 .help = "meter the directed packets at given id",
1836 .priv = PRIV_ACTION(METER,
1837 sizeof(struct rte_flow_action_meter)),
1838 .next = NEXT(action_meter),
1841 [ACTION_METER_ID] = {
1843 .help = "meter id to use",
1844 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
1845 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
1846 .call = parse_vc_conf,
1850 /** Remove and return last entry from argument stack. */
/* Returns NULL when the stack is empty, otherwise decrements args_num
 * and yields the most recently pushed entry (LIFO). */
1851 static const struct arg *
1852 pop_args(struct context *ctx)
1854 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1857 /** Add entry on top of the argument stack. */
/* NOTE(review): return type, braces and the overflow return statement are
 * elided in this view; the visible check rejects a push once args_num has
 * reached CTX_STACK_SIZE. */
1859 push_args(struct context *ctx, const struct arg *arg)
1861 if (ctx->args_num == CTX_STACK_SIZE)
1863 ctx->args[ctx->args_num++] = arg;
1867 /** Spread value into buffer according to bit-mask. */
/*
 * Copies @val bit-by-bit into the destination field, writing only at bit
 * positions set in arg->mask. parse_prefix() also calls this with
 * dst == NULL (apparently to obtain a size) — that path is elided here,
 * so only the little-endian write loop is documented below.
 */
1869 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1871 uint32_t i = arg->size;
1879 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1888 unsigned int shift = 0;
1889 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Walk every set bit of the current mask byte. */
1891 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1892 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then store the next bit of val into it. */
1897 *buf &= ~(1 << shift);
1898 *buf |= (val & 1) << shift;
1906 /** Compare a string with a partial one of a given length. */
/* Like strncmp() but also succeeds when @partial is a full-length match;
 * a nonzero return means mismatch (the differing/extra character). */
1908 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1910 int r = strncmp(full, partial, partial_len);
/* Equal prefix: match only if @full is not longer than partial_len. */
1914 if (strlen(full) <= partial_len)
1916 return full[partial_len];
1920 * Parse a prefix length and generate a bit-mask.
1922 * Last argument (ctx->args) is retrieved to determine mask size, storage
1923 * location and whether the result must use network byte ordering.
1926 parse_prefix(struct context *ctx, const struct token *token,
1927 const char *str, unsigned int len,
1928 void *buf, unsigned int size)
1930 const struct arg *arg = pop_args(ctx);
/* conv[n] is the mask byte covering the n leftover prefix bits
 * (0 -> 0x00, 1 -> 0x80, ... 8 -> 0xff). */
1934 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1938 /* Argument is expected. */
/* Whole token must be a valid unsigned integer. */
1942 u = strtoumax(str, &end, 0);
1943 if (errno || (size_t)(end - str) != len)
/* Bit-masked fields take the dedicated fill path. */
1948 extra = arg_entry_bf_fill(NULL, 0, arg);
1957 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1958 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix must fit in the destination field. */
1965 if (bytes > size || bytes + !!extra > size)
1969 buf = (uint8_t *)ctx->object + arg->offset;
1970 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host little-endian: set the high-order (trailing) bytes. */
1972 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1973 memset(buf, 0x00, size - bytes);
1975 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network/big-endian layout: set the leading bytes. */
1979 memset(buf, 0xff, bytes);
1980 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1982 ((uint8_t *)buf)[bytes] = conv[extra];
1985 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the consumed argument. */
1988 push_args(ctx, arg);
1992 /** Default parsing function for token name matching. */
/* Accepts the token when @str is a (possibly partial) match for its
 * name; elided lines handle the return values. */
1994 parse_default(struct context *ctx, const struct token *token,
1995 const char *str, unsigned int len,
1996 void *buf, unsigned int size)
2001 if (strcmp_partial(token->name, str, len))
2006 /** Parse flow command, initialize output buffer for subsequent tokens. */
2008 parse_init(struct context *ctx, const struct token *token,
2009 const char *str, unsigned int len,
2010 void *buf, unsigned int size)
2012 struct buffer *out = buf;
2014 /* Token name must match. */
2015 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2017 /* Nothing else to do if there is no buffer. */
2020 /* Make sure buffer is large enough. */
2021 if (size < sizeof(*out))
2023 /* Initialize buffer. */
/* 0x22 poison past the header helps catch reads of uninitialized
 * trailing storage. */
2024 memset(out, 0x00, sizeof(*out));
2025 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2028 ctx->objmask = NULL;
2032 /** Parse tokens for validate/create commands. */
/*
 * Fills @out incrementally: first the command word, then attributes
 * (ingress/egress/transfer), then pattern items and actions. Items and
 * actions grow upward from just past the header while their
 * spec/last/mask or conf data grows downward from the end of the
 * buffer; the two regions must not collide.
 */
2034 parse_vc(struct context *ctx, const struct token *token,
2035 const char *str, unsigned int len,
2036 void *buf, unsigned int size)
2038 struct buffer *out = buf;
2042 /* Token name must match. */
2043 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2045 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE. */
2048 if (!out->command) {
2049 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2051 if (sizeof(*out) > size)
2053 out->command = ctx->curr;
2056 ctx->objmask = NULL;
/* Data area starts at the end of the buffer and grows downward. */
2057 out->args.vc.data = (uint8_t *)out + size;
2061 ctx->object = &out->args.vc.attr;
2062 ctx->objmask = NULL;
2063 switch (ctx->curr) {
/* Attribute keywords simply set flag bits. */
2068 out->args.vc.attr.ingress = 1;
2071 out->args.vc.attr.egress = 1;
2074 out->args.vc.attr.transfer = 1;
/* "pattern": start the item array right after the header. */
2077 out->args.vc.pattern =
2078 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2080 ctx->object = out->args.vc.pattern;
2081 ctx->objmask = NULL;
/* "actions": start the action array after the last item. */
2084 out->args.vc.actions =
2085 (void *)RTE_ALIGN_CEIL((uintptr_t)
2086 (out->args.vc.pattern +
2087 out->args.vc.pattern_n),
2089 ctx->object = out->args.vc.actions;
2090 ctx->objmask = NULL;
/* Pattern item token: reserve spec/last/mask storage from the top. */
2097 if (!out->args.vc.actions) {
2098 const struct parse_item_priv *priv = token->priv;
2099 struct rte_flow_item *item =
2100 out->args.vc.pattern + out->args.vc.pattern_n;
2102 data_size = priv->size * 3; /* spec, last, mask */
2103 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2104 (out->args.vc.data - data_size),
/* Fail when item array and data area would overlap. */
2106 if ((uint8_t *)item + sizeof(*item) > data)
2108 *item = (struct rte_flow_item){
2111 ++out->args.vc.pattern_n;
2113 ctx->objmask = NULL;
/* Action token: reserve configuration storage the same way. */
2115 const struct parse_action_priv *priv = token->priv;
2116 struct rte_flow_action *action =
2117 out->args.vc.actions + out->args.vc.actions_n;
2119 data_size = priv->size; /* configuration */
2120 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2121 (out->args.vc.data - data_size),
2123 if ((uint8_t *)action + sizeof(*action) > data)
2125 *action = (struct rte_flow_action){
2127 .conf = data_size ? data : NULL,
2129 ++out->args.vc.actions_n;
2130 ctx->object = action;
2131 ctx->objmask = NULL;
2133 memset(data, 0, data_size);
2134 out->args.vc.data = data;
2135 ctx->objdata = data_size;
2139 /** Parse pattern item parameter type. */
/*
 * Handles the "spec"/"last"/"mask"/"prefix" keywords following a pattern
 * item: selects which third of the item's reserved data area subsequent
 * field tokens write into, and updates the matching rte_flow_item
 * pointer. NOTE(review): the assignments of `index` per case are elided
 * in this view.
 */
2141 parse_vc_spec(struct context *ctx, const struct token *token,
2142 const char *str, unsigned int len,
2143 void *buf, unsigned int size)
2145 struct buffer *out = buf;
2146 struct rte_flow_item *item;
2152 /* Token name must match. */
2153 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2155 /* Parse parameter types. */
2156 switch (ctx->curr) {
2157 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2163 case ITEM_PARAM_SPEC:
2166 case ITEM_PARAM_LAST:
2169 case ITEM_PARAM_PREFIX:
2170 /* Modify next token to expect a prefix. */
2171 if (ctx->next_num < 2)
2173 ctx->next[ctx->next_num - 2] = prefix;
2175 case ITEM_PARAM_MASK:
2181 /* Nothing else to do if there is no buffer. */
2184 if (!out->args.vc.pattern_n)
/* Operate on the most recently added item. */
2186 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2187 data_size = ctx->objdata / 3; /* spec, last, mask */
2188 /* Point to selected object. */
2189 ctx->object = out->args.vc.data + (data_size * index);
2191 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2192 item->mask = ctx->objmask;
2194 ctx->objmask = NULL;
2195 /* Update relevant item pointer. */
2196 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2201 /** Parse action configuration field. */
/* Redirects subsequent field tokens to the configuration storage of the
 * most recently added action (out->args.vc.data). */
2203 parse_vc_conf(struct context *ctx, const struct token *token,
2204 const char *str, unsigned int len,
2205 void *buf, unsigned int size)
2207 struct buffer *out = buf;
2210 /* Token name must match. */
2211 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2213 /* Nothing else to do if there is no buffer. */
2216 /* Point to selected object. */
2217 ctx->object = out->args.vc.data;
2218 ctx->objmask = NULL;
2222 /** Parse RSS action. */
/*
 * Creates the action through parse_vc(), then fills its
 * action_rss_data with defaults: testpmd's RSS key, every configured Rx
 * queue (capped at ACTION_RSS_QUEUE_NUM), and the device's reported
 * hash key size when a specific valid port is selected.
 */
2224 parse_vc_action_rss(struct context *ctx, const struct token *token,
2225 const char *str, unsigned int len,
2226 void *buf, unsigned int size)
2228 struct buffer *out = buf;
2229 struct rte_flow_action *action;
2230 struct action_rss_data *action_rss_data;
2234 ret = parse_vc(ctx, token, str, len, buf, size);
2237 /* Nothing else to do if there is no buffer. */
2240 if (!out->args.vc.actions_n)
2242 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2243 /* Point to selected object. */
2244 ctx->object = out->args.vc.data;
2245 ctx->objmask = NULL;
2246 /* Set up default configuration. */
2247 action_rss_data = ctx->object;
2248 *action_rss_data = (struct action_rss_data){
2249 .conf = (struct rte_flow_action_rss){
2250 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2253 .key_len = sizeof(action_rss_data->key),
2254 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2255 .key = action_rss_data->key,
2256 .queue = action_rss_data->queue,
2258 .key = "testpmd's default RSS hash key",
/* Default queue list is simply 0..queue_num-1. */
2261 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2262 action_rss_data->queue[i] = i;
2263 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2264 ctx->port != (portid_t)RTE_PORT_ALL) {
2265 struct rte_eth_dev_info info;
/* Clamp key length to what the device supports. */
2267 rte_eth_dev_info_get(ctx->port, &info);
2268 action_rss_data->conf.key_len =
2269 RTE_MIN(sizeof(action_rss_data->key),
2270 info.hash_key_size);
2272 action->conf = &action_rss_data->conf;
2277 * Parse func field for RSS action.
2279 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2280 * ACTION_RSS_FUNC_* index that called this function.
2283 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2284 const char *str, unsigned int len,
2285 void *buf, unsigned int size)
2287 struct action_rss_data *action_rss_data;
2288 enum rte_eth_hash_function func;
2292 /* Token name must match. */
2293 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the calling token index to the hash function enum. */
2295 switch (ctx->curr) {
2296 case ACTION_RSS_FUNC_DEFAULT:
2297 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2299 case ACTION_RSS_FUNC_TOEPLITZ:
2300 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2302 case ACTION_RSS_FUNC_SIMPLE_XOR:
2303 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
2310 action_rss_data = ctx->object;
2311 action_rss_data->conf.func = func;
2316 * Parse type field for RSS action.
2318 * Valid tokens are type field names and the "end" token.
/* ctx->objdata's upper 16 bits track whether at least one type token has
 * been seen; conf.types is reset on the first one and ORed afterwards. */
2321 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2322 const char *str, unsigned int len,
2323 void *buf, unsigned int size)
2325 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2326 struct action_rss_data *action_rss_data;
2332 if (ctx->curr != ACTION_RSS_TYPE)
/* First type token: clear previously accumulated types. */
2334 if (!(ctx->objdata >> 16) && ctx->object) {
2335 action_rss_data = ctx->object;
2336 action_rss_data->conf.types = 0;
/* "end" terminates the list. */
2338 if (!strcmp_partial("end", str, len)) {
2339 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
2342 for (i = 0; rss_type_table[i].str; ++i)
2343 if (!strcmp_partial(rss_type_table[i].str, str, len))
2345 if (!rss_type_table[i].str)
2347 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Expect another type token next. */
2349 if (ctx->next_num == RTE_DIM(ctx->next))
2351 ctx->next[ctx->next_num++] = next;
2354 action_rss_data = ctx->object;
2355 action_rss_data->conf.types |= rss_type_table[i].rss_type;
2360 * Parse queue field for RSS action.
2362 * Valid tokens are queue indices and the "end" token.
/* The running queue count is kept in the upper 16 bits of ctx->objdata;
 * each index is parsed by parse_int() straight into queue[i]. */
2365 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2366 const char *str, unsigned int len,
2367 void *buf, unsigned int size)
2369 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2370 struct action_rss_data *action_rss_data;
2377 if (ctx->curr != ACTION_RSS_QUEUE)
2379 i = ctx->objdata >> 16;
2380 if (!strcmp_partial("end", str, len)) {
2381 ctx->objdata &= 0xffff;
/* Reject more queues than the storage allows. */
2384 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push a synthetic argument so parse_int() stores into queue[i]. */
2387 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2388 i * sizeof(action_rss_data->queue[i]),
2389 sizeof(action_rss_data->queue[i]))))
2391 ret = parse_int(ctx, token, str, len, NULL, 0);
2397 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Expect another queue index next. */
2399 if (ctx->next_num == RTE_DIM(ctx->next))
2401 ctx->next[ctx->next_num++] = next;
2404 action_rss_data = ctx->object;
2405 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
2406 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2410 /** Parse tokens for destroy command. */
/* First invocation records the command and sets up the rule-ID array
 * right after the header; each later token appends one rule ID slot. */
2412 parse_destroy(struct context *ctx, const struct token *token,
2413 const char *str, unsigned int len,
2414 void *buf, unsigned int size)
2416 struct buffer *out = buf;
2418 /* Token name must match. */
2419 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2421 /* Nothing else to do if there is no buffer. */
2424 if (!out->command) {
2425 if (ctx->curr != DESTROY)
2427 if (sizeof(*out) > size)
2429 out->command = ctx->curr;
2432 ctx->objmask = NULL;
2433 out->args.destroy.rule =
2434 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when the next rule ID would overflow the buffer. */
2438 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2439 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2442 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2443 ctx->objmask = NULL;
2447 /** Parse tokens for flush command. */
/* Flush takes no extra arguments beyond the port: only record the
 * command on first match. */
2449 parse_flush(struct context *ctx, const struct token *token,
2450 const char *str, unsigned int len,
2451 void *buf, unsigned int size)
2453 struct buffer *out = buf;
2455 /* Token name must match. */
2456 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2458 /* Nothing else to do if there is no buffer. */
2461 if (!out->command) {
2462 if (ctx->curr != FLUSH)
2464 if (sizeof(*out) > size)
2466 out->command = ctx->curr;
2469 ctx->objmask = NULL;
2474 /** Parse tokens for query command. */
/* Records the command; the rule ID and action arguments are handled by
 * later tokens (lines elided in this view). */
2476 parse_query(struct context *ctx, const struct token *token,
2477 const char *str, unsigned int len,
2478 void *buf, unsigned int size)
2480 struct buffer *out = buf;
2482 /* Token name must match. */
2483 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2485 /* Nothing else to do if there is no buffer. */
2488 if (!out->command) {
2489 if (ctx->curr != QUERY)
2491 if (sizeof(*out) > size)
2493 out->command = ctx->curr;
2496 ctx->objmask = NULL;
2501 /** Parse action names. */
/* Matches @str against the names in next_action[] and stores the
 * corresponding action type (from the token's priv data) into the field
 * described by the popped argument. */
2503 parse_action(struct context *ctx, const struct token *token,
2504 const char *str, unsigned int len,
2505 void *buf, unsigned int size)
2507 struct buffer *out = buf;
2508 const struct arg *arg = pop_args(ctx);
2512 /* Argument is expected. */
2515 /* Parse action name. */
2516 for (i = 0; next_action[i]; ++i) {
2517 const struct parse_action_priv *priv;
2519 token = &token_list[next_action[i]];
2520 if (strcmp_partial(token->name, str, len))
2526 memcpy((uint8_t *)ctx->object + arg->offset,
/* Error path: restore the consumed argument. */
2532 push_args(ctx, arg);
2536 /** Parse tokens for list command. */
/* Mirrors parse_destroy(): the first match records the command and sets
 * up a group-ID array after the header; later tokens append slots. */
2538 parse_list(struct context *ctx, const struct token *token,
2539 const char *str, unsigned int len,
2540 void *buf, unsigned int size)
2542 struct buffer *out = buf;
2544 /* Token name must match. */
2545 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2547 /* Nothing else to do if there is no buffer. */
2550 if (!out->command) {
2551 if (ctx->curr != LIST)
2553 if (sizeof(*out) > size)
2555 out->command = ctx->curr;
2558 ctx->objmask = NULL;
2559 out->args.list.group =
2560 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when the next group ID would overflow the buffer. */
2564 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2565 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2568 ctx->object = out->args.list.group + out->args.list.group_n++;
2569 ctx->objmask = NULL;
2573 /** Parse tokens for isolate command. */
/* Records the command; the boolean "set" argument is handled by a later
 * token (lines elided in this view). */
2575 parse_isolate(struct context *ctx, const struct token *token,
2576 const char *str, unsigned int len,
2577 void *buf, unsigned int size)
2579 struct buffer *out = buf;
2581 /* Token name must match. */
2582 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2584 /* Nothing else to do if there is no buffer. */
2587 if (!out->command) {
2588 if (ctx->curr != ISOLATE)
2590 if (sizeof(*out) > size)
2592 out->command = ctx->curr;
2595 ctx->objmask = NULL;
2601 * Parse signed/unsigned integers 8 to 64-bit long.
2603 * Last argument (ctx->args) is retrieved to determine integer type and
/* Stores into ctx->object (and all-ones into ctx->objmask when set),
 * honoring the argument's sign, bounds, bit-mask and hton flags. */
2607 parse_int(struct context *ctx, const struct token *token,
2608 const char *str, unsigned int len,
2609 void *buf, unsigned int size)
2611 const struct arg *arg = pop_args(ctx);
2616 /* Argument is expected. */
/* Parse as signed or unsigned depending on the argument. */
2621 (uintmax_t)strtoimax(str, &end, 0) :
2622 strtoumax(str, &end, 0);
2623 if (errno || (size_t)(end - str) != len)
/* Range check against arg->min/arg->max with matching signedness. */
2626 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2627 (intmax_t)u > (intmax_t)arg->max)) ||
2628 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-masked fields take the dedicated fill path. */
2633 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2634 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2638 buf = (uint8_t *)ctx->object + arg->offset;
/* Store with the destination field's exact width. */
2642 case sizeof(uint8_t):
2643 *(uint8_t *)buf = u;
2645 case sizeof(uint16_t):
2646 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are written byte by byte in the requested order. */
2648 case sizeof(uint8_t [3]):
2649 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2651 ((uint8_t *)buf)[0] = u;
2652 ((uint8_t *)buf)[1] = u >> 8;
2653 ((uint8_t *)buf)[2] = u >> 16;
2657 ((uint8_t *)buf)[0] = u >> 16;
2658 ((uint8_t *)buf)[1] = u >> 8;
2659 ((uint8_t *)buf)[2] = u;
2661 case sizeof(uint32_t):
2662 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2664 case sizeof(uint64_t):
2665 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat for the mask object when it is distinct. */
2670 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2672 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the consumed argument. */
2677 push_args(ctx, arg);
2684 * Three arguments (ctx->args) are retrieved from the stack to store data,
2685 * its actual length and address (in that order).
/* Copies the raw token into the data field (zero-padded), records its
 * length via parse_int(), and optionally stores a pointer to the data
 * when the address argument has a nonzero size. */
2688 parse_string(struct context *ctx, const struct token *token,
2689 const char *str, unsigned int len,
2690 void *buf, unsigned int size)
2692 const struct arg *arg_data = pop_args(ctx);
2693 const struct arg *arg_len = pop_args(ctx);
2694 const struct arg *arg_addr = pop_args(ctx);
2695 char tmp[16]; /* Ought to be enough. */
2698 /* Arguments are expected. */
/* Partial pops are undone before bailing out. */
2702 push_args(ctx, arg_data);
2706 push_args(ctx, arg_len);
2707 push_args(ctx, arg_data);
2710 size = arg_data->size;
2711 /* Bit-mask fill is not supported. */
2712 if (arg_data->mask || size < len)
2716 /* Let parse_int() fill length information first. */
2717 ret = snprintf(tmp, sizeof(tmp), "%u", len);
2720 push_args(ctx, arg_len);
2721 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2726 buf = (uint8_t *)ctx->object + arg_data->offset;
2727 /* Output buffer is not necessarily NUL-terminated. */
2728 memcpy(buf, str, len);
2729 memset((uint8_t *)buf + len, 0x00, size - len);
2731 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2732 /* Save address if requested. */
2733 if (arg_addr->size) {
2734 memcpy((uint8_t *)ctx->object + arg_addr->offset,
2736 (uint8_t *)ctx->object + arg_data->offset
2740 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2742 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three consumed arguments. */
2748 push_args(ctx, arg_addr);
2749 push_args(ctx, arg_len);
2750 push_args(ctx, arg_data);
2755 * Parse a MAC address.
2757 * Last argument (ctx->args) is retrieved to determine storage size and
/* Validates via cmdline_parse_etheraddr() into a temporary, then copies
 * the six bytes into ctx->object and fills objmask with 0xff. */
2761 parse_mac_addr(struct context *ctx, const struct token *token,
2762 const char *str, unsigned int len,
2763 void *buf, unsigned int size)
2765 const struct arg *arg = pop_args(ctx);
2766 struct ether_addr tmp;
2770 /* Argument is expected. */
2774 /* Bit-mask fill is not supported. */
2775 if (arg->mask || size != sizeof(tmp))
2777 /* Only network endian is supported. */
2780 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2781 if (ret < 0 || (unsigned int)ret != len)
2785 buf = (uint8_t *)ctx->object + arg->offset;
2786 memcpy(buf, &tmp, size);
2788 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the consumed argument. */
2791 push_args(ctx, arg);
2796 * Parse an IPv4 address.
2798 * Last argument (ctx->args) is retrieved to determine storage size and
/* Tries inet_pton() first; on failure the token is re-parsed as a plain
 * integer so addresses may also be given numerically. */
2802 parse_ipv4_addr(struct context *ctx, const struct token *token,
2803 const char *str, unsigned int len,
2804 void *buf, unsigned int size)
2806 const struct arg *arg = pop_args(ctx);
2811 /* Argument is expected. */
2815 /* Bit-mask fill is not supported. */
2816 if (arg->mask || size != sizeof(tmp))
2818 /* Only network endian is supported. */
/* NUL-terminate a local copy for inet_pton(). */
2821 memcpy(str2, str, len);
2823 ret = inet_pton(AF_INET, str2, &tmp);
2825 /* Attempt integer parsing. */
2826 push_args(ctx, arg);
2827 return parse_int(ctx, token, str, len, buf, size);
2831 buf = (uint8_t *)ctx->object + arg->offset;
2832 memcpy(buf, &tmp, size);
2834 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the consumed argument. */
2837 push_args(ctx, arg);
2842 * Parse an IPv6 address.
2844 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same flow as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback (that branch is elided in this view — TODO confirm). */
2848 parse_ipv6_addr(struct context *ctx, const struct token *token,
2849 const char *str, unsigned int len,
2850 void *buf, unsigned int size)
2852 const struct arg *arg = pop_args(ctx);
2854 struct in6_addr tmp;
2858 /* Argument is expected. */
2862 /* Bit-mask fill is not supported. */
2863 if (arg->mask || size != sizeof(tmp))
2865 /* Only network endian is supported. */
2868 memcpy(str2, str, len);
2870 ret = inet_pton(AF_INET6, str2, &tmp);
2875 buf = (uint8_t *)ctx->object + arg->offset;
2876 memcpy(buf, &tmp, size);
2878 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the consumed argument. */
2881 push_args(ctx, arg);
2885 /** Boolean values (even indices stand for false). */
/* NOTE(review): remaining entries and terminator elided in this view. */
2886 static const char *const boolean_name[] = {
2896 * Parse a boolean value.
2898 * Last argument (ctx->args) is retrieved to determine storage size and
/* Recognized names map to "0"/"1" by index parity (even = false) and are
 * then delegated to parse_int(); unknown tokens go to parse_int() as-is. */
2902 parse_boolean(struct context *ctx, const struct token *token,
2903 const char *str, unsigned int len,
2904 void *buf, unsigned int size)
2906 const struct arg *arg = pop_args(ctx);
2910 /* Argument is expected. */
2913 for (i = 0; boolean_name[i]; ++i)
2914 if (!strcmp_partial(boolean_name[i], str, len))
2916 /* Process token as integer. */
2917 if (boolean_name[i])
2918 str = i & 1 ? "1" : "0";
2919 push_args(ctx, arg);
2920 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success. */
2921 return ret > 0 ? (int)len : ret;
2924 /** Parse port and update context. */
/* Parses the port ID into a scratch buffer when the caller provided
 * none, then mirrors the result into ctx->port for later tokens. */
2926 parse_port(struct context *ctx, const struct token *token,
2927 const char *str, unsigned int len,
2928 void *buf, unsigned int size)
2930 struct buffer *out = &(struct buffer){ .port = 0 };
2938 ctx->objmask = NULL;
2939 size = sizeof(*out);
2941 ret = parse_int(ctx, token, str, len, out, size);
2943 ctx->port = out->port;
2949 /** No completion. */
/* Placeholder completion callback for tokens with free-form input. */
2951 comp_none(struct context *ctx, const struct token *token,
2952 unsigned int ent, char *buf, unsigned int size)
2962 /** Complete boolean values. */
/* With a buffer, writes entry @ent of boolean_name[]; otherwise the loop
 * falls through so the count can be returned (elided in this view). */
2964 comp_boolean(struct context *ctx, const struct token *token,
2965 unsigned int ent, char *buf, unsigned int size)
2971 for (i = 0; boolean_name[i]; ++i)
2972 if (buf && i == ent)
2973 return snprintf(buf, size, "%s", boolean_name[i]);
2979 /** Complete action names. */
/* Same pattern as comp_boolean(), iterating next_action[] token names. */
2981 comp_action(struct context *ctx, const struct token *token,
2982 unsigned int ent, char *buf, unsigned int size)
2988 for (i = 0; next_action[i]; ++i)
2989 if (buf && i == ent)
2990 return snprintf(buf, size, "%s",
2991 token_list[next_action[i]].name)
2997 /** Complete available ports. */
/* Enumerates attached ethdev port IDs via RTE_ETH_FOREACH_DEV. */
2999 comp_port(struct context *ctx, const struct token *token,
3000 unsigned int ent, char *buf, unsigned int size)
3007 RTE_ETH_FOREACH_DEV(p) {
3008 if (buf && i == ent)
3009 return snprintf(buf, size, "%u", p);
3017 /** Complete available rule IDs. */
/* Walks the selected port's flow list; requires a specific valid port
 * (not RTE_PORT_ALL) in the context. */
3019 comp_rule_id(struct context *ctx, const struct token *token,
3020 unsigned int ent, char *buf, unsigned int size)
3023 struct rte_port *port;
3024 struct port_flow *pf;
3027 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
3028 ctx->port == (portid_t)RTE_PORT_ALL)
3030 port = &ports[ctx->port];
3031 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3032 if (buf && i == ent)
3033 return snprintf(buf, size, "%u", pf->id);
3041 /** Complete type field for RSS action. */
/* Offers every rss_type_table[] name plus a final "end" terminator. */
3043 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
3044 unsigned int ent, char *buf, unsigned int size)
3050 for (i = 0; rss_type_table[i].str; ++i)
3055 return snprintf(buf, size, "%s", rss_type_table[ent].str);
3057 return snprintf(buf, size, "end");
3061 /** Complete queue field for RSS action. */
/* Offers numeric queue indices plus a final "end" terminator. */
3063 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
3064 unsigned int ent, char *buf, unsigned int size)
3071 return snprintf(buf, size, "%u", ent);
3073 return snprintf(buf, size, "end");
3077 /** Internal context. */
/* Single shared parser state; the cmdline callbacks below all use it. */
3078 static struct context cmd_flow_context;
3080 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of the file. */
3081 cmdline_parse_inst_t cmd_flow;
3083 /** Initialize context. */
/* Resets the fields cmd_flow_parse() relies on; remaining assignments
 * are elided in this view. */
3085 cmd_flow_context_init(struct context *ctx)
3087 /* A full memset() is not necessary. */
3097 ctx->objmask = NULL;
3100 /** Parse a token (cmdline API). */
/*
 * Consumes one whitespace/'#'-delimited token from @src, tries each
 * candidate on top of the ctx->next stack, then pushes the accepted
 * token's own successors and arguments for the next call.
 */
3102 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
3105 struct context *ctx = &cmd_flow_context;
3106 const struct token *token;
3107 const enum index *list;
3112 token = &token_list[ctx->curr];
3113 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
3116 for (len = 0; src[len]; ++len)
3117 if (src[len] == '#' || isspace(src[len]))
3121 /* Last argument and EOL detection. */
3122 for (i = len; src[i]; ++i)
3123 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
3125 else if (!isspace(src[i])) {
3130 if (src[i] == '\r' || src[i] == '\n') {
3134 /* Initialize context if necessary. */
3135 if (!ctx->next_num) {
3138 ctx->next[ctx->next_num++] = token->next[0];
3140 /* Process argument through candidates. */
3141 ctx->prev = ctx->curr;
3142 list = ctx->next[ctx->next_num - 1];
3143 for (i = 0; list[i]; ++i) {
3144 const struct token *next = &token_list[list[i]];
3147 ctx->curr = list[i];
/* Prefer the token's own callback; fall back to name matching. */
3149 tmp = next->call(ctx, next, src, len, result, size);
3151 tmp = parse_default(ctx, next, src, len, result, size);
3152 if (tmp == -1 || tmp != len)
3160 /* Push subsequent tokens if any. */
3162 for (i = 0; token->next[i]; ++i) {
3163 if (ctx->next_num == RTE_DIM(ctx->next))
3165 ctx->next[ctx->next_num++] = token->next[i];
3167 /* Push arguments if any. */
3169 for (i = 0; token->args[i]; ++i) {
3170 if (ctx->args_num == RTE_DIM(ctx->args))
3172 ctx->args[ctx->args_num++] = token->args[i];
3177 /** Return number of completion entries (cmdline API). */
3179 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
3181 struct context *ctx = &cmd_flow_context;
3182 const struct token *token = &token_list[ctx->curr];
3183 const enum index *list;
3187 /* Count number of tokens in current list. */
3189 list = ctx->next[ctx->next_num - 1];
3191 list = token->next[0];
3192 for (i = 0; list[i]; ++i)
3197 * If there is a single token, use its completion callback, otherwise
3198 * return the number of entries.
3200 token = &token_list[list[0]];
3201 if (i == 1 && token->comp) {
3202 /* Save index for cmd_flow_get_help(). */
3203 ctx->prev = list[0];
/* comp(..., NULL, 0) returns the number of entries. */
3204 return token->comp(ctx, token, 0, NULL, 0);
3209 /** Return a completion entry (cmdline API). */
/* Writes the completion string for @index into @dst: either via the
 * single candidate's comp() callback or its token name. */
3211 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
3212 char *dst, unsigned int size)
3214 struct context *ctx = &cmd_flow_context;
3215 const struct token *token = &token_list[ctx->curr];
3216 const enum index *list;
3220 /* Count number of tokens in current list. */
3222 list = ctx->next[ctx->next_num - 1];
3224 list = token->next[0];
3225 for (i = 0; list[i]; ++i)
3229 /* If there is a single token, use its completion callback. */
3230 token = &token_list[list[0]];
3231 if (i == 1 && token->comp) {
3232 /* Save index for cmd_flow_get_help(). */
3233 ctx->prev = list[0];
3234 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
3236 /* Otherwise make sure the index is valid and use defaults. */
3239 token = &token_list[list[index]];
3240 snprintf(dst, size, "%s", token->name);
3241 /* Save index for cmd_flow_get_help(). */
3242 ctx->prev = list[index];
3246 /** Populate help strings for current token (cmdline API). */
/* Writes the token's type into @dst and points the global cmd_flow
 * help_str at its help text (or name as a fallback). */
3248 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
3250 struct context *ctx = &cmd_flow_context;
3251 const struct token *token = &token_list[ctx->prev];
3256 /* Set token type and update global help with details. */
3257 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
3259 cmd_flow.help_str = token->help;
3261 cmd_flow.help_str = token->name;
3265 /** Token definition template (cmdline API). */
/* Every dynamic token handed to the cmdline library shares these four
 * callbacks. */
3266 static struct cmdline_token_hdr cmd_flow_token_hdr = {
3267 .ops = &(struct cmdline_token_ops){
3268 .parse = cmd_flow_parse,
3269 .complete_get_nb = cmd_flow_complete_get_nb,
3270 .complete_get_elt = cmd_flow_complete_get_elt,
3271 .get_help = cmd_flow_get_help,
3276 /** Populate the next dynamic token. */
/* Called by the cmdline library for each token slot: reinitializes the
 * context for the first slot, stops when no more tokens are expected,
 * and otherwise hands back the shared template header. */
3278 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
3279 cmdline_parse_token_hdr_t **hdr_inst)
3281 struct context *ctx = &cmd_flow_context;
3283 /* Always reinitialize context before requesting the first token. */
3284 if (!(hdr_inst - cmd_flow.tokens))
3285 cmd_flow_context_init(ctx)
3286 /* Return NULL when no more tokens are expected. */
3287 if (!ctx->next_num && ctx->curr) {
3291 /* Determine if command should end here. */
3292 if (ctx->eol && ctx->last && ctx->next_num) {
3293 const enum index *list = ctx->next[ctx->next_num - 1];
3296 for (i = 0; list[i]; ++i) {
3303 *hdr = &cmd_flow_token_hdr;
3306 /** Dispatch parsed buffer to function calls. */
/* Routes the completed command to the matching port_flow_* helper. */
3308 cmd_flow_parsed(const struct buffer *in)
3310 switch (in->command) {
3312 port_flow_validate(in->port, &in->args.vc.attr,
3313 in->args.vc.pattern, in->args.vc.actions);
3316 port_flow_create(in->port, &in->args.vc.attr,
3317 in->args.vc.pattern, in->args.vc.actions);
3320 port_flow_destroy(in->port, in->args.destroy.rule_n,
3321 in->args.destroy.rule);
3324 port_flow_flush(in->port);
3327 port_flow_query(in->port, in->args.query.rule,
3328 in->args.query.action);
3331 port_flow_list(in->port, in->args.list.group_n,
3332 in->args.list.group);
3335 port_flow_isolate(in->port, in->args.isolate.set);
3342 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose entry point: generates the next token via cmd_flow_tok()
 * or dispatches a fully parsed buffer via cmd_flow_parsed(). */
3344 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
3347 cmd_flow_tok(arg0, arg2);
3349 cmd_flow_parsed(arg0);
3352 /** Global parser instance (cmdline API). */
3353 cmdline_parse_inst_t cmd_flow = {
3355 .data = NULL, /**< Unused. */
3356 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3359 }, /**< Tokens are returned by cmd_flow_tok(). */