1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
109 ITEM_VLAN_INNER_TYPE,
141 ITEM_E_TAG_GRP_ECID_B,
160 ITEM_ARP_ETH_IPV4_SHA,
161 ITEM_ARP_ETH_IPV4_SPA,
162 ITEM_ARP_ETH_IPV4_THA,
163 ITEM_ARP_ETH_IPV4_TPA,
165 ITEM_IPV6_EXT_NEXT_HDR,
170 ITEM_ICMP6_ND_NS_TARGET_ADDR,
172 ITEM_ICMP6_ND_NA_TARGET_ADDR,
174 ITEM_ICMP6_ND_OPT_TYPE,
175 ITEM_ICMP6_ND_OPT_SLA_ETH,
176 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
177 ITEM_ICMP6_ND_OPT_TLA_ETH,
178 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
180 /* Validate/create actions. */
196 ACTION_RSS_FUNC_DEFAULT,
197 ACTION_RSS_FUNC_TOEPLITZ,
198 ACTION_RSS_FUNC_SIMPLE_XOR,
210 ACTION_PHY_PORT_ORIGINAL,
211 ACTION_PHY_PORT_INDEX,
213 ACTION_PORT_ID_ORIGINAL,
/** Maximum size for pattern in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 40

/** Storage size for struct rte_flow_item_raw including pattern. */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)

/** Maximum number of queue indices in struct rte_flow_action_rss. */
#define ACTION_RSS_QUEUE_NUM 32
229 /** Storage for struct rte_flow_action_rss including external data. */
230 struct action_rss_data {
231 struct rte_flow_action_rss conf;
232 uint8_t key[RSS_HASH_KEY_LENGTH];
233 uint16_t queue[ACTION_RSS_QUEUE_NUM];
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
239 /** Parser context. */
241 /** Stack of subsequent token lists to process. */
242 const enum index *next[CTX_STACK_SIZE];
243 /** Arguments for stacked tokens. */
244 const void *args[CTX_STACK_SIZE];
245 enum index curr; /**< Current token index. */
246 enum index prev; /**< Index of the last token seen. */
247 int next_num; /**< Number of entries in next[]. */
248 int args_num; /**< Number of entries in args[]. */
249 uint32_t eol:1; /**< EOL has been detected. */
250 uint32_t last:1; /**< No more arguments. */
251 portid_t port; /**< Current port ID (for completions). */
252 uint32_t objdata; /**< Object-specific data. */
253 void *object; /**< Address of current object for relative offsets. */
254 void *objmask; /**< Object a full mask must be written to. */
257 /** Token argument. */
259 uint32_t hton:1; /**< Use network byte ordering. */
260 uint32_t sign:1; /**< Value is signed. */
261 uint32_t bounded:1; /**< Value is bounded. */
262 uintmax_t min; /**< Minimum value if bounded. */
263 uintmax_t max; /**< Maximum value if bounded. */
264 uint32_t offset; /**< Relative offset from ctx->object. */
265 uint32_t size; /**< Field size. */
266 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
269 /** Parser token definition. */
271 /** Type displayed during completion (defaults to "TOKEN"). */
273 /** Help displayed during completion (defaults to token name). */
275 /** Private data used by parser functions. */
278 * Lists of subsequent tokens to push on the stack. Each call to the
279 * parser consumes the last entry of that stack.
281 const enum index *const *next;
282 /** Arguments stack for subsequent tokens that need them. */
283 const struct arg *const *args;
285 * Token-processing callback, returns -1 in case of error, the
286 * length of the matched string otherwise. If NULL, attempts to
287 * match the token name.
289 * If buf is not NULL, the result should be stored in it according
290 * to context. An error is returned if not large enough.
292 int (*call)(struct context *ctx, const struct token *token,
293 const char *str, unsigned int len,
294 void *buf, unsigned int size);
296 * Callback that provides possible values for this token, used for
297 * completion. Returns -1 in case of error, the number of possible
298 * values otherwise. If NULL, the token name is used.
300 * If buf is not NULL, entry index ent is written to buf and the
301 * full length of the entry is returned (same behavior as
304 int (*comp)(struct context *ctx, const struct token *token,
305 unsigned int ent, char *buf, unsigned int size);
306 /** Mandatory token name, no default value. */
/** Static initializer for the next field; list of lists, NULL-terminated. */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/** Static initializer for a NEXT() entry; token list, ZERO-terminated. */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/** Static initializer for the args field; argument list, NULL-terminated. */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
319 /** Static initializer for ARGS() to target a field. */
320 #define ARGS_ENTRY(s, f) \
321 (&(const struct arg){ \
322 .offset = offsetof(s, f), \
323 .size = sizeof(((s *)0)->f), \
326 /** Static initializer for ARGS() to target a bit-field. */
327 #define ARGS_ENTRY_BF(s, f, b) \
328 (&(const struct arg){ \
330 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
333 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
334 #define ARGS_ENTRY_MASK(s, f, m) \
335 (&(const struct arg){ \
336 .offset = offsetof(s, f), \
337 .size = sizeof(((s *)0)->f), \
338 .mask = (const void *)(m), \
341 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
342 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
343 (&(const struct arg){ \
345 .offset = offsetof(s, f), \
346 .size = sizeof(((s *)0)->f), \
347 .mask = (const void *)(m), \
350 /** Static initializer for ARGS() to target a pointer. */
351 #define ARGS_ENTRY_PTR(s, f) \
352 (&(const struct arg){ \
353 .size = sizeof(*((s *)0)->f), \
356 /** Static initializer for ARGS() with arbitrary offset and size. */
357 #define ARGS_ENTRY_ARB(o, s) \
358 (&(const struct arg){ \
363 /** Same as ARGS_ENTRY_ARB() with bounded values. */
364 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
365 (&(const struct arg){ \
373 /** Same as ARGS_ENTRY() using network byte ordering. */
374 #define ARGS_ENTRY_HTON(s, f) \
375 (&(const struct arg){ \
377 .offset = offsetof(s, f), \
378 .size = sizeof(((s *)0)->f), \
381 /** Parser output buffer layout expected by cmd_flow_parsed(). */
383 enum index command; /**< Flow command. */
384 portid_t port; /**< Affected port ID. */
387 struct rte_flow_attr attr;
388 struct rte_flow_item *pattern;
389 struct rte_flow_action *actions;
393 } vc; /**< Validate/create arguments. */
397 } destroy; /**< Destroy arguments. */
400 enum rte_flow_action_type action;
401 } query; /**< Query arguments. */
405 } list; /**< List arguments. */
408 } isolate; /**< Isolated mode arguments. */
409 } args; /**< Command arguments. */
412 /** Private data for pattern items. */
413 struct parse_item_priv {
414 enum rte_flow_item_type type; /**< Item type. */
415 uint32_t size; /**< Size of item specification structure. */
418 #define PRIV_ITEM(t, s) \
419 (&(const struct parse_item_priv){ \
420 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
424 /** Private data for actions. */
425 struct parse_action_priv {
426 enum rte_flow_action_type type; /**< Action type. */
427 uint32_t size; /**< Size of action configuration structure. */
430 #define PRIV_ACTION(t, s) \
431 (&(const struct parse_action_priv){ \
432 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
436 static const enum index next_vc_attr[] = {
446 static const enum index next_destroy_attr[] = {
452 static const enum index next_list_attr[] = {
458 static const enum index item_param[] = {
467 static const enum index next_item[] = {
502 ITEM_ICMP6_ND_OPT_SLA_ETH,
503 ITEM_ICMP6_ND_OPT_TLA_ETH,
507 static const enum index item_fuzzy[] = {
513 static const enum index item_any[] = {
519 static const enum index item_vf[] = {
525 static const enum index item_phy_port[] = {
531 static const enum index item_port_id[] = {
537 static const enum index item_raw[] = {
547 static const enum index item_eth[] = {
555 static const enum index item_vlan[] = {
560 ITEM_VLAN_INNER_TYPE,
565 static const enum index item_ipv4[] = {
575 static const enum index item_ipv6[] = {
586 static const enum index item_icmp[] = {
593 static const enum index item_udp[] = {
600 static const enum index item_tcp[] = {
608 static const enum index item_sctp[] = {
617 static const enum index item_vxlan[] = {
623 static const enum index item_e_tag[] = {
624 ITEM_E_TAG_GRP_ECID_B,
629 static const enum index item_nvgre[] = {
635 static const enum index item_mpls[] = {
641 static const enum index item_gre[] = {
647 static const enum index item_gtp[] = {
653 static const enum index item_geneve[] = {
660 static const enum index item_vxlan_gpe[] = {
666 static const enum index item_arp_eth_ipv4[] = {
667 ITEM_ARP_ETH_IPV4_SHA,
668 ITEM_ARP_ETH_IPV4_SPA,
669 ITEM_ARP_ETH_IPV4_THA,
670 ITEM_ARP_ETH_IPV4_TPA,
675 static const enum index item_ipv6_ext[] = {
676 ITEM_IPV6_EXT_NEXT_HDR,
681 static const enum index item_icmp6[] = {
688 static const enum index item_icmp6_nd_ns[] = {
689 ITEM_ICMP6_ND_NS_TARGET_ADDR,
694 static const enum index item_icmp6_nd_na[] = {
695 ITEM_ICMP6_ND_NA_TARGET_ADDR,
700 static const enum index item_icmp6_nd_opt[] = {
701 ITEM_ICMP6_ND_OPT_TYPE,
706 static const enum index item_icmp6_nd_opt_sla_eth[] = {
707 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
712 static const enum index item_icmp6_nd_opt_tla_eth[] = {
713 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
718 static const enum index next_action[] = {
736 static const enum index action_mark[] = {
742 static const enum index action_queue[] = {
748 static const enum index action_rss[] = {
759 static const enum index action_vf[] = {
766 static const enum index action_phy_port[] = {
767 ACTION_PHY_PORT_ORIGINAL,
768 ACTION_PHY_PORT_INDEX,
773 static const enum index action_port_id[] = {
774 ACTION_PORT_ID_ORIGINAL,
780 static const enum index action_meter[] = {
/*
 * Forward declarations of token-processing callbacks (struct token.call).
 * Each returns -1 on error or the length of the matched string; the last
 * two arguments are the output buffer and its size.
 */
static int parse_init(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_vc(struct context *, const struct token *,
		    const char *, unsigned int,
		    void *, unsigned int);
static int parse_vc_spec(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_vc_action_rss(struct context *, const struct token *,
			       const char *, unsigned int, void *,
			       unsigned int);
static int parse_vc_action_rss_func(struct context *, const struct token *,
				    const char *, unsigned int, void *,
				    unsigned int);
static int parse_vc_action_rss_type(struct context *, const struct token *,
				    const char *, unsigned int, void *,
				    unsigned int);
static int parse_vc_action_rss_queue(struct context *, const struct token *,
				     const char *, unsigned int, void *,
				     unsigned int);
static int parse_destroy(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_flush(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_query(struct context *, const struct token *,
		       const char *, unsigned int,
		       void *, unsigned int);
static int parse_action(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_list(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_isolate(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_int(struct context *, const struct token *,
		     const char *, unsigned int,
		     void *, unsigned int);
static int parse_prefix(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_boolean(struct context *, const struct token *,
			 const char *, unsigned int,
			 void *, unsigned int);
static int parse_string(struct context *, const struct token *,
			const char *, unsigned int,
			void *, unsigned int);
static int parse_mac_addr(struct context *, const struct token *,
			  const char *, unsigned int,
			  void *, unsigned int);
static int parse_ipv4_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_ipv6_addr(struct context *, const struct token *,
			   const char *, unsigned int,
			   void *, unsigned int);
static int parse_port(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
/*
 * Forward declarations of completion callbacks (struct token.comp).
 * Each returns -1 on error or the number of possible values; entry index
 * ent is written to buf when buf is non-NULL.
 */
static int comp_none(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_boolean(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_action(struct context *, const struct token *,
		       unsigned int, char *, unsigned int);
static int comp_port(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_vc_action_rss_type(struct context *, const struct token *,
				   unsigned int, char *, unsigned int);
static int comp_vc_action_rss_queue(struct context *, const struct token *,
				    unsigned int, char *, unsigned int);
865 /** Token definitions. */
866 static const struct token token_list[] = {
867 /* Special tokens. */
870 .help = "null entry, abused as the entry point",
871 .next = NEXT(NEXT_ENTRY(FLOW)),
876 .help = "command may end here",
882 .help = "integer value",
887 .name = "{unsigned}",
889 .help = "unsigned integer value",
896 .help = "prefix length for bit-mask",
897 .call = parse_prefix,
903 .help = "any boolean value",
904 .call = parse_boolean,
905 .comp = comp_boolean,
910 .help = "fixed string",
911 .call = parse_string,
915 .name = "{MAC address}",
917 .help = "standard MAC address notation",
918 .call = parse_mac_addr,
922 .name = "{IPv4 address}",
923 .type = "IPV4 ADDRESS",
924 .help = "standard IPv4 address notation",
925 .call = parse_ipv4_addr,
929 .name = "{IPv6 address}",
930 .type = "IPV6 ADDRESS",
931 .help = "standard IPv6 address notation",
932 .call = parse_ipv6_addr,
938 .help = "rule identifier",
940 .comp = comp_rule_id,
945 .help = "port identifier",
950 .name = "{group_id}",
952 .help = "group identifier",
959 .help = "priority level",
963 /* Top-level command. */
966 .type = "{command} {port_id} [{arg} [...]]",
967 .help = "manage ingress/egress flow rules",
968 .next = NEXT(NEXT_ENTRY
978 /* Sub-level commands. */
981 .help = "check whether a flow rule can be created",
982 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
983 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
988 .help = "create a flow rule",
989 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
990 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
995 .help = "destroy specific flow rules",
996 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
997 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
998 .call = parse_destroy,
1002 .help = "destroy all flow rules",
1003 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1004 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1005 .call = parse_flush,
1009 .help = "query an existing flow rule",
1010 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1011 NEXT_ENTRY(RULE_ID),
1012 NEXT_ENTRY(PORT_ID)),
1013 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
1014 ARGS_ENTRY(struct buffer, args.query.rule),
1015 ARGS_ENTRY(struct buffer, port)),
1016 .call = parse_query,
1020 .help = "list existing flow rules",
1021 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1022 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1027 .help = "restrict ingress traffic to the defined flow rules",
1028 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1029 NEXT_ENTRY(PORT_ID)),
1030 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1031 ARGS_ENTRY(struct buffer, port)),
1032 .call = parse_isolate,
1034 /* Destroy arguments. */
1037 .help = "specify a rule identifier",
1038 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1039 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1040 .call = parse_destroy,
1042 /* Query arguments. */
1046 .help = "action to query, must be part of the rule",
1047 .call = parse_action,
1048 .comp = comp_action,
1050 /* List arguments. */
1053 .help = "specify a group",
1054 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1055 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1058 /* Validate/create attributes. */
1061 .help = "specify a group",
1062 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1063 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1068 .help = "specify a priority level",
1069 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1070 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1075 .help = "affect rule to ingress",
1076 .next = NEXT(next_vc_attr),
1081 .help = "affect rule to egress",
1082 .next = NEXT(next_vc_attr),
1087 .help = "apply rule directly to endpoints found in pattern",
1088 .next = NEXT(next_vc_attr),
1091 /* Validate/create pattern. */
1094 .help = "submit a list of pattern items",
1095 .next = NEXT(next_item),
1100 .help = "match value perfectly (with full bit-mask)",
1101 .call = parse_vc_spec,
1103 [ITEM_PARAM_SPEC] = {
1105 .help = "match value according to configured bit-mask",
1106 .call = parse_vc_spec,
1108 [ITEM_PARAM_LAST] = {
1110 .help = "specify upper bound to establish a range",
1111 .call = parse_vc_spec,
1113 [ITEM_PARAM_MASK] = {
1115 .help = "specify bit-mask with relevant bits set to one",
1116 .call = parse_vc_spec,
1118 [ITEM_PARAM_PREFIX] = {
1120 .help = "generate bit-mask from a prefix length",
1121 .call = parse_vc_spec,
1125 .help = "specify next pattern item",
1126 .next = NEXT(next_item),
1130 .help = "end list of pattern items",
1131 .priv = PRIV_ITEM(END, 0),
1132 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1137 .help = "no-op pattern item",
1138 .priv = PRIV_ITEM(VOID, 0),
1139 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1144 .help = "perform actions when pattern does not match",
1145 .priv = PRIV_ITEM(INVERT, 0),
1146 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1151 .help = "match any protocol for the current layer",
1152 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1153 .next = NEXT(item_any),
1158 .help = "number of layers covered",
1159 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1160 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1164 .help = "match traffic from/to the physical function",
1165 .priv = PRIV_ITEM(PF, 0),
1166 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1171 .help = "match traffic from/to a virtual function ID",
1172 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1173 .next = NEXT(item_vf),
1179 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1180 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1184 .help = "match traffic from/to a specific physical port",
1185 .priv = PRIV_ITEM(PHY_PORT,
1186 sizeof(struct rte_flow_item_phy_port)),
1187 .next = NEXT(item_phy_port),
1190 [ITEM_PHY_PORT_INDEX] = {
1192 .help = "physical port index",
1193 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1194 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1198 .help = "match traffic from/to a given DPDK port ID",
1199 .priv = PRIV_ITEM(PORT_ID,
1200 sizeof(struct rte_flow_item_port_id)),
1201 .next = NEXT(item_port_id),
1204 [ITEM_PORT_ID_ID] = {
1206 .help = "DPDK port ID",
1207 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1208 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1212 .help = "match an arbitrary byte string",
1213 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1214 .next = NEXT(item_raw),
1217 [ITEM_RAW_RELATIVE] = {
1219 .help = "look for pattern after the previous item",
1220 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1221 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1224 [ITEM_RAW_SEARCH] = {
1226 .help = "search pattern from offset (see also limit)",
1227 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1228 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1231 [ITEM_RAW_OFFSET] = {
1233 .help = "absolute or relative offset for pattern",
1234 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1235 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1237 [ITEM_RAW_LIMIT] = {
1239 .help = "search area limit for start of pattern",
1240 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1241 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1243 [ITEM_RAW_PATTERN] = {
1245 .help = "byte string to look for",
1246 .next = NEXT(item_raw,
1248 NEXT_ENTRY(ITEM_PARAM_IS,
1251 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1252 ARGS_ENTRY(struct rte_flow_item_raw, length),
1253 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1254 ITEM_RAW_PATTERN_SIZE)),
1258 .help = "match Ethernet header",
1259 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1260 .next = NEXT(item_eth),
1265 .help = "destination MAC",
1266 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1271 .help = "source MAC",
1272 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1273 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1277 .help = "EtherType",
1278 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1283 .help = "match 802.1Q/ad VLAN tag",
1284 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1285 .next = NEXT(item_vlan),
1290 .help = "tag control information",
1291 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1292 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1296 .help = "priority code point",
1297 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1298 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1303 .help = "drop eligible indicator",
1304 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1305 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1310 .help = "VLAN identifier",
1311 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1312 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1315 [ITEM_VLAN_INNER_TYPE] = {
1316 .name = "inner_type",
1317 .help = "inner EtherType",
1318 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1319 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1324 .help = "match IPv4 header",
1325 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1326 .next = NEXT(item_ipv4),
1331 .help = "type of service",
1332 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1333 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1334 hdr.type_of_service)),
1338 .help = "time to live",
1339 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1340 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1343 [ITEM_IPV4_PROTO] = {
1345 .help = "next protocol ID",
1346 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1347 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1348 hdr.next_proto_id)),
1352 .help = "source address",
1353 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1354 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1359 .help = "destination address",
1360 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1361 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1366 .help = "match IPv6 header",
1367 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1368 .next = NEXT(item_ipv6),
1373 .help = "traffic class",
1374 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1375 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1377 "\x0f\xf0\x00\x00")),
1379 [ITEM_IPV6_FLOW] = {
1381 .help = "flow label",
1382 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1385 "\x00\x0f\xff\xff")),
1387 [ITEM_IPV6_PROTO] = {
1389 .help = "protocol (next header)",
1390 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1391 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1396 .help = "hop limit",
1397 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1398 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1403 .help = "source address",
1404 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1405 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1410 .help = "destination address",
1411 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1412 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1417 .help = "match ICMP header",
1418 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1419 .next = NEXT(item_icmp),
1422 [ITEM_ICMP_TYPE] = {
1424 .help = "ICMP packet type",
1425 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1426 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1429 [ITEM_ICMP_CODE] = {
1431 .help = "ICMP packet code",
1432 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1433 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1438 .help = "match UDP header",
1439 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1440 .next = NEXT(item_udp),
1445 .help = "UDP source port",
1446 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1447 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1452 .help = "UDP destination port",
1453 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1454 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1459 .help = "match TCP header",
1460 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1461 .next = NEXT(item_tcp),
1466 .help = "TCP source port",
1467 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1468 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1473 .help = "TCP destination port",
1474 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1475 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1478 [ITEM_TCP_FLAGS] = {
1480 .help = "TCP flags",
1481 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1482 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1487 .help = "match SCTP header",
1488 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1489 .next = NEXT(item_sctp),
1494 .help = "SCTP source port",
1495 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1496 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1501 .help = "SCTP destination port",
1502 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1503 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1508 .help = "validation tag",
1509 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1510 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1513 [ITEM_SCTP_CKSUM] = {
1516 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1517 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1522 .help = "match VXLAN header",
1523 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1524 .next = NEXT(item_vxlan),
1527 [ITEM_VXLAN_VNI] = {
1529 .help = "VXLAN identifier",
1530 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1531 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1535 .help = "match E-Tag header",
1536 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1537 .next = NEXT(item_e_tag),
1540 [ITEM_E_TAG_GRP_ECID_B] = {
1541 .name = "grp_ecid_b",
1542 .help = "GRP and E-CID base",
1543 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1544 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1550 .help = "match NVGRE header",
1551 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1552 .next = NEXT(item_nvgre),
1555 [ITEM_NVGRE_TNI] = {
1557 .help = "virtual subnet ID",
1558 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1559 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1563 .help = "match MPLS header",
1564 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1565 .next = NEXT(item_mpls),
1568 [ITEM_MPLS_LABEL] = {
1570 .help = "MPLS label",
1571 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1572 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1578 .help = "match GRE header",
1579 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1580 .next = NEXT(item_gre),
1583 [ITEM_GRE_PROTO] = {
1585 .help = "GRE protocol type",
1586 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1587 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1592 .help = "fuzzy pattern match, expect faster than default",
1593 .priv = PRIV_ITEM(FUZZY,
1594 sizeof(struct rte_flow_item_fuzzy)),
1595 .next = NEXT(item_fuzzy),
1598 [ITEM_FUZZY_THRESH] = {
1600 .help = "match accuracy threshold",
1601 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1602 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1607 .help = "match GTP header",
1608 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1609 .next = NEXT(item_gtp),
1614 .help = "tunnel endpoint identifier",
1615 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1616 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1620 .help = "match GTP header",
1621 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1622 .next = NEXT(item_gtp),
1627 .help = "match GTP header",
1628 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1629 .next = NEXT(item_gtp),
1634 .help = "match GENEVE header",
1635 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1636 .next = NEXT(item_geneve),
1639 [ITEM_GENEVE_VNI] = {
1641 .help = "virtual network identifier",
1642 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1643 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1645 [ITEM_GENEVE_PROTO] = {
1647 .help = "GENEVE protocol type",
1648 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1649 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1652 [ITEM_VXLAN_GPE] = {
1653 .name = "vxlan-gpe",
1654 .help = "match VXLAN-GPE header",
1655 .priv = PRIV_ITEM(VXLAN_GPE,
1656 sizeof(struct rte_flow_item_vxlan_gpe)),
1657 .next = NEXT(item_vxlan_gpe),
1660 [ITEM_VXLAN_GPE_VNI] = {
1662 .help = "VXLAN-GPE identifier",
1663 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1664 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1667 [ITEM_ARP_ETH_IPV4] = {
1668 .name = "arp_eth_ipv4",
1669 .help = "match ARP header for Ethernet/IPv4",
1670 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1671 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1672 .next = NEXT(item_arp_eth_ipv4),
1675 [ITEM_ARP_ETH_IPV4_SHA] = {
1677 .help = "sender hardware address",
1678 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1680 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1683 [ITEM_ARP_ETH_IPV4_SPA] = {
1685 .help = "sender IPv4 address",
1686 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1688 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1691 [ITEM_ARP_ETH_IPV4_THA] = {
1693 .help = "target hardware address",
1694 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1696 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1699 [ITEM_ARP_ETH_IPV4_TPA] = {
1701 .help = "target IPv4 address",
1702 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1704 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1709 .help = "match presence of any IPv6 extension header",
1710 .priv = PRIV_ITEM(IPV6_EXT,
1711 sizeof(struct rte_flow_item_ipv6_ext)),
1712 .next = NEXT(item_ipv6_ext),
1715 [ITEM_IPV6_EXT_NEXT_HDR] = {
1717 .help = "next header",
1718 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
1719 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
1724 .help = "match any ICMPv6 header",
1725 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
1726 .next = NEXT(item_icmp6),
1729 [ITEM_ICMP6_TYPE] = {
1731 .help = "ICMPv6 type",
1732 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1733 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1736 [ITEM_ICMP6_CODE] = {
1738 .help = "ICMPv6 code",
1739 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1740 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1743 [ITEM_ICMP6_ND_NS] = {
1744 .name = "icmp6_nd_ns",
1745 .help = "match ICMPv6 neighbor discovery solicitation",
1746 .priv = PRIV_ITEM(ICMP6_ND_NS,
1747 sizeof(struct rte_flow_item_icmp6_nd_ns)),
1748 .next = NEXT(item_icmp6_nd_ns),
1751 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
1752 .name = "target_addr",
1753 .help = "target address",
1754 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
1756 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
1759 [ITEM_ICMP6_ND_NA] = {
1760 .name = "icmp6_nd_na",
1761 .help = "match ICMPv6 neighbor discovery advertisement",
1762 .priv = PRIV_ITEM(ICMP6_ND_NA,
1763 sizeof(struct rte_flow_item_icmp6_nd_na)),
1764 .next = NEXT(item_icmp6_nd_na),
1767 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
1768 .name = "target_addr",
1769 .help = "target address",
1770 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
1772 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
1775 [ITEM_ICMP6_ND_OPT] = {
1776 .name = "icmp6_nd_opt",
1777 .help = "match presence of any ICMPv6 neighbor discovery"
1779 .priv = PRIV_ITEM(ICMP6_ND_OPT,
1780 sizeof(struct rte_flow_item_icmp6_nd_opt)),
1781 .next = NEXT(item_icmp6_nd_opt),
1784 [ITEM_ICMP6_ND_OPT_TYPE] = {
1786 .help = "ND option type",
1787 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
1789 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
1792 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
1793 .name = "icmp6_nd_opt_sla_eth",
1794 .help = "match ICMPv6 neighbor discovery source Ethernet"
1795 " link-layer address option",
1797 (ICMP6_ND_OPT_SLA_ETH,
1798 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
1799 .next = NEXT(item_icmp6_nd_opt_sla_eth),
1802 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
1804 .help = "source Ethernet LLA",
1805 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
1807 .args = ARGS(ARGS_ENTRY_HTON
1808 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
1810 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
1811 .name = "icmp6_nd_opt_tla_eth",
1812 .help = "match ICMPv6 neighbor discovery target Ethernet"
1813 " link-layer address option",
1815 (ICMP6_ND_OPT_TLA_ETH,
1816 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
1817 .next = NEXT(item_icmp6_nd_opt_tla_eth),
1820 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
1822 .help = "target Ethernet LLA",
1823 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
1825 .args = ARGS(ARGS_ENTRY_HTON
1826 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
1829 /* Validate/create actions. */
1832 .help = "submit a list of associated actions",
1833 .next = NEXT(next_action),
1838 .help = "specify next action",
1839 .next = NEXT(next_action),
1843 .help = "end list of actions",
1844 .priv = PRIV_ACTION(END, 0),
1849 .help = "no-op action",
1850 .priv = PRIV_ACTION(VOID, 0),
1851 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1854 [ACTION_PASSTHRU] = {
1856 .help = "let subsequent rule process matched packets",
1857 .priv = PRIV_ACTION(PASSTHRU, 0),
1858 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1863 .help = "attach 32 bit value to packets",
1864 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1865 .next = NEXT(action_mark),
1868 [ACTION_MARK_ID] = {
1870 .help = "32 bit value to return with packets",
1871 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1872 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1873 .call = parse_vc_conf,
1877 .help = "flag packets",
1878 .priv = PRIV_ACTION(FLAG, 0),
1879 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1884 .help = "assign packets to a given queue index",
1885 .priv = PRIV_ACTION(QUEUE,
1886 sizeof(struct rte_flow_action_queue)),
1887 .next = NEXT(action_queue),
1890 [ACTION_QUEUE_INDEX] = {
1892 .help = "queue index to use",
1893 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1894 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1895 .call = parse_vc_conf,
1899 .help = "drop packets (note: passthru has priority)",
1900 .priv = PRIV_ACTION(DROP, 0),
1901 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1906 .help = "enable counters for this rule",
1907 .priv = PRIV_ACTION(COUNT, 0),
1908 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1913 .help = "spread packets among several queues",
1914 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
1915 .next = NEXT(action_rss),
1916 .call = parse_vc_action_rss,
1918 [ACTION_RSS_FUNC] = {
1920 .help = "RSS hash function to apply",
1921 .next = NEXT(action_rss,
1922 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
1923 ACTION_RSS_FUNC_TOEPLITZ,
1924 ACTION_RSS_FUNC_SIMPLE_XOR)),
1926 [ACTION_RSS_FUNC_DEFAULT] = {
1928 .help = "default hash function",
1929 .call = parse_vc_action_rss_func,
1931 [ACTION_RSS_FUNC_TOEPLITZ] = {
1933 .help = "Toeplitz hash function",
1934 .call = parse_vc_action_rss_func,
1936 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
1937 .name = "simple_xor",
1938 .help = "simple XOR hash function",
1939 .call = parse_vc_action_rss_func,
1941 [ACTION_RSS_LEVEL] = {
1943 .help = "encapsulation level for \"types\"",
1944 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1945 .args = ARGS(ARGS_ENTRY_ARB
1946 (offsetof(struct action_rss_data, conf) +
1947 offsetof(struct rte_flow_action_rss, level),
1948 sizeof(((struct rte_flow_action_rss *)0)->
1951 [ACTION_RSS_TYPES] = {
1953 .help = "specific RSS hash types",
1954 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
1956 [ACTION_RSS_TYPE] = {
1958 .help = "RSS hash type",
1959 .call = parse_vc_action_rss_type,
1960 .comp = comp_vc_action_rss_type,
1962 [ACTION_RSS_KEY] = {
1964 .help = "RSS hash key",
1965 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
1966 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
1968 (offsetof(struct action_rss_data, conf) +
1969 offsetof(struct rte_flow_action_rss, key_len),
1970 sizeof(((struct rte_flow_action_rss *)0)->
1972 ARGS_ENTRY(struct action_rss_data, key)),
1974 [ACTION_RSS_KEY_LEN] = {
1976 .help = "RSS hash key length in bytes",
1977 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
1978 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
1979 (offsetof(struct action_rss_data, conf) +
1980 offsetof(struct rte_flow_action_rss, key_len),
1981 sizeof(((struct rte_flow_action_rss *)0)->
1984 RSS_HASH_KEY_LENGTH)),
1986 [ACTION_RSS_QUEUES] = {
1988 .help = "queue indices to use",
1989 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1990 .call = parse_vc_conf,
1992 [ACTION_RSS_QUEUE] = {
1994 .help = "queue index",
1995 .call = parse_vc_action_rss_queue,
1996 .comp = comp_vc_action_rss_queue,
2000 .help = "direct traffic to physical function",
2001 .priv = PRIV_ACTION(PF, 0),
2002 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2007 .help = "direct traffic to a virtual function ID",
2008 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2009 .next = NEXT(action_vf),
2012 [ACTION_VF_ORIGINAL] = {
2014 .help = "use original VF ID if possible",
2015 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2016 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2018 .call = parse_vc_conf,
2023 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2024 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2025 .call = parse_vc_conf,
2027 [ACTION_PHY_PORT] = {
2029 .help = "direct packets to physical port index",
2030 .priv = PRIV_ACTION(PHY_PORT,
2031 sizeof(struct rte_flow_action_phy_port)),
2032 .next = NEXT(action_phy_port),
2035 [ACTION_PHY_PORT_ORIGINAL] = {
2037 .help = "use original port index if possible",
2038 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2039 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2041 .call = parse_vc_conf,
2043 [ACTION_PHY_PORT_INDEX] = {
2045 .help = "physical port index",
2046 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2047 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2049 .call = parse_vc_conf,
2051 [ACTION_PORT_ID] = {
2053 .help = "direct matching traffic to a given DPDK port ID",
2054 .priv = PRIV_ACTION(PORT_ID,
2055 sizeof(struct rte_flow_action_port_id)),
2056 .next = NEXT(action_port_id),
2059 [ACTION_PORT_ID_ORIGINAL] = {
2061 .help = "use original DPDK port ID if possible",
2062 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2063 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2065 .call = parse_vc_conf,
2067 [ACTION_PORT_ID_ID] = {
2069 .help = "DPDK port ID",
2070 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2071 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2072 .call = parse_vc_conf,
2076 .help = "meter the directed packets at given id",
2077 .priv = PRIV_ACTION(METER,
2078 sizeof(struct rte_flow_action_meter)),
2079 .next = NEXT(action_meter),
2082 [ACTION_METER_ID] = {
2084 .help = "meter id to use",
2085 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2086 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2087 .call = parse_vc_conf,
2091 /** Remove and return last entry from argument stack. */
/* Returns NULL instead of underflowing when the stack is empty. */
2092 static const struct arg *
2093 pop_args(struct context *ctx)
2095 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2098 /** Add entry on top of the argument stack. */
/*
 * Rejects the push when the stack already holds CTX_STACK_SIZE entries;
 * the error-return line is elided in this fragment.
 */
2100 push_args(struct context *ctx, const struct arg *arg)
2102 	if (ctx->args_num == CTX_STACK_SIZE)
2104 	ctx->args[ctx->args_num++] = arg;
2108 /** Spread value into buffer according to bit-mask. */
/*
 * Scatters bits of @val into @dst at the positions selected by arg->mask.
 * On little-endian hosts the mask bytes are visited from the end of the
 * field backwards (see the byte-order conditional below).
 * NOTE(review): several lines are elided in this fragment (the outer loop,
 * the val shift and the return value); only the per-byte bit-spreading
 * step is visible.
 */
2110 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2112 	uint32_t i = arg->size;
2120 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2129 		unsigned int shift = 0;
2130 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Visit every bit position covered by this mask byte, LSB first. */
2132 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
2133 			if (!(arg->mask[i] & (1 << shift)))
/* Clear the destination bit, then copy in the current low bit of val. */
2138 			*buf &= ~(1 << shift);
2139 			*buf |= (val & 1) << shift;
2147 /** Compare a string with a partial one of a given length. */
/*
 * Like strncmp() limited to @partial_len, except a non-zero value (the
 * first unmatched character of @full) is also returned when @full is
 * longer than @partial. When @full fits within @partial_len the elided
 * line presumably returns 0 — TODO confirm against full source.
 */
2149 strcmp_partial(const char *full, const char *partial, size_t partial_len)
2151 	int r = strncmp(full, partial, partial_len);
/* Elided here: early return when the strncmp() result is non-zero. */
2155 	if (strlen(full) <= partial_len)
2157 	return full[partial_len];
2161  * Parse a prefix length and generate a bit-mask.
2163  * Last argument (ctx->args) is retrieved to determine mask size, storage
2164  * location and whether the result must use network byte ordering.
/* NOTE(review): several lines of this function are elided in this view. */
2167 parse_prefix(struct context *ctx, const struct token *token,
2168 	     const char *str, unsigned int len,
2169 	     void *buf, unsigned int size)
2171 	const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte whose n most-significant bits are set (n = 0..8). */
2172 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2179 	/* Argument is expected. */
2183 	u = strtoumax(str, &end, 0);
/* Whole token must be a valid unsigned integer. */
2184 	if (errno || (size_t)(end - str) != len)
/* Bit-mask style destinations take the dedicated fill-helper path. */
2189 		extra = arg_entry_bf_fill(NULL, 0, arg);
2198 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2199 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix (full bytes plus any partial byte) must fit the field. */
2206 	if (bytes > size || bytes + !!extra > size)
2210 	buf = (uint8_t *)ctx->object + arg->offset;
2211 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host-endian output on LE: the mask's high bytes sit at the field end. */
2213 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2214 		memset(buf, 0x00, size - bytes);
2216 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network-endian output: leading full bytes, then the partial byte. */
2220 	memset(buf, 0xff, bytes);
2221 	memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2223 		((uint8_t *)buf)[bytes] = conv[extra];
2226 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably the error path: restore the argument consumed above. */
2229 	push_args(ctx, arg);
2233 /** Default parsing function for token name matching. */
/*
 * Accepts @str (length @len, possibly a partial word) when it matches
 * token->name; buf/size appear unused here (callers pass NULL, 0).
 */
2235 parse_default(struct context *ctx, const struct token *token,
2236 	      const char *str, unsigned int len,
2237 	      void *buf, unsigned int size)
/* Reject on any mismatch; success/failure return lines are elided. */
2242 	if (strcmp_partial(token->name, str, len))
2247 /** Parse flow command, initialize output buffer for subsequent tokens. */
2249 parse_init(struct context *ctx, const struct token *token,
2250 	   const char *str, unsigned int len,
2251 	   void *buf, unsigned int size)
2253 	struct buffer *out = buf;
2255 	/* Token name must match. */
2256 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2258 	/* Nothing else to do if there is no buffer. */
2261 	/* Make sure buffer is large enough. */
2262 	if (size < sizeof(*out))
2264 	/* Initialize buffer. */
2265 	memset(out, 0x00, sizeof(*out));
/* Fill the unused tail with a 0x22 pattern so stale bytes are obvious. */
2266 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2269 	ctx->objmask = NULL;
2273 /** Parse tokens for validate/create commands. */
/*
 * Layout of @out: the fixed header is followed (upward) by the pattern
 * item array, then the action array, while per-item/per-action payloads
 * grow downward from out->args.vc.data (initially the end of the buffer).
 * NOTE(review): many lines (returns, case labels, braces) are elided.
 */
2275 parse_vc(struct context *ctx, const struct token *token,
2276 	 const char *str, unsigned int len,
2277 	 void *buf, unsigned int size)
2279 	struct buffer *out = buf;
2283 	/* Token name must match. */
2284 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2286 	/* Nothing else to do if there is no buffer. */
2289 	if (!out->command) {
2290 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2292 		if (sizeof(*out) > size)
2294 		out->command = ctx->curr;
2297 		ctx->objmask = NULL;
/* Payload area starts at the very end of the buffer and grows down. */
2298 		out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write straight into out->args.vc.attr. */
2302 	ctx->object = &out->args.vc.attr;
2303 	ctx->objmask = NULL;
2304 	switch (ctx->curr) {
2309 		out->args.vc.attr.ingress = 1;
2312 		out->args.vc.attr.egress = 1;
2315 		out->args.vc.attr.transfer = 1;
/* PATTERN token: item array begins aligned right after the header. */
2318 		out->args.vc.pattern =
2319 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2321 		ctx->object = out->args.vc.pattern;
2322 		ctx->objmask = NULL;
/* ACTIONS token: action array begins aligned after the last item. */
2325 		out->args.vc.actions =
2326 			(void *)RTE_ALIGN_CEIL((uintptr_t)
2327 					       (out->args.vc.pattern +
2328 						out->args.vc.pattern_n),
2330 		ctx->object = out->args.vc.actions;
2331 		ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action entry. */
2338 	if (!out->args.vc.actions) {
2339 		const struct parse_item_priv *priv = token->priv;
2340 		struct rte_flow_item *item =
2341 			out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve three payload slots per item: spec, last and mask. */
2343 		data_size = priv->size * 3; /* spec, last, mask */
2344 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2345 					       (out->args.vc.data - data_size),
/* Fail when the upward arrays would collide with the downward data. */
2347 		if ((uint8_t *)item + sizeof(*item) > data)
2349 		*item = (struct rte_flow_item){
2352 		++out->args.vc.pattern_n;
2354 		ctx->objmask = NULL;
2356 		const struct parse_action_priv *priv = token->priv;
2357 		struct rte_flow_action *action =
2358 			out->args.vc.actions + out->args.vc.actions_n;
2360 		data_size = priv->size; /* configuration */
2361 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2362 					       (out->args.vc.data - data_size),
2364 		if ((uint8_t *)action + sizeof(*action) > data)
2366 		*action = (struct rte_flow_action){
/* Zero-sized actions carry no configuration structure. */
2368 			.conf = data_size ? data : NULL,
2370 		++out->args.vc.actions_n;
2371 		ctx->object = action;
2372 		ctx->objmask = NULL;
2374 	memset(data, 0, data_size);
2375 	out->args.vc.data = data;
2376 	ctx->objdata = data_size;
2380 /** Parse pattern item parameter type. */
/*
 * Selects which of the current item's three payload slots (spec/last/mask)
 * subsequent value tokens write into. The elided lines presumably set an
 * "index" local per case — TODO confirm against full source.
 */
2382 parse_vc_spec(struct context *ctx, const struct token *token,
2383 	      const char *str, unsigned int len,
2384 	      void *buf, unsigned int size)
2386 	struct buffer *out = buf;
2387 	struct rte_flow_item *item;
2393 	/* Token name must match. */
2394 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2396 	/* Parse parameter types. */
2397 	switch (ctx->curr) {
2398 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2404 	case ITEM_PARAM_SPEC:
2407 	case ITEM_PARAM_LAST:
2410 	case ITEM_PARAM_PREFIX:
2411 		/* Modify next token to expect a prefix. */
2412 		if (ctx->next_num < 2)
2414 		ctx->next[ctx->next_num - 2] = prefix;
2416 	case ITEM_PARAM_MASK:
2422 	/* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to receive the parameter. */
2425 	if (!out->args.vc.pattern_n)
2427 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2428 	data_size = ctx->objdata / 3; /* spec, last, mask */
2429 	/* Point to selected object. */
2430 	ctx->object = out->args.vc.data + (data_size * index);
2432 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2433 		item->mask = ctx->objmask;
2435 		ctx->objmask = NULL;
2436 	/* Update relevant item pointer. */
2437 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2442 /** Parse action configuration field. */
/* Redirects subsequent value tokens into the current action's payload. */
2444 parse_vc_conf(struct context *ctx, const struct token *token,
2445 	      const char *str, unsigned int len,
2446 	      void *buf, unsigned int size)
2448 	struct buffer *out = buf;
2451 	/* Token name must match. */
2452 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2454 	/* Nothing else to do if there is no buffer. */
2457 	/* Point to selected object. */
2458 	ctx->object = out->args.vc.data;
2459 	ctx->objmask = NULL;
2463 /** Parse RSS action. */
/*
 * Registers the action through parse_vc(), then fills its
 * struct action_rss_data payload with testpmd defaults: default hash
 * function, built-in key, and queues 0..queue_num-1 (capped at
 * ACTION_RSS_QUEUE_NUM).
 */
2465 parse_vc_action_rss(struct context *ctx, const struct token *token,
2466 		    const char *str, unsigned int len,
2467 		    void *buf, unsigned int size)
2469 	struct buffer *out = buf;
2470 	struct rte_flow_action *action;
2471 	struct action_rss_data *action_rss_data;
2475 	ret = parse_vc(ctx, token, str, len, buf, size);
2478 	/* Nothing else to do if there is no buffer. */
2481 	if (!out->args.vc.actions_n)
2483 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2484 	/* Point to selected object. */
2485 	ctx->object = out->args.vc.data;
2486 	ctx->objmask = NULL;
2487 	/* Set up default configuration. */
2488 	action_rss_data = ctx->object;
2489 	*action_rss_data = (struct action_rss_data){
2490 		.conf = (struct rte_flow_action_rss){
2491 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2494 			.key_len = sizeof(action_rss_data->key),
2495 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
/* conf points back into this same payload's key/queue storage. */
2496 			.key = action_rss_data->key,
2497 			.queue = action_rss_data->queue,
2499 		.key = "testpmd's default RSS hash key",
/* Default queue list is the identity mapping 0..queue_num-1. */
2502 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2503 		action_rss_data->queue[i] = i;
2504 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2505 	    ctx->port != (portid_t)RTE_PORT_ALL) {
2506 		struct rte_eth_dev_info info;
/* Clamp the key length to what the target device supports. */
2508 		rte_eth_dev_info_get(ctx->port, &info);
2509 		action_rss_data->conf.key_len =
2510 			RTE_MIN(sizeof(action_rss_data->key),
2511 				info.hash_key_size);
2513 	action->conf = &action_rss_data->conf;
2518  * Parse func field for RSS action.
2520  * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2521  * ACTION_RSS_FUNC_* index that called this function.
2524 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2525 			 const char *str, unsigned int len,
2526 			 void *buf, unsigned int size)
2528 	struct action_rss_data *action_rss_data;
2529 	enum rte_eth_hash_function func;
2533 	/* Token name must match. */
2534 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the token that matched onto the corresponding hash function. */
2536 	switch (ctx->curr) {
2537 	case ACTION_RSS_FUNC_DEFAULT:
2538 		func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2540 	case ACTION_RSS_FUNC_TOEPLITZ:
2541 		func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2543 	case ACTION_RSS_FUNC_SIMPLE_XOR:
2544 		func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object was pointed at the RSS payload by parse_vc_action_rss(). */
2551 	action_rss_data = ctx->object;
2552 	action_rss_data->conf.func = func;
2557  * Parse type field for RSS action.
2559  * Valid tokens are type field names and the "end" token.
/*
 * The upper 16 bits of ctx->objdata flag whether a type token has
 * already been consumed; the first one resets conf.types before OR-ing.
 */
2562 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2563 			 const char *str, unsigned int len,
2564 			 void *buf, unsigned int size)
2566 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2567 	struct action_rss_data *action_rss_data;
2573 	if (ctx->curr != ACTION_RSS_TYPE)
/* First type in the list: start from an empty type set. */
2575 	if (!(ctx->objdata >> 16) && ctx->object) {
2576 		action_rss_data = ctx->object;
2577 		action_rss_data->conf.types = 0;
/* "end" terminates the list and clears the seen-one flag. */
2579 	if (!strcmp_partial("end", str, len)) {
2580 		ctx->objdata &= 0xffff;
2583 	for (i = 0; rss_type_table[i].str; ++i)
2584 		if (!strcmp_partial(rss_type_table[i].str, str, len))
2586 	if (!rss_type_table[i].str)
2588 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Queue another ACTION_RSS_TYPE token so lists of types can continue. */
2590 	if (ctx->next_num == RTE_DIM(ctx->next))
2592 	ctx->next[ctx->next_num++] = next;
2595 	action_rss_data = ctx->object;
2596 	action_rss_data->conf.types |= rss_type_table[i].rss_type;
2601  * Parse queue field for RSS action.
2603  * Valid tokens are queue indices and the "end" token.
/*
 * The upper 16 bits of ctx->objdata carry the running queue count; each
 * index is parsed by parse_int() into the payload's queue[] slot.
 */
2606 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2607 			  const char *str, unsigned int len,
2608 			  void *buf, unsigned int size)
2610 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2611 	struct action_rss_data *action_rss_data;
2618 	if (ctx->curr != ACTION_RSS_QUEUE)
2620 	i = ctx->objdata >> 16;
2621 	if (!strcmp_partial("end", str, len)) {
2622 		ctx->objdata &= 0xffff;
/* Reject lists longer than the reserved queue[] storage. */
2625 	if (i >= ACTION_RSS_QUEUE_NUM)
2628 		       ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
2629 				      i * sizeof(action_rss_data->queue[i]),
2630 				      sizeof(action_rss_data->queue[i]))))
2632 	ret = parse_int(ctx, token, str, len, NULL, 0);
2638 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Queue another ACTION_RSS_QUEUE token so the list can continue. */
2640 	if (ctx->next_num == RTE_DIM(ctx->next))
2642 	ctx->next[ctx->next_num++] = next;
2645 	action_rss_data = ctx->object;
2646 	action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
2647 	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
2651 /** Parse tokens for destroy command. */
/*
 * First call records the command and anchors the rule-ID array after the
 * header; each later call appends one rule ID, bounds-checked against the
 * end of the buffer.
 */
2653 parse_destroy(struct context *ctx, const struct token *token,
2654 	      const char *str, unsigned int len,
2655 	      void *buf, unsigned int size)
2657 	struct buffer *out = buf;
2659 	/* Token name must match. */
2660 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2662 	/* Nothing else to do if there is no buffer. */
2665 	if (!out->command) {
2666 		if (ctx->curr != DESTROY)
2668 		if (sizeof(*out) > size)
2670 		out->command = ctx->curr;
2673 		ctx->objmask = NULL;
2674 		out->args.destroy.rule =
2675 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure one more rule ID still fits inside the buffer. */
2679 	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
2680 	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
2683 	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
2684 	ctx->objmask = NULL;
2688 /** Parse tokens for flush command. */
/* Flush takes no arguments beyond the port; only records the command. */
2690 parse_flush(struct context *ctx, const struct token *token,
2691 	    const char *str, unsigned int len,
2692 	    void *buf, unsigned int size)
2694 	struct buffer *out = buf;
2696 	/* Token name must match. */
2697 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2699 	/* Nothing else to do if there is no buffer. */
2702 	if (!out->command) {
2703 		if (ctx->curr != FLUSH)
2705 		if (sizeof(*out) > size)
2707 		out->command = ctx->curr;
2710 		ctx->objmask = NULL;
2715 /** Parse tokens for query command. */
/* Records the QUERY command on first call; later tokens are elided here. */
2717 parse_query(struct context *ctx, const struct token *token,
2718 	    const char *str, unsigned int len,
2719 	    void *buf, unsigned int size)
2721 	struct buffer *out = buf;
2723 	/* Token name must match. */
2724 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2726 	/* Nothing else to do if there is no buffer. */
2729 	if (!out->command) {
2730 		if (ctx->curr != QUERY)
2732 		if (sizeof(*out) > size)
2734 		out->command = ctx->curr;
2737 		ctx->objmask = NULL;
2742 /** Parse action names. */
/*
 * Looks @str up in the next_action[] token list and stores the matching
 * action's identifier into the destination described by the popped
 * argument. On failure the argument is pushed back (elided error path).
 */
2744 parse_action(struct context *ctx, const struct token *token,
2745 	     const char *str, unsigned int len,
2746 	     void *buf, unsigned int size)
2748 	struct buffer *out = buf;
2749 	const struct arg *arg = pop_args(ctx);
2753 	/* Argument is expected. */
2756 	/* Parse action name. */
2757 	for (i = 0; next_action[i]; ++i) {
2758 		const struct parse_action_priv *priv;
2760 		token = &token_list[next_action[i]];
2761 		if (strcmp_partial(token->name, str, len))
2767 		memcpy((uint8_t *)ctx->object + arg->offset,
2773 	push_args(ctx, arg);
2777 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): first call records the command and anchors the
 * group-ID array; each later call appends one group ID with a bounds check.
 */
2779 parse_list(struct context *ctx, const struct token *token,
2780 	   const char *str, unsigned int len,
2781 	   void *buf, unsigned int size)
2783 	struct buffer *out = buf;
2785 	/* Token name must match. */
2786 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2788 	/* Nothing else to do if there is no buffer. */
2791 	if (!out->command) {
2792 		if (ctx->curr != LIST)
2794 		if (sizeof(*out) > size)
2796 		out->command = ctx->curr;
2799 		ctx->objmask = NULL;
2800 		out->args.list.group =
2801 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Make sure one more group ID still fits inside the buffer. */
2805 	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2806 	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2809 	ctx->object = out->args.list.group + out->args.list.group_n++;
2810 	ctx->objmask = NULL;
2814 /** Parse tokens for isolate command. */
/* Records the ISOLATE command; its boolean argument lines are elided. */
2816 parse_isolate(struct context *ctx, const struct token *token,
2817 	      const char *str, unsigned int len,
2818 	      void *buf, unsigned int size)
2820 	struct buffer *out = buf;
2822 	/* Token name must match. */
2823 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2825 	/* Nothing else to do if there is no buffer. */
2828 	if (!out->command) {
2829 		if (ctx->curr != ISOLATE)
2831 		if (sizeof(*out) > size)
2833 		out->command = ctx->curr;
2836 		ctx->objmask = NULL;
2842  * Parse signed/unsigned integers 8 to 64-bit long.
2844  * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Stores into ctx->object at the argument's offset, honoring the
 * argument's declared size, signedness, [min, max] bounds and optional
 * network byte order (arg->hton). Returns a positive value on success
 * (see its use in parse_boolean()); error lines are elided here.
 */
2848 parse_int(struct context *ctx, const struct token *token,
2849 	  const char *str, unsigned int len,
2850 	  void *buf, unsigned int size)
2852 	const struct arg *arg = pop_args(ctx);
2857 	/* Argument is expected. */
/* Parse with the matching signedness, keep the bits in a uintmax_t. */
2862 		(uintmax_t)strtoimax(str, &end, 0) :
2863 		strtoumax(str, &end, 0);
2864 	if (errno || (size_t)(end - str) != len)
/* Range check against the argument's declared bounds. */
2867 	    ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
2868 			    (intmax_t)u > (intmax_t)arg->max)) ||
2869 	     (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-mask destinations go through the dedicated fill helper. */
2874 		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2875 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
2879 	buf = (uint8_t *)ctx->object + arg->offset;
2883 	case sizeof(uint8_t):
2884 		*(uint8_t *)buf = u;
2886 	case sizeof(uint16_t):
2887 		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are written byte-wise to honor endianness. */
2889 	case sizeof(uint8_t [3]):
2890 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2892 			((uint8_t *)buf)[0] = u;
2893 			((uint8_t *)buf)[1] = u >> 8;
2894 			((uint8_t *)buf)[2] = u >> 16;
2898 		((uint8_t *)buf)[0] = u >> 16;
2899 		((uint8_t *)buf)[1] = u >> 8;
2900 		((uint8_t *)buf)[2] = u;
2902 	case sizeof(uint32_t):
2903 		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2905 	case sizeof(uint64_t):
2906 		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask (all-ones) when one is active. */
2911 	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2913 		buf = (uint8_t *)ctx->objmask + arg->offset;
/* Presumably the error path: restore the argument consumed above. */
2918 	push_args(ctx, arg);
2925  * Three arguments (ctx->args) are retrieved from the stack to store data,
2926  * its actual length and address (in that order).
/*
 * Copies @str into the data field (zero-padded to the field size), lets
 * parse_int() record its length, and optionally stores the data address.
 * NOTE(review): return statements and some checks are elided here.
 */
2929 parse_string(struct context *ctx, const struct token *token,
2930 	     const char *str, unsigned int len,
2931 	     void *buf, unsigned int size)
2933 	const struct arg *arg_data = pop_args(ctx);
2934 	const struct arg *arg_len = pop_args(ctx);
2935 	const struct arg *arg_addr = pop_args(ctx);
2936 	char tmp[16]; /* Ought to be enough. */
2939 	/* Arguments are expected. */
/* Partial pops: push back what was taken before bailing out. */
2943 		push_args(ctx, arg_data);
2947 		push_args(ctx, arg_len);
2948 		push_args(ctx, arg_data);
2951 	size = arg_data->size;
2952 	/* Bit-mask fill is not supported. */
2953 	if (arg_data->mask || size < len)
2957 	/* Let parse_int() fill length information first. */
2958 	ret = snprintf(tmp, sizeof(tmp), "%u", len);
2961 	push_args(ctx, arg_len);
2962 	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2967 	buf = (uint8_t *)ctx->object + arg_data->offset;
2968 	/* Output buffer is not necessarily NUL-terminated. */
2969 	memcpy(buf, str, len);
2970 	memset((uint8_t *)buf + len, 0x00, size - len);
2972 		memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
2973 	/* Save address if requested. */
2974 	if (arg_addr->size) {
2975 		memcpy((uint8_t *)ctx->object + arg_addr->offset,
2977 			(uint8_t *)ctx->object + arg_data->offset
2981 			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
2983 				(uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
2989 	push_args(ctx, arg_addr);
2990 	push_args(ctx, arg_len);
2991 	push_args(ctx, arg_data);
2996  * Parse a MAC address.
2998  * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Parses via cmdline_parse_etheraddr() into a stack temporary, then
 * copies into ctx->object; objmask (when set) is filled with 0xff.
 */
3002 parse_mac_addr(struct context *ctx, const struct token *token,
3003 	       const char *str, unsigned int len,
3004 	       void *buf, unsigned int size)
3006 	const struct arg *arg = pop_args(ctx);
3007 	struct ether_addr tmp;
3011 	/* Argument is expected. */
3015 	/* Bit-mask fill is not supported. */
3016 	if (arg->mask || size != sizeof(tmp))
3018 	/* Only network endian is supported. */
/* The parser must consume exactly the whole token. */
3021 	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
3022 	if (ret < 0 || (unsigned int)ret != len)
3026 	buf = (uint8_t *)ctx->object + arg->offset;
3027 	memcpy(buf, &tmp, size);
3029 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably the error path: restore the argument consumed above. */
3032 	push_args(ctx, arg);
3037  * Parse an IPv4 address.
3039  * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses inet_pton(AF_INET) on a NUL-terminated copy of the token; when
 * dotted-quad parsing fails, falls back to plain integer parsing.
 */
3043 parse_ipv4_addr(struct context *ctx, const struct token *token,
3044 		const char *str, unsigned int len,
3045 		void *buf, unsigned int size)
3047 	const struct arg *arg = pop_args(ctx);
3052 	/* Argument is expected. */
3056 	/* Bit-mask fill is not supported. */
3057 	if (arg->mask || size != sizeof(tmp))
3059 	/* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
3062 	memcpy(str2, str, len);
3064 	ret = inet_pton(AF_INET, str2, &tmp);
3066 		/* Attempt integer parsing. */
3067 		push_args(ctx, arg);
3068 		return parse_int(ctx, token, str, len, buf, size);
3072 	buf = (uint8_t *)ctx->object + arg->offset;
3073 	memcpy(buf, &tmp, size);
3075 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably the error path: restore the argument consumed above. */
3078 	push_args(ctx, arg);
3083  * Parse an IPv6 address.
3085  * Last argument (ctx->args) is retrieved to determine storage size and
/* Like parse_ipv4_addr() but AF_INET6 and with no integer fallback. */
3089 parse_ipv6_addr(struct context *ctx, const struct token *token,
3090 		const char *str, unsigned int len,
3091 		void *buf, unsigned int size)
3093 	const struct arg *arg = pop_args(ctx);
3095 	struct in6_addr tmp;
3099 	/* Argument is expected. */
3103 	/* Bit-mask fill is not supported. */
3104 	if (arg->mask || size != sizeof(tmp))
3106 	/* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
3109 	memcpy(str2, str, len);
3111 	ret = inet_pton(AF_INET6, str2, &tmp);
3116 	buf = (uint8_t *)ctx->object + arg->offset;
3117 	memcpy(buf, &tmp, size);
3119 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably the error path: restore the argument consumed above. */
3122 	push_args(ctx, arg);
3126 /** Boolean values (even indices stand for false). */
/*
 * Entries elided in this fragment. Index parity encodes the value:
 * parse_boolean() maps index i to "1" when (i & 1) and "0" otherwise.
 */
3127 static const char *const boolean_name[] = {
3137  * Parse a boolean value.
3139  * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Recognizes the names in boolean_name[] (even index => false, odd =>
 * true) and rewrites them to "0"/"1"; anything else is handed verbatim
 * to parse_int(). Returns the original token length on success.
 */
3143 parse_boolean(struct context *ctx, const struct token *token,
3144 	      const char *str, unsigned int len,
3145 	      void *buf, unsigned int size)
3147 	const struct arg *arg = pop_args(ctx);
3151 	/* Argument is expected. */
3154 	for (i = 0; boolean_name[i]; ++i)
3155 		if (!strcmp_partial(boolean_name[i], str, len))
3157 	/* Process token as integer. */
3158 	if (boolean_name[i])
3159 		str = i & 1 ? "1" : "0";
/* Restore the argument so parse_int() can pop it again. */
3160 	push_args(ctx, arg);
3161 	ret = parse_int(ctx, token, str, strlen(str), buf, size);
3162 	return ret > 0 ? (int)len : ret;
3165 /** Parse port and update context. */
/*
 * Parses the port ID into a throwaway stack buffer and records it in
 * ctx->port as a side effect for later tokens (e.g. completion helpers).
 */
3167 parse_port(struct context *ctx, const struct token *token,
3168 	   const char *str, unsigned int len,
3169 	   void *buf, unsigned int size)
3171 	struct buffer *out = &(struct buffer){ .port = 0 };
3179 	ctx->objmask = NULL;
3180 	size = sizeof(*out);
3182 	ret = parse_int(ctx, token, str, len, out, size);
3184 		ctx->port = out->port;
3190 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
3192 comp_none(struct context *ctx, const struct token *token,
3193 	  unsigned int ent, char *buf, unsigned int size)
3203 /** Complete boolean values. */
/*
 * With @buf, writes entry @ent of boolean_name[]; without, the elided
 * tail presumably returns the total entry count — TODO confirm.
 */
3205 comp_boolean(struct context *ctx, const struct token *token,
3206 	     unsigned int ent, char *buf, unsigned int size)
3212 	for (i = 0; boolean_name[i]; ++i)
3213 		if (buf && i == ent)
3214 			return snprintf(buf, size, "%s", boolean_name[i]);
3220 /** Complete action names. */
/* Suggests the name of entry @ent from the next_action[] token list. */
3222 comp_action(struct context *ctx, const struct token *token,
3223 	    unsigned int ent, char *buf, unsigned int size)
3229 	for (i = 0; next_action[i]; ++i)
3230 		if (buf && i == ent)
3231 			return snprintf(buf, size, "%s",
3232 					token_list[next_action[i]].name);
3238 /** Complete available ports. */
/* Iterates attached Ethernet devices; entry @ent is printed as its ID. */
3240 comp_port(struct context *ctx, const struct token *token,
3241 	  unsigned int ent, char *buf, unsigned int size)
3248 	RTE_ETH_FOREACH_DEV(p) {
3249 		if (buf && i == ent)
3250 			return snprintf(buf, size, "%u", p);
3258 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port previously recorded by parse_port();
 * bails out when no single valid port is selected.
 */
3260 comp_rule_id(struct context *ctx, const struct token *token,
3261 	     unsigned int ent, char *buf, unsigned int size)
3264 	struct rte_port *port;
3265 	struct port_flow *pf;
3268 	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
3269 	    ctx->port == (portid_t)RTE_PORT_ALL)
3271 	port = &ports[ctx->port];
3272 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3273 		if (buf && i == ent)
3274 			return snprintf(buf, size, "%u", pf->id);
/** Complete type field for RSS action. */
static int
comp_vc_action_rss_type(struct context *ctx, const struct token *token,
			unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	/* Count known RSS type names. */
	for (i = 0; rss_type_table[i].str; ++i)
		;
	/* Without a buffer, report entry count: all names plus "end". */
	if (!buf)
		return i + 1;
	if (ent < i)
		return snprintf(buf, size, "%s", rss_type_table[ent].str);
	/* The last completion slot is the "end" terminator keyword. */
	if (ent == i)
		return snprintf(buf, size, "end");
	return -1;
}
3302 /** Complete queue field for RSS action. */
3304 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
3305 unsigned int ent, char *buf, unsigned int size)
3312 return snprintf(buf, size, "%u", ent);
3314 return snprintf(buf, size, "end");
/** Internal context, shared by all cmdline callbacks below (single parser). */
static struct context cmd_flow_context;

/** Global parser instance (cmdline API); initialized at the end of this file. */
cmdline_parse_inst_t cmd_flow;
/** Initialize context. */
static void
cmd_flow_context_init(struct context *ctx)
{
	/* A full memset() is not necessary; reset only the fields used. */
	ctx->curr = ZERO;
	ctx->prev = ZERO;
	ctx->next_num = 0;
	ctx->args_num = 0;
	ctx->eol = 0;
	ctx->last = 0;
	ctx->port = 0;
	ctx->objdata = 0;
	ctx->object = NULL;
	ctx->objmask = NULL;
}
/**
 * Parse a token (cmdline API).
 *
 * Returns the number of characters consumed on success, -1 when no
 * candidate token matches the argument.
 */
static int
cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
	       unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token;
	const enum index *list;
	int len;
	int i;

	(void)hdr;
	token = &token_list[ctx->curr];
	/* Check argument length. */
	ctx->eol = 0;
	ctx->last = 0;
	for (len = 0; src[len]; ++len)
		if (src[len] == '#' || isspace(src[len]))
			break;
	if (!len)
		return -1;
	/* Last argument and EOL detection. */
	for (i = len; src[i]; ++i)
		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
			break;
		else if (!isspace(src[i])) {
			/* More non-space input follows: not the last argument. */
			ctx->last = 1;
			break;
		}
	for (; src[i]; ++i)
		if (src[i] == '\r' || src[i] == '\n') {
			ctx->eol = 1;
			break;
		}
	/* Initialize context if necessary. */
	if (!ctx->next_num) {
		if (!token->next)
			return 0;
		ctx->next[ctx->next_num++] = token->next[0];
	}
	/* Process argument through candidates. */
	ctx->prev = ctx->curr;
	list = ctx->next[ctx->next_num - 1];
	for (i = 0; list[i]; ++i) {
		const struct token *next = &token_list[list[i]];
		int tmp;

		ctx->curr = list[i];
		/* Tokens without a dedicated parser use the default one. */
		if (next->call)
			tmp = next->call(ctx, next, src, len, result, size);
		else
			tmp = parse_default(ctx, next, src, len, result, size);
		/* A candidate matches only if it consumed the whole argument. */
		if (tmp == -1 || tmp != len)
			continue;
		token = next;
		break;
	}
	if (!list[i]) /* No match. */
		return -1;
	/* Current candidate list fully processed; pop it off the stack. */
	--ctx->next_num;
	/* Push subsequent tokens if any. */
	if (token->next)
		for (i = 0; token->next[i]; ++i) {
			if (ctx->next_num == RTE_DIM(ctx->next))
				return -1;
			ctx->next[ctx->next_num++] = token->next[i];
		}
	/* Push arguments if any. */
	if (token->args)
		for (i = 0; token->args[i]; ++i) {
			if (ctx->args_num == RTE_DIM(ctx->args))
				return -1;
			ctx->args[ctx->args_num++] = token->args[i];
		}
	return len;
}
/** Return number of completion entries (cmdline API). */
static int
cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return 0;
	/*
	 * If there is a single token, use its completion callback, otherwise
	 * return the number of entries.
	 */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, 0, NULL, 0);
	}
	return i;
}
/** Return a completion entry (cmdline API). */
static int
cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
			  char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return -1;
	/* If there is a single token, use its completion callback. */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
	}
	/* Otherwise make sure the index is valid and use defaults. */
	if (index >= i)
		return -1;
	token = &token_list[list[index]];
	snprintf(dst, size, "%s", token->name);
	/* Save index for cmd_flow_get_help(). */
	ctx->prev = list[index];
	return 0;
}
/** Populate help strings for current token (cmdline API). */
static int
cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	/* ctx->prev was saved by the parse/completion callbacks above. */
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	if (token->help)
		cmd_flow.help_str = token->help;
	else
		cmd_flow.help_str = token->name;
	return 0;
}
/**
 * Token definition template (cmdline API).
 *
 * All dynamic tokens share this single descriptor; per-token behavior is
 * driven by the context state the callbacks read.
 */
static struct cmdline_token_hdr cmd_flow_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_flow_get_help,
	},
	.offset = 0,
};
/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
	     cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_flow.tokens))
		cmd_flow_context_init(ctx);
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && ctx->curr) {
		*hdr = NULL;
		return;
	}
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		/* End only when END is among the remaining candidates. */
		for (i = 0; list[i]; ++i) {
			if (list[i] != END)
				continue;
			*hdr = NULL;
			return;
		}
	}
	/* All dynamic tokens share the same generic descriptor. */
	*hdr = &cmd_flow_token_hdr;
}
/** Dispatch parsed buffer to function calls. */
static int
cmd_flow_parsed(const struct buffer *in)
{
	/* Route the fully-parsed command to the matching port_flow_* helper. */
	switch (in->command) {
	case VALIDATE:
		port_flow_validate(in->port, &in->args.vc.attr,
				   in->args.vc.pattern, in->args.vc.actions);
		break;
	case CREATE:
		port_flow_create(in->port, &in->args.vc.attr,
				 in->args.vc.pattern, in->args.vc.actions);
		break;
	case DESTROY:
		port_flow_destroy(in->port, in->args.destroy.rule_n,
				  in->args.destroy.rule);
		break;
	case FLUSH:
		port_flow_flush(in->port);
		break;
	case QUERY:
		port_flow_query(in->port, in->args.query.rule,
				in->args.query.action);
		break;
	case LIST:
		port_flow_list(in->port, in->args.list.group_n,
			       in->args.list.group);
		break;
	case ISOLATE:
		port_flow_isolate(in->port, in->args.isolate.set);
		break;
	default:
		break;
	}
	return 0;
}
/** Token generator and output processing callback (cmdline API). */
static void
cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	/* A NULL cmdline pointer means token generation, not execution. */
	if (cl != NULL)
		cmd_flow_parsed(arg0);
	else
		cmd_flow_tok(arg0, arg2);
}
3593 /** Global parser instance (cmdline API). */
3594 cmdline_parse_inst_t cmd_flow = {
3596 .data = NULL, /**< Unused. */
3597 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
3600 }, /**< Tokens are returned by cmd_flow_tok(). */