1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
111 ITEM_VLAN_INNER_TYPE,
143 ITEM_E_TAG_GRP_ECID_B,
162 ITEM_ARP_ETH_IPV4_SHA,
163 ITEM_ARP_ETH_IPV4_SPA,
164 ITEM_ARP_ETH_IPV4_THA,
165 ITEM_ARP_ETH_IPV4_TPA,
167 ITEM_IPV6_EXT_NEXT_HDR,
172 ITEM_ICMP6_ND_NS_TARGET_ADDR,
174 ITEM_ICMP6_ND_NA_TARGET_ADDR,
176 ITEM_ICMP6_ND_OPT_TYPE,
177 ITEM_ICMP6_ND_OPT_SLA_ETH,
178 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
179 ITEM_ICMP6_ND_OPT_TLA_ETH,
180 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
182 /* Validate/create actions. */
202 ACTION_RSS_FUNC_DEFAULT,
203 ACTION_RSS_FUNC_TOEPLITZ,
204 ACTION_RSS_FUNC_SIMPLE_XOR,
216 ACTION_PHY_PORT_ORIGINAL,
217 ACTION_PHY_PORT_INDEX,
219 ACTION_PORT_ID_ORIGINAL,
223 ACTION_OF_SET_MPLS_TTL,
224 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
225 ACTION_OF_DEC_MPLS_TTL,
226 ACTION_OF_SET_NW_TTL,
227 ACTION_OF_SET_NW_TTL_NW_TTL,
228 ACTION_OF_DEC_NW_TTL,
229 ACTION_OF_COPY_TTL_OUT,
230 ACTION_OF_COPY_TTL_IN,
233 ACTION_OF_PUSH_VLAN_ETHERTYPE,
234 ACTION_OF_SET_VLAN_VID,
235 ACTION_OF_SET_VLAN_VID_VLAN_VID,
236 ACTION_OF_SET_VLAN_PCP,
237 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
239 ACTION_OF_POP_MPLS_ETHERTYPE,
241 ACTION_OF_PUSH_MPLS_ETHERTYPE,
/**
 * Maximum size for pattern in struct rte_flow_item_raw.
 *
 * Number of extra bytes reserved after the structure to hold the raw
 * byte string to match (see ITEM_RAW_SIZE below).
 */
#define ITEM_RAW_PATTERN_SIZE 40
/**
 * Storage size for struct rte_flow_item_raw including pattern.
 *
 * RAW items carry a variable-length pattern immediately following the
 * structure, so allocation must cover the structure itself plus
 * ITEM_RAW_PATTERN_SIZE bytes of pattern storage.
 */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
/**
 * Maximum number of queue indices in struct rte_flow_action_rss.
 *
 * Sizes the queue[] array embedded in struct action_rss_data, which
 * provides storage for the external queue list of the RSS action.
 */
#define ACTION_RSS_QUEUE_NUM 32
258 /** Storage for struct rte_flow_action_rss including external data. */
259 struct action_rss_data {
260 struct rte_flow_action_rss conf;
261 uint8_t key[RSS_HASH_KEY_LENGTH];
262 uint16_t queue[ACTION_RSS_QUEUE_NUM];
265 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
266 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
268 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
269 struct action_vxlan_encap_data {
270 struct rte_flow_action_vxlan_encap conf;
271 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
272 struct rte_flow_item_eth item_eth;
273 struct rte_flow_item_vlan item_vlan;
275 struct rte_flow_item_ipv4 item_ipv4;
276 struct rte_flow_item_ipv6 item_ipv6;
278 struct rte_flow_item_udp item_udp;
279 struct rte_flow_item_vxlan item_vxlan;
282 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
283 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
285 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
286 struct action_nvgre_encap_data {
287 struct rte_flow_action_nvgre_encap conf;
288 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
289 struct rte_flow_item_eth item_eth;
290 struct rte_flow_item_vlan item_vlan;
292 struct rte_flow_item_ipv4 item_ipv4;
293 struct rte_flow_item_ipv6 item_ipv6;
295 struct rte_flow_item_nvgre item_nvgre;
/**
 * Maximum number of subsequent tokens and arguments on the stack.
 *
 * Sizes both the next[] (pending token lists) and args[] (pending token
 * arguments) arrays of the parser context; parsing cannot nest deeper
 * than this many stacked entries.
 */
#define CTX_STACK_SIZE 16
301 /** Parser context. */
303 /** Stack of subsequent token lists to process. */
304 const enum index *next[CTX_STACK_SIZE];
305 /** Arguments for stacked tokens. */
306 const void *args[CTX_STACK_SIZE];
307 enum index curr; /**< Current token index. */
308 enum index prev; /**< Index of the last token seen. */
309 int next_num; /**< Number of entries in next[]. */
310 int args_num; /**< Number of entries in args[]. */
311 uint32_t eol:1; /**< EOL has been detected. */
312 uint32_t last:1; /**< No more arguments. */
313 portid_t port; /**< Current port ID (for completions). */
314 uint32_t objdata; /**< Object-specific data. */
315 void *object; /**< Address of current object for relative offsets. */
316 void *objmask; /**< Object a full mask must be written to. */
319 /** Token argument. */
321 uint32_t hton:1; /**< Use network byte ordering. */
322 uint32_t sign:1; /**< Value is signed. */
323 uint32_t bounded:1; /**< Value is bounded. */
324 uintmax_t min; /**< Minimum value if bounded. */
325 uintmax_t max; /**< Maximum value if bounded. */
326 uint32_t offset; /**< Relative offset from ctx->object. */
327 uint32_t size; /**< Field size. */
328 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
331 /** Parser token definition. */
333 /** Type displayed during completion (defaults to "TOKEN"). */
335 /** Help displayed during completion (defaults to token name). */
337 /** Private data used by parser functions. */
340 * Lists of subsequent tokens to push on the stack. Each call to the
341 * parser consumes the last entry of that stack.
343 const enum index *const *next;
344 /** Arguments stack for subsequent tokens that need them. */
345 const struct arg *const *args;
347 * Token-processing callback, returns -1 in case of error, the
348 * length of the matched string otherwise. If NULL, attempts to
349 * match the token name.
351 * If buf is not NULL, the result should be stored in it according
352 * to context. An error is returned if not large enough.
354 int (*call)(struct context *ctx, const struct token *token,
355 const char *str, unsigned int len,
356 void *buf, unsigned int size);
358 * Callback that provides possible values for this token, used for
359 * completion. Returns -1 in case of error, the number of possible
360 * values otherwise. If NULL, the token name is used.
362 * If buf is not NULL, entry index ent is written to buf and the
363 * full length of the entry is returned (same behavior as
366 int (*comp)(struct context *ctx, const struct token *token,
367 unsigned int ent, char *buf, unsigned int size);
368 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field.
 *
 * Expands to a compound literal: a NULL-terminated array of token index
 * lists (each typically built with NEXT_ENTRY()), pushed on the parser
 * stack when the owning token matches.
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
/**
 * Static initializer for a NEXT() entry.
 *
 * Expands to a compound literal: a single list of token indices
 * terminated by ZERO (the null token).
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
/**
 * Static initializer for the args field.
 *
 * Expands to a compound literal: a NULL-terminated array of pointers to
 * struct arg descriptors (one per ARGS_ENTRY*() item), consumed by
 * subsequent tokens that need arguments.
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
381 /** Static initializer for ARGS() to target a field. */
382 #define ARGS_ENTRY(s, f) \
383 (&(const struct arg){ \
384 .offset = offsetof(s, f), \
385 .size = sizeof(((s *)0)->f), \
388 /** Static initializer for ARGS() to target a bit-field. */
389 #define ARGS_ENTRY_BF(s, f, b) \
390 (&(const struct arg){ \
392 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
395 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
396 #define ARGS_ENTRY_MASK(s, f, m) \
397 (&(const struct arg){ \
398 .offset = offsetof(s, f), \
399 .size = sizeof(((s *)0)->f), \
400 .mask = (const void *)(m), \
403 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
404 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
405 (&(const struct arg){ \
407 .offset = offsetof(s, f), \
408 .size = sizeof(((s *)0)->f), \
409 .mask = (const void *)(m), \
412 /** Static initializer for ARGS() to target a pointer. */
413 #define ARGS_ENTRY_PTR(s, f) \
414 (&(const struct arg){ \
415 .size = sizeof(*((s *)0)->f), \
418 /** Static initializer for ARGS() with arbitrary offset and size. */
419 #define ARGS_ENTRY_ARB(o, s) \
420 (&(const struct arg){ \
425 /** Same as ARGS_ENTRY_ARB() with bounded values. */
426 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
427 (&(const struct arg){ \
435 /** Same as ARGS_ENTRY() using network byte ordering. */
436 #define ARGS_ENTRY_HTON(s, f) \
437 (&(const struct arg){ \
439 .offset = offsetof(s, f), \
440 .size = sizeof(((s *)0)->f), \
443 /** Parser output buffer layout expected by cmd_flow_parsed(). */
445 enum index command; /**< Flow command. */
446 portid_t port; /**< Affected port ID. */
449 struct rte_flow_attr attr;
450 struct rte_flow_item *pattern;
451 struct rte_flow_action *actions;
455 } vc; /**< Validate/create arguments. */
459 } destroy; /**< Destroy arguments. */
462 struct rte_flow_action action;
463 } query; /**< Query arguments. */
467 } list; /**< List arguments. */
470 } isolate; /**< Isolated mode arguments. */
471 } args; /**< Command arguments. */
474 /** Private data for pattern items. */
475 struct parse_item_priv {
476 enum rte_flow_item_type type; /**< Item type. */
477 uint32_t size; /**< Size of item specification structure. */
480 #define PRIV_ITEM(t, s) \
481 (&(const struct parse_item_priv){ \
482 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
486 /** Private data for actions. */
487 struct parse_action_priv {
488 enum rte_flow_action_type type; /**< Action type. */
489 uint32_t size; /**< Size of action configuration structure. */
492 #define PRIV_ACTION(t, s) \
493 (&(const struct parse_action_priv){ \
494 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
498 static const enum index next_vc_attr[] = {
508 static const enum index next_destroy_attr[] = {
514 static const enum index next_list_attr[] = {
520 static const enum index item_param[] = {
529 static const enum index next_item[] = {
565 ITEM_ICMP6_ND_OPT_SLA_ETH,
566 ITEM_ICMP6_ND_OPT_TLA_ETH,
570 static const enum index item_fuzzy[] = {
576 static const enum index item_any[] = {
582 static const enum index item_vf[] = {
588 static const enum index item_phy_port[] = {
594 static const enum index item_port_id[] = {
600 static const enum index item_mark[] = {
606 static const enum index item_raw[] = {
616 static const enum index item_eth[] = {
624 static const enum index item_vlan[] = {
629 ITEM_VLAN_INNER_TYPE,
634 static const enum index item_ipv4[] = {
644 static const enum index item_ipv6[] = {
655 static const enum index item_icmp[] = {
662 static const enum index item_udp[] = {
669 static const enum index item_tcp[] = {
677 static const enum index item_sctp[] = {
686 static const enum index item_vxlan[] = {
692 static const enum index item_e_tag[] = {
693 ITEM_E_TAG_GRP_ECID_B,
698 static const enum index item_nvgre[] = {
704 static const enum index item_mpls[] = {
710 static const enum index item_gre[] = {
716 static const enum index item_gtp[] = {
722 static const enum index item_geneve[] = {
729 static const enum index item_vxlan_gpe[] = {
735 static const enum index item_arp_eth_ipv4[] = {
736 ITEM_ARP_ETH_IPV4_SHA,
737 ITEM_ARP_ETH_IPV4_SPA,
738 ITEM_ARP_ETH_IPV4_THA,
739 ITEM_ARP_ETH_IPV4_TPA,
744 static const enum index item_ipv6_ext[] = {
745 ITEM_IPV6_EXT_NEXT_HDR,
750 static const enum index item_icmp6[] = {
757 static const enum index item_icmp6_nd_ns[] = {
758 ITEM_ICMP6_ND_NS_TARGET_ADDR,
763 static const enum index item_icmp6_nd_na[] = {
764 ITEM_ICMP6_ND_NA_TARGET_ADDR,
769 static const enum index item_icmp6_nd_opt[] = {
770 ITEM_ICMP6_ND_OPT_TYPE,
775 static const enum index item_icmp6_nd_opt_sla_eth[] = {
776 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
781 static const enum index item_icmp6_nd_opt_tla_eth[] = {
782 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
787 static const enum index next_action[] = {
803 ACTION_OF_SET_MPLS_TTL,
804 ACTION_OF_DEC_MPLS_TTL,
805 ACTION_OF_SET_NW_TTL,
806 ACTION_OF_DEC_NW_TTL,
807 ACTION_OF_COPY_TTL_OUT,
808 ACTION_OF_COPY_TTL_IN,
811 ACTION_OF_SET_VLAN_VID,
812 ACTION_OF_SET_VLAN_PCP,
822 static const enum index action_mark[] = {
828 static const enum index action_queue[] = {
834 static const enum index action_count[] = {
841 static const enum index action_rss[] = {
852 static const enum index action_vf[] = {
859 static const enum index action_phy_port[] = {
860 ACTION_PHY_PORT_ORIGINAL,
861 ACTION_PHY_PORT_INDEX,
866 static const enum index action_port_id[] = {
867 ACTION_PORT_ID_ORIGINAL,
873 static const enum index action_meter[] = {
879 static const enum index action_of_set_mpls_ttl[] = {
880 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
885 static const enum index action_of_set_nw_ttl[] = {
886 ACTION_OF_SET_NW_TTL_NW_TTL,
891 static const enum index action_of_push_vlan[] = {
892 ACTION_OF_PUSH_VLAN_ETHERTYPE,
897 static const enum index action_of_set_vlan_vid[] = {
898 ACTION_OF_SET_VLAN_VID_VLAN_VID,
903 static const enum index action_of_set_vlan_pcp[] = {
904 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
909 static const enum index action_of_pop_mpls[] = {
910 ACTION_OF_POP_MPLS_ETHERTYPE,
915 static const enum index action_of_push_mpls[] = {
916 ACTION_OF_PUSH_MPLS_ETHERTYPE,
921 static const enum index action_jump[] = {
927 static int parse_init(struct context *, const struct token *,
928 const char *, unsigned int,
929 void *, unsigned int);
930 static int parse_vc(struct context *, const struct token *,
931 const char *, unsigned int,
932 void *, unsigned int);
933 static int parse_vc_spec(struct context *, const struct token *,
934 const char *, unsigned int, void *, unsigned int);
935 static int parse_vc_conf(struct context *, const struct token *,
936 const char *, unsigned int, void *, unsigned int);
937 static int parse_vc_action_rss(struct context *, const struct token *,
938 const char *, unsigned int, void *,
940 static int parse_vc_action_rss_func(struct context *, const struct token *,
941 const char *, unsigned int, void *,
943 static int parse_vc_action_rss_type(struct context *, const struct token *,
944 const char *, unsigned int, void *,
946 static int parse_vc_action_rss_queue(struct context *, const struct token *,
947 const char *, unsigned int, void *,
949 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
950 const char *, unsigned int, void *,
952 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
953 const char *, unsigned int, void *,
955 static int parse_destroy(struct context *, const struct token *,
956 const char *, unsigned int,
957 void *, unsigned int);
958 static int parse_flush(struct context *, const struct token *,
959 const char *, unsigned int,
960 void *, unsigned int);
961 static int parse_query(struct context *, const struct token *,
962 const char *, unsigned int,
963 void *, unsigned int);
964 static int parse_action(struct context *, const struct token *,
965 const char *, unsigned int,
966 void *, unsigned int);
967 static int parse_list(struct context *, const struct token *,
968 const char *, unsigned int,
969 void *, unsigned int);
970 static int parse_isolate(struct context *, const struct token *,
971 const char *, unsigned int,
972 void *, unsigned int);
973 static int parse_int(struct context *, const struct token *,
974 const char *, unsigned int,
975 void *, unsigned int);
976 static int parse_prefix(struct context *, const struct token *,
977 const char *, unsigned int,
978 void *, unsigned int);
979 static int parse_boolean(struct context *, const struct token *,
980 const char *, unsigned int,
981 void *, unsigned int);
982 static int parse_string(struct context *, const struct token *,
983 const char *, unsigned int,
984 void *, unsigned int);
985 static int parse_mac_addr(struct context *, const struct token *,
986 const char *, unsigned int,
987 void *, unsigned int);
988 static int parse_ipv4_addr(struct context *, const struct token *,
989 const char *, unsigned int,
990 void *, unsigned int);
991 static int parse_ipv6_addr(struct context *, const struct token *,
992 const char *, unsigned int,
993 void *, unsigned int);
994 static int parse_port(struct context *, const struct token *,
995 const char *, unsigned int,
996 void *, unsigned int);
997 static int comp_none(struct context *, const struct token *,
998 unsigned int, char *, unsigned int);
999 static int comp_boolean(struct context *, const struct token *,
1000 unsigned int, char *, unsigned int);
1001 static int comp_action(struct context *, const struct token *,
1002 unsigned int, char *, unsigned int);
1003 static int comp_port(struct context *, const struct token *,
1004 unsigned int, char *, unsigned int);
1005 static int comp_rule_id(struct context *, const struct token *,
1006 unsigned int, char *, unsigned int);
1007 static int comp_vc_action_rss_type(struct context *, const struct token *,
1008 unsigned int, char *, unsigned int);
1009 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1010 unsigned int, char *, unsigned int);
1012 /** Token definitions. */
1013 static const struct token token_list[] = {
1014 /* Special tokens. */
1017 .help = "null entry, abused as the entry point",
1018 .next = NEXT(NEXT_ENTRY(FLOW)),
1023 .help = "command may end here",
1025 /* Common tokens. */
1029 .help = "integer value",
1034 .name = "{unsigned}",
1036 .help = "unsigned integer value",
1043 .help = "prefix length for bit-mask",
1044 .call = parse_prefix,
1048 .name = "{boolean}",
1050 .help = "any boolean value",
1051 .call = parse_boolean,
1052 .comp = comp_boolean,
1057 .help = "fixed string",
1058 .call = parse_string,
1062 .name = "{MAC address}",
1064 .help = "standard MAC address notation",
1065 .call = parse_mac_addr,
1069 .name = "{IPv4 address}",
1070 .type = "IPV4 ADDRESS",
1071 .help = "standard IPv4 address notation",
1072 .call = parse_ipv4_addr,
1076 .name = "{IPv6 address}",
1077 .type = "IPV6 ADDRESS",
1078 .help = "standard IPv6 address notation",
1079 .call = parse_ipv6_addr,
1083 .name = "{rule id}",
1085 .help = "rule identifier",
1087 .comp = comp_rule_id,
1090 .name = "{port_id}",
1092 .help = "port identifier",
1097 .name = "{group_id}",
1099 .help = "group identifier",
1103 [PRIORITY_LEVEL] = {
1106 .help = "priority level",
1110 /* Top-level command. */
1113 .type = "{command} {port_id} [{arg} [...]]",
1114 .help = "manage ingress/egress flow rules",
1115 .next = NEXT(NEXT_ENTRY
1125 /* Sub-level commands. */
1128 .help = "check whether a flow rule can be created",
1129 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1130 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1135 .help = "create a flow rule",
1136 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1137 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1142 .help = "destroy specific flow rules",
1143 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1144 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1145 .call = parse_destroy,
1149 .help = "destroy all flow rules",
1150 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1151 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1152 .call = parse_flush,
1156 .help = "query an existing flow rule",
1157 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1158 NEXT_ENTRY(RULE_ID),
1159 NEXT_ENTRY(PORT_ID)),
1160 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1161 ARGS_ENTRY(struct buffer, args.query.rule),
1162 ARGS_ENTRY(struct buffer, port)),
1163 .call = parse_query,
1167 .help = "list existing flow rules",
1168 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1169 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1174 .help = "restrict ingress traffic to the defined flow rules",
1175 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1176 NEXT_ENTRY(PORT_ID)),
1177 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1178 ARGS_ENTRY(struct buffer, port)),
1179 .call = parse_isolate,
1181 /* Destroy arguments. */
1184 .help = "specify a rule identifier",
1185 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1186 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1187 .call = parse_destroy,
1189 /* Query arguments. */
1193 .help = "action to query, must be part of the rule",
1194 .call = parse_action,
1195 .comp = comp_action,
1197 /* List arguments. */
1200 .help = "specify a group",
1201 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1202 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1205 /* Validate/create attributes. */
1208 .help = "specify a group",
1209 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1210 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1215 .help = "specify a priority level",
1216 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1217 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1222 .help = "affect rule to ingress",
1223 .next = NEXT(next_vc_attr),
1228 .help = "affect rule to egress",
1229 .next = NEXT(next_vc_attr),
1234 .help = "apply rule directly to endpoints found in pattern",
1235 .next = NEXT(next_vc_attr),
1238 /* Validate/create pattern. */
1241 .help = "submit a list of pattern items",
1242 .next = NEXT(next_item),
1247 .help = "match value perfectly (with full bit-mask)",
1248 .call = parse_vc_spec,
1250 [ITEM_PARAM_SPEC] = {
1252 .help = "match value according to configured bit-mask",
1253 .call = parse_vc_spec,
1255 [ITEM_PARAM_LAST] = {
1257 .help = "specify upper bound to establish a range",
1258 .call = parse_vc_spec,
1260 [ITEM_PARAM_MASK] = {
1262 .help = "specify bit-mask with relevant bits set to one",
1263 .call = parse_vc_spec,
1265 [ITEM_PARAM_PREFIX] = {
1267 .help = "generate bit-mask from a prefix length",
1268 .call = parse_vc_spec,
1272 .help = "specify next pattern item",
1273 .next = NEXT(next_item),
1277 .help = "end list of pattern items",
1278 .priv = PRIV_ITEM(END, 0),
1279 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1284 .help = "no-op pattern item",
1285 .priv = PRIV_ITEM(VOID, 0),
1286 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1291 .help = "perform actions when pattern does not match",
1292 .priv = PRIV_ITEM(INVERT, 0),
1293 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1298 .help = "match any protocol for the current layer",
1299 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1300 .next = NEXT(item_any),
1305 .help = "number of layers covered",
1306 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1307 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1311 .help = "match traffic from/to the physical function",
1312 .priv = PRIV_ITEM(PF, 0),
1313 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1318 .help = "match traffic from/to a virtual function ID",
1319 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1320 .next = NEXT(item_vf),
1326 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1327 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1331 .help = "match traffic from/to a specific physical port",
1332 .priv = PRIV_ITEM(PHY_PORT,
1333 sizeof(struct rte_flow_item_phy_port)),
1334 .next = NEXT(item_phy_port),
1337 [ITEM_PHY_PORT_INDEX] = {
1339 .help = "physical port index",
1340 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1341 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1345 .help = "match traffic from/to a given DPDK port ID",
1346 .priv = PRIV_ITEM(PORT_ID,
1347 sizeof(struct rte_flow_item_port_id)),
1348 .next = NEXT(item_port_id),
1351 [ITEM_PORT_ID_ID] = {
1353 .help = "DPDK port ID",
1354 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1355 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1359 .help = "match traffic against value set in previously matched rule",
1360 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1361 .next = NEXT(item_mark),
1366 .help = "Integer value to match against",
1367 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1368 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1372 .help = "match an arbitrary byte string",
1373 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1374 .next = NEXT(item_raw),
1377 [ITEM_RAW_RELATIVE] = {
1379 .help = "look for pattern after the previous item",
1380 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1381 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1384 [ITEM_RAW_SEARCH] = {
1386 .help = "search pattern from offset (see also limit)",
1387 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1388 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1391 [ITEM_RAW_OFFSET] = {
1393 .help = "absolute or relative offset for pattern",
1394 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1395 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1397 [ITEM_RAW_LIMIT] = {
1399 .help = "search area limit for start of pattern",
1400 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1401 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1403 [ITEM_RAW_PATTERN] = {
1405 .help = "byte string to look for",
1406 .next = NEXT(item_raw,
1408 NEXT_ENTRY(ITEM_PARAM_IS,
1411 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1412 ARGS_ENTRY(struct rte_flow_item_raw, length),
1413 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1414 ITEM_RAW_PATTERN_SIZE)),
1418 .help = "match Ethernet header",
1419 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1420 .next = NEXT(item_eth),
1425 .help = "destination MAC",
1426 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1427 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1431 .help = "source MAC",
1432 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1433 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1437 .help = "EtherType",
1438 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1439 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1443 .help = "match 802.1Q/ad VLAN tag",
1444 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1445 .next = NEXT(item_vlan),
1450 .help = "tag control information",
1451 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1452 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1456 .help = "priority code point",
1457 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1458 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1463 .help = "drop eligible indicator",
1464 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1465 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1470 .help = "VLAN identifier",
1471 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1472 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1475 [ITEM_VLAN_INNER_TYPE] = {
1476 .name = "inner_type",
1477 .help = "inner EtherType",
1478 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1479 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1484 .help = "match IPv4 header",
1485 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1486 .next = NEXT(item_ipv4),
1491 .help = "type of service",
1492 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1493 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1494 hdr.type_of_service)),
1498 .help = "time to live",
1499 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1500 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1503 [ITEM_IPV4_PROTO] = {
1505 .help = "next protocol ID",
1506 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1507 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1508 hdr.next_proto_id)),
1512 .help = "source address",
1513 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1514 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1519 .help = "destination address",
1520 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1521 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1526 .help = "match IPv6 header",
1527 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1528 .next = NEXT(item_ipv6),
1533 .help = "traffic class",
1534 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1535 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1537 "\x0f\xf0\x00\x00")),
1539 [ITEM_IPV6_FLOW] = {
1541 .help = "flow label",
1542 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1543 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1545 "\x00\x0f\xff\xff")),
1547 [ITEM_IPV6_PROTO] = {
1549 .help = "protocol (next header)",
1550 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1551 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1556 .help = "hop limit",
1557 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1558 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1563 .help = "source address",
1564 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1565 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1570 .help = "destination address",
1571 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1572 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1577 .help = "match ICMP header",
1578 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1579 .next = NEXT(item_icmp),
1582 [ITEM_ICMP_TYPE] = {
1584 .help = "ICMP packet type",
1585 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1586 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1589 [ITEM_ICMP_CODE] = {
1591 .help = "ICMP packet code",
1592 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1593 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1598 .help = "match UDP header",
1599 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1600 .next = NEXT(item_udp),
1605 .help = "UDP source port",
1606 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1607 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1612 .help = "UDP destination port",
1613 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1614 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1619 .help = "match TCP header",
1620 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1621 .next = NEXT(item_tcp),
1626 .help = "TCP source port",
1627 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1628 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1633 .help = "TCP destination port",
1634 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1635 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1638 [ITEM_TCP_FLAGS] = {
1640 .help = "TCP flags",
1641 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1642 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1647 .help = "match SCTP header",
1648 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1649 .next = NEXT(item_sctp),
1654 .help = "SCTP source port",
1655 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1656 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1661 .help = "SCTP destination port",
1662 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1663 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1668 .help = "validation tag",
1669 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1670 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1673 [ITEM_SCTP_CKSUM] = {
1676 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1677 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1682 .help = "match VXLAN header",
1683 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1684 .next = NEXT(item_vxlan),
1687 [ITEM_VXLAN_VNI] = {
1689 .help = "VXLAN identifier",
1690 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1691 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1695 .help = "match E-Tag header",
1696 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1697 .next = NEXT(item_e_tag),
1700 [ITEM_E_TAG_GRP_ECID_B] = {
1701 .name = "grp_ecid_b",
1702 .help = "GRP and E-CID base",
1703 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1704 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1710 .help = "match NVGRE header",
1711 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1712 .next = NEXT(item_nvgre),
1715 [ITEM_NVGRE_TNI] = {
1717 .help = "virtual subnet ID",
1718 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1719 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1723 .help = "match MPLS header",
1724 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1725 .next = NEXT(item_mpls),
1728 [ITEM_MPLS_LABEL] = {
1730 .help = "MPLS label",
1731 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1732 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1738 .help = "match GRE header",
1739 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1740 .next = NEXT(item_gre),
1743 [ITEM_GRE_PROTO] = {
1745 .help = "GRE protocol type",
1746 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1747 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1752 .help = "fuzzy pattern match, expect faster than default",
1753 .priv = PRIV_ITEM(FUZZY,
1754 sizeof(struct rte_flow_item_fuzzy)),
1755 .next = NEXT(item_fuzzy),
1758 [ITEM_FUZZY_THRESH] = {
1760 .help = "match accuracy threshold",
1761 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1762 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1767 .help = "match GTP header",
1768 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1769 .next = NEXT(item_gtp),
1774 .help = "tunnel endpoint identifier",
1775 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1776 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1780 .help = "match GTP header",
1781 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1782 .next = NEXT(item_gtp),
1787 .help = "match GTP header",
1788 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1789 .next = NEXT(item_gtp),
1794 .help = "match GENEVE header",
1795 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1796 .next = NEXT(item_geneve),
1799 [ITEM_GENEVE_VNI] = {
1801 .help = "virtual network identifier",
1802 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1803 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1805 [ITEM_GENEVE_PROTO] = {
1807 .help = "GENEVE protocol type",
1808 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1809 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1812 [ITEM_VXLAN_GPE] = {
1813 .name = "vxlan-gpe",
1814 .help = "match VXLAN-GPE header",
1815 .priv = PRIV_ITEM(VXLAN_GPE,
1816 sizeof(struct rte_flow_item_vxlan_gpe)),
1817 .next = NEXT(item_vxlan_gpe),
1820 [ITEM_VXLAN_GPE_VNI] = {
1822 .help = "VXLAN-GPE identifier",
1823 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1824 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1827 [ITEM_ARP_ETH_IPV4] = {
1828 .name = "arp_eth_ipv4",
1829 .help = "match ARP header for Ethernet/IPv4",
1830 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1831 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1832 .next = NEXT(item_arp_eth_ipv4),
1835 [ITEM_ARP_ETH_IPV4_SHA] = {
1837 .help = "sender hardware address",
1838 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1840 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1843 [ITEM_ARP_ETH_IPV4_SPA] = {
1845 .help = "sender IPv4 address",
1846 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1848 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1851 [ITEM_ARP_ETH_IPV4_THA] = {
1853 .help = "target hardware address",
1854 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1856 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1859 [ITEM_ARP_ETH_IPV4_TPA] = {
1861 .help = "target IPv4 address",
1862 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1864 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1869 .help = "match presence of any IPv6 extension header",
1870 .priv = PRIV_ITEM(IPV6_EXT,
1871 sizeof(struct rte_flow_item_ipv6_ext)),
1872 .next = NEXT(item_ipv6_ext),
1875 [ITEM_IPV6_EXT_NEXT_HDR] = {
1877 .help = "next header",
1878 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
1879 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
1884 .help = "match any ICMPv6 header",
1885 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
1886 .next = NEXT(item_icmp6),
1889 [ITEM_ICMP6_TYPE] = {
1891 .help = "ICMPv6 type",
1892 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1893 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1896 [ITEM_ICMP6_CODE] = {
1898 .help = "ICMPv6 code",
1899 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1900 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1903 [ITEM_ICMP6_ND_NS] = {
1904 .name = "icmp6_nd_ns",
1905 .help = "match ICMPv6 neighbor discovery solicitation",
1906 .priv = PRIV_ITEM(ICMP6_ND_NS,
1907 sizeof(struct rte_flow_item_icmp6_nd_ns)),
1908 .next = NEXT(item_icmp6_nd_ns),
1911 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
1912 .name = "target_addr",
1913 .help = "target address",
1914 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
1916 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
1919 [ITEM_ICMP6_ND_NA] = {
1920 .name = "icmp6_nd_na",
1921 .help = "match ICMPv6 neighbor discovery advertisement",
1922 .priv = PRIV_ITEM(ICMP6_ND_NA,
1923 sizeof(struct rte_flow_item_icmp6_nd_na)),
1924 .next = NEXT(item_icmp6_nd_na),
1927 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
1928 .name = "target_addr",
1929 .help = "target address",
1930 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
1932 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
1935 [ITEM_ICMP6_ND_OPT] = {
1936 .name = "icmp6_nd_opt",
1937 .help = "match presence of any ICMPv6 neighbor discovery"
1939 .priv = PRIV_ITEM(ICMP6_ND_OPT,
1940 sizeof(struct rte_flow_item_icmp6_nd_opt)),
1941 .next = NEXT(item_icmp6_nd_opt),
1944 [ITEM_ICMP6_ND_OPT_TYPE] = {
1946 .help = "ND option type",
1947 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
1949 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
1952 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
1953 .name = "icmp6_nd_opt_sla_eth",
1954 .help = "match ICMPv6 neighbor discovery source Ethernet"
1955 " link-layer address option",
1957 (ICMP6_ND_OPT_SLA_ETH,
1958 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
1959 .next = NEXT(item_icmp6_nd_opt_sla_eth),
1962 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
1964 .help = "source Ethernet LLA",
1965 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
1967 .args = ARGS(ARGS_ENTRY_HTON
1968 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
1970 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
1971 .name = "icmp6_nd_opt_tla_eth",
1972 .help = "match ICMPv6 neighbor discovery target Ethernet"
1973 " link-layer address option",
1975 (ICMP6_ND_OPT_TLA_ETH,
1976 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
1977 .next = NEXT(item_icmp6_nd_opt_tla_eth),
1980 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
1982 .help = "target Ethernet LLA",
1983 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
1985 .args = ARGS(ARGS_ENTRY_HTON
1986 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
1989 /* Validate/create actions. */
1992 .help = "submit a list of associated actions",
1993 .next = NEXT(next_action),
1998 .help = "specify next action",
1999 .next = NEXT(next_action),
2003 .help = "end list of actions",
2004 .priv = PRIV_ACTION(END, 0),
2009 .help = "no-op action",
2010 .priv = PRIV_ACTION(VOID, 0),
2011 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2014 [ACTION_PASSTHRU] = {
2016 .help = "let subsequent rule process matched packets",
2017 .priv = PRIV_ACTION(PASSTHRU, 0),
2018 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2023 .help = "redirect traffic to a given group",
2024 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2025 .next = NEXT(action_jump),
2028 [ACTION_JUMP_GROUP] = {
2030 .help = "group to redirect traffic to",
2031 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2032 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2033 .call = parse_vc_conf,
2037 .help = "attach 32 bit value to packets",
2038 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2039 .next = NEXT(action_mark),
2042 [ACTION_MARK_ID] = {
2044 .help = "32 bit value to return with packets",
2045 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2046 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2047 .call = parse_vc_conf,
2051 .help = "flag packets",
2052 .priv = PRIV_ACTION(FLAG, 0),
2053 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2058 .help = "assign packets to a given queue index",
2059 .priv = PRIV_ACTION(QUEUE,
2060 sizeof(struct rte_flow_action_queue)),
2061 .next = NEXT(action_queue),
2064 [ACTION_QUEUE_INDEX] = {
2066 .help = "queue index to use",
2067 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2068 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2069 .call = parse_vc_conf,
2073 .help = "drop packets (note: passthru has priority)",
2074 .priv = PRIV_ACTION(DROP, 0),
2075 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2080 .help = "enable counters for this rule",
2081 .priv = PRIV_ACTION(COUNT,
2082 sizeof(struct rte_flow_action_count)),
2083 .next = NEXT(action_count),
2086 [ACTION_COUNT_ID] = {
2087 .name = "identifier",
2088 .help = "counter identifier to use",
2089 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2090 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2091 .call = parse_vc_conf,
2093 [ACTION_COUNT_SHARED] = {
2095 .help = "shared counter",
2096 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2097 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2099 .call = parse_vc_conf,
2103 .help = "spread packets among several queues",
2104 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2105 .next = NEXT(action_rss),
2106 .call = parse_vc_action_rss,
2108 [ACTION_RSS_FUNC] = {
2110 .help = "RSS hash function to apply",
2111 .next = NEXT(action_rss,
2112 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2113 ACTION_RSS_FUNC_TOEPLITZ,
2114 ACTION_RSS_FUNC_SIMPLE_XOR)),
2116 [ACTION_RSS_FUNC_DEFAULT] = {
2118 .help = "default hash function",
2119 .call = parse_vc_action_rss_func,
2121 [ACTION_RSS_FUNC_TOEPLITZ] = {
2123 .help = "Toeplitz hash function",
2124 .call = parse_vc_action_rss_func,
2126 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2127 .name = "simple_xor",
2128 .help = "simple XOR hash function",
2129 .call = parse_vc_action_rss_func,
2131 [ACTION_RSS_LEVEL] = {
2133 .help = "encapsulation level for \"types\"",
2134 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2135 .args = ARGS(ARGS_ENTRY_ARB
2136 (offsetof(struct action_rss_data, conf) +
2137 offsetof(struct rte_flow_action_rss, level),
2138 sizeof(((struct rte_flow_action_rss *)0)->
2141 [ACTION_RSS_TYPES] = {
2143 .help = "specific RSS hash types",
2144 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2146 [ACTION_RSS_TYPE] = {
2148 .help = "RSS hash type",
2149 .call = parse_vc_action_rss_type,
2150 .comp = comp_vc_action_rss_type,
2152 [ACTION_RSS_KEY] = {
2154 .help = "RSS hash key",
2155 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
2156 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2158 (offsetof(struct action_rss_data, conf) +
2159 offsetof(struct rte_flow_action_rss, key_len),
2160 sizeof(((struct rte_flow_action_rss *)0)->
2162 ARGS_ENTRY(struct action_rss_data, key)),
2164 [ACTION_RSS_KEY_LEN] = {
2166 .help = "RSS hash key length in bytes",
2167 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2168 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2169 (offsetof(struct action_rss_data, conf) +
2170 offsetof(struct rte_flow_action_rss, key_len),
2171 sizeof(((struct rte_flow_action_rss *)0)->
2174 RSS_HASH_KEY_LENGTH)),
2176 [ACTION_RSS_QUEUES] = {
2178 .help = "queue indices to use",
2179 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2180 .call = parse_vc_conf,
2182 [ACTION_RSS_QUEUE] = {
2184 .help = "queue index",
2185 .call = parse_vc_action_rss_queue,
2186 .comp = comp_vc_action_rss_queue,
2190 .help = "direct traffic to physical function",
2191 .priv = PRIV_ACTION(PF, 0),
2192 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2197 .help = "direct traffic to a virtual function ID",
2198 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2199 .next = NEXT(action_vf),
2202 [ACTION_VF_ORIGINAL] = {
2204 .help = "use original VF ID if possible",
2205 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2206 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2208 .call = parse_vc_conf,
2213 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2214 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2215 .call = parse_vc_conf,
2217 [ACTION_PHY_PORT] = {
2219 .help = "direct packets to physical port index",
2220 .priv = PRIV_ACTION(PHY_PORT,
2221 sizeof(struct rte_flow_action_phy_port)),
2222 .next = NEXT(action_phy_port),
2225 [ACTION_PHY_PORT_ORIGINAL] = {
2227 .help = "use original port index if possible",
2228 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2229 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2231 .call = parse_vc_conf,
2233 [ACTION_PHY_PORT_INDEX] = {
2235 .help = "physical port index",
2236 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2237 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2239 .call = parse_vc_conf,
2241 [ACTION_PORT_ID] = {
2243 .help = "direct matching traffic to a given DPDK port ID",
2244 .priv = PRIV_ACTION(PORT_ID,
2245 sizeof(struct rte_flow_action_port_id)),
2246 .next = NEXT(action_port_id),
2249 [ACTION_PORT_ID_ORIGINAL] = {
2251 .help = "use original DPDK port ID if possible",
2252 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2253 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2255 .call = parse_vc_conf,
2257 [ACTION_PORT_ID_ID] = {
2259 .help = "DPDK port ID",
2260 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2261 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2262 .call = parse_vc_conf,
2266 .help = "meter the directed packets at given id",
2267 .priv = PRIV_ACTION(METER,
2268 sizeof(struct rte_flow_action_meter)),
2269 .next = NEXT(action_meter),
2272 [ACTION_METER_ID] = {
2274 .help = "meter id to use",
2275 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2276 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2277 .call = parse_vc_conf,
2279 [ACTION_OF_SET_MPLS_TTL] = {
2280 .name = "of_set_mpls_ttl",
2281 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2284 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2285 .next = NEXT(action_of_set_mpls_ttl),
2288 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2291 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2292 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2294 .call = parse_vc_conf,
2296 [ACTION_OF_DEC_MPLS_TTL] = {
2297 .name = "of_dec_mpls_ttl",
2298 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2299 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2300 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2303 [ACTION_OF_SET_NW_TTL] = {
2304 .name = "of_set_nw_ttl",
2305 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2308 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2309 .next = NEXT(action_of_set_nw_ttl),
2312 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2315 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2316 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2318 .call = parse_vc_conf,
2320 [ACTION_OF_DEC_NW_TTL] = {
2321 .name = "of_dec_nw_ttl",
2322 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2323 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2324 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2327 [ACTION_OF_COPY_TTL_OUT] = {
2328 .name = "of_copy_ttl_out",
2329 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2330 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2331 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2334 [ACTION_OF_COPY_TTL_IN] = {
2335 .name = "of_copy_ttl_in",
2336 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2337 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2338 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2341 [ACTION_OF_POP_VLAN] = {
2342 .name = "of_pop_vlan",
2343 .help = "OpenFlow's OFPAT_POP_VLAN",
2344 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2345 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2348 [ACTION_OF_PUSH_VLAN] = {
2349 .name = "of_push_vlan",
2350 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2353 sizeof(struct rte_flow_action_of_push_vlan)),
2354 .next = NEXT(action_of_push_vlan),
2357 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2358 .name = "ethertype",
2359 .help = "EtherType",
2360 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2361 .args = ARGS(ARGS_ENTRY_HTON
2362 (struct rte_flow_action_of_push_vlan,
2364 .call = parse_vc_conf,
2366 [ACTION_OF_SET_VLAN_VID] = {
2367 .name = "of_set_vlan_vid",
2368 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2371 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2372 .next = NEXT(action_of_set_vlan_vid),
2375 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2378 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2379 .args = ARGS(ARGS_ENTRY_HTON
2380 (struct rte_flow_action_of_set_vlan_vid,
2382 .call = parse_vc_conf,
2384 [ACTION_OF_SET_VLAN_PCP] = {
2385 .name = "of_set_vlan_pcp",
2386 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2389 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2390 .next = NEXT(action_of_set_vlan_pcp),
2393 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2395 .help = "VLAN priority",
2396 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2397 .args = ARGS(ARGS_ENTRY_HTON
2398 (struct rte_flow_action_of_set_vlan_pcp,
2400 .call = parse_vc_conf,
2402 [ACTION_OF_POP_MPLS] = {
2403 .name = "of_pop_mpls",
2404 .help = "OpenFlow's OFPAT_POP_MPLS",
2405 .priv = PRIV_ACTION(OF_POP_MPLS,
2406 sizeof(struct rte_flow_action_of_pop_mpls)),
2407 .next = NEXT(action_of_pop_mpls),
2410 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2411 .name = "ethertype",
2412 .help = "EtherType",
2413 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2414 .args = ARGS(ARGS_ENTRY_HTON
2415 (struct rte_flow_action_of_pop_mpls,
2417 .call = parse_vc_conf,
2419 [ACTION_OF_PUSH_MPLS] = {
2420 .name = "of_push_mpls",
2421 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2424 sizeof(struct rte_flow_action_of_push_mpls)),
2425 .next = NEXT(action_of_push_mpls),
2428 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2429 .name = "ethertype",
2430 .help = "EtherType",
2431 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2432 .args = ARGS(ARGS_ENTRY_HTON
2433 (struct rte_flow_action_of_push_mpls,
2435 .call = parse_vc_conf,
2437 [ACTION_VXLAN_ENCAP] = {
2438 .name = "vxlan_encap",
2439 .help = "VXLAN encapsulation, uses configuration set by \"set"
2441 .priv = PRIV_ACTION(VXLAN_ENCAP,
2442 sizeof(struct action_vxlan_encap_data)),
2443 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2444 .call = parse_vc_action_vxlan_encap,
2446 [ACTION_VXLAN_DECAP] = {
2447 .name = "vxlan_decap",
2448 .help = "Performs a decapsulation action by stripping all"
2449 " headers of the VXLAN tunnel network overlay from the"
2451 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2452 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2455 [ACTION_NVGRE_ENCAP] = {
2456 .name = "nvgre_encap",
2457 .help = "NVGRE encapsulation, uses configuration set by \"set"
2459 .priv = PRIV_ACTION(NVGRE_ENCAP,
2460 sizeof(struct action_nvgre_encap_data)),
2461 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2462 .call = parse_vc_action_nvgre_encap,
2464 [ACTION_NVGRE_DECAP] = {
2465 .name = "nvgre_decap",
2466 .help = "Performs a decapsulation action by stripping all"
2467 " headers of the NVGRE tunnel network overlay from the"
2469 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2470 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2475 /** Remove and return last entry from argument stack. */
2476 static const struct arg *
2477 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty, otherwise decrements args_num and
 * hands back the entry that was on top. */
2479 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2482 /** Add entry on top of the argument stack. */
2484 push_args(struct context *ctx, const struct arg *arg)
/* Refuse to push when the fixed-size stack is already full.
 * NOTE(review): the bail-out path appears to report an error to the
 * caller -- confirm the return convention against the full source. */
2486 if (ctx->args_num == CTX_STACK_SIZE)
2488 ctx->args[ctx->args_num++] = arg;
2492 /** Spread value into buffer according to bit-mask. */
/* Writes the low-order bits of "val" into dst at the positions selected by
 * arg->mask, one mask byte at a time. When dst is NULL the function can be
 * used to probe the mask (see parse_prefix, which calls it with NULL). */
2494 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2496 uint32_t i = arg->size;
/* Byte-order dependent traversal: on little-endian hosts the index "i"
 * appears to walk the mask from its last byte backwards (i -= sub). */
2504 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2513 unsigned int shift = 0;
2514 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan every bit position covered by this mask byte, low bit first. */
2516 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip positions the mask does not select. */
2517 if (!(arg->mask[i] & (1 << shift)))
/* Clear the destination bit, then copy in the next bit of "val". */
2522 *buf &= ~(1 << shift);
2523 *buf |= (val & 1) << shift;
2531 /** Compare a string with a partial one of a given length. */
/* Behaves like strcmp() except "partial" is only partial_len bytes long:
 * returns 0 when "full" equals that prefix exactly, non-zero otherwise. */
2533 strcmp_partial(const char *full, const char *partial, size_t partial_len)
/* Compare the common prefix first. */
2535 int r = strncmp(full, partial, partial_len);
/* Prefix matched and "full" has no extra characters: strings are equal. */
2539 if (strlen(full) <= partial_len)
/* Otherwise non-zero: "full" continues past the partial string. */
2541 return full[partial_len];
2545  * Parse a prefix length and generate a bit-mask.
2547  * Last argument (ctx->args) is retrieved to determine mask size, storage
2548  * location and whether the result must use network byte ordering.
2551 parse_prefix(struct context *ctx, const struct token *token,
2552 const char *str, unsigned int len,
2553 void *buf, unsigned int size)
2555 const struct arg *arg = pop_args(ctx);
/* conv[] maps a remainder of 0..8 prefix bits to the corresponding
 * MSB-first partial-byte mask (0x00, 0x80, 0xc0, ... 0xff). */
2556 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2563 /* Argument is expected. */
/* Parse the numeric prefix length; reject trailing garbage by requiring
 * the whole token to be consumed. */
2567 u = strtoumax(str, &end, 0);
2568 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments: probe with a NULL destination first, then fill
 * object and objmask through arg_entry_bf_fill(). */
2573 extra = arg_entry_bf_fill(NULL, 0, arg);
2582 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2583 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix must fit in the destination field (whole bytes plus an
 * optional partial byte). */
2590 if (bytes > size || bytes + !!extra > size)
2594 buf = (uint8_t *)ctx->object + arg->offset;
/* Layout of the generated mask differs by host byte order. */
2595 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2597 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2598 memset(buf, 0x00, size - bytes);
2600 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
2604 memset(buf, 0xff, bytes);
2605 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2607 ((uint8_t *)buf)[bytes] = conv[extra];
2610 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Restore the popped argument so error paths elsewhere see a
 * consistent stack. NOTE(review): confirm which paths re-push. */
2613 push_args(ctx, arg);
2617 /** Default parsing function for token name matching. */
/* Succeeds when "str" (of length len) is a prefix-match for token->name;
 * used as the fallback .call for tokens without a dedicated parser. */
2619 parse_default(struct context *ctx, const struct token *token,
2620 const char *str, unsigned int len,
2621 void *buf, unsigned int size)
2626 if (strcmp_partial(token->name, str, len))
2631 /** Parse flow command, initialize output buffer for subsequent tokens. */
2633 parse_init(struct context *ctx, const struct token *token,
2634 const char *str, unsigned int len,
2635 void *buf, unsigned int size)
2637 struct buffer *out = buf;
2639 /* Token name must match. */
2640 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2642 /* Nothing else to do if there is no buffer. */
2645 /* Make sure buffer is large enough. */
2646 if (size < sizeof(*out))
2648 /* Initialize buffer. */
/* Zero the structured header, then fill the remaining scratch area with a
 * 0x22 pattern -- presumably a poison value to catch use of
 * uninitialized tail space (TODO confirm). */
2649 memset(out, 0x00, sizeof(*out));
2650 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2653 ctx->objmask = NULL;
2657 /** Parse tokens for validate/create commands. */
/* Shared entry point for the whole validate/create grammar: records the
 * command on first call, handles attribute tokens, and appends pattern
 * items / actions for subsequent tokens. Items and actions grow upward
 * from the buffer header while their spec/conf payloads grow downward
 * from the end of the buffer (out->args.vc.data). */
2659 parse_vc(struct context *ctx, const struct token *token,
2660 const char *str, unsigned int len,
2661 void *buf, unsigned int size)
2663 struct buffer *out = buf;
2667 /* Token name must match. */
2668 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2670 /* Nothing else to do if there is no buffer. */
/* First token for this buffer: record which command is being built and
 * point the downward-growing data region at the buffer's end. */
2673 if (!out->command) {
2674 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2676 if (sizeof(*out) > size)
2678 out->command = ctx->curr;
2681 ctx->objmask = NULL;
2682 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the flow attributes. */
2686 ctx->object = &out->args.vc.attr;
2687 ctx->objmask = NULL;
2688 switch (ctx->curr) {
2693 out->args.vc.attr.ingress = 1;
2696 out->args.vc.attr.egress = 1;
2699 out->args.vc.attr.transfer = 1;
/* "pattern" token: start the item array right after the header. */
2702 out->args.vc.pattern =
2703 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2705 ctx->object = out->args.vc.pattern;
2706 ctx->objmask = NULL;
/* "actions" token: the action array starts after the last item. */
2709 out->args.vc.actions =
2710 (void *)RTE_ALIGN_CEIL((uintptr_t)
2711 (out->args.vc.pattern +
2712 out->args.vc.pattern_n),
2714 ctx->object = out->args.vc.actions;
2715 ctx->objmask = NULL;
/* Still in the pattern section: append a new item. Its spec/last/mask
 * payload is carved from the top of the data region. */
2722 if (!out->args.vc.actions) {
2723 const struct parse_item_priv *priv = token->priv;
2724 struct rte_flow_item *item =
2725 out->args.vc.pattern + out->args.vc.pattern_n;
2727 data_size = priv->size * 3; /* spec, last, mask */
2728 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2729 (out->args.vc.data - data_size),
/* Fail when the upward array and downward data would collide. */
2731 if ((uint8_t *)item + sizeof(*item) > data)
2733 *item = (struct rte_flow_item){
2736 ++out->args.vc.pattern_n;
2738 ctx->objmask = NULL;
/* Actions section: append a new action with its configuration payload. */
2740 const struct parse_action_priv *priv = token->priv;
2741 struct rte_flow_action *action =
2742 out->args.vc.actions + out->args.vc.actions_n;
2744 data_size = priv->size; /* configuration */
2745 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2746 (out->args.vc.data - data_size),
2748 if ((uint8_t *)action + sizeof(*action) > data)
2750 *action = (struct rte_flow_action){
2752 .conf = data_size ? data : NULL,
2754 ++out->args.vc.actions_n;
2755 ctx->object = action;
2756 ctx->objmask = NULL;
/* Zero the freshly reserved payload and move the data watermark down. */
2758 memset(data, 0, data_size);
2759 out->args.vc.data = data;
2760 ctx->objdata = data_size;
2764 /** Parse pattern item parameter type. */
/* Handles the "is"/"spec"/"last"/"prefix"/"mask" qualifiers after a
 * pattern item field, selecting which third of the item's payload
 * (spec, last, mask -- see parse_vc) the following value is written to. */
2766 parse_vc_spec(struct context *ctx, const struct token *token,
2767 const char *str, unsigned int len,
2768 void *buf, unsigned int size)
2770 struct buffer *out = buf;
2771 struct rte_flow_item *item;
2777 /* Token name must match. */
2778 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2780 /* Parse parameter types. */
2781 switch (ctx->curr) {
2782 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2788 case ITEM_PARAM_SPEC:
2791 case ITEM_PARAM_LAST:
2794 case ITEM_PARAM_PREFIX:
2795 /* Modify next token to expect a prefix. */
2796 if (ctx->next_num < 2)
2798 ctx->next[ctx->next_num - 2] = prefix;
2800 case ITEM_PARAM_MASK:
2806 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach the parameter to. */
2809 if (!out->args.vc.pattern_n)
2811 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2812 data_size = ctx->objdata / 3; /* spec, last, mask */
2813 /* Point to selected object. */
2814 ctx->object = out->args.vc.data + (data_size * index);
/* Non-mask parameters also keep objmask pointing at the mask third so
 * the parsed bits get mirrored into the item's mask. */
2816 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2817 item->mask = ctx->objmask;
2819 ctx->objmask = NULL;
2820 /* Update relevant item pointer. */
2821 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2826 /** Parse action configuration field. */
/* Generic .call handler for action parameter tokens: points ctx->object at
 * the current action's configuration payload so the following value token
 * writes into it. */
2828 parse_vc_conf(struct context *ctx, const struct token *token,
2829 const char *str, unsigned int len,
2830 void *buf, unsigned int size)
2832 struct buffer *out = buf;
2835 /* Token name must match. */
2836 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2838 /* Nothing else to do if there is no buffer. */
2841 /* Point to selected object. */
2842 ctx->object = out->args.vc.data;
2843 ctx->objmask = NULL;
2847 /** Parse RSS action. */
/* Appends an RSS action via parse_vc(), then fills its action_rss_data
 * payload with defaults: default hash function, testpmd's default key,
 * and one entry per configured Rx queue (capped at ACTION_RSS_QUEUE_NUM). */
2849 parse_vc_action_rss(struct context *ctx, const struct token *token,
2850 const char *str, unsigned int len,
2851 void *buf, unsigned int size)
2853 struct buffer *out = buf;
2854 struct rte_flow_action *action;
2855 struct action_rss_data *action_rss_data;
/* Delegate the generic action bookkeeping first. */
2859 ret = parse_vc(ctx, token, str, len, buf, size);
2862 /* Nothing else to do if there is no buffer. */
2865 if (!out->args.vc.actions_n)
2867 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
2868 /* Point to selected object. */
2869 ctx->object = out->args.vc.data;
2870 ctx->objmask = NULL;
2871 /* Set up default configuration. */
2872 action_rss_data = ctx->object;
2873 *action_rss_data = (struct action_rss_data){
2874 .conf = (struct rte_flow_action_rss){
2875 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2878 .key_len = sizeof(action_rss_data->key),
2879 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
2880 .key = action_rss_data->key,
2881 .queue = action_rss_data->queue,
2883 .key = "testpmd's default RSS hash key, "
2884 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
2887 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
2888 action_rss_data->queue[i] = i;
/* For a concrete port, clamp the key length to what the device
 * actually supports. */
2889 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
2890 ctx->port != (portid_t)RTE_PORT_ALL) {
2891 struct rte_eth_dev_info info;
2893 rte_eth_dev_info_get(ctx->port, &info);
2894 action_rss_data->conf.key_len =
2895 RTE_MIN(sizeof(action_rss_data->key),
2896 info.hash_key_size);
2898 action->conf = &action_rss_data->conf;
2903 * Parse func field for RSS action.
2905 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
2906 * ACTION_RSS_FUNC_* index that called this function.
2909 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
2910 const char *str, unsigned int len,
2911 void *buf, unsigned int size)
2913 struct action_rss_data *action_rss_data;
2914 enum rte_eth_hash_function func;
2918 /* Token name must match. */
2919 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the grammar token that routed us here to its hash function. */
2921 switch (ctx->curr) {
2922 case ACTION_RSS_FUNC_DEFAULT:
2923 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
2925 case ACTION_RSS_FUNC_TOEPLITZ:
2926 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
2928 case ACTION_RSS_FUNC_SIMPLE_XOR:
2929 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object was set to the RSS payload by parse_vc_action_rss(). */
2936 action_rss_data = ctx->object;
2937 action_rss_data->conf.func = func;
2942 * Parse type field for RSS action.
2944 * Valid tokens are type field names and the "end" token.
/* The high 16 bits of ctx->objdata flag whether at least one type has been
 * parsed; the first type token clears conf.types before OR-ing in flags. */
2947 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
2948 const char *str, unsigned int len,
2949 void *buf, unsigned int size)
2951 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
2952 struct action_rss_data *action_rss_data;
2958 if (ctx->curr != ACTION_RSS_TYPE)
/* First type in the list: reset the accumulated type flags. */
2960 if (!(ctx->objdata >> 16) && ctx->object) {
2961 action_rss_data = ctx->object;
2962 action_rss_data->conf.types = 0;
/* "end" terminates the list; clear the started-flag in objdata. */
2964 if (!strcmp_partial("end", str, len)) {
2965 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
2968 for (i = 0; rss_type_table[i].str; ++i)
2969 if (!strcmp_partial(rss_type_table[i].str, str, len))
2971 if (!rss_type_table[i].str)
/* Mark the list as started and queue another ACTION_RSS_TYPE token. */
2973 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
2975 if (ctx->next_num == RTE_DIM(ctx->next))
2977 ctx->next[ctx->next_num++] = next;
2980 action_rss_data = ctx->object;
2981 action_rss_data->conf.types |= rss_type_table[i].rss_type;
2986 * Parse queue field for RSS action.
2988 * Valid tokens are queue indices and the "end" token.
/* The high 16 bits of ctx->objdata carry the running queue count between
 * successive invocations of this handler. */
2991 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
2992 const char *str, unsigned int len,
2993 void *buf, unsigned int size)
2995 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
2996 struct action_rss_data *action_rss_data;
3003 if (ctx->curr != ACTION_RSS_QUEUE)
/* Recover how many queues have been parsed so far. */
3005 i = ctx->objdata >> 16;
3006 if (!strcmp_partial("end", str, len)) {
3007 ctx->objdata &= 0xffff;
/* Enforce the fixed capacity of the queue array. */
3010 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an arbitrary-offset arg descriptor for queue[i], then let the
 * generic integer parser fill it in. */
3013 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3014 i * sizeof(action_rss_data->queue[i]),
3015 sizeof(action_rss_data->queue[i]))))
3017 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count and queue another ACTION_RSS_QUEUE token. */
3023 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3025 if (ctx->next_num == RTE_DIM(ctx->next))
3027 ctx->next[ctx->next_num++] = next;
/* Finalize: record the count; an empty list gets a NULL queue pointer. */
3031 action_rss_data = ctx->object;
3032 action_rss_data->conf.queue_num = i;
3033 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3037 /** Parse VXLAN encap action. */
/* Appends a VXLAN_ENCAP action via parse_vc() and builds its encapsulation
 * definition (ETH / [VLAN] / IPv4-or-IPv6 / UDP / VXLAN / END item chain)
 * from the global vxlan_encap_conf set by the "set vxlan" command. */
3039 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3040 const char *str, unsigned int len,
3041 void *buf, unsigned int size)
3043 struct buffer *out = buf;
3044 struct rte_flow_action *action;
3045 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Generic action bookkeeping first. */
3048 ret = parse_vc(ctx, token, str, len, buf, size);
3051 /* Nothing else to do if there is no buffer. */
3054 if (!out->args.vc.actions_n)
3056 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3057 /* Point to selected object. */
3058 ctx->object = out->args.vc.data;
3059 ctx->objmask = NULL;
3060 /* Set up default configuration. */
3061 action_vxlan_encap_data = ctx->object;
3062 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3063 .conf = (struct rte_flow_action_vxlan_encap){
3064 .definition = action_vxlan_encap_data->items,
/* Item chain describing the encapsulation headers; spec pointers
 * reference the sibling fields of the same payload struct. */
3068 .type = RTE_FLOW_ITEM_TYPE_ETH,
3069 .spec = &action_vxlan_encap_data->item_eth,
3070 .mask = &rte_flow_item_eth_mask,
3073 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3074 .spec = &action_vxlan_encap_data->item_vlan,
3075 .mask = &rte_flow_item_vlan_mask,
3078 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3079 .spec = &action_vxlan_encap_data->item_ipv4,
3080 .mask = &rte_flow_item_ipv4_mask,
3083 .type = RTE_FLOW_ITEM_TYPE_UDP,
3084 .spec = &action_vxlan_encap_data->item_udp,
3085 .mask = &rte_flow_item_udp_mask,
3088 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3089 .spec = &action_vxlan_encap_data->item_vxlan,
3090 .mask = &rte_flow_item_vxlan_mask,
3093 .type = RTE_FLOW_ITEM_TYPE_END,
/* Header field values come from the global vxlan_encap_conf. */
3098 .tci = vxlan_encap_conf.vlan_tci,
3102 .src_addr = vxlan_encap_conf.ipv4_src,
3103 .dst_addr = vxlan_encap_conf.ipv4_dst,
3106 .src_port = vxlan_encap_conf.udp_src,
3107 .dst_port = vxlan_encap_conf.udp_dst,
3109 .item_vxlan.flags = 0,
3111 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3112 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3113 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3114 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
/* When IPv6 is selected, replace the IPv4 item (slot 2) with IPv6. */
3115 if (!vxlan_encap_conf.select_ipv4) {
3116 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3117 &vxlan_encap_conf.ipv6_src,
3118 sizeof(vxlan_encap_conf.ipv6_src));
3119 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3120 &vxlan_encap_conf.ipv6_dst,
3121 sizeof(vxlan_encap_conf.ipv6_dst));
3122 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3123 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3124 .spec = &action_vxlan_encap_data->item_ipv6,
3125 .mask = &rte_flow_item_ipv6_mask,
/* Without VLAN tagging, neutralize the VLAN item (slot 1). */
3128 if (!vxlan_encap_conf.select_vlan)
3129 action_vxlan_encap_data->items[1].type =
3130 RTE_FLOW_ITEM_TYPE_VOID;
3131 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3132 RTE_DIM(vxlan_encap_conf.vni));
3133 action->conf = &action_vxlan_encap_data->conf;
3137 /** Parse NVGRE encap action. */
/*
 * Mirror of parse_vc_action_vxlan_encap() for NVGRE: builds a default
 * ETH/VLAN/IPv4/NVGRE header stack from the global nvgre_encap_conf and
 * attaches it to the last parsed action. select_* flags swap IPv4 for
 * IPv6 and void the VLAN item.
 * NOTE(review): lines are elided in this excerpt; comments cover visible
 * code only.
 */
3139 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3140 const char *str, unsigned int len,
3141 void *buf, unsigned int size)
3143 struct buffer *out = buf;
3144 struct rte_flow_action *action;
3145 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Defer generic action-token handling (name match, slot allocation). */
3148 ret = parse_vc(ctx, token, str, len, buf, size);
3151 /* Nothing else to do if there is no buffer. */
3154 if (!out->args.vc.actions_n)
/* Work on the action entry parse_vc() just appended. */
3156 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3157 /* Point to selected object. */
3158 ctx->object = out->args.vc.data;
3159 ctx->objmask = NULL;
3160 /* Set up default configuration. */
3161 action_nvgre_encap_data = ctx->object;
3162 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3163 .conf = (struct rte_flow_action_nvgre_encap){
3164 .definition = action_nvgre_encap_data->items,
/* Fixed item sequence describing the encapsulation headers. */
3168 .type = RTE_FLOW_ITEM_TYPE_ETH,
3169 .spec = &action_nvgre_encap_data->item_eth,
3170 .mask = &rte_flow_item_eth_mask,
3173 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3174 .spec = &action_nvgre_encap_data->item_vlan,
3175 .mask = &rte_flow_item_vlan_mask,
3178 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3179 .spec = &action_nvgre_encap_data->item_ipv4,
3180 .mask = &rte_flow_item_ipv4_mask,
3183 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3184 .spec = &action_nvgre_encap_data->item_nvgre,
3185 .mask = &rte_flow_item_nvgre_mask,
3188 .type = RTE_FLOW_ITEM_TYPE_END,
/* Default header field values taken from the global nvgre_encap_conf. */
3193 .tci = nvgre_encap_conf.vlan_tci,
3197 .src_addr = nvgre_encap_conf.ipv4_src,
3198 .dst_addr = nvgre_encap_conf.ipv4_dst,
3200 .item_nvgre.flow_id = 0,
/* MAC addresses cannot appear in the initializer; copy them in. */
3202 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3203 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3204 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3205 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
/* Replace the IPv4 item (index 2) with IPv6 when IPv6 is selected. */
3206 if (!nvgre_encap_conf.select_ipv4) {
3207 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3208 &nvgre_encap_conf.ipv6_src,
3209 sizeof(nvgre_encap_conf.ipv6_src));
3210 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3211 &nvgre_encap_conf.ipv6_dst,
3212 sizeof(nvgre_encap_conf.ipv6_dst));
3213 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3214 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3215 .spec = &action_nvgre_encap_data->item_ipv6,
3216 .mask = &rte_flow_item_ipv6_mask,
/* Neutralize the VLAN item (index 1) when no VLAN tag is requested. */
3219 if (!nvgre_encap_conf.select_vlan)
3220 action_nvgre_encap_data->items[1].type =
3221 RTE_FLOW_ITEM_TYPE_VOID;
3222 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3223 RTE_DIM(nvgre_encap_conf.tni));
3224 action->conf = &action_nvgre_encap_data->conf;
3228 /** Parse tokens for destroy command. */
/*
 * Two-phase parser: the first call (out->command unset) initializes the
 * buffer and points the rule-ID array just past the buffer header; each
 * subsequent call appends one rule ID slot, bounds-checked against the
 * buffer size.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3230 parse_destroy(struct context *ctx, const struct token *token,
3231 const char *str, unsigned int len,
3232 void *buf, unsigned int size)
3234 struct buffer *out = buf;
3236 /* Token name must match. */
3237 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3239 /* Nothing else to do if there is no buffer. */
/* First pass: set up the command and the rule-ID storage area. */
3242 if (!out->command) {
3243 if (ctx->curr != DESTROY)
3245 if (sizeof(*out) > size)
3247 out->command = ctx->curr;
3250 ctx->objmask = NULL;
/* Rule IDs are stored aligned immediately after the buffer header. */
3251 out->args.destroy.rule =
3252 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent passes: reserve one more rule-ID slot if room remains. */
3256 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
3257 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
3260 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
3261 ctx->objmask = NULL;
3265 /** Parse tokens for flush command. */
/*
 * Initializes the output buffer for the flush command on first call;
 * flush takes no per-rule arguments, so there is no append phase.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3267 parse_flush(struct context *ctx, const struct token *token,
3268 const char *str, unsigned int len,
3269 void *buf, unsigned int size)
3271 struct buffer *out = buf;
3273 /* Token name must match. */
3274 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3276 /* Nothing else to do if there is no buffer. */
3279 if (!out->command) {
3280 if (ctx->curr != FLUSH)
3282 if (sizeof(*out) > size)
3284 out->command = ctx->curr;
3287 ctx->objmask = NULL;
3292 /** Parse tokens for query command. */
/*
 * Initializes the output buffer for the query command on first call;
 * rule ID and action name are filled in by later tokens.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3294 parse_query(struct context *ctx, const struct token *token,
3295 const char *str, unsigned int len,
3296 void *buf, unsigned int size)
3298 struct buffer *out = buf;
3300 /* Token name must match. */
3301 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3303 /* Nothing else to do if there is no buffer. */
3306 if (!out->command) {
3307 if (ctx->curr != QUERY)
3309 if (sizeof(*out) > size)
3311 out->command = ctx->curr;
3314 ctx->objmask = NULL;
3319 /** Parse action names. */
/*
 * Matches str against the names of all tokens reachable from next_action[]
 * and stores the matched action's private data at the offset described by
 * the argument popped from the context stack. The argument is pushed back
 * on failure so a later parser can retry.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3321 parse_action(struct context *ctx, const struct token *token,
3322 const char *str, unsigned int len,
3323 void *buf, unsigned int size)
3325 struct buffer *out = buf;
3326 const struct arg *arg = pop_args(ctx);
3330 /* Argument is expected. */
3333 /* Parse action name. */
3334 for (i = 0; next_action[i]; ++i) {
3335 const struct parse_action_priv *priv;
3337 token = &token_list[next_action[i]];
/* Prefix comparison limited to len characters. */
3338 if (strcmp_partial(token->name, str, len))
3344 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument for another parser to consume. */
3350 push_args(ctx, arg);
3354 /** Parse tokens for list command. */
/*
 * Same two-phase shape as parse_destroy(): first call initializes the
 * buffer and points the group-ID array past the header; each subsequent
 * call appends one group ID slot, bounds-checked against the buffer size.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3356 parse_list(struct context *ctx, const struct token *token,
3357 const char *str, unsigned int len,
3358 void *buf, unsigned int size)
3360 struct buffer *out = buf;
3362 /* Token name must match. */
3363 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3365 /* Nothing else to do if there is no buffer. */
/* First pass: set up the command and the group-ID storage area. */
3368 if (!out->command) {
3369 if (ctx->curr != LIST)
3371 if (sizeof(*out) > size)
3373 out->command = ctx->curr;
3376 ctx->objmask = NULL;
/* Group IDs are stored aligned immediately after the buffer header. */
3377 out->args.list.group =
3378 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent passes: reserve one more group-ID slot if room remains. */
3382 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
3383 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
3386 ctx->object = out->args.list.group + out->args.list.group_n++;
3387 ctx->objmask = NULL;
3391 /** Parse tokens for isolate command. */
/*
 * Initializes the output buffer for the isolate command on first call;
 * the enable/disable flag is filled in by a later token.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3393 parse_isolate(struct context *ctx, const struct token *token,
3394 const char *str, unsigned int len,
3395 void *buf, unsigned int size)
3397 struct buffer *out = buf;
3399 /* Token name must match. */
3400 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3402 /* Nothing else to do if there is no buffer. */
3405 if (!out->command) {
3406 if (ctx->curr != ISOLATE)
3408 if (sizeof(*out) > size)
3410 out->command = ctx->curr;
3413 ctx->objmask = NULL;
* Parse signed/unsigned integers 8 to 64-bit long.
/*
 * Converts str with strtoimax()/strtoumax() according to the popped
 * argument's signedness, range-checks it against arg->min/arg->max, then
 * stores it at arg->offset in ctx->object with the width given by
 * arg->size (1/2/3/4/8 bytes), byte-swapping when arg->hton is set.
 * The same value path is repeated for ctx->objmask when present.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3421 * Last argument (ctx->args) is retrieved to determine integer type and
3425 parse_int(struct context *ctx, const struct token *token,
3426 const char *str, unsigned int len,
3427 void *buf, unsigned int size)
3429 const struct arg *arg = pop_args(ctx);
3434 /* Argument is expected. */
/* Signed and unsigned parses share one uintmax_t holder. */
3439 (uintmax_t)strtoimax(str, &end, 0) :
3440 strtoumax(str, &end, 0);
/* Reject partial conversions and out-of-range (ERANGE) input. */
3441 if (errno || (size_t)(end - str) != len)
3444 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
3445 (intmax_t)u > (intmax_t)arg->max)) ||
3446 (!arg->sign && (u < arg->min || u > arg->max))))
/* Sub-byte fields go through the bit-field fill helper instead. */
3451 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
3452 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3456 buf = (uint8_t *)ctx->object + arg->offset;
/* Store with the destination width; honor arg->hton byte order. */
3460 case sizeof(uint8_t):
3461 *(uint8_t *)buf = u;
3463 case sizeof(uint16_t):
3464 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields need explicit per-byte stores on either endianness. */
3466 case sizeof(uint8_t [3]):
3467 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3469 ((uint8_t *)buf)[0] = u;
3470 ((uint8_t *)buf)[1] = u >> 8;
3471 ((uint8_t *)buf)[2] = u >> 16;
3475 ((uint8_t *)buf)[0] = u >> 16;
3476 ((uint8_t *)buf)[1] = u >> 8;
3477 ((uint8_t *)buf)[2] = u;
3479 case sizeof(uint32_t):
3480 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
3482 case sizeof(uint64_t):
3483 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is selected. */
3488 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
3490 buf = (uint8_t *)ctx->objmask + arg->offset;
3495 push_args(ctx, arg);
* Three arguments (ctx->args) are retrieved from the stack to store data,
3503 * its actual length and address (in that order).
/*
 * Copies the raw string into the data argument's storage (zero-padding the
 * remainder), writes its length via parse_int(), and optionally records the
 * data field's address in a third argument. Arguments are popped in data,
 * length, address order and pushed back in reverse on failure.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3506 parse_string(struct context *ctx, const struct token *token,
3507 const char *str, unsigned int len,
3508 void *buf, unsigned int size)
3510 const struct arg *arg_data = pop_args(ctx);
3511 const struct arg *arg_len = pop_args(ctx);
3512 const struct arg *arg_addr = pop_args(ctx);
3513 char tmp[16]; /* Ought to be enough. */
3516 /* Arguments are expected. */
/* Partial pops are undone so the stack stays consistent. */
3520 push_args(ctx, arg_data);
3524 push_args(ctx, arg_len);
3525 push_args(ctx, arg_data);
3528 size = arg_data->size;
3529 /* Bit-mask fill is not supported. */
3530 if (arg_data->mask || size < len)
3534 /* Let parse_int() fill length information first. */
3535 ret = snprintf(tmp, sizeof(tmp), "%u", len);
3538 push_args(ctx, arg_len);
3539 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
3544 buf = (uint8_t *)ctx->object + arg_data->offset;
3545 /* Output buffer is not necessarily NUL-terminated. */
3546 memcpy(buf, str, len);
/* Zero-fill the unused tail of the destination field. */
3547 memset((uint8_t *)buf + len, 0x00, size - len);
3549 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
3550 /* Save address if requested. */
3551 if (arg_addr->size) {
3552 memcpy((uint8_t *)ctx->object + arg_addr->offset,
3554 (uint8_t *)ctx->object + arg_data->offset
/* Mirror the address into the mask object as well. */
3558 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
3560 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in push order. */
3566 push_args(ctx, arg_addr);
3567 push_args(ctx, arg_len);
3568 push_args(ctx, arg_data);
* Parse a MAC address.
/*
 * Parses str with cmdline_parse_etheraddr() into a temporary, then copies
 * it to the popped argument's offset in ctx->object; the mask, when
 * selected, is filled with all-ones. Only network byte order is handled.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3575 * Last argument (ctx->args) is retrieved to determine storage size and
3579 parse_mac_addr(struct context *ctx, const struct token *token,
3580 const char *str, unsigned int len,
3581 void *buf, unsigned int size)
3583 const struct arg *arg = pop_args(ctx);
3584 struct ether_addr tmp;
3588 /* Argument is expected. */
3592 /* Bit-mask fill is not supported. */
3593 if (arg->mask || size != sizeof(tmp))
3595 /* Only network endian is supported. */
3598 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must be consumed to count as a match. */
3599 if (ret < 0 || (unsigned int)ret != len)
3603 buf = (uint8_t *)ctx->object + arg->offset;
3604 memcpy(buf, &tmp, size);
3606 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3609 push_args(ctx, arg);
* Parse an IPv4 address.
/*
 * Parses dotted-quad notation with inet_pton(AF_INET) on a NUL-terminated
 * copy of the token; on failure falls back to parse_int() so plain integer
 * notation is also accepted. Result is stored at the popped argument's
 * offset; the mask, when selected, is filled with all-ones.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3616 * Last argument (ctx->args) is retrieved to determine storage size and
3620 parse_ipv4_addr(struct context *ctx, const struct token *token,
3621 const char *str, unsigned int len,
3622 void *buf, unsigned int size)
3624 const struct arg *arg = pop_args(ctx);
3629 /* Argument is expected. */
3633 /* Bit-mask fill is not supported. */
3634 if (arg->mask || size != sizeof(tmp))
3636 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; str is only len bytes. */
3639 memcpy(str2, str, len);
3641 ret = inet_pton(AF_INET, str2, &tmp);
3643 /* Attempt integer parsing. */
3644 push_args(ctx, arg);
3645 return parse_int(ctx, token, str, len, buf, size);
3649 buf = (uint8_t *)ctx->object + arg->offset;
3650 memcpy(buf, &tmp, size);
3652 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3655 push_args(ctx, arg);
* Parse an IPv6 address.
/*
 * Parses the token with inet_pton(AF_INET6) on a NUL-terminated copy and
 * stores the 16-byte result at the popped argument's offset; the mask,
 * when selected, is filled with all-ones. Unlike parse_ipv4_addr() there
 * is no integer fallback.
 * NOTE(review): error paths and returns are elided in this excerpt.
 */
3662 * Last argument (ctx->args) is retrieved to determine storage size and
3666 parse_ipv6_addr(struct context *ctx, const struct token *token,
3667 const char *str, unsigned int len,
3668 void *buf, unsigned int size)
3670 const struct arg *arg = pop_args(ctx);
3672 struct in6_addr tmp;
3676 /* Argument is expected. */
3680 /* Bit-mask fill is not supported. */
3681 if (arg->mask || size != sizeof(tmp))
3683 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; str is only len bytes. */
3686 memcpy(str2, str, len);
3688 ret = inet_pton(AF_INET6, str2, &tmp);
3693 buf = (uint8_t *)ctx->object + arg->offset;
3694 memcpy(buf, &tmp, size);
3696 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3699 push_args(ctx, arg);
3703 /** Boolean values (even indices stand for false). */
3704 static const char *const boolean_name[] = {
* Parse a boolean value.
/*
 * Looks the token up in boolean_name[] — even indices mean false, odd
 * indices mean true — and rewrites it as "0" or "1" before delegating the
 * actual store to parse_int(). Unrecognized tokens are passed through to
 * parse_int() unchanged so numeric booleans also work.
 * NOTE(review): error paths are elided in this excerpt.
 */
3716 * Last argument (ctx->args) is retrieved to determine storage size and
3720 parse_boolean(struct context *ctx, const struct token *token,
3721 const char *str, unsigned int len,
3722 void *buf, unsigned int size)
3724 const struct arg *arg = pop_args(ctx);
3728 /* Argument is expected. */
3731 for (i = 0; boolean_name[i]; ++i)
3732 if (!strcmp_partial(boolean_name[i], str, len))
3734 /* Process token as integer. */
/* Parity of the matched index selects the canonical "1"/"0" literal. */
3735 if (boolean_name[i])
3736 str = i & 1 ? "1" : "0";
3737 push_args(ctx, arg);
3738 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted literal's. */
3739 return ret > 0 ? (int)len : ret;
3742 /** Parse port and update context. */
/*
 * Parses the port number through parse_int() into a scratch buffer (or the
 * caller's buffer when provided — elided here) and records it in ctx->port
 * so later completion callbacks know which port is being configured.
 * NOTE(review): lines are elided in this excerpt.
 */
3744 parse_port(struct context *ctx, const struct token *token,
3745 const char *str, unsigned int len,
3746 void *buf, unsigned int size)
/* Compound literal provides scratch storage when buf is absent. */
3748 struct buffer *out = &(struct buffer){ .port = 0 };
3756 ctx->objmask = NULL;
3757 size = sizeof(*out);
3759 ret = parse_int(ctx, token, str, len, out, size);
3761 ctx->port = out->port;
3767 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest. */
3769 comp_none(struct context *ctx, const struct token *token,
3770 unsigned int ent, char *buf, unsigned int size)
3780 /** Complete boolean values. */
/*
 * With buf set, writes entry number ent from boolean_name[]; otherwise
 * (elided here) the loop's count is returned as the number of entries.
 */
3782 comp_boolean(struct context *ctx, const struct token *token,
3783 unsigned int ent, char *buf, unsigned int size)
3789 for (i = 0; boolean_name[i]; ++i)
3790 if (buf && i == ent)
3791 return snprintf(buf, size, "%s", boolean_name[i]);
3797 /** Complete action names. */
/*
 * With buf set, writes the name of entry ent from next_action[]; otherwise
 * (elided here) returns the number of available action names.
 */
3799 comp_action(struct context *ctx, const struct token *token,
3800 unsigned int ent, char *buf, unsigned int size)
3806 for (i = 0; next_action[i]; ++i)
3807 if (buf && i == ent)
3808 return snprintf(buf, size, "%s",
3809 token_list[next_action[i]].name);
3815 /** Complete available ports. */
/*
 * Iterates over attached ethdev ports; with buf set, writes the port
 * number of entry ent, otherwise (elided here) returns the port count.
 */
3817 comp_port(struct context *ctx, const struct token *token,
3818 unsigned int ent, char *buf, unsigned int size)
3825 RTE_ETH_FOREACH_DEV(p) {
3826 if (buf && i == ent)
3827 return snprintf(buf, size, "%u", p);
3835 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port saved in ctx->port; with buf set, writes
 * the ID of entry ent, otherwise (elided here) returns the rule count.
 * Bails out early when the context port is invalid or RTE_PORT_ALL.
 */
3837 comp_rule_id(struct context *ctx, const struct token *token,
3838 unsigned int ent, char *buf, unsigned int size)
3841 struct rte_port *port;
3842 struct port_flow *pf;
3845 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
3846 ctx->port == (portid_t)RTE_PORT_ALL)
3848 port = &ports[ctx->port];
3849 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3850 if (buf && i == ent)
3851 return snprintf(buf, size, "%u", pf->id);
3859 /** Complete type field for RSS action. */
/*
 * Suggests each entry of rss_type_table[] in turn, then the literal "end"
 * terminator as the final completion entry.
 */
3861 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
3862 unsigned int ent, char *buf, unsigned int size)
3868 for (i = 0; rss_type_table[i].str; ++i)
3873 return snprintf(buf, size, "%s", rss_type_table[ent].str)�;
3875 return snprintf(buf, size, "end");
3879 /** Complete queue field for RSS action. */
/*
 * Suggests queue indices as plain numbers, then the literal "end"
 * terminator as the final completion entry.
 */
3881 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
3882 unsigned int ent, char *buf, unsigned int size)
3889 return snprintf(buf, size, "%u", ent);
3891 return snprintf(buf, size, "end");
3895 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
3896 static struct context cmd_flow_context;
3898 /** Global parser instance (cmdline API). */
/* Forward declaration; the instance itself is defined at end of file. */
3899 cmdline_parse_inst_t cmd_flow;
3901 /** Initialize context. */
/* Resets the shared parser state before a new command line is processed. */
3903 cmd_flow_context_init(struct context *ctx)
3905 /* A full memset() is not necessary. */
3915 ctx->objmask = NULL;
3918 /** Parse a token (cmdline API). */
/*
 * Entry point called by librte_cmdline for each token of the command line.
 * Extracts the next whitespace/'#'-delimited token from src, tries every
 * candidate token on the current next-list, then pushes the matched
 * token's successors and arguments onto the context stacks.
 * NOTE(review): error paths, returns and braces are elided in this
 * excerpt; comments cover visible code only.
 */
3920 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
3923 struct context *ctx = &cmd_flow_context;
3924 const struct token *token;
3925 const enum index *list;
3930 token = &token_list[ctx->curr];
3931 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
3934 for (len = 0; src[len]; ++len)
3935 if (src[len] == '#' || isspace(src[len]))
3939 /* Last argument and EOL detection. */
3940 for (i = len; src[i]; ++i)
3941 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
3943 else if (!isspace(src[i])) {
3948 if (src[i] == '\r' || src[i] == '\n') {
3952 /* Initialize context if necessary. */
3953 if (!ctx->next_num) {
3956 ctx->next[ctx->next_num++] = token->next[0];
3958 /* Process argument through candidates. */
3959 ctx->prev = ctx->curr;
3960 list = ctx->next[ctx->next_num - 1];
3961 for (i = 0; list[i]; ++i) {
3962 const struct token *next = &token_list[list[i]];
3965 ctx->curr = list[i];
/* Tokens with a call() handler parse themselves; others use defaults. */
3967 tmp = next->call(ctx, next, src, len, result, size);
3969 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate matches only when it consumes the whole token. */
3970 if (tmp == -1 || tmp != len)
3978 /* Push subsequent tokens if any. */
3980 for (i = 0; token->next[i]; ++i) {
3981 if (ctx->next_num == RTE_DIM(ctx->next))
3983 ctx->next[ctx->next_num++] = token->next[i];
3985 /* Push arguments if any. */
3987 for (i = 0; token->args[i]; ++i) {
3988 if (ctx->args_num == RTE_DIM(ctx->args))
3990 ctx->args[ctx->args_num++] = token->args[i];
3995 /** Return number of completion entries (cmdline API). */
/*
 * Counts tokens on the current next-list; when exactly one candidate with
 * a completion callback remains, delegates the count to that callback
 * (buf == NULL means "return entry count").
 */
3997 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
3999 struct context *ctx = &cmd_flow_context;
4000 const struct token *token = &token_list[ctx->curr];
4001 const enum index *list;
4005 /* Count number of tokens in current list. */
4007 list = ctx->next[ctx->next_num - 1];
4009 list = token->next[0];
4010 for (i = 0; list[i]; ++i)
4015 * If there is a single token, use its completion callback, otherwise
4016 * return the number of entries.
4018 token = &token_list[list[0]];
4019 if (i == 1 && token->comp) {
4020 /* Save index for cmd_flow_get_help(). */
4021 ctx->prev = list[0];
4022 return token->comp(ctx, token, 0, NULL, 0);
4027 /** Return a completion entry (cmdline API). */
/*
 * Writes completion entry `index` into dst. Mirrors the counting logic of
 * cmd_flow_complete_get_nb(): a single candidate with a comp() callback
 * delegates to it, otherwise the indexed candidate's name is used.
 * NOTE(review): the index validity check is elided in this excerpt.
 */
4029 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
4030 char *dst, unsigned int size)
4032 struct context *ctx = &cmd_flow_context;
4033 const struct token *token = &token_list[ctx->curr];
4034 const enum index *list;
4038 /* Count number of tokens in current list. */
4040 list = ctx->next[ctx->next_num - 1];
4042 list = token->next[0];
4043 for (i = 0; list[i]; ++i)
4047 /* If there is a single token, use its completion callback. */
4048 token = &token_list[list[0]];
4049 if (i == 1 && token->comp) {
4050 /* Save index for cmd_flow_get_help(). */
4051 ctx->prev = list[0];
/* comp() returns < 0 when the entry does not exist. */
4052 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
4054 /* Otherwise make sure the index is valid and use defaults. */
4057 token = &token_list[list[index]];
4058 snprintf(dst, size, "%s", token->name);
4059 /* Save index for cmd_flow_get_help(). */
4060 ctx->prev = list[index];
4064 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the previous token's type (or "TOKEN") into dst and points the
 * global cmd_flow.help_str at its help text, falling back to its name.
 */
4066 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
4068 struct context *ctx = &cmd_flow_context;
4069 const struct token *token = &token_list[ctx->prev];
4074 /* Set token type and update global help with details. */
4075 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
4077 cmd_flow.help_str = token->help;
4079 cmd_flow.help_str = token->name;
4083 /** Token definition template (cmdline API). */
/*
 * All dynamic tokens produced by cmd_flow_tok() share this single ops
 * vtable; per-token behavior lives in cmd_flow_context instead.
 */
4084 static struct cmdline_token_hdr cmd_flow_token_hdr = {
4085 .ops = &(struct cmdline_token_ops){
4086 .parse = cmd_flow_parse,
4087 .complete_get_nb = cmd_flow_complete_get_nb,
4088 .complete_get_elt = cmd_flow_complete_get_elt,
4089 .get_help = cmd_flow_get_help,
4094 /** Populate the next dynamic token. */
/*
 * Called by the cmdline framework for each token slot in cmd_flow.tokens.
 * Reinitializes the shared context on the first slot, stops yielding
 * tokens when none are expected, and otherwise hands back the shared
 * cmd_flow_token_hdr template.
 * NOTE(review): some branches are elided in this excerpt.
 */
4096 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
4097 cmdline_parse_token_hdr_t **hdr_inst)
4099 struct context *ctx = &cmd_flow_context;
4101 /* Always reinitialize context before requesting the first token. */
/* hdr_inst == cmd_flow.tokens identifies the first slot. */
4102 if (!(hdr_inst - cmd_flow.tokens))
4103 cmd_flow_context_init(ctx);
4104 /* Return NULL when no more tokens are expected. */
4105 if (!ctx->next_num && ctx->curr) {
4109 /* Determine if command should end here. */
4110 if (ctx->eol && ctx->last && ctx->next_num) {
4111 const enum index *list = ctx->next[ctx->next_num - 1];
4114 for (i = 0; list[i]; ++i) {
4121 *hdr = &cmd_flow_token_hdr;
4124 /** Dispatch parsed buffer to function calls. */
/*
 * Final step after a command line is fully parsed: routes the completed
 * buffer to the matching port_flow_* backend based on in->command.
 * NOTE(review): case labels and breaks are elided in this excerpt.
 */
4126 cmd_flow_parsed(const struct buffer *in)
4128 switch (in->command) {
4130 port_flow_validate(in->port, &in->args.vc.attr,
4131 in->args.vc.pattern, in->args.vc.actions);
4134 port_flow_create(in->port, &in->args.vc.attr,
4135 in->args.vc.pattern, in->args.vc.actions);
4138 port_flow_destroy(in->port, in->args.destroy.rule_n,
4139 in->args.destroy.rule);
4142 port_flow_flush(in->port);
4145 port_flow_query(in->port, in->args.query.rule,
4146 &in->args.query.action);
4149 port_flow_list(in->port, in->args.list.group_n,
4150 in->args.list.group);
4153 port_flow_isolate(in->port, in->args.isolate.set);
4160 /** Token generator and output processing callback (cmdline API). */
/*
 * Dual-purpose callback: generates the next token via cmd_flow_tok()
 * or dispatches the finished buffer via cmd_flow_parsed(); which path
 * runs depends on elided condition checks on the arguments.
 */
4162 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
4165 cmd_flow_tok(arg0, arg2);
4167 cmd_flow_parsed(arg0);
4170 /** Global parser instance (cmdline API). */
4171 cmdline_parse_inst_t cmd_flow = {
4173 .data = NULL, /**< Unused. */
4174 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
4177 }, /**< Tokens are returned by cmd_flow_tok(). */