1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
111 ITEM_VLAN_INNER_TYPE,
143 ITEM_E_TAG_GRP_ECID_B,
162 ITEM_ARP_ETH_IPV4_SHA,
163 ITEM_ARP_ETH_IPV4_SPA,
164 ITEM_ARP_ETH_IPV4_THA,
165 ITEM_ARP_ETH_IPV4_TPA,
167 ITEM_IPV6_EXT_NEXT_HDR,
172 ITEM_ICMP6_ND_NS_TARGET_ADDR,
174 ITEM_ICMP6_ND_NA_TARGET_ADDR,
176 ITEM_ICMP6_ND_OPT_TYPE,
177 ITEM_ICMP6_ND_OPT_SLA_ETH,
178 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
179 ITEM_ICMP6_ND_OPT_TLA_ETH,
180 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
182 /* Validate/create actions. */
202 ACTION_RSS_FUNC_DEFAULT,
203 ACTION_RSS_FUNC_TOEPLITZ,
204 ACTION_RSS_FUNC_SIMPLE_XOR,
216 ACTION_PHY_PORT_ORIGINAL,
217 ACTION_PHY_PORT_INDEX,
219 ACTION_PORT_ID_ORIGINAL,
223 ACTION_OF_SET_MPLS_TTL,
224 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
225 ACTION_OF_DEC_MPLS_TTL,
226 ACTION_OF_SET_NW_TTL,
227 ACTION_OF_SET_NW_TTL_NW_TTL,
228 ACTION_OF_DEC_NW_TTL,
229 ACTION_OF_COPY_TTL_OUT,
230 ACTION_OF_COPY_TTL_IN,
233 ACTION_OF_PUSH_VLAN_ETHERTYPE,
234 ACTION_OF_SET_VLAN_VID,
235 ACTION_OF_SET_VLAN_VID_VLAN_VID,
236 ACTION_OF_SET_VLAN_PCP,
237 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
239 ACTION_OF_POP_MPLS_ETHERTYPE,
241 ACTION_OF_PUSH_MPLS_ETHERTYPE,
247 ACTION_SET_IPV4_SRC_IPV4_SRC,
249 ACTION_SET_IPV4_DST_IPV4_DST,
251 ACTION_SET_IPV6_SRC_IPV6_SRC,
253 ACTION_SET_IPV6_DST_IPV6_DST,
255 ACTION_SET_TP_SRC_TP_SRC,
257 ACTION_SET_TP_DST_TP_DST,
261 /** Maximum size (in bytes) of the pattern buffer appended to struct rte_flow_item_raw. */
262 #define ITEM_RAW_PATTERN_SIZE 40
264 /** Storage size for struct rte_flow_item_raw including its trailing pattern buffer (ITEM_RAW_PATTERN_SIZE bytes). */
265 #define ITEM_RAW_SIZE \
266 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
268 /** Maximum number of queue indices in struct rte_flow_action_rss (sizes the queue[] array of struct action_rss_data). */
269 #define ACTION_RSS_QUEUE_NUM 32
271 /** Storage for struct rte_flow_action_rss including external data. */
272 struct action_rss_data {
273 struct rte_flow_action_rss conf;
274 uint8_t key[RSS_HASH_KEY_LENGTH];
275 uint16_t queue[ACTION_RSS_QUEUE_NUM];
278 /**
 * Maximum number of items in struct rte_flow_action_vxlan_encap
 * (sizes the items[] array of struct action_vxlan_encap_data; presumably
 * ETH/VLAN/IPv4-or-IPv6/UDP/VXLAN plus a terminating END item — confirm
 * against the encap parser).
 */
279 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
281 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
282 struct action_vxlan_encap_data {
283 struct rte_flow_action_vxlan_encap conf;
284 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
285 struct rte_flow_item_eth item_eth;
286 struct rte_flow_item_vlan item_vlan;
288 struct rte_flow_item_ipv4 item_ipv4;
289 struct rte_flow_item_ipv6 item_ipv6;
291 struct rte_flow_item_udp item_udp;
292 struct rte_flow_item_vxlan item_vxlan;
295 /**
 * Maximum number of items in struct rte_flow_action_nvgre_encap
 * (sizes the items[] array of struct action_nvgre_encap_data; presumably
 * ETH/VLAN/IPv4-or-IPv6/NVGRE plus a terminating END item — confirm
 * against the encap parser).
 */
296 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
298 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
299 struct action_nvgre_encap_data {
300 struct rte_flow_action_nvgre_encap conf;
301 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
302 struct rte_flow_item_eth item_eth;
303 struct rte_flow_item_vlan item_vlan;
305 struct rte_flow_item_ipv4 item_ipv4;
306 struct rte_flow_item_ipv6 item_ipv6;
308 struct rte_flow_item_nvgre item_nvgre;
311 /**
 * Maximum number of subsequent tokens and arguments on the stack
 * (sizes the next[] and args[] arrays of the parser context).
 */
312 #define CTX_STACK_SIZE 16
314 /** Parser context. */
316 /** Stack of subsequent token lists to process. */
317 const enum index *next[CTX_STACK_SIZE];
318 /** Arguments for stacked tokens. */
319 const void *args[CTX_STACK_SIZE];
320 enum index curr; /**< Current token index. */
321 enum index prev; /**< Index of the last token seen. */
322 int next_num; /**< Number of entries in next[]. */
323 int args_num; /**< Number of entries in args[]. */
324 uint32_t eol:1; /**< EOL has been detected. */
325 uint32_t last:1; /**< No more arguments. */
326 portid_t port; /**< Current port ID (for completions). */
327 uint32_t objdata; /**< Object-specific data. */
328 void *object; /**< Address of current object for relative offsets. */
329 void *objmask; /**< Object a full mask must be written to. */
332 /** Token argument. */
334 uint32_t hton:1; /**< Use network byte ordering. */
335 uint32_t sign:1; /**< Value is signed. */
336 uint32_t bounded:1; /**< Value is bounded. */
337 uintmax_t min; /**< Minimum value if bounded. */
338 uintmax_t max; /**< Maximum value if bounded. */
339 uint32_t offset; /**< Relative offset from ctx->object. */
340 uint32_t size; /**< Field size. */
341 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
344 /** Parser token definition. */
346 /** Type displayed during completion (defaults to "TOKEN"). */
348 /** Help displayed during completion (defaults to token name). */
350 /** Private data used by parser functions. */
353 * Lists of subsequent tokens to push on the stack. Each call to the
354 * parser consumes the last entry of that stack.
356 const enum index *const *next;
357 /** Arguments stack for subsequent tokens that need them. */
358 const struct arg *const *args;
360 * Token-processing callback, returns -1 in case of error, the
361 * length of the matched string otherwise. If NULL, attempts to
362 * match the token name.
364 * If buf is not NULL, the result should be stored in it according
365 * to context. An error is returned if not large enough.
367 int (*call)(struct context *ctx, const struct token *token,
368 const char *str, unsigned int len,
369 void *buf, unsigned int size);
371 * Callback that provides possible values for this token, used for
372 * completion. Returns -1 in case of error, the number of possible
373 * values otherwise. If NULL, the token name is used.
375 * If buf is not NULL, entry index ent is written to buf and the
376 * full length of the entry is returned (same behavior as
379 int (*comp)(struct context *ctx, const struct token *token,
380 unsigned int ent, char *buf, unsigned int size);
381 /** Mandatory token name, no default value. */
385 /**
 * Static initializer for the next field: a compound-literal,
 * NULL-terminated array of token-index lists (one list per argument).
 */
386 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
388 /**
 * Static initializer for a NEXT() entry: a compound-literal list of
 * token indices terminated by ZERO.
 */
389 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
391 /**
 * Static initializer for the args field: a compound-literal,
 * NULL-terminated array of argument descriptors (struct arg pointers).
 */
392 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
394 /** Static initializer for ARGS() to target a field. */
395 #define ARGS_ENTRY(s, f) \
396 (&(const struct arg){ \
397 .offset = offsetof(s, f), \
398 .size = sizeof(((s *)0)->f), \
401 /** Static initializer for ARGS() to target a bit-field. */
402 #define ARGS_ENTRY_BF(s, f, b) \
403 (&(const struct arg){ \
405 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
408 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
409 #define ARGS_ENTRY_MASK(s, f, m) \
410 (&(const struct arg){ \
411 .offset = offsetof(s, f), \
412 .size = sizeof(((s *)0)->f), \
413 .mask = (const void *)(m), \
416 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
417 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
418 (&(const struct arg){ \
420 .offset = offsetof(s, f), \
421 .size = sizeof(((s *)0)->f), \
422 .mask = (const void *)(m), \
425 /** Static initializer for ARGS() to target a pointer. */
426 #define ARGS_ENTRY_PTR(s, f) \
427 (&(const struct arg){ \
428 .size = sizeof(*((s *)0)->f), \
431 /** Static initializer for ARGS() with arbitrary offset and size. */
432 #define ARGS_ENTRY_ARB(o, s) \
433 (&(const struct arg){ \
438 /** Same as ARGS_ENTRY_ARB() with bounded values. */
439 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
440 (&(const struct arg){ \
448 /** Same as ARGS_ENTRY() using network byte ordering. */
449 #define ARGS_ENTRY_HTON(s, f) \
450 (&(const struct arg){ \
452 .offset = offsetof(s, f), \
453 .size = sizeof(((s *)0)->f), \
456 /** Parser output buffer layout expected by cmd_flow_parsed(). */
458 enum index command; /**< Flow command. */
459 portid_t port; /**< Affected port ID. */
462 struct rte_flow_attr attr;
463 struct rte_flow_item *pattern;
464 struct rte_flow_action *actions;
468 } vc; /**< Validate/create arguments. */
472 } destroy; /**< Destroy arguments. */
475 struct rte_flow_action action;
476 } query; /**< Query arguments. */
480 } list; /**< List arguments. */
483 } isolate; /**< Isolated mode arguments. */
484 } args; /**< Command arguments. */
487 /** Private data for pattern items. */
488 struct parse_item_priv {
489 enum rte_flow_item_type type; /**< Item type. */
490 uint32_t size; /**< Size of item specification structure. */
493 #define PRIV_ITEM(t, s) \
494 (&(const struct parse_item_priv){ \
495 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
499 /** Private data for actions. */
500 struct parse_action_priv {
501 enum rte_flow_action_type type; /**< Action type. */
502 uint32_t size; /**< Size of action configuration structure. */
505 #define PRIV_ACTION(t, s) \
506 (&(const struct parse_action_priv){ \
507 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
511 static const enum index next_vc_attr[] = {
521 static const enum index next_destroy_attr[] = {
527 static const enum index next_list_attr[] = {
533 static const enum index item_param[] = {
542 static const enum index next_item[] = {
578 ITEM_ICMP6_ND_OPT_SLA_ETH,
579 ITEM_ICMP6_ND_OPT_TLA_ETH,
583 static const enum index item_fuzzy[] = {
589 static const enum index item_any[] = {
595 static const enum index item_vf[] = {
601 static const enum index item_phy_port[] = {
607 static const enum index item_port_id[] = {
613 static const enum index item_mark[] = {
619 static const enum index item_raw[] = {
629 static const enum index item_eth[] = {
637 static const enum index item_vlan[] = {
642 ITEM_VLAN_INNER_TYPE,
647 static const enum index item_ipv4[] = {
657 static const enum index item_ipv6[] = {
668 static const enum index item_icmp[] = {
675 static const enum index item_udp[] = {
682 static const enum index item_tcp[] = {
690 static const enum index item_sctp[] = {
699 static const enum index item_vxlan[] = {
705 static const enum index item_e_tag[] = {
706 ITEM_E_TAG_GRP_ECID_B,
711 static const enum index item_nvgre[] = {
717 static const enum index item_mpls[] = {
723 static const enum index item_gre[] = {
729 static const enum index item_gtp[] = {
735 static const enum index item_geneve[] = {
742 static const enum index item_vxlan_gpe[] = {
748 static const enum index item_arp_eth_ipv4[] = {
749 ITEM_ARP_ETH_IPV4_SHA,
750 ITEM_ARP_ETH_IPV4_SPA,
751 ITEM_ARP_ETH_IPV4_THA,
752 ITEM_ARP_ETH_IPV4_TPA,
757 static const enum index item_ipv6_ext[] = {
758 ITEM_IPV6_EXT_NEXT_HDR,
763 static const enum index item_icmp6[] = {
770 static const enum index item_icmp6_nd_ns[] = {
771 ITEM_ICMP6_ND_NS_TARGET_ADDR,
776 static const enum index item_icmp6_nd_na[] = {
777 ITEM_ICMP6_ND_NA_TARGET_ADDR,
782 static const enum index item_icmp6_nd_opt[] = {
783 ITEM_ICMP6_ND_OPT_TYPE,
788 static const enum index item_icmp6_nd_opt_sla_eth[] = {
789 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
794 static const enum index item_icmp6_nd_opt_tla_eth[] = {
795 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
800 static const enum index next_action[] = {
816 ACTION_OF_SET_MPLS_TTL,
817 ACTION_OF_DEC_MPLS_TTL,
818 ACTION_OF_SET_NW_TTL,
819 ACTION_OF_DEC_NW_TTL,
820 ACTION_OF_COPY_TTL_OUT,
821 ACTION_OF_COPY_TTL_IN,
824 ACTION_OF_SET_VLAN_VID,
825 ACTION_OF_SET_VLAN_PCP,
842 static const enum index action_mark[] = {
848 static const enum index action_queue[] = {
854 static const enum index action_count[] = {
861 static const enum index action_rss[] = {
872 static const enum index action_vf[] = {
879 static const enum index action_phy_port[] = {
880 ACTION_PHY_PORT_ORIGINAL,
881 ACTION_PHY_PORT_INDEX,
886 static const enum index action_port_id[] = {
887 ACTION_PORT_ID_ORIGINAL,
893 static const enum index action_meter[] = {
899 static const enum index action_of_set_mpls_ttl[] = {
900 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
905 static const enum index action_of_set_nw_ttl[] = {
906 ACTION_OF_SET_NW_TTL_NW_TTL,
911 static const enum index action_of_push_vlan[] = {
912 ACTION_OF_PUSH_VLAN_ETHERTYPE,
917 static const enum index action_of_set_vlan_vid[] = {
918 ACTION_OF_SET_VLAN_VID_VLAN_VID,
923 static const enum index action_of_set_vlan_pcp[] = {
924 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
929 static const enum index action_of_pop_mpls[] = {
930 ACTION_OF_POP_MPLS_ETHERTYPE,
935 static const enum index action_of_push_mpls[] = {
936 ACTION_OF_PUSH_MPLS_ETHERTYPE,
941 static const enum index action_set_ipv4_src[] = {
942 ACTION_SET_IPV4_SRC_IPV4_SRC,
947 static const enum index action_set_ipv4_dst[] = {
948 ACTION_SET_IPV4_DST_IPV4_DST,
953 static const enum index action_set_ipv6_src[] = {
954 ACTION_SET_IPV6_SRC_IPV6_SRC,
959 static const enum index action_set_ipv6_dst[] = {
960 ACTION_SET_IPV6_DST_IPV6_DST,
965 static const enum index action_set_tp_src[] = {
966 ACTION_SET_TP_SRC_TP_SRC,
971 static const enum index action_set_tp_dst[] = {
972 ACTION_SET_TP_DST_TP_DST,
977 static const enum index action_jump[] = {
983 static int parse_init(struct context *, const struct token *,
984 const char *, unsigned int,
985 void *, unsigned int);
986 static int parse_vc(struct context *, const struct token *,
987 const char *, unsigned int,
988 void *, unsigned int);
989 static int parse_vc_spec(struct context *, const struct token *,
990 const char *, unsigned int, void *, unsigned int);
991 static int parse_vc_conf(struct context *, const struct token *,
992 const char *, unsigned int, void *, unsigned int);
993 static int parse_vc_action_rss(struct context *, const struct token *,
994 const char *, unsigned int, void *,
996 static int parse_vc_action_rss_func(struct context *, const struct token *,
997 const char *, unsigned int, void *,
999 static int parse_vc_action_rss_type(struct context *, const struct token *,
1000 const char *, unsigned int, void *,
1002 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1003 const char *, unsigned int, void *,
1005 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1006 const char *, unsigned int, void *,
1008 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1009 const char *, unsigned int, void *,
1011 static int parse_destroy(struct context *, const struct token *,
1012 const char *, unsigned int,
1013 void *, unsigned int);
1014 static int parse_flush(struct context *, const struct token *,
1015 const char *, unsigned int,
1016 void *, unsigned int);
1017 static int parse_query(struct context *, const struct token *,
1018 const char *, unsigned int,
1019 void *, unsigned int);
1020 static int parse_action(struct context *, const struct token *,
1021 const char *, unsigned int,
1022 void *, unsigned int);
1023 static int parse_list(struct context *, const struct token *,
1024 const char *, unsigned int,
1025 void *, unsigned int);
1026 static int parse_isolate(struct context *, const struct token *,
1027 const char *, unsigned int,
1028 void *, unsigned int);
1029 static int parse_int(struct context *, const struct token *,
1030 const char *, unsigned int,
1031 void *, unsigned int);
1032 static int parse_prefix(struct context *, const struct token *,
1033 const char *, unsigned int,
1034 void *, unsigned int);
1035 static int parse_boolean(struct context *, const struct token *,
1036 const char *, unsigned int,
1037 void *, unsigned int);
1038 static int parse_string(struct context *, const struct token *,
1039 const char *, unsigned int,
1040 void *, unsigned int);
1041 static int parse_mac_addr(struct context *, const struct token *,
1042 const char *, unsigned int,
1043 void *, unsigned int);
1044 static int parse_ipv4_addr(struct context *, const struct token *,
1045 const char *, unsigned int,
1046 void *, unsigned int);
1047 static int parse_ipv6_addr(struct context *, const struct token *,
1048 const char *, unsigned int,
1049 void *, unsigned int);
1050 static int parse_port(struct context *, const struct token *,
1051 const char *, unsigned int,
1052 void *, unsigned int);
1053 static int comp_none(struct context *, const struct token *,
1054 unsigned int, char *, unsigned int);
1055 static int comp_boolean(struct context *, const struct token *,
1056 unsigned int, char *, unsigned int);
1057 static int comp_action(struct context *, const struct token *,
1058 unsigned int, char *, unsigned int);
1059 static int comp_port(struct context *, const struct token *,
1060 unsigned int, char *, unsigned int);
1061 static int comp_rule_id(struct context *, const struct token *,
1062 unsigned int, char *, unsigned int);
1063 static int comp_vc_action_rss_type(struct context *, const struct token *,
1064 unsigned int, char *, unsigned int);
1065 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1066 unsigned int, char *, unsigned int);
1068 /** Token definitions. */
1069 static const struct token token_list[] = {
1070 /* Special tokens. */
1073 .help = "null entry, abused as the entry point",
1074 .next = NEXT(NEXT_ENTRY(FLOW)),
1079 .help = "command may end here",
1081 /* Common tokens. */
1085 .help = "integer value",
1090 .name = "{unsigned}",
1092 .help = "unsigned integer value",
1099 .help = "prefix length for bit-mask",
1100 .call = parse_prefix,
1104 .name = "{boolean}",
1106 .help = "any boolean value",
1107 .call = parse_boolean,
1108 .comp = comp_boolean,
1113 .help = "fixed string",
1114 .call = parse_string,
1118 .name = "{MAC address}",
1120 .help = "standard MAC address notation",
1121 .call = parse_mac_addr,
1125 .name = "{IPv4 address}",
1126 .type = "IPV4 ADDRESS",
1127 .help = "standard IPv4 address notation",
1128 .call = parse_ipv4_addr,
1132 .name = "{IPv6 address}",
1133 .type = "IPV6 ADDRESS",
1134 .help = "standard IPv6 address notation",
1135 .call = parse_ipv6_addr,
1139 .name = "{rule id}",
1141 .help = "rule identifier",
1143 .comp = comp_rule_id,
1146 .name = "{port_id}",
1148 .help = "port identifier",
1153 .name = "{group_id}",
1155 .help = "group identifier",
1159 [PRIORITY_LEVEL] = {
1162 .help = "priority level",
1166 /* Top-level command. */
1169 .type = "{command} {port_id} [{arg} [...]]",
1170 .help = "manage ingress/egress flow rules",
1171 .next = NEXT(NEXT_ENTRY
1181 /* Sub-level commands. */
1184 .help = "check whether a flow rule can be created",
1185 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1186 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1191 .help = "create a flow rule",
1192 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1193 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1198 .help = "destroy specific flow rules",
1199 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1200 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1201 .call = parse_destroy,
1205 .help = "destroy all flow rules",
1206 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1207 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1208 .call = parse_flush,
1212 .help = "query an existing flow rule",
1213 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1214 NEXT_ENTRY(RULE_ID),
1215 NEXT_ENTRY(PORT_ID)),
1216 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1217 ARGS_ENTRY(struct buffer, args.query.rule),
1218 ARGS_ENTRY(struct buffer, port)),
1219 .call = parse_query,
1223 .help = "list existing flow rules",
1224 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1225 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1230 .help = "restrict ingress traffic to the defined flow rules",
1231 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1232 NEXT_ENTRY(PORT_ID)),
1233 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1234 ARGS_ENTRY(struct buffer, port)),
1235 .call = parse_isolate,
1237 /* Destroy arguments. */
1240 .help = "specify a rule identifier",
1241 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1242 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1243 .call = parse_destroy,
1245 /* Query arguments. */
1249 .help = "action to query, must be part of the rule",
1250 .call = parse_action,
1251 .comp = comp_action,
1253 /* List arguments. */
1256 .help = "specify a group",
1257 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1258 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1261 /* Validate/create attributes. */
1264 .help = "specify a group",
1265 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1266 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1271 .help = "specify a priority level",
1272 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1273 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1278 .help = "affect rule to ingress",
1279 .next = NEXT(next_vc_attr),
1284 .help = "affect rule to egress",
1285 .next = NEXT(next_vc_attr),
1290 .help = "apply rule directly to endpoints found in pattern",
1291 .next = NEXT(next_vc_attr),
1294 /* Validate/create pattern. */
1297 .help = "submit a list of pattern items",
1298 .next = NEXT(next_item),
1303 .help = "match value perfectly (with full bit-mask)",
1304 .call = parse_vc_spec,
1306 [ITEM_PARAM_SPEC] = {
1308 .help = "match value according to configured bit-mask",
1309 .call = parse_vc_spec,
1311 [ITEM_PARAM_LAST] = {
1313 .help = "specify upper bound to establish a range",
1314 .call = parse_vc_spec,
1316 [ITEM_PARAM_MASK] = {
1318 .help = "specify bit-mask with relevant bits set to one",
1319 .call = parse_vc_spec,
1321 [ITEM_PARAM_PREFIX] = {
1323 .help = "generate bit-mask from a prefix length",
1324 .call = parse_vc_spec,
1328 .help = "specify next pattern item",
1329 .next = NEXT(next_item),
1333 .help = "end list of pattern items",
1334 .priv = PRIV_ITEM(END, 0),
1335 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1340 .help = "no-op pattern item",
1341 .priv = PRIV_ITEM(VOID, 0),
1342 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1347 .help = "perform actions when pattern does not match",
1348 .priv = PRIV_ITEM(INVERT, 0),
1349 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1354 .help = "match any protocol for the current layer",
1355 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1356 .next = NEXT(item_any),
1361 .help = "number of layers covered",
1362 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1363 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1367 .help = "match traffic from/to the physical function",
1368 .priv = PRIV_ITEM(PF, 0),
1369 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1374 .help = "match traffic from/to a virtual function ID",
1375 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1376 .next = NEXT(item_vf),
1382 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1383 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1387 .help = "match traffic from/to a specific physical port",
1388 .priv = PRIV_ITEM(PHY_PORT,
1389 sizeof(struct rte_flow_item_phy_port)),
1390 .next = NEXT(item_phy_port),
1393 [ITEM_PHY_PORT_INDEX] = {
1395 .help = "physical port index",
1396 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1397 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1401 .help = "match traffic from/to a given DPDK port ID",
1402 .priv = PRIV_ITEM(PORT_ID,
1403 sizeof(struct rte_flow_item_port_id)),
1404 .next = NEXT(item_port_id),
1407 [ITEM_PORT_ID_ID] = {
1409 .help = "DPDK port ID",
1410 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1411 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1415 .help = "match traffic against value set in previously matched rule",
1416 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1417 .next = NEXT(item_mark),
1422 .help = "Integer value to match against",
1423 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1424 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1428 .help = "match an arbitrary byte string",
1429 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1430 .next = NEXT(item_raw),
1433 [ITEM_RAW_RELATIVE] = {
1435 .help = "look for pattern after the previous item",
1436 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1437 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1440 [ITEM_RAW_SEARCH] = {
1442 .help = "search pattern from offset (see also limit)",
1443 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1444 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1447 [ITEM_RAW_OFFSET] = {
1449 .help = "absolute or relative offset for pattern",
1450 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1451 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1453 [ITEM_RAW_LIMIT] = {
1455 .help = "search area limit for start of pattern",
1456 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1457 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1459 [ITEM_RAW_PATTERN] = {
1461 .help = "byte string to look for",
1462 .next = NEXT(item_raw,
1464 NEXT_ENTRY(ITEM_PARAM_IS,
1467 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1468 ARGS_ENTRY(struct rte_flow_item_raw, length),
1469 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1470 ITEM_RAW_PATTERN_SIZE)),
1474 .help = "match Ethernet header",
1475 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1476 .next = NEXT(item_eth),
1481 .help = "destination MAC",
1482 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1483 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1487 .help = "source MAC",
1488 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1489 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1493 .help = "EtherType",
1494 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1495 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1499 .help = "match 802.1Q/ad VLAN tag",
1500 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1501 .next = NEXT(item_vlan),
1506 .help = "tag control information",
1507 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1508 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1512 .help = "priority code point",
1513 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1514 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1519 .help = "drop eligible indicator",
1520 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1521 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1526 .help = "VLAN identifier",
1527 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1528 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1531 [ITEM_VLAN_INNER_TYPE] = {
1532 .name = "inner_type",
1533 .help = "inner EtherType",
1534 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1535 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1540 .help = "match IPv4 header",
1541 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1542 .next = NEXT(item_ipv4),
1547 .help = "type of service",
1548 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1549 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1550 hdr.type_of_service)),
1554 .help = "time to live",
1555 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1556 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1559 [ITEM_IPV4_PROTO] = {
1561 .help = "next protocol ID",
1562 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1563 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1564 hdr.next_proto_id)),
1568 .help = "source address",
1569 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1570 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1575 .help = "destination address",
1576 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1577 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1582 .help = "match IPv6 header",
1583 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1584 .next = NEXT(item_ipv6),
1589 .help = "traffic class",
1590 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1591 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1593 "\x0f\xf0\x00\x00")),
1595 [ITEM_IPV6_FLOW] = {
1597 .help = "flow label",
1598 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1599 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1601 "\x00\x0f\xff\xff")),
1603 [ITEM_IPV6_PROTO] = {
1605 .help = "protocol (next header)",
1606 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1607 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1612 .help = "hop limit",
1613 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1614 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1619 .help = "source address",
1620 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1621 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1626 .help = "destination address",
1627 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1628 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1633 .help = "match ICMP header",
1634 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1635 .next = NEXT(item_icmp),
1638 [ITEM_ICMP_TYPE] = {
1640 .help = "ICMP packet type",
1641 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1642 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1645 [ITEM_ICMP_CODE] = {
1647 .help = "ICMP packet code",
1648 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1649 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1654 .help = "match UDP header",
1655 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1656 .next = NEXT(item_udp),
1661 .help = "UDP source port",
1662 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1663 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1668 .help = "UDP destination port",
1669 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1670 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1675 .help = "match TCP header",
1676 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1677 .next = NEXT(item_tcp),
1682 .help = "TCP source port",
1683 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1684 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1689 .help = "TCP destination port",
1690 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1691 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1694 [ITEM_TCP_FLAGS] = {
1696 .help = "TCP flags",
1697 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1698 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1703 .help = "match SCTP header",
1704 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1705 .next = NEXT(item_sctp),
1710 .help = "SCTP source port",
1711 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1712 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1717 .help = "SCTP destination port",
1718 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1719 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1724 .help = "validation tag",
1725 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1726 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1729 [ITEM_SCTP_CKSUM] = {
1732 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1733 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1738 .help = "match VXLAN header",
1739 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1740 .next = NEXT(item_vxlan),
1743 [ITEM_VXLAN_VNI] = {
1745 .help = "VXLAN identifier",
1746 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1747 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1751 .help = "match E-Tag header",
1752 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1753 .next = NEXT(item_e_tag),
1756 [ITEM_E_TAG_GRP_ECID_B] = {
1757 .name = "grp_ecid_b",
1758 .help = "GRP and E-CID base",
1759 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1760 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1766 .help = "match NVGRE header",
1767 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1768 .next = NEXT(item_nvgre),
1771 [ITEM_NVGRE_TNI] = {
1773 .help = "virtual subnet ID",
1774 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1775 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1779 .help = "match MPLS header",
1780 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1781 .next = NEXT(item_mpls),
1784 [ITEM_MPLS_LABEL] = {
1786 .help = "MPLS label",
1787 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1788 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1794 .help = "match GRE header",
1795 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1796 .next = NEXT(item_gre),
1799 [ITEM_GRE_PROTO] = {
1801 .help = "GRE protocol type",
1802 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1803 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1808 .help = "fuzzy pattern match, expect faster than default",
1809 .priv = PRIV_ITEM(FUZZY,
1810 sizeof(struct rte_flow_item_fuzzy)),
1811 .next = NEXT(item_fuzzy),
1814 [ITEM_FUZZY_THRESH] = {
1816 .help = "match accuracy threshold",
1817 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1818 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1823 .help = "match GTP header",
1824 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1825 .next = NEXT(item_gtp),
1830 .help = "tunnel endpoint identifier",
1831 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1832 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1836 .help = "match GTP header",
1837 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1838 .next = NEXT(item_gtp),
1843 .help = "match GTP header",
1844 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1845 .next = NEXT(item_gtp),
1850 .help = "match GENEVE header",
1851 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1852 .next = NEXT(item_geneve),
1855 [ITEM_GENEVE_VNI] = {
1857 .help = "virtual network identifier",
1858 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1859 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1861 [ITEM_GENEVE_PROTO] = {
1863 .help = "GENEVE protocol type",
1864 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1865 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1868 [ITEM_VXLAN_GPE] = {
1869 .name = "vxlan-gpe",
1870 .help = "match VXLAN-GPE header",
1871 .priv = PRIV_ITEM(VXLAN_GPE,
1872 sizeof(struct rte_flow_item_vxlan_gpe)),
1873 .next = NEXT(item_vxlan_gpe),
1876 [ITEM_VXLAN_GPE_VNI] = {
1878 .help = "VXLAN-GPE identifier",
1879 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1880 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1883 [ITEM_ARP_ETH_IPV4] = {
1884 .name = "arp_eth_ipv4",
1885 .help = "match ARP header for Ethernet/IPv4",
1886 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1887 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1888 .next = NEXT(item_arp_eth_ipv4),
1891 [ITEM_ARP_ETH_IPV4_SHA] = {
1893 .help = "sender hardware address",
1894 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1896 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1899 [ITEM_ARP_ETH_IPV4_SPA] = {
1901 .help = "sender IPv4 address",
1902 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1904 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1907 [ITEM_ARP_ETH_IPV4_THA] = {
1909 .help = "target hardware address",
1910 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1912 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1915 [ITEM_ARP_ETH_IPV4_TPA] = {
1917 .help = "target IPv4 address",
1918 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1920 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1925 .help = "match presence of any IPv6 extension header",
1926 .priv = PRIV_ITEM(IPV6_EXT,
1927 sizeof(struct rte_flow_item_ipv6_ext)),
1928 .next = NEXT(item_ipv6_ext),
1931 [ITEM_IPV6_EXT_NEXT_HDR] = {
1933 .help = "next header",
1934 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
1935 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
1940 .help = "match any ICMPv6 header",
1941 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
1942 .next = NEXT(item_icmp6),
1945 [ITEM_ICMP6_TYPE] = {
1947 .help = "ICMPv6 type",
1948 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1949 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1952 [ITEM_ICMP6_CODE] = {
1954 .help = "ICMPv6 code",
1955 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1956 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1959 [ITEM_ICMP6_ND_NS] = {
1960 .name = "icmp6_nd_ns",
1961 .help = "match ICMPv6 neighbor discovery solicitation",
1962 .priv = PRIV_ITEM(ICMP6_ND_NS,
1963 sizeof(struct rte_flow_item_icmp6_nd_ns)),
1964 .next = NEXT(item_icmp6_nd_ns),
1967 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
1968 .name = "target_addr",
1969 .help = "target address",
1970 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
1972 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
1975 [ITEM_ICMP6_ND_NA] = {
1976 .name = "icmp6_nd_na",
1977 .help = "match ICMPv6 neighbor discovery advertisement",
1978 .priv = PRIV_ITEM(ICMP6_ND_NA,
1979 sizeof(struct rte_flow_item_icmp6_nd_na)),
1980 .next = NEXT(item_icmp6_nd_na),
1983 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
1984 .name = "target_addr",
1985 .help = "target address",
1986 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
1988 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
1991 [ITEM_ICMP6_ND_OPT] = {
1992 .name = "icmp6_nd_opt",
1993 .help = "match presence of any ICMPv6 neighbor discovery"
1995 .priv = PRIV_ITEM(ICMP6_ND_OPT,
1996 sizeof(struct rte_flow_item_icmp6_nd_opt)),
1997 .next = NEXT(item_icmp6_nd_opt),
2000 [ITEM_ICMP6_ND_OPT_TYPE] = {
2002 .help = "ND option type",
2003 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2005 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2008 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2009 .name = "icmp6_nd_opt_sla_eth",
2010 .help = "match ICMPv6 neighbor discovery source Ethernet"
2011 " link-layer address option",
2013 (ICMP6_ND_OPT_SLA_ETH,
2014 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2015 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2018 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2020 .help = "source Ethernet LLA",
2021 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2023 .args = ARGS(ARGS_ENTRY_HTON
2024 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2026 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2027 .name = "icmp6_nd_opt_tla_eth",
2028 .help = "match ICMPv6 neighbor discovery target Ethernet"
2029 " link-layer address option",
2031 (ICMP6_ND_OPT_TLA_ETH,
2032 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2033 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2036 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2038 .help = "target Ethernet LLA",
2039 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2041 .args = ARGS(ARGS_ENTRY_HTON
2042 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2045 /* Validate/create actions. */
2048 .help = "submit a list of associated actions",
2049 .next = NEXT(next_action),
2054 .help = "specify next action",
2055 .next = NEXT(next_action),
2059 .help = "end list of actions",
2060 .priv = PRIV_ACTION(END, 0),
2065 .help = "no-op action",
2066 .priv = PRIV_ACTION(VOID, 0),
2067 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2070 [ACTION_PASSTHRU] = {
2072 .help = "let subsequent rule process matched packets",
2073 .priv = PRIV_ACTION(PASSTHRU, 0),
2074 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2079 .help = "redirect traffic to a given group",
2080 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2081 .next = NEXT(action_jump),
2084 [ACTION_JUMP_GROUP] = {
2086 .help = "group to redirect traffic to",
2087 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2088 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2089 .call = parse_vc_conf,
2093 .help = "attach 32 bit value to packets",
2094 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2095 .next = NEXT(action_mark),
2098 [ACTION_MARK_ID] = {
2100 .help = "32 bit value to return with packets",
2101 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2102 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2103 .call = parse_vc_conf,
2107 .help = "flag packets",
2108 .priv = PRIV_ACTION(FLAG, 0),
2109 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2114 .help = "assign packets to a given queue index",
2115 .priv = PRIV_ACTION(QUEUE,
2116 sizeof(struct rte_flow_action_queue)),
2117 .next = NEXT(action_queue),
2120 [ACTION_QUEUE_INDEX] = {
2122 .help = "queue index to use",
2123 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2124 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2125 .call = parse_vc_conf,
2129 .help = "drop packets (note: passthru has priority)",
2130 .priv = PRIV_ACTION(DROP, 0),
2131 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2136 .help = "enable counters for this rule",
2137 .priv = PRIV_ACTION(COUNT,
2138 sizeof(struct rte_flow_action_count)),
2139 .next = NEXT(action_count),
2142 [ACTION_COUNT_ID] = {
2143 .name = "identifier",
2144 .help = "counter identifier to use",
2145 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2146 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2147 .call = parse_vc_conf,
2149 [ACTION_COUNT_SHARED] = {
2151 .help = "shared counter",
2152 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2153 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2155 .call = parse_vc_conf,
2159 .help = "spread packets among several queues",
2160 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2161 .next = NEXT(action_rss),
2162 .call = parse_vc_action_rss,
2164 [ACTION_RSS_FUNC] = {
2166 .help = "RSS hash function to apply",
2167 .next = NEXT(action_rss,
2168 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2169 ACTION_RSS_FUNC_TOEPLITZ,
2170 ACTION_RSS_FUNC_SIMPLE_XOR)),
2172 [ACTION_RSS_FUNC_DEFAULT] = {
2174 .help = "default hash function",
2175 .call = parse_vc_action_rss_func,
2177 [ACTION_RSS_FUNC_TOEPLITZ] = {
2179 .help = "Toeplitz hash function",
2180 .call = parse_vc_action_rss_func,
2182 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2183 .name = "simple_xor",
2184 .help = "simple XOR hash function",
2185 .call = parse_vc_action_rss_func,
2187 [ACTION_RSS_LEVEL] = {
2189 .help = "encapsulation level for \"types\"",
2190 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2191 .args = ARGS(ARGS_ENTRY_ARB
2192 (offsetof(struct action_rss_data, conf) +
2193 offsetof(struct rte_flow_action_rss, level),
2194 sizeof(((struct rte_flow_action_rss *)0)->
2197 [ACTION_RSS_TYPES] = {
2199 .help = "specific RSS hash types",
2200 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2202 [ACTION_RSS_TYPE] = {
2204 .help = "RSS hash type",
2205 .call = parse_vc_action_rss_type,
2206 .comp = comp_vc_action_rss_type,
2208 [ACTION_RSS_KEY] = {
2210 .help = "RSS hash key",
2211 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
2212 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2214 (offsetof(struct action_rss_data, conf) +
2215 offsetof(struct rte_flow_action_rss, key_len),
2216 sizeof(((struct rte_flow_action_rss *)0)->
2218 ARGS_ENTRY(struct action_rss_data, key)),
2220 [ACTION_RSS_KEY_LEN] = {
2222 .help = "RSS hash key length in bytes",
2223 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2224 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2225 (offsetof(struct action_rss_data, conf) +
2226 offsetof(struct rte_flow_action_rss, key_len),
2227 sizeof(((struct rte_flow_action_rss *)0)->
2230 RSS_HASH_KEY_LENGTH)),
2232 [ACTION_RSS_QUEUES] = {
2234 .help = "queue indices to use",
2235 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2236 .call = parse_vc_conf,
2238 [ACTION_RSS_QUEUE] = {
2240 .help = "queue index",
2241 .call = parse_vc_action_rss_queue,
2242 .comp = comp_vc_action_rss_queue,
2246 .help = "direct traffic to physical function",
2247 .priv = PRIV_ACTION(PF, 0),
2248 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2253 .help = "direct traffic to a virtual function ID",
2254 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2255 .next = NEXT(action_vf),
2258 [ACTION_VF_ORIGINAL] = {
2260 .help = "use original VF ID if possible",
2261 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2262 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2264 .call = parse_vc_conf,
2269 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2270 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2271 .call = parse_vc_conf,
2273 [ACTION_PHY_PORT] = {
2275 .help = "direct packets to physical port index",
2276 .priv = PRIV_ACTION(PHY_PORT,
2277 sizeof(struct rte_flow_action_phy_port)),
2278 .next = NEXT(action_phy_port),
2281 [ACTION_PHY_PORT_ORIGINAL] = {
2283 .help = "use original port index if possible",
2284 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2285 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2287 .call = parse_vc_conf,
2289 [ACTION_PHY_PORT_INDEX] = {
2291 .help = "physical port index",
2292 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2293 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2295 .call = parse_vc_conf,
2297 [ACTION_PORT_ID] = {
2299 .help = "direct matching traffic to a given DPDK port ID",
2300 .priv = PRIV_ACTION(PORT_ID,
2301 sizeof(struct rte_flow_action_port_id)),
2302 .next = NEXT(action_port_id),
2305 [ACTION_PORT_ID_ORIGINAL] = {
2307 .help = "use original DPDK port ID if possible",
2308 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2309 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2311 .call = parse_vc_conf,
2313 [ACTION_PORT_ID_ID] = {
2315 .help = "DPDK port ID",
2316 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2317 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2318 .call = parse_vc_conf,
2322 .help = "meter the directed packets at given id",
2323 .priv = PRIV_ACTION(METER,
2324 sizeof(struct rte_flow_action_meter)),
2325 .next = NEXT(action_meter),
2328 [ACTION_METER_ID] = {
2330 .help = "meter id to use",
2331 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2332 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2333 .call = parse_vc_conf,
2335 [ACTION_OF_SET_MPLS_TTL] = {
2336 .name = "of_set_mpls_ttl",
2337 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2340 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2341 .next = NEXT(action_of_set_mpls_ttl),
2344 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2347 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2348 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2350 .call = parse_vc_conf,
2352 [ACTION_OF_DEC_MPLS_TTL] = {
2353 .name = "of_dec_mpls_ttl",
2354 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2355 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2356 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2359 [ACTION_OF_SET_NW_TTL] = {
2360 .name = "of_set_nw_ttl",
2361 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2364 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2365 .next = NEXT(action_of_set_nw_ttl),
2368 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2371 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2372 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2374 .call = parse_vc_conf,
2376 [ACTION_OF_DEC_NW_TTL] = {
2377 .name = "of_dec_nw_ttl",
2378 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2379 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2380 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2383 [ACTION_OF_COPY_TTL_OUT] = {
2384 .name = "of_copy_ttl_out",
2385 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2386 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2387 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2390 [ACTION_OF_COPY_TTL_IN] = {
2391 .name = "of_copy_ttl_in",
2392 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2393 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2394 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2397 [ACTION_OF_POP_VLAN] = {
2398 .name = "of_pop_vlan",
2399 .help = "OpenFlow's OFPAT_POP_VLAN",
2400 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2401 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2404 [ACTION_OF_PUSH_VLAN] = {
2405 .name = "of_push_vlan",
2406 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2409 sizeof(struct rte_flow_action_of_push_vlan)),
2410 .next = NEXT(action_of_push_vlan),
2413 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2414 .name = "ethertype",
2415 .help = "EtherType",
2416 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2417 .args = ARGS(ARGS_ENTRY_HTON
2418 (struct rte_flow_action_of_push_vlan,
2420 .call = parse_vc_conf,
2422 [ACTION_OF_SET_VLAN_VID] = {
2423 .name = "of_set_vlan_vid",
2424 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2427 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2428 .next = NEXT(action_of_set_vlan_vid),
2431 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2434 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2435 .args = ARGS(ARGS_ENTRY_HTON
2436 (struct rte_flow_action_of_set_vlan_vid,
2438 .call = parse_vc_conf,
2440 [ACTION_OF_SET_VLAN_PCP] = {
2441 .name = "of_set_vlan_pcp",
2442 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2445 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2446 .next = NEXT(action_of_set_vlan_pcp),
2449 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2451 .help = "VLAN priority",
2452 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2453 .args = ARGS(ARGS_ENTRY_HTON
2454 (struct rte_flow_action_of_set_vlan_pcp,
2456 .call = parse_vc_conf,
2458 [ACTION_OF_POP_MPLS] = {
2459 .name = "of_pop_mpls",
2460 .help = "OpenFlow's OFPAT_POP_MPLS",
2461 .priv = PRIV_ACTION(OF_POP_MPLS,
2462 sizeof(struct rte_flow_action_of_pop_mpls)),
2463 .next = NEXT(action_of_pop_mpls),
2466 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2467 .name = "ethertype",
2468 .help = "EtherType",
2469 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2470 .args = ARGS(ARGS_ENTRY_HTON
2471 (struct rte_flow_action_of_pop_mpls,
2473 .call = parse_vc_conf,
2475 [ACTION_OF_PUSH_MPLS] = {
2476 .name = "of_push_mpls",
2477 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2480 sizeof(struct rte_flow_action_of_push_mpls)),
2481 .next = NEXT(action_of_push_mpls),
2484 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2485 .name = "ethertype",
2486 .help = "EtherType",
2487 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2488 .args = ARGS(ARGS_ENTRY_HTON
2489 (struct rte_flow_action_of_push_mpls,
2491 .call = parse_vc_conf,
2493 [ACTION_VXLAN_ENCAP] = {
2494 .name = "vxlan_encap",
2495 .help = "VXLAN encapsulation, uses configuration set by \"set"
2497 .priv = PRIV_ACTION(VXLAN_ENCAP,
2498 sizeof(struct action_vxlan_encap_data)),
2499 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2500 .call = parse_vc_action_vxlan_encap,
2502 [ACTION_VXLAN_DECAP] = {
2503 .name = "vxlan_decap",
2504 .help = "Performs a decapsulation action by stripping all"
2505 " headers of the VXLAN tunnel network overlay from the"
2507 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2508 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2511 [ACTION_NVGRE_ENCAP] = {
2512 .name = "nvgre_encap",
2513 .help = "NVGRE encapsulation, uses configuration set by \"set"
2515 .priv = PRIV_ACTION(NVGRE_ENCAP,
2516 sizeof(struct action_nvgre_encap_data)),
2517 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2518 .call = parse_vc_action_nvgre_encap,
2520 [ACTION_NVGRE_DECAP] = {
2521 .name = "nvgre_decap",
2522 .help = "Performs a decapsulation action by stripping all"
2523 " headers of the NVGRE tunnel network overlay from the"
2525 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2526 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2529 [ACTION_SET_IPV4_SRC] = {
2530 .name = "set_ipv4_src",
2531 .help = "Set a new IPv4 source address in the outermost"
2533 .priv = PRIV_ACTION(SET_IPV4_SRC,
2534 sizeof(struct rte_flow_action_set_ipv4)),
2535 .next = NEXT(action_set_ipv4_src),
2538 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2539 .name = "ipv4_addr",
2540 .help = "new IPv4 source address to set",
2541 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2542 .args = ARGS(ARGS_ENTRY_HTON
2543 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2544 .call = parse_vc_conf,
2546 [ACTION_SET_IPV4_DST] = {
2547 .name = "set_ipv4_dst",
2548 .help = "Set a new IPv4 destination address in the outermost"
2550 .priv = PRIV_ACTION(SET_IPV4_DST,
2551 sizeof(struct rte_flow_action_set_ipv4)),
2552 .next = NEXT(action_set_ipv4_dst),
2555 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2556 .name = "ipv4_addr",
2557 .help = "new IPv4 destination address to set",
2558 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2559 .args = ARGS(ARGS_ENTRY_HTON
2560 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2561 .call = parse_vc_conf,
2563 [ACTION_SET_IPV6_SRC] = {
2564 .name = "set_ipv6_src",
2565 .help = "Set a new IPv6 source address in the outermost"
2567 .priv = PRIV_ACTION(SET_IPV6_SRC,
2568 sizeof(struct rte_flow_action_set_ipv6)),
2569 .next = NEXT(action_set_ipv6_src),
2572 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2573 .name = "ipv6_addr",
2574 .help = "new IPv6 source address to set",
2575 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2576 .args = ARGS(ARGS_ENTRY_HTON
2577 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2578 .call = parse_vc_conf,
2580 [ACTION_SET_IPV6_DST] = {
2581 .name = "set_ipv6_dst",
2582 .help = "Set a new IPv6 destination address in the outermost"
2584 .priv = PRIV_ACTION(SET_IPV6_DST,
2585 sizeof(struct rte_flow_action_set_ipv6)),
2586 .next = NEXT(action_set_ipv6_dst),
2589 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2590 .name = "ipv6_addr",
2591 .help = "new IPv6 destination address to set",
2592 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2593 .args = ARGS(ARGS_ENTRY_HTON
2594 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2595 .call = parse_vc_conf,
2597 [ACTION_SET_TP_SRC] = {
2598 .name = "set_tp_src",
2599 .help = "set a new source port number in the outermost"
2601 .priv = PRIV_ACTION(SET_TP_SRC,
2602 sizeof(struct rte_flow_action_set_tp)),
2603 .next = NEXT(action_set_tp_src),
2606 [ACTION_SET_TP_SRC_TP_SRC] = {
2608 .help = "new source port number to set",
2609 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2610 .args = ARGS(ARGS_ENTRY_HTON
2611 (struct rte_flow_action_set_tp, port)),
2612 .call = parse_vc_conf,
2614 [ACTION_SET_TP_DST] = {
2615 .name = "set_tp_dst",
2616 .help = "set a new destination port number in the outermost"
2618 .priv = PRIV_ACTION(SET_TP_DST,
2619 sizeof(struct rte_flow_action_set_tp)),
2620 .next = NEXT(action_set_tp_dst),
2623 [ACTION_SET_TP_DST_TP_DST] = {
2625 .help = "new destination port number to set",
2626 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2627 .args = ARGS(ARGS_ENTRY_HTON
2628 (struct rte_flow_action_set_tp, port)),
2629 .call = parse_vc_conf,
2631 [ACTION_MAC_SWAP] = {
2633 .help = "Swap the source and destination MAC addresses"
2634 " in the outermost Ethernet header",
2635 .priv = PRIV_ACTION(MAC_SWAP, 0),
2636 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2641 /** Remove and return last entry from argument stack. */
2642 static const struct arg *
2643 pop_args(struct context *ctx)
2645 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2648 /** Add entry on top of the argument stack. */
2650 push_args(struct context *ctx, const struct arg *arg)
2652 if (ctx->args_num == CTX_STACK_SIZE)
2654 ctx->args[ctx->args_num++] = arg;
2658 /** Spread value into buffer according to bit-mask. */
/*
 * NOTE(review): this listing is elided (the embedded original line numbers
 * jump, e.g. 2662 -> 2670). The return type, the declaration of `sub`, the
 * enclosing loop and the function's return statement are not visible here;
 * the comments below describe only the visible fragments.
 */
2660 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
/* Start from the mask size; presumably walked backwards over mask bytes. */
2662 	uint32_t i = arg->size;
/* Byte-order-specific branch: little-endian hosts need index adjustment. */
2670 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2679 		unsigned int shift = 0;
/* Destination byte: offset into dst, adjusted by `sub` (declared in an
 * elided line) — TODO confirm `sub` semantics against the full source. */
2680 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Visit every bit position covered by this mask byte. */
2682 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip bit positions not set in the mask. */
2683 			if (!(arg->mask[i] & (1 << shift)))
/* Clear the masked bit, then deposit the current low bit of val there;
 * val is presumably shifted right in an elided line — verify. */
2688 			*buf &= ~(1 << shift);
2689 			*buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * Behaves like strncmp(full, partial, partial_len) except that, when the
 * first partial_len characters match and @p full is longer, the next
 * character of @p full is returned (non-zero) instead of 0. In other words,
 * 0 means @p partial fully covers @p full.
 *
 * @param full         complete NUL-terminated reference string.
 * @param partial      possibly-truncated user input (need not be terminated
 *                     within partial_len).
 * @param partial_len  number of significant characters in @p partial.
 * @return 0 on a full match, non-zero otherwise.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	/* Prefix matched; a full string no longer than the prefix is a match. */
	if (strlen(full) <= partial_len)
		return 0;
	/* Otherwise report the first uncompared character of full (non-zero). */
	return full[partial_len];
}
2711 * Parse a prefix length and generate a bit-mask.
2713 * Last argument (ctx->args) is retrieved to determine mask size, storage
2714 * location and whether the result must use network byte ordering.
/*
 * NOTE(review): elided listing — return type, local declarations (u, end,
 * bytes, extra, v, errno handling) and all error/return paths are missing
 * from this view; comments below cover only the visible fragments.
 */
2717 parse_prefix(struct context *ctx, const struct token *token,
2718 const char *str, unsigned int len,
2719 void *buf, unsigned int size)
/* The argument on top of the stack describes where/how to store the mask. */
2721 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n most-significant bits set (prefix tail). */
2722 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2729 /* Argument is expected. */
/* Parse the numeric prefix length; reject trailing garbage or errno. */
2733 u = strtoumax(str, &end, 0);
2734 if (errno || (size_t)(end - str) != len)
/* Probe call (NULL dst): presumably returns the bit-field width — verify. */
2739 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Bit-field case: spread value and all-ones mask through the bit-mask. */
2748 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2749 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Byte-array case: bounds-check the prefix against the field size. */
2756 if (bytes > size || bytes + !!extra > size)
2760 buf = (uint8_t *)ctx->object + arg->offset;
2761 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian (network-order target): fill 0xff from the high end. */
2763 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2764 memset(buf, 0x00, size - bytes);
/* Partial byte at the boundary gets the conv[] pattern. */
2766 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian layout: fill 0xff from the low end instead. */
2770 memset(buf, 0xff, bytes);
2771 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2773 ((uint8_t *)buf)[bytes] = conv[extra];
/* Object mask (when present) is fully set for the whole field. */
2776 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path restores the popped argument for the caller — TODO confirm. */
2779 push_args(ctx, arg);
2783 /** Default parsing function for token name matching. */
2785 parse_default(struct context *ctx, const struct token *token,
2786 const char *str, unsigned int len,
2787 void *buf, unsigned int size)
2792 if (strcmp_partial(token->name, str, len))
2797 /** Parse flow command, initialize output buffer for subsequent tokens. */
/*
 * NOTE(review): elided listing — return type, braces and the error-return
 * statements between the visible checks are missing from this view.
 */
2799 parse_init(struct context *ctx, const struct token *token,
2800 const char *str, unsigned int len,
2801 void *buf, unsigned int size)
2803 struct buffer *out = buf;
2805 /* Token name must match. */
2806 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2808 /* Nothing else to do if there is no buffer. */
2811 /* Make sure buffer is large enough. */
2812 if (size < sizeof(*out))
2814 /* Initialize buffer. */
2815 memset(out, 0x00, sizeof(*out));
/* 0x22 appears to be a poison pattern for the unused tail — TODO confirm. */
2816 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2819 ctx->objmask = NULL;
2823 /** Parse tokens for validate/create commands. */
/*
 * NOTE(review): elided listing — return type, braces, local declarations
 * (data, data_size), several case labels/breaks and error returns are
 * missing from this view; comments describe only the visible fragments.
 */
2825 parse_vc(struct context *ctx, const struct token *token,
2826 const char *str, unsigned int len,
2827 void *buf, unsigned int size)
2829 struct buffer *out = buf;
2833 /* Token name must match. */
2834 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2836 /* Nothing else to do if there is no buffer. */
/* First token: record the command and anchor the data area at buffer end. */
2839 if (!out->command) {
2840 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2842 if (sizeof(*out) > size)
2844 out->command = ctx->curr;
2847 ctx->objmask = NULL;
/* Object data grows downward from the end of the output buffer. */
2848 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the attr structure. */
2852 ctx->object = &out->args.vc.attr;
2853 ctx->objmask = NULL;
2854 switch (ctx->curr) {
2859 out->args.vc.attr.ingress = 1;
2862 out->args.vc.attr.egress = 1;
2865 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: place the item array right after the buffer header. */
2868 out->args.vc.pattern =
2869 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2871 ctx->object = out->args.vc.pattern;
2872 ctx->objmask = NULL;
/* "actions" keyword: action array follows the completed pattern array. */
2875 out->args.vc.actions =
2876 (void *)RTE_ALIGN_CEIL((uintptr_t)
2877 (out->args.vc.pattern +
2878 out->args.vc.pattern_n),
2880 ctx->object = out->args.vc.actions;
2881 ctx->objmask = NULL;
/* Pattern item token: carve spec/last/mask storage from the data area. */
2888 if (!out->args.vc.actions) {
2889 const struct parse_item_priv *priv = token->priv;
2890 struct rte_flow_item *item =
2891 out->args.vc.pattern + out->args.vc.pattern_n;
2893 data_size = priv->size * 3; /* spec, last, mask */
2894 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2895 (out->args.vc.data - data_size),
/* Fail when the growing arrays and the shrinking data area would collide. */
2897 if ((uint8_t *)item + sizeof(*item) > data)
2899 *item = (struct rte_flow_item){
2902 ++out->args.vc.pattern_n;
2904 ctx->objmask = NULL;
/* Action token: carve configuration storage from the data area. */
2906 const struct parse_action_priv *priv = token->priv;
2907 struct rte_flow_action *action =
2908 out->args.vc.actions + out->args.vc.actions_n;
2910 data_size = priv->size; /* configuration */
2911 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2912 (out->args.vc.data - data_size),
2914 if ((uint8_t *)action + sizeof(*action) > data)
2916 *action = (struct rte_flow_action){
/* Zero-size configurations get a NULL conf pointer. */
2918 .conf = data_size ? data : NULL,
2920 ++out->args.vc.actions_n;
2921 ctx->object = action;
2922 ctx->objmask = NULL;
/* Claim the carved region and remember its size for later tokens. */
2924 memset(data, 0, data_size);
2925 out->args.vc.data = data;
2926 ctx->objdata = data_size;
2930 /** Parse pattern item parameter type. */
/*
 * NOTE(review): elided listing — return type, braces, the `index` local,
 * case bodies/breaks and error returns are missing from this view.
 */
2932 parse_vc_spec(struct context *ctx, const struct token *token,
2933 const char *str, unsigned int len,
2934 void *buf, unsigned int size)
2936 struct buffer *out = buf;
2937 struct rte_flow_item *item;
2943 /* Token name must match. */
2944 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2946 /* Parse parameter types. */
2947 switch (ctx->curr) {
2948 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2954 case ITEM_PARAM_SPEC:
2957 case ITEM_PARAM_LAST:
2960 case ITEM_PARAM_PREFIX:
2961 /* Modify next token to expect a prefix. */
2962 if (ctx->next_num < 2)
2964 ctx->next[ctx->next_num - 2] = prefix;
2966 case ITEM_PARAM_MASK:
2972 /* Nothing else to do if there is no buffer. */
/* A parameter keyword must follow an actual pattern item. */
2975 if (!out->args.vc.pattern_n)
2977 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* The carved region holds three equal parts: spec, last, mask. */
2978 data_size = ctx->objdata / 3; /* spec, last, mask */
2979 /* Point to selected object. */
2980 ctx->object = out->args.vc.data + (data_size * index);
2982 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2983 item->mask = ctx->objmask;
2985 ctx->objmask = NULL;
2986 /* Update relevant item pointer. */
/* Array-of-pointers trick: select spec/last/mask slot by index. */
2987 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2992 /** Parse action configuration field. */
2994 parse_vc_conf(struct context *ctx, const struct token *token,
2995 const char *str, unsigned int len,
2996 void *buf, unsigned int size)
2998 struct buffer *out = buf;
3001 /* Token name must match. */
3002 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3004 /* Nothing else to do if there is no buffer. */
3007 /* Point to selected object. */
3008 ctx->object = out->args.vc.data;
3009 ctx->objmask = NULL;
3013 /** Parse RSS action. */
/*
 * NOTE(review): elided listing — return type, braces, locals (ret, i),
 * the .types default, error returns and the final return are missing from
 * this view; comments below cover only the visible fragments.
 */
3015 parse_vc_action_rss(struct context *ctx, const struct token *token,
3016 const char *str, unsigned int len,
3017 void *buf, unsigned int size)
3019 struct buffer *out = buf;
3020 struct rte_flow_action *action;
3021 struct action_rss_data *action_rss_data;
/* Delegate generic action bookkeeping to parse_vc() first. */
3025 ret = parse_vc(ctx, token, str, len, buf, size);
3028 /* Nothing else to do if there is no buffer. */
3031 if (!out->args.vc.actions_n)
3033 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3034 /* Point to selected object. */
3035 ctx->object = out->args.vc.data;
3036 ctx->objmask = NULL;
3037 /* Set up default configuration. */
3038 action_rss_data = ctx->object;
3039 *action_rss_data = (struct action_rss_data){
3040 .conf = (struct rte_flow_action_rss){
3041 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3044 .key_len = sizeof(action_rss_data->key),
3045 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3046 .key = action_rss_data->key,
3047 .queue = action_rss_data->queue,
/* Placeholder key text doubles as the default hash key bytes. */
3049 .key = "testpmd's default RSS hash key, "
3050 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3053 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3054 action_rss_data->queue[i] = i;
/* When a concrete port is known, clamp key_len to the device's size. */
3055 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3056 ctx->port != (portid_t)RTE_PORT_ALL) {
3057 struct rte_eth_dev_info info;
3059 rte_eth_dev_info_get(ctx->port, &info);
3060 action_rss_data->conf.key_len =
3061 RTE_MIN(sizeof(action_rss_data->key),
3062 info.hash_key_size);
/* Wire the configuration into the rte_flow action just created. */
3064 action->conf = &action_rss_data->conf;
3069 * Parse func field for RSS action.
3071 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3072 * ACTION_RSS_FUNC_* index that called this function.
/*
 * NOTE(review): elided listing — return type, braces, `break`s, the
 * default case and the final return are missing from this view.
 */
3075 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3076 const char *str, unsigned int len,
3077 void *buf, unsigned int size)
3079 struct action_rss_data *action_rss_data;
3080 enum rte_eth_hash_function func;
3084 /* Token name must match. */
3085 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the current token index onto the hash function enumerator. */
3087 switch (ctx->curr) {
3088 case ACTION_RSS_FUNC_DEFAULT:
3089 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3091 case ACTION_RSS_FUNC_TOEPLITZ:
3092 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3094 case ACTION_RSS_FUNC_SIMPLE_XOR:
3095 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* Store into the configuration selected by the enclosing RSS action. */
3102 action_rss_data = ctx->object;
3103 action_rss_data->conf.func = func;
3108 * Parse type field for RSS action.
3110 * Valid tokens are type field names and the "end" token.
/*
 * The upper 16 bits of ctx->objdata flag that at least one type token
 * has already been consumed; the accumulated conf.types is reset only
 * on the first token of the list.
 */
3113 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3114 const char *str, unsigned int len,
3115 void *buf, unsigned int size)
3117 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3118 struct action_rss_data *action_rss_data;
3124 if (ctx->curr != ACTION_RSS_TYPE)
/* First type token: start from an empty type set. */
3126 if (!(ctx->objdata >> 16) && ctx->object) {
3127 action_rss_data = ctx->object;
3128 action_rss_data->conf.types = 0;
/* "end" terminates the list and clears the progress flag. */
3130 if (!strcmp_partial("end", str, len)) {
3131 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3134 for (i = 0; rss_type_table[i].str; ++i)
3135 if (!strcmp_partial(rss_type_table[i].str, str, len))
3137 if (!rss_type_table[i].str)
/* Mark progress and re-queue this token to accept further types. */
3139 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3141 if (ctx->next_num == RTE_DIM(ctx->next))
3143 ctx->next[ctx->next_num++] = next;
3146 action_rss_data = ctx->object;
3147 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3152 * Parse queue field for RSS action.
3154 * Valid tokens are queue indices and the "end" token.
/*
 * The upper 16 bits of ctx->objdata hold the index of the queue slot
 * being filled; parse_int() is pointed at queue[i] through an
 * ARGS_ENTRY_ARB argument pushed on the stack.
 */
3157 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3158 const char *str, unsigned int len,
3159 void *buf, unsigned int size)
3161 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3162 struct action_rss_data *action_rss_data;
3169 if (ctx->curr != ACTION_RSS_QUEUE)
3171 i = ctx->objdata >> 16;
/* "end" terminates the queue list. */
3172 if (!strcmp_partial("end", str, len)) {
3173 ctx->objdata &= 0xffff;
/* Reject more queues than the static storage can hold. */
3176 if (i >= ACTION_RSS_QUEUE_NUM)
3179 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3180 i * sizeof(action_rss_data->queue[i]),
3181 sizeof(action_rss_data->queue[i]))))
3183 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Advance the slot index and re-queue this token for more entries. */
3189 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3191 if (ctx->next_num == RTE_DIM(ctx->next))
3193 ctx->next[ctx->next_num++] = next;
/* On "end": record the final count; an empty list yields a NULL queue. */
3197 action_rss_data = ctx->object;
3198 action_rss_data->conf.queue_num = i;
3199 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3203 /** Parse VXLAN encap action. */
/*
 * Builds a default encapsulation item stack
 * (ETH / VLAN / IPv4|IPv6 / UDP / VXLAN / END) from the global
 * vxlan_encap_conf settings and attaches it to the action just
 * registered by parse_vc().
 */
3205 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3206 const char *str, unsigned int len,
3207 void *buf, unsigned int size)
3209 struct buffer *out = buf;
3210 struct rte_flow_action *action;
3211 struct action_vxlan_encap_data *action_vxlan_encap_data;
3214 ret = parse_vc(ctx, token, str, len, buf, size);
3217 /* Nothing else to do if there is no buffer. */
3220 if (!out->args.vc.actions_n)
3222 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3223 /* Point to selected object. */
3224 ctx->object = out->args.vc.data;
3225 ctx->objmask = NULL;
3226 /* Set up default configuration. */
3227 action_vxlan_encap_data = ctx->object;
3228 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3229 .conf = (struct rte_flow_action_vxlan_encap){
3230 .definition = action_vxlan_encap_data->items,
3234 .type = RTE_FLOW_ITEM_TYPE_ETH,
3235 .spec = &action_vxlan_encap_data->item_eth,
3236 .mask = &rte_flow_item_eth_mask,
3239 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3240 .spec = &action_vxlan_encap_data->item_vlan,
3241 .mask = &rte_flow_item_vlan_mask,
3244 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3245 .spec = &action_vxlan_encap_data->item_ipv4,
3246 .mask = &rte_flow_item_ipv4_mask,
3249 .type = RTE_FLOW_ITEM_TYPE_UDP,
3250 .spec = &action_vxlan_encap_data->item_udp,
3251 .mask = &rte_flow_item_udp_mask,
3254 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3255 .spec = &action_vxlan_encap_data->item_vxlan,
3256 .mask = &rte_flow_item_vxlan_mask,
3259 .type = RTE_FLOW_ITEM_TYPE_END,
/* Header field defaults come from the global vxlan_encap_conf. */
3264 .tci = vxlan_encap_conf.vlan_tci,
3268 .src_addr = vxlan_encap_conf.ipv4_src,
3269 .dst_addr = vxlan_encap_conf.ipv4_dst,
3272 .src_port = vxlan_encap_conf.udp_src,
3273 .dst_port = vxlan_encap_conf.udp_dst,
3275 .item_vxlan.flags = 0,
3277 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3278 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3279 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3280 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
/* When IPv6 is selected, replace the L3 slot (items[2]) with IPv6. */
3281 if (!vxlan_encap_conf.select_ipv4) {
3282 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3283 &vxlan_encap_conf.ipv6_src,
3284 sizeof(vxlan_encap_conf.ipv6_src));
3285 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3286 &vxlan_encap_conf.ipv6_dst,
3287 sizeof(vxlan_encap_conf.ipv6_dst));
3288 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3289 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3290 .spec = &action_vxlan_encap_data->item_ipv6,
3291 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize the VLAN slot (items[1]). */
3294 if (!vxlan_encap_conf.select_vlan)
3295 action_vxlan_encap_data->items[1].type =
3296 RTE_FLOW_ITEM_TYPE_VOID;
3297 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3298 RTE_DIM(vxlan_encap_conf.vni));
3299 action->conf = &action_vxlan_encap_data->conf;
3303 /** Parse NVGRE encap action. */
/*
 * Mirror of parse_vc_action_vxlan_encap() for NVGRE: builds a default
 * ETH / VLAN / IPv4|IPv6 / NVGRE / END item stack from the global
 * nvgre_encap_conf settings.
 */
3305 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3306 const char *str, unsigned int len,
3307 void *buf, unsigned int size)
3309 struct buffer *out = buf;
3310 struct rte_flow_action *action;
3311 struct action_nvgre_encap_data *action_nvgre_encap_data;
3314 ret = parse_vc(ctx, token, str, len, buf, size);
3317 /* Nothing else to do if there is no buffer. */
3320 if (!out->args.vc.actions_n)
3322 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3323 /* Point to selected object. */
3324 ctx->object = out->args.vc.data;
3325 ctx->objmask = NULL;
3326 /* Set up default configuration. */
3327 action_nvgre_encap_data = ctx->object;
3328 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3329 .conf = (struct rte_flow_action_nvgre_encap){
3330 .definition = action_nvgre_encap_data->items,
3334 .type = RTE_FLOW_ITEM_TYPE_ETH,
3335 .spec = &action_nvgre_encap_data->item_eth,
3336 .mask = &rte_flow_item_eth_mask,
3339 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3340 .spec = &action_nvgre_encap_data->item_vlan,
3341 .mask = &rte_flow_item_vlan_mask,
3344 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3345 .spec = &action_nvgre_encap_data->item_ipv4,
3346 .mask = &rte_flow_item_ipv4_mask,
3349 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3350 .spec = &action_nvgre_encap_data->item_nvgre,
3351 .mask = &rte_flow_item_nvgre_mask,
3354 .type = RTE_FLOW_ITEM_TYPE_END,
/* Header field defaults come from the global nvgre_encap_conf. */
3359 .tci = nvgre_encap_conf.vlan_tci,
3363 .src_addr = nvgre_encap_conf.ipv4_src,
3364 .dst_addr = nvgre_encap_conf.ipv4_dst,
3366 .item_nvgre.flow_id = 0,
3368 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3369 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3370 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3371 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
/* When IPv6 is selected, replace the L3 slot (items[2]) with IPv6. */
3372 if (!nvgre_encap_conf.select_ipv4) {
3373 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3374 &nvgre_encap_conf.ipv6_src,
3375 sizeof(nvgre_encap_conf.ipv6_src));
3376 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3377 &nvgre_encap_conf.ipv6_dst,
3378 sizeof(nvgre_encap_conf.ipv6_dst));
3379 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3380 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3381 .spec = &action_nvgre_encap_data->item_ipv6,
3382 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize the VLAN slot (items[1]). */
3385 if (!nvgre_encap_conf.select_vlan)
3386 action_nvgre_encap_data->items[1].type =
3387 RTE_FLOW_ITEM_TYPE_VOID;
3388 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3389 RTE_DIM(nvgre_encap_conf.tni));
3390 action->conf = &action_nvgre_encap_data->conf;
3394 /** Parse tokens for destroy command. */
/*
 * First invocation (command token) initializes the output buffer and
 * points the rule ID array just past it; subsequent invocations append
 * one rule ID per call, bounds-checked against the buffer size.
 */
3396 parse_destroy(struct context *ctx, const struct token *token,
3397 const char *str, unsigned int len,
3398 void *buf, unsigned int size)
3400 struct buffer *out = buf;
3402 /* Token name must match. */
3403 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3405 /* Nothing else to do if there is no buffer. */
3408 if (!out->command) {
3409 if (ctx->curr != DESTROY)
3411 if (sizeof(*out) > size)
3413 out->command = ctx->curr;
3416 ctx->objmask = NULL;
/* Rule ID storage lives immediately after the buffer header. */
3417 out->args.destroy.rule =
3418 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to append a rule ID that would overflow the buffer. */
3422 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
3423 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
3426 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
3427 ctx->objmask = NULL;
3431 /** Parse tokens for flush command. */
/* Flush takes no arguments: only the command token itself is recorded. */
3433 parse_flush(struct context *ctx, const struct token *token,
3434 const char *str, unsigned int len,
3435 void *buf, unsigned int size)
3437 struct buffer *out = buf;
3439 /* Token name must match. */
3440 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3442 /* Nothing else to do if there is no buffer. */
3445 if (!out->command) {
3446 if (ctx->curr != FLUSH)
3448 if (sizeof(*out) > size)
3450 out->command = ctx->curr;
3453 ctx->objmask = NULL;
3458 /** Parse tokens for query command. */
/*
 * Records the command on first invocation; the rule ID and action to
 * query are filled in by later tokens (elided from this view).
 */
3460 parse_query(struct context *ctx, const struct token *token,
3461 const char *str, unsigned int len,
3462 void *buf, unsigned int size)
3464 struct buffer *out = buf;
3466 /* Token name must match. */
3467 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3469 /* Nothing else to do if there is no buffer. */
3472 if (!out->command) {
3473 if (ctx->curr != QUERY)
3475 if (sizeof(*out) > size)
3477 out->command = ctx->curr;
3480 ctx->objmask = NULL;
3485 /** Parse action names. */
/*
 * Matches "str" against the names in next_action[] and stores the
 * matched action's private data at the offset described by the popped
 * argument; the argument is pushed back on failure so the stack stays
 * balanced.
 */
3487 parse_action(struct context *ctx, const struct token *token,
3488 const char *str, unsigned int len,
3489 void *buf, unsigned int size)
3491 struct buffer *out = buf;
3492 const struct arg *arg = pop_args(ctx);
3496 /* Argument is expected. */
3499 /* Parse action name. */
3500 for (i = 0; next_action[i]; ++i) {
3501 const struct parse_action_priv *priv;
3503 token = &token_list[next_action[i]];
3504 if (strcmp_partial(token->name, str, len))
3510 memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: restore the argument before reporting the error. */
3516 push_args(ctx, arg);
3520 /** Parse tokens for list command. */
/*
 * Same layout strategy as parse_destroy(): the group ID array is
 * placed right after the buffer header and grows one entry per call.
 */
3522 parse_list(struct context *ctx, const struct token *token,
3523 const char *str, unsigned int len,
3524 void *buf, unsigned int size)
3526 struct buffer *out = buf;
3528 /* Token name must match. */
3529 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3531 /* Nothing else to do if there is no buffer. */
3534 if (!out->command) {
3535 if (ctx->curr != LIST)
3537 if (sizeof(*out) > size)
3539 out->command = ctx->curr;
3542 ctx->objmask = NULL;
3543 out->args.list.group =
3544 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to append a group ID that would overflow the buffer. */
3548 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
3549 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
3552 ctx->object = out->args.list.group + out->args.list.group_n++;
3553 ctx->objmask = NULL;
3557 /** Parse tokens for isolate command. */
/* Records the command; the boolean "set" argument follows separately. */
3559 parse_isolate(struct context *ctx, const struct token *token,
3560 const char *str, unsigned int len,
3561 void *buf, unsigned int size)
3563 struct buffer *out = buf;
3565 /* Token name must match. */
3566 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3568 /* Nothing else to do if there is no buffer. */
3571 if (!out->command) {
3572 if (ctx->curr != ISOLATE)
3574 if (sizeof(*out) > size)
3576 out->command = ctx->curr;
3579 ctx->objmask = NULL;
3585 * Parse signed/unsigned integers 8 to 64-bit long.
3587 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * The popped argument describes destination offset/size, signedness,
 * min/max bounds, byte order (hton) and optional bit-field layout.
 */
3591 parse_int(struct context *ctx, const struct token *token,
3592 const char *str, unsigned int len,
3593 void *buf, unsigned int size)
3595 const struct arg *arg = pop_args(ctx);
3600 /* Argument is expected. */
/* Signed values go through strtoimax() to preserve sign extension. */
3605 (uintmax_t)strtoimax(str, &end, 0) :
3606 strtoumax(str, &end, 0);
/* The whole token must have been consumed by the conversion. */
3607 if (errno || (size_t)(end - str) != len)
/* Range-check against arg->min/max with the matching signedness. */
3610 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
3611 (intmax_t)u > (intmax_t)arg->max)) ||
3612 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field destinations: fill value and all-ones mask separately. */
3617 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
3618 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3622 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination width; honor network byte order if set. */
3626 case sizeof(uint8_t):
3627 *(uint8_t *)buf = u;
3629 case sizeof(uint16_t):
3630 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are stored byte by byte in the requested order. */
3632 case sizeof(uint8_t [3]):
3633 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3635 ((uint8_t *)buf)[0] = u;
3636 ((uint8_t *)buf)[1] = u >> 8;
3637 ((uint8_t *)buf)[2] = u >> 16;
3641 ((uint8_t *)buf)[0] = u >> 16;
3642 ((uint8_t *)buf)[1] = u >> 8;
3643 ((uint8_t *)buf)[2] = u;
3645 case sizeof(uint32_t):
3646 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
3648 case sizeof(uint64_t):
3649 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is active. */
3654 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
3656 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument so the stack stays balanced. */
3661 push_args(ctx, arg);
3668 * Three arguments (ctx->args) are retrieved from the stack to store data,
3669 * its actual length and address (in that order).
3672 parse_string(struct context *ctx, const struct token *token,
3673 const char *str, unsigned int len,
3674 void *buf, unsigned int size)
3676 const struct arg *arg_data = pop_args(ctx);
3677 const struct arg *arg_len = pop_args(ctx);
3678 const struct arg *arg_addr = pop_args(ctx);
3679 char tmp[16]; /* Ought to be enough. */
3682 /* Arguments are expected. */
/* Partial pops are undone so the argument stack stays balanced. */
3686 push_args(ctx, arg_data);
3690 push_args(ctx, arg_len);
3691 push_args(ctx, arg_data);
3694 size = arg_data->size;
3695 /* Bit-mask fill is not supported. */
3696 if (arg_data->mask || size < len)
3700 /* Let parse_int() fill length information first. */
3701 ret = snprintf(tmp, sizeof(tmp), "%u", len);
3704 push_args(ctx, arg_len);
3705 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
/* Copy string data and zero-pad the remainder of the field. */
3710 buf = (uint8_t *)ctx->object + arg_data->offset;
3711 /* Output buffer is not necessarily NUL-terminated. */
3712 memcpy(buf, str, len);
3713 memset((uint8_t *)buf + len, 0x00, size - len);
3715 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
3716 /* Save address if requested. */
3717 if (arg_addr->size) {
3718 memcpy((uint8_t *)ctx->object + arg_addr->offset,
3720 (uint8_t *)ctx->object + arg_data->offset
3724 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
3726 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments. */
3732 push_args(ctx, arg_addr);
3733 push_args(ctx, arg_len);
3734 push_args(ctx, arg_data);
3739 * Parse a MAC address.
3741 * Last argument (ctx->args) is retrieved to determine storage size and
3745 parse_mac_addr(struct context *ctx, const struct token *token,
3746 const char *str, unsigned int len,
3747 void *buf, unsigned int size)
3749 const struct arg *arg = pop_args(ctx);
3750 struct ether_addr tmp;
3754 /* Argument is expected. */
3758 /* Bit-mask fill is not supported. */
3759 if (arg->mask || size != sizeof(tmp))
3761 /* Only network endian is supported. */
/* Delegate the textual MAC parsing to the cmdline library helper. */
3764 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
3765 if (ret < 0 || (unsigned int)ret != len)
/* Store the value; the mask, when active, is filled with all-ones. */
3769 buf = (uint8_t *)ctx->object + arg->offset;
3770 memcpy(buf, &tmp, size);
3772 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
3775 push_args(ctx, arg);
3780 * Parse an IPv4 address.
3782 * Last argument (ctx->args) is retrieved to determine storage size and
3786 parse_ipv4_addr(struct context *ctx, const struct token *token,
3787 const char *str, unsigned int len,
3788 void *buf, unsigned int size)
3790 const struct arg *arg = pop_args(ctx);
3795 /* Argument is expected. */
3799 /* Bit-mask fill is not supported. */
3800 if (arg->mask || size != sizeof(tmp))
3802 /* Only network endian is supported. */
/* NUL-terminate a local copy since "str" is not NUL-terminated. */
3805 memcpy(str2, str, len);
3807 ret = inet_pton(AF_INET, str2, &tmp);
3809 /* Attempt integer parsing. */
/* Not dotted-quad notation: fall back to plain integer syntax. */
3810 push_args(ctx, arg);
3811 return parse_int(ctx, token, str, len, buf, size);
/* Store the value; the mask, when active, is filled with all-ones. */
3815 buf = (uint8_t *)ctx->object + arg->offset;
3816 memcpy(buf, &tmp, size);
3818 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
3821 push_args(ctx, arg);
3826 * Parse an IPv6 address.
3828 * Last argument (ctx->args) is retrieved to determine storage size and
3832 parse_ipv6_addr(struct context *ctx, const struct token *token,
3833 const char *str, unsigned int len,
3834 void *buf, unsigned int size)
3836 const struct arg *arg = pop_args(ctx);
3838 struct in6_addr tmp;
3842 /* Argument is expected. */
3846 /* Bit-mask fill is not supported. */
3847 if (arg->mask || size != sizeof(tmp))
3849 /* Only network endian is supported. */
/* NUL-terminate a local copy since "str" is not NUL-terminated. */
3852 memcpy(str2, str, len);
3854 ret = inet_pton(AF_INET6, str2, &tmp);
/* Store the value; the mask, when active, is filled with all-ones. */
3859 buf = (uint8_t *)ctx->object + arg->offset;
3860 memcpy(buf, &tmp, size);
3862 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument. */
3865 push_args(ctx, arg);
3869 /** Boolean values (even indices stand for false). */
/* Presumably a NULL-terminated list of accepted spellings -- entries
 * are elided from this view; confirm against full source. */
3870 static const char *const boolean_name[] = {
3880 * Parse a boolean value.
3882 * Last argument (ctx->args) is retrieved to determine storage size and
3886 parse_boolean(struct context *ctx, const struct token *token,
3887 const char *str, unsigned int len,
3888 void *buf, unsigned int size)
3890 const struct arg *arg = pop_args(ctx);
3894 /* Argument is expected. */
/* Look the token up among the known boolean spellings. */
3897 for (i = 0; boolean_name[i]; ++i)
3898 if (!strcmp_partial(boolean_name[i], str, len))
3900 /* Process token as integer. */
/* Even indices mean false ("0"), odd indices mean true ("1"). */
3901 if (boolean_name[i])
3902 str = i & 1 ? "1" : "0";
3903 push_args(ctx, arg);
3904 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted one. */
3905 return ret > 0 ? (int)len : ret;
3908 /** Parse port and update context. */
/*
 * Parses the port number into a scratch buffer, then copies it into
 * ctx->port so later handlers can query per-port information.
 */
3910 parse_port(struct context *ctx, const struct token *token,
3911 const char *str, unsigned int len,
3912 void *buf, unsigned int size)
3914 struct buffer *out = &(struct buffer){ .port = 0 };
3922 ctx->objmask = NULL;
3923 size = sizeof(*out);
3925 ret = parse_int(ctx, token, str, len, out, size);
3927 ctx->port = out->port;
3933 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
3935 comp_none(struct context *ctx, const struct token *token,
3936 unsigned int ent, char *buf, unsigned int size)
3946 /** Complete boolean values. */
/* With buf set, writes entry "ent"; otherwise counts the entries. */
3948 comp_boolean(struct context *ctx, const struct token *token,
3949 unsigned int ent, char *buf, unsigned int size)
3955 for (i = 0; boolean_name[i]; ++i)
3956 if (buf && i == ent)
3957 return snprintf(buf, size, "%s", boolean_name[i]);
3963 /** Complete action names. */
/* With buf set, writes entry "ent"; otherwise counts the entries. */
3965 comp_action(struct context *ctx, const struct token *token,
3966 unsigned int ent, char *buf, unsigned int size)
3972 for (i = 0; next_action[i]; ++i)
3973 if (buf && i == ent)
3974 return snprintf(buf, size, "%s",
3975 token_list[next_action[i]].name)
3981 /** Complete available ports. */
/* Iterates over attached ethdev ports to produce completion entries. */
3983 comp_port(struct context *ctx, const struct token *token,
3984 unsigned int ent, char *buf, unsigned int size)
3991 RTE_ETH_FOREACH_DEV(p) {
3992 if (buf && i == ent)
3993 return snprintf(buf, size, "%u", p);
4001 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port currently selected in the context;
 * requires a valid, specific port (not RTE_PORT_ALL).
 */
4003 comp_rule_id(struct context *ctx, const struct token *token,
4004 unsigned int ent, char *buf, unsigned int size)
4007 struct rte_port *port;
4008 struct port_flow *pf;
4011 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4012 ctx->port == (portid_t)RTE_PORT_ALL)
4014 port = &ports[ctx->port];
4015 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4016 if (buf && i == ent)
4017 return snprintf(buf, size, "%u", pf->id);
4025 /** Complete type field for RSS action. */
/* Entries are the RSS type table names followed by a final "end". */
4027 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4028 unsigned int ent, char *buf, unsigned int size)
4034 for (i = 0; rss_type_table[i].str; ++i)
4039 return snprintf(buf, size, "%s", rss_type_table[ent].str);
4041 return snprintf(buf, size, "end");
4045 /** Complete queue field for RSS action. */
/* Entries are queue indices followed by a final "end". */
4047 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4048 unsigned int ent, char *buf, unsigned int size)
4055 return snprintf(buf, size, "%u", ent);
4057 return snprintf(buf, size, "end");
4061 /** Internal context. */
/* Single shared parser state; the cmdline API offers no per-call state. */
4062 static struct context cmd_flow_context;
4064 /** Global parser instance (cmdline API). */
/* Forward declaration; defined at the end of this file. */
4065 cmdline_parse_inst_t cmd_flow;
4067 /** Initialize context. */
/* Resets the shared parser state before a new command line is parsed. */
4069 cmd_flow_context_init(struct context *ctx)
4071 /* A full memset() is not necessary. */
4081 ctx->objmask = NULL;
4084 /** Parse a token (cmdline API). */
/*
 * Extracts the next whitespace/#-delimited token from "src", feeds it
 * to the candidate tokens queued in ctx->next, and on a match queues
 * that token's own successors and arguments.
 */
4086 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4089 struct context *ctx = &cmd_flow_context;
4090 const struct token *token;
4091 const enum index *list;
4096 token = &token_list[ctx->curr];
4097 /* Check argument length. */
/* Token text runs up to the first comment marker or whitespace. */
4100 for (len = 0; src[len]; ++len)
4101 if (src[len] == '#' || isspace(src[len]))
4105 /* Last argument and EOL detection. */
4106 for (i = len; src[i]; ++i)
4107 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4109 else if (!isspace(src[i])) {
4114 if (src[i] == '\r' || src[i] == '\n') {
4118 /* Initialize context if necessary. */
4119 if (!ctx->next_num) {
4122 ctx->next[ctx->next_num++] = token->next[0];
4124 /* Process argument through candidates. */
4125 ctx->prev = ctx->curr;
4126 list = ctx->next[ctx->next_num - 1];
4127 for (i = 0; list[i]; ++i) {
4128 const struct token *next = &token_list[list[i]];
4131 ctx->curr = list[i];
/* Tokens without a dedicated callback fall back to parse_default(). */
4133 tmp = next->call(ctx, next, src, len, result, size);
4135 tmp = parse_default(ctx, next, src, len, result, size);
4136 if (tmp == -1 || tmp != len)
4144 /* Push subsequent tokens if any. */
4146 for (i = 0; token->next[i]; ++i) {
4147 if (ctx->next_num == RTE_DIM(ctx->next))
4149 ctx->next[ctx->next_num++] = token->next[i];
4151 /* Push arguments if any. */
4153 for (i = 0; token->args[i]; ++i) {
4154 if (ctx->args_num == RTE_DIM(ctx->args))
4156 ctx->args[ctx->args_num++] = token->args[i];
4161 /** Return number of completion entries (cmdline API). */
/*
 * Counts candidate tokens; when exactly one candidate has a custom
 * completion callback, delegate the count to it instead.
 */
4163 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
4165 struct context *ctx = &cmd_flow_context;
4166 const struct token *token = &token_list[ctx->curr];
4167 const enum index *list;
4171 /* Count number of tokens in current list. */
4173 list = ctx->next[ctx->next_num - 1];
4175 list = token->next[0];
4176 for (i = 0; list[i]; ++i)
4181 * If there is a single token, use its completion callback, otherwise
4182 * return the number of entries.
4184 token = &token_list[list[0]];
4185 if (i == 1 && token->comp) {
4186 /* Save index for cmd_flow_get_help(). */
4187 ctx->prev = list[0];
4188 return token->comp(ctx, token, 0, NULL, 0);
4193 /** Return a completion entry (cmdline API). */
/*
 * Counterpart of cmd_flow_complete_get_nb(): produces the completion
 * string for entry "index", delegating to the token's own completion
 * callback when it is the single candidate.
 */
4195 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
4196 char *dst, unsigned int size)
4198 struct context *ctx = &cmd_flow_context;
4199 const struct token *token = &token_list[ctx->curr];
4200 const enum index *list;
4204 /* Count number of tokens in current list. */
4206 list = ctx->next[ctx->next_num - 1];
4208 list = token->next[0];
4209 for (i = 0; list[i]; ++i)
4213 /* If there is a single token, use its completion callback. */
4214 token = &token_list[list[0]];
4215 if (i == 1 && token->comp) {
4216 /* Save index for cmd_flow_get_help(). */
4217 ctx->prev = list[0];
4218 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
4220 /* Otherwise make sure the index is valid and use defaults. */
4223 token = &token_list[list[index]];
4224 snprintf(dst, size, "%s", token->name);
4225 /* Save index for cmd_flow_get_help(). */
4226 ctx->prev = list[index];
4230 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev saved by the completion callbacks above. */
4232 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
4234 struct context *ctx = &cmd_flow_context;
4235 const struct token *token = &token_list[ctx->prev];
4240 /* Set token type and update global help with details. */
4241 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help text; fall back to the token name. */
4243 cmd_flow.help_str = token->help;
4245 cmd_flow.help_str = token->name;
4249 /** Token definition template (cmdline API). */
/* All dynamic tokens share this single ops vtable. */
4250 static struct cmdline_token_hdr cmd_flow_token_hdr = {
4251 .ops = &(struct cmdline_token_ops){
4252 .parse = cmd_flow_parse,
4253 .complete_get_nb = cmd_flow_complete_get_nb,
4254 .complete_get_elt = cmd_flow_complete_get_elt,
4255 .get_help = cmd_flow_get_help,
4260 /** Populate the next dynamic token. */
/*
 * Called by the cmdline framework for each token slot; reinitializes
 * the shared context on the first slot and stops by leaving *hdr
 * untouched (NULL) when no further tokens are expected.
 */
4262 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
4263 cmdline_parse_token_hdr_t **hdr_inst)
4265 struct context *ctx = &cmd_flow_context;
4267 /* Always reinitialize context before requesting the first token. */
4268 if (!(hdr_inst - cmd_flow.tokens))
4269 cmd_flow_context_init(ctx)
4270 /* Return NULL when no more tokens are expected. */
4271 if (!ctx->next_num && ctx->curr) {
4275 /* Determine if command should end here. */
4276 if (ctx->eol && ctx->last && ctx->next_num) {
4277 const enum index *list = ctx->next[ctx->next_num - 1];
4280 for (i = 0; list[i]; ++i) {
4287 *hdr = &cmd_flow_token_hdr;
4290 /** Dispatch parsed buffer to function calls. */
/* Maps the parsed command buffer to the matching port_flow_* helper. */
4292 cmd_flow_parsed(const struct buffer *in)
4294 switch (in->command) {
4296 port_flow_validate(in->port, &in->args.vc.attr,
4297 in->args.vc.pattern, in->args.vc.actions);
4300 port_flow_create(in->port, &in->args.vc.attr,
4301 in->args.vc.pattern, in->args.vc.actions);
4304 port_flow_destroy(in->port, in->args.destroy.rule_n,
4305 in->args.destroy.rule);
4308 port_flow_flush(in->port);
4311 port_flow_query(in->port, in->args.query.rule,
4312 &in->args.query.action);
4315 port_flow_list(in->port, in->args.list.group_n,
4316 in->args.list.group);
4319 port_flow_isolate(in->port, in->args.isolate.set);
4326 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator and result dispatcher for the framework. */
4328 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
4331 cmd_flow_tok(arg0, arg2);
4333 cmd_flow_parsed(arg0);
4336 /** Global parser instance (cmdline API). */
4337 cmdline_parse_inst_t cmd_flow = {
4339 .data = NULL, /**< Unused. */
4340 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
4343 }, /**< Tokens are returned by cmd_flow_tok(). */