1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
111 ITEM_VLAN_INNER_TYPE,
143 ITEM_E_TAG_GRP_ECID_B,
162 ITEM_ARP_ETH_IPV4_SHA,
163 ITEM_ARP_ETH_IPV4_SPA,
164 ITEM_ARP_ETH_IPV4_THA,
165 ITEM_ARP_ETH_IPV4_TPA,
167 ITEM_IPV6_EXT_NEXT_HDR,
172 ITEM_ICMP6_ND_NS_TARGET_ADDR,
174 ITEM_ICMP6_ND_NA_TARGET_ADDR,
176 ITEM_ICMP6_ND_OPT_TYPE,
177 ITEM_ICMP6_ND_OPT_SLA_ETH,
178 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
179 ITEM_ICMP6_ND_OPT_TLA_ETH,
180 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
182 /* Validate/create actions. */
202 ACTION_RSS_FUNC_DEFAULT,
203 ACTION_RSS_FUNC_TOEPLITZ,
204 ACTION_RSS_FUNC_SIMPLE_XOR,
216 ACTION_PHY_PORT_ORIGINAL,
217 ACTION_PHY_PORT_INDEX,
219 ACTION_PORT_ID_ORIGINAL,
223 ACTION_OF_SET_MPLS_TTL,
224 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
225 ACTION_OF_DEC_MPLS_TTL,
226 ACTION_OF_SET_NW_TTL,
227 ACTION_OF_SET_NW_TTL_NW_TTL,
228 ACTION_OF_DEC_NW_TTL,
229 ACTION_OF_COPY_TTL_OUT,
230 ACTION_OF_COPY_TTL_IN,
233 ACTION_OF_PUSH_VLAN_ETHERTYPE,
234 ACTION_OF_SET_VLAN_VID,
235 ACTION_OF_SET_VLAN_VID_VLAN_VID,
236 ACTION_OF_SET_VLAN_PCP,
237 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
239 ACTION_OF_POP_MPLS_ETHERTYPE,
241 ACTION_OF_PUSH_MPLS_ETHERTYPE,
247 ACTION_SET_IPV4_SRC_IPV4_SRC,
249 ACTION_SET_IPV4_DST_IPV4_DST,
251 ACTION_SET_IPV6_SRC_IPV6_SRC,
253 ACTION_SET_IPV6_DST_IPV6_DST,
255 ACTION_SET_TP_SRC_TP_SRC,
257 ACTION_SET_TP_DST_TP_DST,
260 /** Maximum size in bytes of the byte-string pattern stored after struct rte_flow_item_raw. */
261 #define ITEM_RAW_PATTERN_SIZE 40
263 /** Total storage size for a struct rte_flow_item_raw plus its trailing pattern buffer. */
264 #define ITEM_RAW_SIZE \
265 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
267 /** Maximum number of queue indices accepted for an RSS action; sizes the queue[] array of struct action_rss_data. */
268 #define ACTION_RSS_QUEUE_NUM 32
270 /** Storage for struct rte_flow_action_rss including external data. */
271 struct action_rss_data {
272 struct rte_flow_action_rss conf;
273 uint8_t key[RSS_HASH_KEY_LENGTH];
274 uint16_t queue[ACTION_RSS_QUEUE_NUM];
277 /** Maximum number of pattern items for a VXLAN encap action; sizes items[] in struct action_vxlan_encap_data. */
278 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
280 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
281 struct action_vxlan_encap_data {
282 struct rte_flow_action_vxlan_encap conf;
283 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
284 struct rte_flow_item_eth item_eth;
285 struct rte_flow_item_vlan item_vlan;
287 struct rte_flow_item_ipv4 item_ipv4;
288 struct rte_flow_item_ipv6 item_ipv6;
290 struct rte_flow_item_udp item_udp;
291 struct rte_flow_item_vxlan item_vxlan;
294 /** Maximum number of pattern items for an NVGRE encap action; sizes items[] in struct action_nvgre_encap_data. */
295 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
297 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
298 struct action_nvgre_encap_data {
299 struct rte_flow_action_nvgre_encap conf;
300 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
301 struct rte_flow_item_eth item_eth;
302 struct rte_flow_item_vlan item_vlan;
304 struct rte_flow_item_ipv4 item_ipv4;
305 struct rte_flow_item_ipv6 item_ipv6;
307 struct rte_flow_item_nvgre item_nvgre;
310 /** Maximum depth of pending token lists and arguments; sizes the next[] and args[] stacks of struct context. */
311 #define CTX_STACK_SIZE 16
313 /** Parser context. */
315 /** Stack of subsequent token lists to process. */
316 const enum index *next[CTX_STACK_SIZE];
317 /** Arguments for stacked tokens. */
318 const void *args[CTX_STACK_SIZE];
319 enum index curr; /**< Current token index. */
320 enum index prev; /**< Index of the last token seen. */
321 int next_num; /**< Number of entries in next[]. */
322 int args_num; /**< Number of entries in args[]. */
323 uint32_t eol:1; /**< EOL has been detected. */
324 uint32_t last:1; /**< No more arguments. */
325 portid_t port; /**< Current port ID (for completions). */
326 uint32_t objdata; /**< Object-specific data. */
327 void *object; /**< Address of current object for relative offsets. */
328 void *objmask; /**< Object a full mask must be written to. */
331 /** Token argument. */
333 uint32_t hton:1; /**< Use network byte ordering. */
334 uint32_t sign:1; /**< Value is signed. */
335 uint32_t bounded:1; /**< Value is bounded. */
336 uintmax_t min; /**< Minimum value if bounded. */
337 uintmax_t max; /**< Maximum value if bounded. */
338 uint32_t offset; /**< Relative offset from ctx->object. */
339 uint32_t size; /**< Field size. */
340 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
343 /** Parser token definition. */
345 /** Type displayed during completion (defaults to "TOKEN"). */
347 /** Help displayed during completion (defaults to token name). */
349 /** Private data used by parser functions. */
352 * Lists of subsequent tokens to push on the stack. Each call to the
353 * parser consumes the last entry of that stack.
355 const enum index *const *next;
356 /** Arguments stack for subsequent tokens that need them. */
357 const struct arg *const *args;
359 * Token-processing callback, returns -1 in case of error, the
360 * length of the matched string otherwise. If NULL, attempts to
361 * match the token name.
363 * If buf is not NULL, the result should be stored in it according
364 * to context. An error is returned if not large enough.
366 int (*call)(struct context *ctx, const struct token *token,
367 const char *str, unsigned int len,
368 void *buf, unsigned int size);
370 * Callback that provides possible values for this token, used for
371 * completion. Returns -1 in case of error, the number of possible
372 * values otherwise. If NULL, the token name is used.
374 * If buf is not NULL, entry index ent is written to buf and the
375 * full length of the entry is returned (same behavior as
378 int (*comp)(struct context *ctx, const struct token *token,
379 unsigned int ent, char *buf, unsigned int size);
380 /** Mandatory token name, no default value. */
384 /** Static initializer for the next field: builds a NULL-terminated array of token-index lists. */
385 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
387 /** Static initializer for a NEXT() entry: a list of token indices terminated by ZERO. */
388 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
390 /** Static initializer for the args field: a NULL-terminated array of struct arg pointers. */
391 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
393 /** Static initializer for ARGS() to target a field. */
394 #define ARGS_ENTRY(s, f) \
395 (&(const struct arg){ \
396 .offset = offsetof(s, f), \
397 .size = sizeof(((s *)0)->f), \
400 /** Static initializer for ARGS() to target a bit-field. */
401 #define ARGS_ENTRY_BF(s, f, b) \
402 (&(const struct arg){ \
404 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
407 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
408 #define ARGS_ENTRY_MASK(s, f, m) \
409 (&(const struct arg){ \
410 .offset = offsetof(s, f), \
411 .size = sizeof(((s *)0)->f), \
412 .mask = (const void *)(m), \
415 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
416 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
417 (&(const struct arg){ \
419 .offset = offsetof(s, f), \
420 .size = sizeof(((s *)0)->f), \
421 .mask = (const void *)(m), \
424 /** Static initializer for ARGS() to target a pointer. */
425 #define ARGS_ENTRY_PTR(s, f) \
426 (&(const struct arg){ \
427 .size = sizeof(*((s *)0)->f), \
430 /** Static initializer for ARGS() with arbitrary offset and size. */
431 #define ARGS_ENTRY_ARB(o, s) \
432 (&(const struct arg){ \
437 /** Same as ARGS_ENTRY_ARB() with bounded values. */
438 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
439 (&(const struct arg){ \
447 /** Same as ARGS_ENTRY() using network byte ordering. */
448 #define ARGS_ENTRY_HTON(s, f) \
449 (&(const struct arg){ \
451 .offset = offsetof(s, f), \
452 .size = sizeof(((s *)0)->f), \
455 /** Parser output buffer layout expected by cmd_flow_parsed(). */
457 enum index command; /**< Flow command. */
458 portid_t port; /**< Affected port ID. */
461 struct rte_flow_attr attr;
462 struct rte_flow_item *pattern;
463 struct rte_flow_action *actions;
467 } vc; /**< Validate/create arguments. */
471 } destroy; /**< Destroy arguments. */
474 struct rte_flow_action action;
475 } query; /**< Query arguments. */
479 } list; /**< List arguments. */
482 } isolate; /**< Isolated mode arguments. */
483 } args; /**< Command arguments. */
486 /** Private data for pattern items. */
487 struct parse_item_priv {
488 enum rte_flow_item_type type; /**< Item type. */
489 uint32_t size; /**< Size of item specification structure. */
492 #define PRIV_ITEM(t, s) \
493 (&(const struct parse_item_priv){ \
494 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
498 /** Private data for actions. */
499 struct parse_action_priv {
500 enum rte_flow_action_type type; /**< Action type. */
501 uint32_t size; /**< Size of action configuration structure. */
504 #define PRIV_ACTION(t, s) \
505 (&(const struct parse_action_priv){ \
506 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
510 static const enum index next_vc_attr[] = {
520 static const enum index next_destroy_attr[] = {
526 static const enum index next_list_attr[] = {
532 static const enum index item_param[] = {
541 static const enum index next_item[] = {
577 ITEM_ICMP6_ND_OPT_SLA_ETH,
578 ITEM_ICMP6_ND_OPT_TLA_ETH,
582 static const enum index item_fuzzy[] = {
588 static const enum index item_any[] = {
594 static const enum index item_vf[] = {
600 static const enum index item_phy_port[] = {
606 static const enum index item_port_id[] = {
612 static const enum index item_mark[] = {
618 static const enum index item_raw[] = {
628 static const enum index item_eth[] = {
636 static const enum index item_vlan[] = {
641 ITEM_VLAN_INNER_TYPE,
646 static const enum index item_ipv4[] = {
656 static const enum index item_ipv6[] = {
667 static const enum index item_icmp[] = {
674 static const enum index item_udp[] = {
681 static const enum index item_tcp[] = {
689 static const enum index item_sctp[] = {
698 static const enum index item_vxlan[] = {
704 static const enum index item_e_tag[] = {
705 ITEM_E_TAG_GRP_ECID_B,
710 static const enum index item_nvgre[] = {
716 static const enum index item_mpls[] = {
722 static const enum index item_gre[] = {
728 static const enum index item_gtp[] = {
734 static const enum index item_geneve[] = {
741 static const enum index item_vxlan_gpe[] = {
747 static const enum index item_arp_eth_ipv4[] = {
748 ITEM_ARP_ETH_IPV4_SHA,
749 ITEM_ARP_ETH_IPV4_SPA,
750 ITEM_ARP_ETH_IPV4_THA,
751 ITEM_ARP_ETH_IPV4_TPA,
756 static const enum index item_ipv6_ext[] = {
757 ITEM_IPV6_EXT_NEXT_HDR,
762 static const enum index item_icmp6[] = {
769 static const enum index item_icmp6_nd_ns[] = {
770 ITEM_ICMP6_ND_NS_TARGET_ADDR,
775 static const enum index item_icmp6_nd_na[] = {
776 ITEM_ICMP6_ND_NA_TARGET_ADDR,
781 static const enum index item_icmp6_nd_opt[] = {
782 ITEM_ICMP6_ND_OPT_TYPE,
787 static const enum index item_icmp6_nd_opt_sla_eth[] = {
788 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
793 static const enum index item_icmp6_nd_opt_tla_eth[] = {
794 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
799 static const enum index next_action[] = {
815 ACTION_OF_SET_MPLS_TTL,
816 ACTION_OF_DEC_MPLS_TTL,
817 ACTION_OF_SET_NW_TTL,
818 ACTION_OF_DEC_NW_TTL,
819 ACTION_OF_COPY_TTL_OUT,
820 ACTION_OF_COPY_TTL_IN,
823 ACTION_OF_SET_VLAN_VID,
824 ACTION_OF_SET_VLAN_PCP,
840 static const enum index action_mark[] = {
846 static const enum index action_queue[] = {
852 static const enum index action_count[] = {
859 static const enum index action_rss[] = {
870 static const enum index action_vf[] = {
877 static const enum index action_phy_port[] = {
878 ACTION_PHY_PORT_ORIGINAL,
879 ACTION_PHY_PORT_INDEX,
884 static const enum index action_port_id[] = {
885 ACTION_PORT_ID_ORIGINAL,
891 static const enum index action_meter[] = {
897 static const enum index action_of_set_mpls_ttl[] = {
898 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
903 static const enum index action_of_set_nw_ttl[] = {
904 ACTION_OF_SET_NW_TTL_NW_TTL,
909 static const enum index action_of_push_vlan[] = {
910 ACTION_OF_PUSH_VLAN_ETHERTYPE,
915 static const enum index action_of_set_vlan_vid[] = {
916 ACTION_OF_SET_VLAN_VID_VLAN_VID,
921 static const enum index action_of_set_vlan_pcp[] = {
922 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
927 static const enum index action_of_pop_mpls[] = {
928 ACTION_OF_POP_MPLS_ETHERTYPE,
933 static const enum index action_of_push_mpls[] = {
934 ACTION_OF_PUSH_MPLS_ETHERTYPE,
939 static const enum index action_set_ipv4_src[] = {
940 ACTION_SET_IPV4_SRC_IPV4_SRC,
945 static const enum index action_set_ipv4_dst[] = {
946 ACTION_SET_IPV4_DST_IPV4_DST,
951 static const enum index action_set_ipv6_src[] = {
952 ACTION_SET_IPV6_SRC_IPV6_SRC,
957 static const enum index action_set_ipv6_dst[] = {
958 ACTION_SET_IPV6_DST_IPV6_DST,
963 static const enum index action_set_tp_src[] = {
964 ACTION_SET_TP_SRC_TP_SRC,
969 static const enum index action_set_tp_dst[] = {
970 ACTION_SET_TP_DST_TP_DST,
975 static const enum index action_jump[] = {
/* Forward declarations of token-processing callbacks (struct token .call signature). */
981 static int parse_init(struct context *, const struct token *,
982 const char *, unsigned int,
983 void *, unsigned int);
984 static int parse_vc(struct context *, const struct token *,
985 const char *, unsigned int,
986 void *, unsigned int);
987 static int parse_vc_spec(struct context *, const struct token *,
988 const char *, unsigned int, void *, unsigned int);
989 static int parse_vc_conf(struct context *, const struct token *,
990 const char *, unsigned int, void *, unsigned int);
991 static int parse_vc_action_rss(struct context *, const struct token *,
992 const char *, unsigned int, void *,
994 static int parse_vc_action_rss_func(struct context *, const struct token *,
995 const char *, unsigned int, void *,
997 static int parse_vc_action_rss_type(struct context *, const struct token *,
998 const char *, unsigned int, void *,
1000 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1001 const char *, unsigned int, void *,
1003 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1004 const char *, unsigned int, void *,
1006 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1007 const char *, unsigned int, void *,
1009 static int parse_destroy(struct context *, const struct token *,
1010 const char *, unsigned int,
1011 void *, unsigned int);
1012 static int parse_flush(struct context *, const struct token *,
1013 const char *, unsigned int,
1014 void *, unsigned int);
1015 static int parse_query(struct context *, const struct token *,
1016 const char *, unsigned int,
1017 void *, unsigned int);
1018 static int parse_action(struct context *, const struct token *,
1019 const char *, unsigned int,
1020 void *, unsigned int);
1021 static int parse_list(struct context *, const struct token *,
1022 const char *, unsigned int,
1023 void *, unsigned int);
1024 static int parse_isolate(struct context *, const struct token *,
1025 const char *, unsigned int,
1026 void *, unsigned int);
1027 static int parse_int(struct context *, const struct token *,
1028 const char *, unsigned int,
1029 void *, unsigned int);
1030 static int parse_prefix(struct context *, const struct token *,
1031 const char *, unsigned int,
1032 void *, unsigned int);
1033 static int parse_boolean(struct context *, const struct token *,
1034 const char *, unsigned int,
1035 void *, unsigned int);
1036 static int parse_string(struct context *, const struct token *,
1037 const char *, unsigned int,
1038 void *, unsigned int);
1039 static int parse_mac_addr(struct context *, const struct token *,
1040 const char *, unsigned int,
1041 void *, unsigned int);
1042 static int parse_ipv4_addr(struct context *, const struct token *,
1043 const char *, unsigned int,
1044 void *, unsigned int);
1045 static int parse_ipv6_addr(struct context *, const struct token *,
1046 const char *, unsigned int,
1047 void *, unsigned int);
1048 static int parse_port(struct context *, const struct token *,
1049 const char *, unsigned int,
1050 void *, unsigned int);
/* Forward declarations of completion callbacks (struct token .comp signature). */
1051 static int comp_none(struct context *, const struct token *,
1052 unsigned int, char *, unsigned int);
1053 static int comp_boolean(struct context *, const struct token *,
1054 unsigned int, char *, unsigned int);
1055 static int comp_action(struct context *, const struct token *,
1056 unsigned int, char *, unsigned int);
1057 static int comp_port(struct context *, const struct token *,
1058 unsigned int, char *, unsigned int);
1059 static int comp_rule_id(struct context *, const struct token *,
1060 unsigned int, char *, unsigned int);
1061 static int comp_vc_action_rss_type(struct context *, const struct token *,
1062 unsigned int, char *, unsigned int);
1063 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1064 unsigned int, char *, unsigned int);
1066 /** Token definitions. */
1067 static const struct token token_list[] = {
1068 /* Special tokens. */
1071 .help = "null entry, abused as the entry point",
1072 .next = NEXT(NEXT_ENTRY(FLOW)),
1077 .help = "command may end here",
1079 /* Common tokens. */
1083 .help = "integer value",
1088 .name = "{unsigned}",
1090 .help = "unsigned integer value",
1097 .help = "prefix length for bit-mask",
1098 .call = parse_prefix,
1102 .name = "{boolean}",
1104 .help = "any boolean value",
1105 .call = parse_boolean,
1106 .comp = comp_boolean,
1111 .help = "fixed string",
1112 .call = parse_string,
1116 .name = "{MAC address}",
1118 .help = "standard MAC address notation",
1119 .call = parse_mac_addr,
1123 .name = "{IPv4 address}",
1124 .type = "IPV4 ADDRESS",
1125 .help = "standard IPv4 address notation",
1126 .call = parse_ipv4_addr,
1130 .name = "{IPv6 address}",
1131 .type = "IPV6 ADDRESS",
1132 .help = "standard IPv6 address notation",
1133 .call = parse_ipv6_addr,
1137 .name = "{rule id}",
1139 .help = "rule identifier",
1141 .comp = comp_rule_id,
1144 .name = "{port_id}",
1146 .help = "port identifier",
1151 .name = "{group_id}",
1153 .help = "group identifier",
1157 [PRIORITY_LEVEL] = {
1160 .help = "priority level",
1164 /* Top-level command. */
1167 .type = "{command} {port_id} [{arg} [...]]",
1168 .help = "manage ingress/egress flow rules",
1169 .next = NEXT(NEXT_ENTRY
1179 /* Sub-level commands. */
1182 .help = "check whether a flow rule can be created",
1183 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1184 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1189 .help = "create a flow rule",
1190 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1191 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1196 .help = "destroy specific flow rules",
1197 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1198 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1199 .call = parse_destroy,
1203 .help = "destroy all flow rules",
1204 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1205 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1206 .call = parse_flush,
1210 .help = "query an existing flow rule",
1211 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1212 NEXT_ENTRY(RULE_ID),
1213 NEXT_ENTRY(PORT_ID)),
1214 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1215 ARGS_ENTRY(struct buffer, args.query.rule),
1216 ARGS_ENTRY(struct buffer, port)),
1217 .call = parse_query,
1221 .help = "list existing flow rules",
1222 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1223 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1228 .help = "restrict ingress traffic to the defined flow rules",
1229 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1230 NEXT_ENTRY(PORT_ID)),
1231 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1232 ARGS_ENTRY(struct buffer, port)),
1233 .call = parse_isolate,
1235 /* Destroy arguments. */
1238 .help = "specify a rule identifier",
1239 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1240 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1241 .call = parse_destroy,
1243 /* Query arguments. */
1247 .help = "action to query, must be part of the rule",
1248 .call = parse_action,
1249 .comp = comp_action,
1251 /* List arguments. */
1254 .help = "specify a group",
1255 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1256 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1259 /* Validate/create attributes. */
1262 .help = "specify a group",
1263 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1264 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1269 .help = "specify a priority level",
1270 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1271 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1276 .help = "affect rule to ingress",
1277 .next = NEXT(next_vc_attr),
1282 .help = "affect rule to egress",
1283 .next = NEXT(next_vc_attr),
1288 .help = "apply rule directly to endpoints found in pattern",
1289 .next = NEXT(next_vc_attr),
1292 /* Validate/create pattern. */
1295 .help = "submit a list of pattern items",
1296 .next = NEXT(next_item),
1301 .help = "match value perfectly (with full bit-mask)",
1302 .call = parse_vc_spec,
1304 [ITEM_PARAM_SPEC] = {
1306 .help = "match value according to configured bit-mask",
1307 .call = parse_vc_spec,
1309 [ITEM_PARAM_LAST] = {
1311 .help = "specify upper bound to establish a range",
1312 .call = parse_vc_spec,
1314 [ITEM_PARAM_MASK] = {
1316 .help = "specify bit-mask with relevant bits set to one",
1317 .call = parse_vc_spec,
1319 [ITEM_PARAM_PREFIX] = {
1321 .help = "generate bit-mask from a prefix length",
1322 .call = parse_vc_spec,
1326 .help = "specify next pattern item",
1327 .next = NEXT(next_item),
1331 .help = "end list of pattern items",
1332 .priv = PRIV_ITEM(END, 0),
1333 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1338 .help = "no-op pattern item",
1339 .priv = PRIV_ITEM(VOID, 0),
1340 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1345 .help = "perform actions when pattern does not match",
1346 .priv = PRIV_ITEM(INVERT, 0),
1347 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1352 .help = "match any protocol for the current layer",
1353 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1354 .next = NEXT(item_any),
1359 .help = "number of layers covered",
1360 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1361 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1365 .help = "match traffic from/to the physical function",
1366 .priv = PRIV_ITEM(PF, 0),
1367 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1372 .help = "match traffic from/to a virtual function ID",
1373 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1374 .next = NEXT(item_vf),
1380 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1381 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1385 .help = "match traffic from/to a specific physical port",
1386 .priv = PRIV_ITEM(PHY_PORT,
1387 sizeof(struct rte_flow_item_phy_port)),
1388 .next = NEXT(item_phy_port),
1391 [ITEM_PHY_PORT_INDEX] = {
1393 .help = "physical port index",
1394 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1395 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1399 .help = "match traffic from/to a given DPDK port ID",
1400 .priv = PRIV_ITEM(PORT_ID,
1401 sizeof(struct rte_flow_item_port_id)),
1402 .next = NEXT(item_port_id),
1405 [ITEM_PORT_ID_ID] = {
1407 .help = "DPDK port ID",
1408 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1409 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1413 .help = "match traffic against value set in previously matched rule",
1414 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1415 .next = NEXT(item_mark),
1420 .help = "Integer value to match against",
1421 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1422 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1426 .help = "match an arbitrary byte string",
1427 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1428 .next = NEXT(item_raw),
1431 [ITEM_RAW_RELATIVE] = {
1433 .help = "look for pattern after the previous item",
1434 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1435 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1438 [ITEM_RAW_SEARCH] = {
1440 .help = "search pattern from offset (see also limit)",
1441 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1442 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1445 [ITEM_RAW_OFFSET] = {
1447 .help = "absolute or relative offset for pattern",
1448 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1449 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1451 [ITEM_RAW_LIMIT] = {
1453 .help = "search area limit for start of pattern",
1454 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1455 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1457 [ITEM_RAW_PATTERN] = {
1459 .help = "byte string to look for",
1460 .next = NEXT(item_raw,
1462 NEXT_ENTRY(ITEM_PARAM_IS,
1465 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1466 ARGS_ENTRY(struct rte_flow_item_raw, length),
1467 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1468 ITEM_RAW_PATTERN_SIZE)),
1472 .help = "match Ethernet header",
1473 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1474 .next = NEXT(item_eth),
1479 .help = "destination MAC",
1480 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1481 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1485 .help = "source MAC",
1486 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1487 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1491 .help = "EtherType",
1492 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1493 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1497 .help = "match 802.1Q/ad VLAN tag",
1498 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1499 .next = NEXT(item_vlan),
1504 .help = "tag control information",
1505 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1506 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1510 .help = "priority code point",
1511 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1512 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1517 .help = "drop eligible indicator",
1518 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1519 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1524 .help = "VLAN identifier",
1525 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1526 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1529 [ITEM_VLAN_INNER_TYPE] = {
1530 .name = "inner_type",
1531 .help = "inner EtherType",
1532 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1533 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1538 .help = "match IPv4 header",
1539 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1540 .next = NEXT(item_ipv4),
1545 .help = "type of service",
1546 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1547 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1548 hdr.type_of_service)),
1552 .help = "time to live",
1553 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1554 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1557 [ITEM_IPV4_PROTO] = {
1559 .help = "next protocol ID",
1560 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1561 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1562 hdr.next_proto_id)),
1566 .help = "source address",
1567 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1568 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1573 .help = "destination address",
1574 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1575 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1580 .help = "match IPv6 header",
1581 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1582 .next = NEXT(item_ipv6),
1587 .help = "traffic class",
1588 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1589 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1591 "\x0f\xf0\x00\x00")),
1593 [ITEM_IPV6_FLOW] = {
1595 .help = "flow label",
1596 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1597 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1599 "\x00\x0f\xff\xff")),
1601 [ITEM_IPV6_PROTO] = {
1603 .help = "protocol (next header)",
1604 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1605 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1610 .help = "hop limit",
1611 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1612 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1617 .help = "source address",
1618 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1619 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1624 .help = "destination address",
1625 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1626 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1631 .help = "match ICMP header",
1632 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1633 .next = NEXT(item_icmp),
1636 [ITEM_ICMP_TYPE] = {
1638 .help = "ICMP packet type",
1639 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1640 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1643 [ITEM_ICMP_CODE] = {
1645 .help = "ICMP packet code",
1646 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1647 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1652 .help = "match UDP header",
1653 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1654 .next = NEXT(item_udp),
1659 .help = "UDP source port",
1660 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1661 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1666 .help = "UDP destination port",
1667 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1668 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1673 .help = "match TCP header",
1674 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1675 .next = NEXT(item_tcp),
1680 .help = "TCP source port",
1681 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1682 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1687 .help = "TCP destination port",
1688 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1689 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1692 [ITEM_TCP_FLAGS] = {
1694 .help = "TCP flags",
1695 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1696 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1701 .help = "match SCTP header",
1702 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1703 .next = NEXT(item_sctp),
1708 .help = "SCTP source port",
1709 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1710 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1715 .help = "SCTP destination port",
1716 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1717 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1722 .help = "validation tag",
1723 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1724 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1727 [ITEM_SCTP_CKSUM] = {
1730 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1731 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1736 .help = "match VXLAN header",
1737 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1738 .next = NEXT(item_vxlan),
1741 [ITEM_VXLAN_VNI] = {
1743 .help = "VXLAN identifier",
1744 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1745 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1749 .help = "match E-Tag header",
1750 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1751 .next = NEXT(item_e_tag),
1754 [ITEM_E_TAG_GRP_ECID_B] = {
1755 .name = "grp_ecid_b",
1756 .help = "GRP and E-CID base",
1757 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1758 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1764 .help = "match NVGRE header",
1765 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1766 .next = NEXT(item_nvgre),
1769 [ITEM_NVGRE_TNI] = {
1771 .help = "virtual subnet ID",
1772 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1773 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1777 .help = "match MPLS header",
1778 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1779 .next = NEXT(item_mpls),
1782 [ITEM_MPLS_LABEL] = {
1784 .help = "MPLS label",
1785 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1786 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1792 .help = "match GRE header",
1793 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1794 .next = NEXT(item_gre),
1797 [ITEM_GRE_PROTO] = {
1799 .help = "GRE protocol type",
1800 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1801 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1806 .help = "fuzzy pattern match, expect faster than default",
1807 .priv = PRIV_ITEM(FUZZY,
1808 sizeof(struct rte_flow_item_fuzzy)),
1809 .next = NEXT(item_fuzzy),
1812 [ITEM_FUZZY_THRESH] = {
1814 .help = "match accuracy threshold",
1815 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1816 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1821 .help = "match GTP header",
1822 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1823 .next = NEXT(item_gtp),
1828 .help = "tunnel endpoint identifier",
1829 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1830 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1834 .help = "match GTP header",
1835 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1836 .next = NEXT(item_gtp),
1841 .help = "match GTP header",
1842 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1843 .next = NEXT(item_gtp),
1848 .help = "match GENEVE header",
1849 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1850 .next = NEXT(item_geneve),
1853 [ITEM_GENEVE_VNI] = {
1855 .help = "virtual network identifier",
1856 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1857 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1859 [ITEM_GENEVE_PROTO] = {
1861 .help = "GENEVE protocol type",
1862 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1863 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1866 [ITEM_VXLAN_GPE] = {
1867 .name = "vxlan-gpe",
1868 .help = "match VXLAN-GPE header",
1869 .priv = PRIV_ITEM(VXLAN_GPE,
1870 sizeof(struct rte_flow_item_vxlan_gpe)),
1871 .next = NEXT(item_vxlan_gpe),
1874 [ITEM_VXLAN_GPE_VNI] = {
1876 .help = "VXLAN-GPE identifier",
1877 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1878 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1881 [ITEM_ARP_ETH_IPV4] = {
1882 .name = "arp_eth_ipv4",
1883 .help = "match ARP header for Ethernet/IPv4",
1884 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1885 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1886 .next = NEXT(item_arp_eth_ipv4),
1889 [ITEM_ARP_ETH_IPV4_SHA] = {
1891 .help = "sender hardware address",
1892 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1894 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1897 [ITEM_ARP_ETH_IPV4_SPA] = {
1899 .help = "sender IPv4 address",
1900 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1902 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1905 [ITEM_ARP_ETH_IPV4_THA] = {
1907 .help = "target hardware address",
1908 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1910 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1913 [ITEM_ARP_ETH_IPV4_TPA] = {
1915 .help = "target IPv4 address",
1916 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1918 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1923 .help = "match presence of any IPv6 extension header",
1924 .priv = PRIV_ITEM(IPV6_EXT,
1925 sizeof(struct rte_flow_item_ipv6_ext)),
1926 .next = NEXT(item_ipv6_ext),
1929 [ITEM_IPV6_EXT_NEXT_HDR] = {
1931 .help = "next header",
1932 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
1933 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
1938 .help = "match any ICMPv6 header",
1939 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
1940 .next = NEXT(item_icmp6),
1943 [ITEM_ICMP6_TYPE] = {
1945 .help = "ICMPv6 type",
1946 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1947 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1950 [ITEM_ICMP6_CODE] = {
1952 .help = "ICMPv6 code",
1953 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
1954 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
1957 [ITEM_ICMP6_ND_NS] = {
1958 .name = "icmp6_nd_ns",
1959 .help = "match ICMPv6 neighbor discovery solicitation",
1960 .priv = PRIV_ITEM(ICMP6_ND_NS,
1961 sizeof(struct rte_flow_item_icmp6_nd_ns)),
1962 .next = NEXT(item_icmp6_nd_ns),
1965 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
1966 .name = "target_addr",
1967 .help = "target address",
1968 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
1970 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
1973 [ITEM_ICMP6_ND_NA] = {
1974 .name = "icmp6_nd_na",
1975 .help = "match ICMPv6 neighbor discovery advertisement",
1976 .priv = PRIV_ITEM(ICMP6_ND_NA,
1977 sizeof(struct rte_flow_item_icmp6_nd_na)),
1978 .next = NEXT(item_icmp6_nd_na),
1981 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
1982 .name = "target_addr",
1983 .help = "target address",
1984 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
1986 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
1989 [ITEM_ICMP6_ND_OPT] = {
1990 .name = "icmp6_nd_opt",
1991 .help = "match presence of any ICMPv6 neighbor discovery"
1993 .priv = PRIV_ITEM(ICMP6_ND_OPT,
1994 sizeof(struct rte_flow_item_icmp6_nd_opt)),
1995 .next = NEXT(item_icmp6_nd_opt),
1998 [ITEM_ICMP6_ND_OPT_TYPE] = {
2000 .help = "ND option type",
2001 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2003 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2006 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2007 .name = "icmp6_nd_opt_sla_eth",
2008 .help = "match ICMPv6 neighbor discovery source Ethernet"
2009 " link-layer address option",
2011 (ICMP6_ND_OPT_SLA_ETH,
2012 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2013 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2016 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2018 .help = "source Ethernet LLA",
2019 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2021 .args = ARGS(ARGS_ENTRY_HTON
2022 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2024 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2025 .name = "icmp6_nd_opt_tla_eth",
2026 .help = "match ICMPv6 neighbor discovery target Ethernet"
2027 " link-layer address option",
2029 (ICMP6_ND_OPT_TLA_ETH,
2030 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2031 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2034 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2036 .help = "target Ethernet LLA",
2037 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2039 .args = ARGS(ARGS_ENTRY_HTON
2040 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2043 /* Validate/create actions. */
2046 .help = "submit a list of associated actions",
2047 .next = NEXT(next_action),
2052 .help = "specify next action",
2053 .next = NEXT(next_action),
2057 .help = "end list of actions",
2058 .priv = PRIV_ACTION(END, 0),
2063 .help = "no-op action",
2064 .priv = PRIV_ACTION(VOID, 0),
2065 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2068 [ACTION_PASSTHRU] = {
2070 .help = "let subsequent rule process matched packets",
2071 .priv = PRIV_ACTION(PASSTHRU, 0),
2072 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2077 .help = "redirect traffic to a given group",
2078 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2079 .next = NEXT(action_jump),
2082 [ACTION_JUMP_GROUP] = {
2084 .help = "group to redirect traffic to",
2085 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2086 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2087 .call = parse_vc_conf,
2091 .help = "attach 32 bit value to packets",
2092 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2093 .next = NEXT(action_mark),
2096 [ACTION_MARK_ID] = {
2098 .help = "32 bit value to return with packets",
2099 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2100 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2101 .call = parse_vc_conf,
2105 .help = "flag packets",
2106 .priv = PRIV_ACTION(FLAG, 0),
2107 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2112 .help = "assign packets to a given queue index",
2113 .priv = PRIV_ACTION(QUEUE,
2114 sizeof(struct rte_flow_action_queue)),
2115 .next = NEXT(action_queue),
2118 [ACTION_QUEUE_INDEX] = {
2120 .help = "queue index to use",
2121 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2122 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2123 .call = parse_vc_conf,
2127 .help = "drop packets (note: passthru has priority)",
2128 .priv = PRIV_ACTION(DROP, 0),
2129 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2134 .help = "enable counters for this rule",
2135 .priv = PRIV_ACTION(COUNT,
2136 sizeof(struct rte_flow_action_count)),
2137 .next = NEXT(action_count),
2140 [ACTION_COUNT_ID] = {
2141 .name = "identifier",
2142 .help = "counter identifier to use",
2143 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2144 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2145 .call = parse_vc_conf,
2147 [ACTION_COUNT_SHARED] = {
2149 .help = "shared counter",
2150 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2151 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2153 .call = parse_vc_conf,
2157 .help = "spread packets among several queues",
2158 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2159 .next = NEXT(action_rss),
2160 .call = parse_vc_action_rss,
2162 [ACTION_RSS_FUNC] = {
2164 .help = "RSS hash function to apply",
2165 .next = NEXT(action_rss,
2166 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2167 ACTION_RSS_FUNC_TOEPLITZ,
2168 ACTION_RSS_FUNC_SIMPLE_XOR)),
2170 [ACTION_RSS_FUNC_DEFAULT] = {
2172 .help = "default hash function",
2173 .call = parse_vc_action_rss_func,
2175 [ACTION_RSS_FUNC_TOEPLITZ] = {
2177 .help = "Toeplitz hash function",
2178 .call = parse_vc_action_rss_func,
2180 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2181 .name = "simple_xor",
2182 .help = "simple XOR hash function",
2183 .call = parse_vc_action_rss_func,
2185 [ACTION_RSS_LEVEL] = {
2187 .help = "encapsulation level for \"types\"",
2188 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2189 .args = ARGS(ARGS_ENTRY_ARB
2190 (offsetof(struct action_rss_data, conf) +
2191 offsetof(struct rte_flow_action_rss, level),
2192 sizeof(((struct rte_flow_action_rss *)0)->
2195 [ACTION_RSS_TYPES] = {
2197 .help = "specific RSS hash types",
2198 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2200 [ACTION_RSS_TYPE] = {
2202 .help = "RSS hash type",
2203 .call = parse_vc_action_rss_type,
2204 .comp = comp_vc_action_rss_type,
2206 [ACTION_RSS_KEY] = {
2208 .help = "RSS hash key",
2209 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
2210 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2212 (offsetof(struct action_rss_data, conf) +
2213 offsetof(struct rte_flow_action_rss, key_len),
2214 sizeof(((struct rte_flow_action_rss *)0)->
2216 ARGS_ENTRY(struct action_rss_data, key)),
2218 [ACTION_RSS_KEY_LEN] = {
2220 .help = "RSS hash key length in bytes",
2221 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2222 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2223 (offsetof(struct action_rss_data, conf) +
2224 offsetof(struct rte_flow_action_rss, key_len),
2225 sizeof(((struct rte_flow_action_rss *)0)->
2228 RSS_HASH_KEY_LENGTH)),
2230 [ACTION_RSS_QUEUES] = {
2232 .help = "queue indices to use",
2233 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2234 .call = parse_vc_conf,
2236 [ACTION_RSS_QUEUE] = {
2238 .help = "queue index",
2239 .call = parse_vc_action_rss_queue,
2240 .comp = comp_vc_action_rss_queue,
2244 .help = "direct traffic to physical function",
2245 .priv = PRIV_ACTION(PF, 0),
2246 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2251 .help = "direct traffic to a virtual function ID",
2252 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2253 .next = NEXT(action_vf),
2256 [ACTION_VF_ORIGINAL] = {
2258 .help = "use original VF ID if possible",
2259 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2260 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2262 .call = parse_vc_conf,
2267 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2268 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2269 .call = parse_vc_conf,
2271 [ACTION_PHY_PORT] = {
2273 .help = "direct packets to physical port index",
2274 .priv = PRIV_ACTION(PHY_PORT,
2275 sizeof(struct rte_flow_action_phy_port)),
2276 .next = NEXT(action_phy_port),
2279 [ACTION_PHY_PORT_ORIGINAL] = {
2281 .help = "use original port index if possible",
2282 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2283 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2285 .call = parse_vc_conf,
2287 [ACTION_PHY_PORT_INDEX] = {
2289 .help = "physical port index",
2290 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2291 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2293 .call = parse_vc_conf,
2295 [ACTION_PORT_ID] = {
2297 .help = "direct matching traffic to a given DPDK port ID",
2298 .priv = PRIV_ACTION(PORT_ID,
2299 sizeof(struct rte_flow_action_port_id)),
2300 .next = NEXT(action_port_id),
2303 [ACTION_PORT_ID_ORIGINAL] = {
2305 .help = "use original DPDK port ID if possible",
2306 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2307 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2309 .call = parse_vc_conf,
2311 [ACTION_PORT_ID_ID] = {
2313 .help = "DPDK port ID",
2314 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2315 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2316 .call = parse_vc_conf,
2320 .help = "meter the directed packets at given id",
2321 .priv = PRIV_ACTION(METER,
2322 sizeof(struct rte_flow_action_meter)),
2323 .next = NEXT(action_meter),
2326 [ACTION_METER_ID] = {
2328 .help = "meter id to use",
2329 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2330 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2331 .call = parse_vc_conf,
2333 [ACTION_OF_SET_MPLS_TTL] = {
2334 .name = "of_set_mpls_ttl",
2335 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2338 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2339 .next = NEXT(action_of_set_mpls_ttl),
2342 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2345 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2346 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2348 .call = parse_vc_conf,
2350 [ACTION_OF_DEC_MPLS_TTL] = {
2351 .name = "of_dec_mpls_ttl",
2352 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2353 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2354 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2357 [ACTION_OF_SET_NW_TTL] = {
2358 .name = "of_set_nw_ttl",
2359 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2362 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2363 .next = NEXT(action_of_set_nw_ttl),
2366 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2369 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2370 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2372 .call = parse_vc_conf,
2374 [ACTION_OF_DEC_NW_TTL] = {
2375 .name = "of_dec_nw_ttl",
2376 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2377 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2378 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2381 [ACTION_OF_COPY_TTL_OUT] = {
2382 .name = "of_copy_ttl_out",
2383 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2384 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2385 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2388 [ACTION_OF_COPY_TTL_IN] = {
2389 .name = "of_copy_ttl_in",
2390 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2391 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2392 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2395 [ACTION_OF_POP_VLAN] = {
2396 .name = "of_pop_vlan",
2397 .help = "OpenFlow's OFPAT_POP_VLAN",
2398 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2399 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2402 [ACTION_OF_PUSH_VLAN] = {
2403 .name = "of_push_vlan",
2404 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2407 sizeof(struct rte_flow_action_of_push_vlan)),
2408 .next = NEXT(action_of_push_vlan),
2411 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2412 .name = "ethertype",
2413 .help = "EtherType",
2414 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2415 .args = ARGS(ARGS_ENTRY_HTON
2416 (struct rte_flow_action_of_push_vlan,
2418 .call = parse_vc_conf,
2420 [ACTION_OF_SET_VLAN_VID] = {
2421 .name = "of_set_vlan_vid",
2422 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2425 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2426 .next = NEXT(action_of_set_vlan_vid),
2429 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2432 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2433 .args = ARGS(ARGS_ENTRY_HTON
2434 (struct rte_flow_action_of_set_vlan_vid,
2436 .call = parse_vc_conf,
2438 [ACTION_OF_SET_VLAN_PCP] = {
2439 .name = "of_set_vlan_pcp",
2440 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2443 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2444 .next = NEXT(action_of_set_vlan_pcp),
2447 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2449 .help = "VLAN priority",
2450 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2451 .args = ARGS(ARGS_ENTRY_HTON
2452 (struct rte_flow_action_of_set_vlan_pcp,
2454 .call = parse_vc_conf,
2456 [ACTION_OF_POP_MPLS] = {
2457 .name = "of_pop_mpls",
2458 .help = "OpenFlow's OFPAT_POP_MPLS",
2459 .priv = PRIV_ACTION(OF_POP_MPLS,
2460 sizeof(struct rte_flow_action_of_pop_mpls)),
2461 .next = NEXT(action_of_pop_mpls),
2464 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2465 .name = "ethertype",
2466 .help = "EtherType",
2467 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2468 .args = ARGS(ARGS_ENTRY_HTON
2469 (struct rte_flow_action_of_pop_mpls,
2471 .call = parse_vc_conf,
2473 [ACTION_OF_PUSH_MPLS] = {
2474 .name = "of_push_mpls",
2475 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2478 sizeof(struct rte_flow_action_of_push_mpls)),
2479 .next = NEXT(action_of_push_mpls),
2482 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2483 .name = "ethertype",
2484 .help = "EtherType",
2485 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2486 .args = ARGS(ARGS_ENTRY_HTON
2487 (struct rte_flow_action_of_push_mpls,
2489 .call = parse_vc_conf,
2491 [ACTION_VXLAN_ENCAP] = {
2492 .name = "vxlan_encap",
2493 .help = "VXLAN encapsulation, uses configuration set by \"set"
2495 .priv = PRIV_ACTION(VXLAN_ENCAP,
2496 sizeof(struct action_vxlan_encap_data)),
2497 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2498 .call = parse_vc_action_vxlan_encap,
2500 [ACTION_VXLAN_DECAP] = {
2501 .name = "vxlan_decap",
2502 .help = "Performs a decapsulation action by stripping all"
2503 " headers of the VXLAN tunnel network overlay from the"
2505 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2506 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2509 [ACTION_NVGRE_ENCAP] = {
2510 .name = "nvgre_encap",
2511 .help = "NVGRE encapsulation, uses configuration set by \"set"
2513 .priv = PRIV_ACTION(NVGRE_ENCAP,
2514 sizeof(struct action_nvgre_encap_data)),
2515 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2516 .call = parse_vc_action_nvgre_encap,
2518 [ACTION_NVGRE_DECAP] = {
2519 .name = "nvgre_decap",
2520 .help = "Performs a decapsulation action by stripping all"
2521 " headers of the NVGRE tunnel network overlay from the"
2523 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2524 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2527 [ACTION_SET_IPV4_SRC] = {
2528 .name = "set_ipv4_src",
2529 .help = "Set a new IPv4 source address in the outermost"
2531 .priv = PRIV_ACTION(SET_IPV4_SRC,
2532 sizeof(struct rte_flow_action_set_ipv4)),
2533 .next = NEXT(action_set_ipv4_src),
2536 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2537 .name = "ipv4_addr",
2538 .help = "new IPv4 source address to set",
2539 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2540 .args = ARGS(ARGS_ENTRY_HTON
2541 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2542 .call = parse_vc_conf,
2544 [ACTION_SET_IPV4_DST] = {
2545 .name = "set_ipv4_dst",
2546 .help = "Set a new IPv4 destination address in the outermost"
2548 .priv = PRIV_ACTION(SET_IPV4_DST,
2549 sizeof(struct rte_flow_action_set_ipv4)),
2550 .next = NEXT(action_set_ipv4_dst),
2553 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2554 .name = "ipv4_addr",
2555 .help = "new IPv4 destination address to set",
2556 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2557 .args = ARGS(ARGS_ENTRY_HTON
2558 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2559 .call = parse_vc_conf,
2561 [ACTION_SET_IPV6_SRC] = {
2562 .name = "set_ipv6_src",
2563 .help = "Set a new IPv6 source address in the outermost"
2565 .priv = PRIV_ACTION(SET_IPV6_SRC,
2566 sizeof(struct rte_flow_action_set_ipv6)),
2567 .next = NEXT(action_set_ipv6_src),
2570 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2571 .name = "ipv6_addr",
2572 .help = "new IPv6 source address to set",
2573 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2574 .args = ARGS(ARGS_ENTRY_HTON
2575 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2576 .call = parse_vc_conf,
2578 [ACTION_SET_IPV6_DST] = {
2579 .name = "set_ipv6_dst",
2580 .help = "Set a new IPv6 destination address in the outermost"
2582 .priv = PRIV_ACTION(SET_IPV6_DST,
2583 sizeof(struct rte_flow_action_set_ipv6)),
2584 .next = NEXT(action_set_ipv6_dst),
2587 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2588 .name = "ipv6_addr",
2589 .help = "new IPv6 destination address to set",
2590 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2591 .args = ARGS(ARGS_ENTRY_HTON
2592 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2593 .call = parse_vc_conf,
2595 [ACTION_SET_TP_SRC] = {
2596 .name = "set_tp_src",
2597 .help = "set a new source port number in the outermost"
2599 .priv = PRIV_ACTION(SET_TP_SRC,
2600 sizeof(struct rte_flow_action_set_tp)),
2601 .next = NEXT(action_set_tp_src),
2604 [ACTION_SET_TP_SRC_TP_SRC] = {
2606 .help = "new source port number to set",
2607 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2608 .args = ARGS(ARGS_ENTRY_HTON
2609 (struct rte_flow_action_set_tp, port)),
2610 .call = parse_vc_conf,
2612 [ACTION_SET_TP_DST] = {
2613 .name = "set_tp_dst",
2614 .help = "set a new destination port number in the outermost"
2616 .priv = PRIV_ACTION(SET_TP_DST,
2617 sizeof(struct rte_flow_action_set_tp)),
2618 .next = NEXT(action_set_tp_dst),
2621 [ACTION_SET_TP_DST_TP_DST] = {
2623 .help = "new destination port number to set",
2624 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2625 .args = ARGS(ARGS_ENTRY_HTON
2626 (struct rte_flow_action_set_tp, port)),
2627 .call = parse_vc_conf,
2631 /** Remove and return last entry from argument stack. */
/*
 * NOTE(review): elided listing — the function braces are missing; code
 * kept byte-identical.  LIFO pop: returns NULL when the stack is empty,
 * otherwise the most recently pushed entry (counterpart of push_args()
 * below).  Entries are borrowed pointers; nothing is freed here.
 */
2632 static const struct arg *
2633 pop_args(struct context *ctx)
2635 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2638 /** Add entry on top of the argument stack. */
/*
 * NOTE(review): elided listing — the return statements are missing.
 * Visible logic: refuses the push when the stack already holds
 * CTX_STACK_SIZE entries, otherwise stores the borrowed pointer and
 * bumps args_num.  Presumably returns nonzero on overflow and zero on
 * success — confirm against the full source.
 */
2640 push_args(struct context *ctx, const struct arg *arg)
2642 if (ctx->args_num == CTX_STACK_SIZE)
2644 ctx->args[ctx->args_num++] = arg;
2648 /** Spread value into buffer according to bit-mask. */
/*
 * NOTE(review): elided listing — the declaration of `sub`, the
 * big-endian branch and the loop tail are missing; code kept
 * byte-identical.  Visible logic: iterates over arg->mask bytes and,
 * for every set mask bit, clears that bit in *buf then ORs in the
 * current low-order bit of `val` — i.e. `val` is scattered into only
 * the masked bit positions.  Presumably `val` is shifted right after
 * each bit and the byte index direction depends on RTE_BYTE_ORDER
 * (elided).  Callers pass dst == NULL (see parse_prefix) — presumably
 * a dry run that only counts mask bits; confirm in the full source.
 */
2650 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2652 uint32_t i = arg->size;
2660 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2669 unsigned int shift = 0;
2670 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
2672 for (shift = 0; arg->mask[i] >> shift; ++shift) {
2673 if (!(arg->mask[i] & (1 << shift)))
2678 *buf &= ~(1 << shift);
2679 *buf |= (val & 1) << shift;
2687 /** Compare a string with a partial one of a given length. */
/*
 * NOTE(review): elided listing — the early returns are missing.
 * Visible logic: strncmp() over the first partial_len bytes; when
 * `full` extends past partial_len, the result is the extra character
 * full[partial_len] (nonzero), so a strict prefix of `full` does NOT
 * compare equal.  The r != 0 early return and the exact-match 0 return
 * are in the elided lines — confirm against the full source.
 */
2689 strcmp_partial(const char *full, const char *partial, size_t partial_len)
2691 int r = strncmp(full, partial, partial_len);
2695 if (strlen(full) <= partial_len)
2697 return full[partial_len];
/*
 * NOTE(review): elided listing — the function's opening comment lines,
 * error paths, local declarations (`u`, `end`, `v`, `bytes`, `extra`)
 * and returns are missing; code kept byte-identical.
 */
2701 * Parse a prefix length and generate a bit-mask.
2703 * Last argument (ctx->args) is retrieved to determine mask size, storage
2704 * location and whether the result must use network byte ordering.
2707 parse_prefix(struct context *ctx, const struct token *token,
2708 const char *str, unsigned int len,
2709 void *buf, unsigned int size)
2711 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set (0..8). */
2712 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2719 /* Argument is expected. */
/* Parse the numeric prefix length; reject trailing junk/range errors. */
2723 u = strtoumax(str, &end, 0);
2724 if (errno || (size_t)(end - str) != len)
/* Bit-field argument: fill through arg_entry_bf_fill(); the NULL call
 * is presumably a dry run — confirm in the elided lines. */
2729 extra = arg_entry_bf_fill(NULL, 0, arg);
2738 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2739 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2746 if (bytes > size || bytes + !!extra > size)
/* Plain byte mask: 0xff for whole bytes covered by the prefix plus a
 * conv[] partial byte; fill direction depends on RTE_BYTE_ORDER. */
2750 buf = (uint8_t *)ctx->object + arg->offset;
2751 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2753 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2754 memset(buf, 0x00, size - bytes);
2756 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
2760 memset(buf, 0xff, bytes);
2761 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2763 ((uint8_t *)buf)[bytes] = conv[extra];
/* Object mask (when present) marks the whole field as significant. */
2766 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Re-push of the popped argument — presumably the error path restores
 * it for the caller; the surrounding label is elided, confirm. */
2769 push_args(ctx, arg);
2773 /** Default parsing function for token name matching. */
/*
 * NOTE(review): elided listing — both return statements are missing.
 * Visible logic: the token matches iff strcmp_partial() reports
 * `str`/`len` as matching token->name; buf/size are unused here.
 */
2775 parse_default(struct context *ctx, const struct token *token,
2776 const char *str, unsigned int len,
2777 void *buf, unsigned int size)
2782 if (strcmp_partial(token->name, str, len))
2787 /** Parse flow command, initialize output buffer for subsequent tokens. */
/*
 * NOTE(review): elided listing — returns and the no-buffer early exit
 * are missing; code kept byte-identical.
 */
2789 parse_init(struct context *ctx, const struct token *token,
2790 const char *str, unsigned int len,
2791 void *buf, unsigned int size)
2793 struct buffer *out = buf;
2795 /* Token name must match. */
2796 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2798 /* Nothing else to do if there is no buffer. */
2801 /* Make sure buffer is large enough. */
2802 if (size < sizeof(*out))
2804 /* Initialize buffer. */
/* Zero the header, then fill the remaining scratch area with a 0x22
 * poison pattern — presumably to expose reads of never-written data;
 * confirm intent against the full source. */
2805 memset(out, 0x00, sizeof(*out));
2806 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
2809 ctx->objmask = NULL;
2813 /** Parse tokens for validate/create commands. */
/*
 * NOTE(review): elided listing — case labels, `else`/braces, local
 * declarations (`data`, `data_size`) and returns are missing; code
 * kept byte-identical.  Layout visible below: items/actions grow up
 * from the buffer header while their spec/conf storage is carved
 * downward from out->args.vc.data.
 */
2815 parse_vc(struct context *ctx, const struct token *token,
2816 const char *str, unsigned int len,
2817 void *buf, unsigned int size)
2819 struct buffer *out = buf;
2823 /* Token name must match. */
2824 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2826 /* Nothing else to do if there is no buffer. */
/* First token of the command: record VALIDATE/CREATE and point the
 * downward-growing data cursor at the end of the buffer. */
2829 if (!out->command) {
2830 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
2832 if (sizeof(*out) > size)
2834 out->command = ctx->curr;
2837 ctx->objmask = NULL;
2838 out->args.vc.data = (uint8_t *)out + size;
2842 ctx->object = &out->args.vc.attr;
2843 ctx->objmask = NULL;
/* Attribute / section tokens (case labels elided): set a flow
 * attribute bit, or start the pattern / actions arrays aligned just
 * past the header resp. past the pattern entries. */
2844 switch (ctx->curr) {
2849 out->args.vc.attr.ingress = 1;
2852 out->args.vc.attr.egress = 1;
2855 out->args.vc.attr.transfer = 1;
2858 out->args.vc.pattern =
2859 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
2861 ctx->object = out->args.vc.pattern;
2862 ctx->objmask = NULL;
2865 out->args.vc.actions =
2866 (void *)RTE_ALIGN_CEIL((uintptr_t)
2867 (out->args.vc.pattern +
2868 out->args.vc.pattern_n),
2870 ctx->object = out->args.vc.actions;
2871 ctx->objmask = NULL;
/* Pattern section (actions array not started yet): append a new
 * rte_flow_item and reserve spec/last/mask storage below data. */
2878 if (!out->args.vc.actions) {
2879 const struct parse_item_priv *priv = token->priv;
2880 struct rte_flow_item *item =
2881 out->args.vc.pattern + out->args.vc.pattern_n;
2883 data_size = priv->size * 3; /* spec, last, mask */
2884 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2885 (out->args.vc.data - data_size),
/* Overlap check: item array growing up must not meet data growing
 * down. */
2887 if ((uint8_t *)item + sizeof(*item) > data)
2889 *item = (struct rte_flow_item){
2892 ++out->args.vc.pattern_n;
2894 ctx->objmask = NULL;
/* Actions section (else branch, brace elided): append a new
 * rte_flow_action with its configuration storage. */
2896 const struct parse_action_priv *priv = token->priv;
2897 struct rte_flow_action *action =
2898 out->args.vc.actions + out->args.vc.actions_n;
2900 data_size = priv->size; /* configuration */
2901 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
2902 (out->args.vc.data - data_size),
2904 if ((uint8_t *)action + sizeof(*action) > data)
2906 *action = (struct rte_flow_action){
2908 .conf = data_size ? data : NULL,
2910 ++out->args.vc.actions_n;
2911 ctx->object = action;
2912 ctx->objmask = NULL;
/* Shared tail: zero the reserved storage and move the cursor down. */
2914 memset(data, 0, data_size);
2915 out->args.vc.data = data;
2916 ctx->objdata = data_size;
2920 /** Parse pattern item parameter type. */
/*
 * NOTE(review): elided listing — the `index` local and its per-case
 * assignments, `break`s and returns are missing; code kept
 * byte-identical.  The item's scratch area (reserved by parse_vc as
 * priv->size * 3) is split into thirds: spec (0), last (1), mask (2),
 * selected by `index` — presumably set in the elided case bodies.
 */
2922 parse_vc_spec(struct context *ctx, const struct token *token,
2923 const char *str, unsigned int len,
2924 void *buf, unsigned int size)
2926 struct buffer *out = buf;
2927 struct rte_flow_item *item;
2933 /* Token name must match. */
2934 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2936 /* Parse parameter types. */
2937 switch (ctx->curr) {
2938 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
2944 case ITEM_PARAM_SPEC:
2947 case ITEM_PARAM_LAST:
2950 case ITEM_PARAM_PREFIX:
2951 /* Modify next token to expect a prefix. */
2952 if (ctx->next_num < 2)
2954 ctx->next[ctx->next_num - 2] = prefix;
2956 case ITEM_PARAM_MASK:
2962 /* Nothing else to do if there is no buffer. */
2965 if (!out->args.vc.pattern_n)
/* Operate on the most recently appended pattern item. */
2967 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
2968 data_size = ctx->objdata / 3; /* spec, last, mask */
2969 /* Point to selected object. */
2970 ctx->object = out->args.vc.data + (data_size * index);
2972 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
2973 item->mask = ctx->objmask;
2975 ctx->objmask = NULL;
2976 /* Update relevant item pointer. */
2977 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
2982 /** Parse action configuration field. */
/*
 * NOTE(review): elided listing — returns and the no-buffer exit are
 * missing.  Visible logic: after a name match, aim subsequent argument
 * parsing at the current action's configuration storage
 * (out->args.vc.data, reserved by parse_vc); no mask for actions.
 */
2984 parse_vc_conf(struct context *ctx, const struct token *token,
2985 const char *str, unsigned int len,
2986 void *buf, unsigned int size)
2988 struct buffer *out = buf;
2991 /* Token name must match. */
2992 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2994 /* Nothing else to do if there is no buffer. */
2997 /* Point to selected object. */
2998 ctx->object = out->args.vc.data;
2999 ctx->objmask = NULL;
3003 /** Parse RSS action. */
/*
 * NOTE(review): elided listing — local declarations (`ret`, `i`),
 * error checks after parse_vc() and returns are missing; code kept
 * byte-identical.  Delegates token handling to parse_vc() first, then
 * fills the just-appended action's storage with defaults.
 */
3005 parse_vc_action_rss(struct context *ctx, const struct token *token,
3006 const char *str, unsigned int len,
3007 void *buf, unsigned int size)
3009 struct buffer *out = buf;
3010 struct rte_flow_action *action;
3011 struct action_rss_data *action_rss_data;
3015 ret = parse_vc(ctx, token, str, len, buf, size);
3018 /* Nothing else to do if there is no buffer. */
3021 if (!out->args.vc.actions_n)
3023 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3024 /* Point to selected object. */
3025 ctx->object = out->args.vc.data;
3026 ctx->objmask = NULL;
3027 /* Set up default configuration. */
/* Defaults: default hash function, full embedded key, queue count
 * capped at ACTION_RSS_QUEUE_NUM, conf pointers aimed at the embedded
 * key/queue arrays (some initializer fields elided). */
3028 action_rss_data = ctx->object;
3029 *action_rss_data = (struct action_rss_data){
3030 .conf = (struct rte_flow_action_rss){
3031 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3034 .key_len = sizeof(action_rss_data->key),
3035 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3036 .key = action_rss_data->key,
3037 .queue = action_rss_data->queue,
3039 .key = "testpmd's default RSS hash key, "
3040 "override it for better balancing",
/* Identity queue mapping 0..queue_num-1. */
3043 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3044 action_rss_data->queue[i] = i;
/* For a concrete valid port, clamp key_len to the device's reported
 * hash key size. */
3045 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3046 ctx->port != (portid_t)RTE_PORT_ALL) {
3047 struct rte_eth_dev_info info;
3049 rte_eth_dev_info_get(ctx->port, &info);
3050 action_rss_data->conf.key_len =
3051 RTE_MIN(sizeof(action_rss_data->key),
3052 info.hash_key_size);
/* Publish the configuration on the action. */
3054 action->conf = &action_rss_data->conf;
/*
 * NOTE(review): elided listing — `break`s, the default case and
 * returns are missing; code kept byte-identical.
 */
3059 * Parse func field for RSS action.
3061 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3062 * ACTION_RSS_FUNC_* index that called this function.
3065 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3066 const char *str, unsigned int len,
3067 void *buf, unsigned int size)
3069 struct action_rss_data *action_rss_data;
3070 enum rte_eth_hash_function func;
3074 /* Token name must match. */
3075 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the current token index to the matching hash function enum. */
3077 switch (ctx->curr) {
3078 case ACTION_RSS_FUNC_DEFAULT:
3079 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3081 case ACTION_RSS_FUNC_TOEPLITZ:
3082 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3084 case ACTION_RSS_FUNC_SIMPLE_XOR:
3085 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* Store into the RSS configuration set up by parse_vc_action_rss(). */
3092 action_rss_data = ctx->object;
3093 action_rss_data->conf.func = func;
3098 * Parse type field for RSS action.
3100 * Valid tokens are type field names and the "end" token.
3103 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3104 const char *str, unsigned int len,
3105 void *buf, unsigned int size)
3107 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3108 struct action_rss_data *action_rss_data;
3114 if (ctx->curr != ACTION_RSS_TYPE)
3116 if (!(ctx->objdata >> 16) && ctx->object) {
3117 action_rss_data = ctx->object;
3118 action_rss_data->conf.types = 0;
3120 if (!strcmp_partial("end", str, len)) {
3121 ctx->objdata &= 0xffff;
3124 for (i = 0; rss_type_table[i].str; ++i)
3125 if (!strcmp_partial(rss_type_table[i].str, str, len))
3127 if (!rss_type_table[i].str)
3129 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3131 if (ctx->next_num == RTE_DIM(ctx->next))
3133 ctx->next[ctx->next_num++] = next;
3136 action_rss_data = ctx->object;
3137 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3142 * Parse queue field for RSS action.
3144 * Valid tokens are queue indices and the "end" token.
3147 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3148 const char *str, unsigned int len,
3149 void *buf, unsigned int size)
3151 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3152 struct action_rss_data *action_rss_data;
3159 if (ctx->curr != ACTION_RSS_QUEUE)
3161 i = ctx->objdata >> 16;
3162 if (!strcmp_partial("end", str, len)) {
3163 ctx->objdata &= 0xffff;
3166 if (i >= ACTION_RSS_QUEUE_NUM)
3169 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3170 i * sizeof(action_rss_data->queue[i]),
3171 sizeof(action_rss_data->queue[i]))))
3173 ret = parse_int(ctx, token, str, len, NULL, 0);
3179 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3181 if (ctx->next_num == RTE_DIM(ctx->next))
3183 ctx->next[ctx->next_num++] = next;
3187 action_rss_data = ctx->object;
3188 action_rss_data->conf.queue_num = i;
3189 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3193 /** Parse VXLAN encap action. */
3195 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3196 const char *str, unsigned int len,
3197 void *buf, unsigned int size)
3199 struct buffer *out = buf;
3200 struct rte_flow_action *action;
3201 struct action_vxlan_encap_data *action_vxlan_encap_data;
3204 ret = parse_vc(ctx, token, str, len, buf, size);
3207 /* Nothing else to do if there is no buffer. */
3210 if (!out->args.vc.actions_n)
3212 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3213 /* Point to selected object. */
3214 ctx->object = out->args.vc.data;
3215 ctx->objmask = NULL;
3216 /* Set up default configuration. */
3217 action_vxlan_encap_data = ctx->object;
3218 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3219 .conf = (struct rte_flow_action_vxlan_encap){
3220 .definition = action_vxlan_encap_data->items,
3224 .type = RTE_FLOW_ITEM_TYPE_ETH,
3225 .spec = &action_vxlan_encap_data->item_eth,
3226 .mask = &rte_flow_item_eth_mask,
3229 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3230 .spec = &action_vxlan_encap_data->item_vlan,
3231 .mask = &rte_flow_item_vlan_mask,
3234 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3235 .spec = &action_vxlan_encap_data->item_ipv4,
3236 .mask = &rte_flow_item_ipv4_mask,
3239 .type = RTE_FLOW_ITEM_TYPE_UDP,
3240 .spec = &action_vxlan_encap_data->item_udp,
3241 .mask = &rte_flow_item_udp_mask,
3244 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3245 .spec = &action_vxlan_encap_data->item_vxlan,
3246 .mask = &rte_flow_item_vxlan_mask,
3249 .type = RTE_FLOW_ITEM_TYPE_END,
3254 .tci = vxlan_encap_conf.vlan_tci,
3258 .src_addr = vxlan_encap_conf.ipv4_src,
3259 .dst_addr = vxlan_encap_conf.ipv4_dst,
3262 .src_port = vxlan_encap_conf.udp_src,
3263 .dst_port = vxlan_encap_conf.udp_dst,
3265 .item_vxlan.flags = 0,
3267 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3268 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3269 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3270 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
3271 if (!vxlan_encap_conf.select_ipv4) {
3272 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3273 &vxlan_encap_conf.ipv6_src,
3274 sizeof(vxlan_encap_conf.ipv6_src));
3275 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3276 &vxlan_encap_conf.ipv6_dst,
3277 sizeof(vxlan_encap_conf.ipv6_dst));
3278 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3279 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3280 .spec = &action_vxlan_encap_data->item_ipv6,
3281 .mask = &rte_flow_item_ipv6_mask,
3284 if (!vxlan_encap_conf.select_vlan)
3285 action_vxlan_encap_data->items[1].type =
3286 RTE_FLOW_ITEM_TYPE_VOID;
3287 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3288 RTE_DIM(vxlan_encap_conf.vni));
3289 action->conf = &action_vxlan_encap_data->conf;
3293 /** Parse NVGRE encap action. */
3295 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3296 const char *str, unsigned int len,
3297 void *buf, unsigned int size)
3299 struct buffer *out = buf;
3300 struct rte_flow_action *action;
3301 struct action_nvgre_encap_data *action_nvgre_encap_data;
3304 ret = parse_vc(ctx, token, str, len, buf, size);
3307 /* Nothing else to do if there is no buffer. */
3310 if (!out->args.vc.actions_n)
3312 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3313 /* Point to selected object. */
3314 ctx->object = out->args.vc.data;
3315 ctx->objmask = NULL;
3316 /* Set up default configuration. */
3317 action_nvgre_encap_data = ctx->object;
3318 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3319 .conf = (struct rte_flow_action_nvgre_encap){
3320 .definition = action_nvgre_encap_data->items,
3324 .type = RTE_FLOW_ITEM_TYPE_ETH,
3325 .spec = &action_nvgre_encap_data->item_eth,
3326 .mask = &rte_flow_item_eth_mask,
3329 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3330 .spec = &action_nvgre_encap_data->item_vlan,
3331 .mask = &rte_flow_item_vlan_mask,
3334 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3335 .spec = &action_nvgre_encap_data->item_ipv4,
3336 .mask = &rte_flow_item_ipv4_mask,
3339 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3340 .spec = &action_nvgre_encap_data->item_nvgre,
3341 .mask = &rte_flow_item_nvgre_mask,
3344 .type = RTE_FLOW_ITEM_TYPE_END,
3349 .tci = nvgre_encap_conf.vlan_tci,
3353 .src_addr = nvgre_encap_conf.ipv4_src,
3354 .dst_addr = nvgre_encap_conf.ipv4_dst,
3356 .item_nvgre.flow_id = 0,
3358 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3359 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3360 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3361 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
3362 if (!nvgre_encap_conf.select_ipv4) {
3363 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3364 &nvgre_encap_conf.ipv6_src,
3365 sizeof(nvgre_encap_conf.ipv6_src));
3366 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3367 &nvgre_encap_conf.ipv6_dst,
3368 sizeof(nvgre_encap_conf.ipv6_dst));
3369 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3370 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3371 .spec = &action_nvgre_encap_data->item_ipv6,
3372 .mask = &rte_flow_item_ipv6_mask,
3375 if (!nvgre_encap_conf.select_vlan)
3376 action_nvgre_encap_data->items[1].type =
3377 RTE_FLOW_ITEM_TYPE_VOID;
3378 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3379 RTE_DIM(nvgre_encap_conf.tni));
3380 action->conf = &action_nvgre_encap_data->conf;
3384 /** Parse tokens for destroy command. */
3386 parse_destroy(struct context *ctx, const struct token *token,
3387 const char *str, unsigned int len,
3388 void *buf, unsigned int size)
3390 struct buffer *out = buf;
3392 /* Token name must match. */
3393 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3395 /* Nothing else to do if there is no buffer. */
3398 if (!out->command) {
3399 if (ctx->curr != DESTROY)
3401 if (sizeof(*out) > size)
3403 out->command = ctx->curr;
3406 ctx->objmask = NULL;
3407 out->args.destroy.rule =
3408 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3412 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
3413 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
3416 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
3417 ctx->objmask = NULL;
3421 /** Parse tokens for flush command. */
3423 parse_flush(struct context *ctx, const struct token *token,
3424 const char *str, unsigned int len,
3425 void *buf, unsigned int size)
3427 struct buffer *out = buf;
3429 /* Token name must match. */
3430 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3432 /* Nothing else to do if there is no buffer. */
3435 if (!out->command) {
3436 if (ctx->curr != FLUSH)
3438 if (sizeof(*out) > size)
3440 out->command = ctx->curr;
3443 ctx->objmask = NULL;
3448 /** Parse tokens for query command. */
3450 parse_query(struct context *ctx, const struct token *token,
3451 const char *str, unsigned int len,
3452 void *buf, unsigned int size)
3454 struct buffer *out = buf;
3456 /* Token name must match. */
3457 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3459 /* Nothing else to do if there is no buffer. */
3462 if (!out->command) {
3463 if (ctx->curr != QUERY)
3465 if (sizeof(*out) > size)
3467 out->command = ctx->curr;
3470 ctx->objmask = NULL;
3475 /** Parse action names. */
3477 parse_action(struct context *ctx, const struct token *token,
3478 const char *str, unsigned int len,
3479 void *buf, unsigned int size)
3481 struct buffer *out = buf;
3482 const struct arg *arg = pop_args(ctx);
3486 /* Argument is expected. */
3489 /* Parse action name. */
3490 for (i = 0; next_action[i]; ++i) {
3491 const struct parse_action_priv *priv;
3493 token = &token_list[next_action[i]];
3494 if (strcmp_partial(token->name, str, len))
3500 memcpy((uint8_t *)ctx->object + arg->offset,
3506 push_args(ctx, arg);
3510 /** Parse tokens for list command. */
3512 parse_list(struct context *ctx, const struct token *token,
3513 const char *str, unsigned int len,
3514 void *buf, unsigned int size)
3516 struct buffer *out = buf;
3518 /* Token name must match. */
3519 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3521 /* Nothing else to do if there is no buffer. */
3524 if (!out->command) {
3525 if (ctx->curr != LIST)
3527 if (sizeof(*out) > size)
3529 out->command = ctx->curr;
3532 ctx->objmask = NULL;
3533 out->args.list.group =
3534 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3538 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
3539 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
3542 ctx->object = out->args.list.group + out->args.list.group_n++;
3543 ctx->objmask = NULL;
3547 /** Parse tokens for isolate command. */
3549 parse_isolate(struct context *ctx, const struct token *token,
3550 const char *str, unsigned int len,
3551 void *buf, unsigned int size)
3553 struct buffer *out = buf;
3555 /* Token name must match. */
3556 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3558 /* Nothing else to do if there is no buffer. */
3561 if (!out->command) {
3562 if (ctx->curr != ISOLATE)
3564 if (sizeof(*out) > size)
3566 out->command = ctx->curr;
3569 ctx->objmask = NULL;
3575 * Parse signed/unsigned integers 8 to 64-bit long.
3577 * Last argument (ctx->args) is retrieved to determine integer type and
3581 parse_int(struct context *ctx, const struct token *token,
3582 const char *str, unsigned int len,
3583 void *buf, unsigned int size)
3585 const struct arg *arg = pop_args(ctx);
3590 /* Argument is expected. */
3595 (uintmax_t)strtoimax(str, &end, 0) :
3596 strtoumax(str, &end, 0);
3597 if (errno || (size_t)(end - str) != len)
3600 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
3601 (intmax_t)u > (intmax_t)arg->max)) ||
3602 (!arg->sign && (u < arg->min || u > arg->max))))
3607 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
3608 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3612 buf = (uint8_t *)ctx->object + arg->offset;
3616 case sizeof(uint8_t):
3617 *(uint8_t *)buf = u;
3619 case sizeof(uint16_t):
3620 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
3622 case sizeof(uint8_t [3]):
3623 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3625 ((uint8_t *)buf)[0] = u;
3626 ((uint8_t *)buf)[1] = u >> 8;
3627 ((uint8_t *)buf)[2] = u >> 16;
3631 ((uint8_t *)buf)[0] = u >> 16;
3632 ((uint8_t *)buf)[1] = u >> 8;
3633 ((uint8_t *)buf)[2] = u;
3635 case sizeof(uint32_t):
3636 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
3638 case sizeof(uint64_t):
3639 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
3644 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
3646 buf = (uint8_t *)ctx->objmask + arg->offset;
3651 push_args(ctx, arg);
3658 * Three arguments (ctx->args) are retrieved from the stack to store data,
3659 * its actual length and address (in that order).
3662 parse_string(struct context *ctx, const struct token *token,
3663 const char *str, unsigned int len,
3664 void *buf, unsigned int size)
3666 const struct arg *arg_data = pop_args(ctx);
3667 const struct arg *arg_len = pop_args(ctx);
3668 const struct arg *arg_addr = pop_args(ctx);
3669 char tmp[16]; /* Ought to be enough. */
3672 /* Arguments are expected. */
3676 push_args(ctx, arg_data);
3680 push_args(ctx, arg_len);
3681 push_args(ctx, arg_data);
3684 size = arg_data->size;
3685 /* Bit-mask fill is not supported. */
3686 if (arg_data->mask || size < len)
3690 /* Let parse_int() fill length information first. */
3691 ret = snprintf(tmp, sizeof(tmp), "%u", len);
3694 push_args(ctx, arg_len);
3695 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
3700 buf = (uint8_t *)ctx->object + arg_data->offset;
3701 /* Output buffer is not necessarily NUL-terminated. */
3702 memcpy(buf, str, len);
3703 memset((uint8_t *)buf + len, 0x00, size - len);
3705 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
3706 /* Save address if requested. */
3707 if (arg_addr->size) {
3708 memcpy((uint8_t *)ctx->object + arg_addr->offset,
3710 (uint8_t *)ctx->object + arg_data->offset
3714 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
3716 (uint8_t *)ctx->objmask + arg_data->offset
3722 push_args(ctx, arg_addr);
3723 push_args(ctx, arg_len);
3724 push_args(ctx, arg_data);
3729 * Parse a MAC address.
3731 * Last argument (ctx->args) is retrieved to determine storage size and
3735 parse_mac_addr(struct context *ctx, const struct token *token,
3736 const char *str, unsigned int len,
3737 void *buf, unsigned int size)
3739 const struct arg *arg = pop_args(ctx);
3740 struct ether_addr tmp;
3744 /* Argument is expected. */
3748 /* Bit-mask fill is not supported. */
3749 if (arg->mask || size != sizeof(tmp))
3751 /* Only network endian is supported. */
3754 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
3755 if (ret < 0 || (unsigned int)ret != len)
3759 buf = (uint8_t *)ctx->object + arg->offset;
3760 memcpy(buf, &tmp, size);
3762 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3765 push_args(ctx, arg);
3770 * Parse an IPv4 address.
3772 * Last argument (ctx->args) is retrieved to determine storage size and
3776 parse_ipv4_addr(struct context *ctx, const struct token *token,
3777 const char *str, unsigned int len,
3778 void *buf, unsigned int size)
3780 const struct arg *arg = pop_args(ctx);
3785 /* Argument is expected. */
3789 /* Bit-mask fill is not supported. */
3790 if (arg->mask || size != sizeof(tmp))
3792 /* Only network endian is supported. */
3795 memcpy(str2, str, len);
3797 ret = inet_pton(AF_INET, str2, &tmp);
3799 /* Attempt integer parsing. */
3800 push_args(ctx, arg);
3801 return parse_int(ctx, token, str, len, buf, size);
3805 buf = (uint8_t *)ctx->object + arg->offset;
3806 memcpy(buf, &tmp, size);
3808 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3811 push_args(ctx, arg);
3816 * Parse an IPv6 address.
3818 * Last argument (ctx->args) is retrieved to determine storage size and
3822 parse_ipv6_addr(struct context *ctx, const struct token *token,
3823 const char *str, unsigned int len,
3824 void *buf, unsigned int size)
3826 const struct arg *arg = pop_args(ctx);
3828 struct in6_addr tmp;
3832 /* Argument is expected. */
3836 /* Bit-mask fill is not supported. */
3837 if (arg->mask || size != sizeof(tmp))
3839 /* Only network endian is supported. */
3842 memcpy(str2, str, len);
3844 ret = inet_pton(AF_INET6, str2, &tmp);
3849 buf = (uint8_t *)ctx->object + arg->offset;
3850 memcpy(buf, &tmp, size);
3852 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3855 push_args(ctx, arg);
3859 /** Boolean values (even indices stand for false). */
3860 static const char *const boolean_name[] = {
3870 * Parse a boolean value.
3872 * Last argument (ctx->args) is retrieved to determine storage size and
3876 parse_boolean(struct context *ctx, const struct token *token,
3877 const char *str, unsigned int len,
3878 void *buf, unsigned int size)
3880 const struct arg *arg = pop_args(ctx);
3884 /* Argument is expected. */
3887 for (i = 0; boolean_name[i]; ++i)
3888 if (!strcmp_partial(boolean_name[i], str, len))
3890 /* Process token as integer. */
3891 if (boolean_name[i])
3892 str = i & 1 ? "1" : "0";
3893 push_args(ctx, arg);
3894 ret = parse_int(ctx, token, str, strlen(str), buf, size);
3895 return ret > 0 ? (int)len : ret;
3898 /** Parse port and update context. */
3900 parse_port(struct context *ctx, const struct token *token,
3901 const char *str, unsigned int len,
3902 void *buf, unsigned int size)
3904 struct buffer *out = &(struct buffer){ .port = 0 };
3912 ctx->objmask = NULL;
3913 size = sizeof(*out);
3915 ret = parse_int(ctx, token, str, len, out, size);
3917 ctx->port = out->port;
/** No completion. */
static int
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	(void)ctx;
	(void)token;
	(void)ent;
	(void)buf;
	(void)size;
	/* Zero entries available. */
	return 0;
}
3936 /** Complete boolean values. */
3938 comp_boolean(struct context *ctx, const struct token *token,
3939 unsigned int ent, char *buf, unsigned int size)
3945 for (i = 0; boolean_name[i]; ++i)
3946 if (buf && i == ent)
3947 return snprintf(buf, size, "%s", boolean_name[i]);
3953 /** Complete action names. */
3955 comp_action(struct context *ctx, const struct token *token,
3956 unsigned int ent, char *buf, unsigned int size)
3962 for (i = 0; next_action[i]; ++i)
3963 if (buf && i == ent)
3964 return snprintf(buf, size, "%s",
3965 token_list[next_action[i]].name);
3971 /** Complete available ports. */
3973 comp_port(struct context *ctx, const struct token *token,
3974 unsigned int ent, char *buf, unsigned int size)
3981 RTE_ETH_FOREACH_DEV(p) {
3982 if (buf && i == ent)
3983 return snprintf(buf, size, "%u", p);
3991 /** Complete available rule IDs. */
3993 comp_rule_id(struct context *ctx, const struct token *token,
3994 unsigned int ent, char *buf, unsigned int size)
3997 struct rte_port *port;
3998 struct port_flow *pf;
4001 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4002 ctx->port == (portid_t)RTE_PORT_ALL)
4004 port = &ports[ctx->port];
4005 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4006 if (buf && i == ent)
4007 return snprintf(buf, size, "%u", pf->id);
4015 /** Complete type field for RSS action. */
4017 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4018 unsigned int ent, char *buf, unsigned int size)
4024 for (i = 0; rss_type_table[i].str; ++i)
4029 return snprintf(buf, size, "%s", rss_type_table[ent].str);
4031 return snprintf(buf, size, "end");
4035 /** Complete queue field for RSS action. */
4037 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4038 unsigned int ent, char *buf, unsigned int size)
4045 return snprintf(buf, size, "%u", ent);
4047 return snprintf(buf, size, "end");
4051 /** Internal context. */
4052 static struct context cmd_flow_context;
4054 /** Global parser instance (cmdline API). */
4055 cmdline_parse_inst_t cmd_flow;
4057 /** Initialize context. */
4059 cmd_flow_context_init(struct context *ctx)
4061 /* A full memset() is not necessary. */
4071 ctx->objmask = NULL;
4074 /** Parse a token (cmdline API). */
4076 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4079 struct context *ctx = &cmd_flow_context;
4080 const struct token *token;
4081 const enum index *list;
4086 token = &token_list[ctx->curr];
4087 /* Check argument length. */
4090 for (len = 0; src[len]; ++len)
4091 if (src[len] == '#' || isspace(src[len]))
4095 /* Last argument and EOL detection. */
4096 for (i = len; src[i]; ++i)
4097 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4099 else if (!isspace(src[i])) {
4104 if (src[i] == '\r' || src[i] == '\n') {
4108 /* Initialize context if necessary. */
4109 if (!ctx->next_num) {
4112 ctx->next[ctx->next_num++] = token->next[0];
4114 /* Process argument through candidates. */
4115 ctx->prev = ctx->curr;
4116 list = ctx->next[ctx->next_num - 1];
4117 for (i = 0; list[i]; ++i) {
4118 const struct token *next = &token_list[list[i]];
4121 ctx->curr = list[i];
4123 tmp = next->call(ctx, next, src, len, result, size);
4125 tmp = parse_default(ctx, next, src, len, result, size);
4126 if (tmp == -1 || tmp != len)
4134 /* Push subsequent tokens if any. */
4136 for (i = 0; token->next[i]; ++i) {
4137 if (ctx->next_num == RTE_DIM(ctx->next))
4139 ctx->next[ctx->next_num++] = token->next[i];
4141 /* Push arguments if any. */
4143 for (i = 0; token->args[i]; ++i) {
4144 if (ctx->args_num == RTE_DIM(ctx->args))
4146 ctx->args[ctx->args_num++] = token->args[i];
4151 /** Return number of completion entries (cmdline API). */
4153 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
4155 struct context *ctx = &cmd_flow_context;
4156 const struct token *token = &token_list[ctx->curr];
4157 const enum index *list;
4161 /* Count number of tokens in current list. */
4163 list = ctx->next[ctx->next_num - 1];
4165 list = token->next[0];
4166 for (i = 0; list[i]; ++i)
4171 * If there is a single token, use its completion callback, otherwise
4172 * return the number of entries.
4174 token = &token_list[list[0]];
4175 if (i == 1 && token->comp) {
4176 /* Save index for cmd_flow_get_help(). */
4177 ctx->prev = list[0];
4178 return token->comp(ctx, token, 0, NULL, 0);
4183 /** Return a completion entry (cmdline API). */
4185 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
4186 char *dst, unsigned int size)
4188 struct context *ctx = &cmd_flow_context;
4189 const struct token *token = &token_list[ctx->curr];
4190 const enum index *list;
4194 /* Count number of tokens in current list. */
4196 list = ctx->next[ctx->next_num - 1];
4198 list = token->next[0];
4199 for (i = 0; list[i]; ++i)
4203 /* If there is a single token, use its completion callback. */
4204 token = &token_list[list[0]];
4205 if (i == 1 && token->comp) {
4206 /* Save index for cmd_flow_get_help(). */
4207 ctx->prev = list[0];
4208 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
4210 /* Otherwise make sure the index is valid and use defaults. */
4213 token = &token_list[list[index]];
4214 snprintf(dst, size, "%s", token->name);
4215 /* Save index for cmd_flow_get_help(). */
4216 ctx->prev = list[index];
4220 /** Populate help strings for current token (cmdline API). */
4222 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
4224 struct context *ctx = &cmd_flow_context;
4225 const struct token *token = &token_list[ctx->prev];
4230 /* Set token type and update global help with details. */
4231 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
4233 cmd_flow.help_str = token->help;
4235 cmd_flow.help_str = token->name;
4239 /** Token definition template (cmdline API). */
4240 static struct cmdline_token_hdr cmd_flow_token_hdr = {
4241 .ops = &(struct cmdline_token_ops){
4242 .parse = cmd_flow_parse,
4243 .complete_get_nb = cmd_flow_complete_get_nb,
4244 .complete_get_elt = cmd_flow_complete_get_elt,
4245 .get_help = cmd_flow_get_help,
4250 /** Populate the next dynamic token. */
4252 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
4253 cmdline_parse_token_hdr_t **hdr_inst)
4255 struct context *ctx = &cmd_flow_context;
4257 /* Always reinitialize context before requesting the first token. */
4258 if (!(hdr_inst - cmd_flow.tokens))
4259 cmd_flow_context_init(ctx);
4260 /* Return NULL when no more tokens are expected. */
4261 if (!ctx->next_num && ctx->curr) {
4265 /* Determine if command should end here. */
4266 if (ctx->eol && ctx->last && ctx->next_num) {
4267 const enum index *list = ctx->next[ctx->next_num - 1];
4270 for (i = 0; list[i]; ++i) {
4277 *hdr = &cmd_flow_token_hdr;
4280 /** Dispatch parsed buffer to function calls. */
4282 cmd_flow_parsed(const struct buffer *in)
4284 switch (in->command) {
4286 port_flow_validate(in->port, &in->args.vc.attr,
4287 in->args.vc.pattern, in->args.vc.actions);
4290 port_flow_create(in->port, &in->args.vc.attr,
4291 in->args.vc.pattern, in->args.vc.actions);
4294 port_flow_destroy(in->port, in->args.destroy.rule_n,
4295 in->args.destroy.rule);
4298 port_flow_flush(in->port);
4301 port_flow_query(in->port, in->args.query.rule,
4302 &in->args.query.action);
4305 port_flow_list(in->port, in->args.list.group_n,
4306 in->args.list.group);
4309 port_flow_isolate(in->port, in->args.isolate.set);
4316 /** Token generator and output processing callback (cmdline API). */
4318 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
4321 cmd_flow_tok(arg0, arg2);
4323 cmd_flow_parsed(arg0);
4326 /** Global parser instance (cmdline API). */
4327 cmdline_parse_inst_t cmd_flow = {
4329 .data = NULL, /**< Unused. */
4330 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
4333 }, /**< Tokens are returned by cmd_flow_tok(). */