1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
47 /* Top-level command. */
50 /* Sub-level commands. */
59 /* Destroy arguments. */
62 /* Query arguments. */
68 /* Validate/create arguments. */
75 /* Validate/create pattern. */
112 ITEM_VLAN_INNER_TYPE,
144 ITEM_E_TAG_GRP_ECID_B,
163 ITEM_ARP_ETH_IPV4_SHA,
164 ITEM_ARP_ETH_IPV4_SPA,
165 ITEM_ARP_ETH_IPV4_THA,
166 ITEM_ARP_ETH_IPV4_TPA,
168 ITEM_IPV6_EXT_NEXT_HDR,
173 ITEM_ICMP6_ND_NS_TARGET_ADDR,
175 ITEM_ICMP6_ND_NA_TARGET_ADDR,
177 ITEM_ICMP6_ND_OPT_TYPE,
178 ITEM_ICMP6_ND_OPT_SLA_ETH,
179 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
180 ITEM_ICMP6_ND_OPT_TLA_ETH,
181 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
185 /* Validate/create actions. */
205 ACTION_RSS_FUNC_DEFAULT,
206 ACTION_RSS_FUNC_TOEPLITZ,
207 ACTION_RSS_FUNC_SIMPLE_XOR,
219 ACTION_PHY_PORT_ORIGINAL,
220 ACTION_PHY_PORT_INDEX,
222 ACTION_PORT_ID_ORIGINAL,
226 ACTION_OF_SET_MPLS_TTL,
227 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
228 ACTION_OF_DEC_MPLS_TTL,
229 ACTION_OF_SET_NW_TTL,
230 ACTION_OF_SET_NW_TTL_NW_TTL,
231 ACTION_OF_DEC_NW_TTL,
232 ACTION_OF_COPY_TTL_OUT,
233 ACTION_OF_COPY_TTL_IN,
236 ACTION_OF_PUSH_VLAN_ETHERTYPE,
237 ACTION_OF_SET_VLAN_VID,
238 ACTION_OF_SET_VLAN_VID_VLAN_VID,
239 ACTION_OF_SET_VLAN_PCP,
240 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
242 ACTION_OF_POP_MPLS_ETHERTYPE,
244 ACTION_OF_PUSH_MPLS_ETHERTYPE,
251 ACTION_MPLSOGRE_ENCAP,
252 ACTION_MPLSOGRE_DECAP,
253 ACTION_MPLSOUDP_ENCAP,
254 ACTION_MPLSOUDP_DECAP,
256 ACTION_SET_IPV4_SRC_IPV4_SRC,
258 ACTION_SET_IPV4_DST_IPV4_DST,
260 ACTION_SET_IPV6_SRC_IPV6_SRC,
262 ACTION_SET_IPV6_DST_IPV6_DST,
264 ACTION_SET_TP_SRC_TP_SRC,
266 ACTION_SET_TP_DST_TP_DST,
272 ACTION_SET_MAC_SRC_MAC_SRC,
274 ACTION_SET_MAC_DST_MAC_DST,
277 /** Maximum size for pattern in struct rte_flow_item_raw. */
278 #define ITEM_RAW_PATTERN_SIZE 40
280 /** Storage size for struct rte_flow_item_raw including pattern. */
281 #define ITEM_RAW_SIZE \
282 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
284 /** Maximum number of queue indices in struct rte_flow_action_rss. */
285 #define ACTION_RSS_QUEUE_NUM 32
287 /** Storage for struct rte_flow_action_rss including external data. */
288 struct action_rss_data {
289 struct rte_flow_action_rss conf; /**< RSS action configuration. */
290 uint8_t key[RSS_HASH_KEY_LENGTH]; /**< External storage for the hash key. */
291 uint16_t queue[ACTION_RSS_QUEUE_NUM]; /**< External storage for queue indices. */
294 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
295 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
297 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
298 struct action_vxlan_encap_data {
299 struct rte_flow_action_vxlan_encap conf; /**< VXLAN encap configuration. */
300 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; /**< Encap pattern items. */
301 struct rte_flow_item_eth item_eth; /**< Ethernet item storage. */
302 struct rte_flow_item_vlan item_vlan; /**< VLAN item storage. */
304 struct rte_flow_item_ipv4 item_ipv4; /**< IPv4 item storage. */
305 struct rte_flow_item_ipv6 item_ipv6; /**< IPv6 item storage. */
307 struct rte_flow_item_udp item_udp; /**< UDP item storage. */
308 struct rte_flow_item_vxlan item_vxlan; /**< VXLAN item storage. */
311 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
312 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
314 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
315 struct action_nvgre_encap_data {
316 struct rte_flow_action_nvgre_encap conf; /**< NVGRE encap configuration. */
317 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM]; /**< Encap pattern items. */
318 struct rte_flow_item_eth item_eth; /**< Ethernet item storage. */
319 struct rte_flow_item_vlan item_vlan; /**< VLAN item storage. */
321 struct rte_flow_item_ipv4 item_ipv4; /**< IPv4 item storage. */
322 struct rte_flow_item_ipv6 item_ipv6; /**< IPv6 item storage. */
324 struct rte_flow_item_nvgre item_nvgre; /**< NVGRE item storage. */
327 /** Maximum data size in struct rte_flow_action_raw_encap. */
328 #define ACTION_RAW_ENCAP_MAX_DATA 128
330 /** Storage for struct rte_flow_action_raw_encap including external data. */
331 struct action_raw_encap_data {
332 struct rte_flow_action_raw_encap conf; /**< Raw encap configuration. */
333 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; /**< External storage for encap data. */
334 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA]; /**< External storage for preserve field — NOTE(review): semantics defined by rte_flow_action_raw_encap, confirm against rte_flow docs. */
337 /** Storage for struct rte_flow_action_raw_decap including external data. */
338 struct action_raw_decap_data {
339 struct rte_flow_action_raw_decap conf; /**< Raw decap configuration. */
340 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; /**< External storage for decap data. */
343 /** Maximum number of subsequent tokens and arguments on the stack. */
344 #define CTX_STACK_SIZE 16
346 /** Parser context. */
348 /** Stack of subsequent token lists to process. */
349 const enum index *next[CTX_STACK_SIZE];
350 /** Arguments for stacked tokens. */
351 const void *args[CTX_STACK_SIZE];
352 enum index curr; /**< Current token index. */
353 enum index prev; /**< Index of the last token seen. */
354 int next_num; /**< Number of entries in next[]. */
355 int args_num; /**< Number of entries in args[]. */
356 uint32_t eol:1; /**< EOL has been detected. */
357 uint32_t last:1; /**< No more arguments. */
358 portid_t port; /**< Current port ID (for completions). */
359 uint32_t objdata; /**< Object-specific data. */
360 void *object; /**< Address of current object for relative offsets. */
361 void *objmask; /**< Object a full mask must be written to. */
364 /** Token argument. */
366 uint32_t hton:1; /**< Use network byte ordering. */
367 uint32_t sign:1; /**< Value is signed. */
368 uint32_t bounded:1; /**< Value is bounded. */
369 uintmax_t min; /**< Minimum value if bounded. */
370 uintmax_t max; /**< Maximum value if bounded. */
371 uint32_t offset; /**< Relative offset from ctx->object. */
372 uint32_t size; /**< Field size. */
373 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
376 /** Parser token definition. */
378 /** Type displayed during completion (defaults to "TOKEN"). */
380 /** Help displayed during completion (defaults to token name). */
382 /** Private data used by parser functions. */
385 * Lists of subsequent tokens to push on the stack. Each call to the
386 * parser consumes the last entry of that stack.
388 const enum index *const *next;
389 /** Arguments stack for subsequent tokens that need them. */
390 const struct arg *const *args;
392 * Token-processing callback, returns -1 in case of error, the
393 * length of the matched string otherwise. If NULL, attempts to
394 * match the token name.
396 * If buf is not NULL, the result should be stored in it according
397 * to context. An error is returned if not large enough.
399 int (*call)(struct context *ctx, const struct token *token,
400 const char *str, unsigned int len,
401 void *buf, unsigned int size);
403 * Callback that provides possible values for this token, used for
404 * completion. Returns -1 in case of error, the number of possible
405 * values otherwise. If NULL, the token name is used.
407 * If buf is not NULL, entry index ent is written to buf and the
408 * full length of the entry is returned (same behavior as
411 int (*comp)(struct context *ctx, const struct token *token,
412 unsigned int ent, char *buf, unsigned int size);
413 /** Mandatory token name, no default value. */
417 /** Static initializer for the next field. */
418 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
420 /** Static initializer for a NEXT() entry. */
421 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
423 /** Static initializer for the args field. */
424 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
426 /** Static initializer for ARGS() to target a field. */
427 #define ARGS_ENTRY(s, f) \
428 (&(const struct arg){ \
429 .offset = offsetof(s, f), \
430 .size = sizeof(((s *)0)->f), \
433 /** Static initializer for ARGS() to target a bit-field. */
434 #define ARGS_ENTRY_BF(s, f, b) \
435 (&(const struct arg){ \
437 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
440 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
441 #define ARGS_ENTRY_MASK(s, f, m) \
442 (&(const struct arg){ \
443 .offset = offsetof(s, f), \
444 .size = sizeof(((s *)0)->f), \
445 .mask = (const void *)(m), \
448 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
449 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
450 (&(const struct arg){ \
452 .offset = offsetof(s, f), \
453 .size = sizeof(((s *)0)->f), \
454 .mask = (const void *)(m), \
457 /** Static initializer for ARGS() to target a pointer. */
458 #define ARGS_ENTRY_PTR(s, f) \
459 (&(const struct arg){ \
460 .size = sizeof(*((s *)0)->f), \
463 /** Static initializer for ARGS() with arbitrary offset and size. */
464 #define ARGS_ENTRY_ARB(o, s) \
465 (&(const struct arg){ \
470 /** Same as ARGS_ENTRY_ARB() with bounded values. */
471 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
472 (&(const struct arg){ \
480 /** Same as ARGS_ENTRY() using network byte ordering. */
481 #define ARGS_ENTRY_HTON(s, f) \
482 (&(const struct arg){ \
484 .offset = offsetof(s, f), \
485 .size = sizeof(((s *)0)->f), \
488 /** Parser output buffer layout expected by cmd_flow_parsed(). */
490 enum index command; /**< Flow command. */
491 portid_t port; /**< Affected port ID. */
494 struct rte_flow_attr attr;
495 struct rte_flow_item *pattern;
496 struct rte_flow_action *actions;
500 } vc; /**< Validate/create arguments. */
504 } destroy; /**< Destroy arguments. */
507 struct rte_flow_action action;
508 } query; /**< Query arguments. */
512 } list; /**< List arguments. */
515 } isolate; /**< Isolated mode arguments. */
516 } args; /**< Command arguments. */
519 /** Private data for pattern items. */
520 struct parse_item_priv {
521 enum rte_flow_item_type type; /**< Item type. */
522 uint32_t size; /**< Size of item specification structure. */
525 #define PRIV_ITEM(t, s) \
526 (&(const struct parse_item_priv){ \
527 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
531 /** Private data for actions. */
532 struct parse_action_priv {
533 enum rte_flow_action_type type; /**< Action type. */
534 uint32_t size; /**< Size of action configuration structure. */
537 #define PRIV_ACTION(t, s) \
538 (&(const struct parse_action_priv){ \
539 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
543 static const enum index next_vc_attr[] = {
553 static const enum index next_destroy_attr[] = {
559 static const enum index next_list_attr[] = {
565 static const enum index item_param[] = {
574 static const enum index next_item[] = {
610 ITEM_ICMP6_ND_OPT_SLA_ETH,
611 ITEM_ICMP6_ND_OPT_TLA_ETH,
616 static const enum index item_fuzzy[] = {
622 static const enum index item_any[] = {
628 static const enum index item_vf[] = {
634 static const enum index item_phy_port[] = {
640 static const enum index item_port_id[] = {
646 static const enum index item_mark[] = {
652 static const enum index item_raw[] = {
662 static const enum index item_eth[] = {
670 static const enum index item_vlan[] = {
675 ITEM_VLAN_INNER_TYPE,
680 static const enum index item_ipv4[] = {
690 static const enum index item_ipv6[] = {
701 static const enum index item_icmp[] = {
708 static const enum index item_udp[] = {
715 static const enum index item_tcp[] = {
723 static const enum index item_sctp[] = {
732 static const enum index item_vxlan[] = {
738 static const enum index item_e_tag[] = {
739 ITEM_E_TAG_GRP_ECID_B,
744 static const enum index item_nvgre[] = {
750 static const enum index item_mpls[] = {
756 static const enum index item_gre[] = {
762 static const enum index item_gtp[] = {
768 static const enum index item_geneve[] = {
775 static const enum index item_vxlan_gpe[] = {
781 static const enum index item_arp_eth_ipv4[] = {
782 ITEM_ARP_ETH_IPV4_SHA,
783 ITEM_ARP_ETH_IPV4_SPA,
784 ITEM_ARP_ETH_IPV4_THA,
785 ITEM_ARP_ETH_IPV4_TPA,
790 static const enum index item_ipv6_ext[] = {
791 ITEM_IPV6_EXT_NEXT_HDR,
796 static const enum index item_icmp6[] = {
803 static const enum index item_icmp6_nd_ns[] = {
804 ITEM_ICMP6_ND_NS_TARGET_ADDR,
809 static const enum index item_icmp6_nd_na[] = {
810 ITEM_ICMP6_ND_NA_TARGET_ADDR,
815 static const enum index item_icmp6_nd_opt[] = {
816 ITEM_ICMP6_ND_OPT_TYPE,
821 static const enum index item_icmp6_nd_opt_sla_eth[] = {
822 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
827 static const enum index item_icmp6_nd_opt_tla_eth[] = {
828 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
833 static const enum index item_meta[] = {
839 static const enum index next_action[] = {
855 ACTION_OF_SET_MPLS_TTL,
856 ACTION_OF_DEC_MPLS_TTL,
857 ACTION_OF_SET_NW_TTL,
858 ACTION_OF_DEC_NW_TTL,
859 ACTION_OF_COPY_TTL_OUT,
860 ACTION_OF_COPY_TTL_IN,
863 ACTION_OF_SET_VLAN_VID,
864 ACTION_OF_SET_VLAN_PCP,
873 ACTION_MPLSOGRE_ENCAP,
874 ACTION_MPLSOGRE_DECAP,
875 ACTION_MPLSOUDP_ENCAP,
876 ACTION_MPLSOUDP_DECAP,
891 static const enum index action_mark[] = {
897 static const enum index action_queue[] = {
903 static const enum index action_count[] = {
910 static const enum index action_rss[] = {
921 static const enum index action_vf[] = {
928 static const enum index action_phy_port[] = {
929 ACTION_PHY_PORT_ORIGINAL,
930 ACTION_PHY_PORT_INDEX,
935 static const enum index action_port_id[] = {
936 ACTION_PORT_ID_ORIGINAL,
942 static const enum index action_meter[] = {
948 static const enum index action_of_set_mpls_ttl[] = {
949 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
954 static const enum index action_of_set_nw_ttl[] = {
955 ACTION_OF_SET_NW_TTL_NW_TTL,
960 static const enum index action_of_push_vlan[] = {
961 ACTION_OF_PUSH_VLAN_ETHERTYPE,
966 static const enum index action_of_set_vlan_vid[] = {
967 ACTION_OF_SET_VLAN_VID_VLAN_VID,
972 static const enum index action_of_set_vlan_pcp[] = {
973 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
978 static const enum index action_of_pop_mpls[] = {
979 ACTION_OF_POP_MPLS_ETHERTYPE,
984 static const enum index action_of_push_mpls[] = {
985 ACTION_OF_PUSH_MPLS_ETHERTYPE,
990 static const enum index action_set_ipv4_src[] = {
991 ACTION_SET_IPV4_SRC_IPV4_SRC,
996 static const enum index action_set_mac_src[] = {
997 ACTION_SET_MAC_SRC_MAC_SRC,
1002 static const enum index action_set_ipv4_dst[] = {
1003 ACTION_SET_IPV4_DST_IPV4_DST,
1008 static const enum index action_set_ipv6_src[] = {
1009 ACTION_SET_IPV6_SRC_IPV6_SRC,
1014 static const enum index action_set_ipv6_dst[] = {
1015 ACTION_SET_IPV6_DST_IPV6_DST,
1020 static const enum index action_set_tp_src[] = {
1021 ACTION_SET_TP_SRC_TP_SRC,
1026 static const enum index action_set_tp_dst[] = {
1027 ACTION_SET_TP_DST_TP_DST,
1032 static const enum index action_set_ttl[] = {
1038 static const enum index action_jump[] = {
1044 static const enum index action_set_mac_dst[] = {
1045 ACTION_SET_MAC_DST_MAC_DST,
1050 static int parse_init(struct context *ctx, const struct token *token,
1051 const char *str, unsigned int len,
1052 void *buf, unsigned int size);
1053 static int parse_vc(struct context *ctx, const struct token *token,
1054 const char *str, unsigned int len,
1055 void *buf, unsigned int size);
1056 static int parse_vc_spec(struct context *ctx, const struct token *token,
1057 const char *str, unsigned int len, void *buf, unsigned int size);
1058 static int parse_vc_conf(struct context *ctx, const struct token *token,
1059 const char *str, unsigned int len, void *buf, unsigned int size);
1060 static int parse_vc_action_rss(struct context *, const struct token *,
1061 const char *, unsigned int, void *,
1063 static int parse_vc_action_rss_func(struct context *, const struct token *,
1064 const char *, unsigned int, void *,
1066 static int parse_vc_action_rss_type(struct context *, const struct token *,
1067 const char *, unsigned int, void *,
1069 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1070 const char *, unsigned int, void *,
1072 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1073 const char *, unsigned int, void *,
1075 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1076 const char *, unsigned int, void *,
1078 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1079 const char *, unsigned int, void *,
1081 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1082 const char *, unsigned int, void *,
1084 static int parse_vc_action_mplsogre_encap(struct context *ctx,
1085 const struct token *token, const char *str,
1086 unsigned int len, void *buf, unsigned int size);
1087 static int parse_vc_action_mplsogre_decap(struct context *ctx,
1088 const struct token *token, const char *str,
1089 unsigned int len, void *buf, unsigned int size);
1090 static int parse_vc_action_mplsoudp_encap(struct context *ctx,
1091 const struct token *token, const char *str,
1092 unsigned int len, void *buf, unsigned int size);
1093 static int parse_vc_action_mplsoudp_decap(struct context *ctx,
1094 const struct token *token, const char *str,
1095 unsigned int len, void *buf, unsigned int size);
1096 static int parse_destroy(struct context *ctx, const struct token *token,
1097 const char *str, unsigned int len,
1098 void *buf, unsigned int size);
1099 static int parse_flush(struct context *ctx, const struct token *token,
1100 const char *str, unsigned int len,
1101 void *buf, unsigned int size);
1102 static int parse_query(struct context *ctx, const struct token *token,
1103 const char *str, unsigned int len,
1104 void *buf, unsigned int size);
1105 static int parse_action(struct context *ctx, const struct token *token,
1106 const char *str, unsigned int len,
1107 void *buf, unsigned int size);
1108 static int parse_list(struct context *ctx, const struct token *token,
1109 const char *str, unsigned int len,
1110 void *buf, unsigned int size);
1111 static int parse_isolate(struct context *ctx, const struct token *token,
1112 const char *str, unsigned int len,
1113 void *buf, unsigned int size);
1114 static int parse_int(struct context *ctx, const struct token *token,
1115 const char *str, unsigned int len,
1116 void *buf, unsigned int size);
1117 static int parse_prefix(struct context *ctx, const struct token *token,
1118 const char *str, unsigned int len,
1119 void *buf, unsigned int size);
1120 static int parse_boolean(struct context *ctx, const struct token *token,
1121 const char *str, unsigned int len,
1122 void *buf, unsigned int size);
1123 static int parse_string(struct context *ctx, const struct token *token,
1124 const char *str, unsigned int len,
1125 void *buf, unsigned int size);
1126 static int parse_hex(struct context *ctx, const struct token *token,
1127 const char *str, unsigned int len,
1128 void *buf, unsigned int size);
1129 static int parse_mac_addr(struct context *ctx, const struct token *token,
1130 const char *str, unsigned int len,
1131 void *buf, unsigned int size);
1132 static int parse_ipv4_addr(struct context *ctx, const struct token *token,
1133 const char *str, unsigned int len,
1134 void *buf, unsigned int size);
1135 static int parse_ipv6_addr(struct context *ctx, const struct token *token,
1136 const char *str, unsigned int len,
1137 void *buf, unsigned int size);
1138 static int parse_port(struct context *ctx, const struct token *token,
1139 const char *str, unsigned int len,
1140 void *buf, unsigned int size);
1141 static int comp_none(struct context *ctx, const struct token *token,
1142 unsigned int ent, char *buf, unsigned int size);
1143 static int comp_boolean(struct context *ctx, const struct token *token,
1144 unsigned int ent, char *buf, unsigned int size);
1145 static int comp_action(struct context *ctx, const struct token *token,
1146 unsigned int ent, char *buf, unsigned int size);
1147 static int comp_port(struct context *ctx, const struct token *token,
1148 unsigned int ent, char *buf, unsigned int size);
1149 static int comp_rule_id(struct context *ctx, const struct token *token,
1150 unsigned int ent, char *buf, unsigned int size);
1151 static int comp_vc_action_rss_type(struct context *ctx, const struct token *token,
1152 unsigned int ent, char *buf, unsigned int size);
1153 static int comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
1154 unsigned int ent, char *buf, unsigned int size);
1156 /** Token definitions. */
1157 static const struct token token_list[] = {
1158 /* Special tokens. */
1161 .help = "null entry, abused as the entry point",
1162 .next = NEXT(NEXT_ENTRY(FLOW)),
1167 .help = "command may end here",
1169 /* Common tokens. */
1173 .help = "integer value",
1178 .name = "{unsigned}",
1180 .help = "unsigned integer value",
1187 .help = "prefix length for bit-mask",
1188 .call = parse_prefix,
1192 .name = "{boolean}",
1194 .help = "any boolean value",
1195 .call = parse_boolean,
1196 .comp = comp_boolean,
1201 .help = "fixed string",
1202 .call = parse_string,
1208 .help = "fixed string",
1213 .name = "{MAC address}",
1215 .help = "standard MAC address notation",
1216 .call = parse_mac_addr,
1220 .name = "{IPv4 address}",
1221 .type = "IPV4 ADDRESS",
1222 .help = "standard IPv4 address notation",
1223 .call = parse_ipv4_addr,
1227 .name = "{IPv6 address}",
1228 .type = "IPV6 ADDRESS",
1229 .help = "standard IPv6 address notation",
1230 .call = parse_ipv6_addr,
1234 .name = "{rule id}",
1236 .help = "rule identifier",
1238 .comp = comp_rule_id,
1241 .name = "{port_id}",
1243 .help = "port identifier",
1248 .name = "{group_id}",
1250 .help = "group identifier",
1254 [PRIORITY_LEVEL] = {
1257 .help = "priority level",
1261 /* Top-level command. */
1264 .type = "{command} {port_id} [{arg} [...]]",
1265 .help = "manage ingress/egress flow rules",
1266 .next = NEXT(NEXT_ENTRY
1276 /* Sub-level commands. */
1279 .help = "check whether a flow rule can be created",
1280 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1281 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1286 .help = "create a flow rule",
1287 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1288 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1293 .help = "destroy specific flow rules",
1294 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1295 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1296 .call = parse_destroy,
1300 .help = "destroy all flow rules",
1301 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1302 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1303 .call = parse_flush,
1307 .help = "query an existing flow rule",
1308 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1309 NEXT_ENTRY(RULE_ID),
1310 NEXT_ENTRY(PORT_ID)),
1311 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1312 ARGS_ENTRY(struct buffer, args.query.rule),
1313 ARGS_ENTRY(struct buffer, port)),
1314 .call = parse_query,
1318 .help = "list existing flow rules",
1319 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1320 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1325 .help = "restrict ingress traffic to the defined flow rules",
1326 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1327 NEXT_ENTRY(PORT_ID)),
1328 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1329 ARGS_ENTRY(struct buffer, port)),
1330 .call = parse_isolate,
1332 /* Destroy arguments. */
1335 .help = "specify a rule identifier",
1336 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1337 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1338 .call = parse_destroy,
1340 /* Query arguments. */
1344 .help = "action to query, must be part of the rule",
1345 .call = parse_action,
1346 .comp = comp_action,
1348 /* List arguments. */
1351 .help = "specify a group",
1352 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1353 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1356 /* Validate/create attributes. */
1359 .help = "specify a group",
1360 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1361 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1366 .help = "specify a priority level",
1367 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1368 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1373 .help = "affect rule to ingress",
1374 .next = NEXT(next_vc_attr),
1379 .help = "affect rule to egress",
1380 .next = NEXT(next_vc_attr),
1385 .help = "apply rule directly to endpoints found in pattern",
1386 .next = NEXT(next_vc_attr),
1389 /* Validate/create pattern. */
1392 .help = "submit a list of pattern items",
1393 .next = NEXT(next_item),
1398 .help = "match value perfectly (with full bit-mask)",
1399 .call = parse_vc_spec,
1401 [ITEM_PARAM_SPEC] = {
1403 .help = "match value according to configured bit-mask",
1404 .call = parse_vc_spec,
1406 [ITEM_PARAM_LAST] = {
1408 .help = "specify upper bound to establish a range",
1409 .call = parse_vc_spec,
1411 [ITEM_PARAM_MASK] = {
1413 .help = "specify bit-mask with relevant bits set to one",
1414 .call = parse_vc_spec,
1416 [ITEM_PARAM_PREFIX] = {
1418 .help = "generate bit-mask from a prefix length",
1419 .call = parse_vc_spec,
1423 .help = "specify next pattern item",
1424 .next = NEXT(next_item),
1428 .help = "end list of pattern items",
1429 .priv = PRIV_ITEM(END, 0),
1430 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1435 .help = "no-op pattern item",
1436 .priv = PRIV_ITEM(VOID, 0),
1437 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1442 .help = "perform actions when pattern does not match",
1443 .priv = PRIV_ITEM(INVERT, 0),
1444 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1449 .help = "match any protocol for the current layer",
1450 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1451 .next = NEXT(item_any),
1456 .help = "number of layers covered",
1457 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1458 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1462 .help = "match traffic from/to the physical function",
1463 .priv = PRIV_ITEM(PF, 0),
1464 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1469 .help = "match traffic from/to a virtual function ID",
1470 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1471 .next = NEXT(item_vf),
1477 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1478 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1482 .help = "match traffic from/to a specific physical port",
1483 .priv = PRIV_ITEM(PHY_PORT,
1484 sizeof(struct rte_flow_item_phy_port)),
1485 .next = NEXT(item_phy_port),
1488 [ITEM_PHY_PORT_INDEX] = {
1490 .help = "physical port index",
1491 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1492 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1496 .help = "match traffic from/to a given DPDK port ID",
1497 .priv = PRIV_ITEM(PORT_ID,
1498 sizeof(struct rte_flow_item_port_id)),
1499 .next = NEXT(item_port_id),
1502 [ITEM_PORT_ID_ID] = {
1504 .help = "DPDK port ID",
1505 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1506 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1510 .help = "match traffic against value set in previously matched rule",
1511 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1512 .next = NEXT(item_mark),
1517 .help = "Integer value to match against",
1518 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1519 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1523 .help = "match an arbitrary byte string",
1524 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1525 .next = NEXT(item_raw),
1528 [ITEM_RAW_RELATIVE] = {
1530 .help = "look for pattern after the previous item",
1531 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1532 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1535 [ITEM_RAW_SEARCH] = {
1537 .help = "search pattern from offset (see also limit)",
1538 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1539 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1542 [ITEM_RAW_OFFSET] = {
1544 .help = "absolute or relative offset for pattern",
1545 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1546 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1548 [ITEM_RAW_LIMIT] = {
1550 .help = "search area limit for start of pattern",
1551 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1552 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1554 [ITEM_RAW_PATTERN] = {
1556 .help = "byte string to look for",
1557 .next = NEXT(item_raw,
1559 NEXT_ENTRY(ITEM_PARAM_IS,
1562 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1563 ARGS_ENTRY(struct rte_flow_item_raw, length),
1564 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1565 ITEM_RAW_PATTERN_SIZE)),
1569 .help = "match Ethernet header",
1570 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1571 .next = NEXT(item_eth),
1576 .help = "destination MAC",
1577 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1578 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1582 .help = "source MAC",
1583 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1584 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1588 .help = "EtherType",
1589 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1590 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1594 .help = "match 802.1Q/ad VLAN tag",
1595 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1596 .next = NEXT(item_vlan),
1601 .help = "tag control information",
1602 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1603 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1607 .help = "priority code point",
1608 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1609 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1614 .help = "drop eligible indicator",
1615 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1616 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1621 .help = "VLAN identifier",
1622 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1623 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1626 [ITEM_VLAN_INNER_TYPE] = {
1627 .name = "inner_type",
1628 .help = "inner EtherType",
1629 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1630 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1635 .help = "match IPv4 header",
1636 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1637 .next = NEXT(item_ipv4),
1642 .help = "type of service",
1643 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1644 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1645 hdr.type_of_service)),
1649 .help = "time to live",
1650 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1651 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1654 [ITEM_IPV4_PROTO] = {
1656 .help = "next protocol ID",
1657 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1658 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1659 hdr.next_proto_id)),
1663 .help = "source address",
1664 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1665 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1670 .help = "destination address",
1671 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1672 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1677 .help = "match IPv6 header",
1678 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1679 .next = NEXT(item_ipv6),
1684 .help = "traffic class",
1685 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1686 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1688 "\x0f\xf0\x00\x00")),
1690 [ITEM_IPV6_FLOW] = {
1692 .help = "flow label",
1693 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1694 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1696 "\x00\x0f\xff\xff")),
1698 [ITEM_IPV6_PROTO] = {
1700 .help = "protocol (next header)",
1701 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1702 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1707 .help = "hop limit",
1708 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1709 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1714 .help = "source address",
1715 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1716 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1721 .help = "destination address",
1722 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1723 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1728 .help = "match ICMP header",
1729 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1730 .next = NEXT(item_icmp),
1733 [ITEM_ICMP_TYPE] = {
1735 .help = "ICMP packet type",
1736 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1737 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1740 [ITEM_ICMP_CODE] = {
1742 .help = "ICMP packet code",
1743 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1744 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1749 .help = "match UDP header",
1750 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1751 .next = NEXT(item_udp),
1756 .help = "UDP source port",
1757 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1758 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1763 .help = "UDP destination port",
1764 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1765 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1770 .help = "match TCP header",
1771 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1772 .next = NEXT(item_tcp),
1777 .help = "TCP source port",
1778 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1779 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1784 .help = "TCP destination port",
1785 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1786 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1789 [ITEM_TCP_FLAGS] = {
1791 .help = "TCP flags",
1792 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1793 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1798 .help = "match SCTP header",
1799 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1800 .next = NEXT(item_sctp),
1805 .help = "SCTP source port",
1806 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1807 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1812 .help = "SCTP destination port",
1813 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1814 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1819 .help = "validation tag",
1820 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1821 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1824 [ITEM_SCTP_CKSUM] = {
1827 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1828 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1833 .help = "match VXLAN header",
1834 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1835 .next = NEXT(item_vxlan),
1838 [ITEM_VXLAN_VNI] = {
1840 .help = "VXLAN identifier",
1841 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1842 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1846 .help = "match E-Tag header",
1847 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1848 .next = NEXT(item_e_tag),
1851 [ITEM_E_TAG_GRP_ECID_B] = {
1852 .name = "grp_ecid_b",
1853 .help = "GRP and E-CID base",
1854 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1855 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1861 .help = "match NVGRE header",
1862 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1863 .next = NEXT(item_nvgre),
1866 [ITEM_NVGRE_TNI] = {
1868 .help = "virtual subnet ID",
1869 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1870 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1874 .help = "match MPLS header",
1875 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1876 .next = NEXT(item_mpls),
1879 [ITEM_MPLS_LABEL] = {
1881 .help = "MPLS label",
1882 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1883 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1889 .help = "match GRE header",
1890 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1891 .next = NEXT(item_gre),
1894 [ITEM_GRE_PROTO] = {
1896 .help = "GRE protocol type",
1897 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1898 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1903 .help = "fuzzy pattern match, expect faster than default",
1904 .priv = PRIV_ITEM(FUZZY,
1905 sizeof(struct rte_flow_item_fuzzy)),
1906 .next = NEXT(item_fuzzy),
1909 [ITEM_FUZZY_THRESH] = {
1911 .help = "match accuracy threshold",
1912 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1913 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1918 .help = "match GTP header",
1919 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1920 .next = NEXT(item_gtp),
1925 .help = "tunnel endpoint identifier",
1926 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1927 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1931 .help = "match GTP header",
1932 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1933 .next = NEXT(item_gtp),
1938 .help = "match GTP header",
1939 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1940 .next = NEXT(item_gtp),
1945 .help = "match GENEVE header",
1946 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1947 .next = NEXT(item_geneve),
1950 [ITEM_GENEVE_VNI] = {
1952 .help = "virtual network identifier",
1953 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1954 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1956 [ITEM_GENEVE_PROTO] = {
1958 .help = "GENEVE protocol type",
1959 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1960 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1963 [ITEM_VXLAN_GPE] = {
1964 .name = "vxlan-gpe",
1965 .help = "match VXLAN-GPE header",
1966 .priv = PRIV_ITEM(VXLAN_GPE,
1967 sizeof(struct rte_flow_item_vxlan_gpe)),
1968 .next = NEXT(item_vxlan_gpe),
1971 [ITEM_VXLAN_GPE_VNI] = {
1973 .help = "VXLAN-GPE identifier",
1974 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1975 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1978 [ITEM_ARP_ETH_IPV4] = {
1979 .name = "arp_eth_ipv4",
1980 .help = "match ARP header for Ethernet/IPv4",
1981 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1982 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1983 .next = NEXT(item_arp_eth_ipv4),
1986 [ITEM_ARP_ETH_IPV4_SHA] = {
1988 .help = "sender hardware address",
1989 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1991 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1994 [ITEM_ARP_ETH_IPV4_SPA] = {
1996 .help = "sender IPv4 address",
1997 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1999 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2002 [ITEM_ARP_ETH_IPV4_THA] = {
2004 .help = "target hardware address",
2005 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2007 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2010 [ITEM_ARP_ETH_IPV4_TPA] = {
2012 .help = "target IPv4 address",
2013 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2015 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2020 .help = "match presence of any IPv6 extension header",
2021 .priv = PRIV_ITEM(IPV6_EXT,
2022 sizeof(struct rte_flow_item_ipv6_ext)),
2023 .next = NEXT(item_ipv6_ext),
2026 [ITEM_IPV6_EXT_NEXT_HDR] = {
2028 .help = "next header",
2029 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2030 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2035 .help = "match any ICMPv6 header",
2036 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2037 .next = NEXT(item_icmp6),
2040 [ITEM_ICMP6_TYPE] = {
2042 .help = "ICMPv6 type",
2043 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2044 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2047 [ITEM_ICMP6_CODE] = {
2049 .help = "ICMPv6 code",
2050 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2051 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2054 [ITEM_ICMP6_ND_NS] = {
2055 .name = "icmp6_nd_ns",
2056 .help = "match ICMPv6 neighbor discovery solicitation",
2057 .priv = PRIV_ITEM(ICMP6_ND_NS,
2058 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2059 .next = NEXT(item_icmp6_nd_ns),
2062 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2063 .name = "target_addr",
2064 .help = "target address",
2065 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2067 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2070 [ITEM_ICMP6_ND_NA] = {
2071 .name = "icmp6_nd_na",
2072 .help = "match ICMPv6 neighbor discovery advertisement",
2073 .priv = PRIV_ITEM(ICMP6_ND_NA,
2074 sizeof(struct rte_flow_item_icmp6_nd_na)),
2075 .next = NEXT(item_icmp6_nd_na),
2078 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2079 .name = "target_addr",
2080 .help = "target address",
2081 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2083 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2086 [ITEM_ICMP6_ND_OPT] = {
2087 .name = "icmp6_nd_opt",
2088 .help = "match presence of any ICMPv6 neighbor discovery"
2090 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2091 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2092 .next = NEXT(item_icmp6_nd_opt),
2095 [ITEM_ICMP6_ND_OPT_TYPE] = {
2097 .help = "ND option type",
2098 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2100 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2103 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2104 .name = "icmp6_nd_opt_sla_eth",
2105 .help = "match ICMPv6 neighbor discovery source Ethernet"
2106 " link-layer address option",
2108 (ICMP6_ND_OPT_SLA_ETH,
2109 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2110 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2113 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2115 .help = "source Ethernet LLA",
2116 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2118 .args = ARGS(ARGS_ENTRY_HTON
2119 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2121 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2122 .name = "icmp6_nd_opt_tla_eth",
2123 .help = "match ICMPv6 neighbor discovery target Ethernet"
2124 " link-layer address option",
2126 (ICMP6_ND_OPT_TLA_ETH,
2127 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2128 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2131 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2133 .help = "target Ethernet LLA",
2134 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2136 .args = ARGS(ARGS_ENTRY_HTON
2137 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2141 .help = "match metadata header",
2142 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2143 .next = NEXT(item_meta),
2146 [ITEM_META_DATA] = {
2148 .help = "metadata value",
2149 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2150 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2151 data, "\xff\xff\xff\xff")),
2154 /* Validate/create actions. */
2157 .help = "submit a list of associated actions",
2158 .next = NEXT(next_action),
2163 .help = "specify next action",
2164 .next = NEXT(next_action),
2168 .help = "end list of actions",
2169 .priv = PRIV_ACTION(END, 0),
2174 .help = "no-op action",
2175 .priv = PRIV_ACTION(VOID, 0),
2176 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2179 [ACTION_PASSTHRU] = {
2181 .help = "let subsequent rule process matched packets",
2182 .priv = PRIV_ACTION(PASSTHRU, 0),
2183 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2188 .help = "redirect traffic to a given group",
2189 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2190 .next = NEXT(action_jump),
2193 [ACTION_JUMP_GROUP] = {
2195 .help = "group to redirect traffic to",
2196 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2197 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2198 .call = parse_vc_conf,
2202 .help = "attach 32 bit value to packets",
2203 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2204 .next = NEXT(action_mark),
2207 [ACTION_MARK_ID] = {
2209 .help = "32 bit value to return with packets",
2210 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2211 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2212 .call = parse_vc_conf,
2216 .help = "flag packets",
2217 .priv = PRIV_ACTION(FLAG, 0),
2218 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2223 .help = "assign packets to a given queue index",
2224 .priv = PRIV_ACTION(QUEUE,
2225 sizeof(struct rte_flow_action_queue)),
2226 .next = NEXT(action_queue),
2229 [ACTION_QUEUE_INDEX] = {
2231 .help = "queue index to use",
2232 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2233 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2234 .call = parse_vc_conf,
2238 .help = "drop packets (note: passthru has priority)",
2239 .priv = PRIV_ACTION(DROP, 0),
2240 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2245 .help = "enable counters for this rule",
2246 .priv = PRIV_ACTION(COUNT,
2247 sizeof(struct rte_flow_action_count)),
2248 .next = NEXT(action_count),
2251 [ACTION_COUNT_ID] = {
2252 .name = "identifier",
2253 .help = "counter identifier to use",
2254 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2255 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2256 .call = parse_vc_conf,
2258 [ACTION_COUNT_SHARED] = {
2260 .help = "shared counter",
2261 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2262 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2264 .call = parse_vc_conf,
2268 .help = "spread packets among several queues",
2269 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2270 .next = NEXT(action_rss),
2271 .call = parse_vc_action_rss,
2273 [ACTION_RSS_FUNC] = {
2275 .help = "RSS hash function to apply",
2276 .next = NEXT(action_rss,
2277 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2278 ACTION_RSS_FUNC_TOEPLITZ,
2279 ACTION_RSS_FUNC_SIMPLE_XOR)),
2281 [ACTION_RSS_FUNC_DEFAULT] = {
2283 .help = "default hash function",
2284 .call = parse_vc_action_rss_func,
2286 [ACTION_RSS_FUNC_TOEPLITZ] = {
2288 .help = "Toeplitz hash function",
2289 .call = parse_vc_action_rss_func,
2291 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2292 .name = "simple_xor",
2293 .help = "simple XOR hash function",
2294 .call = parse_vc_action_rss_func,
2296 [ACTION_RSS_LEVEL] = {
2298 .help = "encapsulation level for \"types\"",
2299 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2300 .args = ARGS(ARGS_ENTRY_ARB
2301 (offsetof(struct action_rss_data, conf) +
2302 offsetof(struct rte_flow_action_rss, level),
2303 sizeof(((struct rte_flow_action_rss *)0)->
2306 [ACTION_RSS_TYPES] = {
2308 .help = "specific RSS hash types",
2309 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2311 [ACTION_RSS_TYPE] = {
2313 .help = "RSS hash type",
2314 .call = parse_vc_action_rss_type,
2315 .comp = comp_vc_action_rss_type,
2317 [ACTION_RSS_KEY] = {
2319 .help = "RSS hash key",
2320 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2321 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2323 (offsetof(struct action_rss_data, conf) +
2324 offsetof(struct rte_flow_action_rss, key_len),
2325 sizeof(((struct rte_flow_action_rss *)0)->
2327 ARGS_ENTRY(struct action_rss_data, key)),
2329 [ACTION_RSS_KEY_LEN] = {
2331 .help = "RSS hash key length in bytes",
2332 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2333 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2334 (offsetof(struct action_rss_data, conf) +
2335 offsetof(struct rte_flow_action_rss, key_len),
2336 sizeof(((struct rte_flow_action_rss *)0)->
2339 RSS_HASH_KEY_LENGTH)),
2341 [ACTION_RSS_QUEUES] = {
2343 .help = "queue indices to use",
2344 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2345 .call = parse_vc_conf,
2347 [ACTION_RSS_QUEUE] = {
2349 .help = "queue index",
2350 .call = parse_vc_action_rss_queue,
2351 .comp = comp_vc_action_rss_queue,
2355 .help = "direct traffic to physical function",
2356 .priv = PRIV_ACTION(PF, 0),
2357 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2362 .help = "direct traffic to a virtual function ID",
2363 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2364 .next = NEXT(action_vf),
2367 [ACTION_VF_ORIGINAL] = {
2369 .help = "use original VF ID if possible",
2370 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2371 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2373 .call = parse_vc_conf,
2378 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2379 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2380 .call = parse_vc_conf,
2382 [ACTION_PHY_PORT] = {
2384 .help = "direct packets to physical port index",
2385 .priv = PRIV_ACTION(PHY_PORT,
2386 sizeof(struct rte_flow_action_phy_port)),
2387 .next = NEXT(action_phy_port),
2390 [ACTION_PHY_PORT_ORIGINAL] = {
2392 .help = "use original port index if possible",
2393 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2394 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2396 .call = parse_vc_conf,
2398 [ACTION_PHY_PORT_INDEX] = {
2400 .help = "physical port index",
2401 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2402 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2404 .call = parse_vc_conf,
2406 [ACTION_PORT_ID] = {
2408 .help = "direct matching traffic to a given DPDK port ID",
2409 .priv = PRIV_ACTION(PORT_ID,
2410 sizeof(struct rte_flow_action_port_id)),
2411 .next = NEXT(action_port_id),
2414 [ACTION_PORT_ID_ORIGINAL] = {
2416 .help = "use original DPDK port ID if possible",
2417 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2418 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2420 .call = parse_vc_conf,
2422 [ACTION_PORT_ID_ID] = {
2424 .help = "DPDK port ID",
2425 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2426 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2427 .call = parse_vc_conf,
2431 .help = "meter the directed packets at given id",
2432 .priv = PRIV_ACTION(METER,
2433 sizeof(struct rte_flow_action_meter)),
2434 .next = NEXT(action_meter),
2437 [ACTION_METER_ID] = {
2439 .help = "meter id to use",
2440 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2441 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2442 .call = parse_vc_conf,
2444 [ACTION_OF_SET_MPLS_TTL] = {
2445 .name = "of_set_mpls_ttl",
2446 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2449 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2450 .next = NEXT(action_of_set_mpls_ttl),
2453 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2456 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2457 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2459 .call = parse_vc_conf,
2461 [ACTION_OF_DEC_MPLS_TTL] = {
2462 .name = "of_dec_mpls_ttl",
2463 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2464 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2465 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2468 [ACTION_OF_SET_NW_TTL] = {
2469 .name = "of_set_nw_ttl",
2470 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2473 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2474 .next = NEXT(action_of_set_nw_ttl),
2477 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2480 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2481 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2483 .call = parse_vc_conf,
2485 [ACTION_OF_DEC_NW_TTL] = {
2486 .name = "of_dec_nw_ttl",
2487 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2488 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2489 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2492 [ACTION_OF_COPY_TTL_OUT] = {
2493 .name = "of_copy_ttl_out",
2494 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2495 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2496 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2499 [ACTION_OF_COPY_TTL_IN] = {
2500 .name = "of_copy_ttl_in",
2501 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2502 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2503 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2506 [ACTION_OF_POP_VLAN] = {
2507 .name = "of_pop_vlan",
2508 .help = "OpenFlow's OFPAT_POP_VLAN",
2509 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2510 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2513 [ACTION_OF_PUSH_VLAN] = {
2514 .name = "of_push_vlan",
2515 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2518 sizeof(struct rte_flow_action_of_push_vlan)),
2519 .next = NEXT(action_of_push_vlan),
2522 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2523 .name = "ethertype",
2524 .help = "EtherType",
2525 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2526 .args = ARGS(ARGS_ENTRY_HTON
2527 (struct rte_flow_action_of_push_vlan,
2529 .call = parse_vc_conf,
2531 [ACTION_OF_SET_VLAN_VID] = {
2532 .name = "of_set_vlan_vid",
2533 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2536 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2537 .next = NEXT(action_of_set_vlan_vid),
2540 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2543 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2544 .args = ARGS(ARGS_ENTRY_HTON
2545 (struct rte_flow_action_of_set_vlan_vid,
2547 .call = parse_vc_conf,
2549 [ACTION_OF_SET_VLAN_PCP] = {
2550 .name = "of_set_vlan_pcp",
2551 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2554 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2555 .next = NEXT(action_of_set_vlan_pcp),
2558 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2560 .help = "VLAN priority",
2561 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2562 .args = ARGS(ARGS_ENTRY_HTON
2563 (struct rte_flow_action_of_set_vlan_pcp,
2565 .call = parse_vc_conf,
2567 [ACTION_OF_POP_MPLS] = {
2568 .name = "of_pop_mpls",
2569 .help = "OpenFlow's OFPAT_POP_MPLS",
2570 .priv = PRIV_ACTION(OF_POP_MPLS,
2571 sizeof(struct rte_flow_action_of_pop_mpls)),
2572 .next = NEXT(action_of_pop_mpls),
2575 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2576 .name = "ethertype",
2577 .help = "EtherType",
2578 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2579 .args = ARGS(ARGS_ENTRY_HTON
2580 (struct rte_flow_action_of_pop_mpls,
2582 .call = parse_vc_conf,
2584 [ACTION_OF_PUSH_MPLS] = {
2585 .name = "of_push_mpls",
2586 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2589 sizeof(struct rte_flow_action_of_push_mpls)),
2590 .next = NEXT(action_of_push_mpls),
2593 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2594 .name = "ethertype",
2595 .help = "EtherType",
2596 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2597 .args = ARGS(ARGS_ENTRY_HTON
2598 (struct rte_flow_action_of_push_mpls,
2600 .call = parse_vc_conf,
2602 [ACTION_VXLAN_ENCAP] = {
2603 .name = "vxlan_encap",
2604 .help = "VXLAN encapsulation, uses configuration set by \"set"
2606 .priv = PRIV_ACTION(VXLAN_ENCAP,
2607 sizeof(struct action_vxlan_encap_data)),
2608 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2609 .call = parse_vc_action_vxlan_encap,
2611 [ACTION_VXLAN_DECAP] = {
2612 .name = "vxlan_decap",
2613 .help = "Performs a decapsulation action by stripping all"
2614 " headers of the VXLAN tunnel network overlay from the"
2616 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2617 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2620 [ACTION_NVGRE_ENCAP] = {
2621 .name = "nvgre_encap",
2622 .help = "NVGRE encapsulation, uses configuration set by \"set"
2624 .priv = PRIV_ACTION(NVGRE_ENCAP,
2625 sizeof(struct action_nvgre_encap_data)),
2626 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2627 .call = parse_vc_action_nvgre_encap,
2629 [ACTION_NVGRE_DECAP] = {
2630 .name = "nvgre_decap",
2631 .help = "Performs a decapsulation action by stripping all"
2632 " headers of the NVGRE tunnel network overlay from the"
2634 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2635 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2638 [ACTION_L2_ENCAP] = {
2640 .help = "l2 encap, uses configuration set by"
2641 " \"set l2_encap\"",
2642 .priv = PRIV_ACTION(RAW_ENCAP,
2643 sizeof(struct action_raw_encap_data)),
2644 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2645 .call = parse_vc_action_l2_encap,
2647 [ACTION_L2_DECAP] = {
2649 .help = "l2 decap, uses configuration set by"
2650 " \"set l2_decap\"",
2651 .priv = PRIV_ACTION(RAW_DECAP,
2652 sizeof(struct action_raw_decap_data)),
2653 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2654 .call = parse_vc_action_l2_decap,
2656 [ACTION_MPLSOGRE_ENCAP] = {
2657 .name = "mplsogre_encap",
2658 .help = "mplsogre encapsulation, uses configuration set by"
2659 " \"set mplsogre_encap\"",
2660 .priv = PRIV_ACTION(RAW_ENCAP,
2661 sizeof(struct action_raw_encap_data)),
2662 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2663 .call = parse_vc_action_mplsogre_encap,
2665 [ACTION_MPLSOGRE_DECAP] = {
2666 .name = "mplsogre_decap",
2667 .help = "mplsogre decapsulation, uses configuration set by"
2668 " \"set mplsogre_decap\"",
2669 .priv = PRIV_ACTION(RAW_DECAP,
2670 sizeof(struct action_raw_decap_data)),
2671 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2672 .call = parse_vc_action_mplsogre_decap,
2674 [ACTION_MPLSOUDP_ENCAP] = {
2675 .name = "mplsoudp_encap",
2676 .help = "mplsoudp encapsulation, uses configuration set by"
2677 " \"set mplsoudp_encap\"",
2678 .priv = PRIV_ACTION(RAW_ENCAP,
2679 sizeof(struct action_raw_encap_data)),
2680 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2681 .call = parse_vc_action_mplsoudp_encap,
2683 [ACTION_MPLSOUDP_DECAP] = {
2684 .name = "mplsoudp_decap",
2685 .help = "mplsoudp decapsulation, uses configuration set by"
2686 " \"set mplsoudp_decap\"",
2687 .priv = PRIV_ACTION(RAW_DECAP,
2688 sizeof(struct action_raw_decap_data)),
2689 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2690 .call = parse_vc_action_mplsoudp_decap,
2692 [ACTION_SET_IPV4_SRC] = {
2693 .name = "set_ipv4_src",
2694 .help = "Set a new IPv4 source address in the outermost"
2696 .priv = PRIV_ACTION(SET_IPV4_SRC,
2697 sizeof(struct rte_flow_action_set_ipv4)),
2698 .next = NEXT(action_set_ipv4_src),
2701 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2702 .name = "ipv4_addr",
2703 .help = "new IPv4 source address to set",
2704 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2705 .args = ARGS(ARGS_ENTRY_HTON
2706 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2707 .call = parse_vc_conf,
2709 [ACTION_SET_IPV4_DST] = {
2710 .name = "set_ipv4_dst",
2711 .help = "Set a new IPv4 destination address in the outermost"
2713 .priv = PRIV_ACTION(SET_IPV4_DST,
2714 sizeof(struct rte_flow_action_set_ipv4)),
2715 .next = NEXT(action_set_ipv4_dst),
2718 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2719 .name = "ipv4_addr",
2720 .help = "new IPv4 destination address to set",
2721 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2722 .args = ARGS(ARGS_ENTRY_HTON
2723 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2724 .call = parse_vc_conf,
2726 [ACTION_SET_IPV6_SRC] = {
2727 .name = "set_ipv6_src",
2728 .help = "Set a new IPv6 source address in the outermost"
2730 .priv = PRIV_ACTION(SET_IPV6_SRC,
2731 sizeof(struct rte_flow_action_set_ipv6)),
2732 .next = NEXT(action_set_ipv6_src),
2735 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2736 .name = "ipv6_addr",
2737 .help = "new IPv6 source address to set",
2738 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2739 .args = ARGS(ARGS_ENTRY_HTON
2740 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2741 .call = parse_vc_conf,
2743 [ACTION_SET_IPV6_DST] = {
2744 .name = "set_ipv6_dst",
2745 .help = "Set a new IPv6 destination address in the outermost"
2747 .priv = PRIV_ACTION(SET_IPV6_DST,
2748 sizeof(struct rte_flow_action_set_ipv6)),
2749 .next = NEXT(action_set_ipv6_dst),
2752 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2753 .name = "ipv6_addr",
2754 .help = "new IPv6 destination address to set",
2755 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2756 .args = ARGS(ARGS_ENTRY_HTON
2757 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2758 .call = parse_vc_conf,
2760 [ACTION_SET_TP_SRC] = {
2761 .name = "set_tp_src",
2762 .help = "set a new source port number in the outermost"
2764 .priv = PRIV_ACTION(SET_TP_SRC,
2765 sizeof(struct rte_flow_action_set_tp)),
2766 .next = NEXT(action_set_tp_src),
2769 [ACTION_SET_TP_SRC_TP_SRC] = {
2771 .help = "new source port number to set",
2772 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2773 .args = ARGS(ARGS_ENTRY_HTON
2774 (struct rte_flow_action_set_tp, port)),
2775 .call = parse_vc_conf,
2777 [ACTION_SET_TP_DST] = {
2778 .name = "set_tp_dst",
2779 .help = "set a new destination port number in the outermost"
2781 .priv = PRIV_ACTION(SET_TP_DST,
2782 sizeof(struct rte_flow_action_set_tp)),
2783 .next = NEXT(action_set_tp_dst),
2786 [ACTION_SET_TP_DST_TP_DST] = {
2788 .help = "new destination port number to set",
2789 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2790 .args = ARGS(ARGS_ENTRY_HTON
2791 (struct rte_flow_action_set_tp, port)),
2792 .call = parse_vc_conf,
2794 [ACTION_MAC_SWAP] = {
2796 .help = "Swap the source and destination MAC addresses"
2797 " in the outermost Ethernet header",
2798 .priv = PRIV_ACTION(MAC_SWAP, 0),
2799 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2802 [ACTION_DEC_TTL] = {
2804 .help = "decrease network TTL if available",
2805 .priv = PRIV_ACTION(DEC_TTL, 0),
2806 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2809 [ACTION_SET_TTL] = {
2811 .help = "set ttl value",
2812 .priv = PRIV_ACTION(SET_TTL,
2813 sizeof(struct rte_flow_action_set_ttl)),
2814 .next = NEXT(action_set_ttl),
2817 [ACTION_SET_TTL_TTL] = {
2818 .name = "ttl_value",
2819 .help = "new ttl value to set",
2820 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2821 .args = ARGS(ARGS_ENTRY_HTON
2822 (struct rte_flow_action_set_ttl, ttl_value)),
2823 .call = parse_vc_conf,
2825 [ACTION_SET_MAC_SRC] = {
2826 .name = "set_mac_src",
2827 .help = "set source mac address",
2828 .priv = PRIV_ACTION(SET_MAC_SRC,
2829 sizeof(struct rte_flow_action_set_mac)),
2830 .next = NEXT(action_set_mac_src),
2833 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2835 .help = "new source mac address",
2836 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2837 .args = ARGS(ARGS_ENTRY_HTON
2838 (struct rte_flow_action_set_mac, mac_addr)),
2839 .call = parse_vc_conf,
2841 [ACTION_SET_MAC_DST] = {
2842 .name = "set_mac_dst",
2843 .help = "set destination mac address",
2844 .priv = PRIV_ACTION(SET_MAC_DST,
2845 sizeof(struct rte_flow_action_set_mac)),
2846 .next = NEXT(action_set_mac_dst),
2849 [ACTION_SET_MAC_DST_MAC_DST] = {
2851 .help = "new destination mac address to set",
2852 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
2853 .args = ARGS(ARGS_ENTRY_HTON
2854 (struct rte_flow_action_set_mac, mac_addr)),
2855 .call = parse_vc_conf,
2859 /** Remove and return last entry from argument stack. */
2860 static const struct arg *
2861 pop_args(struct context *ctx)
2863 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2866 /** Add entry on top of the argument stack. */
2868 push_args(struct context *ctx, const struct arg *arg)
2870 if (ctx->args_num == CTX_STACK_SIZE)
2872 ctx->args[ctx->args_num++] = arg;
2876 /** Spread value into buffer according to bit-mask. */
2878 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2880 uint32_t i = arg->size;
2888 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2897 unsigned int shift = 0;
2898 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
2900 for (shift = 0; arg->mask[i] >> shift; ++shift) {
2901 if (!(arg->mask[i] & (1 << shift)))
2906 *buf &= ~(1 << shift);
2907 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * @return 0 when the first @p partial_len bytes of @p partial match
 * @p full entirely, otherwise a nonzero value (the first differing
 * character of @p full when @p partial is merely a proper prefix).
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	/* Prefix matched; it is a full match only if nothing remains. */
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
2929 * Parse a prefix length and generate a bit-mask.
2931 * Last argument (ctx->args) is retrieved to determine mask size, storage
2932 * location and whether the result must use network byte ordering.
2935 parse_prefix(struct context *ctx, const struct token *token,
2936 const char *str, unsigned int len,
2937 void *buf, unsigned int size)
2939 const struct arg *arg = pop_args(ctx);
2940 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2947 /* Argument is expected. */
2951 u = strtoumax(str, &end, 0);
2952 if (errno || (size_t)(end - str) != len)
2957 extra = arg_entry_bf_fill(NULL, 0, arg);
2966 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2967 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2974 if (bytes > size || bytes + !!extra > size)
2978 buf = (uint8_t *)ctx->object + arg->offset;
2979 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2981 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2982 memset(buf, 0x00, size - bytes);
2984 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
2988 memset(buf, 0xff, bytes);
2989 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2991 ((uint8_t *)buf)[bytes] = conv[extra];
2994 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2997 push_args(ctx, arg);
3001 /** Default parsing function for token name matching. */
3003 parse_default(struct context *ctx, const struct token *token,
3004 const char *str, unsigned int len,
3005 void *buf, unsigned int size)
3010 if (strcmp_partial(token->name, str, len))
3015 /** Parse flow command, initialize output buffer for subsequent tokens. */
3017 parse_init(struct context *ctx, const struct token *token,
3018 const char *str, unsigned int len,
3019 void *buf, unsigned int size)
3021 struct buffer *out = buf;
3023 /* Token name must match. */
3024 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3026 /* Nothing else to do if there is no buffer. */
3029 /* Make sure buffer is large enough. */
3030 if (size < sizeof(*out))
3032 /* Initialize buffer. */
3033 memset(out, 0x00, sizeof(*out));
/* Fill trailing space with a recognizable pattern (0x22), presumably to
 * catch reads of uninitialized buffer space — TODO confirm intent. */
3034 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3037 ctx->objmask = NULL;
3041 /** Parse tokens for validate/create commands. */
3043 parse_vc(struct context *ctx, const struct token *token,
3044 const char *str, unsigned int len,
3045 void *buf, unsigned int size)
3047 struct buffer *out = buf;
3051 /* Token name must match. */
3052 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3054 /* Nothing else to do if there is no buffer. */
/* First invocation: record which command (validate/create) owns the buffer. */
3057 if (!out->command) {
3058 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3060 if (sizeof(*out) > size)
3062 out->command = ctx->curr;
3065 ctx->objmask = NULL;
/* Spec/conf data grows downward from the end of the output buffer. */
3066 out->args.vc.data = (uint8_t *)out + size;
3070 ctx->object = &out->args.vc.attr;
3071 ctx->objmask = NULL;
3072 switch (ctx->curr) {
3077 out->args.vc.attr.ingress = 1;
3080 out->args.vc.attr.egress = 1;
3083 out->args.vc.attr.transfer = 1;
/* "pattern" token: item array is placed right after the buffer header. */
3086 out->args.vc.pattern =
3087 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3089 ctx->object = out->args.vc.pattern;
3090 ctx->objmask = NULL;
/* "actions" token: action array follows the pattern item array. */
3093 out->args.vc.actions =
3094 (void *)RTE_ALIGN_CEIL((uintptr_t)
3095 (out->args.vc.pattern +
3096 out->args.vc.pattern_n),
3098 ctx->object = out->args.vc.actions;
3099 ctx->objmask = NULL;
/* Pattern item token: append a new rte_flow_item entry. */
3106 if (!out->args.vc.actions) {
3107 const struct parse_item_priv *priv = token->priv;
3108 struct rte_flow_item *item =
3109 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve three copies of the item payload: spec, last and mask. */
3111 data_size = priv->size * 3; /* spec, last, mask */
3112 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3113 (out->args.vc.data - data_size),
/* Fail when the item array and the downward-growing data would collide. */
3115 if ((uint8_t *)item + sizeof(*item) > data)
3117 *item = (struct rte_flow_item){
3120 ++out->args.vc.pattern_n;
3122 ctx->objmask = NULL;
/* Action token: append a new rte_flow_action entry. */
3124 const struct parse_action_priv *priv = token->priv;
3125 struct rte_flow_action *action =
3126 out->args.vc.actions + out->args.vc.actions_n;
3128 data_size = priv->size; /* configuration */
3129 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3130 (out->args.vc.data - data_size),
3132 if ((uint8_t *)action + sizeof(*action) > data)
3134 *action = (struct rte_flow_action){
/* Actions with no configuration get a NULL conf pointer. */
3136 .conf = data_size ? data : NULL,
3138 ++out->args.vc.actions_n;
3139 ctx->object = action;
3140 ctx->objmask = NULL;
/* Freshly reserved data area starts zeroed; objdata tracks its size. */
3142 memset(data, 0, data_size);
3143 out->args.vc.data = data;
3144 ctx->objdata = data_size;
3148 /** Parse pattern item parameter type. */
3150 parse_vc_spec(struct context *ctx, const struct token *token,
3151 const char *str, unsigned int len,
3152 void *buf, unsigned int size)
3154 struct buffer *out = buf;
3155 struct rte_flow_item *item;
3161 /* Token name must match. */
3162 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3164 /* Parse parameter types. */
3165 switch (ctx->curr) {
3166 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3172 case ITEM_PARAM_SPEC:
3175 case ITEM_PARAM_LAST:
3178 case ITEM_PARAM_PREFIX:
3179 /* Modify next token to expect a prefix. */
3180 if (ctx->next_num < 2)
3182 ctx->next[ctx->next_num - 2] = prefix;
3184 case ITEM_PARAM_MASK:
3190 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach spec/last/mask to. */
3193 if (!out->args.vc.pattern_n)
3195 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* objdata holds the combined spec+last+mask size reserved by parse_vc. */
3196 data_size = ctx->objdata / 3; /* spec, last, mask */
3197 /* Point to selected object. */
3198 ctx->object = out->args.vc.data + (data_size * index);
3200 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3201 item->mask = ctx->objmask;
3203 ctx->objmask = NULL;
3204 /* Update relevant item pointer. */
3205 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3210 /** Parse action configuration field. */
3212 parse_vc_conf(struct context *ctx, const struct token *token,
3213 const char *str, unsigned int len,
3214 void *buf, unsigned int size)
3216 struct buffer *out = buf;
3219 /* Token name must match. */
3220 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3222 /* Nothing else to do if there is no buffer. */
3225 /* Point to selected object. */
/* Subsequent argument tokens write into the action's reserved data area. */
3226 ctx->object = out->args.vc.data;
3227 ctx->objmask = NULL;
3231 /** Parse RSS action. */
3233 parse_vc_action_rss(struct context *ctx, const struct token *token,
3234 const char *str, unsigned int len,
3235 void *buf, unsigned int size)
3237 struct buffer *out = buf;
3238 struct rte_flow_action *action;
3239 struct action_rss_data *action_rss_data;
/* Let parse_vc() allocate the action entry and its data area first. */
3243 ret = parse_vc(ctx, token, str, len, buf, size);
3246 /* Nothing else to do if there is no buffer. */
3249 if (!out->args.vc.actions_n)
3251 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3252 /* Point to selected object. */
3253 ctx->object = out->args.vc.data;
3254 ctx->objmask = NULL;
3255 /* Set up default configuration. */
3256 action_rss_data = ctx->object;
3257 *action_rss_data = (struct action_rss_data){
3258 .conf = (struct rte_flow_action_rss){
3259 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3262 .key_len = sizeof(action_rss_data->key),
3263 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3264 .key = action_rss_data->key,
3265 .queue = action_rss_data->queue,
3267 .key = "testpmd's default RSS hash key, "
3268 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3271 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3272 action_rss_data->queue[i] = i;
/* For a specific valid port, clamp key length to device capability. */
3273 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3274 ctx->port != (portid_t)RTE_PORT_ALL) {
3275 struct rte_eth_dev_info info;
3277 rte_eth_dev_info_get(ctx->port, &info);
3278 action_rss_data->conf.key_len =
3279 RTE_MIN(sizeof(action_rss_data->key),
3280 info.hash_key_size);
3282 action->conf = &action_rss_data->conf;
3287 * Parse func field for RSS action.
3289 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3290 * ACTION_RSS_FUNC_* index that called this function.
3293 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3294 const char *str, unsigned int len,
3295 void *buf, unsigned int size)
3297 struct action_rss_data *action_rss_data;
3298 enum rte_eth_hash_function func;
3302 /* Token name must match. */
3303 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the current token index to its hash function value. */
3305 switch (ctx->curr) {
3306 case ACTION_RSS_FUNC_DEFAULT:
3307 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3309 case ACTION_RSS_FUNC_TOEPLITZ:
3310 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3312 case ACTION_RSS_FUNC_SIMPLE_XOR:
3313 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object points at the action's RSS data set up by parse_vc_action_rss. */
3320 action_rss_data = ctx->object;
3321 action_rss_data->conf.func = func;
3326 * Parse type field for RSS action.
3328 * Valid tokens are type field names and the "end" token.
3331 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3332 const char *str, unsigned int len,
3333 void *buf, unsigned int size)
3335 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3336 struct action_rss_data *action_rss_data;
3342 if (ctx->curr != ACTION_RSS_TYPE)
/* High 16 bits of objdata flag that the type list has been started;
 * clear accumulated types on the first "types" entry. */
3344 if (!(ctx->objdata >> 16) && ctx->object) {
3345 action_rss_data = ctx->object;
3346 action_rss_data->conf.types = 0;
/* "end" terminates the list; reset the started flag. */
3348 if (!strcmp_partial("end", str, len)) {
3349 ctx->objdata &= 0xffff;
/* Look up the RSS type name in the global table. */
3352 for (i = 0; rss_type_table[i].str; ++i)
3353 if (!strcmp_partial(rss_type_table[i].str, str, len))
3355 if (!rss_type_table[i].str)
3357 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat the same token so more type names can follow. */
3359 if (ctx->next_num == RTE_DIM(ctx->next))
3361 ctx->next[ctx->next_num++] = next;
3364 action_rss_data = ctx->object;
3365 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3370 * Parse queue field for RSS action.
3372 * Valid tokens are queue indices and the "end" token.
3375 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3376 const char *str, unsigned int len,
3377 void *buf, unsigned int size)
3379 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3380 struct action_rss_data *action_rss_data;
3387 if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of objdata count queues parsed so far. */
3389 i = ctx->objdata >> 16;
3390 if (!strcmp_partial("end", str, len)) {
3391 ctx->objdata &= 0xffff;
/* Reject more queues than the fixed-size queue array can hold. */
3394 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an argument describing queue[i] so parse_int stores into it. */
3397 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3398 i * sizeof(action_rss_data->queue[i]),
3399 sizeof(action_rss_data->queue[i]))))
3401 ret = parse_int(ctx, token, str, len, NULL, 0);
3407 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Repeat the same token so more queue indices can follow. */
3409 if (ctx->next_num == RTE_DIM(ctx->next))
3411 ctx->next[ctx->next_num++] = next;
3415 action_rss_data = ctx->object;
3416 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
3417 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3421 /** Parse VXLAN encap action. */
3423 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3424 const char *str, unsigned int len,
3425 void *buf, unsigned int size)
3427 struct buffer *out = buf;
3428 struct rte_flow_action *action;
3429 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Let parse_vc() allocate the action entry and its data area first. */
3432 ret = parse_vc(ctx, token, str, len, buf, size);
3435 /* Nothing else to do if there is no buffer. */
3438 if (!out->args.vc.actions_n)
3440 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3441 /* Point to selected object. */
3442 ctx->object = out->args.vc.data;
3443 ctx->objmask = NULL;
3444 /* Set up default configuration. */
/* Build the encap item chain ETH/VLAN/IPV4/UDP/VXLAN/END from the global
 * vxlan_encap_conf settings; entries are adjusted below per selection. */
3445 action_vxlan_encap_data = ctx->object;
3446 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3447 .conf = (struct rte_flow_action_vxlan_encap){
3448 .definition = action_vxlan_encap_data->items,
3452 .type = RTE_FLOW_ITEM_TYPE_ETH,
3453 .spec = &action_vxlan_encap_data->item_eth,
3454 .mask = &rte_flow_item_eth_mask,
3457 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3458 .spec = &action_vxlan_encap_data->item_vlan,
3459 .mask = &rte_flow_item_vlan_mask,
3462 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3463 .spec = &action_vxlan_encap_data->item_ipv4,
3464 .mask = &rte_flow_item_ipv4_mask,
3467 .type = RTE_FLOW_ITEM_TYPE_UDP,
3468 .spec = &action_vxlan_encap_data->item_udp,
3469 .mask = &rte_flow_item_udp_mask,
3472 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3473 .spec = &action_vxlan_encap_data->item_vxlan,
3474 .mask = &rte_flow_item_vxlan_mask,
3477 .type = RTE_FLOW_ITEM_TYPE_END,
3482 .tci = vxlan_encap_conf.vlan_tci,
3486 .src_addr = vxlan_encap_conf.ipv4_src,
3487 .dst_addr = vxlan_encap_conf.ipv4_dst,
3490 .src_port = vxlan_encap_conf.udp_src,
3491 .dst_port = vxlan_encap_conf.udp_dst,
3493 .item_vxlan.flags = 0,
3495 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3496 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3497 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3498 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace items[2] (the IPv4 slot) with an IPv6 item. */
3499 if (!vxlan_encap_conf.select_ipv4) {
3500 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3501 &vxlan_encap_conf.ipv6_src,
3502 sizeof(vxlan_encap_conf.ipv6_src));
3503 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3504 &vxlan_encap_conf.ipv6_dst,
3505 sizeof(vxlan_encap_conf.ipv6_dst));
3506 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3507 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3508 .spec = &action_vxlan_encap_data->item_ipv6,
3509 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN selected: neutralize items[1] with a VOID item. */
3512 if (!vxlan_encap_conf.select_vlan)
3513 action_vxlan_encap_data->items[1].type =
3514 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional TOS/TTL override widens the IP mask accordingly. */
3515 if (vxlan_encap_conf.select_tos_ttl) {
3516 if (vxlan_encap_conf.select_ipv4) {
3517 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3519 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3520 sizeof(ipv4_mask_tos));
3521 ipv4_mask_tos.hdr.type_of_service = 0xff;
3522 ipv4_mask_tos.hdr.time_to_live = 0xff;
3523 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3524 vxlan_encap_conf.ip_tos;
3525 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3526 vxlan_encap_conf.ip_ttl;
3527 action_vxlan_encap_data->items[2].mask =
3530 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3532 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3533 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3534 ipv6_mask_tos.hdr.vtc_flow |=
3535 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3536 ipv6_mask_tos.hdr.hop_limits = 0xff;
3537 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3539 ((uint32_t)vxlan_encap_conf.ip_tos <<
3540 RTE_IPV6_HDR_TC_SHIFT);
3541 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3542 vxlan_encap_conf.ip_ttl;
3543 action_vxlan_encap_data->items[2].mask =
3547 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3548 RTE_DIM(vxlan_encap_conf.vni));
3549 action->conf = &action_vxlan_encap_data->conf;
3553 /** Parse NVGRE encap action. */
3555 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3556 const char *str, unsigned int len,
3557 void *buf, unsigned int size)
3559 struct buffer *out = buf;
3560 struct rte_flow_action *action;
3561 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Let parse_vc() allocate the action entry and its data area first. */
3564 ret = parse_vc(ctx, token, str, len, buf, size);
3567 /* Nothing else to do if there is no buffer. */
3570 if (!out->args.vc.actions_n)
3572 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3573 /* Point to selected object. */
3574 ctx->object = out->args.vc.data;
3575 ctx->objmask = NULL;
3576 /* Set up default configuration. */
/* Build the encap item chain ETH/VLAN/IPV4/NVGRE/END from the global
 * nvgre_encap_conf settings; entries are adjusted below per selection. */
3577 action_nvgre_encap_data = ctx->object;
3578 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3579 .conf = (struct rte_flow_action_nvgre_encap){
3580 .definition = action_nvgre_encap_data->items,
3584 .type = RTE_FLOW_ITEM_TYPE_ETH,
3585 .spec = &action_nvgre_encap_data->item_eth,
3586 .mask = &rte_flow_item_eth_mask,
3589 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3590 .spec = &action_nvgre_encap_data->item_vlan,
3591 .mask = &rte_flow_item_vlan_mask,
3594 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3595 .spec = &action_nvgre_encap_data->item_ipv4,
3596 .mask = &rte_flow_item_ipv4_mask,
3599 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3600 .spec = &action_nvgre_encap_data->item_nvgre,
3601 .mask = &rte_flow_item_nvgre_mask,
3604 .type = RTE_FLOW_ITEM_TYPE_END,
3609 .tci = nvgre_encap_conf.vlan_tci,
3613 .src_addr = nvgre_encap_conf.ipv4_src,
3614 .dst_addr = nvgre_encap_conf.ipv4_dst,
3616 .item_nvgre.flow_id = 0,
3618 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3619 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3620 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3621 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace items[2] (the IPv4 slot) with an IPv6 item. */
3622 if (!nvgre_encap_conf.select_ipv4) {
3623 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3624 &nvgre_encap_conf.ipv6_src,
3625 sizeof(nvgre_encap_conf.ipv6_src));
3626 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3627 &nvgre_encap_conf.ipv6_dst,
3628 sizeof(nvgre_encap_conf.ipv6_dst));
3629 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3630 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3631 .spec = &action_nvgre_encap_data->item_ipv6,
3632 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN selected: neutralize items[1] with a VOID item. */
3635 if (!nvgre_encap_conf.select_vlan)
3636 action_nvgre_encap_data->items[1].type =
3637 RTE_FLOW_ITEM_TYPE_VOID;
3638 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3639 RTE_DIM(nvgre_encap_conf.tni));
3640 action->conf = &action_nvgre_encap_data->conf;
3644 /** Parse l2 encap action. */
3646 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3647 const char *str, unsigned int len,
3648 void *buf, unsigned int size)
3650 struct buffer *out = buf;
3651 struct rte_flow_action *action;
3652 struct action_raw_encap_data *action_encap_data;
3653 struct rte_flow_item_eth eth = { .type = 0, };
3654 struct rte_flow_item_vlan vlan = {
/* NOTE(review): reuses mplsoudp_encap_conf.vlan_tci for the l2 action;
 * matches upstream testpmd but looks surprising — confirm intent. */
3655 .tci = mplsoudp_encap_conf.vlan_tci,
/* Let parse_vc() allocate the action entry and its data area first. */
3661 ret = parse_vc(ctx, token, str, len, buf, size);
3664 /* Nothing else to do if there is no buffer. */
3667 if (!out->args.vc.actions_n)
3669 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3670 /* Point to selected object. */
3671 ctx->object = out->args.vc.data;
3672 ctx->objmask = NULL;
3673 /* Copy the headers to the buffer. */
3674 action_encap_data = ctx->object;
3675 *action_encap_data = (struct action_raw_encap_data) {
3676 .conf = (struct rte_flow_action_raw_encap){
3677 .data = action_encap_data->data,
/* Serialize ETH (and optional VLAN) headers into the raw encap buffer. */
3681 header = action_encap_data->data;
3682 if (l2_encap_conf.select_vlan)
3683 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3684 else if (l2_encap_conf.select_ipv4)
3685 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3687 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3688 memcpy(eth.dst.addr_bytes,
3689 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3690 memcpy(eth.src.addr_bytes,
3691 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3692 memcpy(header, &eth, sizeof(eth));
3693 header += sizeof(eth);
3694 if (l2_encap_conf.select_vlan) {
3695 if (l2_encap_conf.select_ipv4)
3696 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3698 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3699 memcpy(header, &vlan, sizeof(vlan));
3700 header += sizeof(vlan);
/* Total serialized length becomes the raw encap size. */
3702 action_encap_data->conf.size = header -
3703 action_encap_data->data;
3704 action->conf = &action_encap_data->conf;
3708 /** Parse l2 decap action. */
3710 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3711 const char *str, unsigned int len,
3712 void *buf, unsigned int size)
3714 struct buffer *out = buf;
3715 struct rte_flow_action *action;
3716 struct action_raw_decap_data *action_decap_data;
3717 struct rte_flow_item_eth eth = { .type = 0, };
3718 struct rte_flow_item_vlan vlan = {
/* NOTE(review): reuses mplsoudp_encap_conf.vlan_tci for the l2 action;
 * matches upstream testpmd but looks surprising — confirm intent. */
3719 .tci = mplsoudp_encap_conf.vlan_tci,
/* Let parse_vc() allocate the action entry and its data area first. */
3725 ret = parse_vc(ctx, token, str, len, buf, size);
3728 /* Nothing else to do if there is no buffer. */
3731 if (!out->args.vc.actions_n)
3733 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3734 /* Point to selected object. */
3735 ctx->object = out->args.vc.data;
3736 ctx->objmask = NULL;
3737 /* Copy the headers to the buffer. */
3738 action_decap_data = ctx->object;
3739 *action_decap_data = (struct action_raw_decap_data) {
3740 .conf = (struct rte_flow_action_raw_decap){
3741 .data = action_decap_data->data,
/* Serialize the headers to be stripped: ETH plus optional VLAN. */
3745 header = action_decap_data->data;
3746 if (l2_decap_conf.select_vlan)
3747 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3748 memcpy(header, &eth, sizeof(eth));
3749 header += sizeof(eth);
3750 if (l2_decap_conf.select_vlan) {
3751 memcpy(header, &vlan, sizeof(vlan));
3752 header += sizeof(vlan);
/* Total serialized length becomes the raw decap size. */
3754 action_decap_data->conf.size = header -
3755 action_decap_data->data;
3756 action->conf = &action_decap_data->conf;
3760 #define ETHER_TYPE_MPLS_UNICAST 0x8847

3762 /** Parse MPLSOGRE encap action. */
3764 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
3765 const char *str, unsigned int len,
3766 void *buf, unsigned int size)
3768 struct buffer *out = buf;
3769 struct rte_flow_action *action;
3770 struct action_raw_encap_data *action_encap_data;
3771 struct rte_flow_item_eth eth = { .type = 0, };
3772 struct rte_flow_item_vlan vlan = {
3773 .tci = mplsogre_encap_conf.vlan_tci,
3776 struct rte_flow_item_ipv4 ipv4 = {
3778 .src_addr = mplsogre_encap_conf.ipv4_src,
3779 .dst_addr = mplsogre_encap_conf.ipv4_dst,
3780 .next_proto_id = IPPROTO_GRE,
3783 struct rte_flow_item_ipv6 ipv6 = {
3785 .proto = IPPROTO_GRE,
3788 struct rte_flow_item_gre gre = {
3789 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3791 struct rte_flow_item_mpls mpls;
/* Let parse_vc() allocate the action entry and its data area first. */
3795 ret = parse_vc(ctx, token, str, len, buf, size);
3798 /* Nothing else to do if there is no buffer. */
3801 if (!out->args.vc.actions_n)
3803 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3804 /* Point to selected object. */
3805 ctx->object = out->args.vc.data;
3806 ctx->objmask = NULL;
3807 /* Copy the headers to the buffer. */
3808 action_encap_data = ctx->object;
3809 *action_encap_data = (struct action_raw_encap_data) {
3810 .conf = (struct rte_flow_action_raw_encap){
3811 .data = action_encap_data->data,
/* Serialize ETH/[VLAN]/IP/GRE/MPLS into the raw encap buffer. */
3816 header = action_encap_data->data;
3817 if (mplsogre_encap_conf.select_vlan)
3818 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3819 else if (mplsogre_encap_conf.select_ipv4)
3820 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3822 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3823 memcpy(eth.dst.addr_bytes,
3824 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3825 memcpy(eth.src.addr_bytes,
3826 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3827 memcpy(header, &eth, sizeof(eth));
3828 header += sizeof(eth);
3829 if (mplsogre_encap_conf.select_vlan) {
3830 if (mplsogre_encap_conf.select_ipv4)
3831 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3833 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3834 memcpy(header, &vlan, sizeof(vlan));
3835 header += sizeof(vlan);
3837 if (mplsogre_encap_conf.select_ipv4) {
3838 memcpy(header, &ipv4, sizeof(ipv4));
3839 header += sizeof(ipv4);
3841 memcpy(&ipv6.hdr.src_addr,
3842 &mplsogre_encap_conf.ipv6_src,
3843 sizeof(mplsogre_encap_conf.ipv6_src));
3844 memcpy(&ipv6.hdr.dst_addr,
3845 &mplsogre_encap_conf.ipv6_dst,
3846 sizeof(mplsogre_encap_conf.ipv6_dst));
3847 memcpy(header, &ipv6, sizeof(ipv6));
3848 header += sizeof(ipv6);
3850 memcpy(header, &gre, sizeof(gre));
3851 header += sizeof(gre);
3852 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
3853 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack bit on the (single) MPLS label. */
3854 mpls.label_tc_s[2] |= 0x1;
3855 memcpy(header, &mpls, sizeof(mpls));
3856 header += sizeof(mpls);
/* Total serialized length becomes the raw encap size. */
3857 action_encap_data->conf.size = header -
3858 action_encap_data->data;
3859 action->conf = &action_encap_data->conf;
3863 /** Parse MPLSOGRE decap action. */
3865 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
3866 const char *str, unsigned int len,
3867 void *buf, unsigned int size)
3869 struct buffer *out = buf;
3870 struct rte_flow_action *action;
3871 struct action_raw_decap_data *action_decap_data;
3872 struct rte_flow_item_eth eth = { .type = 0, };
3873 struct rte_flow_item_vlan vlan = {.tci = 0};
3874 struct rte_flow_item_ipv4 ipv4 = {
3876 .next_proto_id = IPPROTO_GRE,
3879 struct rte_flow_item_ipv6 ipv6 = {
3881 .proto = IPPROTO_GRE,
3884 struct rte_flow_item_gre gre = {
3885 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3887 struct rte_flow_item_mpls mpls;
/* Let parse_vc() allocate the action entry and its data area first. */
3891 ret = parse_vc(ctx, token, str, len, buf, size);
3894 /* Nothing else to do if there is no buffer. */
3897 if (!out->args.vc.actions_n)
3899 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3900 /* Point to selected object. */
3901 ctx->object = out->args.vc.data;
3902 ctx->objmask = NULL;
3903 /* Copy the headers to the buffer. */
3904 action_decap_data = ctx->object;
3905 *action_decap_data = (struct action_raw_decap_data) {
3906 .conf = (struct rte_flow_action_raw_decap){
3907 .data = action_decap_data->data,
/* Serialize the header stack to strip. NOTE(review): most fields mirror
 * mplsogre_encap_conf (only select_vlan comes from the decap conf);
 * matches upstream testpmd but worth confirming. */
3911 header = action_decap_data->data;
3912 if (mplsogre_decap_conf.select_vlan)
3913 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3914 else if (mplsogre_encap_conf.select_ipv4)
3915 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3917 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3918 memcpy(eth.dst.addr_bytes,
3919 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3920 memcpy(eth.src.addr_bytes,
3921 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3922 memcpy(header, &eth, sizeof(eth));
3923 header += sizeof(eth);
3924 if (mplsogre_encap_conf.select_vlan) {
3925 if (mplsogre_encap_conf.select_ipv4)
3926 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
3928 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
3929 memcpy(header, &vlan, sizeof(vlan));
3930 header += sizeof(vlan);
3932 if (mplsogre_encap_conf.select_ipv4) {
3933 memcpy(header, &ipv4, sizeof(ipv4));
3934 header += sizeof(ipv4);
3936 memcpy(header, &ipv6, sizeof(ipv6));
3937 header += sizeof(ipv6);
3939 memcpy(header, &gre, sizeof(gre));
3940 header += sizeof(gre);
/* Decap matches any MPLS label: zeroed header. */
3941 memset(&mpls, 0, sizeof(mpls));
3942 memcpy(header, &mpls, sizeof(mpls));
3943 header += sizeof(mpls);
3944 action_decap_data->conf.size = header -
3945 action_decap_data->data;
3946 action->conf = &action_decap_data->conf;
3950 /** Parse MPLSOUDP encap action. */
3952 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
3953 const char *str, unsigned int len,
3954 void *buf, unsigned int size)
3956 struct buffer *out = buf;
3957 struct rte_flow_action *action;
3958 struct action_raw_encap_data *action_encap_data;
3959 struct rte_flow_item_eth eth = { .type = 0, };
3960 struct rte_flow_item_vlan vlan = {
3961 .tci = mplsoudp_encap_conf.vlan_tci,
3964 struct rte_flow_item_ipv4 ipv4 = {
3966 .src_addr = mplsoudp_encap_conf.ipv4_src,
3967 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
3968 .next_proto_id = IPPROTO_UDP,
3971 struct rte_flow_item_ipv6 ipv6 = {
3973 .proto = IPPROTO_UDP,
3976 struct rte_flow_item_udp udp = {
3978 .src_port = mplsoudp_encap_conf.udp_src,
3979 .dst_port = mplsoudp_encap_conf.udp_dst,
3982 struct rte_flow_item_mpls mpls;
/* Let parse_vc() allocate the action entry and its data area first. */
3986 ret = parse_vc(ctx, token, str, len, buf, size);
3989 /* Nothing else to do if there is no buffer. */
3992 if (!out->args.vc.actions_n)
3994 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3995 /* Point to selected object. */
3996 ctx->object = out->args.vc.data;
3997 ctx->objmask = NULL;
3998 /* Copy the headers to the buffer. */
3999 action_encap_data = ctx->object;
4000 *action_encap_data = (struct action_raw_encap_data) {
4001 .conf = (struct rte_flow_action_raw_encap){
4002 .data = action_encap_data->data,
/* Serialize ETH/[VLAN]/IP/UDP/MPLS into the raw encap buffer. */
4007 header = action_encap_data->data;
4008 if (mplsoudp_encap_conf.select_vlan)
4009 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4010 else if (mplsoudp_encap_conf.select_ipv4)
4011 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
4013 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
4014 memcpy(eth.dst.addr_bytes,
4015 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4016 memcpy(eth.src.addr_bytes,
4017 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4018 memcpy(header, &eth, sizeof(eth));
4019 header += sizeof(eth);
4020 if (mplsoudp_encap_conf.select_vlan) {
4021 if (mplsoudp_encap_conf.select_ipv4)
4022 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
4024 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
4025 memcpy(header, &vlan, sizeof(vlan));
4026 header += sizeof(vlan);
4028 if (mplsoudp_encap_conf.select_ipv4) {
4029 memcpy(header, &ipv4, sizeof(ipv4));
4030 header += sizeof(ipv4);
4032 memcpy(&ipv6.hdr.src_addr,
4033 &mplsoudp_encap_conf.ipv6_src,
4034 sizeof(mplsoudp_encap_conf.ipv6_src));
4035 memcpy(&ipv6.hdr.dst_addr,
4036 &mplsoudp_encap_conf.ipv6_dst,
4037 sizeof(mplsoudp_encap_conf.ipv6_dst));
4038 memcpy(header, &ipv6, sizeof(ipv6));
4039 header += sizeof(ipv6);
4041 memcpy(header, &udp, sizeof(udp));
4042 header += sizeof(udp);
4043 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4044 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack bit on the (single) MPLS label. */
4045 mpls.label_tc_s[2] |= 0x1;
4046 memcpy(header, &mpls, sizeof(mpls));
4047 header += sizeof(mpls);
/* Total serialized length becomes the raw encap size. */
4048 action_encap_data->conf.size = header -
4049 action_encap_data->data;
4050 action->conf = &action_encap_data->conf;
4054 /** Parse MPLSOUDP decap action. */
4056 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4057 const char *str, unsigned int len,
4058 void *buf, unsigned int size)
4060 struct buffer *out = buf;
4061 struct rte_flow_action *action;
4062 struct action_raw_decap_data *action_decap_data;
4063 struct rte_flow_item_eth eth = { .type = 0, };
4064 struct rte_flow_item_vlan vlan = {.tci = 0};
4065 struct rte_flow_item_ipv4 ipv4 = {
4067 .next_proto_id = IPPROTO_UDP,
4070 struct rte_flow_item_ipv6 ipv6 = {
4072 .proto = IPPROTO_UDP,
4075 struct rte_flow_item_udp udp = {
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4077 .dst_port = rte_cpu_to_be_16(6635),
4080 struct rte_flow_item_mpls mpls;
/* Let parse_vc() allocate the action entry and its data area first. */
4084 ret = parse_vc(ctx, token, str, len, buf, size);
4087 /* Nothing else to do if there is no buffer. */
4090 if (!out->args.vc.actions_n)
4092 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4093 /* Point to selected object. */
4094 ctx->object = out->args.vc.data;
4095 ctx->objmask = NULL;
4096 /* Copy the headers to the buffer. */
4097 action_decap_data = ctx->object;
4098 *action_decap_data = (struct action_raw_decap_data) {
4099 .conf = (struct rte_flow_action_raw_decap){
4100 .data = action_decap_data->data,
/* Serialize the header stack to strip. NOTE(review): most fields mirror
 * mplsoudp_encap_conf (only select_vlan comes from the decap conf);
 * matches upstream testpmd but worth confirming. */
4104 header = action_decap_data->data;
4105 if (mplsoudp_decap_conf.select_vlan)
4106 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4107 else if (mplsoudp_encap_conf.select_ipv4)
4108 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
4110 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
4111 memcpy(eth.dst.addr_bytes,
4112 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4113 memcpy(eth.src.addr_bytes,
4114 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4115 memcpy(header, &eth, sizeof(eth));
4116 header += sizeof(eth);
4117 if (mplsoudp_encap_conf.select_vlan) {
4118 if (mplsoudp_encap_conf.select_ipv4)
4119 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
4121 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
4122 memcpy(header, &vlan, sizeof(vlan));
4123 header += sizeof(vlan);
4125 if (mplsoudp_encap_conf.select_ipv4) {
4126 memcpy(header, &ipv4, sizeof(ipv4));
4127 header += sizeof(ipv4);
4129 memcpy(header, &ipv6, sizeof(ipv6));
4130 header += sizeof(ipv6);
4132 memcpy(header, &udp, sizeof(udp));
4133 header += sizeof(udp);
/* Decap matches any MPLS label: zeroed header. */
4134 memset(&mpls, 0, sizeof(mpls));
4135 memcpy(header, &mpls, sizeof(mpls));
4136 header += sizeof(mpls);
4137 action_decap_data->conf.size = header -
4138 action_decap_data->data;
4139 action->conf = &action_decap_data->conf;
4143 /** Parse tokens for destroy command. */
4145 parse_destroy(struct context *ctx, const struct token *token,
4146 const char *str, unsigned int len,
4147 void *buf, unsigned int size)
4149 struct buffer *out = buf;
4151 /* Token name must match. */
4152 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4154 /* Nothing else to do if there is no buffer. */
/* First invocation: initialize the command and the rule ID array. */
4157 if (!out->command) {
4158 if (ctx->curr != DESTROY)
4160 if (sizeof(*out) > size)
4162 out->command = ctx->curr;
4165 ctx->objmask = NULL;
4166 out->args.destroy.rule =
4167 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when the next rule ID would not fit in the buffer. */
4171 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4172 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
/* Subsequent integer tokens fill the next rule ID slot. */
4175 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4176 ctx->objmask = NULL;
4180 /** Parse tokens for flush command. */
4182 parse_flush(struct context *ctx, const struct token *token,
4183 const char *str, unsigned int len,
4184 void *buf, unsigned int size)
4186 struct buffer *out = buf;
4188 /* Token name must match. */
4189 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4191 /* Nothing else to do if there is no buffer. */
/* First invocation: record the command; flush takes no extra arguments. */
4194 if (!out->command) {
4195 if (ctx->curr != FLUSH)
4197 if (sizeof(*out) > size)
4199 out->command = ctx->curr;
4202 ctx->objmask = NULL;
4207 /** Parse tokens for query command. */
4209 parse_query(struct context *ctx, const struct token *token,
4210 const char *str, unsigned int len,
4211 void *buf, unsigned int size)
4213 struct buffer *out = buf;
4215 /* Token name must match. */
4216 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4218 /* Nothing else to do if there is no buffer. */
/* First invocation: record the command for subsequent argument tokens. */
4221 if (!out->command) {
4222 if (ctx->curr != QUERY)
4224 if (sizeof(*out) > size)
4226 out->command = ctx->curr;
4229 ctx->objmask = NULL;
4234 /** Parse action names. */
4236 parse_action(struct context *ctx, const struct token *token,
4237 const char *str, unsigned int len,
4238 void *buf, unsigned int size)
4240 struct buffer *out = buf;
/* Pending argument describes where to store the matched action type. */
4241 const struct arg *arg = pop_args(ctx);
4245 /* Argument is expected. */
4248 /* Parse action name. */
/* Linear search of the action token list for a (partial) name match. */
4249 for (i = 0; next_action[i]; ++i) {
4250 const struct parse_action_priv *priv;
4252 token = &token_list[next_action[i]];
4253 if (strcmp_partial(token->name, str, len))
4259 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore argument stack so the caller can report errors consistently. */
4265 push_args(ctx, arg);
4269 /** Parse tokens for list command. */
4271 parse_list(struct context *ctx, const struct token *token,
4272 const char *str, unsigned int len,
4273 void *buf, unsigned int size)
4275 struct buffer *out = buf;
4277 /* Token name must match. */
4278 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4280 /* Nothing else to do if there is no buffer. */
/* First invocation: initialize the command and the group ID array. */
4283 if (!out->command) {
4284 if (ctx->curr != LIST)
4286 if (sizeof(*out) > size)
4288 out->command = ctx->curr;
4291 ctx->objmask = NULL;
4292 out->args.list.group =
4293 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Fail when the next group ID would not fit in the buffer. */
4297 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4298 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
/* Subsequent integer tokens fill the next group ID slot. */
4301 ctx->object = out->args.list.group + out->args.list.group_n++;
4302 ctx->objmask = NULL;
4306 /** Parse tokens for isolate command. */
/*
 * Records the ISOLATE command in the output buffer on first call.
 * NOTE(review): excerpt elides some lines (error returns/braces).
 */
4308 parse_isolate(struct context *ctx, const struct token *token,
4309 const char *str, unsigned int len,
4310 void *buf, unsigned int size)
4312 struct buffer *out = buf;
4314 /* Token name must match. */
4315 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4317 /* Nothing else to do if there is no buffer. */
4320 if (!out->command) {
4321 if (ctx->curr != ISOLATE)
4323 if (sizeof(*out) > size)
4325 out->command = ctx->curr;
4328 ctx->objmask = NULL;
4334 * Parse signed/unsigned integers 8 to 64-bit long.
4336 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts @str with strtoimax/strtoumax, range-checks it against the
 * argument's min/max, then stores it at the argument's offset with the
 * requested width and (optionally) network byte order.
 * NOTE(review): excerpt elides some lines (error paths, break statements).
 */
4340 parse_int(struct context *ctx, const struct token *token,
4341 const char *str, unsigned int len,
4342 void *buf, unsigned int size)
4344 const struct arg *arg = pop_args(ctx);
4349 /* Argument is expected. */
/* Signed vs unsigned conversion is selected by the argument descriptor. */
4354 (uintmax_t)strtoimax(str, &end, 0) :
4355 strtoumax(str, &end, 0);
/* Whole token must have been consumed and no conversion error raised. */
4356 if (errno || (size_t)(end - str) != len)
/* Range check honoring signedness of the target field. */
4359 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4360 (intmax_t)u > (intmax_t)arg->max)) ||
4361 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments are filled through a dedicated helper (mask = all-ones). */
4366 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4367 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4371 buf = (uint8_t *)ctx->object + arg->offset;
/* Value must fit in the destination field's width. */
4373 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
/* Store with the exact field width; hton converts to big endian. */
4377 case sizeof(uint8_t):
4378 *(uint8_t *)buf = u;
4380 case sizeof(uint16_t):
4381 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
4383 case sizeof(uint8_t [3]):
4384 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* 24-bit field: byte-swap manually depending on host endianness. */
4386 ((uint8_t *)buf)[0] = u;
4387 ((uint8_t *)buf)[1] = u >> 8;
4388 ((uint8_t *)buf)[2] = u >> 16;
4392 ((uint8_t *)buf)[0] = u >> 16;
4393 ((uint8_t *)buf)[1] = u >> 8;
4394 ((uint8_t *)buf)[2] = u;
4396 case sizeof(uint32_t):
4397 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4399 case sizeof(uint64_t):
4400 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Also fill the mask object unless it aliases the value object. */
4405 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4407 buf = (uint8_t *)ctx->objmask + arg->offset;
/* On failure the argument is pushed back before returning an error. */
4412 push_args(ctx, arg);
4419 * Three arguments (ctx->args) are retrieved from the stack to store data,
4420 * its actual length and address (in that order).
/*
 * Copies a raw string token into the object, zero-pads the remainder,
 * records its length via parse_int(), and optionally stores a pointer to
 * the data. NOTE(review): excerpt elides some lines (error paths/braces).
 */
4423 parse_string(struct context *ctx, const struct token *token,
4424 const char *str, unsigned int len,
4425 void *buf, unsigned int size)
4427 const struct arg *arg_data = pop_args(ctx);
4428 const struct arg *arg_len = pop_args(ctx);
4429 const struct arg *arg_addr = pop_args(ctx);
4430 char tmp[16]; /* Ought to be enough. */
4433 /* Arguments are expected. */
/* Partial pops are undone so the stack stays consistent on error. */
4437 push_args(ctx, arg_data);
4441 push_args(ctx, arg_len);
4442 push_args(ctx, arg_data);
4445 size = arg_data->size;
4446 /* Bit-mask fill is not supported. */
4447 if (arg_data->mask || size < len)
4451 /* Let parse_int() fill length information first. */
4452 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4455 push_args(ctx, arg_len);
4456 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4461 buf = (uint8_t *)ctx->object + arg_data->offset;
4462 /* Output buffer is not necessarily NUL-terminated. */
4463 memcpy(buf, str, len);
4464 memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask object (if any) gets all-ones over the copied bytes. */
4466 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4467 /* Save address if requested. */
4468 if (arg_addr->size) {
4469 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4471 (uint8_t *)ctx->object + arg_data->offset
4475 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4477 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments before failing. */
4483 push_args(ctx, arg_addr);
4484 push_args(ctx, arg_len);
4485 push_args(ctx, arg_data);
/*
 * Convert a hex character string into bytes, two chars per byte, writing
 * the decoded length back through @size.
 * NOTE(review): excerpt elides parameter validation details and returns.
 */
4490 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4496 /* Check input parameters */
4497 if ((src == NULL) ||
4503 /* Convert chars to bytes */
/* Take two characters at a time and decode them with strtoul base 16. */
4504 for (i = 0, len = 0; i < *size; i += 2) {
4505 snprintf(tmp, 3, "%s", src + i);
4506 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hex byte-string token ("0x..." prefix supported), decode it into
 * a temporary array, store it in the object, record its length and
 * optionally its address — the hex counterpart of parse_string().
 * NOTE(review): excerpt elides some lines (error paths/braces).
 */
4521 parse_hex(struct context *ctx, const struct token *token,
4522 const char *str, unsigned int len,
4523 void *buf, unsigned int size)
4525 const struct arg *arg_data = pop_args(ctx);
4526 const struct arg *arg_len = pop_args(ctx);
4527 const struct arg *arg_addr = pop_args(ctx);
4528 char tmp[16]; /* Ought to be enough. */
4530 unsigned int hexlen = len;
/* Scratch buffer for the decoded bytes (VLA sized by `length`). */
4531 unsigned int length = 256;
4532 uint8_t hex_tmp[length];
4534 /* Arguments are expected. */
/* Partial pops are undone so the stack stays consistent on error. */
4538 push_args(ctx, arg_data);
4542 push_args(ctx, arg_len);
4543 push_args(ctx, arg_data);
4546 size = arg_data->size;
4547 /* Bit-mask fill is not supported. */
4553 /* translate bytes string to array. */
/* Skip an optional "0x"/"0X" prefix before decoding. */
4554 if (str[0] == '0' && ((str[1] == 'x') ||
/* Decoded data must fit in the scratch buffer. */
4559 if (hexlen > length)
4561 ret = parse_hex_string(str, hex_tmp, &hexlen);
4564 /* Let parse_int() fill length information first. */
4565 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4568 push_args(ctx, arg_len);
4569 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4574 buf = (uint8_t *)ctx->object + arg_data->offset;
4575 /* Output buffer is not necessarily NUL-terminated. */
4576 memcpy(buf, hex_tmp, hexlen);
4577 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
/* Mask object (if any) gets all-ones over the copied bytes. */
4579 memset((uint8_t *)ctx->objmask + arg_data->offset,
4581 /* Save address if requested. */
4582 if (arg_addr->size) {
4583 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4585 (uint8_t *)ctx->object + arg_data->offset
4589 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4591 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments before failing. */
4597 push_args(ctx, arg_addr);
4598 push_args(ctx, arg_len);
4599 push_args(ctx, arg_data);
4605 * Parse a MAC address.
4607 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses cmdline_parse_etheraddr() and stores the result at the argument's
 * offset; the mask (if present) is filled with all-ones.
 * NOTE(review): excerpt elides some lines (error returns/braces).
 */
4611 parse_mac_addr(struct context *ctx, const struct token *token,
4612 const char *str, unsigned int len,
4613 void *buf, unsigned int size)
4615 const struct arg *arg = pop_args(ctx);
4616 struct rte_ether_addr tmp;
4620 /* Argument is expected. */
4624 /* Bit-mask fill is not supported. */
4625 if (arg->mask || size != sizeof(tmp))
4627 /* Only network endian is supported. */
4630 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The entire token must have been consumed by the parser. */
4631 if (ret < 0 || (unsigned int)ret != len)
4635 buf = (uint8_t *)ctx->object + arg->offset;
4636 memcpy(buf, &tmp, size);
4638 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back before returning an error. */
4641 push_args(ctx, arg);
4646 * Parse an IPv4 address.
4648 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) on a NUL-terminated copy of the token; falls
 * back to integer parsing when that fails.
 * NOTE(review): excerpt elides some lines (str2 declaration, returns).
 */
4652 parse_ipv4_addr(struct context *ctx, const struct token *token,
4653 const char *str, unsigned int len,
4654 void *buf, unsigned int size)
4656 const struct arg *arg = pop_args(ctx);
4661 /* Argument is expected. */
4665 /* Bit-mask fill is not supported. */
4666 if (arg->mask || size != sizeof(tmp))
4668 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
4671 memcpy(str2, str, len);
4673 ret = inet_pton(AF_INET, str2, &tmp);
4675 /* Attempt integer parsing. */
4676 push_args(ctx, arg);
4677 return parse_int(ctx, token, str, len, buf, size);
4681 buf = (uint8_t *)ctx->object + arg->offset;
4682 memcpy(buf, &tmp, size);
4684 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back before returning an error. */
4687 push_args(ctx, arg);
4692 * Parse an IPv6 address.
4694 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Parses with inet_pton(AF_INET6) on a NUL-terminated copy of the token
 * and stores the 16-byte address; no integer fallback (unlike IPv4).
 * NOTE(review): excerpt elides some lines (str2 declaration, returns).
 */
4698 parse_ipv6_addr(struct context *ctx, const struct token *token,
4699 const char *str, unsigned int len,
4700 void *buf, unsigned int size)
4702 const struct arg *arg = pop_args(ctx);
4704 struct in6_addr tmp;
4708 /* Argument is expected. */
4712 /* Bit-mask fill is not supported. */
4713 if (arg->mask || size != sizeof(tmp))
4715 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
4718 memcpy(str2, str, len);
4720 ret = inet_pton(AF_INET6, str2, &tmp);
4725 buf = (uint8_t *)ctx->object + arg->offset;
4726 memcpy(buf, &tmp, size);
4728 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back before returning an error. */
4731 push_args(ctx, arg);
4735 /** Boolean values (even indices stand for false). */
/* NOTE(review): array contents elided in this excerpt; the index parity
 * convention above is relied upon by parse_boolean(). */
4736 static const char *const boolean_name[] = {
4746 * Parse a boolean value.
4748 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Maps a recognized boolean keyword to "0"/"1" (even index = false per
 * boolean_name's convention), then delegates storage to parse_int().
 * NOTE(review): excerpt elides some lines (argument check, braces).
 */
4752 parse_boolean(struct context *ctx, const struct token *token,
4753 const char *str, unsigned int len,
4754 void *buf, unsigned int size)
4756 const struct arg *arg = pop_args(ctx);
4760 /* Argument is expected. */
/* Look for a (partial) keyword match in boolean_name[]. */
4763 for (i = 0; boolean_name[i]; ++i)
4764 if (!strcmp_partial(boolean_name[i], str, len))
4766 /* Process token as integer. */
4767 if (boolean_name[i])
4768 str = i & 1 ? "1" : "0";
4769 push_args(ctx, arg);
4770 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not the mapped one. */
4771 return ret > 0 ? (int)len : ret;
4774 /** Parse port and update context. */
/*
 * Parses the port ID into a throwaway buffer via parse_int() and stores
 * the result in ctx->port so later tokens know which port is targeted.
 * NOTE(review): excerpt elides some lines (setup of ctx->object, returns).
 */
4776 parse_port(struct context *ctx, const struct token *token,
4777 const char *str, unsigned int len,
4778 void *buf, unsigned int size)
/* Compound-literal scratch buffer used when the caller passed none. */
4780 struct buffer *out = &(struct buffer){ .port = 0 };
4788 ctx->objmask = NULL;
4789 size = sizeof(*out);
4791 ret = parse_int(ctx, token, str, len, out, size);
4793 ctx->port = out->port;
4799 /** No completion. */
/* Stub completion callback for tokens that offer no suggestions. */
4801 comp_none(struct context *ctx, const struct token *token,
4802 unsigned int ent, char *buf, unsigned int size)
4812 /** Complete boolean values. */
/* Returns entry @ent from boolean_name[]; with NULL @buf it counts entries. */
4814 comp_boolean(struct context *ctx, const struct token *token,
4815 unsigned int ent, char *buf, unsigned int size)
4821 for (i = 0; boolean_name[i]; ++i)
4822 if (buf && i == ent)
4823 return strlcpy(buf, boolean_name[i], size);
4829 /** Complete action names. */
/* Returns name of entry @ent from next_action[]; NULL @buf counts entries. */
4831 comp_action(struct context *ctx, const struct token *token,
4832 unsigned int ent, char *buf, unsigned int size)
4838 for (i = 0; next_action[i]; ++i)
4839 if (buf && i == ent)
4840 return strlcpy(buf, token_list[next_action[i]].name,
4847 /** Complete available ports. */
/* Enumerates attached ethdev ports; entry @ent is printed as a number. */
4849 comp_port(struct context *ctx, const struct token *token,
4850 unsigned int ent, char *buf, unsigned int size)
4857 RTE_ETH_FOREACH_DEV(p) {
4858 if (buf && i == ent)
4859 return snprintf(buf, size, "%u", p);
4867 /** Complete available rule IDs. */
/*
 * Walks the current port's flow list and returns the ID of entry @ent;
 * with NULL @buf only the number of rules is counted.
 */
4869 comp_rule_id(struct context *ctx, const struct token *token,
4870 unsigned int ent, char *buf, unsigned int size)
4873 struct rte_port *port;
4874 struct port_flow *pf;
/* Cannot enumerate rules without a single valid port in context. */
4877 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4878 ctx->port == (portid_t)RTE_PORT_ALL)
4880 port = &ports[ctx->port];
4881 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4882 if (buf && i == ent)
4883 return snprintf(buf, size, "%u", pf->id);
4891 /** Complete type field for RSS action. */
/* Completes RSS hash type names from rss_type_table[], then "end". */
4893 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4894 unsigned int ent, char *buf, unsigned int size)
4900 for (i = 0; rss_type_table[i].str; ++i)
4905 return strlcpy(buf, rss_type_table[ent].str, size);
/* One extra entry past the table: the "end" terminator keyword. */
4907 return snprintf(buf, size, "end");
4911 /** Complete queue field for RSS action. */
/* Completes queue indices as numbers, then the "end" terminator keyword. */
4913 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4914 unsigned int ent, char *buf, unsigned int size)
4921 return snprintf(buf, size, "%u", ent);
4923 return snprintf(buf, size, "end");
4927 /** Internal context. */
/* Shared parser state; the cmdline callbacks below all operate on it. */
4928 static struct context cmd_flow_context;
4930 /** Global parser instance (cmdline API). */
/* Forward declaration; the full definition appears at the end of the file. */
4931 cmdline_parse_inst_t cmd_flow;
4933 /** Initialize context. */
/* Resets parser state before a new command; fields are cleared selectively. */
4935 cmd_flow_context_init(struct context *ctx)
4937 /* A full memset() is not necessary. */
4947 ctx->objmask = NULL;
4950 /** Parse a token (cmdline API). */
/*
 * Tokenizes @src (stopping at whitespace or '#'), tries each candidate
 * token from the current next-list, and on a match pushes that token's
 * follow-up tokens and arguments onto the context stacks.
 * NOTE(review): excerpt elides some lines (returns, loop exits).
 */
4952 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4955 struct context *ctx = &cmd_flow_context;
4956 const struct token *token;
4957 const enum index *list;
4962 token = &token_list[ctx->curr];
4963 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
4966 for (len = 0; src[len]; ++len)
4967 if (src[len] == '#' || isspace(src[len]))
4971 /* Last argument and EOL detection. */
4972 for (i = len; src[i]; ++i)
4973 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4975 else if (!isspace(src[i])) {
4980 if (src[i] == '\r' || src[i] == '\n') {
4984 /* Initialize context if necessary. */
4985 if (!ctx->next_num) {
4988 ctx->next[ctx->next_num++] = token->next[0];
4990 /* Process argument through candidates. */
4991 ctx->prev = ctx->curr;
4992 list = ctx->next[ctx->next_num - 1];
4993 for (i = 0; list[i]; ++i) {
4994 const struct token *next = &token_list[list[i]];
4997 ctx->curr = list[i];
/* Custom parser callback takes precedence over the default matcher. */
4999 tmp = next->call(ctx, next, src, len, result, size);
5001 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate matches only when it consumed the whole token. */
5002 if (tmp == -1 || tmp != len)
5010 /* Push subsequent tokens if any. */
5012 for (i = 0; token->next[i]; ++i) {
5013 if (ctx->next_num == RTE_DIM(ctx->next))
5015 ctx->next[ctx->next_num++] = token->next[i];
5017 /* Push arguments if any. */
5019 for (i = 0; token->args[i]; ++i) {
5020 if (ctx->args_num == RTE_DIM(ctx->args))
5022 ctx->args[ctx->args_num++] = token->args[i];
5027 /** Return number of completion entries (cmdline API). */
/*
 * Counts the tokens in the current next-list; when the list holds exactly
 * one token with a completion callback, delegates the count to it.
 */
5029 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5031 struct context *ctx = &cmd_flow_context;
5032 const struct token *token = &token_list[ctx->curr];
5033 const enum index *list;
5037 /* Count number of tokens in current list. */
5039 list = ctx->next[ctx->next_num - 1];
5041 list = token->next[0];
5042 for (i = 0; list[i]; ++i)
5047 * If there is a single token, use its completion callback, otherwise
5048 * return the number of entries.
5050 token = &token_list[list[0]];
5051 if (i == 1 && token->comp) {
5052 /* Save index for cmd_flow_get_help(). */
5053 ctx->prev = list[0];
/* NULL buffer asks the callback for the entry count only. */
5054 return token->comp(ctx, token, 0, NULL, 0);
5059 /** Return a completion entry (cmdline API). */
/*
 * Copies completion entry @index into @dst; mirrors the counting logic of
 * cmd_flow_complete_get_nb() and falls back to the token's name.
 * NOTE(review): excerpt elides some lines (bounds check, returns).
 */
5061 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5062 char *dst, unsigned int size)
5064 struct context *ctx = &cmd_flow_context;
5065 const struct token *token = &token_list[ctx->curr];
5066 const enum index *list;
5070 /* Count number of tokens in current list. */
5072 list = ctx->next[ctx->next_num - 1];
5074 list = token->next[0];
5075 for (i = 0; list[i]; ++i)
5079 /* If there is a single token, use its completion callback. */
5080 token = &token_list[list[0]];
5081 if (i == 1 && token->comp) {
5082 /* Save index for cmd_flow_get_help(). */
5083 ctx->prev = list[0];
5084 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5086 /* Otherwise make sure the index is valid and use defaults. */
5089 token = &token_list[list[index]];
5090 strlcpy(dst, token->name, size);
5091 /* Save index for cmd_flow_get_help(). */
5092 ctx->prev = list[index];
5096 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the previously completed token's type into @dst and points the
 * global cmd_flow.help_str at its help text (or name as a fallback).
 */
5098 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5100 struct context *ctx = &cmd_flow_context;
5101 const struct token *token = &token_list[ctx->prev];
5106 /* Set token type and update global help with details. */
5107 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
5109 cmd_flow.help_str = token->help;
5111 cmd_flow.help_str = token->name;
5115 /** Token definition template (cmdline API). */
/* Single shared ops table; every dynamic token reuses this header. */
5116 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5117 .ops = &(struct cmdline_token_ops){
5118 .parse = cmd_flow_parse,
5119 .complete_get_nb = cmd_flow_complete_get_nb,
5120 .complete_get_elt = cmd_flow_complete_get_elt,
5121 .get_help = cmd_flow_get_help,
5126 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library for each token slot; reinitializes the
 * context on the first slot and signals end-of-command by other means.
 * NOTE(review): excerpt elides some lines (early exits, loop body).
 */
5128 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5129 cmdline_parse_token_hdr_t **hdr_inst)
5131 struct context *ctx = &cmd_flow_context;
5133 /* Always reinitialize context before requesting the first token. */
5134 if (!(hdr_inst - cmd_flow.tokens))
5135 cmd_flow_context_init(ctx);
5136 /* Return NULL when no more tokens are expected. */
5137 if (!ctx->next_num && ctx->curr) {
5141 /* Determine if command should end here. */
5142 if (ctx->eol && ctx->last && ctx->next_num) {
5143 const enum index *list = ctx->next[ctx->next_num - 1];
5146 for (i = 0; list[i]; ++i) {
/* All slots share the same static token header. */
5153 *hdr = &cmd_flow_token_hdr;
5156 /** Dispatch parsed buffer to function calls. */
/*
 * Final dispatch: maps the parsed command enum to the corresponding
 * port_flow_*() implementation with the arguments gathered in @in.
 * NOTE(review): case labels/breaks elided in this excerpt.
 */
5158 cmd_flow_parsed(const struct buffer *in)
5160 switch (in->command) {
5162 port_flow_validate(in->port, &in->args.vc.attr,
5163 in->args.vc.pattern, in->args.vc.actions);
5166 port_flow_create(in->port, &in->args.vc.attr,
5167 in->args.vc.pattern, in->args.vc.actions);
5170 port_flow_destroy(in->port, in->args.destroy.rule_n,
5171 in->args.destroy.rule);
5174 port_flow_flush(in->port);
5177 port_flow_query(in->port, in->args.query.rule,
5178 &in->args.query.action);
5181 port_flow_list(in->port, in->args.list.group_n,
5182 in->args.list.group);
5185 port_flow_isolate(in->port, in->args.isolate.set);
5192 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose callback: arg0 selects token generation vs. final dispatch. */
5194 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5197 cmd_flow_tok(arg0, arg2);
5199 cmd_flow_parsed(arg0);
5202 /** Global parser instance (cmdline API). */
5203 cmdline_parse_inst_t cmd_flow = {
5205 .data = NULL, /**< Unused. */
5206 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5209 }, /**< Tokens are returned by cmd_flow_tok(). */