1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_eth_ctrl.h>
19 #include <rte_ethdev.h>
20 #include <rte_byteorder.h>
21 #include <cmdline_parse.h>
22 #include <cmdline_parse_etheraddr.h>
27 /** Parser token indices. */
48 /* Top-level command. */
51 /* Sub-level commands. */
60 /* Destroy arguments. */
63 /* Query arguments. */
69 /* Validate/create arguments. */
76 /* Validate/create pattern. */
113 ITEM_VLAN_INNER_TYPE,
145 ITEM_E_TAG_GRP_ECID_B,
164 ITEM_ARP_ETH_IPV4_SHA,
165 ITEM_ARP_ETH_IPV4_SPA,
166 ITEM_ARP_ETH_IPV4_THA,
167 ITEM_ARP_ETH_IPV4_TPA,
169 ITEM_IPV6_EXT_NEXT_HDR,
174 ITEM_ICMP6_ND_NS_TARGET_ADDR,
176 ITEM_ICMP6_ND_NA_TARGET_ADDR,
178 ITEM_ICMP6_ND_OPT_TYPE,
179 ITEM_ICMP6_ND_OPT_SLA_ETH,
180 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
181 ITEM_ICMP6_ND_OPT_TLA_ETH,
182 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
186 /* Validate/create actions. */
206 ACTION_RSS_FUNC_DEFAULT,
207 ACTION_RSS_FUNC_TOEPLITZ,
208 ACTION_RSS_FUNC_SIMPLE_XOR,
220 ACTION_PHY_PORT_ORIGINAL,
221 ACTION_PHY_PORT_INDEX,
223 ACTION_PORT_ID_ORIGINAL,
227 ACTION_OF_SET_MPLS_TTL,
228 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
229 ACTION_OF_DEC_MPLS_TTL,
230 ACTION_OF_SET_NW_TTL,
231 ACTION_OF_SET_NW_TTL_NW_TTL,
232 ACTION_OF_DEC_NW_TTL,
233 ACTION_OF_COPY_TTL_OUT,
234 ACTION_OF_COPY_TTL_IN,
237 ACTION_OF_PUSH_VLAN_ETHERTYPE,
238 ACTION_OF_SET_VLAN_VID,
239 ACTION_OF_SET_VLAN_VID_VLAN_VID,
240 ACTION_OF_SET_VLAN_PCP,
241 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
243 ACTION_OF_POP_MPLS_ETHERTYPE,
245 ACTION_OF_PUSH_MPLS_ETHERTYPE,
252 ACTION_MPLSOGRE_ENCAP,
253 ACTION_MPLSOGRE_DECAP,
254 ACTION_MPLSOUDP_ENCAP,
255 ACTION_MPLSOUDP_DECAP,
257 ACTION_SET_IPV4_SRC_IPV4_SRC,
259 ACTION_SET_IPV4_DST_IPV4_DST,
261 ACTION_SET_IPV6_SRC_IPV6_SRC,
263 ACTION_SET_IPV6_DST_IPV6_DST,
265 ACTION_SET_TP_SRC_TP_SRC,
267 ACTION_SET_TP_DST_TP_DST,
273 ACTION_SET_MAC_SRC_MAC_SRC,
275 ACTION_SET_MAC_DST_MAC_DST,
278 /** Maximum size for pattern in struct rte_flow_item_raw. */
/* Size is in bytes; storage for the pattern is reserved immediately past the
 * structure itself (see ITEM_RAW_SIZE). */
279 #define ITEM_RAW_PATTERN_SIZE 40
281 /** Storage size for struct rte_flow_item_raw including pattern. */
/* The raw item's pattern bytes live right after the structure, hence
 * sizeof() plus the fixed pattern byte budget. */
282 #define ITEM_RAW_SIZE \
283 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
285 /** Maximum number of queue indices in struct rte_flow_action_rss. */
/* Upper bound on entries accepted for the RSS action's queue[] array
 * (see struct action_rss_data). */
286 #define ACTION_RSS_QUEUE_NUM 32
288 /** Storage for struct rte_flow_action_rss including external data. */
289 struct action_rss_data {
290 struct rte_flow_action_rss conf;
291 uint8_t key[RSS_HASH_KEY_LENGTH];
292 uint16_t queue[ACTION_RSS_QUEUE_NUM];
295 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
/* Sized for the VXLAN encapsulation header chain stored in struct
 * action_vxlan_encap_data (Ethernet, VLAN, IPv4/IPv6, UDP, VXLAN items).
 * NOTE(review): presumably one slot holds the terminating END item --
 * confirm against the encap parser. */
296 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
298 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
299 struct action_vxlan_encap_data {
300 struct rte_flow_action_vxlan_encap conf;
301 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
302 struct rte_flow_item_eth item_eth;
303 struct rte_flow_item_vlan item_vlan;
305 struct rte_flow_item_ipv4 item_ipv4;
306 struct rte_flow_item_ipv6 item_ipv6;
308 struct rte_flow_item_udp item_udp;
309 struct rte_flow_item_vxlan item_vxlan;
312 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
/* Sized for the NVGRE encapsulation header chain stored in struct
 * action_nvgre_encap_data (Ethernet, VLAN, IPv4/IPv6, NVGRE items).
 * NOTE(review): presumably one slot holds the terminating END item --
 * confirm against the encap parser. */
313 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
315 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
316 struct action_nvgre_encap_data {
317 struct rte_flow_action_nvgre_encap conf;
318 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
319 struct rte_flow_item_eth item_eth;
320 struct rte_flow_item_vlan item_vlan;
322 struct rte_flow_item_ipv4 item_ipv4;
323 struct rte_flow_item_ipv6 item_ipv6;
325 struct rte_flow_item_nvgre item_nvgre;
328 /** Maximum data size in struct rte_flow_action_raw_encap. */
/* Byte budget shared by the data[] and preserve[] buffers appended to the
 * raw encap/decap action configurations (see struct action_raw_encap_data
 * and struct action_raw_decap_data). */
329 #define ACTION_RAW_ENCAP_MAX_DATA 128
331 /** Storage for struct rte_flow_action_raw_encap including external data. */
332 struct action_raw_encap_data {
333 struct rte_flow_action_raw_encap conf;
334 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
335 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
338 /** Storage for struct rte_flow_action_raw_decap including external data. */
339 struct action_raw_decap_data {
340 struct rte_flow_action_raw_decap conf;
341 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
344 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Depth of the next[] and args[] arrays in struct context, i.e. how many
 * pending token lists / argument entries a single command may stack. */
345 #define CTX_STACK_SIZE 16
347 /** Parser context. */
349 /** Stack of subsequent token lists to process. */
350 const enum index *next[CTX_STACK_SIZE];
351 /** Arguments for stacked tokens. */
352 const void *args[CTX_STACK_SIZE];
353 enum index curr; /**< Current token index. */
354 enum index prev; /**< Index of the last token seen. */
355 int next_num; /**< Number of entries in next[]. */
356 int args_num; /**< Number of entries in args[]. */
357 uint32_t eol:1; /**< EOL has been detected. */
358 uint32_t last:1; /**< No more arguments. */
359 portid_t port; /**< Current port ID (for completions). */
360 uint32_t objdata; /**< Object-specific data. */
361 void *object; /**< Address of current object for relative offsets. */
362 void *objmask; /**< Object a full mask must be written to. */
365 /** Token argument. */
367 uint32_t hton:1; /**< Use network byte ordering. */
368 uint32_t sign:1; /**< Value is signed. */
369 uint32_t bounded:1; /**< Value is bounded. */
370 uintmax_t min; /**< Minimum value if bounded. */
371 uintmax_t max; /**< Maximum value if bounded. */
372 uint32_t offset; /**< Relative offset from ctx->object. */
373 uint32_t size; /**< Field size. */
374 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
377 /** Parser token definition. */
379 /** Type displayed during completion (defaults to "TOKEN"). */
381 /** Help displayed during completion (defaults to token name). */
383 /** Private data used by parser functions. */
386 * Lists of subsequent tokens to push on the stack. Each call to the
387 * parser consumes the last entry of that stack.
389 const enum index *const *next;
390 /** Arguments stack for subsequent tokens that need them. */
391 const struct arg *const *args;
393 * Token-processing callback, returns -1 in case of error, the
394 * length of the matched string otherwise. If NULL, attempts to
395 * match the token name.
397 * If buf is not NULL, the result should be stored in it according
398 * to context. An error is returned if not large enough.
400 int (*call)(struct context *ctx, const struct token *token,
401 const char *str, unsigned int len,
402 void *buf, unsigned int size);
404 * Callback that provides possible values for this token, used for
405 * completion. Returns -1 in case of error, the number of possible
406 * values otherwise. If NULL, the token name is used.
408 * If buf is not NULL, entry index ent is written to buf and the
409 * full length of the entry is returned (same behavior as
412 int (*comp)(struct context *ctx, const struct token *token,
413 unsigned int ent, char *buf, unsigned int size);
414 /** Mandatory token name, no default value. */
418 /** Static initializer for the next field. */
/* Builds a NULL-terminated array of token-index lists; each individual list
 * is itself produced with NEXT_ENTRY(). */
419 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
421 /** Static initializer for a NEXT() entry. */
/* Builds a single list of token indices, terminated by ZERO. */
422 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
424 /** Static initializer for the args field. */
/* Builds a NULL-terminated array of struct arg pointers, one per subsequent
 * token that needs an argument. */
425 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
427 /** Static initializer for ARGS() to target a field. */
428 #define ARGS_ENTRY(s, f) \
429 (&(const struct arg){ \
430 .offset = offsetof(s, f), \
431 .size = sizeof(((s *)0)->f), \
434 /** Static initializer for ARGS() to target a bit-field. */
435 #define ARGS_ENTRY_BF(s, f, b) \
436 (&(const struct arg){ \
438 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
441 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
442 #define ARGS_ENTRY_MASK(s, f, m) \
443 (&(const struct arg){ \
444 .offset = offsetof(s, f), \
445 .size = sizeof(((s *)0)->f), \
446 .mask = (const void *)(m), \
449 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
450 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
451 (&(const struct arg){ \
453 .offset = offsetof(s, f), \
454 .size = sizeof(((s *)0)->f), \
455 .mask = (const void *)(m), \
458 /** Static initializer for ARGS() to target a pointer. */
459 #define ARGS_ENTRY_PTR(s, f) \
460 (&(const struct arg){ \
461 .size = sizeof(*((s *)0)->f), \
464 /** Static initializer for ARGS() with arbitrary offset and size. */
465 #define ARGS_ENTRY_ARB(o, s) \
466 (&(const struct arg){ \
471 /** Same as ARGS_ENTRY_ARB() with bounded values. */
472 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
473 (&(const struct arg){ \
481 /** Same as ARGS_ENTRY() using network byte ordering. */
482 #define ARGS_ENTRY_HTON(s, f) \
483 (&(const struct arg){ \
485 .offset = offsetof(s, f), \
486 .size = sizeof(((s *)0)->f), \
489 /** Parser output buffer layout expected by cmd_flow_parsed(). */
491 enum index command; /**< Flow command. */
492 portid_t port; /**< Affected port ID. */
495 struct rte_flow_attr attr;
496 struct rte_flow_item *pattern;
497 struct rte_flow_action *actions;
501 } vc; /**< Validate/create arguments. */
505 } destroy; /**< Destroy arguments. */
508 struct rte_flow_action action;
509 } query; /**< Query arguments. */
513 } list; /**< List arguments. */
516 } isolate; /**< Isolated mode arguments. */
517 } args; /**< Command arguments. */
520 /** Private data for pattern items. */
521 struct parse_item_priv {
522 enum rte_flow_item_type type; /**< Item type. */
523 uint32_t size; /**< Size of item specification structure. */
526 #define PRIV_ITEM(t, s) \
527 (&(const struct parse_item_priv){ \
528 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
532 /** Private data for actions. */
533 struct parse_action_priv {
534 enum rte_flow_action_type type; /**< Action type. */
535 uint32_t size; /**< Size of action configuration structure. */
538 #define PRIV_ACTION(t, s) \
539 (&(const struct parse_action_priv){ \
540 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
544 static const enum index next_vc_attr[] = {
554 static const enum index next_destroy_attr[] = {
560 static const enum index next_list_attr[] = {
566 static const enum index item_param[] = {
575 static const enum index next_item[] = {
611 ITEM_ICMP6_ND_OPT_SLA_ETH,
612 ITEM_ICMP6_ND_OPT_TLA_ETH,
617 static const enum index item_fuzzy[] = {
623 static const enum index item_any[] = {
629 static const enum index item_vf[] = {
635 static const enum index item_phy_port[] = {
641 static const enum index item_port_id[] = {
647 static const enum index item_mark[] = {
653 static const enum index item_raw[] = {
663 static const enum index item_eth[] = {
671 static const enum index item_vlan[] = {
676 ITEM_VLAN_INNER_TYPE,
681 static const enum index item_ipv4[] = {
691 static const enum index item_ipv6[] = {
702 static const enum index item_icmp[] = {
709 static const enum index item_udp[] = {
716 static const enum index item_tcp[] = {
724 static const enum index item_sctp[] = {
733 static const enum index item_vxlan[] = {
739 static const enum index item_e_tag[] = {
740 ITEM_E_TAG_GRP_ECID_B,
745 static const enum index item_nvgre[] = {
751 static const enum index item_mpls[] = {
757 static const enum index item_gre[] = {
763 static const enum index item_gtp[] = {
769 static const enum index item_geneve[] = {
776 static const enum index item_vxlan_gpe[] = {
782 static const enum index item_arp_eth_ipv4[] = {
783 ITEM_ARP_ETH_IPV4_SHA,
784 ITEM_ARP_ETH_IPV4_SPA,
785 ITEM_ARP_ETH_IPV4_THA,
786 ITEM_ARP_ETH_IPV4_TPA,
791 static const enum index item_ipv6_ext[] = {
792 ITEM_IPV6_EXT_NEXT_HDR,
797 static const enum index item_icmp6[] = {
804 static const enum index item_icmp6_nd_ns[] = {
805 ITEM_ICMP6_ND_NS_TARGET_ADDR,
810 static const enum index item_icmp6_nd_na[] = {
811 ITEM_ICMP6_ND_NA_TARGET_ADDR,
816 static const enum index item_icmp6_nd_opt[] = {
817 ITEM_ICMP6_ND_OPT_TYPE,
822 static const enum index item_icmp6_nd_opt_sla_eth[] = {
823 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
828 static const enum index item_icmp6_nd_opt_tla_eth[] = {
829 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
834 static const enum index item_meta[] = {
840 static const enum index next_action[] = {
856 ACTION_OF_SET_MPLS_TTL,
857 ACTION_OF_DEC_MPLS_TTL,
858 ACTION_OF_SET_NW_TTL,
859 ACTION_OF_DEC_NW_TTL,
860 ACTION_OF_COPY_TTL_OUT,
861 ACTION_OF_COPY_TTL_IN,
864 ACTION_OF_SET_VLAN_VID,
865 ACTION_OF_SET_VLAN_PCP,
874 ACTION_MPLSOGRE_ENCAP,
875 ACTION_MPLSOGRE_DECAP,
876 ACTION_MPLSOUDP_ENCAP,
877 ACTION_MPLSOUDP_DECAP,
892 static const enum index action_mark[] = {
898 static const enum index action_queue[] = {
904 static const enum index action_count[] = {
911 static const enum index action_rss[] = {
922 static const enum index action_vf[] = {
929 static const enum index action_phy_port[] = {
930 ACTION_PHY_PORT_ORIGINAL,
931 ACTION_PHY_PORT_INDEX,
936 static const enum index action_port_id[] = {
937 ACTION_PORT_ID_ORIGINAL,
943 static const enum index action_meter[] = {
949 static const enum index action_of_set_mpls_ttl[] = {
950 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
955 static const enum index action_of_set_nw_ttl[] = {
956 ACTION_OF_SET_NW_TTL_NW_TTL,
961 static const enum index action_of_push_vlan[] = {
962 ACTION_OF_PUSH_VLAN_ETHERTYPE,
967 static const enum index action_of_set_vlan_vid[] = {
968 ACTION_OF_SET_VLAN_VID_VLAN_VID,
973 static const enum index action_of_set_vlan_pcp[] = {
974 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
979 static const enum index action_of_pop_mpls[] = {
980 ACTION_OF_POP_MPLS_ETHERTYPE,
985 static const enum index action_of_push_mpls[] = {
986 ACTION_OF_PUSH_MPLS_ETHERTYPE,
991 static const enum index action_set_ipv4_src[] = {
992 ACTION_SET_IPV4_SRC_IPV4_SRC,
997 static const enum index action_set_mac_src[] = {
998 ACTION_SET_MAC_SRC_MAC_SRC,
1003 static const enum index action_set_ipv4_dst[] = {
1004 ACTION_SET_IPV4_DST_IPV4_DST,
1009 static const enum index action_set_ipv6_src[] = {
1010 ACTION_SET_IPV6_SRC_IPV6_SRC,
1015 static const enum index action_set_ipv6_dst[] = {
1016 ACTION_SET_IPV6_DST_IPV6_DST,
1021 static const enum index action_set_tp_src[] = {
1022 ACTION_SET_TP_SRC_TP_SRC,
1027 static const enum index action_set_tp_dst[] = {
1028 ACTION_SET_TP_DST_TP_DST,
1033 static const enum index action_set_ttl[] = {
1039 static const enum index action_jump[] = {
1045 static const enum index action_set_mac_dst[] = {
1046 ACTION_SET_MAC_DST_MAC_DST,
/*
 * Forward declarations of token-processing callbacks.
 *
 * All share the "call" signature from struct token: parser context, matched
 * token, input string and length, output buffer and its size; they return -1
 * on error or the length of the matched string otherwise.
 */
1051 static int parse_init(struct context *, const struct token *,
1052 const char *, unsigned int,
1053 void *, unsigned int);
/* Validate/create command and its spec/conf sub-parsers. */
1054 static int parse_vc(struct context *, const struct token *,
1055 const char *, unsigned int,
1056 void *, unsigned int);
1057 static int parse_vc_spec(struct context *, const struct token *,
1058 const char *, unsigned int, void *, unsigned int);
1059 static int parse_vc_conf(struct context *, const struct token *,
1060 const char *, unsigned int, void *, unsigned int);
1061 static int parse_vc_action_rss(struct context *, const struct token *,
1062 const char *, unsigned int, void *,
1064 static int parse_vc_action_rss_func(struct context *, const struct token *,
1065 const char *, unsigned int, void *,
1067 static int parse_vc_action_rss_type(struct context *, const struct token *,
1068 const char *, unsigned int, void *,
1070 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1071 const char *, unsigned int, void *,
1073 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1074 const char *, unsigned int, void *,
1076 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1077 const char *, unsigned int, void *,
1079 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1080 const char *, unsigned int, void *,
1082 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1083 const char *, unsigned int, void *,
/* Parsers for flow actions whose configuration carries external data. */
1085 static int parse_vc_action_mplsogre_encap(struct context *,
1086 const struct token *, const char *,
1087 unsigned int, void *, unsigned int);
1088 static int parse_vc_action_mplsogre_decap(struct context *,
1089 const struct token *, const char *,
1090 unsigned int, void *, unsigned int);
1091 static int parse_vc_action_mplsoudp_encap(struct context *,
1092 const struct token *, const char *,
1093 unsigned int, void *, unsigned int);
1094 static int parse_vc_action_mplsoudp_decap(struct context *,
1095 const struct token *, const char *,
1096 unsigned int, void *, unsigned int);
/* Parsers for the remaining top-level flow commands. */
1097 static int parse_destroy(struct context *, const struct token *,
1098 const char *, unsigned int,
1099 void *, unsigned int);
1100 static int parse_flush(struct context *, const struct token *,
1101 const char *, unsigned int,
1102 void *, unsigned int);
1103 static int parse_query(struct context *, const struct token *,
1104 const char *, unsigned int,
1105 void *, unsigned int);
1106 static int parse_action(struct context *, const struct token *,
1107 const char *, unsigned int,
1108 void *, unsigned int);
1109 static int parse_list(struct context *, const struct token *,
1110 const char *, unsigned int,
1111 void *, unsigned int);
1112 static int parse_isolate(struct context *, const struct token *,
1113 const char *, unsigned int,
1114 void *, unsigned int);
/* Parsers for basic argument types (integers, strings, addresses, ...). */
1115 static int parse_int(struct context *, const struct token *,
1116 const char *, unsigned int,
1117 void *, unsigned int);
1118 static int parse_prefix(struct context *, const struct token *,
1119 const char *, unsigned int,
1120 void *, unsigned int);
1121 static int parse_boolean(struct context *, const struct token *,
1122 const char *, unsigned int,
1123 void *, unsigned int);
1124 static int parse_string(struct context *, const struct token *,
1125 const char *, unsigned int,
1126 void *, unsigned int);
1127 static int parse_hex(struct context *ctx, const struct token *token,
1128 const char *str, unsigned int len,
1129 void *buf, unsigned int size);
1130 static int parse_mac_addr(struct context *, const struct token *,
1131 const char *, unsigned int,
1132 void *, unsigned int);
1133 static int parse_ipv4_addr(struct context *, const struct token *,
1134 const char *, unsigned int,
1135 void *, unsigned int);
1136 static int parse_ipv6_addr(struct context *, const struct token *,
1137 const char *, unsigned int,
1138 void *, unsigned int);
1139 static int parse_port(struct context *, const struct token *,
1140 const char *, unsigned int,
1141 void *, unsigned int);
/*
 * Completion callbacks ("comp" signature from struct token): given an entry
 * index ent, write the candidate value to buf (when non-NULL) and return its
 * full length, or -1 on error.
 */
1142 static int comp_none(struct context *, const struct token *,
1143 unsigned int, char *, unsigned int);
1144 static int comp_boolean(struct context *, const struct token *,
1145 unsigned int, char *, unsigned int);
1146 static int comp_action(struct context *, const struct token *,
1147 unsigned int, char *, unsigned int);
1148 static int comp_port(struct context *, const struct token *,
1149 unsigned int, char *, unsigned int);
1150 static int comp_rule_id(struct context *, const struct token *,
1151 unsigned int, char *, unsigned int);
1152 static int comp_vc_action_rss_type(struct context *, const struct token *,
1153 unsigned int, char *, unsigned int);
1154 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1155 unsigned int, char *, unsigned int);
1157 /** Token definitions. */
1158 static const struct token token_list[] = {
1159 /* Special tokens. */
1162 .help = "null entry, abused as the entry point",
1163 .next = NEXT(NEXT_ENTRY(FLOW)),
1168 .help = "command may end here",
1170 /* Common tokens. */
1174 .help = "integer value",
1179 .name = "{unsigned}",
1181 .help = "unsigned integer value",
1188 .help = "prefix length for bit-mask",
1189 .call = parse_prefix,
1193 .name = "{boolean}",
1195 .help = "any boolean value",
1196 .call = parse_boolean,
1197 .comp = comp_boolean,
1202 .help = "fixed string",
1203 .call = parse_string,
1209 .help = "fixed string",
1214 .name = "{MAC address}",
1216 .help = "standard MAC address notation",
1217 .call = parse_mac_addr,
1221 .name = "{IPv4 address}",
1222 .type = "IPV4 ADDRESS",
1223 .help = "standard IPv4 address notation",
1224 .call = parse_ipv4_addr,
1228 .name = "{IPv6 address}",
1229 .type = "IPV6 ADDRESS",
1230 .help = "standard IPv6 address notation",
1231 .call = parse_ipv6_addr,
1235 .name = "{rule id}",
1237 .help = "rule identifier",
1239 .comp = comp_rule_id,
1242 .name = "{port_id}",
1244 .help = "port identifier",
1249 .name = "{group_id}",
1251 .help = "group identifier",
1255 [PRIORITY_LEVEL] = {
1258 .help = "priority level",
1262 /* Top-level command. */
1265 .type = "{command} {port_id} [{arg} [...]]",
1266 .help = "manage ingress/egress flow rules",
1267 .next = NEXT(NEXT_ENTRY
1277 /* Sub-level commands. */
1280 .help = "check whether a flow rule can be created",
1281 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1282 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1287 .help = "create a flow rule",
1288 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1289 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1294 .help = "destroy specific flow rules",
1295 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1296 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1297 .call = parse_destroy,
1301 .help = "destroy all flow rules",
1302 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1303 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1304 .call = parse_flush,
1308 .help = "query an existing flow rule",
1309 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1310 NEXT_ENTRY(RULE_ID),
1311 NEXT_ENTRY(PORT_ID)),
1312 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1313 ARGS_ENTRY(struct buffer, args.query.rule),
1314 ARGS_ENTRY(struct buffer, port)),
1315 .call = parse_query,
1319 .help = "list existing flow rules",
1320 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1321 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1326 .help = "restrict ingress traffic to the defined flow rules",
1327 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1328 NEXT_ENTRY(PORT_ID)),
1329 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1330 ARGS_ENTRY(struct buffer, port)),
1331 .call = parse_isolate,
1333 /* Destroy arguments. */
1336 .help = "specify a rule identifier",
1337 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1338 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1339 .call = parse_destroy,
1341 /* Query arguments. */
1345 .help = "action to query, must be part of the rule",
1346 .call = parse_action,
1347 .comp = comp_action,
1349 /* List arguments. */
1352 .help = "specify a group",
1353 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1354 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1357 /* Validate/create attributes. */
1360 .help = "specify a group",
1361 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1362 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1367 .help = "specify a priority level",
1368 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1369 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1374 .help = "affect rule to ingress",
1375 .next = NEXT(next_vc_attr),
1380 .help = "affect rule to egress",
1381 .next = NEXT(next_vc_attr),
1386 .help = "apply rule directly to endpoints found in pattern",
1387 .next = NEXT(next_vc_attr),
1390 /* Validate/create pattern. */
1393 .help = "submit a list of pattern items",
1394 .next = NEXT(next_item),
1399 .help = "match value perfectly (with full bit-mask)",
1400 .call = parse_vc_spec,
1402 [ITEM_PARAM_SPEC] = {
1404 .help = "match value according to configured bit-mask",
1405 .call = parse_vc_spec,
1407 [ITEM_PARAM_LAST] = {
1409 .help = "specify upper bound to establish a range",
1410 .call = parse_vc_spec,
1412 [ITEM_PARAM_MASK] = {
1414 .help = "specify bit-mask with relevant bits set to one",
1415 .call = parse_vc_spec,
1417 [ITEM_PARAM_PREFIX] = {
1419 .help = "generate bit-mask from a prefix length",
1420 .call = parse_vc_spec,
1424 .help = "specify next pattern item",
1425 .next = NEXT(next_item),
1429 .help = "end list of pattern items",
1430 .priv = PRIV_ITEM(END, 0),
1431 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1436 .help = "no-op pattern item",
1437 .priv = PRIV_ITEM(VOID, 0),
1438 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1443 .help = "perform actions when pattern does not match",
1444 .priv = PRIV_ITEM(INVERT, 0),
1445 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1450 .help = "match any protocol for the current layer",
1451 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1452 .next = NEXT(item_any),
1457 .help = "number of layers covered",
1458 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1459 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1463 .help = "match traffic from/to the physical function",
1464 .priv = PRIV_ITEM(PF, 0),
1465 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1470 .help = "match traffic from/to a virtual function ID",
1471 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1472 .next = NEXT(item_vf),
1478 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1479 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1483 .help = "match traffic from/to a specific physical port",
1484 .priv = PRIV_ITEM(PHY_PORT,
1485 sizeof(struct rte_flow_item_phy_port)),
1486 .next = NEXT(item_phy_port),
1489 [ITEM_PHY_PORT_INDEX] = {
1491 .help = "physical port index",
1492 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1493 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1497 .help = "match traffic from/to a given DPDK port ID",
1498 .priv = PRIV_ITEM(PORT_ID,
1499 sizeof(struct rte_flow_item_port_id)),
1500 .next = NEXT(item_port_id),
1503 [ITEM_PORT_ID_ID] = {
1505 .help = "DPDK port ID",
1506 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1507 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1511 .help = "match traffic against value set in previously matched rule",
1512 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1513 .next = NEXT(item_mark),
1518 .help = "Integer value to match against",
1519 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1520 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1524 .help = "match an arbitrary byte string",
1525 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1526 .next = NEXT(item_raw),
1529 [ITEM_RAW_RELATIVE] = {
1531 .help = "look for pattern after the previous item",
1532 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1533 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1536 [ITEM_RAW_SEARCH] = {
1538 .help = "search pattern from offset (see also limit)",
1539 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1540 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1543 [ITEM_RAW_OFFSET] = {
1545 .help = "absolute or relative offset for pattern",
1546 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1547 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1549 [ITEM_RAW_LIMIT] = {
1551 .help = "search area limit for start of pattern",
1552 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1553 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1555 [ITEM_RAW_PATTERN] = {
1557 .help = "byte string to look for",
1558 .next = NEXT(item_raw,
1560 NEXT_ENTRY(ITEM_PARAM_IS,
1563 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1564 ARGS_ENTRY(struct rte_flow_item_raw, length),
1565 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1566 ITEM_RAW_PATTERN_SIZE)),
1570 .help = "match Ethernet header",
1571 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1572 .next = NEXT(item_eth),
1577 .help = "destination MAC",
1578 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1579 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1583 .help = "source MAC",
1584 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1585 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1589 .help = "EtherType",
1590 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1591 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1595 .help = "match 802.1Q/ad VLAN tag",
1596 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1597 .next = NEXT(item_vlan),
1602 .help = "tag control information",
1603 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1604 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1608 .help = "priority code point",
1609 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1610 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1615 .help = "drop eligible indicator",
1616 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1617 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1622 .help = "VLAN identifier",
1623 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1624 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1627 [ITEM_VLAN_INNER_TYPE] = {
1628 .name = "inner_type",
1629 .help = "inner EtherType",
1630 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1631 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1636 .help = "match IPv4 header",
1637 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1638 .next = NEXT(item_ipv4),
1643 .help = "type of service",
1644 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1645 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1646 hdr.type_of_service)),
1650 .help = "time to live",
1651 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1652 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1655 [ITEM_IPV4_PROTO] = {
1657 .help = "next protocol ID",
1658 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1659 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1660 hdr.next_proto_id)),
1664 .help = "source address",
1665 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1666 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1671 .help = "destination address",
1672 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1673 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1678 .help = "match IPv6 header",
1679 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1680 .next = NEXT(item_ipv6),
1685 .help = "traffic class",
1686 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1687 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1689 "\x0f\xf0\x00\x00")),
1691 [ITEM_IPV6_FLOW] = {
1693 .help = "flow label",
1694 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1695 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1697 "\x00\x0f\xff\xff")),
1699 [ITEM_IPV6_PROTO] = {
1701 .help = "protocol (next header)",
1702 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1703 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1708 .help = "hop limit",
1709 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1710 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1715 .help = "source address",
1716 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1717 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1722 .help = "destination address",
1723 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1724 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1729 .help = "match ICMP header",
1730 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1731 .next = NEXT(item_icmp),
1734 [ITEM_ICMP_TYPE] = {
1736 .help = "ICMP packet type",
1737 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1738 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1741 [ITEM_ICMP_CODE] = {
1743 .help = "ICMP packet code",
1744 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1745 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1750 .help = "match UDP header",
1751 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1752 .next = NEXT(item_udp),
1757 .help = "UDP source port",
1758 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1759 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1764 .help = "UDP destination port",
1765 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1766 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1771 .help = "match TCP header",
1772 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1773 .next = NEXT(item_tcp),
1778 .help = "TCP source port",
1779 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1780 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1785 .help = "TCP destination port",
1786 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1787 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1790 [ITEM_TCP_FLAGS] = {
1792 .help = "TCP flags",
1793 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1794 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1799 .help = "match SCTP header",
1800 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1801 .next = NEXT(item_sctp),
1806 .help = "SCTP source port",
1807 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1808 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1813 .help = "SCTP destination port",
1814 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1815 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1820 .help = "validation tag",
1821 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1822 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1825 [ITEM_SCTP_CKSUM] = {
1828 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1829 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1834 .help = "match VXLAN header",
1835 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1836 .next = NEXT(item_vxlan),
1839 [ITEM_VXLAN_VNI] = {
1841 .help = "VXLAN identifier",
1842 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1843 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1847 .help = "match E-Tag header",
1848 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1849 .next = NEXT(item_e_tag),
1852 [ITEM_E_TAG_GRP_ECID_B] = {
1853 .name = "grp_ecid_b",
1854 .help = "GRP and E-CID base",
1855 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1856 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1862 .help = "match NVGRE header",
1863 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1864 .next = NEXT(item_nvgre),
1867 [ITEM_NVGRE_TNI] = {
1869 .help = "virtual subnet ID",
1870 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1871 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1875 .help = "match MPLS header",
1876 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1877 .next = NEXT(item_mpls),
1880 [ITEM_MPLS_LABEL] = {
1882 .help = "MPLS label",
1883 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1884 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1890 .help = "match GRE header",
1891 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1892 .next = NEXT(item_gre),
1895 [ITEM_GRE_PROTO] = {
1897 .help = "GRE protocol type",
1898 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1899 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1904 .help = "fuzzy pattern match, expect faster than default",
1905 .priv = PRIV_ITEM(FUZZY,
1906 sizeof(struct rte_flow_item_fuzzy)),
1907 .next = NEXT(item_fuzzy),
1910 [ITEM_FUZZY_THRESH] = {
1912 .help = "match accuracy threshold",
1913 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1914 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1919 .help = "match GTP header",
1920 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1921 .next = NEXT(item_gtp),
1926 .help = "tunnel endpoint identifier",
1927 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1928 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1932 .help = "match GTP header",
1933 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1934 .next = NEXT(item_gtp),
1939 .help = "match GTP header",
1940 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1941 .next = NEXT(item_gtp),
1946 .help = "match GENEVE header",
1947 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1948 .next = NEXT(item_geneve),
1951 [ITEM_GENEVE_VNI] = {
1953 .help = "virtual network identifier",
1954 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1955 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1957 [ITEM_GENEVE_PROTO] = {
1959 .help = "GENEVE protocol type",
1960 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1961 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1964 [ITEM_VXLAN_GPE] = {
1965 .name = "vxlan-gpe",
1966 .help = "match VXLAN-GPE header",
1967 .priv = PRIV_ITEM(VXLAN_GPE,
1968 sizeof(struct rte_flow_item_vxlan_gpe)),
1969 .next = NEXT(item_vxlan_gpe),
1972 [ITEM_VXLAN_GPE_VNI] = {
1974 .help = "VXLAN-GPE identifier",
1975 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1976 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1979 [ITEM_ARP_ETH_IPV4] = {
1980 .name = "arp_eth_ipv4",
1981 .help = "match ARP header for Ethernet/IPv4",
1982 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1983 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1984 .next = NEXT(item_arp_eth_ipv4),
1987 [ITEM_ARP_ETH_IPV4_SHA] = {
1989 .help = "sender hardware address",
1990 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1992 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1995 [ITEM_ARP_ETH_IPV4_SPA] = {
1997 .help = "sender IPv4 address",
1998 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2000 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2003 [ITEM_ARP_ETH_IPV4_THA] = {
2005 .help = "target hardware address",
2006 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2008 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2011 [ITEM_ARP_ETH_IPV4_TPA] = {
2013 .help = "target IPv4 address",
2014 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2016 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2021 .help = "match presence of any IPv6 extension header",
2022 .priv = PRIV_ITEM(IPV6_EXT,
2023 sizeof(struct rte_flow_item_ipv6_ext)),
2024 .next = NEXT(item_ipv6_ext),
2027 [ITEM_IPV6_EXT_NEXT_HDR] = {
2029 .help = "next header",
2030 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2031 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2036 .help = "match any ICMPv6 header",
2037 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2038 .next = NEXT(item_icmp6),
2041 [ITEM_ICMP6_TYPE] = {
2043 .help = "ICMPv6 type",
2044 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2045 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2048 [ITEM_ICMP6_CODE] = {
2050 .help = "ICMPv6 code",
2051 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2052 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2055 [ITEM_ICMP6_ND_NS] = {
2056 .name = "icmp6_nd_ns",
2057 .help = "match ICMPv6 neighbor discovery solicitation",
2058 .priv = PRIV_ITEM(ICMP6_ND_NS,
2059 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2060 .next = NEXT(item_icmp6_nd_ns),
2063 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2064 .name = "target_addr",
2065 .help = "target address",
2066 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2068 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2071 [ITEM_ICMP6_ND_NA] = {
2072 .name = "icmp6_nd_na",
2073 .help = "match ICMPv6 neighbor discovery advertisement",
2074 .priv = PRIV_ITEM(ICMP6_ND_NA,
2075 sizeof(struct rte_flow_item_icmp6_nd_na)),
2076 .next = NEXT(item_icmp6_nd_na),
2079 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2080 .name = "target_addr",
2081 .help = "target address",
2082 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2084 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2087 [ITEM_ICMP6_ND_OPT] = {
2088 .name = "icmp6_nd_opt",
2089 .help = "match presence of any ICMPv6 neighbor discovery"
2091 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2092 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2093 .next = NEXT(item_icmp6_nd_opt),
2096 [ITEM_ICMP6_ND_OPT_TYPE] = {
2098 .help = "ND option type",
2099 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2101 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2104 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2105 .name = "icmp6_nd_opt_sla_eth",
2106 .help = "match ICMPv6 neighbor discovery source Ethernet"
2107 " link-layer address option",
2109 (ICMP6_ND_OPT_SLA_ETH,
2110 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2111 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2114 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2116 .help = "source Ethernet LLA",
2117 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2119 .args = ARGS(ARGS_ENTRY_HTON
2120 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2122 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2123 .name = "icmp6_nd_opt_tla_eth",
2124 .help = "match ICMPv6 neighbor discovery target Ethernet"
2125 " link-layer address option",
2127 (ICMP6_ND_OPT_TLA_ETH,
2128 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2129 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2132 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2134 .help = "target Ethernet LLA",
2135 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2137 .args = ARGS(ARGS_ENTRY_HTON
2138 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2142 .help = "match metadata header",
2143 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2144 .next = NEXT(item_meta),
2147 [ITEM_META_DATA] = {
2149 .help = "metadata value",
2150 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2151 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2152 data, "\xff\xff\xff\xff")),
2155 /* Validate/create actions. */
2158 .help = "submit a list of associated actions",
2159 .next = NEXT(next_action),
2164 .help = "specify next action",
2165 .next = NEXT(next_action),
2169 .help = "end list of actions",
2170 .priv = PRIV_ACTION(END, 0),
2175 .help = "no-op action",
2176 .priv = PRIV_ACTION(VOID, 0),
2177 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2180 [ACTION_PASSTHRU] = {
2182 .help = "let subsequent rule process matched packets",
2183 .priv = PRIV_ACTION(PASSTHRU, 0),
2184 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2189 .help = "redirect traffic to a given group",
2190 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2191 .next = NEXT(action_jump),
2194 [ACTION_JUMP_GROUP] = {
2196 .help = "group to redirect traffic to",
2197 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2198 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2199 .call = parse_vc_conf,
2203 .help = "attach 32 bit value to packets",
2204 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2205 .next = NEXT(action_mark),
2208 [ACTION_MARK_ID] = {
2210 .help = "32 bit value to return with packets",
2211 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2212 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2213 .call = parse_vc_conf,
2217 .help = "flag packets",
2218 .priv = PRIV_ACTION(FLAG, 0),
2219 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2224 .help = "assign packets to a given queue index",
2225 .priv = PRIV_ACTION(QUEUE,
2226 sizeof(struct rte_flow_action_queue)),
2227 .next = NEXT(action_queue),
2230 [ACTION_QUEUE_INDEX] = {
2232 .help = "queue index to use",
2233 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2234 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2235 .call = parse_vc_conf,
2239 .help = "drop packets (note: passthru has priority)",
2240 .priv = PRIV_ACTION(DROP, 0),
2241 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2246 .help = "enable counters for this rule",
2247 .priv = PRIV_ACTION(COUNT,
2248 sizeof(struct rte_flow_action_count)),
2249 .next = NEXT(action_count),
2252 [ACTION_COUNT_ID] = {
2253 .name = "identifier",
2254 .help = "counter identifier to use",
2255 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2256 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2257 .call = parse_vc_conf,
2259 [ACTION_COUNT_SHARED] = {
2261 .help = "shared counter",
2262 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2263 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2265 .call = parse_vc_conf,
2269 .help = "spread packets among several queues",
2270 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2271 .next = NEXT(action_rss),
2272 .call = parse_vc_action_rss,
2274 [ACTION_RSS_FUNC] = {
2276 .help = "RSS hash function to apply",
2277 .next = NEXT(action_rss,
2278 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2279 ACTION_RSS_FUNC_TOEPLITZ,
2280 ACTION_RSS_FUNC_SIMPLE_XOR)),
2282 [ACTION_RSS_FUNC_DEFAULT] = {
2284 .help = "default hash function",
2285 .call = parse_vc_action_rss_func,
2287 [ACTION_RSS_FUNC_TOEPLITZ] = {
2289 .help = "Toeplitz hash function",
2290 .call = parse_vc_action_rss_func,
2292 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2293 .name = "simple_xor",
2294 .help = "simple XOR hash function",
2295 .call = parse_vc_action_rss_func,
2297 [ACTION_RSS_LEVEL] = {
2299 .help = "encapsulation level for \"types\"",
2300 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2301 .args = ARGS(ARGS_ENTRY_ARB
2302 (offsetof(struct action_rss_data, conf) +
2303 offsetof(struct rte_flow_action_rss, level),
2304 sizeof(((struct rte_flow_action_rss *)0)->
2307 [ACTION_RSS_TYPES] = {
2309 .help = "specific RSS hash types",
2310 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2312 [ACTION_RSS_TYPE] = {
2314 .help = "RSS hash type",
2315 .call = parse_vc_action_rss_type,
2316 .comp = comp_vc_action_rss_type,
2318 [ACTION_RSS_KEY] = {
2320 .help = "RSS hash key",
2321 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2322 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2324 (offsetof(struct action_rss_data, conf) +
2325 offsetof(struct rte_flow_action_rss, key_len),
2326 sizeof(((struct rte_flow_action_rss *)0)->
2328 ARGS_ENTRY(struct action_rss_data, key)),
2330 [ACTION_RSS_KEY_LEN] = {
2332 .help = "RSS hash key length in bytes",
2333 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2334 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2335 (offsetof(struct action_rss_data, conf) +
2336 offsetof(struct rte_flow_action_rss, key_len),
2337 sizeof(((struct rte_flow_action_rss *)0)->
2340 RSS_HASH_KEY_LENGTH)),
2342 [ACTION_RSS_QUEUES] = {
2344 .help = "queue indices to use",
2345 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2346 .call = parse_vc_conf,
2348 [ACTION_RSS_QUEUE] = {
2350 .help = "queue index",
2351 .call = parse_vc_action_rss_queue,
2352 .comp = comp_vc_action_rss_queue,
2356 .help = "direct traffic to physical function",
2357 .priv = PRIV_ACTION(PF, 0),
2358 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2363 .help = "direct traffic to a virtual function ID",
2364 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2365 .next = NEXT(action_vf),
2368 [ACTION_VF_ORIGINAL] = {
2370 .help = "use original VF ID if possible",
2371 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2372 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2374 .call = parse_vc_conf,
2379 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2380 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2381 .call = parse_vc_conf,
2383 [ACTION_PHY_PORT] = {
2385 .help = "direct packets to physical port index",
2386 .priv = PRIV_ACTION(PHY_PORT,
2387 sizeof(struct rte_flow_action_phy_port)),
2388 .next = NEXT(action_phy_port),
2391 [ACTION_PHY_PORT_ORIGINAL] = {
2393 .help = "use original port index if possible",
2394 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2395 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2397 .call = parse_vc_conf,
2399 [ACTION_PHY_PORT_INDEX] = {
2401 .help = "physical port index",
2402 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2403 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2405 .call = parse_vc_conf,
2407 [ACTION_PORT_ID] = {
2409 .help = "direct matching traffic to a given DPDK port ID",
2410 .priv = PRIV_ACTION(PORT_ID,
2411 sizeof(struct rte_flow_action_port_id)),
2412 .next = NEXT(action_port_id),
2415 [ACTION_PORT_ID_ORIGINAL] = {
2417 .help = "use original DPDK port ID if possible",
2418 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2419 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2421 .call = parse_vc_conf,
2423 [ACTION_PORT_ID_ID] = {
2425 .help = "DPDK port ID",
2426 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2427 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2428 .call = parse_vc_conf,
2432 .help = "meter the directed packets at given id",
2433 .priv = PRIV_ACTION(METER,
2434 sizeof(struct rte_flow_action_meter)),
2435 .next = NEXT(action_meter),
2438 [ACTION_METER_ID] = {
2440 .help = "meter id to use",
2441 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2442 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2443 .call = parse_vc_conf,
2445 [ACTION_OF_SET_MPLS_TTL] = {
2446 .name = "of_set_mpls_ttl",
2447 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2450 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2451 .next = NEXT(action_of_set_mpls_ttl),
2454 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2457 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2458 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2460 .call = parse_vc_conf,
2462 [ACTION_OF_DEC_MPLS_TTL] = {
2463 .name = "of_dec_mpls_ttl",
2464 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2465 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2466 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2469 [ACTION_OF_SET_NW_TTL] = {
2470 .name = "of_set_nw_ttl",
2471 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2474 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2475 .next = NEXT(action_of_set_nw_ttl),
2478 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2481 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2482 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2484 .call = parse_vc_conf,
2486 [ACTION_OF_DEC_NW_TTL] = {
2487 .name = "of_dec_nw_ttl",
2488 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2489 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2490 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2493 [ACTION_OF_COPY_TTL_OUT] = {
2494 .name = "of_copy_ttl_out",
2495 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2496 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2497 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2500 [ACTION_OF_COPY_TTL_IN] = {
2501 .name = "of_copy_ttl_in",
2502 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2503 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2504 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2507 [ACTION_OF_POP_VLAN] = {
2508 .name = "of_pop_vlan",
2509 .help = "OpenFlow's OFPAT_POP_VLAN",
2510 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2511 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2514 [ACTION_OF_PUSH_VLAN] = {
2515 .name = "of_push_vlan",
2516 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2519 sizeof(struct rte_flow_action_of_push_vlan)),
2520 .next = NEXT(action_of_push_vlan),
2523 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2524 .name = "ethertype",
2525 .help = "EtherType",
2526 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2527 .args = ARGS(ARGS_ENTRY_HTON
2528 (struct rte_flow_action_of_push_vlan,
2530 .call = parse_vc_conf,
2532 [ACTION_OF_SET_VLAN_VID] = {
2533 .name = "of_set_vlan_vid",
2534 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2537 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2538 .next = NEXT(action_of_set_vlan_vid),
2541 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2544 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2545 .args = ARGS(ARGS_ENTRY_HTON
2546 (struct rte_flow_action_of_set_vlan_vid,
2548 .call = parse_vc_conf,
2550 [ACTION_OF_SET_VLAN_PCP] = {
2551 .name = "of_set_vlan_pcp",
2552 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2555 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2556 .next = NEXT(action_of_set_vlan_pcp),
2559 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2561 .help = "VLAN priority",
2562 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2563 .args = ARGS(ARGS_ENTRY_HTON
2564 (struct rte_flow_action_of_set_vlan_pcp,
2566 .call = parse_vc_conf,
2568 [ACTION_OF_POP_MPLS] = {
2569 .name = "of_pop_mpls",
2570 .help = "OpenFlow's OFPAT_POP_MPLS",
2571 .priv = PRIV_ACTION(OF_POP_MPLS,
2572 sizeof(struct rte_flow_action_of_pop_mpls)),
2573 .next = NEXT(action_of_pop_mpls),
2576 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2577 .name = "ethertype",
2578 .help = "EtherType",
2579 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2580 .args = ARGS(ARGS_ENTRY_HTON
2581 (struct rte_flow_action_of_pop_mpls,
2583 .call = parse_vc_conf,
2585 [ACTION_OF_PUSH_MPLS] = {
2586 .name = "of_push_mpls",
2587 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2590 sizeof(struct rte_flow_action_of_push_mpls)),
2591 .next = NEXT(action_of_push_mpls),
2594 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2595 .name = "ethertype",
2596 .help = "EtherType",
2597 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2598 .args = ARGS(ARGS_ENTRY_HTON
2599 (struct rte_flow_action_of_push_mpls,
2601 .call = parse_vc_conf,
2603 [ACTION_VXLAN_ENCAP] = {
2604 .name = "vxlan_encap",
2605 .help = "VXLAN encapsulation, uses configuration set by \"set"
2607 .priv = PRIV_ACTION(VXLAN_ENCAP,
2608 sizeof(struct action_vxlan_encap_data)),
2609 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2610 .call = parse_vc_action_vxlan_encap,
2612 [ACTION_VXLAN_DECAP] = {
2613 .name = "vxlan_decap",
2614 .help = "Performs a decapsulation action by stripping all"
2615 " headers of the VXLAN tunnel network overlay from the"
2617 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2618 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2621 [ACTION_NVGRE_ENCAP] = {
2622 .name = "nvgre_encap",
2623 .help = "NVGRE encapsulation, uses configuration set by \"set"
2625 .priv = PRIV_ACTION(NVGRE_ENCAP,
2626 sizeof(struct action_nvgre_encap_data)),
2627 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2628 .call = parse_vc_action_nvgre_encap,
2630 [ACTION_NVGRE_DECAP] = {
2631 .name = "nvgre_decap",
2632 .help = "Performs a decapsulation action by stripping all"
2633 " headers of the NVGRE tunnel network overlay from the"
2635 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2636 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2639 [ACTION_L2_ENCAP] = {
2641 .help = "l2 encap, uses configuration set by"
2642 " \"set l2_encap\"",
2643 .priv = PRIV_ACTION(RAW_ENCAP,
2644 sizeof(struct action_raw_encap_data)),
2645 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2646 .call = parse_vc_action_l2_encap,
2648 [ACTION_L2_DECAP] = {
2650 .help = "l2 decap, uses configuration set by"
2651 " \"set l2_decap\"",
2652 .priv = PRIV_ACTION(RAW_DECAP,
2653 sizeof(struct action_raw_decap_data)),
2654 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2655 .call = parse_vc_action_l2_decap,
2657 [ACTION_MPLSOGRE_ENCAP] = {
2658 .name = "mplsogre_encap",
2659 .help = "mplsogre encapsulation, uses configuration set by"
2660 " \"set mplsogre_encap\"",
2661 .priv = PRIV_ACTION(RAW_ENCAP,
2662 sizeof(struct action_raw_encap_data)),
2663 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2664 .call = parse_vc_action_mplsogre_encap,
2666 [ACTION_MPLSOGRE_DECAP] = {
2667 .name = "mplsogre_decap",
2668 .help = "mplsogre decapsulation, uses configuration set by"
2669 " \"set mplsogre_decap\"",
2670 .priv = PRIV_ACTION(RAW_DECAP,
2671 sizeof(struct action_raw_decap_data)),
2672 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2673 .call = parse_vc_action_mplsogre_decap,
2675 [ACTION_MPLSOUDP_ENCAP] = {
2676 .name = "mplsoudp_encap",
2677 .help = "mplsoudp encapsulation, uses configuration set by"
2678 " \"set mplsoudp_encap\"",
2679 .priv = PRIV_ACTION(RAW_ENCAP,
2680 sizeof(struct action_raw_encap_data)),
2681 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2682 .call = parse_vc_action_mplsoudp_encap,
2684 [ACTION_MPLSOUDP_DECAP] = {
2685 .name = "mplsoudp_decap",
2686 .help = "mplsoudp decapsulation, uses configuration set by"
2687 " \"set mplsoudp_decap\"",
2688 .priv = PRIV_ACTION(RAW_DECAP,
2689 sizeof(struct action_raw_decap_data)),
2690 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2691 .call = parse_vc_action_mplsoudp_decap,
2693 [ACTION_SET_IPV4_SRC] = {
2694 .name = "set_ipv4_src",
2695 .help = "Set a new IPv4 source address in the outermost"
2697 .priv = PRIV_ACTION(SET_IPV4_SRC,
2698 sizeof(struct rte_flow_action_set_ipv4)),
2699 .next = NEXT(action_set_ipv4_src),
2702 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2703 .name = "ipv4_addr",
2704 .help = "new IPv4 source address to set",
2705 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2706 .args = ARGS(ARGS_ENTRY_HTON
2707 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2708 .call = parse_vc_conf,
2710 [ACTION_SET_IPV4_DST] = {
2711 .name = "set_ipv4_dst",
2712 .help = "Set a new IPv4 destination address in the outermost"
2714 .priv = PRIV_ACTION(SET_IPV4_DST,
2715 sizeof(struct rte_flow_action_set_ipv4)),
2716 .next = NEXT(action_set_ipv4_dst),
2719 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2720 .name = "ipv4_addr",
2721 .help = "new IPv4 destination address to set",
2722 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2723 .args = ARGS(ARGS_ENTRY_HTON
2724 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2725 .call = parse_vc_conf,
2727 [ACTION_SET_IPV6_SRC] = {
2728 .name = "set_ipv6_src",
2729 .help = "Set a new IPv6 source address in the outermost"
2731 .priv = PRIV_ACTION(SET_IPV6_SRC,
2732 sizeof(struct rte_flow_action_set_ipv6)),
2733 .next = NEXT(action_set_ipv6_src),
2736 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2737 .name = "ipv6_addr",
2738 .help = "new IPv6 source address to set",
2739 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2740 .args = ARGS(ARGS_ENTRY_HTON
2741 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2742 .call = parse_vc_conf,
2744 [ACTION_SET_IPV6_DST] = {
2745 .name = "set_ipv6_dst",
2746 .help = "Set a new IPv6 destination address in the outermost"
2748 .priv = PRIV_ACTION(SET_IPV6_DST,
2749 sizeof(struct rte_flow_action_set_ipv6)),
2750 .next = NEXT(action_set_ipv6_dst),
2753 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2754 .name = "ipv6_addr",
2755 .help = "new IPv6 destination address to set",
2756 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2757 .args = ARGS(ARGS_ENTRY_HTON
2758 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2759 .call = parse_vc_conf,
2761 [ACTION_SET_TP_SRC] = {
2762 .name = "set_tp_src",
2763 .help = "set a new source port number in the outermost"
2765 .priv = PRIV_ACTION(SET_TP_SRC,
2766 sizeof(struct rte_flow_action_set_tp)),
2767 .next = NEXT(action_set_tp_src),
2770 [ACTION_SET_TP_SRC_TP_SRC] = {
2772 .help = "new source port number to set",
2773 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2774 .args = ARGS(ARGS_ENTRY_HTON
2775 (struct rte_flow_action_set_tp, port)),
2776 .call = parse_vc_conf,
2778 [ACTION_SET_TP_DST] = {
2779 .name = "set_tp_dst",
2780 .help = "set a new destination port number in the outermost"
2782 .priv = PRIV_ACTION(SET_TP_DST,
2783 sizeof(struct rte_flow_action_set_tp)),
2784 .next = NEXT(action_set_tp_dst),
2787 [ACTION_SET_TP_DST_TP_DST] = {
2789 .help = "new destination port number to set",
2790 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2791 .args = ARGS(ARGS_ENTRY_HTON
2792 (struct rte_flow_action_set_tp, port)),
2793 .call = parse_vc_conf,
2795 [ACTION_MAC_SWAP] = {
2797 .help = "Swap the source and destination MAC addresses"
2798 " in the outermost Ethernet header",
2799 .priv = PRIV_ACTION(MAC_SWAP, 0),
2800 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2803 [ACTION_DEC_TTL] = {
2805 .help = "decrease network TTL if available",
2806 .priv = PRIV_ACTION(DEC_TTL, 0),
2807 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2810 [ACTION_SET_TTL] = {
2812 .help = "set ttl value",
2813 .priv = PRIV_ACTION(SET_TTL,
2814 sizeof(struct rte_flow_action_set_ttl)),
2815 .next = NEXT(action_set_ttl),
2818 [ACTION_SET_TTL_TTL] = {
2819 .name = "ttl_value",
2820 .help = "new ttl value to set",
2821 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2822 .args = ARGS(ARGS_ENTRY_HTON
2823 (struct rte_flow_action_set_ttl, ttl_value)),
2824 .call = parse_vc_conf,
2826 [ACTION_SET_MAC_SRC] = {
2827 .name = "set_mac_src",
2828 .help = "set source mac address",
2829 .priv = PRIV_ACTION(SET_MAC_SRC,
2830 sizeof(struct rte_flow_action_set_mac)),
2831 .next = NEXT(action_set_mac_src),
2834 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2836 .help = "new source mac address",
2837 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2838 .args = ARGS(ARGS_ENTRY_HTON
2839 (struct rte_flow_action_set_mac, mac_addr)),
2840 .call = parse_vc_conf,
2842 [ACTION_SET_MAC_DST] = {
2843 .name = "set_mac_dst",
2844 .help = "set destination mac address",
2845 .priv = PRIV_ACTION(SET_MAC_DST,
2846 sizeof(struct rte_flow_action_set_mac)),
2847 .next = NEXT(action_set_mac_dst),
2850 [ACTION_SET_MAC_DST_MAC_DST] = {
2852 .help = "new destination mac address to set",
2853 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
2854 .args = ARGS(ARGS_ENTRY_HTON
2855 (struct rte_flow_action_set_mac, mac_addr)),
2856 .call = parse_vc_conf,
2860 /** Remove and return last entry from argument stack. */
2861 static const struct arg *
2862 pop_args(struct context *ctx)
2864 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2867 /** Add entry on top of the argument stack. */
2869 push_args(struct context *ctx, const struct arg *arg)
2871 if (ctx->args_num == CTX_STACK_SIZE)
2873 ctx->args[ctx->args_num++] = arg;
2877 /** Spread value into buffer according to bit-mask. */
2879 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2881 uint32_t i = arg->size;
2889 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2898 unsigned int shift = 0;
2899 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
2901 for (shift = 0; arg->mask[i] >> shift; ++shift) {
2902 if (!(arg->mask[i] & (1 << shift)))
2907 *buf &= ~(1 << shift);
2908 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * @param full
 *   Complete NUL-terminated reference string.
 * @param partial
 *   Possibly non-terminated candidate string.
 * @param partial_len
 *   Number of characters of @p partial to compare.
 * @return
 *   0 when @p partial matches the first partial_len characters of @p full
 *   and covers it entirely; nonzero otherwise (first differing character
 *   or the next unmatched character of @p full).
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
2930 * Parse a prefix length and generate a bit-mask.
2932 * Last argument (ctx->args) is retrieved to determine mask size, storage
2933 * location and whether the result must use network byte ordering.
2936 parse_prefix(struct context *ctx, const struct token *token,
2937 const char *str, unsigned int len,
2938 void *buf, unsigned int size)
2940 const struct arg *arg = pop_args(ctx);
2941 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2948 /* Argument is expected. */
2952 u = strtoumax(str, &end, 0);
2953 if (errno || (size_t)(end - str) != len)
2958 extra = arg_entry_bf_fill(NULL, 0, arg);
2967 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2968 !arg_entry_bf_fill(ctx->objmask, -1, arg))
2975 if (bytes > size || bytes + !!extra > size)
2979 buf = (uint8_t *)ctx->object + arg->offset;
2980 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2982 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2983 memset(buf, 0x00, size - bytes);
2985 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
2989 memset(buf, 0xff, bytes);
2990 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2992 ((uint8_t *)buf)[bytes] = conv[extra];
2995 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
2998 push_args(ctx, arg);
3002 /** Default parsing function for token name matching. */
3004 parse_default(struct context *ctx, const struct token *token,
3005 const char *str, unsigned int len,
3006 void *buf, unsigned int size)
3011 if (strcmp_partial(token->name, str, len))
3016 /** Parse flow command, initialize output buffer for subsequent tokens. */
3018 parse_init(struct context *ctx, const struct token *token,
3019 const char *str, unsigned int len,
3020 void *buf, unsigned int size)
3022 struct buffer *out = buf;
3024 /* Token name must match. */
3025 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3027 /* Nothing else to do if there is no buffer. */
3030 /* Make sure buffer is large enough. */
3031 if (size < sizeof(*out))
3033 /* Initialize buffer. */
/* 0x22 poison beyond the header helps catch use of uninitialized space. */
3034 memset(out, 0x00, sizeof(*out));
3035 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3038 ctx->objmask = NULL;
3042 /** Parse tokens for validate/create commands. */
/*
 * First invocation records the command and reserves space; later invocations
 * append pattern items / actions that grow upward from out + 1 while their
 * spec/last/mask or conf data grows downward from out + size.
 * NOTE(review): fragmentary listing — case labels and returns are elided.
 */
3044 parse_vc(struct context *ctx, const struct token *token,
3045 const char *str, unsigned int len,
3046 void *buf, unsigned int size)
3048 struct buffer *out = buf;
3052 /* Token name must match. */
3053 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3055 /* Nothing else to do if there is no buffer. */
3058 if (!out->command) {
3059 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3061 if (sizeof(*out) > size)
3063 out->command = ctx->curr;
3066 ctx->objmask = NULL;
/* Data region starts at the very end of the buffer and grows downward. */
3067 out->args.vc.data = (uint8_t *)out + size;
3071 ctx->object = &out->args.vc.attr;
3072 ctx->objmask = NULL;
3073 switch (ctx->curr) {
3078 out->args.vc.attr.ingress = 1;
3081 out->args.vc.attr.egress = 1;
3084 out->args.vc.attr.transfer = 1;
/* Pattern array is aligned right after the buffer header. */
3087 out->args.vc.pattern =
3088 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3090 ctx->object = out->args.vc.pattern;
3091 ctx->objmask = NULL;
/* Actions array follows the pattern items already stored. */
3094 out->args.vc.actions =
3095 (void *)RTE_ALIGN_CEIL((uintptr_t)
3096 (out->args.vc.pattern +
3097 out->args.vc.pattern_n),
3099 ctx->object = out->args.vc.actions;
3100 ctx->objmask = NULL;
/* Still parsing pattern items (actions array not started yet). */
3107 if (!out->args.vc.actions) {
3108 const struct parse_item_priv *priv = token->priv;
3109 struct rte_flow_item *item =
3110 out->args.vc.pattern + out->args.vc.pattern_n;
3112 data_size = priv->size * 3; /* spec, last, mask */
3113 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3114 (out->args.vc.data - data_size),
/* Fail when the upward and downward regions would collide. */
3116 if ((uint8_t *)item + sizeof(*item) > data)
3118 *item = (struct rte_flow_item){
3121 ++out->args.vc.pattern_n;
3123 ctx->objmask = NULL;
3125 const struct parse_action_priv *priv = token->priv;
3126 struct rte_flow_action *action =
3127 out->args.vc.actions + out->args.vc.actions_n;
3129 data_size = priv->size; /* configuration */
3130 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3131 (out->args.vc.data - data_size),
3133 if ((uint8_t *)action + sizeof(*action) > data)
3135 *action = (struct rte_flow_action){
3137 .conf = data_size ? data : NULL,
3139 ++out->args.vc.actions_n;
3140 ctx->object = action;
3141 ctx->objmask = NULL;
3143 memset(data, 0, data_size);
3144 out->args.vc.data = data;
3145 ctx->objdata = data_size;
3149 /** Parse pattern item parameter type. */
/*
 * Selects which of spec/last/mask of the current item the following value
 * tokens will fill. NOTE(review): "index" assignments per case are elided.
 */
3151 parse_vc_spec(struct context *ctx, const struct token *token,
3152 const char *str, unsigned int len,
3153 void *buf, unsigned int size)
3155 struct buffer *out = buf;
3156 struct rte_flow_item *item;
3162 /* Token name must match. */
3163 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3165 /* Parse parameter types. */
3166 switch (ctx->curr) {
3167 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3173 case ITEM_PARAM_SPEC:
3176 case ITEM_PARAM_LAST:
3179 case ITEM_PARAM_PREFIX:
3180 /* Modify next token to expect a prefix. */
3181 if (ctx->next_num < 2)
3183 ctx->next[ctx->next_num - 2] = prefix;
3185 case ITEM_PARAM_MASK:
3191 /* Nothing else to do if there is no buffer. */
3194 if (!out->args.vc.pattern_n)
/* Operate on the most recently added pattern item. */
3196 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3197 data_size = ctx->objdata / 3; /* spec, last, mask */
3198 /* Point to selected object. */
3199 ctx->object = out->args.vc.data + (data_size * index);
3201 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3202 item->mask = ctx->objmask;
3204 ctx->objmask = NULL;
3205 /* Update relevant item pointer. */
3206 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3211 /** Parse action configuration field. */
/* Redirects subsequent value tokens to the current action's conf area. */
3213 parse_vc_conf(struct context *ctx, const struct token *token,
3214 const char *str, unsigned int len,
3215 void *buf, unsigned int size)
3217 struct buffer *out = buf;
3220 /* Token name must match. */
3221 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3223 /* Nothing else to do if there is no buffer. */
3226 /* Point to selected object. */
3227 ctx->object = out->args.vc.data;
3228 ctx->objmask = NULL;
3232 /** Parse RSS action. */
/*
 * Delegates generic handling to parse_vc(), then fills the freshly reserved
 * conf area with a default RSS configuration (all queues, testpmd key).
 */
3234 parse_vc_action_rss(struct context *ctx, const struct token *token,
3235 const char *str, unsigned int len,
3236 void *buf, unsigned int size)
3238 struct buffer *out = buf;
3239 struct rte_flow_action *action;
3240 struct action_rss_data *action_rss_data;
3244 ret = parse_vc(ctx, token, str, len, buf, size);
3247 /* Nothing else to do if there is no buffer. */
3250 if (!out->args.vc.actions_n)
3252 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3253 /* Point to selected object. */
3254 ctx->object = out->args.vc.data;
3255 ctx->objmask = NULL;
3256 /* Set up default configuration. */
3257 action_rss_data = ctx->object;
3258 *action_rss_data = (struct action_rss_data){
3259 .conf = (struct rte_flow_action_rss){
3260 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3263 .key_len = sizeof(action_rss_data->key),
3264 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3265 .key = action_rss_data->key,
3266 .queue = action_rss_data->queue,
3268 .key = "testpmd's default RSS hash key, "
3269 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3272 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3273 action_rss_data->queue[i] = i;
3274 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3275 ctx->port != (portid_t)RTE_PORT_ALL) {
3276 struct rte_eth_dev_info info;
/* Clamp key length to what the device actually supports. */
3278 rte_eth_dev_info_get(ctx->port, &info);
3279 action_rss_data->conf.key_len =
3280 RTE_MIN(sizeof(action_rss_data->key),
3281 info.hash_key_size);
3283 action->conf = &action_rss_data->conf;
3288 * Parse func field for RSS action.
3290 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3291 * ACTION_RSS_FUNC_* index that called this function.
3294 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3295 const char *str, unsigned int len,
3296 void *buf, unsigned int size)
3298 struct action_rss_data *action_rss_data;
3299 enum rte_eth_hash_function func;
3303 /* Token name must match. */
3304 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the current token index to the corresponding hash function. */
3306 switch (ctx->curr) {
3307 case ACTION_RSS_FUNC_DEFAULT:
3308 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3310 case ACTION_RSS_FUNC_TOEPLITZ:
3311 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3313 case ACTION_RSS_FUNC_SIMPLE_XOR:
3314 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3321 action_rss_data = ctx->object;
3322 action_rss_data->conf.func = func;
3327 * Parse type field for RSS action.
3329 * Valid tokens are type field names and the "end" token.
/*
 * The upper 16 bits of ctx->objdata flag that the type list has been
 * started, so conf.types is reset only on the first type token.
 */
3332 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3333 const char *str, unsigned int len,
3334 void *buf, unsigned int size)
3336 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3337 struct action_rss_data *action_rss_data;
3343 if (ctx->curr != ACTION_RSS_TYPE)
3345 if (!(ctx->objdata >> 16) && ctx->object) {
3346 action_rss_data = ctx->object;
3347 action_rss_data->conf.types = 0;
3349 if (!strcmp_partial("end", str, len)) {
/* "end" terminates the list: clear the started flag. */
3350 ctx->objdata &= 0xffff;
/* Look up the RSS type name in the global table. */
3353 for (i = 0; rss_type_table[i].str; ++i)
3354 if (!strcmp_partial(rss_type_table[i].str, str, len))
3356 if (!rss_type_table[i].str)
3358 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat this token so more type names can follow. */
3360 if (ctx->next_num == RTE_DIM(ctx->next))
3362 ctx->next[ctx->next_num++] = next;
3365 action_rss_data = ctx->object;
3366 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3371 * Parse queue field for RSS action.
3373 * Valid tokens are queue indices and the "end" token.
/* The upper 16 bits of ctx->objdata hold the running queue count. */
3376 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3377 const char *str, unsigned int len,
3378 void *buf, unsigned int size)
3380 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3381 struct action_rss_data *action_rss_data;
3388 if (ctx->curr != ACTION_RSS_QUEUE)
3390 i = ctx->objdata >> 16;
3391 if (!strcmp_partial("end", str, len)) {
3392 ctx->objdata &= 0xffff;
3395 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an arbitrary-offset arg describing queue[i], then parse it. */
3398 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3399 i * sizeof(action_rss_data->queue[i]),
3400 sizeof(action_rss_data->queue[i]))))
3402 ret = parse_int(ctx, token, str, len, NULL, 0);
3408 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Repeat this token so more queue indices can follow. */
3410 if (ctx->next_num == RTE_DIM(ctx->next))
3412 ctx->next[ctx->next_num++] = next;
3416 action_rss_data = ctx->object;
3417 action_rss_data->conf.queue_num = i;
/* Empty queue list stores a NULL pointer rather than a zero-length array. */
3418 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3422 /** Parse VXLAN encap action. */
/*
 * Builds a default ETH/[VLAN]/IPv4-or-IPv6/UDP/VXLAN item chain in the
 * action's conf area from the global vxlan_encap_conf settings.
 */
3424 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3425 const char *str, unsigned int len,
3426 void *buf, unsigned int size)
3428 struct buffer *out = buf;
3429 struct rte_flow_action *action;
3430 struct action_vxlan_encap_data *action_vxlan_encap_data;
3433 ret = parse_vc(ctx, token, str, len, buf, size);
3436 /* Nothing else to do if there is no buffer. */
3439 if (!out->args.vc.actions_n)
3441 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3442 /* Point to selected object. */
3443 ctx->object = out->args.vc.data;
3444 ctx->objmask = NULL;
3445 /* Set up default configuration. */
3446 action_vxlan_encap_data = ctx->object;
3447 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3448 .conf = (struct rte_flow_action_vxlan_encap){
3449 .definition = action_vxlan_encap_data->items,
3453 .type = RTE_FLOW_ITEM_TYPE_ETH,
3454 .spec = &action_vxlan_encap_data->item_eth,
3455 .mask = &rte_flow_item_eth_mask,
3458 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3459 .spec = &action_vxlan_encap_data->item_vlan,
3460 .mask = &rte_flow_item_vlan_mask,
3463 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3464 .spec = &action_vxlan_encap_data->item_ipv4,
3465 .mask = &rte_flow_item_ipv4_mask,
3468 .type = RTE_FLOW_ITEM_TYPE_UDP,
3469 .spec = &action_vxlan_encap_data->item_udp,
3470 .mask = &rte_flow_item_udp_mask,
3473 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3474 .spec = &action_vxlan_encap_data->item_vxlan,
3475 .mask = &rte_flow_item_vxlan_mask,
3478 .type = RTE_FLOW_ITEM_TYPE_END,
3483 .tci = vxlan_encap_conf.vlan_tci,
3487 .src_addr = vxlan_encap_conf.ipv4_src,
3488 .dst_addr = vxlan_encap_conf.ipv4_dst,
3491 .src_port = vxlan_encap_conf.udp_src,
3492 .dst_port = vxlan_encap_conf.udp_dst,
3494 .item_vxlan.flags = 0,
3496 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3497 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3498 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3499 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
/* IPv6 tunnel selected: replace item [2] (IPv4 by default) with IPv6. */
3500 if (!vxlan_encap_conf.select_ipv4) {
3501 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3502 &vxlan_encap_conf.ipv6_src,
3503 sizeof(vxlan_encap_conf.ipv6_src));
3504 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3505 &vxlan_encap_conf.ipv6_dst,
3506 sizeof(vxlan_encap_conf.ipv6_dst));
3507 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3508 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3509 .spec = &action_vxlan_encap_data->item_ipv6,
3510 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item [1] instead of shifting the array. */
3513 if (!vxlan_encap_conf.select_vlan)
3514 action_vxlan_encap_data->items[1].type =
3515 RTE_FLOW_ITEM_TYPE_VOID;
3516 if (vxlan_encap_conf.select_tos_ttl) {
3517 if (vxlan_encap_conf.select_ipv4) {
/* static: the item keeps pointing at this mask after return. */
3518 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3520 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3521 sizeof(ipv4_mask_tos));
3522 ipv4_mask_tos.hdr.type_of_service = 0xff;
3523 ipv4_mask_tos.hdr.time_to_live = 0xff;
3524 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3525 vxlan_encap_conf.ip_tos;
3526 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3527 vxlan_encap_conf.ip_ttl;
3528 action_vxlan_encap_data->items[2].mask =
3531 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3533 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3534 sizeof(ipv6_mask_tos));
/* Traffic class lives inside the IPv6 vtc_flow word. */
3535 ipv6_mask_tos.hdr.vtc_flow |=
3536 RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT);
3537 ipv6_mask_tos.hdr.hop_limits = 0xff;
3538 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3540 ((uint32_t)vxlan_encap_conf.ip_tos <<
3542 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3543 vxlan_encap_conf.ip_ttl;
3544 action_vxlan_encap_data->items[2].mask =
3548 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3549 RTE_DIM(vxlan_encap_conf.vni));
3550 action->conf = &action_vxlan_encap_data->conf;
3554 /** Parse NVGRE encap action. */
/*
 * Same pattern as the VXLAN variant: default ETH/[VLAN]/IP/NVGRE item chain
 * built from the global nvgre_encap_conf settings.
 */
3556 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3557 const char *str, unsigned int len,
3558 void *buf, unsigned int size)
3560 struct buffer *out = buf;
3561 struct rte_flow_action *action;
3562 struct action_nvgre_encap_data *action_nvgre_encap_data;
3565 ret = parse_vc(ctx, token, str, len, buf, size);
3568 /* Nothing else to do if there is no buffer. */
3571 if (!out->args.vc.actions_n)
3573 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3574 /* Point to selected object. */
3575 ctx->object = out->args.vc.data;
3576 ctx->objmask = NULL;
3577 /* Set up default configuration. */
3578 action_nvgre_encap_data = ctx->object;
3579 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3580 .conf = (struct rte_flow_action_nvgre_encap){
3581 .definition = action_nvgre_encap_data->items,
3585 .type = RTE_FLOW_ITEM_TYPE_ETH,
3586 .spec = &action_nvgre_encap_data->item_eth,
3587 .mask = &rte_flow_item_eth_mask,
3590 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3591 .spec = &action_nvgre_encap_data->item_vlan,
3592 .mask = &rte_flow_item_vlan_mask,
3595 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3596 .spec = &action_nvgre_encap_data->item_ipv4,
3597 .mask = &rte_flow_item_ipv4_mask,
3600 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3601 .spec = &action_nvgre_encap_data->item_nvgre,
3602 .mask = &rte_flow_item_nvgre_mask,
3605 .type = RTE_FLOW_ITEM_TYPE_END,
3610 .tci = nvgre_encap_conf.vlan_tci,
3614 .src_addr = nvgre_encap_conf.ipv4_src,
3615 .dst_addr = nvgre_encap_conf.ipv4_dst,
3617 .item_nvgre.flow_id = 0,
3619 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3620 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3621 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3622 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
/* IPv6 tunnel selected: replace item [2] (IPv4 by default) with IPv6. */
3623 if (!nvgre_encap_conf.select_ipv4) {
3624 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3625 &nvgre_encap_conf.ipv6_src,
3626 sizeof(nvgre_encap_conf.ipv6_src));
3627 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3628 &nvgre_encap_conf.ipv6_dst,
3629 sizeof(nvgre_encap_conf.ipv6_dst));
3630 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3631 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3632 .spec = &action_nvgre_encap_data->item_ipv6,
3633 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item [1] instead of shifting the array. */
3636 if (!nvgre_encap_conf.select_vlan)
3637 action_nvgre_encap_data->items[1].type =
3638 RTE_FLOW_ITEM_TYPE_VOID;
3639 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3640 RTE_DIM(nvgre_encap_conf.tni));
3641 action->conf = &action_nvgre_encap_data->conf;
3645 /** Parse l2 encap action. */
/*
 * Serializes an Ethernet (+ optional VLAN) header into a raw_encap buffer
 * from the global l2_encap_conf settings.
 */
3647 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3648 const char *str, unsigned int len,
3649 void *buf, unsigned int size)
3651 struct buffer *out = buf;
3652 struct rte_flow_action *action;
3653 struct action_raw_encap_data *action_encap_data;
3654 struct rte_flow_item_eth eth = { .type = 0, };
3655 struct rte_flow_item_vlan vlan = {
/* NOTE(review): TCI taken from mplsoudp_encap_conf, not l2_encap_conf —
 * looks like copy-paste from the MPLSoUDP variant; confirm intent. */
3656 .tci = mplsoudp_encap_conf.vlan_tci,
3662 ret = parse_vc(ctx, token, str, len, buf, size);
3665 /* Nothing else to do if there is no buffer. */
3668 if (!out->args.vc.actions_n)
3670 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3671 /* Point to selected object. */
3672 ctx->object = out->args.vc.data;
3673 ctx->objmask = NULL;
3674 /* Copy the headers to the buffer. */
3675 action_encap_data = ctx->object;
3676 *action_encap_data = (struct action_raw_encap_data) {
3677 .conf = (struct rte_flow_action_raw_encap){
3678 .data = action_encap_data->data,
3682 header = action_encap_data->data;
/* Outer EtherType depends on whether a VLAN tag follows. */
3683 if (l2_encap_conf.select_vlan)
3684 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3685 else if (l2_encap_conf.select_ipv4)
3686 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3688 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3689 memcpy(eth.dst.addr_bytes,
3690 l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
3691 memcpy(eth.src.addr_bytes,
3692 l2_encap_conf.eth_src, ETHER_ADDR_LEN);
3693 memcpy(header, &eth, sizeof(eth));
3694 header += sizeof(eth);
3695 if (l2_encap_conf.select_vlan) {
3696 if (l2_encap_conf.select_ipv4)
3697 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3699 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3700 memcpy(header, &vlan, sizeof(vlan));
3701 header += sizeof(vlan);
/* Final size is however far the write cursor advanced. */
3703 action_encap_data->conf.size = header -
3704 action_encap_data->data;
3705 action->conf = &action_encap_data->conf;
3709 /** Parse l2 decap action. */
/*
 * Serializes the Ethernet (+ optional VLAN) header to strip into a
 * raw_decap buffer based on the global l2_decap_conf settings.
 */
3711 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3712 const char *str, unsigned int len,
3713 void *buf, unsigned int size)
3715 struct buffer *out = buf;
3716 struct rte_flow_action *action;
3717 struct action_raw_decap_data *action_decap_data;
3718 struct rte_flow_item_eth eth = { .type = 0, };
3719 struct rte_flow_item_vlan vlan = {
/* NOTE(review): TCI taken from mplsoudp_encap_conf — suspected copy-paste
 * from the MPLSoUDP variant; verify against upstream intent. */
3720 .tci = mplsoudp_encap_conf.vlan_tci,
3726 ret = parse_vc(ctx, token, str, len, buf, size);
3729 /* Nothing else to do if there is no buffer. */
3732 if (!out->args.vc.actions_n)
3734 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3735 /* Point to selected object. */
3736 ctx->object = out->args.vc.data;
3737 ctx->objmask = NULL;
3738 /* Copy the headers to the buffer. */
3739 action_decap_data = ctx->object;
3740 *action_decap_data = (struct action_raw_decap_data) {
3741 .conf = (struct rte_flow_action_raw_decap){
3742 .data = action_decap_data->data,
3746 header = action_decap_data->data;
3747 if (l2_decap_conf.select_vlan)
3748 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3749 memcpy(header, &eth, sizeof(eth));
3750 header += sizeof(eth);
3751 if (l2_decap_conf.select_vlan) {
3752 memcpy(header, &vlan, sizeof(vlan));
3753 header += sizeof(vlan);
3755 action_decap_data->conf.size = header -
3756 action_decap_data->data;
3757 action->conf = &action_decap_data->conf;
/* MPLS unicast EtherType, carried in the GRE protocol field below. */
3761 #define ETHER_TYPE_MPLS_UNICAST 0x8847
3763 /** Parse MPLSOGRE encap action. */
/*
 * Serializes ETH/[VLAN]/IP/GRE/MPLS headers into a raw_encap buffer from
 * the global mplsogre_encap_conf settings.
 */
3765 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
3766 const char *str, unsigned int len,
3767 void *buf, unsigned int size)
3769 struct buffer *out = buf;
3770 struct rte_flow_action *action;
3771 struct action_raw_encap_data *action_encap_data;
3772 struct rte_flow_item_eth eth = { .type = 0, };
3773 struct rte_flow_item_vlan vlan = {
3774 .tci = mplsogre_encap_conf.vlan_tci,
3777 struct rte_flow_item_ipv4 ipv4 = {
3779 .src_addr = mplsogre_encap_conf.ipv4_src,
3780 .dst_addr = mplsogre_encap_conf.ipv4_dst,
3781 .next_proto_id = IPPROTO_GRE,
3784 struct rte_flow_item_ipv6 ipv6 = {
3786 .proto = IPPROTO_GRE,
3789 struct rte_flow_item_gre gre = {
3790 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3792 struct rte_flow_item_mpls mpls;
3796 ret = parse_vc(ctx, token, str, len, buf, size);
3799 /* Nothing else to do if there is no buffer. */
3802 if (!out->args.vc.actions_n)
3804 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3805 /* Point to selected object. */
3806 ctx->object = out->args.vc.data;
3807 ctx->objmask = NULL;
3808 /* Copy the headers to the buffer. */
3809 action_encap_data = ctx->object;
3810 *action_encap_data = (struct action_raw_encap_data) {
3811 .conf = (struct rte_flow_action_raw_encap){
3812 .data = action_encap_data->data,
3817 header = action_encap_data->data;
/* Outer EtherType depends on VLAN presence and IP version. */
3818 if (mplsogre_encap_conf.select_vlan)
3819 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3820 else if (mplsogre_encap_conf.select_ipv4)
3821 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3823 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3824 memcpy(eth.dst.addr_bytes,
3825 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3826 memcpy(eth.src.addr_bytes,
3827 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3828 memcpy(header, &eth, sizeof(eth));
3829 header += sizeof(eth);
3830 if (mplsogre_encap_conf.select_vlan) {
3831 if (mplsogre_encap_conf.select_ipv4)
3832 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3834 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3835 memcpy(header, &vlan, sizeof(vlan));
3836 header += sizeof(vlan);
3838 if (mplsogre_encap_conf.select_ipv4) {
3839 memcpy(header, &ipv4, sizeof(ipv4));
3840 header += sizeof(ipv4);
3842 memcpy(&ipv6.hdr.src_addr,
3843 &mplsogre_encap_conf.ipv6_src,
3844 sizeof(mplsogre_encap_conf.ipv6_src));
3845 memcpy(&ipv6.hdr.dst_addr,
3846 &mplsogre_encap_conf.ipv6_dst,
3847 sizeof(mplsogre_encap_conf.ipv6_dst));
3848 memcpy(header, &ipv6, sizeof(ipv6));
3849 header += sizeof(ipv6);
3851 memcpy(header, &gre, sizeof(gre));
3852 header += sizeof(gre);
3853 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
3854 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
3855 mpls.label_tc_s[2] |= 0x1;
3856 memcpy(header, &mpls, sizeof(mpls));
3857 header += sizeof(mpls);
3858 action_encap_data->conf.size = header -
3859 action_encap_data->data;
3860 action->conf = &action_encap_data->conf;
3864 /** Parse MPLSOGRE decap action. */
/*
 * Serializes the ETH/[VLAN]/IP/GRE/MPLS headers to strip into a raw_decap
 * buffer. NOTE(review): mixes mplsogre_decap_conf (select_vlan below) with
 * mplsogre_encap_conf (IP version, MACs) — verify this mirroring is
 * intentional rather than a copy-paste slip.
 */
3866 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
3867 const char *str, unsigned int len,
3868 void *buf, unsigned int size)
3870 struct buffer *out = buf;
3871 struct rte_flow_action *action;
3872 struct action_raw_decap_data *action_decap_data;
3873 struct rte_flow_item_eth eth = { .type = 0, };
3874 struct rte_flow_item_vlan vlan = {.tci = 0};
3875 struct rte_flow_item_ipv4 ipv4 = {
3877 .next_proto_id = IPPROTO_GRE,
3880 struct rte_flow_item_ipv6 ipv6 = {
3882 .proto = IPPROTO_GRE,
3885 struct rte_flow_item_gre gre = {
3886 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3888 struct rte_flow_item_mpls mpls;
3892 ret = parse_vc(ctx, token, str, len, buf, size);
3895 /* Nothing else to do if there is no buffer. */
3898 if (!out->args.vc.actions_n)
3900 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3901 /* Point to selected object. */
3902 ctx->object = out->args.vc.data;
3903 ctx->objmask = NULL;
3904 /* Copy the headers to the buffer. */
3905 action_decap_data = ctx->object;
3906 *action_decap_data = (struct action_raw_decap_data) {
3907 .conf = (struct rte_flow_action_raw_decap){
3908 .data = action_decap_data->data,
3912 header = action_decap_data->data;
3913 if (mplsogre_decap_conf.select_vlan)
3914 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3915 else if (mplsogre_encap_conf.select_ipv4)
3916 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3918 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3919 memcpy(eth.dst.addr_bytes,
3920 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3921 memcpy(eth.src.addr_bytes,
3922 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3923 memcpy(header, &eth, sizeof(eth));
3924 header += sizeof(eth);
3925 if (mplsogre_encap_conf.select_vlan) {
3926 if (mplsogre_encap_conf.select_ipv4)
3927 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3929 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3930 memcpy(header, &vlan, sizeof(vlan));
3931 header += sizeof(vlan);
3933 if (mplsogre_encap_conf.select_ipv4) {
3934 memcpy(header, &ipv4, sizeof(ipv4));
3935 header += sizeof(ipv4);
3937 memcpy(header, &ipv6, sizeof(ipv6));
3938 header += sizeof(ipv6);
3940 memcpy(header, &gre, sizeof(gre));
3941 header += sizeof(gre);
/* Decap matches any MPLS label: all-zero header. */
3942 memset(&mpls, 0, sizeof(mpls));
3943 memcpy(header, &mpls, sizeof(mpls));
3944 header += sizeof(mpls);
3945 action_decap_data->conf.size = header -
3946 action_decap_data->data;
3947 action->conf = &action_decap_data->conf;
3951 /** Parse MPLSOUDP encap action. */
/*
 * Serializes ETH/[VLAN]/IP/UDP/MPLS headers into a raw_encap buffer from
 * the global mplsoudp_encap_conf settings.
 */
3953 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
3954 const char *str, unsigned int len,
3955 void *buf, unsigned int size)
3957 struct buffer *out = buf;
3958 struct rte_flow_action *action;
3959 struct action_raw_encap_data *action_encap_data;
3960 struct rte_flow_item_eth eth = { .type = 0, };
3961 struct rte_flow_item_vlan vlan = {
3962 .tci = mplsoudp_encap_conf.vlan_tci,
3965 struct rte_flow_item_ipv4 ipv4 = {
3967 .src_addr = mplsoudp_encap_conf.ipv4_src,
3968 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
3969 .next_proto_id = IPPROTO_UDP,
3972 struct rte_flow_item_ipv6 ipv6 = {
3974 .proto = IPPROTO_UDP,
3977 struct rte_flow_item_udp udp = {
3979 .src_port = mplsoudp_encap_conf.udp_src,
3980 .dst_port = mplsoudp_encap_conf.udp_dst,
3983 struct rte_flow_item_mpls mpls;
3987 ret = parse_vc(ctx, token, str, len, buf, size);
3990 /* Nothing else to do if there is no buffer. */
3993 if (!out->args.vc.actions_n)
3995 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3996 /* Point to selected object. */
3997 ctx->object = out->args.vc.data;
3998 ctx->objmask = NULL;
3999 /* Copy the headers to the buffer. */
4000 action_encap_data = ctx->object;
4001 *action_encap_data = (struct action_raw_encap_data) {
4002 .conf = (struct rte_flow_action_raw_encap){
4003 .data = action_encap_data->data,
4008 header = action_encap_data->data;
/* Outer EtherType depends on VLAN presence and IP version. */
4009 if (mplsoudp_encap_conf.select_vlan)
4010 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
4011 else if (mplsoudp_encap_conf.select_ipv4)
4012 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4014 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4015 memcpy(eth.dst.addr_bytes,
4016 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
4017 memcpy(eth.src.addr_bytes,
4018 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
4019 memcpy(header, &eth, sizeof(eth));
4020 header += sizeof(eth);
4021 if (mplsoudp_encap_conf.select_vlan) {
4022 if (mplsoudp_encap_conf.select_ipv4)
4023 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4025 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4026 memcpy(header, &vlan, sizeof(vlan));
4027 header += sizeof(vlan);
4029 if (mplsoudp_encap_conf.select_ipv4) {
4030 memcpy(header, &ipv4, sizeof(ipv4));
4031 header += sizeof(ipv4);
4033 memcpy(&ipv6.hdr.src_addr,
4034 &mplsoudp_encap_conf.ipv6_src,
4035 sizeof(mplsoudp_encap_conf.ipv6_src));
4036 memcpy(&ipv6.hdr.dst_addr,
4037 &mplsoudp_encap_conf.ipv6_dst,
4038 sizeof(mplsoudp_encap_conf.ipv6_dst));
4039 memcpy(header, &ipv6, sizeof(ipv6));
4040 header += sizeof(ipv6);
4042 memcpy(header, &udp, sizeof(udp));
4043 header += sizeof(udp);
4044 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4045 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
4046 mpls.label_tc_s[2] |= 0x1;
4047 memcpy(header, &mpls, sizeof(mpls));
4048 header += sizeof(mpls);
4049 action_encap_data->conf.size = header -
4050 action_encap_data->data;
4051 action->conf = &action_encap_data->conf;
4055 /** Parse MPLSOUDP decap action. */
/*
 * Serializes the ETH/[VLAN]/IP/UDP/MPLS headers to strip into a raw_decap
 * buffer. NOTE(review): mixes mplsoudp_decap_conf (select_vlan below) with
 * mplsoudp_encap_conf (IP version, MACs) — verify this mirroring is
 * intentional rather than a copy-paste slip.
 */
4057 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4058 const char *str, unsigned int len,
4059 void *buf, unsigned int size)
4061 struct buffer *out = buf;
4062 struct rte_flow_action *action;
4063 struct action_raw_decap_data *action_decap_data;
4064 struct rte_flow_item_eth eth = { .type = 0, };
4065 struct rte_flow_item_vlan vlan = {.tci = 0};
4066 struct rte_flow_item_ipv4 ipv4 = {
4068 .next_proto_id = IPPROTO_UDP,
4071 struct rte_flow_item_ipv6 ipv6 = {
4073 .proto = IPPROTO_UDP,
4076 struct rte_flow_item_udp udp = {
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4078 .dst_port = rte_cpu_to_be_16(6635),
4081 struct rte_flow_item_mpls mpls;
4085 ret = parse_vc(ctx, token, str, len, buf, size);
4088 /* Nothing else to do if there is no buffer. */
4091 if (!out->args.vc.actions_n)
4093 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4094 /* Point to selected object. */
4095 ctx->object = out->args.vc.data;
4096 ctx->objmask = NULL;
4097 /* Copy the headers to the buffer. */
4098 action_decap_data = ctx->object;
4099 *action_decap_data = (struct action_raw_decap_data) {
4100 .conf = (struct rte_flow_action_raw_decap){
4101 .data = action_decap_data->data,
4105 header = action_decap_data->data;
4106 if (mplsoudp_decap_conf.select_vlan)
4107 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
4108 else if (mplsoudp_encap_conf.select_ipv4)
4109 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4111 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4112 memcpy(eth.dst.addr_bytes,
4113 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
4114 memcpy(eth.src.addr_bytes,
4115 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
4116 memcpy(header, &eth, sizeof(eth));
4117 header += sizeof(eth);
4118 if (mplsoudp_encap_conf.select_vlan) {
4119 if (mplsoudp_encap_conf.select_ipv4)
4120 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4122 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4123 memcpy(header, &vlan, sizeof(vlan));
4124 header += sizeof(vlan);
4126 if (mplsoudp_encap_conf.select_ipv4) {
4127 memcpy(header, &ipv4, sizeof(ipv4));
4128 header += sizeof(ipv4);
4130 memcpy(header, &ipv6, sizeof(ipv6));
4131 header += sizeof(ipv6);
4133 memcpy(header, &udp, sizeof(udp));
4134 header += sizeof(udp);
/* Decap matches any MPLS label: all-zero header. */
4135 memset(&mpls, 0, sizeof(mpls));
4136 memcpy(header, &mpls, sizeof(mpls));
4137 header += sizeof(mpls);
4138 action_decap_data->conf.size = header -
4139 action_decap_data->data;
4140 action->conf = &action_decap_data->conf;
4144 /** Parse tokens for destroy command. */
/*
 * First call records the command; later calls append one rule ID per token
 * to an array placed right after the buffer header.
 */
4146 parse_destroy(struct context *ctx, const struct token *token,
4147 const char *str, unsigned int len,
4148 void *buf, unsigned int size)
4150 struct buffer *out = buf;
4152 /* Token name must match. */
4153 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4155 /* Nothing else to do if there is no buffer. */
4158 if (!out->command) {
4159 if (ctx->curr != DESTROY)
4161 if (sizeof(*out) > size)
4163 out->command = ctx->curr;
4166 ctx->objmask = NULL;
4167 out->args.destroy.rule =
4168 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject another rule ID if it would overrun the buffer. */
4172 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4173 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4176 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4177 ctx->objmask = NULL;
4181 /** Parse tokens for flush command. */
/* Only records the command; flush takes no extra arguments here. */
4183 parse_flush(struct context *ctx, const struct token *token,
4184 const char *str, unsigned int len,
4185 void *buf, unsigned int size)
4187 struct buffer *out = buf;
4189 /* Token name must match. */
4190 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4192 /* Nothing else to do if there is no buffer. */
4195 if (!out->command) {
4196 if (ctx->curr != FLUSH)
4198 if (sizeof(*out) > size)
4200 out->command = ctx->curr;
4203 ctx->objmask = NULL;
4208 /** Parse tokens for query command. */
/* Records the command; query arguments are handled by later tokens. */
4210 parse_query(struct context *ctx, const struct token *token,
4211 const char *str, unsigned int len,
4212 void *buf, unsigned int size)
4214 struct buffer *out = buf;
4216 /* Token name must match. */
4217 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4219 /* Nothing else to do if there is no buffer. */
4222 if (!out->command) {
4223 if (ctx->curr != QUERY)
4225 if (sizeof(*out) > size)
4227 out->command = ctx->curr;
4230 ctx->objmask = NULL;
4235 /** Parse action names. */
/*
 * Looks the name up among next_action tokens and stores the matching
 * action type at arg->offset in the current object.
 */
4237 parse_action(struct context *ctx, const struct token *token,
4238 const char *str, unsigned int len,
4239 void *buf, unsigned int size)
4241 struct buffer *out = buf;
4242 const struct arg *arg = pop_args(ctx);
4246 /* Argument is expected. */
4249 /* Parse action name. */
4250 for (i = 0; next_action[i]; ++i) {
4251 const struct parse_action_priv *priv;
4253 token = &token_list[next_action[i]];
4254 if (strcmp_partial(token->name, str, len))
4260 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore argument for the error path below (elided). */
4266 push_args(ctx, arg);
4270 /** Parse tokens for list command. */
/*
 * First call records the command; later calls append group IDs to an
 * array placed right after the buffer header.
 */
4272 parse_list(struct context *ctx, const struct token *token,
4273 const char *str, unsigned int len,
4274 void *buf, unsigned int size)
4276 struct buffer *out = buf;
4278 /* Token name must match. */
4279 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4281 /* Nothing else to do if there is no buffer. */
4284 if (!out->command) {
4285 if (ctx->curr != LIST)
4287 if (sizeof(*out) > size)
4289 out->command = ctx->curr;
4292 ctx->objmask = NULL;
4293 out->args.list.group =
4294 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject another group ID if it would overrun the buffer. */
4298 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4299 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4302 ctx->object = out->args.list.group + out->args.list.group_n++;
4303 ctx->objmask = NULL;
4307 /** Parse tokens for isolate command. */
/*
 * Token-parser callback for the "flow isolate" command; identical shape
 * to parse_query()/parse_list(): validate the token name, then on first
 * match record ISOLATE in out->command and reset object pointers.
 */
4309 parse_isolate(struct context *ctx, const struct token *token,
4310 const char *str, unsigned int len,
4311 void *buf, unsigned int size)
4313 struct buffer *out = buf;
4315 /* Token name must match. */
4316 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4318 /* Nothing else to do if there is no buffer. */
4321 if (!out->command) {
4322 if (ctx->curr != ISOLATE)
4324 if (sizeof(*out) > size)
4326 out->command = ctx->curr;
4329 ctx->objmask = NULL;
4335 * Parse signed/unsigned integers 8 to 64-bit long.
4337 * Last argument (ctx->args) is retrieved to determine integer type and
4341 parse_int(struct context *ctx, const struct token *token,
4342 const char *str, unsigned int len,
4343 void *buf, unsigned int size)
/*
 * Workhorse numeric parser: the popped argument describes signedness
 * (arg->sign), valid range (arg->min/max), destination offset/size and
 * whether the stored value must be big-endian (arg->hton). The same
 * value is written to ctx->object and, when present, an all-ones copy
 * to ctx->objmask.
 */
4345 const struct arg *arg = pop_args(ctx);
4350 /* Argument is expected. */
/* Signed vs unsigned conversion is chosen by the argument descriptor. */
4355 (uintmax_t)strtoimax(str, &end, 0) :
4356 strtoumax(str, &end, 0);
/* Reject trailing garbage: the whole token must convert. */
4357 if (errno || (size_t)(end - str) != len)
4360 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4361 (intmax_t)u > (intmax_t)arg->max)) ||
4362 (!arg->sign && (u < arg->min || u > arg->max))))
/* Sub-byte-aligned fields go through the bit-field fill helper. */
4367 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4368 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4372 buf = (uint8_t *)ctx->object + arg->offset;
/* Value must fit in the destination width. */
4374 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4378 case sizeof(uint8_t):
4379 *(uint8_t *)buf = u;
4381 case sizeof(uint16_t):
4382 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are stored byte by byte. */
4384 case sizeof(uint8_t [3]):
4385 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4387 ((uint8_t *)buf)[0] = u;
4388 ((uint8_t *)buf)[1] = u >> 8;
4389 ((uint8_t *)buf)[2] = u >> 16;
4393 ((uint8_t *)buf)[0] = u >> 16;
4394 ((uint8_t *)buf)[1] = u >> 8;
4395 ((uint8_t *)buf)[2] = u;
4397 case sizeof(uint32_t):
4398 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4400 case sizeof(uint64_t):
4401 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is attached. */
4406 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4408 buf = (uint8_t *)ctx->objmask + arg->offset;
4413 push_args(ctx, arg);
4420 * Three arguments (ctx->args) are retrieved from the stack to store data,
4421 * its actual length and address (in that order).
4424 parse_string(struct context *ctx, const struct token *token,
4425 const char *str, unsigned int len,
4426 void *buf, unsigned int size)
/*
 * Stores a raw string: data bytes at arg_data->offset (zero-padded to
 * the field size), its length via a recursive parse_int() call on
 * arg_len, and optionally a pointer to the data at arg_addr->offset.
 * On any failure all three argument descriptors are pushed back.
 */
4428 const struct arg *arg_data = pop_args(ctx);
4429 const struct arg *arg_len = pop_args(ctx);
4430 const struct arg *arg_addr = pop_args(ctx);
4431 char tmp[16]; /* Ought to be enough. */
4434 /* Arguments are expected. */
4438 push_args(ctx, arg_data);
4442 push_args(ctx, arg_len);
4443 push_args(ctx, arg_data);
4446 size = arg_data->size;
4447 /* Bit-mask fill is not supported. */
4448 if (arg_data->mask || size < len)
4452 /* Let parse_int() fill length information first. */
/* Render the decimal length into tmp and feed it to parse_int(). */
4453 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4456 push_args(ctx, arg_len);
4457 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4462 buf = (uint8_t *)ctx->object + arg_data->offset;
4463 /* Output buffer is not necessarily NUL-terminated. */
4464 memcpy(buf, str, len);
4465 memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask covers exactly the bytes that were written. */
4467 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4468 /* Save address if requested. */
4469 if (arg_addr->size) {
4470 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4472 (uint8_t *)ctx->object + arg_data->offset
4476 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4478 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore the stack in original (addr, len, data) order. */
4484 push_args(ctx, arg_addr);
4485 push_args(ctx, arg_len);
4486 push_args(ctx, arg_data);
4491 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
/*
 * Converts a string of hex digits into raw bytes, consuming two input
 * characters per output byte. *size is the input length on entry;
 * presumably updated to the byte count on exit (elided here -- confirm).
 */
4497 /* Check input parameters */
4498 if ((src == NULL) ||
4504 /* Convert chars to bytes */
4505 for (i = 0, len = 0; i < *size; i += 2) {
/* snprintf copies a 2-char slice into tmp and NUL-terminates it. */
4506 snprintf(tmp, 3, "%s", src + i);
/* NOTE(review): strtoul() result is not validated -- non-hex input
 * silently converts to 0; consider checking *c here. */
4507 dst[len++] = strtoul(tmp, &c, 16);
4522 parse_hex(struct context *ctx, const struct token *token,
4523 const char *str, unsigned int len,
4524 void *buf, unsigned int size)
/*
 * Like parse_string() but for hex-encoded byte strings: an optional
 * "0x"/"0X" prefix is recognized, the digits are decoded into hex_tmp
 * via parse_hex_string(), then data/length/address are stored through
 * the three popped argument descriptors (same protocol as
 * parse_string()).
 */
4526 const struct arg *arg_data = pop_args(ctx);
4527 const struct arg *arg_len = pop_args(ctx);
4528 const struct arg *arg_addr = pop_args(ctx);
4529 char tmp[16]; /* Ought to be enough. */
4531 unsigned int hexlen = len;
4532 unsigned int length = 256;
/* 256-byte scratch buffer; declared as a VLA but the bound is fixed. */
4533 uint8_t hex_tmp[length];
4535 /* Arguments are expected. */
4539 push_args(ctx, arg_data);
4543 push_args(ctx, arg_len);
4544 push_args(ctx, arg_data);
4547 size = arg_data->size;
4548 /* Bit-mask fill is not supported. */
4554 /* translate bytes string to array. */
4555 if (str[0] == '0' && ((str[1] == 'x') ||
/* Reject inputs longer than the scratch buffer. */
4560 if (hexlen > length)
4562 ret = parse_hex_string(str, hex_tmp, &hexlen);
4565 /* Let parse_int() fill length information first. */
4566 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4569 push_args(ctx, arg_len);
4570 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4575 buf = (uint8_t *)ctx->object + arg_data->offset;
4576 /* Output buffer is not necessarily NUL-terminated. */
4577 memcpy(buf, hex_tmp, hexlen);
4578 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
4580 memset((uint8_t *)ctx->objmask + arg_data->offset,
4582 /* Save address if requested. */
4583 if (arg_addr->size) {
4584 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4586 (uint8_t *)ctx->object + arg_data->offset
4590 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4592 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore the argument stack in original order. */
4598 push_args(ctx, arg_addr);
4599 push_args(ctx, arg_len);
4600 push_args(ctx, arg_data);
4606 * Parse a MAC address.
4608 * Last argument (ctx->args) is retrieved to determine storage size and
4612 parse_mac_addr(struct context *ctx, const struct token *token,
4613 const char *str, unsigned int len,
4614 void *buf, unsigned int size)
/*
 * Delegates conversion to cmdline_parse_etheraddr(); the 6-byte result
 * is copied into ctx->object at arg->offset and the mask (when present)
 * is set to all-ones over the same bytes. Destination size must equal
 * sizeof(struct ether_addr) exactly.
 */
4616 const struct arg *arg = pop_args(ctx);
4617 struct ether_addr tmp;
4621 /* Argument is expected. */
4625 /* Bit-mask fill is not supported. */
4626 if (arg->mask || size != sizeof(tmp))
4628 /* Only network endian is supported. */
/* The converter must consume the entire token, not just a prefix. */
4631 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
4632 if (ret < 0 || (unsigned int)ret != len)
4636 buf = (uint8_t *)ctx->object + arg->offset;
4637 memcpy(buf, &tmp, size);
4639 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4642 push_args(ctx, arg);
4647 * Parse an IPv4 address.
4649 * Last argument (ctx->args) is retrieved to determine storage size and
4653 parse_ipv4_addr(struct context *ctx, const struct token *token,
4654 const char *str, unsigned int len,
4655 void *buf, unsigned int size)
/*
 * Converts a dotted-quad string with inet_pton(AF_INET); str is first
 * copied into a NUL-terminated scratch buffer (str2, declared in an
 * elided line) because str/len is not NUL-terminated. Inputs that are
 * not addresses fall back to plain integer parsing.
 */
4657 const struct arg *arg = pop_args(ctx);
4662 /* Argument is expected. */
4666 /* Bit-mask fill is not supported. */
4667 if (arg->mask || size != sizeof(tmp))
4669 /* Only network endian is supported. */
4672 memcpy(str2, str, len);
4674 ret = inet_pton(AF_INET, str2, &tmp);
4676 /* Attempt integer parsing. */
4677 push_args(ctx, arg);
4678 return parse_int(ctx, token, str, len, buf, size);
4682 buf = (uint8_t *)ctx->object + arg->offset;
4683 memcpy(buf, &tmp, size);
4685 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4688 push_args(ctx, arg);
4693 * Parse an IPv6 address.
4695 * Last argument (ctx->args) is retrieved to determine storage size and
4699 parse_ipv6_addr(struct context *ctx, const struct token *token,
4700 const char *str, unsigned int len,
4701 void *buf, unsigned int size)
/*
 * Same shape as parse_ipv4_addr() but with AF_INET6 and no integer
 * fallback: conversion failure is an error (error path elided here).
 */
4703 const struct arg *arg = pop_args(ctx);
4705 struct in6_addr tmp;
4709 /* Argument is expected. */
4713 /* Bit-mask fill is not supported. */
4714 if (arg->mask || size != sizeof(tmp))
4716 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
4719 memcpy(str2, str, len);
4721 ret = inet_pton(AF_INET6, str2, &tmp);
4726 buf = (uint8_t *)ctx->object + arg->offset;
4727 memcpy(buf, &tmp, size);
4729 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4732 push_args(ctx, arg);
4736 /** Boolean values (even indices stand for false). */
/* NULL-terminated table scanned by parse_boolean() and enumerated by
 * comp_boolean(); entry parity (i & 1) encodes the truth value. */
4737 static const char *const boolean_name[] = {
4747 * Parse a boolean value.
4749 * Last argument (ctx->args) is retrieved to determine storage size and
4753 parse_boolean(struct context *ctx, const struct token *token,
4754 const char *str, unsigned int len,
4755 void *buf, unsigned int size)
/* Recognized names are mapped to "0"/"1" and handed to parse_int();
 * unrecognized tokens are passed through unchanged so numeric input
 * still works. */
4757 const struct arg *arg = pop_args(ctx);
4761 /* Argument is expected. */
4764 for (i = 0; boolean_name[i]; ++i)
4765 if (!strcmp_partial(boolean_name[i], str, len))
4767 /* Process token as integer. */
/* Even indices in boolean_name[] mean false, odd indices mean true. */
4768 if (boolean_name[i])
4769 str = i & 1 ? "1" : "0";
4770 push_args(ctx, arg);
4771 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the length of the original token, not of the "0"/"1" alias. */
4772 return ret > 0 ? (int)len : ret;
4775 /** Parse port and update context. */
/*
 * Parses a port index through parse_int() into a throwaway compound
 * literal, then latches the value into ctx->port so later tokens (e.g.
 * comp_rule_id()) know which port to complete against.
 */
4777 parse_port(struct context *ctx, const struct token *token,
4778 const char *str, unsigned int len,
4779 void *buf, unsigned int size)
4781 struct buffer *out = &(struct buffer){ .port = 0 };
4789 ctx->objmask = NULL;
4790 size = sizeof(*out);
4792 ret = parse_int(ctx, token, str, len, out, size);
4794 ctx->port = out->port;
4800 /** No completion. */
/* Placeholder completion callback for tokens with nothing to suggest;
 * parameters are unused. */
4802 comp_none(struct context *ctx, const struct token *token,
4803 unsigned int ent, char *buf, unsigned int size)
4813 /** Complete boolean values. */
/* When buf is set, copy out the ent-th boolean name; otherwise the loop
 * counts entries (counting return elided in this excerpt). */
4815 comp_boolean(struct context *ctx, const struct token *token,
4816 unsigned int ent, char *buf, unsigned int size)
4822 for (i = 0; boolean_name[i]; ++i)
4823 if (buf && i == ent)
4824 return strlcpy(buf, boolean_name[i], size);
4830 /** Complete action names. */
/* Enumerates next_action[] token names; same count-vs-copy protocol as
 * comp_boolean(). */
4832 comp_action(struct context *ctx, const struct token *token,
4833 unsigned int ent, char *buf, unsigned int size)
4839 for (i = 0; next_action[i]; ++i)
4840 if (buf && i == ent)
4841 return strlcpy(buf, token_list[next_action[i]].name,
4848 /** Complete available ports. */
/* Walks every attached ethdev port; when buf is set, the ent-th port id
 * is formatted into buf. */
4850 comp_port(struct context *ctx, const struct token *token,
4851 unsigned int ent, char *buf, unsigned int size)
4858 RTE_ETH_FOREACH_DEV(p) {
4859 if (buf && i == ent)
4860 return snprintf(buf, size, "%u", p);
4868 /** Complete available rule IDs. */
/*
 * Enumerates flow rule ids on the port previously latched into
 * ctx->port by parse_port(); bails out when that port is invalid or the
 * RTE_PORT_ALL wildcard.
 */
4870 comp_rule_id(struct context *ctx, const struct token *token,
4871 unsigned int ent, char *buf, unsigned int size)
4874 struct rte_port *port;
4875 struct port_flow *pf;
4878 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4879 ctx->port == (portid_t)RTE_PORT_ALL)
4881 port = &ports[ctx->port];
/* Linked-list walk over the port's flow rules. */
4882 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4883 if (buf && i == ent)
4884 return snprintf(buf, size, "%u", pf->id);
4892 /** Complete type field for RSS action. */
/* Suggests each rss_type_table[] entry name in turn, then the literal
 * "end" terminator once the table is exhausted. */
4894 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4895 unsigned int ent, char *buf, unsigned int size)
4901 for (i = 0; rss_type_table[i].str; ++i)
4906 return strlcpy(buf, rss_type_table[ent].str, size);
4908 return snprintf(buf, size, "end");
4912 /** Complete queue field for RSS action. */
/* Suggests queue indices (bound check elided here), then the literal
 * "end" terminator. */
4914 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4915 unsigned int ent, char *buf, unsigned int size)
4922 return snprintf(buf, size, "%u", ent);
4924 return snprintf(buf, size, "end");
4928 /** Internal context. */
/* Single shared parser state used by all cmd_flow_* callbacks below;
 * NOTE(review): parsing is therefore not reentrant -- single-threaded
 * cmdline use assumed. */
4929 static struct context cmd_flow_context;
4931 /** Global parser instance (cmdline API). */
/* Forward declaration; the definition appears at the end of this file. */
4932 cmdline_parse_inst_t cmd_flow;
4934 /** Initialize context. */
/* Resets only the fields the parser actually reads between commands;
 * deliberately cheaper than zeroing the whole structure. */
4936 cmd_flow_context_init(struct context *ctx)
4938 /* A full memset() is not necessary. */
4948 ctx->objmask = NULL;
4951 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by the cmdline library for each word of input:
 * measures the token, detects end-of-line, then tries every candidate
 * token on top of ctx->next[] until one consumes exactly len
 * characters. The match's follow-up tokens and argument descriptors are
 * pushed for subsequent calls.
 */
4953 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4956 struct context *ctx = &cmd_flow_context;
4957 const struct token *token;
4958 const enum index *list;
4963 token = &token_list[ctx->curr];
4964 /* Check argument length. */
/* NOTE(review): src[len] is a plain char passed to isspace(); negative
 * values are undefined behavior (CERT STR37-C) -- consider a cast to
 * unsigned char. */
4967 for (len = 0; src[len]; ++len)
4968 if (src[len] == '#' || isspace(src[len]))
4972 /* Last argument and EOL detection. */
4973 for (i = len; src[i]; ++i)
4974 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4976 else if (!isspace(src[i])) {
4981 if (src[i] == '\r' || src[i] == '\n') {
4985 /* Initialize context if necessary. */
4986 if (!ctx->next_num) {
4989 ctx->next[ctx->next_num++] = token->next[0];
4991 /* Process argument through candidates. */
4992 ctx->prev = ctx->curr;
4993 list = ctx->next[ctx->next_num - 1];
4994 for (i = 0; list[i]; ++i) {
4995 const struct token *next = &token_list[list[i]];
4998 ctx->curr = list[i];
/* Tokens with a call() hook parse themselves; others use defaults. */
5000 tmp = next->call(ctx, next, src, len, result, size);
5002 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume the whole token to count as a match. */
5003 if (tmp == -1 || tmp != len)
5011 /* Push subsequent tokens if any. */
5013 for (i = 0; token->next[i]; ++i) {
5014 if (ctx->next_num == RTE_DIM(ctx->next))
5016 ctx->next[ctx->next_num++] = token->next[i];
5018 /* Push arguments if any. */
5020 for (i = 0; token->args[i]; ++i) {
5021 if (ctx->args_num == RTE_DIM(ctx->args))
5023 ctx->args[ctx->args_num++] = token->args[i];
5028 /** Return number of completion entries (cmdline API). */
/*
 * Counts candidate tokens on the current stack level. A single
 * candidate with its own comp() callback delegates the count to that
 * callback (buf == NULL means "count only").
 */
5030 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5032 struct context *ctx = &cmd_flow_context;
5033 const struct token *token = &token_list[ctx->curr];
5034 const enum index *list;
5038 /* Count number of tokens in current list. */
5040 list = ctx->next[ctx->next_num - 1];
5042 list = token->next[0];
5043 for (i = 0; list[i]; ++i)
5048 * If there is a single token, use its completion callback, otherwise
5049 * return the number of entries.
5051 token = &token_list[list[0]];
5052 if (i == 1 && token->comp) {
5053 /* Save index for cmd_flow_get_help(). */
5054 ctx->prev = list[0];
5055 return token->comp(ctx, token, 0, NULL, 0);
5060 /** Return a completion entry (cmdline API). */
/*
 * Fetches the index-th completion candidate into dst. Mirrors the
 * logic of cmd_flow_complete_get_nb(): a single candidate with a comp()
 * callback produces its own entries, otherwise the token name from the
 * candidate list is copied out. ctx->prev is saved in both paths so
 * cmd_flow_get_help() can describe the right token.
 */
5062 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5063 char *dst, unsigned int size)
5065 struct context *ctx = &cmd_flow_context;
5066 const struct token *token = &token_list[ctx->curr];
5067 const enum index *list;
5071 /* Count number of tokens in current list. */
5073 list = ctx->next[ctx->next_num - 1];
5075 list = token->next[0];
5076 for (i = 0; list[i]; ++i)
5080 /* If there is a single token, use its completion callback. */
5081 token = &token_list[list[0]];
5082 if (i == 1 && token->comp) {
5083 /* Save index for cmd_flow_get_help(). */
5084 ctx->prev = list[0];
/* Negative comp() means the entry does not exist. */
5085 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5087 /* Otherwise make sure the index is valid and use defaults. */
5090 token = &token_list[list[index]];
5091 strlcpy(dst, token->name, size);
5092 /* Save index for cmd_flow_get_help(). */
5093 ctx->prev = list[index];
5097 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the previously-selected token's type into dst and points the
 * global cmd_flow.help_str at its help text (falling back to its name
 * when no help is available).
 */
5099 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5101 struct context *ctx = &cmd_flow_context;
5102 const struct token *token = &token_list[ctx->prev];
5107 /* Set token type and update global help with details. */
5108 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
5110 cmd_flow.help_str = token->help;
5112 cmd_flow.help_str = token->name;
5116 /** Token definition template (cmdline API). */
/* Shared header returned for every dynamic token by cmd_flow_tok();
 * all four cmdline hooks route into the functions above. */
5117 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5118 .ops = &(struct cmdline_token_ops){
5119 .parse = cmd_flow_parse,
5120 .complete_get_nb = cmd_flow_complete_get_nb,
5121 .complete_get_elt = cmd_flow_complete_get_elt,
5122 .get_help = cmd_flow_get_help,
5127 /** Populate the next dynamic token. */
/*
 * Token generator driven by the cmdline library: resets the context
 * when asked for the first token, stops (NULL hdr, elided here) when
 * nothing more is expected, and otherwise hands back the shared
 * cmd_flow_token_hdr template. The eol/last check decides whether the
 * command may legally end at the current position.
 */
5129 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5130 cmdline_parse_token_hdr_t **hdr_inst)
5132 struct context *ctx = &cmd_flow_context;
5134 /* Always reinitialize context before requesting the first token. */
/* hdr_inst == cmd_flow.tokens identifies the first slot. */
5135 if (!(hdr_inst - cmd_flow.tokens))
5136 cmd_flow_context_init(ctx);
5137 /* Return NULL when no more tokens are expected. */
5138 if (!ctx->next_num && ctx->curr) {
5142 /* Determine if command should end here. */
5143 if (ctx->eol && ctx->last && ctx->next_num) {
5144 const enum index *list = ctx->next[ctx->next_num - 1];
5147 for (i = 0; list[i]; ++i) {
5154 *hdr = &cmd_flow_token_hdr;
5157 /** Dispatch parsed buffer to function calls. */
/*
 * Executes a fully-parsed command by routing in->command to the
 * matching port_flow_* backend (case labels elided in this excerpt).
 */
5159 cmd_flow_parsed(const struct buffer *in)
5161 switch (in->command) {
5163 port_flow_validate(in->port, &in->args.vc.attr,
5164 in->args.vc.pattern, in->args.vc.actions);
5167 port_flow_create(in->port, &in->args.vc.attr,
5168 in->args.vc.pattern, in->args.vc.actions);
5171 port_flow_destroy(in->port, in->args.destroy.rule_n,
5172 in->args.destroy.rule);
5175 port_flow_flush(in->port);
5178 port_flow_query(in->port, in->args.query.rule,
5179 &in->args.query.action);
5182 port_flow_list(in->port, in->args.list.group_n,
5183 in->args.list.group);
5186 port_flow_isolate(in->port, in->args.isolate.set);
5193 /** Token generator and output processing callback (cmdline API). */
/*
 * Dual-purpose callback: arg0 carries token-header pointers while the
 * line is being tokenized (-> cmd_flow_tok()) and the completed buffer
 * once parsing finishes (-> cmd_flow_parsed()). NOTE(review): the
 * branch selecting between the two calls is elided in this excerpt.
 */
5195 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5198 cmd_flow_tok(arg0, arg2);
5200 cmd_flow_parsed(arg0);
5203 /** Global parser instance (cmdline API). */
5204 cmdline_parse_inst_t cmd_flow = {
5206 .data = NULL, /**< Unused. */
5207 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5210 }, /**< Tokens are returned by cmd_flow_tok(). */