1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
25 /** Parser token indices. */
48 /* Top-level command. */
/* Sub-level commands. */
54 /* Top-level command. */
56 /* Sub-level commands. */
65 /* Destroy arguments. */
68 /* Query arguments. */
74 /* Validate/create arguments. */
81 /* Validate/create pattern. */
118 ITEM_VLAN_INNER_TYPE,
150 ITEM_E_TAG_GRP_ECID_B,
157 ITEM_GRE_C_RSVD0_VER,
173 ITEM_ARP_ETH_IPV4_SHA,
174 ITEM_ARP_ETH_IPV4_SPA,
175 ITEM_ARP_ETH_IPV4_THA,
176 ITEM_ARP_ETH_IPV4_TPA,
178 ITEM_IPV6_EXT_NEXT_HDR,
183 ITEM_ICMP6_ND_NS_TARGET_ADDR,
185 ITEM_ICMP6_ND_NA_TARGET_ADDR,
187 ITEM_ICMP6_ND_OPT_TYPE,
188 ITEM_ICMP6_ND_OPT_SLA_ETH,
189 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
190 ITEM_ICMP6_ND_OPT_TLA_ETH,
191 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
197 /* Validate/create actions. */
217 ACTION_RSS_FUNC_DEFAULT,
218 ACTION_RSS_FUNC_TOEPLITZ,
219 ACTION_RSS_FUNC_SIMPLE_XOR,
231 ACTION_PHY_PORT_ORIGINAL,
232 ACTION_PHY_PORT_INDEX,
234 ACTION_PORT_ID_ORIGINAL,
238 ACTION_OF_SET_MPLS_TTL,
239 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
240 ACTION_OF_DEC_MPLS_TTL,
241 ACTION_OF_SET_NW_TTL,
242 ACTION_OF_SET_NW_TTL_NW_TTL,
243 ACTION_OF_DEC_NW_TTL,
244 ACTION_OF_COPY_TTL_OUT,
245 ACTION_OF_COPY_TTL_IN,
248 ACTION_OF_PUSH_VLAN_ETHERTYPE,
249 ACTION_OF_SET_VLAN_VID,
250 ACTION_OF_SET_VLAN_VID_VLAN_VID,
251 ACTION_OF_SET_VLAN_PCP,
252 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
254 ACTION_OF_POP_MPLS_ETHERTYPE,
256 ACTION_OF_PUSH_MPLS_ETHERTYPE,
263 ACTION_MPLSOGRE_ENCAP,
264 ACTION_MPLSOGRE_DECAP,
265 ACTION_MPLSOUDP_ENCAP,
266 ACTION_MPLSOUDP_DECAP,
268 ACTION_SET_IPV4_SRC_IPV4_SRC,
270 ACTION_SET_IPV4_DST_IPV4_DST,
272 ACTION_SET_IPV6_SRC_IPV6_SRC,
274 ACTION_SET_IPV6_DST_IPV6_DST,
276 ACTION_SET_TP_SRC_TP_SRC,
278 ACTION_SET_TP_DST_TP_DST,
284 ACTION_SET_MAC_SRC_MAC_SRC,
286 ACTION_SET_MAC_DST_MAC_DST,
288 ACTION_INC_TCP_SEQ_VALUE,
290 ACTION_DEC_TCP_SEQ_VALUE,
292 ACTION_INC_TCP_ACK_VALUE,
294 ACTION_DEC_TCP_ACK_VALUE,
/** Maximum size for pattern in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 40

/**
 * Storage size for struct rte_flow_item_raw including pattern.
 * The pattern bytes are stored immediately after the structure itself,
 * hence the extra ITEM_RAW_PATTERN_SIZE bytes on top of sizeof().
 */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
/**
 * Maximum number of queue indices in struct rte_flow_action_rss.
 * Bounds the queue[] storage array in struct action_rss_data.
 */
#define ACTION_RSS_QUEUE_NUM 32
309 /** Storage for struct rte_flow_action_rss including external data. */
310 struct action_rss_data {
311 struct rte_flow_action_rss conf;
312 uint8_t key[RSS_HASH_KEY_LENGTH];
313 uint16_t queue[ACTION_RSS_QUEUE_NUM];
316 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
317 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
319 #define ACTION_RAW_ENCAP_MAX_DATA 128
321 /** Storage for struct rte_flow_action_raw_encap. */
322 struct raw_encap_conf {
323 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
324 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
328 struct raw_encap_conf raw_encap_conf = {.size = 0};
330 /** Storage for struct rte_flow_action_raw_decap. */
331 struct raw_decap_conf {
332 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
336 struct raw_decap_conf raw_decap_conf = {.size = 0};
338 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
339 struct action_vxlan_encap_data {
340 struct rte_flow_action_vxlan_encap conf;
341 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
342 struct rte_flow_item_eth item_eth;
343 struct rte_flow_item_vlan item_vlan;
345 struct rte_flow_item_ipv4 item_ipv4;
346 struct rte_flow_item_ipv6 item_ipv6;
348 struct rte_flow_item_udp item_udp;
349 struct rte_flow_item_vxlan item_vxlan;
352 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
353 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
355 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
356 struct action_nvgre_encap_data {
357 struct rte_flow_action_nvgre_encap conf;
358 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
359 struct rte_flow_item_eth item_eth;
360 struct rte_flow_item_vlan item_vlan;
362 struct rte_flow_item_ipv4 item_ipv4;
363 struct rte_flow_item_ipv6 item_ipv6;
365 struct rte_flow_item_nvgre item_nvgre;
/** Maximum data size in struct rte_flow_action_raw_encap. */
/*
 * NOTE(review): this is a duplicate of the identical #define earlier in
 * this file (internal line 319). An identical redefinition is legal C and
 * harmless, but one of the two should be removed to avoid divergence.
 */
#define ACTION_RAW_ENCAP_MAX_DATA 128
371 /** Storage for struct rte_flow_action_raw_encap including external data. */
372 struct action_raw_encap_data {
373 struct rte_flow_action_raw_encap conf;
374 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
375 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
378 /** Storage for struct rte_flow_action_raw_decap including external data. */
379 struct action_raw_decap_data {
380 struct rte_flow_action_raw_decap conf;
381 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
/**
 * Maximum number of subsequent tokens and arguments on the stack.
 * Sizes the next[] and args[] arrays in struct context; parsing fails if
 * a command nests deeper than this.
 */
#define CTX_STACK_SIZE 16
387 /** Parser context. */
389 /** Stack of subsequent token lists to process. */
390 const enum index *next[CTX_STACK_SIZE];
391 /** Arguments for stacked tokens. */
392 const void *args[CTX_STACK_SIZE];
393 enum index curr; /**< Current token index. */
394 enum index prev; /**< Index of the last token seen. */
395 int next_num; /**< Number of entries in next[]. */
396 int args_num; /**< Number of entries in args[]. */
397 uint32_t eol:1; /**< EOL has been detected. */
398 uint32_t last:1; /**< No more arguments. */
399 portid_t port; /**< Current port ID (for completions). */
400 uint32_t objdata; /**< Object-specific data. */
401 void *object; /**< Address of current object for relative offsets. */
402 void *objmask; /**< Object a full mask must be written to. */
405 /** Token argument. */
407 uint32_t hton:1; /**< Use network byte ordering. */
408 uint32_t sign:1; /**< Value is signed. */
409 uint32_t bounded:1; /**< Value is bounded. */
410 uintmax_t min; /**< Minimum value if bounded. */
411 uintmax_t max; /**< Maximum value if bounded. */
412 uint32_t offset; /**< Relative offset from ctx->object. */
413 uint32_t size; /**< Field size. */
414 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
417 /** Parser token definition. */
419 /** Type displayed during completion (defaults to "TOKEN"). */
421 /** Help displayed during completion (defaults to token name). */
423 /** Private data used by parser functions. */
426 * Lists of subsequent tokens to push on the stack. Each call to the
427 * parser consumes the last entry of that stack.
429 const enum index *const *next;
430 /** Arguments stack for subsequent tokens that need them. */
431 const struct arg *const *args;
433 * Token-processing callback, returns -1 in case of error, the
434 * length of the matched string otherwise. If NULL, attempts to
435 * match the token name.
437 * If buf is not NULL, the result should be stored in it according
438 * to context. An error is returned if not large enough.
440 int (*call)(struct context *ctx, const struct token *token,
441 const char *str, unsigned int len,
442 void *buf, unsigned int size);
444 * Callback that provides possible values for this token, used for
445 * completion. Returns -1 in case of error, the number of possible
446 * values otherwise. If NULL, the token name is used.
448 * If buf is not NULL, entry index ent is written to buf and the
449 * full length of the entry is returned (same behavior as
452 int (*comp)(struct context *ctx, const struct token *token,
453 unsigned int ent, char *buf, unsigned int size);
454 /** Mandatory token name, no default value. */
/**
 * Static initializer for the next field.
 * Builds a NULL-terminated array of token lists; each entry is itself a
 * NEXT_ENTRY() list.
 */
#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }

/**
 * Static initializer for a NEXT() entry.
 * Builds a single token list terminated by the ZERO sentinel index.
 */
#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }

/**
 * Static initializer for the args field.
 * Builds a NULL-terminated array of argument descriptors matching the
 * tokens pushed by NEXT().
 */
#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
467 /** Static initializer for ARGS() to target a field. */
468 #define ARGS_ENTRY(s, f) \
469 (&(const struct arg){ \
470 .offset = offsetof(s, f), \
471 .size = sizeof(((s *)0)->f), \
474 /** Static initializer for ARGS() to target a bit-field. */
475 #define ARGS_ENTRY_BF(s, f, b) \
476 (&(const struct arg){ \
478 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
481 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
482 #define ARGS_ENTRY_MASK(s, f, m) \
483 (&(const struct arg){ \
484 .offset = offsetof(s, f), \
485 .size = sizeof(((s *)0)->f), \
486 .mask = (const void *)(m), \
489 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
490 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
491 (&(const struct arg){ \
493 .offset = offsetof(s, f), \
494 .size = sizeof(((s *)0)->f), \
495 .mask = (const void *)(m), \
498 /** Static initializer for ARGS() to target a pointer. */
499 #define ARGS_ENTRY_PTR(s, f) \
500 (&(const struct arg){ \
501 .size = sizeof(*((s *)0)->f), \
504 /** Static initializer for ARGS() with arbitrary offset and size. */
505 #define ARGS_ENTRY_ARB(o, s) \
506 (&(const struct arg){ \
511 /** Same as ARGS_ENTRY_ARB() with bounded values. */
512 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
513 (&(const struct arg){ \
521 /** Same as ARGS_ENTRY() using network byte ordering. */
522 #define ARGS_ENTRY_HTON(s, f) \
523 (&(const struct arg){ \
525 .offset = offsetof(s, f), \
526 .size = sizeof(((s *)0)->f), \
529 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
530 #define ARG_ENTRY_HTON(s) \
531 (&(const struct arg){ \
537 /** Parser output buffer layout expected by cmd_flow_parsed(). */
539 enum index command; /**< Flow command. */
540 portid_t port; /**< Affected port ID. */
543 struct rte_flow_attr attr;
544 struct rte_flow_item *pattern;
545 struct rte_flow_action *actions;
549 } vc; /**< Validate/create arguments. */
553 } destroy; /**< Destroy arguments. */
556 struct rte_flow_action action;
557 } query; /**< Query arguments. */
561 } list; /**< List arguments. */
564 } isolate; /**< Isolated mode arguments. */
565 } args; /**< Command arguments. */
568 /** Private data for pattern items. */
569 struct parse_item_priv {
570 enum rte_flow_item_type type; /**< Item type. */
571 uint32_t size; /**< Size of item specification structure. */
574 #define PRIV_ITEM(t, s) \
575 (&(const struct parse_item_priv){ \
576 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
580 /** Private data for actions. */
581 struct parse_action_priv {
582 enum rte_flow_action_type type; /**< Action type. */
583 uint32_t size; /**< Size of action configuration structure. */
586 #define PRIV_ACTION(t, s) \
587 (&(const struct parse_action_priv){ \
588 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
592 static const enum index next_vc_attr[] = {
602 static const enum index next_destroy_attr[] = {
608 static const enum index next_list_attr[] = {
614 static const enum index item_param[] = {
623 static const enum index next_item[] = {
659 ITEM_ICMP6_ND_OPT_SLA_ETH,
660 ITEM_ICMP6_ND_OPT_TLA_ETH,
667 static const enum index item_fuzzy[] = {
673 static const enum index item_any[] = {
679 static const enum index item_vf[] = {
685 static const enum index item_phy_port[] = {
691 static const enum index item_port_id[] = {
697 static const enum index item_mark[] = {
703 static const enum index item_raw[] = {
713 static const enum index item_eth[] = {
721 static const enum index item_vlan[] = {
726 ITEM_VLAN_INNER_TYPE,
731 static const enum index item_ipv4[] = {
741 static const enum index item_ipv6[] = {
752 static const enum index item_icmp[] = {
759 static const enum index item_udp[] = {
766 static const enum index item_tcp[] = {
774 static const enum index item_sctp[] = {
783 static const enum index item_vxlan[] = {
789 static const enum index item_e_tag[] = {
790 ITEM_E_TAG_GRP_ECID_B,
795 static const enum index item_nvgre[] = {
801 static const enum index item_mpls[] = {
807 static const enum index item_gre[] = {
809 ITEM_GRE_C_RSVD0_VER,
817 static const enum index item_gre_key[] = {
823 static const enum index item_gtp[] = {
829 static const enum index item_geneve[] = {
836 static const enum index item_vxlan_gpe[] = {
842 static const enum index item_arp_eth_ipv4[] = {
843 ITEM_ARP_ETH_IPV4_SHA,
844 ITEM_ARP_ETH_IPV4_SPA,
845 ITEM_ARP_ETH_IPV4_THA,
846 ITEM_ARP_ETH_IPV4_TPA,
851 static const enum index item_ipv6_ext[] = {
852 ITEM_IPV6_EXT_NEXT_HDR,
857 static const enum index item_icmp6[] = {
864 static const enum index item_icmp6_nd_ns[] = {
865 ITEM_ICMP6_ND_NS_TARGET_ADDR,
870 static const enum index item_icmp6_nd_na[] = {
871 ITEM_ICMP6_ND_NA_TARGET_ADDR,
876 static const enum index item_icmp6_nd_opt[] = {
877 ITEM_ICMP6_ND_OPT_TYPE,
882 static const enum index item_icmp6_nd_opt_sla_eth[] = {
883 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
888 static const enum index item_icmp6_nd_opt_tla_eth[] = {
889 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
894 static const enum index item_meta[] = {
900 static const enum index next_action[] = {
916 ACTION_OF_SET_MPLS_TTL,
917 ACTION_OF_DEC_MPLS_TTL,
918 ACTION_OF_SET_NW_TTL,
919 ACTION_OF_DEC_NW_TTL,
920 ACTION_OF_COPY_TTL_OUT,
921 ACTION_OF_COPY_TTL_IN,
924 ACTION_OF_SET_VLAN_VID,
925 ACTION_OF_SET_VLAN_PCP,
934 ACTION_MPLSOGRE_ENCAP,
935 ACTION_MPLSOGRE_DECAP,
936 ACTION_MPLSOUDP_ENCAP,
937 ACTION_MPLSOUDP_DECAP,
958 static const enum index action_mark[] = {
964 static const enum index action_queue[] = {
970 static const enum index action_count[] = {
977 static const enum index action_rss[] = {
988 static const enum index action_vf[] = {
995 static const enum index action_phy_port[] = {
996 ACTION_PHY_PORT_ORIGINAL,
997 ACTION_PHY_PORT_INDEX,
1002 static const enum index action_port_id[] = {
1003 ACTION_PORT_ID_ORIGINAL,
1009 static const enum index action_meter[] = {
1015 static const enum index action_of_set_mpls_ttl[] = {
1016 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1021 static const enum index action_of_set_nw_ttl[] = {
1022 ACTION_OF_SET_NW_TTL_NW_TTL,
1027 static const enum index action_of_push_vlan[] = {
1028 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1033 static const enum index action_of_set_vlan_vid[] = {
1034 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1039 static const enum index action_of_set_vlan_pcp[] = {
1040 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1045 static const enum index action_of_pop_mpls[] = {
1046 ACTION_OF_POP_MPLS_ETHERTYPE,
1051 static const enum index action_of_push_mpls[] = {
1052 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1057 static const enum index action_set_ipv4_src[] = {
1058 ACTION_SET_IPV4_SRC_IPV4_SRC,
1063 static const enum index action_set_mac_src[] = {
1064 ACTION_SET_MAC_SRC_MAC_SRC,
1069 static const enum index action_set_ipv4_dst[] = {
1070 ACTION_SET_IPV4_DST_IPV4_DST,
1075 static const enum index action_set_ipv6_src[] = {
1076 ACTION_SET_IPV6_SRC_IPV6_SRC,
1081 static const enum index action_set_ipv6_dst[] = {
1082 ACTION_SET_IPV6_DST_IPV6_DST,
1087 static const enum index action_set_tp_src[] = {
1088 ACTION_SET_TP_SRC_TP_SRC,
1093 static const enum index action_set_tp_dst[] = {
1094 ACTION_SET_TP_DST_TP_DST,
1099 static const enum index action_set_ttl[] = {
1105 static const enum index action_jump[] = {
1111 static const enum index action_set_mac_dst[] = {
1112 ACTION_SET_MAC_DST_MAC_DST,
1117 static const enum index action_inc_tcp_seq[] = {
1118 ACTION_INC_TCP_SEQ_VALUE,
1123 static const enum index action_dec_tcp_seq[] = {
1124 ACTION_DEC_TCP_SEQ_VALUE,
1129 static const enum index action_inc_tcp_ack[] = {
1130 ACTION_INC_TCP_ACK_VALUE,
1135 static const enum index action_dec_tcp_ack[] = {
1136 ACTION_DEC_TCP_ACK_VALUE,
1141 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1142 const char *, unsigned int,
1143 void *, unsigned int);
1144 static int parse_set_init(struct context *, const struct token *,
1145 const char *, unsigned int,
1146 void *, unsigned int);
1147 static int parse_init(struct context *, const struct token *,
1148 const char *, unsigned int,
1149 void *, unsigned int);
1150 static int parse_vc(struct context *, const struct token *,
1151 const char *, unsigned int,
1152 void *, unsigned int);
1153 static int parse_vc_spec(struct context *, const struct token *,
1154 const char *, unsigned int, void *, unsigned int);
1155 static int parse_vc_conf(struct context *, const struct token *,
1156 const char *, unsigned int, void *, unsigned int);
1157 static int parse_vc_action_rss(struct context *, const struct token *,
1158 const char *, unsigned int, void *,
1160 static int parse_vc_action_rss_func(struct context *, const struct token *,
1161 const char *, unsigned int, void *,
1163 static int parse_vc_action_rss_type(struct context *, const struct token *,
1164 const char *, unsigned int, void *,
1166 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1167 const char *, unsigned int, void *,
1169 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1170 const char *, unsigned int, void *,
1172 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1173 const char *, unsigned int, void *,
1175 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1176 const char *, unsigned int, void *,
1178 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1179 const char *, unsigned int, void *,
1181 static int parse_vc_action_mplsogre_encap(struct context *,
1182 const struct token *, const char *,
1183 unsigned int, void *, unsigned int);
1184 static int parse_vc_action_mplsogre_decap(struct context *,
1185 const struct token *, const char *,
1186 unsigned int, void *, unsigned int);
1187 static int parse_vc_action_mplsoudp_encap(struct context *,
1188 const struct token *, const char *,
1189 unsigned int, void *, unsigned int);
1190 static int parse_vc_action_mplsoudp_decap(struct context *,
1191 const struct token *, const char *,
1192 unsigned int, void *, unsigned int);
1193 static int parse_vc_action_raw_encap(struct context *,
1194 const struct token *, const char *,
1195 unsigned int, void *, unsigned int);
1196 static int parse_vc_action_raw_decap(struct context *,
1197 const struct token *, const char *,
1198 unsigned int, void *, unsigned int);
1199 static int parse_destroy(struct context *, const struct token *,
1200 const char *, unsigned int,
1201 void *, unsigned int);
1202 static int parse_flush(struct context *, const struct token *,
1203 const char *, unsigned int,
1204 void *, unsigned int);
1205 static int parse_query(struct context *, const struct token *,
1206 const char *, unsigned int,
1207 void *, unsigned int);
1208 static int parse_action(struct context *, const struct token *,
1209 const char *, unsigned int,
1210 void *, unsigned int);
1211 static int parse_list(struct context *, const struct token *,
1212 const char *, unsigned int,
1213 void *, unsigned int);
1214 static int parse_isolate(struct context *, const struct token *,
1215 const char *, unsigned int,
1216 void *, unsigned int);
1217 static int parse_int(struct context *, const struct token *,
1218 const char *, unsigned int,
1219 void *, unsigned int);
1220 static int parse_prefix(struct context *, const struct token *,
1221 const char *, unsigned int,
1222 void *, unsigned int);
1223 static int parse_boolean(struct context *, const struct token *,
1224 const char *, unsigned int,
1225 void *, unsigned int);
1226 static int parse_string(struct context *, const struct token *,
1227 const char *, unsigned int,
1228 void *, unsigned int);
1229 static int parse_hex(struct context *ctx, const struct token *token,
1230 const char *str, unsigned int len,
1231 void *buf, unsigned int size);
1232 static int parse_mac_addr(struct context *, const struct token *,
1233 const char *, unsigned int,
1234 void *, unsigned int);
1235 static int parse_ipv4_addr(struct context *, const struct token *,
1236 const char *, unsigned int,
1237 void *, unsigned int);
1238 static int parse_ipv6_addr(struct context *, const struct token *,
1239 const char *, unsigned int,
1240 void *, unsigned int);
1241 static int parse_port(struct context *, const struct token *,
1242 const char *, unsigned int,
1243 void *, unsigned int);
1244 static int comp_none(struct context *, const struct token *,
1245 unsigned int, char *, unsigned int);
1246 static int comp_boolean(struct context *, const struct token *,
1247 unsigned int, char *, unsigned int);
1248 static int comp_action(struct context *, const struct token *,
1249 unsigned int, char *, unsigned int);
1250 static int comp_port(struct context *, const struct token *,
1251 unsigned int, char *, unsigned int);
1252 static int comp_rule_id(struct context *, const struct token *,
1253 unsigned int, char *, unsigned int);
1254 static int comp_vc_action_rss_type(struct context *, const struct token *,
1255 unsigned int, char *, unsigned int);
1256 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1257 unsigned int, char *, unsigned int);
1259 /** Token definitions. */
1260 static const struct token token_list[] = {
1261 /* Special tokens. */
1264 .help = "null entry, abused as the entry point",
1265 .next = NEXT(NEXT_ENTRY(FLOW)),
1270 .help = "command may end here",
1273 .name = "START_SET",
1274 .help = "null entry, abused as the entry point for set",
1275 .next = NEXT(NEXT_ENTRY(SET)),
1280 .help = "set command may end here",
1282 /* Common tokens. */
1286 .help = "integer value",
1291 .name = "{unsigned}",
1293 .help = "unsigned integer value",
1300 .help = "prefix length for bit-mask",
1301 .call = parse_prefix,
1305 .name = "{boolean}",
1307 .help = "any boolean value",
1308 .call = parse_boolean,
1309 .comp = comp_boolean,
1314 .help = "fixed string",
1315 .call = parse_string,
1321 .help = "fixed string",
1326 .name = "{MAC address}",
1328 .help = "standard MAC address notation",
1329 .call = parse_mac_addr,
1333 .name = "{IPv4 address}",
1334 .type = "IPV4 ADDRESS",
1335 .help = "standard IPv4 address notation",
1336 .call = parse_ipv4_addr,
1340 .name = "{IPv6 address}",
1341 .type = "IPV6 ADDRESS",
1342 .help = "standard IPv6 address notation",
1343 .call = parse_ipv6_addr,
1347 .name = "{rule id}",
1349 .help = "rule identifier",
1351 .comp = comp_rule_id,
1354 .name = "{port_id}",
1356 .help = "port identifier",
1361 .name = "{group_id}",
1363 .help = "group identifier",
1367 [PRIORITY_LEVEL] = {
1370 .help = "priority level",
1374 /* Top-level command. */
1377 .type = "{command} {port_id} [{arg} [...]]",
1378 .help = "manage ingress/egress flow rules",
1379 .next = NEXT(NEXT_ENTRY
1389 /* Sub-level commands. */
1392 .help = "check whether a flow rule can be created",
1393 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1394 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1399 .help = "create a flow rule",
1400 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1401 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1406 .help = "destroy specific flow rules",
1407 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1408 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1409 .call = parse_destroy,
1413 .help = "destroy all flow rules",
1414 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1415 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1416 .call = parse_flush,
1420 .help = "query an existing flow rule",
1421 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1422 NEXT_ENTRY(RULE_ID),
1423 NEXT_ENTRY(PORT_ID)),
1424 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1425 ARGS_ENTRY(struct buffer, args.query.rule),
1426 ARGS_ENTRY(struct buffer, port)),
1427 .call = parse_query,
1431 .help = "list existing flow rules",
1432 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1433 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1438 .help = "restrict ingress traffic to the defined flow rules",
1439 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1440 NEXT_ENTRY(PORT_ID)),
1441 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1442 ARGS_ENTRY(struct buffer, port)),
1443 .call = parse_isolate,
1445 /* Destroy arguments. */
1448 .help = "specify a rule identifier",
1449 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1450 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1451 .call = parse_destroy,
1453 /* Query arguments. */
1457 .help = "action to query, must be part of the rule",
1458 .call = parse_action,
1459 .comp = comp_action,
1461 /* List arguments. */
1464 .help = "specify a group",
1465 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1466 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1469 /* Validate/create attributes. */
1472 .help = "specify a group",
1473 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1474 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1479 .help = "specify a priority level",
1480 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1481 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1486 .help = "affect rule to ingress",
1487 .next = NEXT(next_vc_attr),
1492 .help = "affect rule to egress",
1493 .next = NEXT(next_vc_attr),
1498 .help = "apply rule directly to endpoints found in pattern",
1499 .next = NEXT(next_vc_attr),
1502 /* Validate/create pattern. */
1505 .help = "submit a list of pattern items",
1506 .next = NEXT(next_item),
1511 .help = "match value perfectly (with full bit-mask)",
1512 .call = parse_vc_spec,
1514 [ITEM_PARAM_SPEC] = {
1516 .help = "match value according to configured bit-mask",
1517 .call = parse_vc_spec,
1519 [ITEM_PARAM_LAST] = {
1521 .help = "specify upper bound to establish a range",
1522 .call = parse_vc_spec,
1524 [ITEM_PARAM_MASK] = {
1526 .help = "specify bit-mask with relevant bits set to one",
1527 .call = parse_vc_spec,
1529 [ITEM_PARAM_PREFIX] = {
1531 .help = "generate bit-mask from a prefix length",
1532 .call = parse_vc_spec,
1536 .help = "specify next pattern item",
1537 .next = NEXT(next_item),
1541 .help = "end list of pattern items",
1542 .priv = PRIV_ITEM(END, 0),
1543 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1548 .help = "no-op pattern item",
1549 .priv = PRIV_ITEM(VOID, 0),
1550 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1555 .help = "perform actions when pattern does not match",
1556 .priv = PRIV_ITEM(INVERT, 0),
1557 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1562 .help = "match any protocol for the current layer",
1563 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1564 .next = NEXT(item_any),
1569 .help = "number of layers covered",
1570 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1571 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1575 .help = "match traffic from/to the physical function",
1576 .priv = PRIV_ITEM(PF, 0),
1577 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1582 .help = "match traffic from/to a virtual function ID",
1583 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1584 .next = NEXT(item_vf),
1590 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1591 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1595 .help = "match traffic from/to a specific physical port",
1596 .priv = PRIV_ITEM(PHY_PORT,
1597 sizeof(struct rte_flow_item_phy_port)),
1598 .next = NEXT(item_phy_port),
1601 [ITEM_PHY_PORT_INDEX] = {
1603 .help = "physical port index",
1604 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1605 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1609 .help = "match traffic from/to a given DPDK port ID",
1610 .priv = PRIV_ITEM(PORT_ID,
1611 sizeof(struct rte_flow_item_port_id)),
1612 .next = NEXT(item_port_id),
1615 [ITEM_PORT_ID_ID] = {
1617 .help = "DPDK port ID",
1618 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1619 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1623 .help = "match traffic against value set in previously matched rule",
1624 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1625 .next = NEXT(item_mark),
1630 .help = "Integer value to match against",
1631 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1632 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1636 .help = "match an arbitrary byte string",
1637 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1638 .next = NEXT(item_raw),
1641 [ITEM_RAW_RELATIVE] = {
1643 .help = "look for pattern after the previous item",
1644 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1645 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1648 [ITEM_RAW_SEARCH] = {
1650 .help = "search pattern from offset (see also limit)",
1651 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1652 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1655 [ITEM_RAW_OFFSET] = {
1657 .help = "absolute or relative offset for pattern",
1658 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1659 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1661 [ITEM_RAW_LIMIT] = {
1663 .help = "search area limit for start of pattern",
1664 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1665 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1667 [ITEM_RAW_PATTERN] = {
1669 .help = "byte string to look for",
1670 .next = NEXT(item_raw,
1672 NEXT_ENTRY(ITEM_PARAM_IS,
1675 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1676 ARGS_ENTRY(struct rte_flow_item_raw, length),
1677 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1678 ITEM_RAW_PATTERN_SIZE)),
1682 .help = "match Ethernet header",
1683 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1684 .next = NEXT(item_eth),
1689 .help = "destination MAC",
1690 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1691 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1695 .help = "source MAC",
1696 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1697 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1701 .help = "EtherType",
1702 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1703 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1707 .help = "match 802.1Q/ad VLAN tag",
1708 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1709 .next = NEXT(item_vlan),
1714 .help = "tag control information",
1715 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1716 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1720 .help = "priority code point",
1721 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1722 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1727 .help = "drop eligible indicator",
1728 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1729 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1734 .help = "VLAN identifier",
1735 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1736 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1739 [ITEM_VLAN_INNER_TYPE] = {
1740 .name = "inner_type",
1741 .help = "inner EtherType",
1742 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1743 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1748 .help = "match IPv4 header",
1749 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1750 .next = NEXT(item_ipv4),
1755 .help = "type of service",
1756 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1757 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1758 hdr.type_of_service)),
1762 .help = "time to live",
1763 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1764 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1767 [ITEM_IPV4_PROTO] = {
1769 .help = "next protocol ID",
1770 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1771 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1772 hdr.next_proto_id)),
1776 .help = "source address",
1777 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1778 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1783 .help = "destination address",
1784 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1785 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1790 .help = "match IPv6 header",
1791 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1792 .next = NEXT(item_ipv6),
1797 .help = "traffic class",
1798 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1799 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1801 "\x0f\xf0\x00\x00")),
1803 [ITEM_IPV6_FLOW] = {
1805 .help = "flow label",
1806 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1807 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1809 "\x00\x0f\xff\xff")),
1811 [ITEM_IPV6_PROTO] = {
1813 .help = "protocol (next header)",
1814 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1815 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1820 .help = "hop limit",
1821 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1822 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1827 .help = "source address",
1828 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1829 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1834 .help = "destination address",
1835 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1836 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1841 .help = "match ICMP header",
1842 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1843 .next = NEXT(item_icmp),
1846 [ITEM_ICMP_TYPE] = {
1848 .help = "ICMP packet type",
1849 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1850 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1853 [ITEM_ICMP_CODE] = {
1855 .help = "ICMP packet code",
1856 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1857 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1862 .help = "match UDP header",
1863 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1864 .next = NEXT(item_udp),
1869 .help = "UDP source port",
1870 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1871 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1876 .help = "UDP destination port",
1877 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1878 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1883 .help = "match TCP header",
1884 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1885 .next = NEXT(item_tcp),
1890 .help = "TCP source port",
1891 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1892 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1897 .help = "TCP destination port",
1898 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1899 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1902 [ITEM_TCP_FLAGS] = {
1904 .help = "TCP flags",
1905 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1906 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1911 .help = "match SCTP header",
1912 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1913 .next = NEXT(item_sctp),
1918 .help = "SCTP source port",
1919 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1920 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1925 .help = "SCTP destination port",
1926 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1927 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1932 .help = "validation tag",
1933 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1934 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1937 [ITEM_SCTP_CKSUM] = {
1940 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1941 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1946 .help = "match VXLAN header",
1947 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1948 .next = NEXT(item_vxlan),
1951 [ITEM_VXLAN_VNI] = {
1953 .help = "VXLAN identifier",
1954 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1955 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1959 .help = "match E-Tag header",
1960 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1961 .next = NEXT(item_e_tag),
1964 [ITEM_E_TAG_GRP_ECID_B] = {
1965 .name = "grp_ecid_b",
1966 .help = "GRP and E-CID base",
1967 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1968 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1974 .help = "match NVGRE header",
1975 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1976 .next = NEXT(item_nvgre),
1979 [ITEM_NVGRE_TNI] = {
1981 .help = "virtual subnet ID",
1982 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1983 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1987 .help = "match MPLS header",
1988 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1989 .next = NEXT(item_mpls),
1992 [ITEM_MPLS_LABEL] = {
1994 .help = "MPLS label",
1995 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1996 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2002 .help = "match GRE header",
2003 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2004 .next = NEXT(item_gre),
2007 [ITEM_GRE_PROTO] = {
2009 .help = "GRE protocol type",
2010 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2011 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2014 [ITEM_GRE_C_RSVD0_VER] = {
2015 .name = "c_rsvd0_ver",
2017 "checksum (1b), undefined (1b), key bit (1b),"
2018 " sequence number (1b), reserved 0 (9b),"
2020 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2021 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2024 [ITEM_GRE_C_BIT] = {
2026 .help = "checksum bit (C)",
2027 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2028 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2030 "\x80\x00\x00\x00")),
2032 [ITEM_GRE_S_BIT] = {
2034 .help = "sequence number bit (S)",
2035 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2036 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2038 "\x10\x00\x00\x00")),
2040 [ITEM_GRE_K_BIT] = {
2042 .help = "key bit (K)",
2043 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2044 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2046 "\x20\x00\x00\x00")),
2050 .help = "fuzzy pattern match, expect faster than default",
2051 .priv = PRIV_ITEM(FUZZY,
2052 sizeof(struct rte_flow_item_fuzzy)),
2053 .next = NEXT(item_fuzzy),
2056 [ITEM_FUZZY_THRESH] = {
2058 .help = "match accuracy threshold",
2059 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2060 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2065 .help = "match GTP header",
2066 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2067 .next = NEXT(item_gtp),
2072 .help = "tunnel endpoint identifier",
2073 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2074 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2078 .help = "match GTP header",
2079 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2080 .next = NEXT(item_gtp),
2085 .help = "match GTP header",
2086 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2087 .next = NEXT(item_gtp),
2092 .help = "match GENEVE header",
2093 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2094 .next = NEXT(item_geneve),
2097 [ITEM_GENEVE_VNI] = {
2099 .help = "virtual network identifier",
2100 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2101 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2103 [ITEM_GENEVE_PROTO] = {
2105 .help = "GENEVE protocol type",
2106 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2107 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2110 [ITEM_VXLAN_GPE] = {
2111 .name = "vxlan-gpe",
2112 .help = "match VXLAN-GPE header",
2113 .priv = PRIV_ITEM(VXLAN_GPE,
2114 sizeof(struct rte_flow_item_vxlan_gpe)),
2115 .next = NEXT(item_vxlan_gpe),
2118 [ITEM_VXLAN_GPE_VNI] = {
2120 .help = "VXLAN-GPE identifier",
2121 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2122 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2125 [ITEM_ARP_ETH_IPV4] = {
2126 .name = "arp_eth_ipv4",
2127 .help = "match ARP header for Ethernet/IPv4",
2128 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2129 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2130 .next = NEXT(item_arp_eth_ipv4),
2133 [ITEM_ARP_ETH_IPV4_SHA] = {
2135 .help = "sender hardware address",
2136 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2138 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2141 [ITEM_ARP_ETH_IPV4_SPA] = {
2143 .help = "sender IPv4 address",
2144 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2146 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2149 [ITEM_ARP_ETH_IPV4_THA] = {
2151 .help = "target hardware address",
2152 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2154 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2157 [ITEM_ARP_ETH_IPV4_TPA] = {
2159 .help = "target IPv4 address",
2160 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2162 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2167 .help = "match presence of any IPv6 extension header",
2168 .priv = PRIV_ITEM(IPV6_EXT,
2169 sizeof(struct rte_flow_item_ipv6_ext)),
2170 .next = NEXT(item_ipv6_ext),
2173 [ITEM_IPV6_EXT_NEXT_HDR] = {
2175 .help = "next header",
2176 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2177 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2182 .help = "match any ICMPv6 header",
2183 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2184 .next = NEXT(item_icmp6),
2187 [ITEM_ICMP6_TYPE] = {
2189 .help = "ICMPv6 type",
2190 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2191 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2194 [ITEM_ICMP6_CODE] = {
2196 .help = "ICMPv6 code",
2197 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2198 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2201 [ITEM_ICMP6_ND_NS] = {
2202 .name = "icmp6_nd_ns",
2203 .help = "match ICMPv6 neighbor discovery solicitation",
2204 .priv = PRIV_ITEM(ICMP6_ND_NS,
2205 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2206 .next = NEXT(item_icmp6_nd_ns),
2209 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2210 .name = "target_addr",
2211 .help = "target address",
2212 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2214 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2217 [ITEM_ICMP6_ND_NA] = {
2218 .name = "icmp6_nd_na",
2219 .help = "match ICMPv6 neighbor discovery advertisement",
2220 .priv = PRIV_ITEM(ICMP6_ND_NA,
2221 sizeof(struct rte_flow_item_icmp6_nd_na)),
2222 .next = NEXT(item_icmp6_nd_na),
2225 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2226 .name = "target_addr",
2227 .help = "target address",
2228 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2233 [ITEM_ICMP6_ND_OPT] = {
2234 .name = "icmp6_nd_opt",
2235 .help = "match presence of any ICMPv6 neighbor discovery"
2237 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2238 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2239 .next = NEXT(item_icmp6_nd_opt),
2242 [ITEM_ICMP6_ND_OPT_TYPE] = {
2244 .help = "ND option type",
2245 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2247 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2250 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2251 .name = "icmp6_nd_opt_sla_eth",
2252 .help = "match ICMPv6 neighbor discovery source Ethernet"
2253 " link-layer address option",
2255 (ICMP6_ND_OPT_SLA_ETH,
2256 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2257 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2260 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2262 .help = "source Ethernet LLA",
2263 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2265 .args = ARGS(ARGS_ENTRY_HTON
2266 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2268 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2269 .name = "icmp6_nd_opt_tla_eth",
2270 .help = "match ICMPv6 neighbor discovery target Ethernet"
2271 " link-layer address option",
2273 (ICMP6_ND_OPT_TLA_ETH,
2274 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2275 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2278 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2280 .help = "target Ethernet LLA",
2281 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2283 .args = ARGS(ARGS_ENTRY_HTON
2284 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2288 .help = "match metadata header",
2289 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2290 .next = NEXT(item_meta),
2293 [ITEM_META_DATA] = {
2295 .help = "metadata value",
2296 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2297 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2298 data, "\xff\xff\xff\xff")),
2302 .help = "match GRE key",
2303 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2304 .next = NEXT(item_gre_key),
2307 [ITEM_GRE_KEY_VALUE] = {
2309 .help = "key value",
2310 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2311 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2314 /* Validate/create actions. */
2317 .help = "submit a list of associated actions",
2318 .next = NEXT(next_action),
2323 .help = "specify next action",
2324 .next = NEXT(next_action),
2328 .help = "end list of actions",
2329 .priv = PRIV_ACTION(END, 0),
2334 .help = "no-op action",
2335 .priv = PRIV_ACTION(VOID, 0),
2336 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2339 [ACTION_PASSTHRU] = {
2341 .help = "let subsequent rule process matched packets",
2342 .priv = PRIV_ACTION(PASSTHRU, 0),
2343 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2348 .help = "redirect traffic to a given group",
2349 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2350 .next = NEXT(action_jump),
2353 [ACTION_JUMP_GROUP] = {
2355 .help = "group to redirect traffic to",
2356 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2357 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2358 .call = parse_vc_conf,
2362 .help = "attach 32 bit value to packets",
2363 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2364 .next = NEXT(action_mark),
2367 [ACTION_MARK_ID] = {
2369 .help = "32 bit value to return with packets",
2370 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2371 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2372 .call = parse_vc_conf,
2376 .help = "flag packets",
2377 .priv = PRIV_ACTION(FLAG, 0),
2378 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2383 .help = "assign packets to a given queue index",
2384 .priv = PRIV_ACTION(QUEUE,
2385 sizeof(struct rte_flow_action_queue)),
2386 .next = NEXT(action_queue),
2389 [ACTION_QUEUE_INDEX] = {
2391 .help = "queue index to use",
2392 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2393 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2394 .call = parse_vc_conf,
2398 .help = "drop packets (note: passthru has priority)",
2399 .priv = PRIV_ACTION(DROP, 0),
2400 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2405 .help = "enable counters for this rule",
2406 .priv = PRIV_ACTION(COUNT,
2407 sizeof(struct rte_flow_action_count)),
2408 .next = NEXT(action_count),
2411 [ACTION_COUNT_ID] = {
2412 .name = "identifier",
2413 .help = "counter identifier to use",
2414 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2415 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2416 .call = parse_vc_conf,
2418 [ACTION_COUNT_SHARED] = {
2420 .help = "shared counter",
2421 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2422 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2424 .call = parse_vc_conf,
2428 .help = "spread packets among several queues",
2429 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2430 .next = NEXT(action_rss),
2431 .call = parse_vc_action_rss,
2433 [ACTION_RSS_FUNC] = {
2435 .help = "RSS hash function to apply",
2436 .next = NEXT(action_rss,
2437 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2438 ACTION_RSS_FUNC_TOEPLITZ,
2439 ACTION_RSS_FUNC_SIMPLE_XOR)),
2441 [ACTION_RSS_FUNC_DEFAULT] = {
2443 .help = "default hash function",
2444 .call = parse_vc_action_rss_func,
2446 [ACTION_RSS_FUNC_TOEPLITZ] = {
2448 .help = "Toeplitz hash function",
2449 .call = parse_vc_action_rss_func,
2451 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2452 .name = "simple_xor",
2453 .help = "simple XOR hash function",
2454 .call = parse_vc_action_rss_func,
2456 [ACTION_RSS_LEVEL] = {
2458 .help = "encapsulation level for \"types\"",
2459 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2460 .args = ARGS(ARGS_ENTRY_ARB
2461 (offsetof(struct action_rss_data, conf) +
2462 offsetof(struct rte_flow_action_rss, level),
2463 sizeof(((struct rte_flow_action_rss *)0)->
2466 [ACTION_RSS_TYPES] = {
2468 .help = "specific RSS hash types",
2469 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2471 [ACTION_RSS_TYPE] = {
2473 .help = "RSS hash type",
2474 .call = parse_vc_action_rss_type,
2475 .comp = comp_vc_action_rss_type,
2477 [ACTION_RSS_KEY] = {
2479 .help = "RSS hash key",
2480 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2481 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2483 (offsetof(struct action_rss_data, conf) +
2484 offsetof(struct rte_flow_action_rss, key_len),
2485 sizeof(((struct rte_flow_action_rss *)0)->
2487 ARGS_ENTRY(struct action_rss_data, key)),
2489 [ACTION_RSS_KEY_LEN] = {
2491 .help = "RSS hash key length in bytes",
2492 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2493 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2494 (offsetof(struct action_rss_data, conf) +
2495 offsetof(struct rte_flow_action_rss, key_len),
2496 sizeof(((struct rte_flow_action_rss *)0)->
2499 RSS_HASH_KEY_LENGTH)),
2501 [ACTION_RSS_QUEUES] = {
2503 .help = "queue indices to use",
2504 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2505 .call = parse_vc_conf,
2507 [ACTION_RSS_QUEUE] = {
2509 .help = "queue index",
2510 .call = parse_vc_action_rss_queue,
2511 .comp = comp_vc_action_rss_queue,
2515 .help = "direct traffic to physical function",
2516 .priv = PRIV_ACTION(PF, 0),
2517 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2522 .help = "direct traffic to a virtual function ID",
2523 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2524 .next = NEXT(action_vf),
2527 [ACTION_VF_ORIGINAL] = {
2529 .help = "use original VF ID if possible",
2530 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2531 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2533 .call = parse_vc_conf,
2538 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2539 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2540 .call = parse_vc_conf,
2542 [ACTION_PHY_PORT] = {
2544 .help = "direct packets to physical port index",
2545 .priv = PRIV_ACTION(PHY_PORT,
2546 sizeof(struct rte_flow_action_phy_port)),
2547 .next = NEXT(action_phy_port),
2550 [ACTION_PHY_PORT_ORIGINAL] = {
2552 .help = "use original port index if possible",
2553 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2554 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2556 .call = parse_vc_conf,
2558 [ACTION_PHY_PORT_INDEX] = {
2560 .help = "physical port index",
2561 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2562 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2564 .call = parse_vc_conf,
2566 [ACTION_PORT_ID] = {
2568 .help = "direct matching traffic to a given DPDK port ID",
2569 .priv = PRIV_ACTION(PORT_ID,
2570 sizeof(struct rte_flow_action_port_id)),
2571 .next = NEXT(action_port_id),
2574 [ACTION_PORT_ID_ORIGINAL] = {
2576 .help = "use original DPDK port ID if possible",
2577 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2578 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2580 .call = parse_vc_conf,
2582 [ACTION_PORT_ID_ID] = {
2584 .help = "DPDK port ID",
2585 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2586 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2587 .call = parse_vc_conf,
2591 .help = "meter the directed packets at given id",
2592 .priv = PRIV_ACTION(METER,
2593 sizeof(struct rte_flow_action_meter)),
2594 .next = NEXT(action_meter),
2597 [ACTION_METER_ID] = {
2599 .help = "meter id to use",
2600 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2601 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2602 .call = parse_vc_conf,
2604 [ACTION_OF_SET_MPLS_TTL] = {
2605 .name = "of_set_mpls_ttl",
2606 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2609 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2610 .next = NEXT(action_of_set_mpls_ttl),
2613 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2616 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2617 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2619 .call = parse_vc_conf,
2621 [ACTION_OF_DEC_MPLS_TTL] = {
2622 .name = "of_dec_mpls_ttl",
2623 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2624 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2625 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2628 [ACTION_OF_SET_NW_TTL] = {
2629 .name = "of_set_nw_ttl",
2630 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2633 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2634 .next = NEXT(action_of_set_nw_ttl),
2637 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2640 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2641 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2643 .call = parse_vc_conf,
2645 [ACTION_OF_DEC_NW_TTL] = {
2646 .name = "of_dec_nw_ttl",
2647 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2648 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2649 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2652 [ACTION_OF_COPY_TTL_OUT] = {
2653 .name = "of_copy_ttl_out",
2654 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2655 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2656 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2659 [ACTION_OF_COPY_TTL_IN] = {
2660 .name = "of_copy_ttl_in",
2661 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2662 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2663 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2666 [ACTION_OF_POP_VLAN] = {
2667 .name = "of_pop_vlan",
2668 .help = "OpenFlow's OFPAT_POP_VLAN",
2669 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2670 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2673 [ACTION_OF_PUSH_VLAN] = {
2674 .name = "of_push_vlan",
2675 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2678 sizeof(struct rte_flow_action_of_push_vlan)),
2679 .next = NEXT(action_of_push_vlan),
2682 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2683 .name = "ethertype",
2684 .help = "EtherType",
2685 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2686 .args = ARGS(ARGS_ENTRY_HTON
2687 (struct rte_flow_action_of_push_vlan,
2689 .call = parse_vc_conf,
2691 [ACTION_OF_SET_VLAN_VID] = {
2692 .name = "of_set_vlan_vid",
2693 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2696 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2697 .next = NEXT(action_of_set_vlan_vid),
2700 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2703 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2704 .args = ARGS(ARGS_ENTRY_HTON
2705 (struct rte_flow_action_of_set_vlan_vid,
2707 .call = parse_vc_conf,
2709 [ACTION_OF_SET_VLAN_PCP] = {
2710 .name = "of_set_vlan_pcp",
2711 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2714 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2715 .next = NEXT(action_of_set_vlan_pcp),
2718 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2720 .help = "VLAN priority",
2721 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2722 .args = ARGS(ARGS_ENTRY_HTON
2723 (struct rte_flow_action_of_set_vlan_pcp,
2725 .call = parse_vc_conf,
2727 [ACTION_OF_POP_MPLS] = {
2728 .name = "of_pop_mpls",
2729 .help = "OpenFlow's OFPAT_POP_MPLS",
2730 .priv = PRIV_ACTION(OF_POP_MPLS,
2731 sizeof(struct rte_flow_action_of_pop_mpls)),
2732 .next = NEXT(action_of_pop_mpls),
2735 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2736 .name = "ethertype",
2737 .help = "EtherType",
2738 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2739 .args = ARGS(ARGS_ENTRY_HTON
2740 (struct rte_flow_action_of_pop_mpls,
2742 .call = parse_vc_conf,
2744 [ACTION_OF_PUSH_MPLS] = {
2745 .name = "of_push_mpls",
2746 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2749 sizeof(struct rte_flow_action_of_push_mpls)),
2750 .next = NEXT(action_of_push_mpls),
2753 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2754 .name = "ethertype",
2755 .help = "EtherType",
2756 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2757 .args = ARGS(ARGS_ENTRY_HTON
2758 (struct rte_flow_action_of_push_mpls,
2760 .call = parse_vc_conf,
2762 [ACTION_VXLAN_ENCAP] = {
2763 .name = "vxlan_encap",
2764 .help = "VXLAN encapsulation, uses configuration set by \"set"
2766 .priv = PRIV_ACTION(VXLAN_ENCAP,
2767 sizeof(struct action_vxlan_encap_data)),
2768 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2769 .call = parse_vc_action_vxlan_encap,
2771 [ACTION_VXLAN_DECAP] = {
2772 .name = "vxlan_decap",
2773 .help = "Performs a decapsulation action by stripping all"
2774 " headers of the VXLAN tunnel network overlay from the"
2776 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2777 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2780 [ACTION_NVGRE_ENCAP] = {
2781 .name = "nvgre_encap",
2782 .help = "NVGRE encapsulation, uses configuration set by \"set"
2784 .priv = PRIV_ACTION(NVGRE_ENCAP,
2785 sizeof(struct action_nvgre_encap_data)),
2786 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2787 .call = parse_vc_action_nvgre_encap,
2789 [ACTION_NVGRE_DECAP] = {
2790 .name = "nvgre_decap",
2791 .help = "Performs a decapsulation action by stripping all"
2792 " headers of the NVGRE tunnel network overlay from the"
2794 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2795 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2798 [ACTION_L2_ENCAP] = {
2800 .help = "l2 encap, uses configuration set by"
2801 " \"set l2_encap\"",
2802 .priv = PRIV_ACTION(RAW_ENCAP,
2803 sizeof(struct action_raw_encap_data)),
2804 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2805 .call = parse_vc_action_l2_encap,
2807 [ACTION_L2_DECAP] = {
2809 .help = "l2 decap, uses configuration set by"
2810 " \"set l2_decap\"",
2811 .priv = PRIV_ACTION(RAW_DECAP,
2812 sizeof(struct action_raw_decap_data)),
2813 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2814 .call = parse_vc_action_l2_decap,
2816 [ACTION_MPLSOGRE_ENCAP] = {
2817 .name = "mplsogre_encap",
2818 .help = "mplsogre encapsulation, uses configuration set by"
2819 " \"set mplsogre_encap\"",
2820 .priv = PRIV_ACTION(RAW_ENCAP,
2821 sizeof(struct action_raw_encap_data)),
2822 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2823 .call = parse_vc_action_mplsogre_encap,
2825 [ACTION_MPLSOGRE_DECAP] = {
2826 .name = "mplsogre_decap",
2827 .help = "mplsogre decapsulation, uses configuration set by"
2828 " \"set mplsogre_decap\"",
2829 .priv = PRIV_ACTION(RAW_DECAP,
2830 sizeof(struct action_raw_decap_data)),
2831 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2832 .call = parse_vc_action_mplsogre_decap,
2834 [ACTION_MPLSOUDP_ENCAP] = {
2835 .name = "mplsoudp_encap",
2836 .help = "mplsoudp encapsulation, uses configuration set by"
2837 " \"set mplsoudp_encap\"",
2838 .priv = PRIV_ACTION(RAW_ENCAP,
2839 sizeof(struct action_raw_encap_data)),
2840 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2841 .call = parse_vc_action_mplsoudp_encap,
2843 [ACTION_MPLSOUDP_DECAP] = {
2844 .name = "mplsoudp_decap",
2845 .help = "mplsoudp decapsulation, uses configuration set by"
2846 " \"set mplsoudp_decap\"",
2847 .priv = PRIV_ACTION(RAW_DECAP,
2848 sizeof(struct action_raw_decap_data)),
2849 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2850 .call = parse_vc_action_mplsoudp_decap,
2852 [ACTION_SET_IPV4_SRC] = {
2853 .name = "set_ipv4_src",
2854 .help = "Set a new IPv4 source address in the outermost"
2856 .priv = PRIV_ACTION(SET_IPV4_SRC,
2857 sizeof(struct rte_flow_action_set_ipv4)),
2858 .next = NEXT(action_set_ipv4_src),
2861 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2862 .name = "ipv4_addr",
2863 .help = "new IPv4 source address to set",
2864 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2865 .args = ARGS(ARGS_ENTRY_HTON
2866 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2867 .call = parse_vc_conf,
2869 [ACTION_SET_IPV4_DST] = {
2870 .name = "set_ipv4_dst",
2871 .help = "Set a new IPv4 destination address in the outermost"
2873 .priv = PRIV_ACTION(SET_IPV4_DST,
2874 sizeof(struct rte_flow_action_set_ipv4)),
2875 .next = NEXT(action_set_ipv4_dst),
2878 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2879 .name = "ipv4_addr",
2880 .help = "new IPv4 destination address to set",
2881 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2882 .args = ARGS(ARGS_ENTRY_HTON
2883 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2884 .call = parse_vc_conf,
2886 [ACTION_SET_IPV6_SRC] = {
2887 .name = "set_ipv6_src",
2888 .help = "Set a new IPv6 source address in the outermost"
2890 .priv = PRIV_ACTION(SET_IPV6_SRC,
2891 sizeof(struct rte_flow_action_set_ipv6)),
2892 .next = NEXT(action_set_ipv6_src),
2895 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2896 .name = "ipv6_addr",
2897 .help = "new IPv6 source address to set",
2898 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2899 .args = ARGS(ARGS_ENTRY_HTON
2900 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2901 .call = parse_vc_conf,
2903 [ACTION_SET_IPV6_DST] = {
2904 .name = "set_ipv6_dst",
2905 .help = "Set a new IPv6 destination address in the outermost"
2907 .priv = PRIV_ACTION(SET_IPV6_DST,
2908 sizeof(struct rte_flow_action_set_ipv6)),
2909 .next = NEXT(action_set_ipv6_dst),
2912 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2913 .name = "ipv6_addr",
2914 .help = "new IPv6 destination address to set",
2915 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2916 .args = ARGS(ARGS_ENTRY_HTON
2917 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2918 .call = parse_vc_conf,
2920 [ACTION_SET_TP_SRC] = {
2921 .name = "set_tp_src",
2922 .help = "set a new source port number in the outermost"
2924 .priv = PRIV_ACTION(SET_TP_SRC,
2925 sizeof(struct rte_flow_action_set_tp)),
2926 .next = NEXT(action_set_tp_src),
2929 [ACTION_SET_TP_SRC_TP_SRC] = {
2931 .help = "new source port number to set",
2932 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2933 .args = ARGS(ARGS_ENTRY_HTON
2934 (struct rte_flow_action_set_tp, port)),
2935 .call = parse_vc_conf,
2937 [ACTION_SET_TP_DST] = {
2938 .name = "set_tp_dst",
2939 .help = "set a new destination port number in the outermost"
2941 .priv = PRIV_ACTION(SET_TP_DST,
2942 sizeof(struct rte_flow_action_set_tp)),
2943 .next = NEXT(action_set_tp_dst),
2946 [ACTION_SET_TP_DST_TP_DST] = {
2948 .help = "new destination port number to set",
2949 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2950 .args = ARGS(ARGS_ENTRY_HTON
2951 (struct rte_flow_action_set_tp, port)),
2952 .call = parse_vc_conf,
2954 [ACTION_MAC_SWAP] = {
2956 .help = "Swap the source and destination MAC addresses"
2957 " in the outermost Ethernet header",
2958 .priv = PRIV_ACTION(MAC_SWAP, 0),
2959 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2962 [ACTION_DEC_TTL] = {
2964 .help = "decrease network TTL if available",
2965 .priv = PRIV_ACTION(DEC_TTL, 0),
2966 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2969 [ACTION_SET_TTL] = {
2971 .help = "set ttl value",
2972 .priv = PRIV_ACTION(SET_TTL,
2973 sizeof(struct rte_flow_action_set_ttl)),
2974 .next = NEXT(action_set_ttl),
2977 [ACTION_SET_TTL_TTL] = {
2978 .name = "ttl_value",
2979 .help = "new ttl value to set",
2980 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2981 .args = ARGS(ARGS_ENTRY_HTON
2982 (struct rte_flow_action_set_ttl, ttl_value)),
2983 .call = parse_vc_conf,
2985 [ACTION_SET_MAC_SRC] = {
2986 .name = "set_mac_src",
2987 .help = "set source mac address",
2988 .priv = PRIV_ACTION(SET_MAC_SRC,
2989 sizeof(struct rte_flow_action_set_mac)),
2990 .next = NEXT(action_set_mac_src),
2993 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2995 .help = "new source mac address",
2996 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2997 .args = ARGS(ARGS_ENTRY_HTON
2998 (struct rte_flow_action_set_mac, mac_addr)),
2999 .call = parse_vc_conf,
3001 [ACTION_SET_MAC_DST] = {
3002 .name = "set_mac_dst",
3003 .help = "set destination mac address",
3004 .priv = PRIV_ACTION(SET_MAC_DST,
3005 sizeof(struct rte_flow_action_set_mac)),
3006 .next = NEXT(action_set_mac_dst),
3009 [ACTION_SET_MAC_DST_MAC_DST] = {
3011 .help = "new destination mac address to set",
3012 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3013 .args = ARGS(ARGS_ENTRY_HTON
3014 (struct rte_flow_action_set_mac, mac_addr)),
3015 .call = parse_vc_conf,
3017 [ACTION_INC_TCP_SEQ] = {
3018 .name = "inc_tcp_seq",
3019 .help = "increase TCP sequence number",
3020 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3021 .next = NEXT(action_inc_tcp_seq),
3024 [ACTION_INC_TCP_SEQ_VALUE] = {
3026 .help = "the value to increase TCP sequence number by",
3027 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3028 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3029 .call = parse_vc_conf,
3031 [ACTION_DEC_TCP_SEQ] = {
3032 .name = "dec_tcp_seq",
3033 .help = "decrease TCP sequence number",
3034 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3035 .next = NEXT(action_dec_tcp_seq),
3038 [ACTION_DEC_TCP_SEQ_VALUE] = {
3040 .help = "the value to decrease TCP sequence number by",
3041 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3042 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3043 .call = parse_vc_conf,
3045 [ACTION_INC_TCP_ACK] = {
3046 .name = "inc_tcp_ack",
3047 .help = "increase TCP acknowledgment number",
3048 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3049 .next = NEXT(action_inc_tcp_ack),
3052 [ACTION_INC_TCP_ACK_VALUE] = {
3054 .help = "the value to increase TCP acknowledgment number by",
3055 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3056 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3057 .call = parse_vc_conf,
3059 [ACTION_DEC_TCP_ACK] = {
3060 .name = "dec_tcp_ack",
3061 .help = "decrease TCP acknowledgment number",
3062 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3063 .next = NEXT(action_dec_tcp_ack),
3066 [ACTION_DEC_TCP_ACK_VALUE] = {
3068 .help = "the value to decrease TCP acknowledgment number by",
3069 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3070 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3071 .call = parse_vc_conf,
3073 [ACTION_RAW_ENCAP] = {
3074 .name = "raw_encap",
3075 .help = "encapsulation data, defined by set raw_encap",
3076 .priv = PRIV_ACTION(RAW_ENCAP,
3077 sizeof(struct rte_flow_action_raw_encap)),
3078 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3079 .call = parse_vc_action_raw_encap,
3081 [ACTION_RAW_DECAP] = {
3082 .name = "raw_decap",
3083 .help = "decapsulation data, defined by set raw_encap",
3084 .priv = PRIV_ACTION(RAW_DECAP,
3085 sizeof(struct rte_flow_action_raw_decap)),
3086 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3087 .call = parse_vc_action_raw_decap,
3089 /* Top level command. */
3092 .help = "set raw encap/decap data",
3093 .type = "set raw_encap|raw_decap <pattern>",
3094 .next = NEXT(NEXT_ENTRY
3097 .call = parse_set_init,
3099 /* Sub-level commands. */
3101 .name = "raw_encap",
3102 .help = "set raw encap data",
3103 .next = NEXT(next_item),
3104 .call = parse_set_raw_encap_decap,
3107 .name = "raw_decap",
3108 .help = "set raw decap data",
3109 .next = NEXT(next_item),
3110 .call = parse_set_raw_encap_decap,
3114 /** Remove and return last entry from argument stack. */
3115 static const struct arg *
3116 pop_args(struct context *ctx)
/* Empty stack yields NULL; otherwise decrement args_num and hand back
 * the popped entry. */
3118 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3121 /** Add entry on top of the argument stack. */
3123 push_args(struct context *ctx, const struct arg *arg)
/* Refuse the push when the fixed-size stack is already full (error
 * return on the branch not shown here). */
3125 if (ctx->args_num == CTX_STACK_SIZE)
3127 ctx->args[ctx->args_num++] = arg;
3131 /** Spread value into buffer according to bit-mask. */
/*
 * Scatters the low-order bits of @val into *dst at the bit positions set
 * in arg->mask, one mask byte at a time.  parse_prefix() also calls this
 * with dst == NULL (see below) — presumably a size/bit-count query mode;
 * confirm against the full body, which is not fully visible here.
 */
3133 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3135 uint32_t i = arg->size;
3143 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3152 unsigned int shift = 0;
3153 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Walk every bit set in this mask byte, consuming one bit of val per
 * set position. */
3155 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3156 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the current low bit of val. */
3161 *buf &= ~(1 << shift);
3162 *buf |= (val & 1) << shift;
3170 /** Compare a string with a partial one of a given length. */
/*
 * Non-zero when the first partial_len characters differ.  When the
 * prefix matches but @full extends past partial_len, the first extra
 * character of @full is returned (non-zero), so an exact or prefix-
 * covering match is required for a zero result.
 */
3172 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3174 int r = strncmp(full, partial, partial_len);
3178 if (strlen(full) <= partial_len)
3180 return full[partial_len];
3184  * Parse a prefix length and generate a bit-mask.
3186  * Last argument (ctx->args) is retrieved to determine mask size, storage
3187  * location and whether the result must use network byte ordering.
3190 parse_prefix(struct context *ctx, const struct token *token,
3191 const char *str, unsigned int len,
3192 void *buf, unsigned int size)
3194 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n highest bits set (n = 0..8), used for
 * the partial byte at the boundary of the generated mask. */
3195 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3202 /* Argument is expected. */
3206 u = strtoumax(str, &end, 0);
3207 if (errno || (size_t)(end - str) != len)
/* NULL dst: query arg_entry_bf_fill() without writing anything. */
3212 extra = arg_entry_bf_fill(NULL, 0, arg);
3221 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3222 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix longer than the field: reject. */
3229 if (bytes > size || bytes + !!extra > size)
3233 buf = (uint8_t *)ctx->object + arg->offset;
3234 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host storage: set bytes grow from the high end. */
3236 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3237 memset(buf, 0x00, size - bytes);
3239 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
3243 memset(buf, 0xff, bytes);
3244 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3246 ((uint8_t *)buf)[bytes] = conv[extra];
3249 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the popped argument for the caller. */
3252 push_args(ctx, arg);
3256 /** Default parsing function for token name matching. */
/* Accepts the token when @str is a valid (possibly partial) spelling of
 * token->name; writes nothing to the output buffer. */
3258 parse_default(struct context *ctx, const struct token *token,
3259 const char *str, unsigned int len,
3260 void *buf, unsigned int size)
3265 if (strcmp_partial(token->name, str, len))
3270 /** Parse flow command, initialize output buffer for subsequent tokens. */
3272 parse_init(struct context *ctx, const struct token *token,
3273 const char *str, unsigned int len,
3274 void *buf, unsigned int size)
3276 struct buffer *out = buf;
3278 /* Token name must match. */
3279 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3281 /* Nothing else to do if there is no buffer. */
3284 /* Make sure buffer is large enough. */
3285 if (size < sizeof(*out))
3287 /* Initialize buffer. */
3288 memset(out, 0x00, sizeof(*out));
/* Fill the area past the header with 0x22 — presumably a poison
 * pattern to expose stray reads/writes; confirm against callers. */
3289 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3292 ctx->objmask = NULL;
3296 /** Parse tokens for validate/create commands. */
/*
 * Dispatches on ctx->curr: command header, attributes, and the start of
 * a new pattern item or action entry.  Items/actions grow upward from
 * the end of struct buffer while their spec/conf data grows downward
 * from out->args.vc.data; the two ends must not cross (checked below).
 */
3298 parse_vc(struct context *ctx, const struct token *token,
3299 const char *str, unsigned int len,
3300 void *buf, unsigned int size)
3302 struct buffer *out = buf;
3306 /* Token name must match. */
3307 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3309 /* Nothing else to do if there is no buffer. */
3312 if (!out->command) {
3313 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3315 if (sizeof(*out) > size)
3317 out->command = ctx->curr;
3320 ctx->objmask = NULL;
/* Data region starts at the very end of the buffer and grows down. */
3321 out->args.vc.data = (uint8_t *)out + size;
3325 ctx->object = &out->args.vc.attr;
3326 ctx->objmask = NULL;
3327 switch (ctx->curr) {
3332 out->args.vc.attr.ingress = 1;
3335 out->args.vc.attr.egress = 1;
3338 out->args.vc.attr.transfer = 1;
/* Pattern array placed right after the buffer header, aligned. */
3341 out->args.vc.pattern =
3342 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3344 ctx->object = out->args.vc.pattern;
3345 ctx->objmask = NULL;
/* Action array follows the already-parsed pattern items. */
3348 out->args.vc.actions =
3349 (void *)RTE_ALIGN_CEIL((uintptr_t)
3350 (out->args.vc.pattern +
3351 out->args.vc.pattern_n),
3353 ctx->object = out->args.vc.actions;
3354 ctx->objmask = NULL;
/* Still in the pattern section: start a new item entry. */
3361 if (!out->args.vc.actions) {
3362 const struct parse_item_priv *priv = token->priv;
3363 struct rte_flow_item *item =
3364 out->args.vc.pattern + out->args.vc.pattern_n;
3366 data_size = priv->size * 3; /* spec, last, mask */
3367 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3368 (out->args.vc.data - data_size),
/* Overlap check: item array must not run into the data region. */
3370 if ((uint8_t *)item + sizeof(*item) > data)
3372 *item = (struct rte_flow_item){
3375 ++out->args.vc.pattern_n;
3377 ctx->objmask = NULL;
/* Actions section: start a new action entry instead. */
3379 const struct parse_action_priv *priv = token->priv;
3380 struct rte_flow_action *action =
3381 out->args.vc.actions + out->args.vc.actions_n;
3383 data_size = priv->size; /* configuration */
3384 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3385 (out->args.vc.data - data_size),
3387 if ((uint8_t *)action + sizeof(*action) > data)
3389 *action = (struct rte_flow_action){
3391 .conf = data_size ? data : NULL,
3393 ++out->args.vc.actions_n;
3394 ctx->object = action;
3395 ctx->objmask = NULL;
3397 memset(data, 0, data_size);
3398 out->args.vc.data = data;
3399 ctx->objdata = data_size;
3403 /** Parse pattern item parameter type. */
/*
 * Selects which third of the item's data region (spec, last or mask)
 * subsequent field tokens write into, based on the spec/last/prefix/mask
 * parameter keyword just parsed.
 */
3405 parse_vc_spec(struct context *ctx, const struct token *token,
3406 const char *str, unsigned int len,
3407 void *buf, unsigned int size)
3409 struct buffer *out = buf;
3410 struct rte_flow_item *item;
3416 /* Token name must match. */
3417 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3419 /* Parse parameter types. */
3420 switch (ctx->curr) {
3421 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3427 case ITEM_PARAM_SPEC:
3430 case ITEM_PARAM_LAST:
3433 case ITEM_PARAM_PREFIX:
3434 /* Modify next token to expect a prefix. */
3435 if (ctx->next_num < 2)
3437 ctx->next[ctx->next_num - 2] = prefix;
3439 case ITEM_PARAM_MASK:
3445 /* Nothing else to do if there is no buffer. */
3448 if (!out->args.vc.pattern_n)
3450 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* ctx->objdata holds the total item data size (3 equal sections). */
3451 data_size = ctx->objdata / 3; /* spec, last, mask */
3452 /* Point to selected object. */
3453 ctx->object = out->args.vc.data + (data_size * index);
3455 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3456 item->mask = ctx->objmask;
3458 ctx->objmask = NULL;
3459 /* Update relevant item pointer. */
3460 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3465 /** Parse action configuration field. */
/* Redirects ctx->object at the current action's conf storage so the
 * following value token writes into it; no mask for action conf. */
3467 parse_vc_conf(struct context *ctx, const struct token *token,
3468 const char *str, unsigned int len,
3469 void *buf, unsigned int size)
3471 struct buffer *out = buf;
3474 /* Token name must match. */
3475 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3477 /* Nothing else to do if there is no buffer. */
3480 /* Point to selected object. */
3481 ctx->object = out->args.vc.data;
3482 ctx->objmask = NULL;
3486 /** Parse RSS action. */
/*
 * Creates the action via parse_vc() then fills a default rte_flow_action_rss
 * configuration: default hash function, testpmd's default key, and an
 * identity queue list capped at ACTION_RSS_QUEUE_NUM.
 */
3488 parse_vc_action_rss(struct context *ctx, const struct token *token,
3489 const char *str, unsigned int len,
3490 void *buf, unsigned int size)
3492 struct buffer *out = buf;
3493 struct rte_flow_action *action;
3494 struct action_rss_data *action_rss_data;
3498 ret = parse_vc(ctx, token, str, len, buf, size);
3501 /* Nothing else to do if there is no buffer. */
3504 if (!out->args.vc.actions_n)
3506 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3507 /* Point to selected object. */
3508 ctx->object = out->args.vc.data;
3509 ctx->objmask = NULL;
3510 /* Set up default configuration. */
3511 action_rss_data = ctx->object;
3512 *action_rss_data = (struct action_rss_data){
3513 .conf = (struct rte_flow_action_rss){
3514 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3517 .key_len = sizeof(action_rss_data->key),
3518 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3519 .key = action_rss_data->key,
3520 .queue = action_rss_data->queue,
3522 .key = "testpmd's default RSS hash key, "
3523 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3526 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3527 action_rss_data->queue[i] = i;
/* Clamp the key length to what the device actually supports. */
3528 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3529 ctx->port != (portid_t)RTE_PORT_ALL) {
3530 struct rte_eth_dev_info info;
3532 rte_eth_dev_info_get(ctx->port, &info);
3533 action_rss_data->conf.key_len =
3534 RTE_MIN(sizeof(action_rss_data->key),
3535 info.hash_key_size);
3537 action->conf = &action_rss_data->conf;
3542  * Parse func field for RSS action.
3544  * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3545  * ACTION_RSS_FUNC_* index that called this function.
3548 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3549 const char *str, unsigned int len,
3550 void *buf, unsigned int size)
3552 struct action_rss_data *action_rss_data;
3553 enum rte_eth_hash_function func;
3557 /* Token name must match. */
3558 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token to its hash-function enum value. */
3560 switch (ctx->curr) {
3561 case ACTION_RSS_FUNC_DEFAULT:
3562 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3564 case ACTION_RSS_FUNC_TOEPLITZ:
3565 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3567 case ACTION_RSS_FUNC_SIMPLE_XOR:
3568 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3575 action_rss_data = ctx->object;
3576 action_rss_data->conf.func = func;
3581  * Parse type field for RSS action.
3583  * Valid tokens are type field names and the "end" token.
/*
 * ctx->objdata packs state: bit 16 marks "types list already started"
 * (so the first entry clears conf.types once), low 16 bits are preserved.
 */
3586 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3587 const char *str, unsigned int len,
3588 void *buf, unsigned int size)
3590 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3591 struct action_rss_data *action_rss_data;
3597 if (ctx->curr != ACTION_RSS_TYPE)
/* First type token: reset the accumulated type mask. */
3599 if (!(ctx->objdata >> 16) && ctx->object) {
3600 action_rss_data = ctx->object;
3601 action_rss_data->conf.types = 0;
3603 if (!strcmp_partial("end", str, len)) {
3604 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
3607 for (i = 0; rss_type_table[i].str; ++i)
3608 if (!strcmp_partial(rss_type_table[i].str, str, len))
3610 if (!rss_type_table[i].str)
3612 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3614 if (ctx->next_num == RTE_DIM(ctx->next))
/* Re-queue this token so more type names can follow. */
3616 ctx->next[ctx->next_num++] = next;
3619 action_rss_data = ctx->object;
3620 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3625  * Parse queue field for RSS action.
3627  * Valid tokens are queue indices and the "end" token.
/*
 * High 16 bits of ctx->objdata hold the count of queues parsed so far;
 * each index is written via an arbitrary-offset arg into the queue array.
 */
3630 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3631 const char *str, unsigned int len,
3632 void *buf, unsigned int size)
3634 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3635 struct action_rss_data *action_rss_data;
3636 const struct arg *arg;
3643 if (ctx->curr != ACTION_RSS_QUEUE)
3645 i = ctx->objdata >> 16;
3646 if (!strcmp_partial("end", str, len)) {
3647 ctx->objdata &= 0xffff;
3650 if (i >= ACTION_RSS_QUEUE_NUM)
/* Target slot i of the queue array through a synthesized arg. */
3652 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3653 i * sizeof(action_rss_data->queue[i]),
3654 sizeof(action_rss_data->queue[i]));
3655 if (push_args(ctx, arg))
3657 ret = parse_int(ctx, token, str, len, NULL, 0);
3663 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3665 if (ctx->next_num == RTE_DIM(ctx->next))
/* Re-queue this token so more queue indices can follow. */
3667 ctx->next[ctx->next_num++] = next;
3671 action_rss_data = ctx->object;
3672 action_rss_data->conf.queue_num = i;
/* Empty list: publish a NULL queue pointer rather than the array. */
3673 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3677 /** Parse VXLAN encap action. */
/*
 * Builds a default ETH/[VLAN]/IPv4|IPv6/UDP/VXLAN item list for the
 * vxlan_encap action from the global vxlan_encap_conf settings, then
 * attaches it as the action's configuration.
 */
3679 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3680 const char *str, unsigned int len,
3681 void *buf, unsigned int size)
3683 struct buffer *out = buf;
3684 struct rte_flow_action *action;
3685 struct action_vxlan_encap_data *action_vxlan_encap_data;
3688 ret = parse_vc(ctx, token, str, len, buf, size);
3691 /* Nothing else to do if there is no buffer. */
3694 if (!out->args.vc.actions_n)
3696 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3697 /* Point to selected object. */
3698 ctx->object = out->args.vc.data;
3699 ctx->objmask = NULL;
3700 /* Set up default configuration. */
3701 action_vxlan_encap_data = ctx->object;
3702 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3703 .conf = (struct rte_flow_action_vxlan_encap){
3704 .definition = action_vxlan_encap_data->items,
3708 .type = RTE_FLOW_ITEM_TYPE_ETH,
3709 .spec = &action_vxlan_encap_data->item_eth,
3710 .mask = &rte_flow_item_eth_mask,
3713 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3714 .spec = &action_vxlan_encap_data->item_vlan,
3715 .mask = &rte_flow_item_vlan_mask,
3718 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3719 .spec = &action_vxlan_encap_data->item_ipv4,
3720 .mask = &rte_flow_item_ipv4_mask,
3723 .type = RTE_FLOW_ITEM_TYPE_UDP,
3724 .spec = &action_vxlan_encap_data->item_udp,
3725 .mask = &rte_flow_item_udp_mask,
3728 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3729 .spec = &action_vxlan_encap_data->item_vxlan,
3730 .mask = &rte_flow_item_vxlan_mask,
3733 .type = RTE_FLOW_ITEM_TYPE_END,
3738 .tci = vxlan_encap_conf.vlan_tci,
3742 .src_addr = vxlan_encap_conf.ipv4_src,
3743 .dst_addr = vxlan_encap_conf.ipv4_dst,
3746 .src_port = vxlan_encap_conf.udp_src,
3747 .dst_port = vxlan_encap_conf.udp_dst,
3749 .item_vxlan.flags = 0,
3751 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3752 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3753 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3754 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: overwrite items[2] (the IPv4 slot) with IPv6. */
3755 if (!vxlan_encap_conf.select_ipv4) {
3756 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3757 &vxlan_encap_conf.ipv6_src,
3758 sizeof(vxlan_encap_conf.ipv6_src));
3759 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3760 &vxlan_encap_conf.ipv6_dst,
3761 sizeof(vxlan_encap_conf.ipv6_dst));
3762 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3763 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3764 .spec = &action_vxlan_encap_data->item_ipv6,
3765 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize the VLAN slot. */
3768 if (!vxlan_encap_conf.select_vlan)
3769 action_vxlan_encap_data->items[1].type =
3770 RTE_FLOW_ITEM_TYPE_VOID;
3771 if (vxlan_encap_conf.select_tos_ttl) {
3772 if (vxlan_encap_conf.select_ipv4) {
/* NOTE(review): function-local static mask shared across calls;
 * fine for single-threaded CLI use — confirm no concurrent use. */
3773 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3775 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3776 sizeof(ipv4_mask_tos));
3777 ipv4_mask_tos.hdr.type_of_service = 0xff;
3778 ipv4_mask_tos.hdr.time_to_live = 0xff;
3779 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3780 vxlan_encap_conf.ip_tos;
3781 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3782 vxlan_encap_conf.ip_ttl;
3783 action_vxlan_encap_data->items[2].mask =
3786 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3788 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3789 sizeof(ipv6_mask_tos));
3790 ipv6_mask_tos.hdr.vtc_flow |=
3791 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3792 ipv6_mask_tos.hdr.hop_limits = 0xff;
3793 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3795 ((uint32_t)vxlan_encap_conf.ip_tos <<
3796 RTE_IPV6_HDR_TC_SHIFT);
3797 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3798 vxlan_encap_conf.ip_ttl;
3799 action_vxlan_encap_data->items[2].mask =
3803 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3804 RTE_DIM(vxlan_encap_conf.vni));
3805 action->conf = &action_vxlan_encap_data->conf;
3809 /** Parse NVGRE encap action. */
/*
 * Builds a default ETH/[VLAN]/IPv4|IPv6/NVGRE item list for the
 * nvgre_encap action from the global nvgre_encap_conf settings, then
 * attaches it as the action's configuration.
 */
3811 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3812 const char *str, unsigned int len,
3813 void *buf, unsigned int size)
3815 struct buffer *out = buf;
3816 struct rte_flow_action *action;
3817 struct action_nvgre_encap_data *action_nvgre_encap_data;
3820 ret = parse_vc(ctx, token, str, len, buf, size);
3823 /* Nothing else to do if there is no buffer. */
3826 if (!out->args.vc.actions_n)
3828 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3829 /* Point to selected object. */
3830 ctx->object = out->args.vc.data;
3831 ctx->objmask = NULL;
3832 /* Set up default configuration. */
3833 action_nvgre_encap_data = ctx->object;
3834 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3835 .conf = (struct rte_flow_action_nvgre_encap){
3836 .definition = action_nvgre_encap_data->items,
3840 .type = RTE_FLOW_ITEM_TYPE_ETH,
3841 .spec = &action_nvgre_encap_data->item_eth,
3842 .mask = &rte_flow_item_eth_mask,
3845 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3846 .spec = &action_nvgre_encap_data->item_vlan,
3847 .mask = &rte_flow_item_vlan_mask,
3850 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3851 .spec = &action_nvgre_encap_data->item_ipv4,
3852 .mask = &rte_flow_item_ipv4_mask,
3855 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3856 .spec = &action_nvgre_encap_data->item_nvgre,
3857 .mask = &rte_flow_item_nvgre_mask,
3860 .type = RTE_FLOW_ITEM_TYPE_END,
3865 .tci = nvgre_encap_conf.vlan_tci,
3869 .src_addr = nvgre_encap_conf.ipv4_src,
3870 .dst_addr = nvgre_encap_conf.ipv4_dst,
3872 .item_nvgre.flow_id = 0,
3874 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3875 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3876 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3877 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: overwrite items[2] (the IPv4 slot) with IPv6. */
3878 if (!nvgre_encap_conf.select_ipv4) {
3879 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3880 &nvgre_encap_conf.ipv6_src,
3881 sizeof(nvgre_encap_conf.ipv6_src));
3882 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3883 &nvgre_encap_conf.ipv6_dst,
3884 sizeof(nvgre_encap_conf.ipv6_dst));
3885 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3886 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3887 .spec = &action_nvgre_encap_data->item_ipv6,
3888 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize the VLAN slot. */
3891 if (!nvgre_encap_conf.select_vlan)
3892 action_nvgre_encap_data->items[1].type =
3893 RTE_FLOW_ITEM_TYPE_VOID;
3894 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3895 RTE_DIM(nvgre_encap_conf.tni));
3896 action->conf = &action_nvgre_encap_data->conf;
3900 /** Parse l2 encap action. */
/*
 * Serializes an ETH[/VLAN] header into a raw_encap action buffer using
 * the global l2_encap_conf settings.
 */
3902 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3903 const char *str, unsigned int len,
3904 void *buf, unsigned int size)
3906 struct buffer *out = buf;
3907 struct rte_flow_action *action;
3908 struct action_raw_encap_data *action_encap_data;
3909 struct rte_flow_item_eth eth = { .type = 0, };
3910 struct rte_flow_item_vlan vlan = {
/* NOTE(review): tci comes from mplsoudp_encap_conf rather than an l2
 * encap setting — looks like a copy/paste from the MPLSoUDP parser;
 * TODO confirm intended source. */
3911 .tci = mplsoudp_encap_conf.vlan_tci,
3917 ret = parse_vc(ctx, token, str, len, buf, size);
3920 /* Nothing else to do if there is no buffer. */
3923 if (!out->args.vc.actions_n)
3925 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3926 /* Point to selected object. */
3927 ctx->object = out->args.vc.data;
3928 ctx->objmask = NULL;
3929 /* Copy the headers to the buffer. */
3930 action_encap_data = ctx->object;
3931 *action_encap_data = (struct action_raw_encap_data) {
3932 .conf = (struct rte_flow_action_raw_encap){
3933 .data = action_encap_data->data,
3937 header = action_encap_data->data;
/* Outer ethertype depends on what follows: VLAN, IPv4 or IPv6. */
3938 if (l2_encap_conf.select_vlan)
3939 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3940 else if (l2_encap_conf.select_ipv4)
3941 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3943 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3944 memcpy(eth.dst.addr_bytes,
3945 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3946 memcpy(eth.src.addr_bytes,
3947 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3948 memcpy(header, &eth, sizeof(eth));
3949 header += sizeof(eth);
3950 if (l2_encap_conf.select_vlan) {
3951 if (l2_encap_conf.select_ipv4)
3952 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3954 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3955 memcpy(header, &vlan, sizeof(vlan));
3956 header += sizeof(vlan);
/* Final size = bytes actually serialized into the buffer. */
3958 action_encap_data->conf.size = header -
3959 action_encap_data->data;
3960 action->conf = &action_encap_data->conf;
3964 /** Parse l2 decap action. */
/*
 * Serializes the ETH[/VLAN] header to strip into a raw_decap action
 * buffer using the global l2_decap_conf settings.
 */
3966 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3967 const char *str, unsigned int len,
3968 void *buf, unsigned int size)
3970 struct buffer *out = buf;
3971 struct rte_flow_action *action;
3972 struct action_raw_decap_data *action_decap_data;
3973 struct rte_flow_item_eth eth = { .type = 0, };
3974 struct rte_flow_item_vlan vlan = {
/* NOTE(review): tci taken from mplsoudp_encap_conf, not an l2 decap
 * setting — TODO confirm intended source (same pattern as l2_encap). */
3975 .tci = mplsoudp_encap_conf.vlan_tci,
3981 ret = parse_vc(ctx, token, str, len, buf, size);
3984 /* Nothing else to do if there is no buffer. */
3987 if (!out->args.vc.actions_n)
3989 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3990 /* Point to selected object. */
3991 ctx->object = out->args.vc.data;
3992 ctx->objmask = NULL;
3993 /* Copy the headers to the buffer. */
3994 action_decap_data = ctx->object;
3995 *action_decap_data = (struct action_raw_decap_data) {
3996 .conf = (struct rte_flow_action_raw_decap){
3997 .data = action_decap_data->data,
4001 header = action_decap_data->data;
4002 if (l2_decap_conf.select_vlan)
4003 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4004 memcpy(header, &eth, sizeof(eth));
4005 header += sizeof(eth);
4006 if (l2_decap_conf.select_vlan) {
4007 memcpy(header, &vlan, sizeof(vlan));
4008 header += sizeof(vlan);
/* Final size = bytes actually serialized into the buffer. */
4010 action_decap_data->conf.size = header -
4011 action_decap_data->data;
4012 action->conf = &action_decap_data->conf;
/* IEEE 802 ethertype for MPLS unicast (RFC 3032). */
4016 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4018 /** Parse MPLSOGRE encap action. */
/*
 * Serializes an ETH[/VLAN]/IPv4|IPv6/GRE/MPLS header stack into a
 * raw_encap action buffer using the global mplsogre_encap_conf settings.
 */
4020 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4021 const char *str, unsigned int len,
4022 void *buf, unsigned int size)
4024 struct buffer *out = buf;
4025 struct rte_flow_action *action;
4026 struct action_raw_encap_data *action_encap_data;
4027 struct rte_flow_item_eth eth = { .type = 0, };
4028 struct rte_flow_item_vlan vlan = {
4029 .tci = mplsogre_encap_conf.vlan_tci,
4032 struct rte_flow_item_ipv4 ipv4 = {
4034 .src_addr = mplsogre_encap_conf.ipv4_src,
4035 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4036 .next_proto_id = IPPROTO_GRE,
4037 .version_ihl = RTE_IPV4_VHL_DEF,
4038 .time_to_live = IPDEFTTL,
4041 struct rte_flow_item_ipv6 ipv6 = {
4043 .proto = IPPROTO_GRE,
4044 .hop_limits = IPDEFTTL,
4047 struct rte_flow_item_gre gre = {
4048 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4050 struct rte_flow_item_mpls mpls;
4054 ret = parse_vc(ctx, token, str, len, buf, size);
4057 /* Nothing else to do if there is no buffer. */
4060 if (!out->args.vc.actions_n)
4062 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4063 /* Point to selected object. */
4064 ctx->object = out->args.vc.data;
4065 ctx->objmask = NULL;
4066 /* Copy the headers to the buffer. */
4067 action_encap_data = ctx->object;
4068 *action_encap_data = (struct action_raw_encap_data) {
4069 .conf = (struct rte_flow_action_raw_encap){
4070 .data = action_encap_data->data,
4075 header = action_encap_data->data;
/* Outer ethertype depends on what follows: VLAN, IPv4 or IPv6. */
4076 if (mplsogre_encap_conf.select_vlan)
4077 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4078 else if (mplsogre_encap_conf.select_ipv4)
4079 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4081 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4082 memcpy(eth.dst.addr_bytes,
4083 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4084 memcpy(eth.src.addr_bytes,
4085 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4086 memcpy(header, &eth, sizeof(eth));
4087 header += sizeof(eth);
4088 if (mplsogre_encap_conf.select_vlan) {
4089 if (mplsogre_encap_conf.select_ipv4)
4090 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4092 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4093 memcpy(header, &vlan, sizeof(vlan));
4094 header += sizeof(vlan);
4096 if (mplsogre_encap_conf.select_ipv4) {
4097 memcpy(header, &ipv4, sizeof(ipv4));
4098 header += sizeof(ipv4);
4100 memcpy(&ipv6.hdr.src_addr,
4101 &mplsogre_encap_conf.ipv6_src,
4102 sizeof(mplsogre_encap_conf.ipv6_src));
4103 memcpy(&ipv6.hdr.dst_addr,
4104 &mplsogre_encap_conf.ipv6_dst,
4105 sizeof(mplsogre_encap_conf.ipv6_dst));
4106 memcpy(header, &ipv6, sizeof(ipv6));
4107 header += sizeof(ipv6);
4109 memcpy(header, &gre, sizeof(gre));
4110 header += sizeof(gre);
4111 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4112 RTE_DIM(mplsogre_encap_conf.label));
/* Mark bottom-of-stack on the single MPLS label entry. */
4113 mpls.label_tc_s[2] |= 0x1;
4114 memcpy(header, &mpls, sizeof(mpls));
4115 header += sizeof(mpls);
4116 action_encap_data->conf.size = header -
4117 action_encap_data->data;
4118 action->conf = &action_encap_data->conf;
4122 /** Parse MPLSOGRE decap action. */
/*
 * Serializes the ETH[/VLAN]/IPv4|IPv6/GRE/MPLS header stack to strip
 * into a raw_decap action buffer.
 * NOTE(review): only select_vlan below reads mplsogre_decap_conf; all
 * other fields come from mplsogre_encap_conf — presumably deliberate
 * (mirror the encap settings) but worth confirming against the decap
 * command's documentation.
 */
4124 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4125 const char *str, unsigned int len,
4126 void *buf, unsigned int size)
4128 struct buffer *out = buf;
4129 struct rte_flow_action *action;
4130 struct action_raw_decap_data *action_decap_data;
4131 struct rte_flow_item_eth eth = { .type = 0, };
4132 struct rte_flow_item_vlan vlan = {.tci = 0};
4133 struct rte_flow_item_ipv4 ipv4 = {
4135 .next_proto_id = IPPROTO_GRE,
4138 struct rte_flow_item_ipv6 ipv6 = {
4140 .proto = IPPROTO_GRE,
4143 struct rte_flow_item_gre gre = {
4144 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4146 struct rte_flow_item_mpls mpls;
4150 ret = parse_vc(ctx, token, str, len, buf, size);
4153 /* Nothing else to do if there is no buffer. */
4156 if (!out->args.vc.actions_n)
4158 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4159 /* Point to selected object. */
4160 ctx->object = out->args.vc.data;
4161 ctx->objmask = NULL;
4162 /* Copy the headers to the buffer. */
4163 action_decap_data = ctx->object;
4164 *action_decap_data = (struct action_raw_decap_data) {
4165 .conf = (struct rte_flow_action_raw_decap){
4166 .data = action_decap_data->data,
4170 header = action_decap_data->data;
4171 if (mplsogre_decap_conf.select_vlan)
4172 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4173 else if (mplsogre_encap_conf.select_ipv4)
4174 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4176 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4177 memcpy(eth.dst.addr_bytes,
4178 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4179 memcpy(eth.src.addr_bytes,
4180 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4181 memcpy(header, &eth, sizeof(eth));
4182 header += sizeof(eth);
4183 if (mplsogre_encap_conf.select_vlan) {
4184 if (mplsogre_encap_conf.select_ipv4)
4185 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4187 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4188 memcpy(header, &vlan, sizeof(vlan));
4189 header += sizeof(vlan);
4191 if (mplsogre_encap_conf.select_ipv4) {
4192 memcpy(header, &ipv4, sizeof(ipv4));
4193 header += sizeof(ipv4);
4195 memcpy(header, &ipv6, sizeof(ipv6));
4196 header += sizeof(ipv6);
4198 memcpy(header, &gre, sizeof(gre));
4199 header += sizeof(gre);
/* Decap template: MPLS label content is irrelevant, zero it. */
4200 memset(&mpls, 0, sizeof(mpls));
4201 memcpy(header, &mpls, sizeof(mpls));
4202 header += sizeof(mpls);
4203 action_decap_data->conf.size = header -
4204 action_decap_data->data;
4205 action->conf = &action_decap_data->conf;
4209 /** Parse MPLSOUDP encap action. */
/*
 * Serializes an ETH[/VLAN]/IPv4|IPv6/UDP/MPLS header stack into a
 * raw_encap action buffer using the global mplsoudp_encap_conf settings.
 */
4211 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4212 const char *str, unsigned int len,
4213 void *buf, unsigned int size)
4215 struct buffer *out = buf;
4216 struct rte_flow_action *action;
4217 struct action_raw_encap_data *action_encap_data;
4218 struct rte_flow_item_eth eth = { .type = 0, };
4219 struct rte_flow_item_vlan vlan = {
4220 .tci = mplsoudp_encap_conf.vlan_tci,
4223 struct rte_flow_item_ipv4 ipv4 = {
4225 .src_addr = mplsoudp_encap_conf.ipv4_src,
4226 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4227 .next_proto_id = IPPROTO_UDP,
4228 .version_ihl = RTE_IPV4_VHL_DEF,
4229 .time_to_live = IPDEFTTL,
4232 struct rte_flow_item_ipv6 ipv6 = {
4234 .proto = IPPROTO_UDP,
4235 .hop_limits = IPDEFTTL,
4238 struct rte_flow_item_udp udp = {
4240 .src_port = mplsoudp_encap_conf.udp_src,
4241 .dst_port = mplsoudp_encap_conf.udp_dst,
4244 struct rte_flow_item_mpls mpls;
4248 ret = parse_vc(ctx, token, str, len, buf, size);
4251 /* Nothing else to do if there is no buffer. */
4254 if (!out->args.vc.actions_n)
4256 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4257 /* Point to selected object. */
4258 ctx->object = out->args.vc.data;
4259 ctx->objmask = NULL;
4260 /* Copy the headers to the buffer. */
4261 action_encap_data = ctx->object;
4262 *action_encap_data = (struct action_raw_encap_data) {
4263 .conf = (struct rte_flow_action_raw_encap){
4264 .data = action_encap_data->data,
4269 header = action_encap_data->data;
/* Outer ethertype depends on what follows: VLAN, IPv4 or IPv6. */
4270 if (mplsoudp_encap_conf.select_vlan)
4271 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4272 else if (mplsoudp_encap_conf.select_ipv4)
4273 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4275 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4276 memcpy(eth.dst.addr_bytes,
4277 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4278 memcpy(eth.src.addr_bytes,
4279 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4280 memcpy(header, &eth, sizeof(eth));
4281 header += sizeof(eth);
4282 if (mplsoudp_encap_conf.select_vlan) {
4283 if (mplsoudp_encap_conf.select_ipv4)
4284 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4286 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4287 memcpy(header, &vlan, sizeof(vlan));
4288 header += sizeof(vlan);
4290 if (mplsoudp_encap_conf.select_ipv4) {
4291 memcpy(header, &ipv4, sizeof(ipv4));
4292 header += sizeof(ipv4);
4294 memcpy(&ipv6.hdr.src_addr,
4295 &mplsoudp_encap_conf.ipv6_src,
4296 sizeof(mplsoudp_encap_conf.ipv6_src));
4297 memcpy(&ipv6.hdr.dst_addr,
4298 &mplsoudp_encap_conf.ipv6_dst,
4299 sizeof(mplsoudp_encap_conf.ipv6_dst));
4300 memcpy(header, &ipv6, sizeof(ipv6));
4301 header += sizeof(ipv6);
4303 memcpy(header, &udp, sizeof(udp));
4304 header += sizeof(udp);
4305 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4306 RTE_DIM(mplsoudp_encap_conf.label));
/* Mark bottom-of-stack on the single MPLS label entry. */
4307 mpls.label_tc_s[2] |= 0x1;
4308 memcpy(header, &mpls, sizeof(mpls));
4309 header += sizeof(mpls);
4310 action_encap_data->conf.size = header -
4311 action_encap_data->data;
4312 action->conf = &action_encap_data->conf;
4316 /** Parse MPLSOUDP decap action. */
/*
 * Builds a raw_decap action whose data buffer mirrors the headers that an
 * MPLSoUDP encap would have added (ETH [/VLAN] / IPv4-or-IPv6 / UDP / MPLS).
 * NOTE(review): this extract is missing lines (braces, returns, some struct
 * initializer fields were elided); comments describe only the visible logic.
 */
4318 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4319 const char *str, unsigned int len,
4320 void *buf, unsigned int size)
4322 struct buffer *out = buf;
4323 struct rte_flow_action *action;
4324 struct action_raw_decap_data *action_decap_data;
4325 struct rte_flow_item_eth eth = { .type = 0, };
4326 struct rte_flow_item_vlan vlan = {.tci = 0};
4327 struct rte_flow_item_ipv4 ipv4 = {
4329 .next_proto_id = IPPROTO_UDP,
4332 struct rte_flow_item_ipv6 ipv6 = {
4334 .proto = IPPROTO_UDP,
4337 struct rte_flow_item_udp udp = {
/* 6635 is the well-known MPLS-in-UDP destination port. */
4339 .dst_port = rte_cpu_to_be_16(6635),
4342 struct rte_flow_item_mpls mpls;
/* Delegate generic action-token handling first. */
4346 ret = parse_vc(ctx, token, str, len, buf, size);
4349 /* Nothing else to do if there is no buffer. */
4352 if (!out->args.vc.actions_n)
4354 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4355 /* Point to selected object. */
4356 ctx->object = out->args.vc.data;
4357 ctx->objmask = NULL;
4358 /* Copy the headers to the buffer. */
4359 action_decap_data = ctx->object;
4360 *action_decap_data = (struct action_raw_decap_data) {
4361 .conf = (struct rte_flow_action_raw_decap){
4362 .data = action_decap_data->data,
4366 header = action_decap_data->data;
/*
 * NOTE(review): below, most fields are read from mplsoudp_encap_conf (not
 * the decap conf) — presumably so the decap description matches whatever
 * was encapsulated; confirm this mix of decap/encap conf is intentional.
 */
4367 if (mplsoudp_decap_conf.select_vlan)
4368 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4369 else if (mplsoudp_encap_conf.select_ipv4)
4370 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4372 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4373 memcpy(eth.dst.addr_bytes,
4374 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4375 memcpy(eth.src.addr_bytes,
4376 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" (HTML entity &eth;). */
4377 memcpy(header, ð, sizeof(eth));
4378 header += sizeof(eth);
4379 if (mplsoudp_encap_conf.select_vlan) {
4380 if (mplsoudp_encap_conf.select_ipv4)
4381 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4383 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4384 memcpy(header, &vlan, sizeof(vlan));
4385 header += sizeof(vlan);
4387 if (mplsoudp_encap_conf.select_ipv4) {
4388 memcpy(header, &ipv4, sizeof(ipv4));
4389 header += sizeof(ipv4);
4391 memcpy(header, &ipv6, sizeof(ipv6));
4392 header += sizeof(ipv6);
4394 memcpy(header, &udp, sizeof(udp));
4395 header += sizeof(udp);
/* MPLS label content is irrelevant for decap matching; zero it out. */
4396 memset(&mpls, 0, sizeof(mpls));
4397 memcpy(header, &mpls, sizeof(mpls));
4398 header += sizeof(mpls);
/* Total size = distance written from the start of the data buffer. */
4399 action_decap_data->conf.size = header -
4400 action_decap_data->data;
4401 action->conf = &action_decap_data->conf;
/*
 * Parse the raw_encap action token: attach the pre-built raw_encap_conf
 * buffer (filled earlier by "set raw_encap") to the current flow action.
 * NOTE(review): extract is missing some lines (braces/returns elided).
 */
4406 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4407 const char *str, unsigned int len, void *buf,
4410 struct buffer *out = buf;
4411 struct rte_flow_action *action;
4412 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4413 uint8_t *data = NULL;
4416 ret = parse_vc(ctx, token, str, len, buf, size);
4419 /* Nothing else to do if there is no buffer. */
4422 if (!out->args.vc.actions_n)
4424 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4425 /* Point to selected object. */
4426 ctx->object = out->args.vc.data;
4427 ctx->objmask = NULL;
4428 /* Copy the headers to the buffer. */
4429 action_raw_encap_conf = ctx->object;
4430 /* data stored from tail of data buffer */
/* Headers were written back-to-front, so the payload starts at tail-size. */
4431 data = (uint8_t *)&(raw_encap_conf.data) +
4432 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4433 action_raw_encap_conf->data = data;
4434 action_raw_encap_conf->preserve = NULL;
4435 action_raw_encap_conf->size = raw_encap_conf.size;
4436 action->conf = action_raw_encap_conf;
/*
 * Parse the raw_decap action token: attach the pre-built raw_decap_conf
 * buffer (filled earlier by "set raw_decap") to the current flow action.
 * NOTE(review): extract is missing some lines (braces/returns elided).
 */
4441 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4442 const char *str, unsigned int len, void *buf,
4445 struct buffer *out = buf;
4446 struct rte_flow_action *action;
4447 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4448 uint8_t *data = NULL;
4451 ret = parse_vc(ctx, token, str, len, buf, size);
4454 /* Nothing else to do if there is no buffer. */
4457 if (!out->args.vc.actions_n)
4459 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4460 /* Point to selected object. */
4461 ctx->object = out->args.vc.data;
4462 ctx->objmask = NULL;
4463 /* Copy the headers to the buffer. */
4464 action_raw_decap_conf = ctx->object;
4465 /* data stored from tail of data buffer */
4466 data = (uint8_t *)&(raw_decap_conf.data) +
4467 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4468 action_raw_decap_conf->data = data;
4469 action_raw_decap_conf->size = raw_decap_conf.size;
4470 action->conf = action_raw_decap_conf;
4474 /** Parse tokens for destroy command. */
/*
 * First match initializes the output buffer; subsequent matches append one
 * rule ID slot each, bounds-checked against the buffer end.
 */
4476 parse_destroy(struct context *ctx, const struct token *token,
4477 const char *str, unsigned int len,
4478 void *buf, unsigned int size)
4480 struct buffer *out = buf;
4482 /* Token name must match. */
4483 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4485 /* Nothing else to do if there is no buffer. */
4488 if (!out->command) {
4489 if (ctx->curr != DESTROY)
4491 if (sizeof(*out) > size)
4493 out->command = ctx->curr;
4496 ctx->objmask = NULL;
/* Rule ID array lives right after the buffer header, suitably aligned. */
4497 out->args.destroy.rule =
4498 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another ID if it would overflow the caller-provided buffer. */
4502 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4503 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4506 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4507 ctx->objmask = NULL;
4511 /** Parse tokens for flush command. */
/* Flush takes no extra arguments: just record the command on first match. */
4513 parse_flush(struct context *ctx, const struct token *token,
4514 const char *str, unsigned int len,
4515 void *buf, unsigned int size)
4517 struct buffer *out = buf;
4519 /* Token name must match. */
4520 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4522 /* Nothing else to do if there is no buffer. */
4525 if (!out->command) {
4526 if (ctx->curr != FLUSH)
4528 if (sizeof(*out) > size)
4530 out->command = ctx->curr;
4533 ctx->objmask = NULL;
4538 /** Parse tokens for query command. */
/* Record the QUERY command; rule/action arguments are parsed by later tokens. */
4540 parse_query(struct context *ctx, const struct token *token,
4541 const char *str, unsigned int len,
4542 void *buf, unsigned int size)
4544 struct buffer *out = buf;
4546 /* Token name must match. */
4547 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4549 /* Nothing else to do if there is no buffer. */
4552 if (!out->command) {
4553 if (ctx->curr != QUERY)
4555 if (sizeof(*out) > size)
4557 out->command = ctx->curr;
4560 ctx->objmask = NULL;
4565 /** Parse action names. */
/*
 * Match the input against the next_action[] token list and store the
 * selected action's private data at the argument's offset.
 * NOTE(review): extract is missing lines; the match/store body is elided.
 */
4567 parse_action(struct context *ctx, const struct token *token,
4568 const char *str, unsigned int len,
4569 void *buf, unsigned int size)
4571 struct buffer *out = buf;
4572 const struct arg *arg = pop_args(ctx);
4576 /* Argument is expected. */
4579 /* Parse action name. */
4580 for (i = 0; next_action[i]; ++i) {
4581 const struct parse_action_priv *priv;
4583 token = &token_list[next_action[i]];
/* strcmp_partial() allows abbreviated token names. */
4584 if (strcmp_partial(token->name, str, len))
4590 memcpy((uint8_t *)ctx->object + arg->offset,
/* On failure, restore the argument for a later retry. */
4596 push_args(ctx, arg);
4600 /** Parse tokens for list command. */
/*
 * First match initializes the output buffer; subsequent matches append one
 * group ID slot each, bounds-checked against the buffer end (same shape as
 * parse_destroy()).
 */
4602 parse_list(struct context *ctx, const struct token *token,
4603 const char *str, unsigned int len,
4604 void *buf, unsigned int size)
4606 struct buffer *out = buf;
4608 /* Token name must match. */
4609 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4611 /* Nothing else to do if there is no buffer. */
4614 if (!out->command) {
4615 if (ctx->curr != LIST)
4617 if (sizeof(*out) > size)
4619 out->command = ctx->curr;
4622 ctx->objmask = NULL;
/* Group ID array lives right after the buffer header, suitably aligned. */
4623 out->args.list.group =
4624 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4628 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4629 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4632 ctx->object = out->args.list.group + out->args.list.group_n++;
4633 ctx->objmask = NULL;
4637 /** Parse tokens for isolate command. */
/* Record the ISOLATE command; the on/off argument is parsed separately. */
4639 parse_isolate(struct context *ctx, const struct token *token,
4640 const char *str, unsigned int len,
4641 void *buf, unsigned int size)
4643 struct buffer *out = buf;
4645 /* Token name must match. */
4646 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4648 /* Nothing else to do if there is no buffer. */
4651 if (!out->command) {
4652 if (ctx->curr != ISOLATE)
4654 if (sizeof(*out) > size)
4656 out->command = ctx->curr;
4659 ctx->objmask = NULL;
4665 * Parse signed/unsigned integers 8 to 64-bit long.
4667 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Stores the parsed value at arg->offset in ctx->object (and fills the
 * matching objmask bytes with all-ones), honoring arg->hton byte order.
 * NOTE(review): extract is missing lines (range-error paths, break
 * statements, closing braces were elided).
 */
4671 parse_int(struct context *ctx, const struct token *token,
4672 const char *str, unsigned int len,
4673 void *buf, unsigned int size)
4675 const struct arg *arg = pop_args(ctx);
4680 /* Argument is expected. */
/* Signedness-aware conversion; both results are held in an unsigned max. */
4685 (uintmax_t)strtoimax(str, &end, 0) :
4686 strtoumax(str, &end, 0);
/* The whole token must have been consumed for the parse to count. */
4687 if (errno || (size_t)(end - str) != len)
4690 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4691 (intmax_t)u > (intmax_t)arg->max)) ||
4692 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments are filled via the dedicated helper. */
4697 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4698 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4702 buf = (uint8_t *)ctx->object + arg->offset;
/* Reject values that do not fit the destination width. */
4704 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4708 case sizeof(uint8_t):
4709 *(uint8_t *)buf = u;
4711 case sizeof(uint16_t):
4712 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit case (e.g. VXLAN VNI): stored byte-by-byte per host endianness. */
4714 case sizeof(uint8_t [3]):
4715 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4717 ((uint8_t *)buf)[0] = u;
4718 ((uint8_t *)buf)[1] = u >> 8;
4719 ((uint8_t *)buf)[2] = u >> 16;
4723 ((uint8_t *)buf)[0] = u >> 16;
4724 ((uint8_t *)buf)[1] = u >> 8;
4725 ((uint8_t *)buf)[2] = u;
4727 case sizeof(uint32_t):
4728 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4730 case sizeof(uint64_t):
4731 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask unless it aliases the object. */
4736 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4738 buf = (uint8_t *)ctx->objmask + arg->offset;
4743 push_args(ctx, arg);
4750 * Three arguments (ctx->args) are retrieved from the stack to store data,
4751 * its actual length and address (in that order).
/*
 * Copies a raw string token into the object at arg_data->offset, records
 * its length through parse_int(), zero-pads the remainder, and optionally
 * stores the data address when arg_addr->size is non-zero.
 * NOTE(review): extract is missing lines (error labels/returns elided).
 */
4754 parse_string(struct context *ctx, const struct token *token,
4755 const char *str, unsigned int len,
4756 void *buf, unsigned int size)
4758 const struct arg *arg_data = pop_args(ctx);
4759 const struct arg *arg_len = pop_args(ctx);
4760 const struct arg *arg_addr = pop_args(ctx);
4761 char tmp[16]; /* Ought to be enough. */
4764 /* Arguments are expected. */
/* On early failure, restore popped arguments in reverse order. */
4768 push_args(ctx, arg_data);
4772 push_args(ctx, arg_len);
4773 push_args(ctx, arg_data);
4776 size = arg_data->size;
4777 /* Bit-mask fill is not supported. */
4778 if (arg_data->mask || size < len)
4782 /* Let parse_int() fill length information first. */
4783 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4786 push_args(ctx, arg_len);
4787 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4792 buf = (uint8_t *)ctx->object + arg_data->offset;
4793 /* Output buffer is not necessarily NUL-terminated. */
4794 memcpy(buf, str, len);
4795 memset((uint8_t *)buf + len, 0x00, size - len);
4797 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4798 /* Save address if requested. */
4799 if (arg_addr->size) {
4800 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4802 (uint8_t *)ctx->object + arg_data->offset
4806 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4808 (uint8_t *)ctx->objmask + arg_data->offset
4814 push_args(ctx, arg_addr);
4815 push_args(ctx, arg_len);
4816 push_args(ctx, arg_data);
/*
 * Convert a hex digit string into bytes, two characters per output byte.
 * NOTE(review): error handling and the final length update are elided in
 * this extract; the loop below only shows the conversion itself.
 */
4821 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4827 /* Check input parameters */
4828 if ((src == NULL) ||
4834 /* Convert chars to bytes */
4835 for (i = 0, len = 0; i < *size; i += 2) {
/* Copy at most two hex chars into tmp, then convert with strtoul(). */
4836 snprintf(tmp, 3, "%s", src + i);
4837 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hexadecimal byte string ("0x..."), mirroring parse_string():
 * three stacked arguments store the data, its length, and its address.
 * NOTE(review): extract is missing lines (prefix skip, error paths elided).
 */
4852 parse_hex(struct context *ctx, const struct token *token,
4853 const char *str, unsigned int len,
4854 void *buf, unsigned int size)
4856 const struct arg *arg_data = pop_args(ctx);
4857 const struct arg *arg_len = pop_args(ctx);
4858 const struct arg *arg_addr = pop_args(ctx);
4859 char tmp[16]; /* Ought to be enough. */
4861 unsigned int hexlen = len;
/* Scratch conversion buffer; caps the accepted hex payload. */
4862 unsigned int length = 256;
4863 uint8_t hex_tmp[length];
4865 /* Arguments are expected. */
4869 push_args(ctx, arg_data);
4873 push_args(ctx, arg_len);
4874 push_args(ctx, arg_data);
4877 size = arg_data->size;
4878 /* Bit-mask fill is not supported. */
4884 /* translate bytes string to array. */
4885 if (str[0] == '0' && ((str[1] == 'x') ||
4890 if (hexlen > length)
4892 ret = parse_hex_string(str, hex_tmp, &hexlen);
4895 /* Let parse_int() fill length information first. */
4896 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4899 push_args(ctx, arg_len);
4900 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4905 buf = (uint8_t *)ctx->object + arg_data->offset;
4906 /* Output buffer is not necessarily NUL-terminated. */
4907 memcpy(buf, hex_tmp, hexlen);
4908 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
4910 memset((uint8_t *)ctx->objmask + arg_data->offset,
4912 /* Save address if requested. */
4913 if (arg_addr->size) {
4914 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4916 (uint8_t *)ctx->object + arg_data->offset
4920 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4922 (uint8_t *)ctx->objmask + arg_data->offset
4928 push_args(ctx, arg_addr);
4929 push_args(ctx, arg_len);
4930 push_args(ctx, arg_data);
4936 * Parse a MAC address.
4938 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Converts "xx:xx:xx:xx:xx:xx" via rte_ether_unformat_addr() and stores it
 * at arg->offset; the objmask bytes are set to all-ones when present.
 */
4942 parse_mac_addr(struct context *ctx, const struct token *token,
4943 const char *str, unsigned int len,
4944 void *buf, unsigned int size)
4946 const struct arg *arg = pop_args(ctx);
4947 struct rte_ether_addr tmp;
4951 /* Argument is expected. */
4955 /* Bit-mask fill is not supported. */
4956 if (arg->mask || size != sizeof(tmp))
4958 /* Only network endian is supported. */
4961 ret = rte_ether_unformat_addr(str, &tmp);
4966 buf = (uint8_t *)ctx->object + arg->offset;
4967 memcpy(buf, &tmp, size);
4969 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4972 push_args(ctx, arg);
4977 * Parse an IPv4 address.
4979 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) first; on failure falls back to parse_int() so
 * a raw 32-bit value is also accepted.
 */
4983 parse_ipv4_addr(struct context *ctx, const struct token *token,
4984 const char *str, unsigned int len,
4985 void *buf, unsigned int size)
4987 const struct arg *arg = pop_args(ctx);
4992 /* Argument is expected. */
4996 /* Bit-mask fill is not supported. */
4997 if (arg->mask || size != sizeof(tmp))
4999 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
5002 memcpy(str2, str, len);
5004 ret = inet_pton(AF_INET, str2, &tmp);
5006 /* Attempt integer parsing. */
5007 push_args(ctx, arg);
5008 return parse_int(ctx, token, str, len, buf, size);
5012 buf = (uint8_t *)ctx->object + arg->offset;
5013 memcpy(buf, &tmp, size);
5015 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5018 push_args(ctx, arg);
5023 * Parse an IPv6 address.
5025 * Last argument (ctx->args) is retrieved to determine storage size and
/* Same shape as parse_ipv4_addr() but with no integer fallback. */
5029 parse_ipv6_addr(struct context *ctx, const struct token *token,
5030 const char *str, unsigned int len,
5031 void *buf, unsigned int size)
5033 const struct arg *arg = pop_args(ctx);
5035 struct in6_addr tmp;
5039 /* Argument is expected. */
5043 /* Bit-mask fill is not supported. */
5044 if (arg->mask || size != sizeof(tmp))
5046 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated copy of the token. */
5049 memcpy(str2, str, len);
5051 ret = inet_pton(AF_INET6, str2, &tmp);
5056 buf = (uint8_t *)ctx->object + arg->offset;
5057 memcpy(buf, &tmp, size);
5059 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5062 push_args(ctx, arg);
5066 /** Boolean values (even indices stand for false). */
/* NOTE(review): the entry list ("0"/"1", "false"/"true", ...) is elided
 * in this extract; only the array declaration is visible. */
5067 static const char *const boolean_name[] = {
5077 * Parse a boolean value.
5079 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Maps a boolean word to "0"/"1" using boolean_name[] (even index = false),
 * then defers the actual store to parse_int().
 */
5083 parse_boolean(struct context *ctx, const struct token *token,
5084 const char *str, unsigned int len,
5085 void *buf, unsigned int size)
5087 const struct arg *arg = pop_args(ctx);
5091 /* Argument is expected. */
5094 for (i = 0; boolean_name[i]; ++i)
5095 if (!strcmp_partial(boolean_name[i], str, len))
5097 /* Process token as integer. */
5098 if (boolean_name[i])
/* Parity of the matched index decides truthiness. */
5099 str = i & 1 ? "1" : "0";
5100 push_args(ctx, arg);
5101 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not the substitute's. */
5102 return ret > 0 ? (int)len : ret;
5105 /** Parse port and update context. */
/* Parses the port number into a scratch buffer and caches it in ctx->port. */
5107 parse_port(struct context *ctx, const struct token *token,
5108 const char *str, unsigned int len,
5109 void *buf, unsigned int size)
/* Fall back to a local scratch buffer when the caller supplied none. */
5111 struct buffer *out = &(struct buffer){ .port = 0 };
5119 ctx->objmask = NULL;
5120 size = sizeof(*out);
5122 ret = parse_int(ctx, token, str, len, out, size);
5124 ctx->port = out->port;
5130 /** Parse set command, initialize output buffer for subsequent tokens. */
5132 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5133 const char *str, unsigned int len,
5134 void *buf, unsigned int size)
5136 struct buffer *out = buf;
5138 /* Token name must match. */
5139 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5141 /* Nothing else to do if there is no buffer. */
5144 /* Make sure buffer is large enough. */
5145 if (size < sizeof(*out))
5148 ctx->objmask = NULL;
/* Record which set sub-command (raw_encap/raw_decap) is being parsed. */
5151 out->command = ctx->curr;
5156 * Parse set raw_encap/raw_decap command,
5157 * initialize output buffer for subsequent tokens.
5160 parse_set_init(struct context *ctx, const struct token *token,
5161 const char *str, unsigned int len,
5162 void *buf, unsigned int size)
5164 struct buffer *out = buf;
5166 /* Token name must match. */
5167 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5169 /* Nothing else to do if there is no buffer. */
5172 /* Make sure buffer is large enough. */
5173 if (size < sizeof(*out))
5175 /* Initialize buffer. */
5176 memset(out, 0x00, sizeof(*out));
/* 0x22 poison marks the unused tail for easier debugging. */
5177 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5180 ctx->objmask = NULL;
5181 if (!out->command) {
5182 if (ctx->curr != SET)
5184 if (sizeof(*out) > size)
5186 out->command = ctx->curr;
/* vc.data grows downward from the end of the buffer. */
5187 out->args.vc.data = (uint8_t *)out + size;
5188 /* All we need is pattern */
5189 out->args.vc.pattern =
5190 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5192 ctx->object = out->args.vc.pattern;
5197 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
5199 comp_none(struct context *ctx, const struct token *token,
5200 unsigned int ent, char *buf, unsigned int size)
5210 /** Complete boolean values. */
/* Returns the ent-th boolean_name[] entry, or counts entries when buf==NULL. */
5212 comp_boolean(struct context *ctx, const struct token *token,
5213 unsigned int ent, char *buf, unsigned int size)
5219 for (i = 0; boolean_name[i]; ++i)
5220 if (buf && i == ent)
5221 return strlcpy(buf, boolean_name[i], size);
5227 /** Complete action names. */
/* Returns the ent-th next_action[] token name, or counts entries. */
5229 comp_action(struct context *ctx, const struct token *token,
5230 unsigned int ent, char *buf, unsigned int size)
5236 for (i = 0; next_action[i]; ++i)
5237 if (buf && i == ent)
5238 return strlcpy(buf, token_list[next_action[i]].name,
5245 /** Complete available ports. */
/* Iterates live ethdev ports; returns the ent-th port ID, or counts them. */
5247 comp_port(struct context *ctx, const struct token *token,
5248 unsigned int ent, char *buf, unsigned int size)
5255 RTE_ETH_FOREACH_DEV(p) {
5256 if (buf && i == ent)
5257 return snprintf(buf, size, "%u", p);
5265 /** Complete available rule IDs. */
/* Walks the current port's flow list; returns the ent-th rule ID, or counts. */
5267 comp_rule_id(struct context *ctx, const struct token *token,
5268 unsigned int ent, char *buf, unsigned int size)
5271 struct rte_port *port;
5272 struct port_flow *pf;
/* No completion possible without a valid, specific port in context. */
5275 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5276 ctx->port == (portid_t)RTE_PORT_ALL)
5278 port = &ports[ctx->port];
5279 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5280 if (buf && i == ent)
5281 return snprintf(buf, size, "%u", pf->id);
5289 /** Complete type field for RSS action. */
/* Suggests rss_type_table[] names, then "end" as the list terminator. */
5291 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5292 unsigned int ent, char *buf, unsigned int size)
5298 for (i = 0; rss_type_table[i].str; ++i)
5303 return strlcpy(buf, rss_type_table[ent].str, size);
5305 return snprintf(buf, size, "end");
5309 /** Complete queue field for RSS action. */
/* Suggests queue indices 0..N-1, then "end" as the list terminator. */
5311 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5312 unsigned int ent, char *buf, unsigned int size)
5319 return snprintf(buf, size, "%u", ent);
5321 return snprintf(buf, size, "end");
5325 /** Internal context. */
/* Single shared parser state; the cmdline library drives one parse at a time. */
5326 static struct context cmd_flow_context;
5328 /** Global parser instance (cmdline API). */
/* Forward declarations; the instances are defined near the end of the file. */
5329 cmdline_parse_inst_t cmd_flow;
5330 cmdline_parse_inst_t cmd_set_raw;
5332 /** Initialize context. */
/* Resets parser state fields individually (cheaper than memset on the whole). */
5334 cmd_flow_context_init(struct context *ctx)
5336 /* A full memset() is not necessary. */
5346 ctx->objmask = NULL;
5349 /** Parse a token (cmdline API). */
/*
 * Core token dispatcher: measures the next whitespace/'#'-delimited token,
 * detects end-of-line, tries each candidate token's parser, then pushes the
 * accepted token's follow-up tokens and arguments onto the context stacks.
 * NOTE(review): extract is missing lines (returns, loop bodies elided).
 */
5351 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5354 struct context *ctx = &cmd_flow_context;
5355 const struct token *token;
5356 const enum index *list;
5361 token = &token_list[ctx->curr];
5362 /* Check argument length. */
5365 for (len = 0; src[len]; ++len)
5366 if (src[len] == '#' || isspace(src[len]))
5370 /* Last argument and EOL detection. */
5371 for (i = len; src[i]; ++i)
5372 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5374 else if (!isspace(src[i])) {
5379 if (src[i] == '\r' || src[i] == '\n') {
5383 /* Initialize context if necessary. */
5384 if (!ctx->next_num) {
5387 ctx->next[ctx->next_num++] = token->next[0];
5389 /* Process argument through candidates. */
5390 ctx->prev = ctx->curr;
5391 list = ctx->next[ctx->next_num - 1];
5392 for (i = 0; list[i]; ++i) {
5393 const struct token *next = &token_list[list[i]];
5396 ctx->curr = list[i];
/* Tokens with a custom call() handle themselves; others use the default. */
5398 tmp = next->call(ctx, next, src, len, result, size);
5400 tmp = parse_default(ctx, next, src, len, result, size);
5401 if (tmp == -1 || tmp != len)
5409 /* Push subsequent tokens if any. */
5411 for (i = 0; token->next[i]; ++i) {
5412 if (ctx->next_num == RTE_DIM(ctx->next))
5414 ctx->next[ctx->next_num++] = token->next[i];
5416 /* Push arguments if any. */
5418 for (i = 0; token->args[i]; ++i) {
5419 if (ctx->args_num == RTE_DIM(ctx->args))
5421 ctx->args[ctx->args_num++] = token->args[i];
5426 /** Return number of completion entries (cmdline API). */
5428 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5430 struct context *ctx = &cmd_flow_context;
5431 const struct token *token = &token_list[ctx->curr];
5432 const enum index *list;
5436 /* Count number of tokens in current list. */
5438 list = ctx->next[ctx->next_num - 1];
5440 list = token->next[0];
5441 for (i = 0; list[i]; ++i)
5446 * If there is a single token, use its completion callback, otherwise
5447 * return the number of entries.
5449 token = &token_list[list[0]];
5450 if (i == 1 && token->comp) {
5451 /* Save index for cmd_flow_get_help(). */
5452 ctx->prev = list[0];
/* comp(..., NULL, 0) is the counting convention for completion callbacks. */
5453 return token->comp(ctx, token, 0, NULL, 0);
5458 /** Return a completion entry (cmdline API). */
5460 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5461 char *dst, unsigned int size)
5463 struct context *ctx = &cmd_flow_context;
5464 const struct token *token = &token_list[ctx->curr];
5465 const enum index *list;
5469 /* Count number of tokens in current list. */
5471 list = ctx->next[ctx->next_num - 1];
5473 list = token->next[0];
5474 for (i = 0; list[i]; ++i)
5478 /* If there is a single token, use its completion callback. */
5479 token = &token_list[list[0]];
5480 if (i == 1 && token->comp) {
5481 /* Save index for cmd_flow_get_help(). */
5482 ctx->prev = list[0];
5483 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5485 /* Otherwise make sure the index is valid and use defaults. */
5488 token = &token_list[list[index]];
5489 strlcpy(dst, token->name, size);
5490 /* Save index for cmd_flow_get_help(). */
5491 ctx->prev = list[index];
5495 /** Populate help strings for current token (cmdline API). */
5497 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5499 struct context *ctx = &cmd_flow_context;
/* ctx->prev was saved by the completion callbacks above. */
5500 const struct token *token = &token_list[ctx->prev];
5505 /* Set token type and update global help with details. */
5506 strlcpy(dst, (token->type ? token->type : "TOKEN"), size)
5508 cmd_flow.help_str = token->help;
5510 cmd_flow.help_str = token->name;
5514 /** Token definition template (cmdline API). */
/* One shared header serves every dynamic token; all ops route through the
 * context-driven callbacks above. */
5515 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5516 .ops = &(struct cmdline_token_ops){
5517 .parse = cmd_flow_parse,
5518 .complete_get_nb = cmd_flow_complete_get_nb,
5519 .complete_get_elt = cmd_flow_complete_get_elt,
5520 .get_help = cmd_flow_get_help,
5525 /** Populate the next dynamic token. */
5527 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5528 cmdline_parse_token_hdr_t **hdr_inst)
5530 struct context *ctx = &cmd_flow_context;
5532 /* Always reinitialize context before requesting the first token. */
5533 if (!(hdr_inst - cmd_flow.tokens))
5534 cmd_flow_context_init(ctx);
5535 /* Return NULL when no more tokens are expected. */
5536 if (!ctx->next_num && ctx->curr) {
5540 /* Determine if command should end here. */
5541 if (ctx->eol && ctx->last && ctx->next_num) {
5542 const enum index *list = ctx->next[ctx->next_num - 1];
5545 for (i = 0; list[i]; ++i) {
/* Hand the shared token header back to the cmdline library. */
5552 *hdr = &cmd_flow_token_hdr;
5555 /** Dispatch parsed buffer to function calls. */
/* Maps each completed command buffer to the corresponding port_flow_* call. */
5557 cmd_flow_parsed(const struct buffer *in)
5559 switch (in->command) {
5561 port_flow_validate(in->port, &in->args.vc.attr,
5562 in->args.vc.pattern, in->args.vc.actions);
5565 port_flow_create(in->port, &in->args.vc.attr,
5566 in->args.vc.pattern, in->args.vc.actions);
5569 port_flow_destroy(in->port, in->args.destroy.rule_n,
5570 in->args.destroy.rule);
5573 port_flow_flush(in->port);
5576 port_flow_query(in->port, in->args.query.rule,
5577 &in->args.query.action);
5580 port_flow_list(in->port, in->args.list.group_n,
5581 in->args.list.group);
5584 port_flow_isolate(in->port, in->args.isolate.set);
5591 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose: generates tokens while parsing, dispatches once complete. */
5593 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5596 cmd_flow_tok(arg0, arg2);
5598 cmd_flow_parsed(arg0);
5601 /** Global parser instance (cmdline API). */
5602 cmdline_parse_inst_t cmd_flow = {
5604 .data = NULL, /**< Unused. */
5605 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5608 }, /**< Tokens are returned by cmd_flow_tok(). */
5611 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Fix up protocol-chaining fields inside a raw header buffer that cannot be
 * expressed on the command line: EtherType / next-proto / version bits /
 * tunnel flags, based on the item type and the next (upper) protocol.
 */
5614 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5616 struct rte_flow_item_ipv4 *ipv4;
5617 struct rte_flow_item_eth *eth;
5618 struct rte_flow_item_ipv6 *ipv6;
5619 struct rte_flow_item_vxlan *vxlan;
5620 struct rte_flow_item_vxlan_gpe *gpe;
5621 struct rte_flow_item_nvgre *nvgre;
5622 uint32_t ipv6_vtc_flow;
5624 switch (item->type) {
5625 case RTE_FLOW_ITEM_TYPE_ETH:
5626 eth = (struct rte_flow_item_eth *)buf;
5628 eth->type = rte_cpu_to_be_16(next_proto);
5630 case RTE_FLOW_ITEM_TYPE_IPV4:
5631 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45 = IPv4, 5-word (20-byte) header. */
5632 ipv4->hdr.version_ihl = 0x45;
5633 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
5635 case RTE_FLOW_ITEM_TYPE_IPV6:
5636 ipv6 = (struct rte_flow_item_ipv6 *)buf;
5637 ipv6->hdr.proto = (uint8_t)next_proto;
5638 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
5639 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
5640 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
5641 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
5643 case RTE_FLOW_ITEM_TYPE_VXLAN:
5644 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08 = "valid VNI" flag (I bit), per RFC 7348. */
5645 vxlan->flags = 0x08;
5647 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5648 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
5651 case RTE_FLOW_ITEM_TYPE_NVGRE:
5652 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558 = Transparent Ethernet Bridging; 0x2000 sets the key-present bit. */
5653 nvgre->protocol = rte_cpu_to_be_16(0x6558);
5654 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
5661 /** Helper of get item's default mask. */
/* Returns the library-provided default mask for a pattern item type, used
 * as the spec when the user gave none; NULL for unhandled types. */
5663 flow_item_default_mask(const struct rte_flow_item *item)
5665 const void *mask = NULL;
5667 switch (item->type) {
5668 case RTE_FLOW_ITEM_TYPE_ANY:
5669 mask = &rte_flow_item_any_mask;
5671 case RTE_FLOW_ITEM_TYPE_VF:
5672 mask = &rte_flow_item_vf_mask;
5674 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5675 mask = &rte_flow_item_port_id_mask;
5677 case RTE_FLOW_ITEM_TYPE_RAW:
5678 mask = &rte_flow_item_raw_mask;
5680 case RTE_FLOW_ITEM_TYPE_ETH:
5681 mask = &rte_flow_item_eth_mask;
5683 case RTE_FLOW_ITEM_TYPE_VLAN:
5684 mask = &rte_flow_item_vlan_mask;
5686 case RTE_FLOW_ITEM_TYPE_IPV4:
5687 mask = &rte_flow_item_ipv4_mask;
5689 case RTE_FLOW_ITEM_TYPE_IPV6:
5690 mask = &rte_flow_item_ipv6_mask;
5692 case RTE_FLOW_ITEM_TYPE_ICMP:
5693 mask = &rte_flow_item_icmp_mask;
5695 case RTE_FLOW_ITEM_TYPE_UDP:
5696 mask = &rte_flow_item_udp_mask;
5698 case RTE_FLOW_ITEM_TYPE_TCP:
5699 mask = &rte_flow_item_tcp_mask;
5701 case RTE_FLOW_ITEM_TYPE_SCTP:
5702 mask = &rte_flow_item_sctp_mask;
5704 case RTE_FLOW_ITEM_TYPE_VXLAN:
5705 mask = &rte_flow_item_vxlan_mask;
5707 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5708 mask = &rte_flow_item_vxlan_gpe_mask;
5710 case RTE_FLOW_ITEM_TYPE_E_TAG:
5711 mask = &rte_flow_item_e_tag_mask;
5713 case RTE_FLOW_ITEM_TYPE_NVGRE:
5714 mask = &rte_flow_item_nvgre_mask;
5716 case RTE_FLOW_ITEM_TYPE_MPLS:
5717 mask = &rte_flow_item_mpls_mask;
5719 case RTE_FLOW_ITEM_TYPE_GRE:
5720 mask = &rte_flow_item_gre_mask;
5722 case RTE_FLOW_ITEM_TYPE_META:
5723 mask = &rte_flow_item_meta_mask;
5725 case RTE_FLOW_ITEM_TYPE_FUZZY:
5726 mask = &rte_flow_item_fuzzy_mask;
5728 case RTE_FLOW_ITEM_TYPE_GTP:
5729 mask = &rte_flow_item_gtp_mask;
5731 case RTE_FLOW_ITEM_TYPE_ESP:
5732 mask = &rte_flow_item_esp_mask;
5742 /** Dispatch parsed buffer to function calls. */
/*
 * Serialize a parsed "set raw_encap/raw_decap" pattern into the matching
 * global conf buffer. Items are walked in reverse (innermost first) and
 * written back-to-front from the buffer tail, so the finished headers end
 * up in wire order at (tail - total_size).
 */
5744 cmd_set_raw_parsed(const struct buffer *in)
5746 uint32_t n = in->args.vc.pattern_n;
5748 struct rte_flow_item *item = NULL;
5750 uint8_t *data = NULL;
5751 uint8_t *data_tail = NULL;
5752 size_t *total_size = NULL;
5753 uint16_t upper_layer = 0;
5756 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
5757 in->command == SET_RAW_DECAP);
5758 if (in->command == SET_RAW_ENCAP) {
5759 total_size = &raw_encap_conf.size;
5760 data = (uint8_t *)&raw_encap_conf.data;
5762 total_size = &raw_decap_conf.size;
5763 data = (uint8_t *)&raw_decap_conf.data;
5766 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5767 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
5768 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
5769 for (i = n - 1 ; i >= 0; --i) {
5770 item = in->args.vc.pattern + i;
/* User gave no spec: fall back to the item type's default mask. */
5771 if (item->spec == NULL)
5772 item->spec = flow_item_default_mask(item);
5773 switch (item->type) {
5774 case RTE_FLOW_ITEM_TYPE_ETH:
5775 size = sizeof(struct rte_flow_item_eth);
5777 case RTE_FLOW_ITEM_TYPE_VLAN:
5778 size = sizeof(struct rte_flow_item_vlan);
5779 proto = RTE_ETHER_TYPE_VLAN;
5781 case RTE_FLOW_ITEM_TYPE_IPV4:
5782 size = sizeof(struct rte_flow_item_ipv4);
5783 proto = RTE_ETHER_TYPE_IPV4;
5785 case RTE_FLOW_ITEM_TYPE_IPV6:
5786 size = sizeof(struct rte_flow_item_ipv6);
5787 proto = RTE_ETHER_TYPE_IPV6;
5789 case RTE_FLOW_ITEM_TYPE_UDP:
5790 size = sizeof(struct rte_flow_item_udp);
5793 case RTE_FLOW_ITEM_TYPE_TCP:
5794 size = sizeof(struct rte_flow_item_tcp);
5797 case RTE_FLOW_ITEM_TYPE_VXLAN:
5798 size = sizeof(struct rte_flow_item_vxlan);
5800 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5801 size = sizeof(struct rte_flow_item_vxlan_gpe);
5803 case RTE_FLOW_ITEM_TYPE_GRE:
5804 size = sizeof(struct rte_flow_item_gre);
5807 case RTE_FLOW_ITEM_TYPE_MPLS:
5808 size = sizeof(struct rte_flow_item_mpls);
5810 case RTE_FLOW_ITEM_TYPE_NVGRE:
5811 size = sizeof(struct rte_flow_item_nvgre);
5815 printf("Error - Not supported item\n");
/* Abort: wipe any partially-written data before returning. */
5817 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5820 *total_size += size;
5821 rte_memcpy(data_tail - (*total_size), item->spec, size);
5822 /* update some fields which cannot be set by cmdline */
5823 update_fields((data_tail - (*total_size)), item,
/* Remember this layer's protocol for the next (outer) header. */
5825 upper_layer = proto;
5827 if (verbose_level & 0x1)
5828 printf("total data size is %zu\n", (*total_size));
5829 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
5832 /** Populate help strings for current token (cmdline API). */
/*
 * NOTE(review): elided extract -- the return type, remaining parameters and
 * closing lines of this function are missing from this view.
 * Writes the current token's type into `dst` and points the global
 * cmd_set_raw help string at the token's help text (or its name when no
 * help text is visible -- presumably a NULL-help fallback; confirm against
 * the elided conditional).
 */
5834 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
/* Reuse the flow command's parser context; `prev` is the token just parsed. */
5837 struct context *ctx = &cmd_flow_context;
5838 const struct token *token = &token_list[ctx->prev];
5843 /* Set token type and update global help with details. */
5844 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
5846 cmd_set_raw.help_str = token->help;
5848 cmd_set_raw.help_str = token->name;
5852 /** Token definition template (cmdline API). */
/*
 * Shares the generic flow-command callbacks for parsing and completion;
 * only the help callback is specific to "set raw_*".
 * NOTE(review): the closing braces of this initializer are elided here.
 */
5853 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
5854 .ops = &(struct cmdline_token_ops){
5855 .parse = cmd_flow_parse,
5856 .complete_get_nb = cmd_flow_complete_get_nb,
5857 .complete_get_elt = cmd_flow_complete_get_elt,
5858 .get_help = cmd_set_raw_get_help,
5863 /** Populate the next dynamic token. */
/*
 * NOTE(review): elided extract -- return type, intermediate statements and
 * closing braces are missing between the numbered lines.
 * Called by the cmdline library for each token slot; hands back the shared
 * dynamic token header, or stops when the parser context expects no more
 * tokens.
 */
5865 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
5866 cmdline_parse_token_hdr_t **hdr_inst)
5868 struct context *ctx = &cmd_flow_context;
5870 /* Always reinitialize context before requesting the first token. */
5871 if (!(hdr_inst - cmd_set_raw.tokens)) {
5872 cmd_flow_context_init(ctx);
/* Start this parser at the "set" entry point rather than "flow". */
5873 ctx->curr = START_SET;
5875 /* Return NULL when no more tokens are expected. */
5876 if (!ctx->next_num && (ctx->curr != START_SET)) {
5880 /* Determine if command should end here. */
5881 if (ctx->eol && ctx->last && ctx->next_num) {
5882 const enum index *list = ctx->next[ctx->next_num - 1];
/* Scan the candidate-token list (loop body elided in this view). */
5885 for (i = 0; list[i]; ++i) {
/* Every slot uses the same dynamic token header. */
5892 *hdr = &cmd_set_raw_token_hdr;
5895 /** Token generator and output processing callback (cmdline API). */
/*
 * NOTE(review): elided extract -- signature return type, conditional
 * structure and braces are missing. Appears to either generate the next
 * token (arg0 = hdr) or dispatch a fully parsed buffer (arg0 = buffer);
 * confirm the branching against the elided lines. `cl` is unused here.
 */
5897 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
5900 cmd_set_raw_tok(arg0, arg2);
5902 cmd_set_raw_parsed(arg0);
5905 /** Global parser instance (cmdline API). */
5906 cmdline_parse_inst_t cmd_set_raw = {
5907 .f = cmd_set_raw_cb,
5908 .data = NULL, /**< Unused. */
5909 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5912 }, /**< Tokens are returned by cmd_flow_tok(). */