1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
25 /** Parser token indices. */
48 /* Top-level command. */
50 /* Sub-level commands. */
54 /* Top-level command. */
56 /* Sub-level commands. */
65 /* Destroy arguments. */
68 /* Query arguments. */
74 /* Validate/create arguments. */
81 /* Validate/create pattern. */
118 ITEM_VLAN_INNER_TYPE,
150 ITEM_E_TAG_GRP_ECID_B,
159 ITEM_GRE_C_RSVD0_VER,
175 ITEM_ARP_ETH_IPV4_SHA,
176 ITEM_ARP_ETH_IPV4_SPA,
177 ITEM_ARP_ETH_IPV4_THA,
178 ITEM_ARP_ETH_IPV4_TPA,
180 ITEM_IPV6_EXT_NEXT_HDR,
185 ITEM_ICMP6_ND_NS_TARGET_ADDR,
187 ITEM_ICMP6_ND_NA_TARGET_ADDR,
189 ITEM_ICMP6_ND_OPT_TYPE,
190 ITEM_ICMP6_ND_OPT_SLA_ETH,
191 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
192 ITEM_ICMP6_ND_OPT_TLA_ETH,
193 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
199 /* Validate/create actions. */
219 ACTION_RSS_FUNC_DEFAULT,
220 ACTION_RSS_FUNC_TOEPLITZ,
221 ACTION_RSS_FUNC_SIMPLE_XOR,
233 ACTION_PHY_PORT_ORIGINAL,
234 ACTION_PHY_PORT_INDEX,
236 ACTION_PORT_ID_ORIGINAL,
240 ACTION_OF_SET_MPLS_TTL,
241 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
242 ACTION_OF_DEC_MPLS_TTL,
243 ACTION_OF_SET_NW_TTL,
244 ACTION_OF_SET_NW_TTL_NW_TTL,
245 ACTION_OF_DEC_NW_TTL,
246 ACTION_OF_COPY_TTL_OUT,
247 ACTION_OF_COPY_TTL_IN,
250 ACTION_OF_PUSH_VLAN_ETHERTYPE,
251 ACTION_OF_SET_VLAN_VID,
252 ACTION_OF_SET_VLAN_VID_VLAN_VID,
253 ACTION_OF_SET_VLAN_PCP,
254 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
256 ACTION_OF_POP_MPLS_ETHERTYPE,
258 ACTION_OF_PUSH_MPLS_ETHERTYPE,
265 ACTION_MPLSOGRE_ENCAP,
266 ACTION_MPLSOGRE_DECAP,
267 ACTION_MPLSOUDP_ENCAP,
268 ACTION_MPLSOUDP_DECAP,
270 ACTION_SET_IPV4_SRC_IPV4_SRC,
272 ACTION_SET_IPV4_DST_IPV4_DST,
274 ACTION_SET_IPV6_SRC_IPV6_SRC,
276 ACTION_SET_IPV6_DST_IPV6_DST,
278 ACTION_SET_TP_SRC_TP_SRC,
280 ACTION_SET_TP_DST_TP_DST,
286 ACTION_SET_MAC_SRC_MAC_SRC,
288 ACTION_SET_MAC_DST_MAC_DST,
290 ACTION_INC_TCP_SEQ_VALUE,
292 ACTION_DEC_TCP_SEQ_VALUE,
294 ACTION_INC_TCP_ACK_VALUE,
296 ACTION_DEC_TCP_ACK_VALUE,
301 /** Maximum size for pattern in struct rte_flow_item_raw. */
302 #define ITEM_RAW_PATTERN_SIZE 40
304 /** Storage size for struct rte_flow_item_raw plus ITEM_RAW_PATTERN_SIZE trailing pattern bytes. */
305 #define ITEM_RAW_SIZE \
306 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
308 /** Maximum number of queue indices in struct rte_flow_action_rss. */
309 #define ACTION_RSS_QUEUE_NUM 32
311 /** Storage for struct rte_flow_action_rss including external data. */
312 struct action_rss_data {
313 struct rte_flow_action_rss conf;
314 uint8_t key[RSS_HASH_KEY_LENGTH];
315 uint16_t queue[ACTION_RSS_QUEUE_NUM];
318 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
319 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
321 #define ACTION_RAW_ENCAP_MAX_DATA 128 /**< Maximum data size in struct rte_flow_action_raw_encap. NOTE(review): redefined with the identical value later in this file (harmless per C11 6.10.3, but consider keeping a single definition). */
323 /** Storage for struct rte_flow_action_raw_encap. */
324 struct raw_encap_conf {
325 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
326 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
330 struct raw_encap_conf raw_encap_conf = {.size = 0}; /**< Global buffer for the raw_encap action configuration; starts empty (size = 0). */
332 /** Storage for struct rte_flow_action_raw_decap. */
333 struct raw_decap_conf {
334 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
338 struct raw_decap_conf raw_decap_conf = {.size = 0}; /**< Global buffer for the raw_decap action configuration; starts empty (size = 0). */
340 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
341 struct action_vxlan_encap_data {
342 struct rte_flow_action_vxlan_encap conf;
343 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
344 struct rte_flow_item_eth item_eth;
345 struct rte_flow_item_vlan item_vlan;
347 struct rte_flow_item_ipv4 item_ipv4;
348 struct rte_flow_item_ipv6 item_ipv6;
350 struct rte_flow_item_udp item_udp;
351 struct rte_flow_item_vxlan item_vxlan;
354 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
355 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
357 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
358 struct action_nvgre_encap_data {
359 struct rte_flow_action_nvgre_encap conf;
360 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
361 struct rte_flow_item_eth item_eth;
362 struct rte_flow_item_vlan item_vlan;
364 struct rte_flow_item_ipv4 item_ipv4;
365 struct rte_flow_item_ipv6 item_ipv6;
367 struct rte_flow_item_nvgre item_nvgre;
370 /** Maximum data size in struct rte_flow_action_raw_encap. */
371 #define ACTION_RAW_ENCAP_MAX_DATA 128
373 /** Storage for struct rte_flow_action_raw_encap including external data. */
374 struct action_raw_encap_data {
375 struct rte_flow_action_raw_encap conf;
376 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
377 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
380 /** Storage for struct rte_flow_action_raw_decap including external data. */
381 struct action_raw_decap_data {
382 struct rte_flow_action_raw_decap conf;
383 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
386 /** Maximum number of subsequent tokens and arguments on the stack. */
387 #define CTX_STACK_SIZE 16
389 /** Parser context. */
391 /** Stack of subsequent token lists to process. */
392 const enum index *next[CTX_STACK_SIZE];
393 /** Arguments for stacked tokens. */
394 const void *args[CTX_STACK_SIZE];
395 enum index curr; /**< Current token index. */
396 enum index prev; /**< Index of the last token seen. */
397 int next_num; /**< Number of entries in next[]. */
398 int args_num; /**< Number of entries in args[]. */
399 uint32_t eol:1; /**< EOL has been detected. */
400 uint32_t last:1; /**< No more arguments. */
401 portid_t port; /**< Current port ID (for completions). */
402 uint32_t objdata; /**< Object-specific data. */
403 void *object; /**< Address of current object for relative offsets. */
404 void *objmask; /**< Object a full mask must be written to. */
407 /** Token argument. */
409 uint32_t hton:1; /**< Use network byte ordering. */
410 uint32_t sign:1; /**< Value is signed. */
411 uint32_t bounded:1; /**< Value is bounded. */
412 uintmax_t min; /**< Minimum value if bounded. */
413 uintmax_t max; /**< Maximum value if bounded. */
414 uint32_t offset; /**< Relative offset from ctx->object. */
415 uint32_t size; /**< Field size. */
416 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
419 /** Parser token definition. */
421 /** Type displayed during completion (defaults to "TOKEN"). */
423 /** Help displayed during completion (defaults to token name). */
425 /** Private data used by parser functions. */
428 * Lists of subsequent tokens to push on the stack. Each call to the
429 * parser consumes the last entry of that stack.
431 const enum index *const *next;
432 /** Arguments stack for subsequent tokens that need them. */
433 const struct arg *const *args;
435 * Token-processing callback, returns -1 in case of error, the
436 * length of the matched string otherwise. If NULL, attempts to
437 * match the token name.
439 * If buf is not NULL, the result should be stored in it according
440 * to context. An error is returned if not large enough.
442 int (*call)(struct context *ctx, const struct token *token,
443 const char *str, unsigned int len,
444 void *buf, unsigned int size);
446 * Callback that provides possible values for this token, used for
447 * completion. Returns -1 in case of error, the number of possible
448 * values otherwise. If NULL, the token name is used.
450 * If buf is not NULL, entry index ent is written to buf and the
451 * full length of the entry is returned (same behavior as
454 int (*comp)(struct context *ctx, const struct token *token,
455 unsigned int ent, char *buf, unsigned int size);
456 /** Mandatory token name, no default value. */
460 /** Static initializer for the next field: a NULL-terminated array of token-index lists. */
461 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
463 /** Static initializer for one NEXT() entry: a ZERO-terminated list of token indices. */
464 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
466 /** Static initializer for the args field: a NULL-terminated array of argument descriptors. */
467 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
469 /** Static initializer for ARGS() to target a field. */
470 #define ARGS_ENTRY(s, f) \
471 (&(const struct arg){ \
472 .offset = offsetof(s, f), \
473 .size = sizeof(((s *)0)->f), \
476 /** Static initializer for ARGS() to target a bit-field. */
477 #define ARGS_ENTRY_BF(s, f, b) \
478 (&(const struct arg){ \
480 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
483 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
484 #define ARGS_ENTRY_MASK(s, f, m) \
485 (&(const struct arg){ \
486 .offset = offsetof(s, f), \
487 .size = sizeof(((s *)0)->f), \
488 .mask = (const void *)(m), \
491 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
492 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
493 (&(const struct arg){ \
495 .offset = offsetof(s, f), \
496 .size = sizeof(((s *)0)->f), \
497 .mask = (const void *)(m), \
500 /** Static initializer for ARGS() to target a pointer. */
501 #define ARGS_ENTRY_PTR(s, f) \
502 (&(const struct arg){ \
503 .size = sizeof(*((s *)0)->f), \
506 /** Static initializer for ARGS() with arbitrary offset and size. */
507 #define ARGS_ENTRY_ARB(o, s) \
508 (&(const struct arg){ \
513 /** Same as ARGS_ENTRY_ARB() with bounded values. */
514 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
515 (&(const struct arg){ \
523 /** Same as ARGS_ENTRY() using network byte ordering. */
524 #define ARGS_ENTRY_HTON(s, f) \
525 (&(const struct arg){ \
527 .offset = offsetof(s, f), \
528 .size = sizeof(((s *)0)->f), \
531 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
532 #define ARG_ENTRY_HTON(s) \
533 (&(const struct arg){ \
539 /** Parser output buffer layout expected by cmd_flow_parsed(). */
541 enum index command; /**< Flow command. */
542 portid_t port; /**< Affected port ID. */
545 struct rte_flow_attr attr;
546 struct rte_flow_item *pattern;
547 struct rte_flow_action *actions;
551 } vc; /**< Validate/create arguments. */
555 } destroy; /**< Destroy arguments. */
558 struct rte_flow_action action;
559 } query; /**< Query arguments. */
563 } list; /**< List arguments. */
566 } isolate; /**< Isolated mode arguments. */
567 } args; /**< Command arguments. */
570 /** Private data for pattern items. */
571 struct parse_item_priv {
572 enum rte_flow_item_type type; /**< Item type. */
573 uint32_t size; /**< Size of item specification structure. */
576 #define PRIV_ITEM(t, s) \
577 (&(const struct parse_item_priv){ \
578 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
582 /** Private data for actions. */
583 struct parse_action_priv {
584 enum rte_flow_action_type type; /**< Action type. */
585 uint32_t size; /**< Size of action configuration structure. */
588 #define PRIV_ACTION(t, s) \
589 (&(const struct parse_action_priv){ \
590 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
594 static const enum index next_vc_attr[] = {
604 static const enum index next_destroy_attr[] = {
610 static const enum index next_list_attr[] = {
616 static const enum index item_param[] = {
625 static const enum index next_item[] = {
661 ITEM_ICMP6_ND_OPT_SLA_ETH,
662 ITEM_ICMP6_ND_OPT_TLA_ETH,
669 static const enum index item_fuzzy[] = {
675 static const enum index item_any[] = {
681 static const enum index item_vf[] = {
687 static const enum index item_phy_port[] = {
693 static const enum index item_port_id[] = {
699 static const enum index item_mark[] = {
705 static const enum index item_raw[] = {
715 static const enum index item_eth[] = {
723 static const enum index item_vlan[] = {
728 ITEM_VLAN_INNER_TYPE,
733 static const enum index item_ipv4[] = {
743 static const enum index item_ipv6[] = {
754 static const enum index item_icmp[] = {
761 static const enum index item_udp[] = {
768 static const enum index item_tcp[] = {
776 static const enum index item_sctp[] = {
785 static const enum index item_vxlan[] = {
791 static const enum index item_e_tag[] = {
792 ITEM_E_TAG_GRP_ECID_B,
797 static const enum index item_nvgre[] = {
803 static const enum index item_mpls[] = {
811 static const enum index item_gre[] = {
813 ITEM_GRE_C_RSVD0_VER,
821 static const enum index item_gre_key[] = {
827 static const enum index item_gtp[] = {
833 static const enum index item_geneve[] = {
840 static const enum index item_vxlan_gpe[] = {
846 static const enum index item_arp_eth_ipv4[] = {
847 ITEM_ARP_ETH_IPV4_SHA,
848 ITEM_ARP_ETH_IPV4_SPA,
849 ITEM_ARP_ETH_IPV4_THA,
850 ITEM_ARP_ETH_IPV4_TPA,
855 static const enum index item_ipv6_ext[] = {
856 ITEM_IPV6_EXT_NEXT_HDR,
861 static const enum index item_icmp6[] = {
868 static const enum index item_icmp6_nd_ns[] = {
869 ITEM_ICMP6_ND_NS_TARGET_ADDR,
874 static const enum index item_icmp6_nd_na[] = {
875 ITEM_ICMP6_ND_NA_TARGET_ADDR,
880 static const enum index item_icmp6_nd_opt[] = {
881 ITEM_ICMP6_ND_OPT_TYPE,
886 static const enum index item_icmp6_nd_opt_sla_eth[] = {
887 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
892 static const enum index item_icmp6_nd_opt_tla_eth[] = {
893 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
898 static const enum index item_meta[] = {
904 static const enum index next_action[] = {
920 ACTION_OF_SET_MPLS_TTL,
921 ACTION_OF_DEC_MPLS_TTL,
922 ACTION_OF_SET_NW_TTL,
923 ACTION_OF_DEC_NW_TTL,
924 ACTION_OF_COPY_TTL_OUT,
925 ACTION_OF_COPY_TTL_IN,
928 ACTION_OF_SET_VLAN_VID,
929 ACTION_OF_SET_VLAN_PCP,
938 ACTION_MPLSOGRE_ENCAP,
939 ACTION_MPLSOGRE_DECAP,
940 ACTION_MPLSOUDP_ENCAP,
941 ACTION_MPLSOUDP_DECAP,
962 static const enum index action_mark[] = {
968 static const enum index action_queue[] = {
974 static const enum index action_count[] = {
981 static const enum index action_rss[] = {
992 static const enum index action_vf[] = {
999 static const enum index action_phy_port[] = {
1000 ACTION_PHY_PORT_ORIGINAL,
1001 ACTION_PHY_PORT_INDEX,
1006 static const enum index action_port_id[] = {
1007 ACTION_PORT_ID_ORIGINAL,
1013 static const enum index action_meter[] = {
1019 static const enum index action_of_set_mpls_ttl[] = {
1020 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1025 static const enum index action_of_set_nw_ttl[] = {
1026 ACTION_OF_SET_NW_TTL_NW_TTL,
1031 static const enum index action_of_push_vlan[] = {
1032 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1037 static const enum index action_of_set_vlan_vid[] = {
1038 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1043 static const enum index action_of_set_vlan_pcp[] = {
1044 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1049 static const enum index action_of_pop_mpls[] = {
1050 ACTION_OF_POP_MPLS_ETHERTYPE,
1055 static const enum index action_of_push_mpls[] = {
1056 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1061 static const enum index action_set_ipv4_src[] = {
1062 ACTION_SET_IPV4_SRC_IPV4_SRC,
1067 static const enum index action_set_mac_src[] = {
1068 ACTION_SET_MAC_SRC_MAC_SRC,
1073 static const enum index action_set_ipv4_dst[] = {
1074 ACTION_SET_IPV4_DST_IPV4_DST,
1079 static const enum index action_set_ipv6_src[] = {
1080 ACTION_SET_IPV6_SRC_IPV6_SRC,
1085 static const enum index action_set_ipv6_dst[] = {
1086 ACTION_SET_IPV6_DST_IPV6_DST,
1091 static const enum index action_set_tp_src[] = {
1092 ACTION_SET_TP_SRC_TP_SRC,
1097 static const enum index action_set_tp_dst[] = {
1098 ACTION_SET_TP_DST_TP_DST,
1103 static const enum index action_set_ttl[] = {
1109 static const enum index action_jump[] = {
1115 static const enum index action_set_mac_dst[] = {
1116 ACTION_SET_MAC_DST_MAC_DST,
1121 static const enum index action_inc_tcp_seq[] = {
1122 ACTION_INC_TCP_SEQ_VALUE,
1127 static const enum index action_dec_tcp_seq[] = {
1128 ACTION_DEC_TCP_SEQ_VALUE,
1133 static const enum index action_inc_tcp_ack[] = {
1134 ACTION_INC_TCP_ACK_VALUE,
1139 static const enum index action_dec_tcp_ack[] = {
1140 ACTION_DEC_TCP_ACK_VALUE,
1145 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1146 const char *, unsigned int,
1147 void *, unsigned int);
1148 static int parse_set_init(struct context *, const struct token *,
1149 const char *, unsigned int,
1150 void *, unsigned int);
1151 static int parse_init(struct context *, const struct token *,
1152 const char *, unsigned int,
1153 void *, unsigned int);
1154 static int parse_vc(struct context *, const struct token *,
1155 const char *, unsigned int,
1156 void *, unsigned int);
1157 static int parse_vc_spec(struct context *, const struct token *,
1158 const char *, unsigned int, void *, unsigned int);
1159 static int parse_vc_conf(struct context *, const struct token *,
1160 const char *, unsigned int, void *, unsigned int);
1161 static int parse_vc_action_rss(struct context *, const struct token *,
1162 const char *, unsigned int, void *,
1164 static int parse_vc_action_rss_func(struct context *, const struct token *,
1165 const char *, unsigned int, void *,
1167 static int parse_vc_action_rss_type(struct context *, const struct token *,
1168 const char *, unsigned int, void *,
1170 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1171 const char *, unsigned int, void *,
1173 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1174 const char *, unsigned int, void *,
1176 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1177 const char *, unsigned int, void *,
1179 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1180 const char *, unsigned int, void *,
1182 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1183 const char *, unsigned int, void *,
1185 static int parse_vc_action_mplsogre_encap(struct context *,
1186 const struct token *, const char *,
1187 unsigned int, void *, unsigned int);
1188 static int parse_vc_action_mplsogre_decap(struct context *,
1189 const struct token *, const char *,
1190 unsigned int, void *, unsigned int);
1191 static int parse_vc_action_mplsoudp_encap(struct context *,
1192 const struct token *, const char *,
1193 unsigned int, void *, unsigned int);
1194 static int parse_vc_action_mplsoudp_decap(struct context *,
1195 const struct token *, const char *,
1196 unsigned int, void *, unsigned int);
1197 static int parse_vc_action_raw_encap(struct context *,
1198 const struct token *, const char *,
1199 unsigned int, void *, unsigned int);
1200 static int parse_vc_action_raw_decap(struct context *,
1201 const struct token *, const char *,
1202 unsigned int, void *, unsigned int);
1203 static int parse_destroy(struct context *, const struct token *,
1204 const char *, unsigned int,
1205 void *, unsigned int);
1206 static int parse_flush(struct context *, const struct token *,
1207 const char *, unsigned int,
1208 void *, unsigned int);
1209 static int parse_query(struct context *, const struct token *,
1210 const char *, unsigned int,
1211 void *, unsigned int);
1212 static int parse_action(struct context *, const struct token *,
1213 const char *, unsigned int,
1214 void *, unsigned int);
1215 static int parse_list(struct context *, const struct token *,
1216 const char *, unsigned int,
1217 void *, unsigned int);
1218 static int parse_isolate(struct context *, const struct token *,
1219 const char *, unsigned int,
1220 void *, unsigned int);
1221 static int parse_int(struct context *, const struct token *,
1222 const char *, unsigned int,
1223 void *, unsigned int);
1224 static int parse_prefix(struct context *, const struct token *,
1225 const char *, unsigned int,
1226 void *, unsigned int);
1227 static int parse_boolean(struct context *, const struct token *,
1228 const char *, unsigned int,
1229 void *, unsigned int);
1230 static int parse_string(struct context *, const struct token *,
1231 const char *, unsigned int,
1232 void *, unsigned int);
1233 static int parse_hex(struct context *ctx, const struct token *token,
1234 const char *str, unsigned int len,
1235 void *buf, unsigned int size);
1236 static int parse_mac_addr(struct context *, const struct token *,
1237 const char *, unsigned int,
1238 void *, unsigned int);
1239 static int parse_ipv4_addr(struct context *, const struct token *,
1240 const char *, unsigned int,
1241 void *, unsigned int);
1242 static int parse_ipv6_addr(struct context *, const struct token *,
1243 const char *, unsigned int,
1244 void *, unsigned int);
1245 static int parse_port(struct context *, const struct token *,
1246 const char *, unsigned int,
1247 void *, unsigned int);
1248 static int comp_none(struct context *, const struct token *,
1249 unsigned int, char *, unsigned int);
1250 static int comp_boolean(struct context *, const struct token *,
1251 unsigned int, char *, unsigned int);
1252 static int comp_action(struct context *, const struct token *,
1253 unsigned int, char *, unsigned int);
1254 static int comp_port(struct context *, const struct token *,
1255 unsigned int, char *, unsigned int);
1256 static int comp_rule_id(struct context *, const struct token *,
1257 unsigned int, char *, unsigned int);
1258 static int comp_vc_action_rss_type(struct context *, const struct token *,
1259 unsigned int, char *, unsigned int);
1260 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1261 unsigned int, char *, unsigned int);
1263 /** Token definitions. */
1264 static const struct token token_list[] = {
1265 /* Special tokens. */
1268 .help = "null entry, abused as the entry point",
1269 .next = NEXT(NEXT_ENTRY(FLOW)),
1274 .help = "command may end here",
1277 .name = "START_SET",
1278 .help = "null entry, abused as the entry point for set",
1279 .next = NEXT(NEXT_ENTRY(SET)),
1284 .help = "set command may end here",
1286 /* Common tokens. */
1290 .help = "integer value",
1295 .name = "{unsigned}",
1297 .help = "unsigned integer value",
1304 .help = "prefix length for bit-mask",
1305 .call = parse_prefix,
1309 .name = "{boolean}",
1311 .help = "any boolean value",
1312 .call = parse_boolean,
1313 .comp = comp_boolean,
1318 .help = "fixed string",
1319 .call = parse_string,
1325 .help = "fixed string",
1330 .name = "{MAC address}",
1332 .help = "standard MAC address notation",
1333 .call = parse_mac_addr,
1337 .name = "{IPv4 address}",
1338 .type = "IPV4 ADDRESS",
1339 .help = "standard IPv4 address notation",
1340 .call = parse_ipv4_addr,
1344 .name = "{IPv6 address}",
1345 .type = "IPV6 ADDRESS",
1346 .help = "standard IPv6 address notation",
1347 .call = parse_ipv6_addr,
1351 .name = "{rule id}",
1353 .help = "rule identifier",
1355 .comp = comp_rule_id,
1358 .name = "{port_id}",
1360 .help = "port identifier",
1365 .name = "{group_id}",
1367 .help = "group identifier",
1371 [PRIORITY_LEVEL] = {
1374 .help = "priority level",
1378 /* Top-level command. */
1381 .type = "{command} {port_id} [{arg} [...]]",
1382 .help = "manage ingress/egress flow rules",
1383 .next = NEXT(NEXT_ENTRY
1393 /* Sub-level commands. */
1396 .help = "check whether a flow rule can be created",
1397 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1398 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1403 .help = "create a flow rule",
1404 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1405 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1410 .help = "destroy specific flow rules",
1411 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1412 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1413 .call = parse_destroy,
1417 .help = "destroy all flow rules",
1418 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1419 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1420 .call = parse_flush,
1424 .help = "query an existing flow rule",
1425 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1426 NEXT_ENTRY(RULE_ID),
1427 NEXT_ENTRY(PORT_ID)),
1428 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1429 ARGS_ENTRY(struct buffer, args.query.rule),
1430 ARGS_ENTRY(struct buffer, port)),
1431 .call = parse_query,
1435 .help = "list existing flow rules",
1436 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1437 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1442 .help = "restrict ingress traffic to the defined flow rules",
1443 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1444 NEXT_ENTRY(PORT_ID)),
1445 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1446 ARGS_ENTRY(struct buffer, port)),
1447 .call = parse_isolate,
1449 /* Destroy arguments. */
1452 .help = "specify a rule identifier",
1453 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1454 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1455 .call = parse_destroy,
1457 /* Query arguments. */
1461 .help = "action to query, must be part of the rule",
1462 .call = parse_action,
1463 .comp = comp_action,
1465 /* List arguments. */
1468 .help = "specify a group",
1469 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1470 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1473 /* Validate/create attributes. */
1476 .help = "specify a group",
1477 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1478 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1483 .help = "specify a priority level",
1484 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1485 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1490 .help = "affect rule to ingress",
1491 .next = NEXT(next_vc_attr),
1496 .help = "affect rule to egress",
1497 .next = NEXT(next_vc_attr),
1502 .help = "apply rule directly to endpoints found in pattern",
1503 .next = NEXT(next_vc_attr),
1506 /* Validate/create pattern. */
1509 .help = "submit a list of pattern items",
1510 .next = NEXT(next_item),
1515 .help = "match value perfectly (with full bit-mask)",
1516 .call = parse_vc_spec,
1518 [ITEM_PARAM_SPEC] = {
1520 .help = "match value according to configured bit-mask",
1521 .call = parse_vc_spec,
1523 [ITEM_PARAM_LAST] = {
1525 .help = "specify upper bound to establish a range",
1526 .call = parse_vc_spec,
1528 [ITEM_PARAM_MASK] = {
1530 .help = "specify bit-mask with relevant bits set to one",
1531 .call = parse_vc_spec,
1533 [ITEM_PARAM_PREFIX] = {
1535 .help = "generate bit-mask from a prefix length",
1536 .call = parse_vc_spec,
1540 .help = "specify next pattern item",
1541 .next = NEXT(next_item),
1545 .help = "end list of pattern items",
1546 .priv = PRIV_ITEM(END, 0),
1547 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1552 .help = "no-op pattern item",
1553 .priv = PRIV_ITEM(VOID, 0),
1554 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1559 .help = "perform actions when pattern does not match",
1560 .priv = PRIV_ITEM(INVERT, 0),
1561 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1566 .help = "match any protocol for the current layer",
1567 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1568 .next = NEXT(item_any),
1573 .help = "number of layers covered",
1574 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1575 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1579 .help = "match traffic from/to the physical function",
1580 .priv = PRIV_ITEM(PF, 0),
1581 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1586 .help = "match traffic from/to a virtual function ID",
1587 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1588 .next = NEXT(item_vf),
1594 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1595 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1599 .help = "match traffic from/to a specific physical port",
1600 .priv = PRIV_ITEM(PHY_PORT,
1601 sizeof(struct rte_flow_item_phy_port)),
1602 .next = NEXT(item_phy_port),
1605 [ITEM_PHY_PORT_INDEX] = {
1607 .help = "physical port index",
1608 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1609 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1613 .help = "match traffic from/to a given DPDK port ID",
1614 .priv = PRIV_ITEM(PORT_ID,
1615 sizeof(struct rte_flow_item_port_id)),
1616 .next = NEXT(item_port_id),
1619 [ITEM_PORT_ID_ID] = {
1621 .help = "DPDK port ID",
1622 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1623 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1627 .help = "match traffic against value set in previously matched rule",
1628 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1629 .next = NEXT(item_mark),
1634 .help = "Integer value to match against",
1635 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1636 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1640 .help = "match an arbitrary byte string",
1641 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1642 .next = NEXT(item_raw),
1645 [ITEM_RAW_RELATIVE] = {
1647 .help = "look for pattern after the previous item",
1648 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1649 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1652 [ITEM_RAW_SEARCH] = {
1654 .help = "search pattern from offset (see also limit)",
1655 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1656 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1659 [ITEM_RAW_OFFSET] = {
1661 .help = "absolute or relative offset for pattern",
1662 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1663 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1665 [ITEM_RAW_LIMIT] = {
1667 .help = "search area limit for start of pattern",
1668 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1669 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1671 [ITEM_RAW_PATTERN] = {
1673 .help = "byte string to look for",
1674 .next = NEXT(item_raw,
1676 NEXT_ENTRY(ITEM_PARAM_IS,
1679 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1680 ARGS_ENTRY(struct rte_flow_item_raw, length),
1681 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1682 ITEM_RAW_PATTERN_SIZE)),
1686 .help = "match Ethernet header",
1687 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1688 .next = NEXT(item_eth),
1693 .help = "destination MAC",
1694 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1695 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1699 .help = "source MAC",
1700 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1701 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1705 .help = "EtherType",
1706 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1707 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1711 .help = "match 802.1Q/ad VLAN tag",
1712 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1713 .next = NEXT(item_vlan),
1718 .help = "tag control information",
1719 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1720 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1724 .help = "priority code point",
1725 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1726 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1731 .help = "drop eligible indicator",
1732 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1733 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1738 .help = "VLAN identifier",
1739 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1740 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1743 [ITEM_VLAN_INNER_TYPE] = {
1744 .name = "inner_type",
1745 .help = "inner EtherType",
1746 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1747 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1752 .help = "match IPv4 header",
1753 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1754 .next = NEXT(item_ipv4),
1759 .help = "type of service",
1760 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1761 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1762 hdr.type_of_service)),
1766 .help = "time to live",
1767 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1768 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1771 [ITEM_IPV4_PROTO] = {
1773 .help = "next protocol ID",
1774 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1775 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1776 hdr.next_proto_id)),
1780 .help = "source address",
1781 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1782 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1787 .help = "destination address",
1788 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1789 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1794 .help = "match IPv6 header",
1795 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1796 .next = NEXT(item_ipv6),
1801 .help = "traffic class",
1802 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1803 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1805 "\x0f\xf0\x00\x00")),
1807 [ITEM_IPV6_FLOW] = {
1809 .help = "flow label",
1810 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1811 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1813 "\x00\x0f\xff\xff")),
1815 [ITEM_IPV6_PROTO] = {
1817 .help = "protocol (next header)",
1818 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1819 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1824 .help = "hop limit",
1825 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1826 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1831 .help = "source address",
1832 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1833 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1838 .help = "destination address",
1839 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1840 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1845 .help = "match ICMP header",
1846 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1847 .next = NEXT(item_icmp),
1850 [ITEM_ICMP_TYPE] = {
1852 .help = "ICMP packet type",
1853 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1854 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1857 [ITEM_ICMP_CODE] = {
1859 .help = "ICMP packet code",
1860 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1861 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1866 .help = "match UDP header",
1867 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1868 .next = NEXT(item_udp),
1873 .help = "UDP source port",
1874 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1875 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1880 .help = "UDP destination port",
1881 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1882 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1887 .help = "match TCP header",
1888 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1889 .next = NEXT(item_tcp),
1894 .help = "TCP source port",
1895 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1896 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1901 .help = "TCP destination port",
1902 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1903 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1906 [ITEM_TCP_FLAGS] = {
1908 .help = "TCP flags",
1909 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1910 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1915 .help = "match SCTP header",
1916 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1917 .next = NEXT(item_sctp),
1922 .help = "SCTP source port",
1923 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1924 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1929 .help = "SCTP destination port",
1930 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1931 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1936 .help = "validation tag",
1937 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1938 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1941 [ITEM_SCTP_CKSUM] = {
1944 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1945 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1950 .help = "match VXLAN header",
1951 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1952 .next = NEXT(item_vxlan),
1955 [ITEM_VXLAN_VNI] = {
1957 .help = "VXLAN identifier",
1958 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1959 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1963 .help = "match E-Tag header",
1964 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1965 .next = NEXT(item_e_tag),
1968 [ITEM_E_TAG_GRP_ECID_B] = {
1969 .name = "grp_ecid_b",
1970 .help = "GRP and E-CID base",
1971 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1972 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1978 .help = "match NVGRE header",
1979 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1980 .next = NEXT(item_nvgre),
1983 [ITEM_NVGRE_TNI] = {
1985 .help = "virtual subnet ID",
1986 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1987 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1991 .help = "match MPLS header",
1992 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1993 .next = NEXT(item_mpls),
1996 [ITEM_MPLS_LABEL] = {
1998 .help = "MPLS label",
1999 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2000 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2006 .help = "MPLS Traffic Class",
2007 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2008 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2014 .help = "MPLS Bottom-of-Stack",
2015 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2016 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2022 .help = "match GRE header",
2023 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2024 .next = NEXT(item_gre),
2027 [ITEM_GRE_PROTO] = {
2029 .help = "GRE protocol type",
2030 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2031 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2034 [ITEM_GRE_C_RSVD0_VER] = {
2035 .name = "c_rsvd0_ver",
2037 "checksum (1b), undefined (1b), key bit (1b),"
2038 " sequence number (1b), reserved 0 (9b),"
2040 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2041 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2044 [ITEM_GRE_C_BIT] = {
2046 .help = "checksum bit (C)",
2047 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2048 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2050 "\x80\x00\x00\x00")),
2052 [ITEM_GRE_S_BIT] = {
2054 .help = "sequence number bit (S)",
2055 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2056 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2058 "\x10\x00\x00\x00")),
2060 [ITEM_GRE_K_BIT] = {
2062 .help = "key bit (K)",
2063 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2064 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2066 "\x20\x00\x00\x00")),
2070 .help = "fuzzy pattern match, expect faster than default",
2071 .priv = PRIV_ITEM(FUZZY,
2072 sizeof(struct rte_flow_item_fuzzy)),
2073 .next = NEXT(item_fuzzy),
2076 [ITEM_FUZZY_THRESH] = {
2078 .help = "match accuracy threshold",
2079 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2080 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2085 .help = "match GTP header",
2086 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2087 .next = NEXT(item_gtp),
2092 .help = "tunnel endpoint identifier",
2093 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2094 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2098 .help = "match GTP header",
2099 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2100 .next = NEXT(item_gtp),
2105 .help = "match GTP header",
2106 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2107 .next = NEXT(item_gtp),
2112 .help = "match GENEVE header",
2113 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2114 .next = NEXT(item_geneve),
2117 [ITEM_GENEVE_VNI] = {
2119 .help = "virtual network identifier",
2120 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2121 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2123 [ITEM_GENEVE_PROTO] = {
2125 .help = "GENEVE protocol type",
2126 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2127 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2130 [ITEM_VXLAN_GPE] = {
2131 .name = "vxlan-gpe",
2132 .help = "match VXLAN-GPE header",
2133 .priv = PRIV_ITEM(VXLAN_GPE,
2134 sizeof(struct rte_flow_item_vxlan_gpe)),
2135 .next = NEXT(item_vxlan_gpe),
2138 [ITEM_VXLAN_GPE_VNI] = {
2140 .help = "VXLAN-GPE identifier",
2141 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2142 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2145 [ITEM_ARP_ETH_IPV4] = {
2146 .name = "arp_eth_ipv4",
2147 .help = "match ARP header for Ethernet/IPv4",
2148 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2149 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2150 .next = NEXT(item_arp_eth_ipv4),
2153 [ITEM_ARP_ETH_IPV4_SHA] = {
2155 .help = "sender hardware address",
2156 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2161 [ITEM_ARP_ETH_IPV4_SPA] = {
2163 .help = "sender IPv4 address",
2164 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2166 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2169 [ITEM_ARP_ETH_IPV4_THA] = {
2171 .help = "target hardware address",
2172 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2174 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2177 [ITEM_ARP_ETH_IPV4_TPA] = {
2179 .help = "target IPv4 address",
2180 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2182 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2187 .help = "match presence of any IPv6 extension header",
2188 .priv = PRIV_ITEM(IPV6_EXT,
2189 sizeof(struct rte_flow_item_ipv6_ext)),
2190 .next = NEXT(item_ipv6_ext),
2193 [ITEM_IPV6_EXT_NEXT_HDR] = {
2195 .help = "next header",
2196 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2197 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2202 .help = "match any ICMPv6 header",
2203 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2204 .next = NEXT(item_icmp6),
2207 [ITEM_ICMP6_TYPE] = {
2209 .help = "ICMPv6 type",
2210 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2211 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2214 [ITEM_ICMP6_CODE] = {
2216 .help = "ICMPv6 code",
2217 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2218 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2221 [ITEM_ICMP6_ND_NS] = {
2222 .name = "icmp6_nd_ns",
2223 .help = "match ICMPv6 neighbor discovery solicitation",
2224 .priv = PRIV_ITEM(ICMP6_ND_NS,
2225 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2226 .next = NEXT(item_icmp6_nd_ns),
2229 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2230 .name = "target_addr",
2231 .help = "target address",
2232 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2234 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2237 [ITEM_ICMP6_ND_NA] = {
2238 .name = "icmp6_nd_na",
2239 .help = "match ICMPv6 neighbor discovery advertisement",
2240 .priv = PRIV_ITEM(ICMP6_ND_NA,
2241 sizeof(struct rte_flow_item_icmp6_nd_na)),
2242 .next = NEXT(item_icmp6_nd_na),
2245 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2246 .name = "target_addr",
2247 .help = "target address",
2248 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2250 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2253 [ITEM_ICMP6_ND_OPT] = {
2254 .name = "icmp6_nd_opt",
2255 .help = "match presence of any ICMPv6 neighbor discovery"
2257 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2258 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2259 .next = NEXT(item_icmp6_nd_opt),
2262 [ITEM_ICMP6_ND_OPT_TYPE] = {
2264 .help = "ND option type",
2265 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2270 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2271 .name = "icmp6_nd_opt_sla_eth",
2272 .help = "match ICMPv6 neighbor discovery source Ethernet"
2273 " link-layer address option",
2275 (ICMP6_ND_OPT_SLA_ETH,
2276 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2277 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2280 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2282 .help = "source Ethernet LLA",
2283 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2285 .args = ARGS(ARGS_ENTRY_HTON
2286 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2288 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2289 .name = "icmp6_nd_opt_tla_eth",
2290 .help = "match ICMPv6 neighbor discovery target Ethernet"
2291 " link-layer address option",
2293 (ICMP6_ND_OPT_TLA_ETH,
2294 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2295 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2298 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2300 .help = "target Ethernet LLA",
2301 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2303 .args = ARGS(ARGS_ENTRY_HTON
2304 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2308 .help = "match metadata header",
2309 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2310 .next = NEXT(item_meta),
2313 [ITEM_META_DATA] = {
2315 .help = "metadata value",
2316 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2317 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2318 data, "\xff\xff\xff\xff")),
2322 .help = "match GRE key",
2323 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2324 .next = NEXT(item_gre_key),
2327 [ITEM_GRE_KEY_VALUE] = {
2329 .help = "key value",
2330 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2331 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2334 /* Validate/create actions. */
2337 .help = "submit a list of associated actions",
2338 .next = NEXT(next_action),
2343 .help = "specify next action",
2344 .next = NEXT(next_action),
2348 .help = "end list of actions",
2349 .priv = PRIV_ACTION(END, 0),
2354 .help = "no-op action",
2355 .priv = PRIV_ACTION(VOID, 0),
2356 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2359 [ACTION_PASSTHRU] = {
2361 .help = "let subsequent rule process matched packets",
2362 .priv = PRIV_ACTION(PASSTHRU, 0),
2363 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2368 .help = "redirect traffic to a given group",
2369 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2370 .next = NEXT(action_jump),
2373 [ACTION_JUMP_GROUP] = {
2375 .help = "group to redirect traffic to",
2376 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2377 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2378 .call = parse_vc_conf,
2382 .help = "attach 32 bit value to packets",
2383 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2384 .next = NEXT(action_mark),
2387 [ACTION_MARK_ID] = {
2389 .help = "32 bit value to return with packets",
2390 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2391 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2392 .call = parse_vc_conf,
2396 .help = "flag packets",
2397 .priv = PRIV_ACTION(FLAG, 0),
2398 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2403 .help = "assign packets to a given queue index",
2404 .priv = PRIV_ACTION(QUEUE,
2405 sizeof(struct rte_flow_action_queue)),
2406 .next = NEXT(action_queue),
2409 [ACTION_QUEUE_INDEX] = {
2411 .help = "queue index to use",
2412 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2413 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2414 .call = parse_vc_conf,
2418 .help = "drop packets (note: passthru has priority)",
2419 .priv = PRIV_ACTION(DROP, 0),
2420 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2425 .help = "enable counters for this rule",
2426 .priv = PRIV_ACTION(COUNT,
2427 sizeof(struct rte_flow_action_count)),
2428 .next = NEXT(action_count),
2431 [ACTION_COUNT_ID] = {
2432 .name = "identifier",
2433 .help = "counter identifier to use",
2434 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2435 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2436 .call = parse_vc_conf,
2438 [ACTION_COUNT_SHARED] = {
2440 .help = "shared counter",
2441 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2442 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2444 .call = parse_vc_conf,
2448 .help = "spread packets among several queues",
2449 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2450 .next = NEXT(action_rss),
2451 .call = parse_vc_action_rss,
2453 [ACTION_RSS_FUNC] = {
2455 .help = "RSS hash function to apply",
2456 .next = NEXT(action_rss,
2457 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2458 ACTION_RSS_FUNC_TOEPLITZ,
2459 ACTION_RSS_FUNC_SIMPLE_XOR)),
2461 [ACTION_RSS_FUNC_DEFAULT] = {
2463 .help = "default hash function",
2464 .call = parse_vc_action_rss_func,
2466 [ACTION_RSS_FUNC_TOEPLITZ] = {
2468 .help = "Toeplitz hash function",
2469 .call = parse_vc_action_rss_func,
2471 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2472 .name = "simple_xor",
2473 .help = "simple XOR hash function",
2474 .call = parse_vc_action_rss_func,
2476 [ACTION_RSS_LEVEL] = {
2478 .help = "encapsulation level for \"types\"",
2479 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2480 .args = ARGS(ARGS_ENTRY_ARB
2481 (offsetof(struct action_rss_data, conf) +
2482 offsetof(struct rte_flow_action_rss, level),
2483 sizeof(((struct rte_flow_action_rss *)0)->
2486 [ACTION_RSS_TYPES] = {
2488 .help = "specific RSS hash types",
2489 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2491 [ACTION_RSS_TYPE] = {
2493 .help = "RSS hash type",
2494 .call = parse_vc_action_rss_type,
2495 .comp = comp_vc_action_rss_type,
2497 [ACTION_RSS_KEY] = {
2499 .help = "RSS hash key",
2500 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2501 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2503 (offsetof(struct action_rss_data, conf) +
2504 offsetof(struct rte_flow_action_rss, key_len),
2505 sizeof(((struct rte_flow_action_rss *)0)->
2507 ARGS_ENTRY(struct action_rss_data, key)),
2509 [ACTION_RSS_KEY_LEN] = {
2511 .help = "RSS hash key length in bytes",
2512 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2513 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2514 (offsetof(struct action_rss_data, conf) +
2515 offsetof(struct rte_flow_action_rss, key_len),
2516 sizeof(((struct rte_flow_action_rss *)0)->
2519 RSS_HASH_KEY_LENGTH)),
2521 [ACTION_RSS_QUEUES] = {
2523 .help = "queue indices to use",
2524 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2525 .call = parse_vc_conf,
2527 [ACTION_RSS_QUEUE] = {
2529 .help = "queue index",
2530 .call = parse_vc_action_rss_queue,
2531 .comp = comp_vc_action_rss_queue,
2535 .help = "direct traffic to physical function",
2536 .priv = PRIV_ACTION(PF, 0),
2537 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2542 .help = "direct traffic to a virtual function ID",
2543 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2544 .next = NEXT(action_vf),
2547 [ACTION_VF_ORIGINAL] = {
2549 .help = "use original VF ID if possible",
2550 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2551 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2553 .call = parse_vc_conf,
2558 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2559 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2560 .call = parse_vc_conf,
2562 [ACTION_PHY_PORT] = {
2564 .help = "direct packets to physical port index",
2565 .priv = PRIV_ACTION(PHY_PORT,
2566 sizeof(struct rte_flow_action_phy_port)),
2567 .next = NEXT(action_phy_port),
2570 [ACTION_PHY_PORT_ORIGINAL] = {
2572 .help = "use original port index if possible",
2573 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2574 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2576 .call = parse_vc_conf,
2578 [ACTION_PHY_PORT_INDEX] = {
2580 .help = "physical port index",
2581 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2582 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2584 .call = parse_vc_conf,
2586 [ACTION_PORT_ID] = {
2588 .help = "direct matching traffic to a given DPDK port ID",
2589 .priv = PRIV_ACTION(PORT_ID,
2590 sizeof(struct rte_flow_action_port_id)),
2591 .next = NEXT(action_port_id),
2594 [ACTION_PORT_ID_ORIGINAL] = {
2596 .help = "use original DPDK port ID if possible",
2597 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2598 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2600 .call = parse_vc_conf,
2602 [ACTION_PORT_ID_ID] = {
2604 .help = "DPDK port ID",
2605 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2606 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2607 .call = parse_vc_conf,
2611 .help = "meter the directed packets at given id",
2612 .priv = PRIV_ACTION(METER,
2613 sizeof(struct rte_flow_action_meter)),
2614 .next = NEXT(action_meter),
2617 [ACTION_METER_ID] = {
2619 .help = "meter id to use",
2620 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2621 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2622 .call = parse_vc_conf,
2624 [ACTION_OF_SET_MPLS_TTL] = {
2625 .name = "of_set_mpls_ttl",
2626 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2629 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2630 .next = NEXT(action_of_set_mpls_ttl),
2633 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2636 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2637 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2639 .call = parse_vc_conf,
2641 [ACTION_OF_DEC_MPLS_TTL] = {
2642 .name = "of_dec_mpls_ttl",
2643 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2644 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2645 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2648 [ACTION_OF_SET_NW_TTL] = {
2649 .name = "of_set_nw_ttl",
2650 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2653 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2654 .next = NEXT(action_of_set_nw_ttl),
2657 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2660 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2661 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2663 .call = parse_vc_conf,
2665 [ACTION_OF_DEC_NW_TTL] = {
2666 .name = "of_dec_nw_ttl",
2667 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2668 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2669 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2672 [ACTION_OF_COPY_TTL_OUT] = {
2673 .name = "of_copy_ttl_out",
2674 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2675 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2676 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2679 [ACTION_OF_COPY_TTL_IN] = {
2680 .name = "of_copy_ttl_in",
2681 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2682 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2683 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2686 [ACTION_OF_POP_VLAN] = {
2687 .name = "of_pop_vlan",
2688 .help = "OpenFlow's OFPAT_POP_VLAN",
2689 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2690 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2693 [ACTION_OF_PUSH_VLAN] = {
2694 .name = "of_push_vlan",
2695 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2698 sizeof(struct rte_flow_action_of_push_vlan)),
2699 .next = NEXT(action_of_push_vlan),
2702 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2703 .name = "ethertype",
2704 .help = "EtherType",
2705 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2706 .args = ARGS(ARGS_ENTRY_HTON
2707 (struct rte_flow_action_of_push_vlan,
2709 .call = parse_vc_conf,
2711 [ACTION_OF_SET_VLAN_VID] = {
2712 .name = "of_set_vlan_vid",
2713 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2716 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2717 .next = NEXT(action_of_set_vlan_vid),
2720 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2723 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2724 .args = ARGS(ARGS_ENTRY_HTON
2725 (struct rte_flow_action_of_set_vlan_vid,
2727 .call = parse_vc_conf,
2729 [ACTION_OF_SET_VLAN_PCP] = {
2730 .name = "of_set_vlan_pcp",
2731 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2734 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2735 .next = NEXT(action_of_set_vlan_pcp),
2738 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2740 .help = "VLAN priority",
2741 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2742 .args = ARGS(ARGS_ENTRY_HTON
2743 (struct rte_flow_action_of_set_vlan_pcp,
2745 .call = parse_vc_conf,
2747 [ACTION_OF_POP_MPLS] = {
2748 .name = "of_pop_mpls",
2749 .help = "OpenFlow's OFPAT_POP_MPLS",
2750 .priv = PRIV_ACTION(OF_POP_MPLS,
2751 sizeof(struct rte_flow_action_of_pop_mpls)),
2752 .next = NEXT(action_of_pop_mpls),
2755 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2756 .name = "ethertype",
2757 .help = "EtherType",
2758 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2759 .args = ARGS(ARGS_ENTRY_HTON
2760 (struct rte_flow_action_of_pop_mpls,
2762 .call = parse_vc_conf,
2764 [ACTION_OF_PUSH_MPLS] = {
2765 .name = "of_push_mpls",
2766 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2769 sizeof(struct rte_flow_action_of_push_mpls)),
2770 .next = NEXT(action_of_push_mpls),
2773 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2774 .name = "ethertype",
2775 .help = "EtherType",
2776 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2777 .args = ARGS(ARGS_ENTRY_HTON
2778 (struct rte_flow_action_of_push_mpls,
2780 .call = parse_vc_conf,
2782 [ACTION_VXLAN_ENCAP] = {
2783 .name = "vxlan_encap",
2784 .help = "VXLAN encapsulation, uses configuration set by \"set"
2786 .priv = PRIV_ACTION(VXLAN_ENCAP,
2787 sizeof(struct action_vxlan_encap_data)),
2788 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2789 .call = parse_vc_action_vxlan_encap,
2791 [ACTION_VXLAN_DECAP] = {
2792 .name = "vxlan_decap",
2793 .help = "Performs a decapsulation action by stripping all"
2794 " headers of the VXLAN tunnel network overlay from the"
2796 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2797 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2800 [ACTION_NVGRE_ENCAP] = {
2801 .name = "nvgre_encap",
2802 .help = "NVGRE encapsulation, uses configuration set by \"set"
2804 .priv = PRIV_ACTION(NVGRE_ENCAP,
2805 sizeof(struct action_nvgre_encap_data)),
2806 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2807 .call = parse_vc_action_nvgre_encap,
2809 [ACTION_NVGRE_DECAP] = {
2810 .name = "nvgre_decap",
2811 .help = "Performs a decapsulation action by stripping all"
2812 " headers of the NVGRE tunnel network overlay from the"
2814 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2815 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2818 [ACTION_L2_ENCAP] = {
2820 .help = "l2 encap, uses configuration set by"
2821 " \"set l2_encap\"",
2822 .priv = PRIV_ACTION(RAW_ENCAP,
2823 sizeof(struct action_raw_encap_data)),
2824 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2825 .call = parse_vc_action_l2_encap,
2827 [ACTION_L2_DECAP] = {
2829 .help = "l2 decap, uses configuration set by"
2830 " \"set l2_decap\"",
2831 .priv = PRIV_ACTION(RAW_DECAP,
2832 sizeof(struct action_raw_decap_data)),
2833 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2834 .call = parse_vc_action_l2_decap,
2836 [ACTION_MPLSOGRE_ENCAP] = {
2837 .name = "mplsogre_encap",
2838 .help = "mplsogre encapsulation, uses configuration set by"
2839 " \"set mplsogre_encap\"",
2840 .priv = PRIV_ACTION(RAW_ENCAP,
2841 sizeof(struct action_raw_encap_data)),
2842 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2843 .call = parse_vc_action_mplsogre_encap,
2845 [ACTION_MPLSOGRE_DECAP] = {
2846 .name = "mplsogre_decap",
2847 .help = "mplsogre decapsulation, uses configuration set by"
2848 " \"set mplsogre_decap\"",
2849 .priv = PRIV_ACTION(RAW_DECAP,
2850 sizeof(struct action_raw_decap_data)),
2851 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2852 .call = parse_vc_action_mplsogre_decap,
2854 [ACTION_MPLSOUDP_ENCAP] = {
2855 .name = "mplsoudp_encap",
2856 .help = "mplsoudp encapsulation, uses configuration set by"
2857 " \"set mplsoudp_encap\"",
2858 .priv = PRIV_ACTION(RAW_ENCAP,
2859 sizeof(struct action_raw_encap_data)),
2860 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2861 .call = parse_vc_action_mplsoudp_encap,
2863 [ACTION_MPLSOUDP_DECAP] = {
2864 .name = "mplsoudp_decap",
2865 .help = "mplsoudp decapsulation, uses configuration set by"
2866 " \"set mplsoudp_decap\"",
2867 .priv = PRIV_ACTION(RAW_DECAP,
2868 sizeof(struct action_raw_decap_data)),
2869 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2870 .call = parse_vc_action_mplsoudp_decap,
2872 [ACTION_SET_IPV4_SRC] = {
2873 .name = "set_ipv4_src",
2874 .help = "Set a new IPv4 source address in the outermost"
2876 .priv = PRIV_ACTION(SET_IPV4_SRC,
2877 sizeof(struct rte_flow_action_set_ipv4)),
2878 .next = NEXT(action_set_ipv4_src),
2881 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2882 .name = "ipv4_addr",
2883 .help = "new IPv4 source address to set",
2884 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2885 .args = ARGS(ARGS_ENTRY_HTON
2886 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2887 .call = parse_vc_conf,
2889 [ACTION_SET_IPV4_DST] = {
2890 .name = "set_ipv4_dst",
2891 .help = "Set a new IPv4 destination address in the outermost"
2893 .priv = PRIV_ACTION(SET_IPV4_DST,
2894 sizeof(struct rte_flow_action_set_ipv4)),
2895 .next = NEXT(action_set_ipv4_dst),
2898 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2899 .name = "ipv4_addr",
2900 .help = "new IPv4 destination address to set",
2901 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2902 .args = ARGS(ARGS_ENTRY_HTON
2903 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2904 .call = parse_vc_conf,
2906 [ACTION_SET_IPV6_SRC] = {
2907 .name = "set_ipv6_src",
2908 .help = "Set a new IPv6 source address in the outermost"
2910 .priv = PRIV_ACTION(SET_IPV6_SRC,
2911 sizeof(struct rte_flow_action_set_ipv6)),
2912 .next = NEXT(action_set_ipv6_src),
2915 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2916 .name = "ipv6_addr",
2917 .help = "new IPv6 source address to set",
2918 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2919 .args = ARGS(ARGS_ENTRY_HTON
2920 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2921 .call = parse_vc_conf,
2923 [ACTION_SET_IPV6_DST] = {
2924 .name = "set_ipv6_dst",
2925 .help = "Set a new IPv6 destination address in the outermost"
2927 .priv = PRIV_ACTION(SET_IPV6_DST,
2928 sizeof(struct rte_flow_action_set_ipv6)),
2929 .next = NEXT(action_set_ipv6_dst),
2932 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2933 .name = "ipv6_addr",
2934 .help = "new IPv6 destination address to set",
2935 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2936 .args = ARGS(ARGS_ENTRY_HTON
2937 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2938 .call = parse_vc_conf,
2940 [ACTION_SET_TP_SRC] = {
2941 .name = "set_tp_src",
2942 .help = "set a new source port number in the outermost"
2944 .priv = PRIV_ACTION(SET_TP_SRC,
2945 sizeof(struct rte_flow_action_set_tp)),
2946 .next = NEXT(action_set_tp_src),
2949 [ACTION_SET_TP_SRC_TP_SRC] = {
2951 .help = "new source port number to set",
2952 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2953 .args = ARGS(ARGS_ENTRY_HTON
2954 (struct rte_flow_action_set_tp, port)),
2955 .call = parse_vc_conf,
2957 [ACTION_SET_TP_DST] = {
2958 .name = "set_tp_dst",
2959 .help = "set a new destination port number in the outermost"
2961 .priv = PRIV_ACTION(SET_TP_DST,
2962 sizeof(struct rte_flow_action_set_tp)),
2963 .next = NEXT(action_set_tp_dst),
2966 [ACTION_SET_TP_DST_TP_DST] = {
2968 .help = "new destination port number to set",
2969 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2970 .args = ARGS(ARGS_ENTRY_HTON
2971 (struct rte_flow_action_set_tp, port)),
2972 .call = parse_vc_conf,
2974 [ACTION_MAC_SWAP] = {
2976 .help = "Swap the source and destination MAC addresses"
2977 " in the outermost Ethernet header",
2978 .priv = PRIV_ACTION(MAC_SWAP, 0),
2979 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2982 [ACTION_DEC_TTL] = {
2984 .help = "decrease network TTL if available",
2985 .priv = PRIV_ACTION(DEC_TTL, 0),
2986 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2989 [ACTION_SET_TTL] = {
2991 .help = "set ttl value",
2992 .priv = PRIV_ACTION(SET_TTL,
2993 sizeof(struct rte_flow_action_set_ttl)),
2994 .next = NEXT(action_set_ttl),
2997 [ACTION_SET_TTL_TTL] = {
2998 .name = "ttl_value",
2999 .help = "new ttl value to set",
3000 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3001 .args = ARGS(ARGS_ENTRY_HTON
3002 (struct rte_flow_action_set_ttl, ttl_value)),
3003 .call = parse_vc_conf,
3005 [ACTION_SET_MAC_SRC] = {
3006 .name = "set_mac_src",
3007 .help = "set source mac address",
3008 .priv = PRIV_ACTION(SET_MAC_SRC,
3009 sizeof(struct rte_flow_action_set_mac)),
3010 .next = NEXT(action_set_mac_src),
3013 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3015 .help = "new source mac address",
3016 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3017 .args = ARGS(ARGS_ENTRY_HTON
3018 (struct rte_flow_action_set_mac, mac_addr)),
3019 .call = parse_vc_conf,
3021 [ACTION_SET_MAC_DST] = {
3022 .name = "set_mac_dst",
3023 .help = "set destination mac address",
3024 .priv = PRIV_ACTION(SET_MAC_DST,
3025 sizeof(struct rte_flow_action_set_mac)),
3026 .next = NEXT(action_set_mac_dst),
3029 [ACTION_SET_MAC_DST_MAC_DST] = {
3031 .help = "new destination mac address to set",
3032 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3033 .args = ARGS(ARGS_ENTRY_HTON
3034 (struct rte_flow_action_set_mac, mac_addr)),
3035 .call = parse_vc_conf,
3037 [ACTION_INC_TCP_SEQ] = {
3038 .name = "inc_tcp_seq",
3039 .help = "increase TCP sequence number",
3040 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3041 .next = NEXT(action_inc_tcp_seq),
3044 [ACTION_INC_TCP_SEQ_VALUE] = {
3046 .help = "the value to increase TCP sequence number by",
3047 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3048 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3049 .call = parse_vc_conf,
3051 [ACTION_DEC_TCP_SEQ] = {
3052 .name = "dec_tcp_seq",
3053 .help = "decrease TCP sequence number",
3054 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3055 .next = NEXT(action_dec_tcp_seq),
3058 [ACTION_DEC_TCP_SEQ_VALUE] = {
3060 .help = "the value to decrease TCP sequence number by",
3061 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3062 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3063 .call = parse_vc_conf,
3065 [ACTION_INC_TCP_ACK] = {
3066 .name = "inc_tcp_ack",
3067 .help = "increase TCP acknowledgment number",
3068 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3069 .next = NEXT(action_inc_tcp_ack),
3072 [ACTION_INC_TCP_ACK_VALUE] = {
3074 .help = "the value to increase TCP acknowledgment number by",
3075 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3076 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3077 .call = parse_vc_conf,
3079 [ACTION_DEC_TCP_ACK] = {
3080 .name = "dec_tcp_ack",
3081 .help = "decrease TCP acknowledgment number",
3082 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3083 .next = NEXT(action_dec_tcp_ack),
3086 [ACTION_DEC_TCP_ACK_VALUE] = {
3088 .help = "the value to decrease TCP acknowledgment number by",
3089 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3090 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3091 .call = parse_vc_conf,
3093 [ACTION_RAW_ENCAP] = {
3094 .name = "raw_encap",
3095 .help = "encapsulation data, defined by set raw_encap",
3096 .priv = PRIV_ACTION(RAW_ENCAP,
3097 sizeof(struct rte_flow_action_raw_encap)),
3098 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3099 .call = parse_vc_action_raw_encap,
3101 [ACTION_RAW_DECAP] = {
3102 .name = "raw_decap",
3103 .help = "decapsulation data, defined by set raw_encap",
3104 .priv = PRIV_ACTION(RAW_DECAP,
3105 sizeof(struct rte_flow_action_raw_decap)),
3106 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3107 .call = parse_vc_action_raw_decap,
3109 /* Top level command. */
3112 .help = "set raw encap/decap data",
3113 .type = "set raw_encap|raw_decap <pattern>",
3114 .next = NEXT(NEXT_ENTRY
3117 .call = parse_set_init,
3119 /* Sub-level commands. */
3121 .name = "raw_encap",
3122 .help = "set raw encap data",
3123 .next = NEXT(next_item),
3124 .call = parse_set_raw_encap_decap,
3127 .name = "raw_decap",
3128 .help = "set raw decap data",
3129 .next = NEXT(next_item),
3130 .call = parse_set_raw_encap_decap,
3134 /** Remove and return last entry from argument stack. */
3135 static const struct arg *
3136 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty, otherwise the popped top entry. */
3138 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3141 /** Add entry on top of the argument stack. */
3143 push_args(struct context *ctx, const struct arg *arg)
/* Overflow guard: refuse the push when the stack is already full.
 * NOTE(review): the return values are on lines not visible here —
 * presumably non-zero on overflow, zero on success; confirm. */
3145 if (ctx->args_num == CTX_STACK_SIZE)
3147 ctx->args[ctx->args_num++] = arg;
3151 /** Spread value into buffer according to bit-mask. */
3153 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
/* Walk the argument's mask bytes starting from its size. */
3155 uint32_t i = arg->size;
3163 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3172 unsigned int shift = 0;
/* Destination byte inside dst, adjusted by offset and byte order.
 * NOTE(review): `sub` is declared on a line not visible here. */
3173 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Copy bits of val only where the corresponding mask bits are set. */
3175 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3176 if (!(arg->mask[i] & (1 << shift)))
3181 *buf &= ~(1 << shift);
3182 *buf |= (val & 1) << shift;
3190 /** Compare a string with a partial one of a given length. */
3192 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3194 int r = strncmp(full, partial, partial_len);
/* Prefix matched: equal (0) when full is no longer than the partial. */
3198 if (strlen(full) <= partial_len)
/* Otherwise non-zero: the first unmatched character of full. */
3200 return full[partial_len];
3204  * Parse a prefix length and generate a bit-mask.
3206  * Last argument (ctx->args) is retrieved to determine mask size, storage
3207  * location and whether the result must use network byte ordering.
3210 parse_prefix(struct context *ctx, const struct token *token,
3211 const char *str, unsigned int len,
3212 void *buf, unsigned int size)
/* Consume the argument descriptor pushed by the preceding token. */
3214 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the n most-significant bits set (0 <= n <= 8). */
3215 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3222 /* Argument is expected. */
3226 u = strtoumax(str, &end, 0);
/* Reject parse errors and any trailing characters inside the token. */
3227 if (errno || (size_t)(end - str) != len)
/* Probe bit-field handling: NULL dst means "query only, do not write". */
3232 extra = arg_entry_bf_fill(NULL, 0, arg)
3241 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3242 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Prefix (plus any partial byte) must fit the destination field. */
3249 if (bytes > size || bytes + !!extra > size)
3253 buf = (uint8_t *)ctx->object + arg->offset;
3254 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host: most-significant mask bytes sit at the end. */
3256 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3257 memset(buf, 0x00, size - bytes);
3259 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian host: the mask starts at the beginning of the field. */
3263 memset(buf, 0xff, bytes);
3264 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3266 ((uint8_t *)buf)[bytes] = conv[extra];
/* Mark the whole field as significant in the object mask. */
3269 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): this push_args() is presumably the error path restoring
 * the descriptor for the caller — the surrounding lines are missing. */
3272 push_args(ctx, arg);
3276 /** Default parsing function for token name matching. */
3278 parse_default(struct context *ctx, const struct token *token,
3279 const char *str, unsigned int len,
3280 void *buf, unsigned int size)
/* Only the token name is checked here; buf/size are not used. */
3285 if (strcmp_partial(token->name, str, len))
3290 /** Parse flow command, initialize output buffer for subsequent tokens. */
3292 parse_init(struct context *ctx, const struct token *token,
3293 const char *str, unsigned int len,
3294 void *buf, unsigned int size)
3296 struct buffer *out = buf;
3298 /* Token name must match. */
3299 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3301 /* Nothing else to do if there is no buffer. */
3304 /* Make sure buffer is large enough. */
3305 if (size < sizeof(*out))
3307 /* Initialize buffer. */
3308 memset(out, 0x00, sizeof(*out));
/* Poison the spare room past the header so stale reads are detectable. */
3309 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3312 ctx->objmask = NULL;
3316 /** Parse tokens for validate/create commands. */
3318 parse_vc(struct context *ctx, const struct token *token,
3319 const char *str, unsigned int len,
3320 void *buf, unsigned int size)
3322 struct buffer *out = buf;
3326 /* Token name must match. */
3327 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3329 /* Nothing else to do if there is no buffer. */
/* First call: record which command (validate/create) owns the buffer. */
3332 if (!out->command) {
3333 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3335 if (sizeof(*out) > size)
3337 out->command = ctx->curr;
3340 ctx->objmask = NULL;
/* Variable-size item/action data grows downward from the end of buf. */
3341 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens below modify the flow attributes in place. */
3345 ctx->object = &out->args.vc.attr;
3346 ctx->objmask = NULL;
3347 switch (ctx->curr) {
3352 out->args.vc.attr.ingress = 1;
3355 out->args.vc.attr.egress = 1;
3358 out->args.vc.attr.transfer = 1;
/* PATTERN token: start the item array right after the header. */
3361 out->args.vc.pattern =
3362 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3364 ctx->object = out->args.vc.pattern;
3365 ctx->objmask = NULL;
/* ACTIONS token: the action array follows the parsed pattern items. */
3368 out->args.vc.actions =
3369 (void *)RTE_ALIGN_CEIL((uintptr_t)
3370 (out->args.vc.pattern +
3371 out->args.vc.pattern_n),
3373 ctx->object = out->args.vc.actions;
3374 ctx->objmask = NULL;
/* Otherwise the token is a pattern item or an action entry. */
3381 if (!out->args.vc.actions) {
3382 const struct parse_item_priv *priv = token->priv;
3383 struct rte_flow_item *item =
3384 out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve room for spec/last/mask copies of the item. */
3386 data_size = priv->size * 3; /* spec, last, mask */
3387 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3388 (out->args.vc.data - data_size),
/* Fail when the item array and the data area would collide. */
3390 if ((uint8_t *)item + sizeof(*item) > data)
3392 *item = (struct rte_flow_item){
3395 ++out->args.vc.pattern_n;
3397 ctx->objmask = NULL;
3399 const struct parse_action_priv *priv = token->priv;
3400 struct rte_flow_action *action =
3401 out->args.vc.actions + out->args.vc.actions_n;
3403 data_size = priv->size; /* configuration */
3404 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3405 (out->args.vc.data - data_size),
/* Same collision check for the action array. */
3407 if ((uint8_t *)action + sizeof(*action) > data)
3409 *action = (struct rte_flow_action){
3411 .conf = data_size ? data : NULL,
3413 ++out->args.vc.actions_n;
3414 ctx->object = action;
3415 ctx->objmask = NULL;
/* Zero the reserved configuration area and shrink the data region. */
3417 memset(data, 0, data_size);
3418 out->args.vc.data = data;
3419 ctx->objdata = data_size;
3423 /** Parse pattern item parameter type. */
3425 parse_vc_spec(struct context *ctx, const struct token *token,
3426 const char *str, unsigned int len,
3427 void *buf, unsigned int size)
3429 struct buffer *out = buf;
3430 struct rte_flow_item *item;
3436 /* Token name must match. */
3437 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3439 /* Parse parameter types. */
3440 switch (ctx->curr) {
3441 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3447 case ITEM_PARAM_SPEC:
3450 case ITEM_PARAM_LAST:
3453 case ITEM_PARAM_PREFIX:
3454 /* Modify next token to expect a prefix. */
3455 if (ctx->next_num < 2)
3457 ctx->next[ctx->next_num - 2] = prefix;
3459 case ITEM_PARAM_MASK:
3465 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach spec/last/mask to. */
3468 if (!out->args.vc.pattern_n)
3470 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* Data area was sized by parse_vc() as three equal slots. */
3471 data_size = ctx->objdata / 3; /* spec, last, mask */
3472 /* Point to selected object. */
3473 ctx->object = out->args.vc.data + (data_size * index);
3475 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3476 item->mask = ctx->objmask;
3478 ctx->objmask = NULL;
3479 /* Update relevant item pointer. */
3480 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3485 /** Parse action configuration field. */
3487 parse_vc_conf(struct context *ctx, const struct token *token,
3488 const char *str, unsigned int len,
3489 void *buf, unsigned int size)
3491 struct buffer *out = buf;
3494 /* Token name must match. */
3495 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3497 /* Nothing else to do if there is no buffer. */
3500 /* Point to selected object. */
/* Subsequent argument parsing writes into the action's config area. */
3501 ctx->object = out->args.vc.data;
3502 ctx->objmask = NULL;
3506 /** Parse RSS action. */
3508 parse_vc_action_rss(struct context *ctx, const struct token *token,
3509 const char *str, unsigned int len,
3510 void *buf, unsigned int size)
3512 struct buffer *out = buf;
3513 struct rte_flow_action *action;
3514 struct action_rss_data *action_rss_data;
/* Generic action bookkeeping (entry/config allocation) first. */
3518 ret = parse_vc(ctx, token, str, len, buf, size);
3521 /* Nothing else to do if there is no buffer. */
3524 if (!out->args.vc.actions_n)
3526 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3527 /* Point to selected object. */
3528 ctx->object = out->args.vc.data;
3529 ctx->objmask = NULL;
3530 /* Set up default configuration. */
3531 action_rss_data = ctx->object;
3532 *action_rss_data = (struct action_rss_data){
3533 .conf = (struct rte_flow_action_rss){
3534 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3537 .key_len = sizeof(action_rss_data->key),
3538 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3539 .key = action_rss_data->key,
3540 .queue = action_rss_data->queue,
3542 .key = "testpmd's default RSS hash key, "
3543 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3546 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3547 action_rss_data->queue[i] = i;
3548 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3549 ctx->port != (portid_t)RTE_PORT_ALL) {
3550 struct rte_eth_dev_info info;
/* Clamp key length to what the selected device supports. */
3552 rte_eth_dev_info_get(ctx->port, &info);
3553 action_rss_data->conf.key_len =
3554 RTE_MIN(sizeof(action_rss_data->key),
3555 info.hash_key_size);
3557 action->conf = &action_rss_data->conf;
3562  * Parse func field for RSS action.
3564  * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3565  * ACTION_RSS_FUNC_* index that called this function.
3568 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3569 const char *str, unsigned int len,
3570 void *buf, unsigned int size)
3572 struct action_rss_data *action_rss_data;
3573 enum rte_eth_hash_function func;
3577 /* Token name must match. */
3578 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the command token to the corresponding hash function. */
3580 switch (ctx->curr) {
3581 case ACTION_RSS_FUNC_DEFAULT:
3582 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3584 case ACTION_RSS_FUNC_TOEPLITZ:
3585 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3587 case ACTION_RSS_FUNC_SIMPLE_XOR:
3588 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3595 action_rss_data = ctx->object;
3596 action_rss_data->conf.func = func;
3601  * Parse type field for RSS action.
3603  * Valid tokens are type field names and the "end" token.
3606 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3607 const char *str, unsigned int len,
3608 void *buf, unsigned int size)
3610 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3611 struct action_rss_data *action_rss_data;
3617 if (ctx->curr != ACTION_RSS_TYPE)
/* High 16 bits of objdata flag "list started"; clear the type set on
 * the first entry only. */
3619 if (!(ctx->objdata >> 16) && ctx->object) {
3620 action_rss_data = ctx->object;
3621 action_rss_data->conf.types = 0;
/* "end" terminates the type list. */
3623 if (!strcmp_partial("end", str, len)) {
3624 ctx->objdata &= 0xffff;
/* Look the token up in the RSS type table. */
3627 for (i = 0; rss_type_table[i].str; ++i)
3628 if (!strcmp_partial(rss_type_table[i].str, str, len))
3630 if (!rss_type_table[i].str)
3632 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so further type names are accepted. */
3634 if (ctx->next_num == RTE_DIM(ctx->next))
3636 ctx->next[ctx->next_num++] = next;
3639 action_rss_data = ctx->object;
3640 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3645  * Parse queue field for RSS action.
3647  * Valid tokens are queue indices and the "end" token.
3650 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3651 const char *str, unsigned int len,
3652 void *buf, unsigned int size)
3654 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3655 struct action_rss_data *action_rss_data;
3656 const struct arg *arg;
3663 if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of objdata carry the count of queues parsed so far. */
3665 i = ctx->objdata >> 16;
/* "end" terminates the queue list. */
3666 if (!strcmp_partial("end", str, len)) {
3667 ctx->objdata &= 0xffff;
3670 if (i >= ACTION_RSS_QUEUE_NUM)
/* Arbitrary-offset argument pointing at the i-th queue slot. */
3672 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3673 i * sizeof(action_rss_data->queue[i]),
3674 sizeof(action_rss_data->queue[i]));
3675 if (push_args(ctx, arg))
3677 ret = parse_int(ctx, token, str, len, NULL, 0);
3683 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so further queue indices are accepted. */
3685 if (ctx->next_num == RTE_DIM(ctx->next))
3687 ctx->next[ctx->next_num++] = next;
3691 action_rss_data = ctx->object;
3692 action_rss_data->conf.queue_num = i;
/* An empty list is represented by a NULL queue pointer. */
3693 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3697 /** Parse VXLAN encap action. */
3699 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3700 const char *str, unsigned int len,
3701 void *buf, unsigned int size)
3703 struct buffer *out = buf;
3704 struct rte_flow_action *action;
3705 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Generic action bookkeeping (entry/config allocation) first. */
3708 ret = parse_vc(ctx, token, str, len, buf, size);
3711 /* Nothing else to do if there is no buffer. */
3714 if (!out->args.vc.actions_n)
3716 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3717 /* Point to selected object. */
3718 ctx->object = out->args.vc.data;
3719 ctx->objmask = NULL;
3720 /* Set up default configuration. */
/* Default tunnel definition: ETH / VLAN / IPV4 / UDP / VXLAN / END,
 * filled from the global "set vxlan" configuration. */
3721 action_vxlan_encap_data = ctx->object;
3722 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3723 .conf = (struct rte_flow_action_vxlan_encap){
3724 .definition = action_vxlan_encap_data->items,
3728 .type = RTE_FLOW_ITEM_TYPE_ETH,
3729 .spec = &action_vxlan_encap_data->item_eth,
3730 .mask = &rte_flow_item_eth_mask,
3733 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3734 .spec = &action_vxlan_encap_data->item_vlan,
3735 .mask = &rte_flow_item_vlan_mask,
3738 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3739 .spec = &action_vxlan_encap_data->item_ipv4,
3740 .mask = &rte_flow_item_ipv4_mask,
3743 .type = RTE_FLOW_ITEM_TYPE_UDP,
3744 .spec = &action_vxlan_encap_data->item_udp,
3745 .mask = &rte_flow_item_udp_mask,
3748 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3749 .spec = &action_vxlan_encap_data->item_vxlan,
3750 .mask = &rte_flow_item_vxlan_mask,
3753 .type = RTE_FLOW_ITEM_TYPE_END,
3758 .tci = vxlan_encap_conf.vlan_tci,
3762 .src_addr = vxlan_encap_conf.ipv4_src,
3763 .dst_addr = vxlan_encap_conf.ipv4_dst,
3766 .src_port = vxlan_encap_conf.udp_src,
3767 .dst_port = vxlan_encap_conf.udp_dst,
3769 .item_vxlan.flags = 0,
3771 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3772 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3773 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3774 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: swap item [2] from IPv4 to IPv6. */
3775 if (!vxlan_encap_conf.select_ipv4) {
3776 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3777 &vxlan_encap_conf.ipv6_src,
3778 sizeof(vxlan_encap_conf.ipv6_src));
3779 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3780 &vxlan_encap_conf.ipv6_dst,
3781 sizeof(vxlan_encap_conf.ipv6_dst));
3782 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3783 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3784 .spec = &action_vxlan_encap_data->item_ipv6,
3785 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN tag requested: neutralize item [1]. */
3788 if (!vxlan_encap_conf.select_vlan)
3789 action_vxlan_encap_data->items[1].type =
3790 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional TOS/TTL override: extend the IP mask to cover both fields. */
3791 if (vxlan_encap_conf.select_tos_ttl) {
3792 if (vxlan_encap_conf.select_ipv4) {
3793 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3795 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3796 sizeof(ipv4_mask_tos));
3797 ipv4_mask_tos.hdr.type_of_service = 0xff;
3798 ipv4_mask_tos.hdr.time_to_live = 0xff;
3799 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3800 vxlan_encap_conf.ip_tos;
3801 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3802 vxlan_encap_conf.ip_ttl;
3803 action_vxlan_encap_data->items[2].mask =
3806 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3808 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3809 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3810 ipv6_mask_tos.hdr.vtc_flow |=
3811 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3812 ipv6_mask_tos.hdr.hop_limits = 0xff;
3813 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3815 ((uint32_t)vxlan_encap_conf.ip_tos <<
3816 RTE_IPV6_HDR_TC_SHIFT);
3817 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3818 vxlan_encap_conf.ip_ttl;
3819 action_vxlan_encap_data->items[2].mask =
3823 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3824 RTE_DIM(vxlan_encap_conf.vni));
3825 action->conf = &action_vxlan_encap_data->conf;
3829 /** Parse NVGRE encap action. */
3831 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3832 const char *str, unsigned int len,
3833 void *buf, unsigned int size)
3835 struct buffer *out = buf;
3836 struct rte_flow_action *action;
3837 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Generic action bookkeeping (entry/config allocation) first. */
3840 ret = parse_vc(ctx, token, str, len, buf, size);
3843 /* Nothing else to do if there is no buffer. */
3846 if (!out->args.vc.actions_n)
3848 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3849 /* Point to selected object. */
3850 ctx->object = out->args.vc.data;
3851 ctx->objmask = NULL;
3852 /* Set up default configuration. */
/* Default tunnel definition: ETH / VLAN / IPV4 / NVGRE / END,
 * filled from the global "set nvgre" configuration. */
3853 action_nvgre_encap_data = ctx->object;
3854 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3855 .conf = (struct rte_flow_action_nvgre_encap){
3856 .definition = action_nvgre_encap_data->items,
3860 .type = RTE_FLOW_ITEM_TYPE_ETH,
3861 .spec = &action_nvgre_encap_data->item_eth,
3862 .mask = &rte_flow_item_eth_mask,
3865 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3866 .spec = &action_nvgre_encap_data->item_vlan,
3867 .mask = &rte_flow_item_vlan_mask,
3870 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3871 .spec = &action_nvgre_encap_data->item_ipv4,
3872 .mask = &rte_flow_item_ipv4_mask,
3875 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3876 .spec = &action_nvgre_encap_data->item_nvgre,
3877 .mask = &rte_flow_item_nvgre_mask,
3880 .type = RTE_FLOW_ITEM_TYPE_END,
3885 .tci = nvgre_encap_conf.vlan_tci,
3889 .src_addr = nvgre_encap_conf.ipv4_src,
3890 .dst_addr = nvgre_encap_conf.ipv4_dst,
3892 .item_nvgre.flow_id = 0,
3894 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3895 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3896 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3897 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: swap item [2] from IPv4 to IPv6. */
3898 if (!nvgre_encap_conf.select_ipv4) {
3899 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3900 &nvgre_encap_conf.ipv6_src,
3901 sizeof(nvgre_encap_conf.ipv6_src));
3902 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3903 &nvgre_encap_conf.ipv6_dst,
3904 sizeof(nvgre_encap_conf.ipv6_dst));
3905 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3906 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3907 .spec = &action_nvgre_encap_data->item_ipv6,
3908 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN tag requested: neutralize item [1]. */
3911 if (!nvgre_encap_conf.select_vlan)
3912 action_nvgre_encap_data->items[1].type =
3913 RTE_FLOW_ITEM_TYPE_VOID;
3914 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3915 RTE_DIM(nvgre_encap_conf.tni));
3916 action->conf = &action_nvgre_encap_data->conf;
3920 /** Parse l2 encap action. */
/* Builds a raw-encap blob: Ethernet header plus optional VLAN tag,
 * from the global "set l2_encap" configuration. */
3922 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3923 const char *str, unsigned int len,
3924 void *buf, unsigned int size)
3926 struct buffer *out = buf;
3927 struct rte_flow_action *action;
3928 struct action_raw_encap_data *action_encap_data;
3929 struct rte_flow_item_eth eth = { .type = 0, };
3930 struct rte_flow_item_vlan vlan = {
/* NOTE(review): reads mplsoudp_encap_conf rather than l2_encap_conf
 * for the VLAN TCI — looks like a copy/paste slip; confirm intended. */
3931 .tci = mplsoudp_encap_conf.vlan_tci,
3937 ret = parse_vc(ctx, token, str, len, buf, size);
3940 /* Nothing else to do if there is no buffer. */
3943 if (!out->args.vc.actions_n)
3945 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3946 /* Point to selected object. */
3947 ctx->object = out->args.vc.data;
3948 ctx->objmask = NULL;
3949 /* Copy the headers to the buffer. */
3950 action_encap_data = ctx->object;
3951 *action_encap_data = (struct action_raw_encap_data) {
3952 .conf = (struct rte_flow_action_raw_encap){
3953 .data = action_encap_data->data,
3957 header = action_encap_data->data;
/* Outer ethertype depends on whether a VLAN tag is inserted. */
3958 if (l2_encap_conf.select_vlan)
3959 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3960 else if (l2_encap_conf.select_ipv4)
3961 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3963 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3964 memcpy(eth.dst.addr_bytes,
3965 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3966 memcpy(eth.src.addr_bytes,
3967 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3968 memcpy(header, &eth, sizeof(eth));
3969 header += sizeof(eth);
3970 if (l2_encap_conf.select_vlan) {
3971 if (l2_encap_conf.select_ipv4)
3972 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3974 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3975 memcpy(header, &vlan, sizeof(vlan));
3976 header += sizeof(vlan);
/* Total size of the raw header blob built above. */
3978 action_encap_data->conf.size = header -
3979 action_encap_data->data;
3980 action->conf = &action_encap_data->conf;
3984 /** Parse l2 decap action. */
/* Builds a raw-decap template: Ethernet header plus optional VLAN tag,
 * describing the bytes to strip from the packet. */
3986 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3987 const char *str, unsigned int len,
3988 void *buf, unsigned int size)
3990 struct buffer *out = buf;
3991 struct rte_flow_action *action;
3992 struct action_raw_decap_data *action_decap_data;
3993 struct rte_flow_item_eth eth = { .type = 0, };
3994 struct rte_flow_item_vlan vlan = {
/* NOTE(review): reads mplsoudp_encap_conf rather than an l2_decap
 * setting for the VLAN TCI — looks like a copy/paste slip; confirm. */
3995 .tci = mplsoudp_encap_conf.vlan_tci,
4001 ret = parse_vc(ctx, token, str, len, buf, size);
4004 /* Nothing else to do if there is no buffer. */
4007 if (!out->args.vc.actions_n)
4009 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4010 /* Point to selected object. */
4011 ctx->object = out->args.vc.data;
4012 ctx->objmask = NULL;
4013 /* Copy the headers to the buffer. */
4014 action_decap_data = ctx->object;
4015 *action_decap_data = (struct action_raw_decap_data) {
4016 .conf = (struct rte_flow_action_raw_decap){
4017 .data = action_decap_data->data,
4021 header = action_decap_data->data;
4022 if (l2_decap_conf.select_vlan)
4023 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4024 memcpy(header, &eth, sizeof(eth));
4025 header += sizeof(eth);
4026 if (l2_decap_conf.select_vlan) {
4027 memcpy(header, &vlan, sizeof(vlan));
4028 header += sizeof(vlan);
/* Total size of the raw header template built above. */
4030 action_decap_data->conf.size = header -
4031 action_decap_data->data;
4032 action->conf = &action_decap_data->conf;
/* Ethertype for MPLS unicast (IANA-assigned 0x8847). */
4036 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4038 /** Parse MPLSOGRE encap action. */
/* Builds a raw-encap blob: ETH [/ VLAN] / IPv4-or-IPv6 / GRE / MPLS,
 * from the global "set mplsogre_encap" configuration. */
4040 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4041 const char *str, unsigned int len,
4042 void *buf, unsigned int size)
4044 struct buffer *out = buf;
4045 struct rte_flow_action *action;
4046 struct action_raw_encap_data *action_encap_data;
4047 struct rte_flow_item_eth eth = { .type = 0, };
4048 struct rte_flow_item_vlan vlan = {
4049 .tci = mplsogre_encap_conf.vlan_tci,
4052 struct rte_flow_item_ipv4 ipv4 = {
4054 .src_addr = mplsogre_encap_conf.ipv4_src,
4055 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4056 .next_proto_id = IPPROTO_GRE,
4057 .version_ihl = RTE_IPV4_VHL_DEF,
4058 .time_to_live = IPDEFTTL,
4061 struct rte_flow_item_ipv6 ipv6 = {
4063 .proto = IPPROTO_GRE,
4064 .hop_limits = IPDEFTTL,
4067 struct rte_flow_item_gre gre = {
4068 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4070 struct rte_flow_item_mpls mpls;
4074 ret = parse_vc(ctx, token, str, len, buf, size);
4077 /* Nothing else to do if there is no buffer. */
4080 if (!out->args.vc.actions_n)
4082 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4083 /* Point to selected object. */
4084 ctx->object = out->args.vc.data;
4085 ctx->objmask = NULL;
4086 /* Copy the headers to the buffer. */
4087 action_encap_data = ctx->object;
4088 *action_encap_data = (struct action_raw_encap_data) {
4089 .conf = (struct rte_flow_action_raw_encap){
4090 .data = action_encap_data->data,
4095 header = action_encap_data->data;
/* Outer ethertype depends on VLAN insertion and IP version. */
4096 if (mplsogre_encap_conf.select_vlan)
4097 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4098 else if (mplsogre_encap_conf.select_ipv4)
4099 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4101 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4102 memcpy(eth.dst.addr_bytes,
4103 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4104 memcpy(eth.src.addr_bytes,
4105 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4106 memcpy(header, &eth, sizeof(eth));
4107 header += sizeof(eth);
4108 if (mplsogre_encap_conf.select_vlan) {
4109 if (mplsogre_encap_conf.select_ipv4)
4110 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4112 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4113 memcpy(header, &vlan, sizeof(vlan));
4114 header += sizeof(vlan);
4116 if (mplsogre_encap_conf.select_ipv4) {
4117 memcpy(header, &ipv4, sizeof(ipv4));
4118 header += sizeof(ipv4);
4120 memcpy(&ipv6.hdr.src_addr,
4121 &mplsogre_encap_conf.ipv6_src,
4122 sizeof(mplsogre_encap_conf.ipv6_src));
4123 memcpy(&ipv6.hdr.dst_addr,
4124 &mplsogre_encap_conf.ipv6_dst,
4125 sizeof(mplsogre_encap_conf.ipv6_dst));
4126 memcpy(header, &ipv6, sizeof(ipv6));
4127 header += sizeof(ipv6);
4129 memcpy(header, &gre, sizeof(gre));
4130 header += sizeof(gre);
4131 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4132 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
4133 mpls.label_tc_s[2] |= 0x1;
4134 memcpy(header, &mpls, sizeof(mpls));
4135 header += sizeof(mpls);
4136 action_encap_data->conf.size = header -
4137 action_encap_data->data;
4138 action->conf = &action_encap_data->conf;
4142 /** Parse MPLSOGRE decap action. */
/* Builds a raw-decap template: ETH [/ VLAN] / IPv4-or-IPv6 / GRE / MPLS,
 * describing the tunnel headers to strip. */
4144 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4145 const char *str, unsigned int len,
4146 void *buf, unsigned int size)
4148 struct buffer *out = buf;
4149 struct rte_flow_action *action;
4150 struct action_raw_decap_data *action_decap_data;
4151 struct rte_flow_item_eth eth = { .type = 0, };
4152 struct rte_flow_item_vlan vlan = {.tci = 0};
4153 struct rte_flow_item_ipv4 ipv4 = {
4155 .next_proto_id = IPPROTO_GRE,
4158 struct rte_flow_item_ipv6 ipv6 = {
4160 .proto = IPPROTO_GRE,
4163 struct rte_flow_item_gre gre = {
4164 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4166 struct rte_flow_item_mpls mpls;
4170 ret = parse_vc(ctx, token, str, len, buf, size);
4173 /* Nothing else to do if there is no buffer. */
4176 if (!out->args.vc.actions_n)
4178 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4179 /* Point to selected object. */
4180 ctx->object = out->args.vc.data;
4181 ctx->objmask = NULL;
4182 /* Copy the headers to the buffer. */
4183 action_decap_data = ctx->object;
4184 *action_decap_data = (struct action_raw_decap_data) {
4185 .conf = (struct rte_flow_action_raw_decap){
4186 .data = action_decap_data->data,
4190 header = action_decap_data->data;
4191 if (mplsogre_decap_conf.select_vlan)
4192 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/* NOTE(review): from here on this decap path reads
 * mplsogre_encap_conf (select_ipv4, MAC addresses, select_vlan)
 * rather than mplsogre_decap_conf — confirm this mirroring of the
 * encap settings is intended. */
4193 else if (mplsogre_encap_conf.select_ipv4)
4194 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4196 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4197 memcpy(eth.dst.addr_bytes,
4198 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4199 memcpy(eth.src.addr_bytes,
4200 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4201 memcpy(header, &eth, sizeof(eth));
4202 header += sizeof(eth);
4203 if (mplsogre_encap_conf.select_vlan) {
4204 if (mplsogre_encap_conf.select_ipv4)
4205 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4207 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4208 memcpy(header, &vlan, sizeof(vlan));
4209 header += sizeof(vlan);
4211 if (mplsogre_encap_conf.select_ipv4) {
4212 memcpy(header, &ipv4, sizeof(ipv4));
4213 header += sizeof(ipv4);
4215 memcpy(header, &ipv6, sizeof(ipv6));
4216 header += sizeof(ipv6);
4218 memcpy(header, &gre, sizeof(gre));
4219 header += sizeof(gre);
/* MPLS label content is irrelevant for decap: match-all zeros. */
4220 memset(&mpls, 0, sizeof(mpls));
4221 memcpy(header, &mpls, sizeof(mpls));
4222 header += sizeof(mpls);
4223 action_decap_data->conf.size = header -
4224 action_decap_data->data;
4225 action->conf = &action_decap_data->conf;
4229 /** Parse MPLSOUDP encap action. */
/* Builds a raw-encap blob: ETH [/ VLAN] / IPv4-or-IPv6 / UDP / MPLS,
 * from the global "set mplsoudp_encap" configuration. */
4231 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4232 const char *str, unsigned int len,
4233 void *buf, unsigned int size)
4235 struct buffer *out = buf;
4236 struct rte_flow_action *action;
4237 struct action_raw_encap_data *action_encap_data;
4238 struct rte_flow_item_eth eth = { .type = 0, };
4239 struct rte_flow_item_vlan vlan = {
4240 .tci = mplsoudp_encap_conf.vlan_tci,
4243 struct rte_flow_item_ipv4 ipv4 = {
4245 .src_addr = mplsoudp_encap_conf.ipv4_src,
4246 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4247 .next_proto_id = IPPROTO_UDP,
4248 .version_ihl = RTE_IPV4_VHL_DEF,
4249 .time_to_live = IPDEFTTL,
4252 struct rte_flow_item_ipv6 ipv6 = {
4254 .proto = IPPROTO_UDP,
4255 .hop_limits = IPDEFTTL,
4258 struct rte_flow_item_udp udp = {
4260 .src_port = mplsoudp_encap_conf.udp_src,
4261 .dst_port = mplsoudp_encap_conf.udp_dst,
4264 struct rte_flow_item_mpls mpls;
4268 ret = parse_vc(ctx, token, str, len, buf, size);
4271 /* Nothing else to do if there is no buffer. */
4274 if (!out->args.vc.actions_n)
4276 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4277 /* Point to selected object. */
4278 ctx->object = out->args.vc.data;
4279 ctx->objmask = NULL;
4280 /* Copy the headers to the buffer. */
4281 action_encap_data = ctx->object;
4282 *action_encap_data = (struct action_raw_encap_data) {
4283 .conf = (struct rte_flow_action_raw_encap){
4284 .data = action_encap_data->data,
4289 header = action_encap_data->data;
/* Outer ethertype depends on VLAN insertion and IP version. */
4290 if (mplsoudp_encap_conf.select_vlan)
4291 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4292 else if (mplsoudp_encap_conf.select_ipv4)
4293 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4295 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4296 memcpy(eth.dst.addr_bytes,
4297 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4298 memcpy(eth.src.addr_bytes,
4299 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4300 memcpy(header, &eth, sizeof(eth));
4301 header += sizeof(eth);
4302 if (mplsoudp_encap_conf.select_vlan) {
4303 if (mplsoudp_encap_conf.select_ipv4)
4304 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4306 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4307 memcpy(header, &vlan, sizeof(vlan));
4308 header += sizeof(vlan);
4310 if (mplsoudp_encap_conf.select_ipv4) {
4311 memcpy(header, &ipv4, sizeof(ipv4));
4312 header += sizeof(ipv4);
4314 memcpy(&ipv6.hdr.src_addr,
4315 &mplsoudp_encap_conf.ipv6_src,
4316 sizeof(mplsoudp_encap_conf.ipv6_src));
4317 memcpy(&ipv6.hdr.dst_addr,
4318 &mplsoudp_encap_conf.ipv6_dst,
4319 sizeof(mplsoudp_encap_conf.ipv6_dst));
4320 memcpy(header, &ipv6, sizeof(ipv6));
4321 header += sizeof(ipv6);
4323 memcpy(header, &udp, sizeof(udp));
4324 header += sizeof(udp);
4325 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4326 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
4327 mpls.label_tc_s[2] |= 0x1;
4328 memcpy(header, &mpls, sizeof(mpls));
4329 header += sizeof(mpls);
4330 action_encap_data->conf.size = header -
4331 action_encap_data->data;
4332 action->conf = &action_encap_data->conf;
4336 /** Parse MPLSOUDP decap action. */
/*
 * Builds the ETH [/ VLAN] / IPv4-or-IPv6 / UDP / MPLS header pattern to
 * strip and installs it as the data of a raw_decap action configuration
 * attached to the last parsed VC action.  UDP destination port 6635 is
 * the MPLS-over-UDP port.
 */
4338 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4339 const char *str, unsigned int len,
4340 void *buf, unsigned int size)
4342 struct buffer *out = buf;
4343 struct rte_flow_action *action;
4344 struct action_raw_decap_data *action_decap_data;
4345 struct rte_flow_item_eth eth = { .type = 0, };
4346 struct rte_flow_item_vlan vlan = {.tci = 0};
4347 struct rte_flow_item_ipv4 ipv4 = {
4349 .next_proto_id = IPPROTO_UDP,
4352 struct rte_flow_item_ipv6 ipv6 = {
4354 .proto = IPPROTO_UDP,
4357 struct rte_flow_item_udp udp = {
4359 .dst_port = rte_cpu_to_be_16(6635),
4362 struct rte_flow_item_mpls mpls;
4366 ret = parse_vc(ctx, token, str, len, buf, size);
4369 /* Nothing else to do if there is no buffer. */
4372 if (!out->args.vc.actions_n)
4374 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4375 /* Point to selected object. */
4376 ctx->object = out->args.vc.data;
4377 ctx->objmask = NULL;
4378 /* Copy the headers to the buffer. */
4379 action_decap_data = ctx->object;
4380 *action_decap_data = (struct action_raw_decap_data) {
4381 .conf = (struct rte_flow_action_raw_decap){
4382 .data = action_decap_data->data,
4386 header = action_decap_data->data;
4387 if (mplsoudp_decap_conf.select_vlan)
4388 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/* NOTE(review): from here on the *encap* configuration is consulted so
 * the decap layout mirrors the encap side -- confirm this is intended
 * rather than a mplsoudp_decap_conf typo. */
4389 else if (mplsoudp_encap_conf.select_ipv4)
4390 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4392 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4393 memcpy(eth.dst.addr_bytes,
4394 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4395 memcpy(eth.src.addr_bytes,
4396 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake of "&eth" (HTML entity) -- restore when fixing the file encoding. */
4397 memcpy(header, ð, sizeof(eth));
4398 header += sizeof(eth);
4399 if (mplsoudp_encap_conf.select_vlan) {
4400 if (mplsoudp_encap_conf.select_ipv4)
4401 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4403 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4404 memcpy(header, &vlan, sizeof(vlan));
4405 header += sizeof(vlan);
4407 if (mplsoudp_encap_conf.select_ipv4) {
4408 memcpy(header, &ipv4, sizeof(ipv4));
4409 header += sizeof(ipv4);
4411 memcpy(header, &ipv6, sizeof(ipv6));
4412 header += sizeof(ipv6);
4414 memcpy(header, &udp, sizeof(udp));
4415 header += sizeof(udp);
/* MPLS label content does not matter for decap; zero it. */
4416 memset(&mpls, 0, sizeof(mpls));
4417 memcpy(header, &mpls, sizeof(mpls));
4418 header += sizeof(mpls);
4419 action_decap_data->conf.size = header -
4420 action_decap_data->data;
4421 action->conf = &action_decap_data->conf;
/*
 * Parse the raw_encap action: point the action configuration at the
 * globally pre-built raw_encap_conf buffer (filled by "set raw_encap").
 * The data is stored from the tail of that buffer, hence the offset math.
 */
4426 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4427 const char *str, unsigned int len, void *buf,
4430 struct buffer *out = buf;
4431 struct rte_flow_action *action;
4432 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4433 uint8_t *data = NULL;
4436 ret = parse_vc(ctx, token, str, len, buf, size);
4439 /* Nothing else to do if there is no buffer. */
4442 if (!out->args.vc.actions_n)
4444 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4445 /* Point to selected object. */
4446 ctx->object = out->args.vc.data;
4447 ctx->objmask = NULL;
4448 /* Copy the headers to the buffer. */
4449 action_raw_encap_conf = ctx->object;
4450 /* data stored from tail of data buffer */
4451 data = (uint8_t *)&(raw_encap_conf.data) +
4452 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4453 action_raw_encap_conf->data = data;
4454 action_raw_encap_conf->preserve = NULL;
4455 action_raw_encap_conf->size = raw_encap_conf.size;
4456 action->conf = action_raw_encap_conf;
/*
 * Parse the raw_decap action: mirror of parse_vc_action_raw_encap(),
 * using the globally pre-built raw_decap_conf buffer instead.
 */
4461 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4462 const char *str, unsigned int len, void *buf,
4465 struct buffer *out = buf;
4466 struct rte_flow_action *action;
4467 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4468 uint8_t *data = NULL;
4471 ret = parse_vc(ctx, token, str, len, buf, size);
4474 /* Nothing else to do if there is no buffer. */
4477 if (!out->args.vc.actions_n)
4479 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4480 /* Point to selected object. */
4481 ctx->object = out->args.vc.data;
4482 ctx->objmask = NULL;
4483 /* Copy the headers to the buffer. */
4484 action_raw_decap_conf = ctx->object;
4485 /* data stored from tail of data buffer */
4486 data = (uint8_t *)&(raw_decap_conf.data) +
4487 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4488 action_raw_decap_conf->data = data;
4489 action_raw_decap_conf->size = raw_decap_conf.size;
4490 action->conf = action_raw_decap_conf;
4494 /** Parse tokens for destroy command. */
/*
 * First invocation initializes the output buffer command and aligns the
 * rule-ID array right after the buffer header; subsequent invocations
 * append one rule ID slot each, bounds-checked against the buffer size.
 */
4496 parse_destroy(struct context *ctx, const struct token *token,
4497 const char *str, unsigned int len,
4498 void *buf, unsigned int size)
4500 struct buffer *out = buf;
4502 /* Token name must match. */
4503 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4505 /* Nothing else to do if there is no buffer. */
4508 if (!out->command) {
4509 if (ctx->curr != DESTROY)
4511 if (sizeof(*out) > size)
4513 out->command = ctx->curr;
4516 ctx->objmask = NULL;
4517 out->args.destroy.rule =
4518 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another rule ID if it would overflow the output buffer. */
4522 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4523 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4526 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4527 ctx->objmask = NULL;
4531 /** Parse tokens for flush command. */
/* Only initializes the output buffer; flush takes no further arguments. */
4533 parse_flush(struct context *ctx, const struct token *token,
4534 const char *str, unsigned int len,
4535 void *buf, unsigned int size)
4537 struct buffer *out = buf;
4539 /* Token name must match. */
4540 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4542 /* Nothing else to do if there is no buffer. */
4545 if (!out->command) {
4546 if (ctx->curr != FLUSH)
4548 if (sizeof(*out) > size)
4550 out->command = ctx->curr;
4553 ctx->objmask = NULL;
4558 /** Parse tokens for query command. */
/* Initializes the output buffer; query arguments are filled by later tokens. */
4560 parse_query(struct context *ctx, const struct token *token,
4561 const char *str, unsigned int len,
4562 void *buf, unsigned int size)
4564 struct buffer *out = buf;
4566 /* Token name must match. */
4567 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4569 /* Nothing else to do if there is no buffer. */
4572 if (!out->command) {
4573 if (ctx->curr != QUERY)
4575 if (sizeof(*out) > size)
4577 out->command = ctx->curr;
4580 ctx->objmask = NULL;
4585 /** Parse action names. */
/*
 * Matches str against the names of tokens listed in next_action[] and
 * stores the matched action's private data at the argument offset.
 * The popped argument is pushed back on failure so retries are possible.
 */
4587 parse_action(struct context *ctx, const struct token *token,
4588 const char *str, unsigned int len,
4589 void *buf, unsigned int size)
4591 struct buffer *out = buf;
4592 const struct arg *arg = pop_args(ctx);
4596 /* Argument is expected. */
4599 /* Parse action name. */
4600 for (i = 0; next_action[i]; ++i) {
4601 const struct parse_action_priv *priv;
4603 token = &token_list[next_action[i]];
4604 if (strcmp_partial(token->name, str, len))
4610 memcpy((uint8_t *)ctx->object + arg->offset,
4616 push_args(ctx, arg);
4620 /** Parse tokens for list command. */
/*
 * Same shape as parse_destroy(): first call sets up the buffer and the
 * aligned group-ID array; each later call appends one group ID slot,
 * bounds-checked against the buffer size.
 */
4622 parse_list(struct context *ctx, const struct token *token,
4623 const char *str, unsigned int len,
4624 void *buf, unsigned int size)
4626 struct buffer *out = buf;
4628 /* Token name must match. */
4629 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4631 /* Nothing else to do if there is no buffer. */
4634 if (!out->command) {
4635 if (ctx->curr != LIST)
4637 if (sizeof(*out) > size)
4639 out->command = ctx->curr;
4642 ctx->objmask = NULL;
4643 out->args.list.group =
4644 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4648 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4649 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4652 ctx->object = out->args.list.group + out->args.list.group_n++;
4653 ctx->objmask = NULL;
4657 /** Parse tokens for isolate command. */
/* Initializes the output buffer for the isolate command. */
4659 parse_isolate(struct context *ctx, const struct token *token,
4660 const char *str, unsigned int len,
4661 void *buf, unsigned int size)
4663 struct buffer *out = buf;
4665 /* Token name must match. */
4666 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4668 /* Nothing else to do if there is no buffer. */
4671 if (!out->command) {
4672 if (ctx->curr != ISOLATE)
4674 if (sizeof(*out) > size)
4676 out->command = ctx->curr;
4679 ctx->objmask = NULL;
4685 * Parse signed/unsigned integers 8 to 64-bit long.
4687 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts str with strtoimax()/strtoumax(), range-checks it against
 * arg->min/arg->max, then stores it at arg->offset in the selected
 * object -- byte-swapped when arg->hton is set -- and mirrors an all-ones
 * value into the object mask.  24-bit (uint8_t[3]) fields are stored
 * big-endian regardless of host byte order.
 */
4691 parse_int(struct context *ctx, const struct token *token,
4692 const char *str, unsigned int len,
4693 void *buf, unsigned int size)
4695 const struct arg *arg = pop_args(ctx);
4700 /* Argument is expected. */
4705 (uintmax_t)strtoimax(str, &end, 0) :
4706 strtoumax(str, &end, 0);
/* Whole token must be consumed; partial parses are rejected. */
4707 if (errno || (size_t)(end - str) != len)
4710 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4711 (intmax_t)u > (intmax_t)arg->max)) ||
4712 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments go through the dedicated fill helper. */
4717 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4718 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4722 buf = (uint8_t *)ctx->object + arg->offset;
4724 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4728 case sizeof(uint8_t):
4729 *(uint8_t *)buf = u;
4731 case sizeof(uint16_t):
4732 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
4734 case sizeof(uint8_t [3]):
4735 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4737 ((uint8_t *)buf)[0] = u;
4738 ((uint8_t *)buf)[1] = u >> 8;
4739 ((uint8_t *)buf)[2] = u >> 16;
4743 ((uint8_t *)buf)[0] = u >> 16;
4744 ((uint8_t *)buf)[1] = u >> 8;
4745 ((uint8_t *)buf)[2] = u;
4747 case sizeof(uint32_t):
4748 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4750 case sizeof(uint64_t):
4751 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is selected. */
4756 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4758 buf = (uint8_t *)ctx->objmask + arg->offset;
4763 push_args(ctx, arg);
4770 * Three arguments (ctx->args) are retrieved from the stack to store data,
4771 * its actual length and address (in that order).
/*
 * Copies the raw token into the data argument's slot (zero-padding the
 * remainder), lets parse_int() record its length, and optionally stores
 * the data address.  Arguments are pushed back on every failure path.
 */
4774 parse_string(struct context *ctx, const struct token *token,
4775 const char *str, unsigned int len,
4776 void *buf, unsigned int size)
4778 const struct arg *arg_data = pop_args(ctx);
4779 const struct arg *arg_len = pop_args(ctx);
4780 const struct arg *arg_addr = pop_args(ctx);
4781 char tmp[16]; /* Ought to be enough. */
4784 /* Arguments are expected. */
4788 push_args(ctx, arg_data);
4792 push_args(ctx, arg_len);
4793 push_args(ctx, arg_data);
4796 size = arg_data->size;
4797 /* Bit-mask fill is not supported. */
4798 if (arg_data->mask || size < len)
4802 /* Let parse_int() fill length information first. */
4803 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4806 push_args(ctx, arg_len);
4807 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4812 buf = (uint8_t *)ctx->object + arg_data->offset;
4813 /* Output buffer is not necessarily NUL-terminated. */
4814 memcpy(buf, str, len);
4815 memset((uint8_t *)buf + len, 0x00, size - len);
4817 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4818 /* Save address if requested. */
4819 if (arg_addr->size) {
4820 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4822 (uint8_t *)ctx->object + arg_data->offset
4826 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4828 (uint8_t *)ctx->objmask + arg_data->offset
4834 push_args(ctx, arg_addr);
4835 push_args(ctx, arg_len);
4836 push_args(ctx, arg_data);
/*
 * Convert a hexadecimal character string into bytes, two characters per
 * output byte, writing the byte count back through *size.
 */
4841 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4847 /* Check input parameters */
4848 if ((src == NULL) ||
4854 /* Convert chars to bytes */
4855 for (i = 0, len = 0; i < *size; i += 2) {
/* tmp holds one two-character hex pair plus NUL for strtoul(). */
4856 snprintf(tmp, 3, "%s", src + i);
4857 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hexadecimal byte-string token ("0x..." form) into the data
 * argument's slot.  Mirrors parse_string(): decodes into a temporary
 * buffer, records the decoded length via parse_int(), copies the bytes
 * (zero-padding the remainder) and optionally stores the data address.
 */
4872 parse_hex(struct context *ctx, const struct token *token,
4873 const char *str, unsigned int len,
4874 void *buf, unsigned int size)
4876 const struct arg *arg_data = pop_args(ctx);
4877 const struct arg *arg_len = pop_args(ctx);
4878 const struct arg *arg_addr = pop_args(ctx);
4879 char tmp[16]; /* Ought to be enough. */
4881 unsigned int hexlen = len;
4882 unsigned int length = 256;
4883 uint8_t hex_tmp[length];
4885 /* Arguments are expected. */
4889 push_args(ctx, arg_data);
4893 push_args(ctx, arg_len);
4894 push_args(ctx, arg_data);
4897 size = arg_data->size;
4898 /* Bit-mask fill is not supported. */
4904 /* translate bytes string to array. */
4905 if (str[0] == '0' && ((str[1] == 'x') ||
/* Reject inputs longer than the 256-byte scratch buffer. */
4910 if (hexlen > length)
4912 ret = parse_hex_string(str, hex_tmp, &hexlen);
4915 /* Let parse_int() fill length information first. */
4916 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4919 push_args(ctx, arg_len);
4920 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4925 buf = (uint8_t *)ctx->object + arg_data->offset;
4926 /* Output buffer is not necessarily NUL-terminated. */
4927 memcpy(buf, hex_tmp, hexlen);
4928 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
4930 memset((uint8_t *)ctx->objmask + arg_data->offset,
4932 /* Save address if requested. */
4933 if (arg_addr->size) {
4934 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4936 (uint8_t *)ctx->object + arg_data->offset
4940 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4942 (uint8_t *)ctx->objmask + arg_data->offset
4948 push_args(ctx, arg_addr);
4949 push_args(ctx, arg_len);
4950 push_args(ctx, arg_data);
4956 * Parse a MAC address.
4958 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses rte_ether_unformat_addr() to decode the address into a temporary,
 * then copies it to the argument offset and fills the mask with 0xff.
 */
4962 parse_mac_addr(struct context *ctx, const struct token *token,
4963 const char *str, unsigned int len,
4964 void *buf, unsigned int size)
4966 const struct arg *arg = pop_args(ctx);
4967 struct rte_ether_addr tmp;
4971 /* Argument is expected. */
4975 /* Bit-mask fill is not supported. */
4976 if (arg->mask || size != sizeof(tmp))
4978 /* Only network endian is supported. */
4981 ret = rte_ether_unformat_addr(str, &tmp);
4986 buf = (uint8_t *)ctx->object + arg->offset;
4987 memcpy(buf, &tmp, size);
4989 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4992 push_args(ctx, arg);
4997 * Parse an IPv4 address.
4999 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) on a NUL-terminated copy of the token; on
 * failure falls back to parse_int() so plain integers are accepted too.
 */
5003 parse_ipv4_addr(struct context *ctx, const struct token *token,
5004 const char *str, unsigned int len,
5005 void *buf, unsigned int size)
5007 const struct arg *arg = pop_args(ctx);
5012 /* Argument is expected. */
5016 /* Bit-mask fill is not supported. */
5017 if (arg->mask || size != sizeof(tmp))
5019 /* Only network endian is supported. */
5022 memcpy(str2, str, len);
5024 ret = inet_pton(AF_INET, str2, &tmp);
5026 /* Attempt integer parsing. */
5027 push_args(ctx, arg);
5028 return parse_int(ctx, token, str, len, buf, size);
5032 buf = (uint8_t *)ctx->object + arg->offset;
5033 memcpy(buf, &tmp, size);
5035 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5038 push_args(ctx, arg);
5043 * Parse an IPv6 address.
5045 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Decodes via inet_pton(AF_INET6) on a NUL-terminated copy of the token;
 * unlike parse_ipv4_addr() there is no integer fallback visible here.
 */
5049 parse_ipv6_addr(struct context *ctx, const struct token *token,
5050 const char *str, unsigned int len,
5051 void *buf, unsigned int size)
5053 const struct arg *arg = pop_args(ctx);
5055 struct in6_addr tmp;
5059 /* Argument is expected. */
5063 /* Bit-mask fill is not supported. */
5064 if (arg->mask || size != sizeof(tmp))
5066 /* Only network endian is supported. */
5069 memcpy(str2, str, len);
5071 ret = inet_pton(AF_INET6, str2, &tmp);
5076 buf = (uint8_t *)ctx->object + arg->offset;
5077 memcpy(buf, &tmp, size);
5079 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5082 push_args(ctx, arg);
5086 /** Boolean values (even indices stand for false). */
/* NULL-terminated list; matched by parse_boolean() / comp_boolean(). */
5087 static const char *const boolean_name[] = {
5097 * Parse a boolean value.
5099 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Maps a recognized boolean name to "0"/"1" (odd indices are true, per
 * boolean_name[]'s layout) then defers the store to parse_int().
 */
5103 parse_boolean(struct context *ctx, const struct token *token,
5104 const char *str, unsigned int len,
5105 void *buf, unsigned int size)
5107 const struct arg *arg = pop_args(ctx);
5111 /* Argument is expected. */
5114 for (i = 0; boolean_name[i]; ++i)
5115 if (!strcmp_partial(boolean_name[i], str, len))
5117 /* Process token as integer. */
5118 if (boolean_name[i])
5119 str = i & 1 ? "1" : "0";
5120 push_args(ctx, arg);
5121 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen(str). */
5122 return ret > 0 ? (int)len : ret;
5125 /** Parse port and update context. */
/* Parses the port ID into a scratch buffer and records it in ctx->port. */
5127 parse_port(struct context *ctx, const struct token *token,
5128 const char *str, unsigned int len,
5129 void *buf, unsigned int size)
5131 struct buffer *out = &(struct buffer){ .port = 0 };
5139 ctx->objmask = NULL;
5140 size = sizeof(*out);
5142 ret = parse_int(ctx, token, str, len, out, size);
5144 ctx->port = out->port;
5150 /** Parse set command, initialize output buffer for subsequent tokens. */
5152 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5153 const char *str, unsigned int len,
5154 void *buf, unsigned int size)
5156 struct buffer *out = buf;
5158 /* Token name must match. */
5159 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5161 /* Nothing else to do if there is no buffer. */
5164 /* Make sure buffer is large enough. */
5165 if (size < sizeof(*out))
5168 ctx->objmask = NULL;
/* Record which set sub-command (raw_encap/raw_decap) is being parsed. */
5171 out->command = ctx->curr;
5176 * Parse set raw_encap/raw_decap command,
5177 * initialize output buffer for subsequent tokens.
5180 parse_set_init(struct context *ctx, const struct token *token,
5181 const char *str, unsigned int len,
5182 void *buf, unsigned int size)
5184 struct buffer *out = buf;
5186 /* Token name must match. */
5187 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5189 /* Nothing else to do if there is no buffer. */
5192 /* Make sure buffer is large enough. */
5193 if (size < sizeof(*out))
5195 /* Initialize buffer. */
5196 memset(out, 0x00, sizeof(*out));
/* 0x22 poison makes uninitialized reads past the header easy to spot. */
5197 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5200 ctx->objmask = NULL;
5201 if (!out->command) {
5202 if (ctx->curr != SET)
5204 if (sizeof(*out) > size)
5206 out->command = ctx->curr;
5207 out->args.vc.data = (uint8_t *)out + size;
5208 /* All we need is pattern */
5209 out->args.vc.pattern =
5210 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5212 ctx->object = out->args.vc.pattern;
5217 /** No completion. */
/* Completion callback stub for tokens that offer no suggestions. */
5219 comp_none(struct context *ctx, const struct token *token,
5220 unsigned int ent, char *buf, unsigned int size)
5230 /** Complete boolean values. */
/* Returns the ent-th boolean name when buf is given; counts entries otherwise. */
5232 comp_boolean(struct context *ctx, const struct token *token,
5233 unsigned int ent, char *buf, unsigned int size)
5239 for (i = 0; boolean_name[i]; ++i)
5240 if (buf && i == ent)
5241 return strlcpy(buf, boolean_name[i], size);
5247 /** Complete action names. */
/* Same enumeration pattern as comp_boolean(), over next_action[]. */
5249 comp_action(struct context *ctx, const struct token *token,
5250 unsigned int ent, char *buf, unsigned int size)
5256 for (i = 0; next_action[i]; ++i)
5257 if (buf && i == ent)
5258 return strlcpy(buf, token_list[next_action[i]].name,
5265 /** Complete available ports. */
/* Enumerates active ethdev port IDs for tab-completion. */
5267 comp_port(struct context *ctx, const struct token *token,
5268 unsigned int ent, char *buf, unsigned int size)
5275 RTE_ETH_FOREACH_DEV(p) {
5276 if (buf && i == ent)
5277 return snprintf(buf, size, "%u", p);
5285 /** Complete available rule IDs. */
/* Walks the selected port's flow list to suggest existing rule IDs. */
5287 comp_rule_id(struct context *ctx, const struct token *token,
5288 unsigned int ent, char *buf, unsigned int size)
5291 struct rte_port *port;
5292 struct port_flow *pf;
5295 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5296 ctx->port == (portid_t)RTE_PORT_ALL)
5298 port = &ports[ctx->port];
5299 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5300 if (buf && i == ent)
5301 return snprintf(buf, size, "%u", pf->id);
5309 /** Complete type field for RSS action. */
/* Suggests rss_type_table entries, then "end" as the list terminator. */
5311 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5312 unsigned int ent, char *buf, unsigned int size)
5318 for (i = 0; rss_type_table[i].str; ++i)
5323 return strlcpy(buf, rss_type_table[ent].str, size);
5325 return snprintf(buf, size, "end");
5329 /** Complete queue field for RSS action. */
/* Suggests queue indices, then "end" as the list terminator. */
5331 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5332 unsigned int ent, char *buf, unsigned int size)
5339 return snprintf(buf, size, "%u", ent);
5341 return snprintf(buf, size, "end");
5345 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
5346 static struct context cmd_flow_context;
5348 /** Global parser instance (cmdline API). */
/* Forward declarations; the cmd_flow definition appears further below. */
5349 cmdline_parse_inst_t cmd_flow;
5350 cmdline_parse_inst_t cmd_set_raw;
5352 /** Initialize context. */
/* Resets the shared parser context before a new command is tokenized. */
5354 cmd_flow_context_init(struct context *ctx)
5356 /* A full memset() is not necessary. */
5366 ctx->objmask = NULL;
5369 /** Parse a token (cmdline API). */
/*
 * Core per-token dispatcher: measures the token (terminated by space or
 * '#'), detects end-of-line, then tries each candidate token from the
 * current next-list, calling its parser callback or parse_default().
 * On a match, the matched token's next-lists and arguments are pushed
 * onto the context stacks for subsequent calls.
 */
5371 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5374 struct context *ctx = &cmd_flow_context;
5375 const struct token *token;
5376 const enum index *list;
5381 token = &token_list[ctx->curr];
5382 /* Check argument length. */
5385 for (len = 0; src[len]; ++len)
5386 if (src[len] == '#' || isspace(src[len]))
5390 /* Last argument and EOL detection. */
5391 for (i = len; src[i]; ++i)
5392 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5394 else if (!isspace(src[i])) {
5399 if (src[i] == '\r' || src[i] == '\n') {
5403 /* Initialize context if necessary. */
5404 if (!ctx->next_num) {
5407 ctx->next[ctx->next_num++] = token->next[0];
5409 /* Process argument through candidates. */
5410 ctx->prev = ctx->curr;
5411 list = ctx->next[ctx->next_num - 1];
5412 for (i = 0; list[i]; ++i) {
5413 const struct token *next = &token_list[list[i]];
5416 ctx->curr = list[i];
/* Prefer a token's own parser callback; fall back to the default. */
5418 tmp = next->call(ctx, next, src, len, result, size);
5420 tmp = parse_default(ctx, next, src, len, result, size);
5421 if (tmp == -1 || tmp != len)
5429 /* Push subsequent tokens if any. */
5431 for (i = 0; token->next[i]; ++i) {
5432 if (ctx->next_num == RTE_DIM(ctx->next))
5434 ctx->next[ctx->next_num++] = token->next[i];
5436 /* Push arguments if any. */
5438 for (i = 0; token->args[i]; ++i) {
5439 if (ctx->args_num == RTE_DIM(ctx->args))
5441 ctx->args[ctx->args_num++] = token->args[i];
5446 /** Return number of completion entries (cmdline API). */
5448 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5450 struct context *ctx = &cmd_flow_context;
5451 const struct token *token = &token_list[ctx->curr];
5452 const enum index *list;
5456 /* Count number of tokens in current list. */
5458 list = ctx->next[ctx->next_num - 1];
5460 list = token->next[0];
5461 for (i = 0; list[i]; ++i)
5466 * If there is a single token, use its completion callback, otherwise
5467 * return the number of entries.
5469 token = &token_list[list[0]];
5470 if (i == 1 && token->comp) {
5471 /* Save index for cmd_flow_get_help(). */
5472 ctx->prev = list[0];
/* Delegate the count to the token's own completion callback. */
5473 return token->comp(ctx, token, 0, NULL, 0);
5478 /** Return a completion entry (cmdline API). */
/* Companion of cmd_flow_complete_get_nb(): fetches the index-th entry. */
5480 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5481 char *dst, unsigned int size)
5483 struct context *ctx = &cmd_flow_context;
5484 const struct token *token = &token_list[ctx->curr];
5485 const enum index *list;
5489 /* Count number of tokens in current list. */
5491 list = ctx->next[ctx->next_num - 1];
5493 list = token->next[0];
5494 for (i = 0; list[i]; ++i)
5498 /* If there is a single token, use its completion callback. */
5499 token = &token_list[list[0]];
5500 if (i == 1 && token->comp) {
5501 /* Save index for cmd_flow_get_help(). */
5502 ctx->prev = list[0];
5503 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5505 /* Otherwise make sure the index is valid and use defaults. */
5508 token = &token_list[list[index]];
5509 strlcpy(dst, token->name, size);
5510 /* Save index for cmd_flow_get_help(). */
5511 ctx->prev = list[index];
5515 /** Populate help strings for current token (cmdline API). */
/* Writes the token type to dst and points cmd_flow.help_str at its help text. */
5517 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5519 struct context *ctx = &cmd_flow_context;
5520 const struct token *token = &token_list[ctx->prev];
5525 /* Set token type and update global help with details. */
5526 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
5528 cmd_flow.help_str = token->help;
5530 cmd_flow.help_str = token->name;
5534 /** Token definition template (cmdline API). */
/* Every dynamic token produced by cmd_flow_tok() shares this ops vtable. */
5535 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5536 .ops = &(struct cmdline_token_ops){
5537 .parse = cmd_flow_parse,
5538 .complete_get_nb = cmd_flow_complete_get_nb,
5539 .complete_get_elt = cmd_flow_complete_get_elt,
5540 .get_help = cmd_flow_get_help,
5545 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library for each token slot; resets the shared
 * context on the first slot, stops emitting tokens when none are
 * expected, and otherwise hands back the shared token header template.
 */
5547 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5548 cmdline_parse_token_hdr_t **hdr_inst)
5550 struct context *ctx = &cmd_flow_context;
5552 /* Always reinitialize context before requesting the first token. */
5553 if (!(hdr_inst - cmd_flow.tokens))
5554 cmd_flow_context_init(ctx);
5555 /* Return NULL when no more tokens are expected. */
5556 if (!ctx->next_num && ctx->curr) {
5560 /* Determine if command should end here. */
5561 if (ctx->eol && ctx->last && ctx->next_num) {
5562 const enum index *list = ctx->next[ctx->next_num - 1];
5565 for (i = 0; list[i]; ++i) {
5572 *hdr = &cmd_flow_token_hdr;
5575 /** Dispatch parsed buffer to function calls. */
/* Maps each parsed command to the matching port_flow_* handler in config.c. */
5577 cmd_flow_parsed(const struct buffer *in)
5579 switch (in->command) {
5581 port_flow_validate(in->port, &in->args.vc.attr,
5582 in->args.vc.pattern, in->args.vc.actions);
5585 port_flow_create(in->port, &in->args.vc.attr,
5586 in->args.vc.pattern, in->args.vc.actions);
5589 port_flow_destroy(in->port, in->args.destroy.rule_n,
5590 in->args.destroy.rule);
5593 port_flow_flush(in->port);
5596 port_flow_query(in->port, in->args.query.rule,
5597 &in->args.query.action);
5600 port_flow_list(in->port, in->args.list.group_n,
5601 in->args.list.group);
5604 port_flow_isolate(in->port, in->args.isolate.set);
5611 /** Token generator and output processing callback (cmdline API). */
/* Dual-purpose: token generation (cmd_flow_tok) and result dispatch. */
5613 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5616 cmd_flow_tok(arg0, arg2);
5618 cmd_flow_parsed(arg0);
5621 /** Global parser instance (cmdline API). */
5622 cmdline_parse_inst_t cmd_flow = {
5624 .data = NULL, /**< Unused. */
5625 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5628 }, /**< Tokens are returned by cmd_flow_tok(). */
5631 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Fix up protocol-chaining fields in a raw header that the command line
 * cannot express: EtherType / next-proto linkage to next_proto, the IP
 * version bits, the VXLAN valid-VNI flag and NVGRE constants.
 */
5634 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5636 struct rte_flow_item_ipv4 *ipv4;
5637 struct rte_flow_item_eth *eth;
5638 struct rte_flow_item_ipv6 *ipv6;
5639 struct rte_flow_item_vxlan *vxlan;
5640 struct rte_flow_item_vxlan_gpe *gpe;
5641 struct rte_flow_item_nvgre *nvgre;
5642 uint32_t ipv6_vtc_flow;
5644 switch (item->type) {
5645 case RTE_FLOW_ITEM_TYPE_ETH:
5646 eth = (struct rte_flow_item_eth *)buf;
5648 eth->type = rte_cpu_to_be_16(next_proto);
5650 case RTE_FLOW_ITEM_TYPE_IPV4:
5651 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45 = IPv4, 5-word header (no options). */
5652 ipv4->hdr.version_ihl = 0x45;
5653 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
5655 case RTE_FLOW_ITEM_TYPE_IPV6:
5656 ipv6 = (struct rte_flow_item_ipv6 *)buf;
5657 ipv6->hdr.proto = (uint8_t)next_proto;
5658 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
5659 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
5660 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
5661 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
5663 case RTE_FLOW_ITEM_TYPE_VXLAN:
5664 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08 = I flag: VNI field is valid. */
5665 vxlan->flags = 0x08;
5667 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5668 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
5671 case RTE_FLOW_ITEM_TYPE_NVGRE:
5672 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558 = Transparent Ethernet Bridging; 0x2000 = key-present bit. */
5673 nvgre->protocol = rte_cpu_to_be_16(0x6558);
5674 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
5681 /** Helper of get item's default mask. */
/*
 * Returns the library-provided default mask for the given item type,
 * used as the spec when the user provided none; NULL for unknown types.
 */
5683 flow_item_default_mask(const struct rte_flow_item *item)
5685 const void *mask = NULL;
5686 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5688 switch (item->type) {
5689 case RTE_FLOW_ITEM_TYPE_ANY:
5690 mask = &rte_flow_item_any_mask;
5692 case RTE_FLOW_ITEM_TYPE_VF:
5693 mask = &rte_flow_item_vf_mask;
5695 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5696 mask = &rte_flow_item_port_id_mask;
5698 case RTE_FLOW_ITEM_TYPE_RAW:
5699 mask = &rte_flow_item_raw_mask;
5701 case RTE_FLOW_ITEM_TYPE_ETH:
5702 mask = &rte_flow_item_eth_mask;
5704 case RTE_FLOW_ITEM_TYPE_VLAN:
5705 mask = &rte_flow_item_vlan_mask;
5707 case RTE_FLOW_ITEM_TYPE_IPV4:
5708 mask = &rte_flow_item_ipv4_mask;
5710 case RTE_FLOW_ITEM_TYPE_IPV6:
5711 mask = &rte_flow_item_ipv6_mask;
5713 case RTE_FLOW_ITEM_TYPE_ICMP:
5714 mask = &rte_flow_item_icmp_mask;
5716 case RTE_FLOW_ITEM_TYPE_UDP:
5717 mask = &rte_flow_item_udp_mask;
5719 case RTE_FLOW_ITEM_TYPE_TCP:
5720 mask = &rte_flow_item_tcp_mask;
5722 case RTE_FLOW_ITEM_TYPE_SCTP:
5723 mask = &rte_flow_item_sctp_mask;
5725 case RTE_FLOW_ITEM_TYPE_VXLAN:
5726 mask = &rte_flow_item_vxlan_mask;
5728 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5729 mask = &rte_flow_item_vxlan_gpe_mask;
5731 case RTE_FLOW_ITEM_TYPE_E_TAG:
5732 mask = &rte_flow_item_e_tag_mask;
5734 case RTE_FLOW_ITEM_TYPE_NVGRE:
5735 mask = &rte_flow_item_nvgre_mask;
5737 case RTE_FLOW_ITEM_TYPE_MPLS:
5738 mask = &rte_flow_item_mpls_mask;
5740 case RTE_FLOW_ITEM_TYPE_GRE:
5741 mask = &rte_flow_item_gre_mask;
5743 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
/* GRE key has no library mask; use the local all-ones constant. */
5744 mask = &gre_key_default_mask;
5746 case RTE_FLOW_ITEM_TYPE_META:
5747 mask = &rte_flow_item_meta_mask;
5749 case RTE_FLOW_ITEM_TYPE_FUZZY:
5750 mask = &rte_flow_item_fuzzy_mask;
5752 case RTE_FLOW_ITEM_TYPE_GTP:
5753 mask = &rte_flow_item_gtp_mask;
5755 case RTE_FLOW_ITEM_TYPE_ESP:
5756 mask = &rte_flow_item_esp_mask;
5766 /** Dispatch parsed buffer to function calls. */
/*
 * Handler for "set raw_encap"/"set raw_decap": serializes the parsed
 * pattern items into the global raw_{encap,decap}_conf data buffer.
 * Items are written from the tail of the buffer walking the pattern in
 * reverse, so the final layout runs outer (L2) to inner (L3/L4).
 */
5768 cmd_set_raw_parsed(const struct buffer *in)
5770 uint32_t n = in->args.vc.pattern_n;
5772 struct rte_flow_item *item = NULL;
5774 uint8_t *data = NULL;
5775 uint8_t *data_tail = NULL;
5776 size_t *total_size = NULL;
5777 uint16_t upper_layer = 0;
5780 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
5781 in->command == SET_RAW_DECAP);
5782 if (in->command == SET_RAW_ENCAP) {
5783 total_size = &raw_encap_conf.size;
5784 data = (uint8_t *)&raw_encap_conf.data;
5786 total_size = &raw_decap_conf.size;
5787 data = (uint8_t *)&raw_decap_conf.data;
5790 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5791 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
5792 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
5793 for (i = n - 1 ; i >= 0; --i) {
5794 item = in->args.vc.pattern + i;
/* Items without an explicit spec fall back to their default mask. */
5795 if (item->spec == NULL)
5796 item->spec = flow_item_default_mask(item);
5797 switch (item->type) {
5798 case RTE_FLOW_ITEM_TYPE_ETH:
5799 size = sizeof(struct rte_flow_item_eth);
5801 case RTE_FLOW_ITEM_TYPE_VLAN:
5802 size = sizeof(struct rte_flow_item_vlan);
5803 proto = RTE_ETHER_TYPE_VLAN;
5805 case RTE_FLOW_ITEM_TYPE_IPV4:
5806 size = sizeof(struct rte_flow_item_ipv4);
5807 proto = RTE_ETHER_TYPE_IPV4;
5809 case RTE_FLOW_ITEM_TYPE_IPV6:
5810 size = sizeof(struct rte_flow_item_ipv6);
5811 proto = RTE_ETHER_TYPE_IPV6;
5813 case RTE_FLOW_ITEM_TYPE_UDP:
5814 size = sizeof(struct rte_flow_item_udp);
5817 case RTE_FLOW_ITEM_TYPE_TCP:
5818 size = sizeof(struct rte_flow_item_tcp);
5821 case RTE_FLOW_ITEM_TYPE_VXLAN:
5822 size = sizeof(struct rte_flow_item_vxlan);
5824 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5825 size = sizeof(struct rte_flow_item_vxlan_gpe);
5827 case RTE_FLOW_ITEM_TYPE_GRE:
5828 size = sizeof(struct rte_flow_item_gre);
5831 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5832 size = sizeof(rte_be32_t);
5834 case RTE_FLOW_ITEM_TYPE_MPLS:
5835 size = sizeof(struct rte_flow_item_mpls);
5837 case RTE_FLOW_ITEM_TYPE_NVGRE:
5838 size = sizeof(struct rte_flow_item_nvgre);
5842 printf("Error - Not supported item\n");
/* On unsupported item, wipe the partially built buffer. */
5844 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5847 *total_size += size;
5848 rte_memcpy(data_tail - (*total_size), item->spec, size);
5849 /* update some fields which cannot be set by cmdline */
5850 update_fields((data_tail - (*total_size)), item,
5852 upper_layer = proto;
5854 if (verbose_level & 0x1)
5855 printf("total data size is %zu\n", (*total_size));
5856 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
5859 /** Populate help strings for current token (cmdline API). */
5861 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
5864 struct context *ctx = &cmd_flow_context;
5865 const struct token *token = &token_list[ctx->prev];
5870 /* Set token type and update global help with details. */
5871 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
5873 cmd_set_raw.help_str = token->help;
5875 cmd_set_raw.help_str = token->name;
5879 /** Token definition template (cmdline API). */
5880 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
5881 .ops = &(struct cmdline_token_ops){
5882 .parse = cmd_flow_parse,
5883 .complete_get_nb = cmd_flow_complete_get_nb,
5884 .complete_get_elt = cmd_flow_complete_get_elt,
5885 .get_help = cmd_set_raw_get_help,
5890 /** Populate the next dynamic token. */
5892 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
5893 cmdline_parse_token_hdr_t **hdr_inst)
5895 struct context *ctx = &cmd_flow_context;
5897 /* Always reinitialize context before requesting the first token. */
5898 if (!(hdr_inst - cmd_set_raw.tokens)) {
5899 cmd_flow_context_init(ctx);
5900 ctx->curr = START_SET;
5902 /* Return NULL when no more tokens are expected. */
5903 if (!ctx->next_num && (ctx->curr != START_SET)) {
5907 /* Determine if command should end here. */
5908 if (ctx->eol && ctx->last && ctx->next_num) {
5909 const enum index *list = ctx->next[ctx->next_num - 1];
5912 for (i = 0; list[i]; ++i) {
5919 *hdr = &cmd_set_raw_token_hdr;
/** Token generator and output processing callback (cmdline API). */
static void
cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	/* A NULL cmdline pointer means token generation, otherwise the
	 * fully parsed buffer is dispatched for execution. */
	if (cl == NULL)
		cmd_set_raw_tok(arg0, arg2);
	else
		cmd_set_raw_parsed(arg0);
}
5932 /** Global parser instance (cmdline API). */
5933 cmdline_parse_inst_t cmd_set_raw = {
5934 .f = cmd_set_raw_cb,
5935 .data = NULL, /**< Unused. */
5936 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5939 }, /**< Tokens are returned by cmd_flow_tok(). */