1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
29 /** Parser token indices. */
52 /* Top-level command. */
54 /* Sub-level commands. */
59 /* Top-level command. */
61 /* Sub-level commands. */
70 /* Destroy arguments. */
73 /* Query arguments. */
79 /* Validate/create arguments. */
86 /* Validate/create pattern. */
123 ITEM_VLAN_INNER_TYPE,
155 ITEM_E_TAG_GRP_ECID_B,
164 ITEM_GRE_C_RSVD0_VER,
180 ITEM_ARP_ETH_IPV4_SHA,
181 ITEM_ARP_ETH_IPV4_SPA,
182 ITEM_ARP_ETH_IPV4_THA,
183 ITEM_ARP_ETH_IPV4_TPA,
185 ITEM_IPV6_EXT_NEXT_HDR,
190 ITEM_ICMP6_ND_NS_TARGET_ADDR,
192 ITEM_ICMP6_ND_NA_TARGET_ADDR,
194 ITEM_ICMP6_ND_OPT_TYPE,
195 ITEM_ICMP6_ND_OPT_SLA_ETH,
196 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
197 ITEM_ICMP6_ND_OPT_TLA_ETH,
198 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
211 ITEM_HIGIG2_CLASSIFICATION,
214 /* Validate/create actions. */
234 ACTION_RSS_FUNC_DEFAULT,
235 ACTION_RSS_FUNC_TOEPLITZ,
236 ACTION_RSS_FUNC_SIMPLE_XOR,
237 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
249 ACTION_PHY_PORT_ORIGINAL,
250 ACTION_PHY_PORT_INDEX,
252 ACTION_PORT_ID_ORIGINAL,
256 ACTION_OF_SET_MPLS_TTL,
257 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
258 ACTION_OF_DEC_MPLS_TTL,
259 ACTION_OF_SET_NW_TTL,
260 ACTION_OF_SET_NW_TTL_NW_TTL,
261 ACTION_OF_DEC_NW_TTL,
262 ACTION_OF_COPY_TTL_OUT,
263 ACTION_OF_COPY_TTL_IN,
266 ACTION_OF_PUSH_VLAN_ETHERTYPE,
267 ACTION_OF_SET_VLAN_VID,
268 ACTION_OF_SET_VLAN_VID_VLAN_VID,
269 ACTION_OF_SET_VLAN_PCP,
270 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
272 ACTION_OF_POP_MPLS_ETHERTYPE,
274 ACTION_OF_PUSH_MPLS_ETHERTYPE,
281 ACTION_MPLSOGRE_ENCAP,
282 ACTION_MPLSOGRE_DECAP,
283 ACTION_MPLSOUDP_ENCAP,
284 ACTION_MPLSOUDP_DECAP,
286 ACTION_SET_IPV4_SRC_IPV4_SRC,
288 ACTION_SET_IPV4_DST_IPV4_DST,
290 ACTION_SET_IPV6_SRC_IPV6_SRC,
292 ACTION_SET_IPV6_DST_IPV6_DST,
294 ACTION_SET_TP_SRC_TP_SRC,
296 ACTION_SET_TP_DST_TP_DST,
302 ACTION_SET_MAC_SRC_MAC_SRC,
304 ACTION_SET_MAC_DST_MAC_DST,
306 ACTION_INC_TCP_SEQ_VALUE,
308 ACTION_DEC_TCP_SEQ_VALUE,
310 ACTION_INC_TCP_ACK_VALUE,
312 ACTION_DEC_TCP_ACK_VALUE,
315 ACTION_RAW_ENCAP_INDEX,
316 ACTION_RAW_ENCAP_INDEX_VALUE,
317 ACTION_RAW_DECAP_INDEX,
318 ACTION_RAW_DECAP_INDEX_VALUE,
321 /** Maximum size for pattern in struct rte_flow_item_raw. */
322 #define ITEM_RAW_PATTERN_SIZE 40
/* The pattern bytes are stored immediately after the item structure, so the
 * total storage required is the struct size plus the pattern area below. */
324 /** Storage size for struct rte_flow_item_raw including pattern. */
325 #define ITEM_RAW_SIZE \
326 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
/* Upper bound on the queue[] array carried alongside the RSS action. */
328 /** Maximum number of queue indices in struct rte_flow_action_rss. */
329 #define ACTION_RSS_QUEUE_NUM 128
331 /** Storage for struct rte_flow_action_rss including external data. */
332 struct action_rss_data {
333 struct rte_flow_action_rss conf;
334 uint8_t key[RSS_HASH_KEY_LENGTH];
335 uint16_t queue[ACTION_RSS_QUEUE_NUM];
338 /** Maximum data size in struct rte_flow_action_raw_encap. */
339 #define ACTION_RAW_ENCAP_MAX_DATA 128
/* Number of raw encap/decap configuration slots selectable by index. */
340 #define RAW_ENCAP_CONFS_MAX_NUM 8
342 /** Storage for struct rte_flow_action_raw_encap. */
343 struct raw_encap_conf {
344 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
345 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
/* Per-index storage for raw encap configurations; indexed by the value
 * given on the command line (bounded by RAW_ENCAP_CONFS_MAX_NUM). */
349 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
351 /** Storage for struct rte_flow_action_raw_encap including external data. */
352 struct action_raw_encap_data {
353 struct rte_flow_action_raw_encap conf;
354 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
355 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
359 /** Storage for struct rte_flow_action_raw_decap. */
360 struct raw_decap_conf {
361 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
/* Per-index storage for raw decap configurations; shares the slot count
 * with raw encap (RAW_ENCAP_CONFS_MAX_NUM). */
365 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
367 /** Storage for struct rte_flow_action_raw_decap including external data. */
368 struct action_raw_decap_data {
369 struct rte_flow_action_raw_decap conf;
370 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
374 struct vxlan_encap_conf vxlan_encap_conf = {
378 .vni = "\x00\x00\x00",
380 .udp_dst = RTE_BE16(4789),
381 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
382 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
383 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
384 "\x00\x00\x00\x00\x00\x00\x00\x01",
385 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
386 "\x00\x00\x00\x00\x00\x00\x11\x11",
390 .eth_src = "\x00\x00\x00\x00\x00\x00",
391 .eth_dst = "\xff\xff\xff\xff\xff\xff",
394 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
/* NOTE(review): presumably eth / [vlan] / ipv4-or-ipv6 / udp / vxlan plus a
 * terminating END item, matching the item_* members of
 * struct action_vxlan_encap_data — confirm against the item list builder. */
395 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
397 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
398 struct action_vxlan_encap_data {
399 struct rte_flow_action_vxlan_encap conf;
400 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
401 struct rte_flow_item_eth item_eth;
402 struct rte_flow_item_vlan item_vlan;
404 struct rte_flow_item_ipv4 item_ipv4;
405 struct rte_flow_item_ipv6 item_ipv6;
407 struct rte_flow_item_udp item_udp;
408 struct rte_flow_item_vxlan item_vxlan;
411 struct nvgre_encap_conf nvgre_encap_conf = {
414 .tni = "\x00\x00\x00",
415 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
416 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
417 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
418 "\x00\x00\x00\x00\x00\x00\x00\x01",
419 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
420 "\x00\x00\x00\x00\x00\x00\x11\x11",
422 .eth_src = "\x00\x00\x00\x00\x00\x00",
423 .eth_dst = "\xff\xff\xff\xff\xff\xff",
426 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
/* NOTE(review): one fewer than the VXLAN case (no UDP header in NVGRE),
 * matching the item_* members of struct action_nvgre_encap_data — confirm
 * against the item list builder. */
427 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
429 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
430 struct action_nvgre_encap_data {
431 struct rte_flow_action_nvgre_encap conf;
432 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
433 struct rte_flow_item_eth item_eth;
434 struct rte_flow_item_vlan item_vlan;
436 struct rte_flow_item_ipv4 item_ipv4;
437 struct rte_flow_item_ipv6 item_ipv6;
439 struct rte_flow_item_nvgre item_nvgre;
/* Global configuration storage for the L2/MPLSoGRE/MPLSoUDP encap and decap
 * "set" commands. The struct types are not defined in the visible portion of
 * this file — presumably declared in a shared testpmd header; verify there. */
442 struct l2_encap_conf l2_encap_conf;
444 struct l2_decap_conf l2_decap_conf;
446 struct mplsogre_encap_conf mplsogre_encap_conf;
448 struct mplsogre_decap_conf mplsogre_decap_conf;
450 struct mplsoudp_encap_conf mplsoudp_encap_conf;
452 struct mplsoudp_decap_conf mplsoudp_decap_conf;
454 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Bounds both the next[] and args[] arrays of the parser context. */
455 #define CTX_STACK_SIZE 16
457 /** Parser context. */
459 /** Stack of subsequent token lists to process. */
460 const enum index *next[CTX_STACK_SIZE];
461 /** Arguments for stacked tokens. */
462 const void *args[CTX_STACK_SIZE];
463 enum index curr; /**< Current token index. */
464 enum index prev; /**< Index of the last token seen. */
465 int next_num; /**< Number of entries in next[]. */
466 int args_num; /**< Number of entries in args[]. */
467 uint32_t eol:1; /**< EOL has been detected. */
468 uint32_t last:1; /**< No more arguments. */
469 portid_t port; /**< Current port ID (for completions). */
470 uint32_t objdata; /**< Object-specific data. */
471 void *object; /**< Address of current object for relative offsets. */
472 void *objmask; /**< Object a full mask must be written to. */
475 /** Token argument. */
477 uint32_t hton:1; /**< Use network byte ordering. */
478 uint32_t sign:1; /**< Value is signed. */
479 uint32_t bounded:1; /**< Value is bounded. */
480 uintmax_t min; /**< Minimum value if bounded. */
481 uintmax_t max; /**< Maximum value if bounded. */
482 uint32_t offset; /**< Relative offset from ctx->object. */
483 uint32_t size; /**< Field size. */
484 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
487 /** Parser token definition. */
489 /** Type displayed during completion (defaults to "TOKEN"). */
491 /** Help displayed during completion (defaults to token name). */
493 /** Private data used by parser functions. */
496 * Lists of subsequent tokens to push on the stack. Each call to the
497 * parser consumes the last entry of that stack.
499 const enum index *const *next;
500 /** Arguments stack for subsequent tokens that need them. */
501 const struct arg *const *args;
503 * Token-processing callback, returns -1 in case of error, the
504 * length of the matched string otherwise. If NULL, attempts to
505 * match the token name.
507 * If buf is not NULL, the result should be stored in it according
508 * to context. An error is returned if not large enough.
510 int (*call)(struct context *ctx, const struct token *token,
511 const char *str, unsigned int len,
512 void *buf, unsigned int size);
514 * Callback that provides possible values for this token, used for
515 * completion. Returns -1 in case of error, the number of possible
516 * values otherwise. If NULL, the token name is used.
518 * If buf is not NULL, entry index ent is written to buf and the
519 * full length of the entry is returned (same behavior as
522 int (*comp)(struct context *ctx, const struct token *token,
523 unsigned int ent, char *buf, unsigned int size);
524 /** Mandatory token name, no default value. */
528 /** Static initializer for the next field. */
/* Compound literal: NULL-terminated array of token-index lists. */
529 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
531 /** Static initializer for a NEXT() entry. */
/* Compound literal: ZERO-terminated list of token indices. */
532 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
534 /** Static initializer for the args field. */
/* Compound literal: NULL-terminated array of argument descriptors. */
535 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
537 /** Static initializer for ARGS() to target a field. */
538 #define ARGS_ENTRY(s, f) \
539 (&(const struct arg){ \
540 .offset = offsetof(s, f), \
541 .size = sizeof(((s *)0)->f), \
544 /** Static initializer for ARGS() to target a bit-field. */
545 #define ARGS_ENTRY_BF(s, f, b) \
546 (&(const struct arg){ \
548 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
551 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
552 #define ARGS_ENTRY_MASK(s, f, m) \
553 (&(const struct arg){ \
554 .offset = offsetof(s, f), \
555 .size = sizeof(((s *)0)->f), \
556 .mask = (const void *)(m), \
559 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
560 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
561 (&(const struct arg){ \
563 .offset = offsetof(s, f), \
564 .size = sizeof(((s *)0)->f), \
565 .mask = (const void *)(m), \
568 /** Static initializer for ARGS() to target a pointer. */
569 #define ARGS_ENTRY_PTR(s, f) \
570 (&(const struct arg){ \
571 .size = sizeof(*((s *)0)->f), \
574 /** Static initializer for ARGS() with arbitrary offset and size. */
575 #define ARGS_ENTRY_ARB(o, s) \
576 (&(const struct arg){ \
581 /** Same as ARGS_ENTRY_ARB() with bounded values. */
582 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
583 (&(const struct arg){ \
591 /** Same as ARGS_ENTRY() using network byte ordering. */
592 #define ARGS_ENTRY_HTON(s, f) \
593 (&(const struct arg){ \
595 .offset = offsetof(s, f), \
596 .size = sizeof(((s *)0)->f), \
599 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
600 #define ARG_ENTRY_HTON(s) \
601 (&(const struct arg){ \
607 /** Parser output buffer layout expected by cmd_flow_parsed(). */
609 enum index command; /**< Flow command. */
610 portid_t port; /**< Affected port ID. */
613 struct rte_flow_attr attr;
614 struct rte_flow_item *pattern;
615 struct rte_flow_action *actions;
619 } vc; /**< Validate/create arguments. */
623 } destroy; /**< Destroy arguments. */
626 struct rte_flow_action action;
627 } query; /**< Query arguments. */
631 } list; /**< List arguments. */
634 } isolate; /**< Isolated mode arguments. */
635 } args; /**< Command arguments. */
638 /** Private data for pattern items. */
639 struct parse_item_priv {
640 enum rte_flow_item_type type; /**< Item type. */
641 uint32_t size; /**< Size of item specification structure. */
644 #define PRIV_ITEM(t, s) \
645 (&(const struct parse_item_priv){ \
646 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
650 /** Private data for actions. */
651 struct parse_action_priv {
652 enum rte_flow_action_type type; /**< Action type. */
653 uint32_t size; /**< Size of action configuration structure. */
656 #define PRIV_ACTION(t, s) \
657 (&(const struct parse_action_priv){ \
658 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
662 static const enum index next_vc_attr[] = {
672 static const enum index next_destroy_attr[] = {
678 static const enum index next_list_attr[] = {
684 static const enum index item_param[] = {
693 static const enum index next_item[] = {
729 ITEM_ICMP6_ND_OPT_SLA_ETH,
730 ITEM_ICMP6_ND_OPT_TLA_ETH,
742 static const enum index item_fuzzy[] = {
748 static const enum index item_any[] = {
754 static const enum index item_vf[] = {
760 static const enum index item_phy_port[] = {
766 static const enum index item_port_id[] = {
772 static const enum index item_mark[] = {
778 static const enum index item_raw[] = {
788 static const enum index item_eth[] = {
796 static const enum index item_vlan[] = {
801 ITEM_VLAN_INNER_TYPE,
806 static const enum index item_ipv4[] = {
816 static const enum index item_ipv6[] = {
827 static const enum index item_icmp[] = {
834 static const enum index item_udp[] = {
841 static const enum index item_tcp[] = {
849 static const enum index item_sctp[] = {
858 static const enum index item_vxlan[] = {
864 static const enum index item_e_tag[] = {
865 ITEM_E_TAG_GRP_ECID_B,
870 static const enum index item_nvgre[] = {
876 static const enum index item_mpls[] = {
884 static const enum index item_gre[] = {
886 ITEM_GRE_C_RSVD0_VER,
894 static const enum index item_gre_key[] = {
900 static const enum index item_gtp[] = {
906 static const enum index item_geneve[] = {
913 static const enum index item_vxlan_gpe[] = {
919 static const enum index item_arp_eth_ipv4[] = {
920 ITEM_ARP_ETH_IPV4_SHA,
921 ITEM_ARP_ETH_IPV4_SPA,
922 ITEM_ARP_ETH_IPV4_THA,
923 ITEM_ARP_ETH_IPV4_TPA,
928 static const enum index item_ipv6_ext[] = {
929 ITEM_IPV6_EXT_NEXT_HDR,
934 static const enum index item_icmp6[] = {
941 static const enum index item_icmp6_nd_ns[] = {
942 ITEM_ICMP6_ND_NS_TARGET_ADDR,
947 static const enum index item_icmp6_nd_na[] = {
948 ITEM_ICMP6_ND_NA_TARGET_ADDR,
953 static const enum index item_icmp6_nd_opt[] = {
954 ITEM_ICMP6_ND_OPT_TYPE,
959 static const enum index item_icmp6_nd_opt_sla_eth[] = {
960 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
965 static const enum index item_icmp6_nd_opt_tla_eth[] = {
966 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
971 static const enum index item_meta[] = {
977 static const enum index item_gtp_psc[] = {
984 static const enum index item_pppoed[] = {
990 static const enum index item_pppoes[] = {
996 static const enum index item_pppoe_proto_id[] = {
1002 static const enum index item_higig2[] = {
1003 ITEM_HIGIG2_CLASSIFICATION,
1009 static const enum index next_set_raw[] = {
1015 static const enum index next_action[] = {
1031 ACTION_OF_SET_MPLS_TTL,
1032 ACTION_OF_DEC_MPLS_TTL,
1033 ACTION_OF_SET_NW_TTL,
1034 ACTION_OF_DEC_NW_TTL,
1035 ACTION_OF_COPY_TTL_OUT,
1036 ACTION_OF_COPY_TTL_IN,
1038 ACTION_OF_PUSH_VLAN,
1039 ACTION_OF_SET_VLAN_VID,
1040 ACTION_OF_SET_VLAN_PCP,
1042 ACTION_OF_PUSH_MPLS,
1049 ACTION_MPLSOGRE_ENCAP,
1050 ACTION_MPLSOGRE_DECAP,
1051 ACTION_MPLSOUDP_ENCAP,
1052 ACTION_MPLSOUDP_DECAP,
1053 ACTION_SET_IPV4_SRC,
1054 ACTION_SET_IPV4_DST,
1055 ACTION_SET_IPV6_SRC,
1056 ACTION_SET_IPV6_DST,
1073 static const enum index action_mark[] = {
1079 static const enum index action_queue[] = {
1085 static const enum index action_count[] = {
1087 ACTION_COUNT_SHARED,
1092 static const enum index action_rss[] = {
1103 static const enum index action_vf[] = {
1110 static const enum index action_phy_port[] = {
1111 ACTION_PHY_PORT_ORIGINAL,
1112 ACTION_PHY_PORT_INDEX,
1117 static const enum index action_port_id[] = {
1118 ACTION_PORT_ID_ORIGINAL,
1124 static const enum index action_meter[] = {
1130 static const enum index action_of_set_mpls_ttl[] = {
1131 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1136 static const enum index action_of_set_nw_ttl[] = {
1137 ACTION_OF_SET_NW_TTL_NW_TTL,
1142 static const enum index action_of_push_vlan[] = {
1143 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1148 static const enum index action_of_set_vlan_vid[] = {
1149 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1154 static const enum index action_of_set_vlan_pcp[] = {
1155 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1160 static const enum index action_of_pop_mpls[] = {
1161 ACTION_OF_POP_MPLS_ETHERTYPE,
1166 static const enum index action_of_push_mpls[] = {
1167 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1172 static const enum index action_set_ipv4_src[] = {
1173 ACTION_SET_IPV4_SRC_IPV4_SRC,
1178 static const enum index action_set_mac_src[] = {
1179 ACTION_SET_MAC_SRC_MAC_SRC,
1184 static const enum index action_set_ipv4_dst[] = {
1185 ACTION_SET_IPV4_DST_IPV4_DST,
1190 static const enum index action_set_ipv6_src[] = {
1191 ACTION_SET_IPV6_SRC_IPV6_SRC,
1196 static const enum index action_set_ipv6_dst[] = {
1197 ACTION_SET_IPV6_DST_IPV6_DST,
1202 static const enum index action_set_tp_src[] = {
1203 ACTION_SET_TP_SRC_TP_SRC,
1208 static const enum index action_set_tp_dst[] = {
1209 ACTION_SET_TP_DST_TP_DST,
1214 static const enum index action_set_ttl[] = {
1220 static const enum index action_jump[] = {
1226 static const enum index action_set_mac_dst[] = {
1227 ACTION_SET_MAC_DST_MAC_DST,
1232 static const enum index action_inc_tcp_seq[] = {
1233 ACTION_INC_TCP_SEQ_VALUE,
1238 static const enum index action_dec_tcp_seq[] = {
1239 ACTION_DEC_TCP_SEQ_VALUE,
1244 static const enum index action_inc_tcp_ack[] = {
1245 ACTION_INC_TCP_ACK_VALUE,
1250 static const enum index action_dec_tcp_ack[] = {
1251 ACTION_DEC_TCP_ACK_VALUE,
1256 static const enum index action_raw_encap[] = {
1257 ACTION_RAW_ENCAP_INDEX,
1262 static const enum index action_raw_decap[] = {
1263 ACTION_RAW_DECAP_INDEX,
/*
 * Forward declarations for token-processing callbacks (struct token.call).
 * Each receives the parser context, the matched token, the input string and
 * its length, and an optional output buffer with its size; returns the
 * length of the matched string or -1 on error (see struct token).
 */
1268 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1269 const char *, unsigned int,
1270 void *, unsigned int);
1271 static int parse_set_init(struct context *, const struct token *,
1272 const char *, unsigned int,
1273 void *, unsigned int);
1274 static int parse_init(struct context *, const struct token *,
1275 const char *, unsigned int,
1276 void *, unsigned int);
1277 static int parse_vc(struct context *, const struct token *,
1278 const char *, unsigned int,
1279 void *, unsigned int);
1280 static int parse_vc_spec(struct context *, const struct token *,
1281 const char *, unsigned int, void *, unsigned int);
1282 static int parse_vc_conf(struct context *, const struct token *,
1283 const char *, unsigned int, void *, unsigned int);
1284 static int parse_vc_action_rss(struct context *, const struct token *,
1285 const char *, unsigned int, void *,
1287 static int parse_vc_action_rss_func(struct context *, const struct token *,
1288 const char *, unsigned int, void *,
1290 static int parse_vc_action_rss_type(struct context *, const struct token *,
1291 const char *, unsigned int, void *,
1293 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1294 const char *, unsigned int, void *,
1296 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1297 const char *, unsigned int, void *,
1299 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1300 const char *, unsigned int, void *,
1302 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1303 const char *, unsigned int, void *,
1305 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1306 const char *, unsigned int, void *,
1308 static int parse_vc_action_mplsogre_encap(struct context *,
1309 const struct token *, const char *,
1310 unsigned int, void *, unsigned int);
1311 static int parse_vc_action_mplsogre_decap(struct context *,
1312 const struct token *, const char *,
1313 unsigned int, void *, unsigned int);
1314 static int parse_vc_action_mplsoudp_encap(struct context *,
1315 const struct token *, const char *,
1316 unsigned int, void *, unsigned int);
1317 static int parse_vc_action_mplsoudp_decap(struct context *,
1318 const struct token *, const char *,
1319 unsigned int, void *, unsigned int);
1320 static int parse_vc_action_raw_encap(struct context *,
1321 const struct token *, const char *,
1322 unsigned int, void *, unsigned int);
1323 static int parse_vc_action_raw_decap(struct context *,
1324 const struct token *, const char *,
1325 unsigned int, void *, unsigned int);
1326 static int parse_vc_action_raw_encap_index(struct context *,
1327 const struct token *, const char *,
1328 unsigned int, void *, unsigned int);
1329 static int parse_vc_action_raw_decap_index(struct context *,
1330 const struct token *, const char *,
1331 unsigned int, void *, unsigned int);
/*
 * Forward declarations for the remaining token-processing callbacks
 * (struct token.call): command dispatchers (destroy/flush/query/list/
 * isolate) and generic value parsers (integers, prefixes, booleans,
 * strings, hex, MAC/IPv4/IPv6 addresses, port IDs).
 */
1332 static int parse_destroy(struct context *, const struct token *,
1333 const char *, unsigned int,
1334 void *, unsigned int);
1335 static int parse_flush(struct context *, const struct token *,
1336 const char *, unsigned int,
1337 void *, unsigned int);
1338 static int parse_query(struct context *, const struct token *,
1339 const char *, unsigned int,
1340 void *, unsigned int);
1341 static int parse_action(struct context *, const struct token *,
1342 const char *, unsigned int,
1343 void *, unsigned int);
1344 static int parse_list(struct context *, const struct token *,
1345 const char *, unsigned int,
1346 void *, unsigned int);
1347 static int parse_isolate(struct context *, const struct token *,
1348 const char *, unsigned int,
1349 void *, unsigned int);
1350 static int parse_int(struct context *, const struct token *,
1351 const char *, unsigned int,
1352 void *, unsigned int);
1353 static int parse_prefix(struct context *, const struct token *,
1354 const char *, unsigned int,
1355 void *, unsigned int);
1356 static int parse_boolean(struct context *, const struct token *,
1357 const char *, unsigned int,
1358 void *, unsigned int);
1359 static int parse_string(struct context *, const struct token *,
1360 const char *, unsigned int,
1361 void *, unsigned int);
1362 static int parse_hex(struct context *ctx, const struct token *token,
1363 const char *str, unsigned int len,
1364 void *buf, unsigned int size);
1365 static int parse_mac_addr(struct context *, const struct token *,
1366 const char *, unsigned int,
1367 void *, unsigned int);
1368 static int parse_ipv4_addr(struct context *, const struct token *,
1369 const char *, unsigned int,
1370 void *, unsigned int);
1371 static int parse_ipv6_addr(struct context *, const struct token *,
1372 const char *, unsigned int,
1373 void *, unsigned int);
1374 static int parse_port(struct context *, const struct token *,
1375 const char *, unsigned int,
1376 void *, unsigned int);
/*
 * Forward declarations for completion callbacks (struct token.comp):
 * given an entry index, write a candidate value to buf and return its
 * full length, or return the number of possible values / -1 on error.
 */
1377 static int comp_none(struct context *, const struct token *,
1378 unsigned int, char *, unsigned int);
1379 static int comp_boolean(struct context *, const struct token *,
1380 unsigned int, char *, unsigned int);
1381 static int comp_action(struct context *, const struct token *,
1382 unsigned int, char *, unsigned int);
1383 static int comp_port(struct context *, const struct token *,
1384 unsigned int, char *, unsigned int);
1385 static int comp_rule_id(struct context *, const struct token *,
1386 unsigned int, char *, unsigned int);
1387 static int comp_vc_action_rss_type(struct context *, const struct token *,
1388 unsigned int, char *, unsigned int);
1389 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1390 unsigned int, char *, unsigned int);
1391 static int comp_set_raw_index(struct context *, const struct token *,
1392 unsigned int, char *, unsigned int);
1394 /** Token definitions. */
1395 static const struct token token_list[] = {
1396 /* Special tokens. */
1399 .help = "null entry, abused as the entry point",
1400 .next = NEXT(NEXT_ENTRY(FLOW)),
1405 .help = "command may end here",
1408 .name = "START_SET",
1409 .help = "null entry, abused as the entry point for set",
1410 .next = NEXT(NEXT_ENTRY(SET)),
1415 .help = "set command may end here",
1417 /* Common tokens. */
1421 .help = "integer value",
1426 .name = "{unsigned}",
1428 .help = "unsigned integer value",
1435 .help = "prefix length for bit-mask",
1436 .call = parse_prefix,
1440 .name = "{boolean}",
1442 .help = "any boolean value",
1443 .call = parse_boolean,
1444 .comp = comp_boolean,
1449 .help = "fixed string",
1450 .call = parse_string,
1456 .help = "fixed string",
1461 .name = "{MAC address}",
1463 .help = "standard MAC address notation",
1464 .call = parse_mac_addr,
1468 .name = "{IPv4 address}",
1469 .type = "IPV4 ADDRESS",
1470 .help = "standard IPv4 address notation",
1471 .call = parse_ipv4_addr,
1475 .name = "{IPv6 address}",
1476 .type = "IPV6 ADDRESS",
1477 .help = "standard IPv6 address notation",
1478 .call = parse_ipv6_addr,
1482 .name = "{rule id}",
1484 .help = "rule identifier",
1486 .comp = comp_rule_id,
1489 .name = "{port_id}",
1491 .help = "port identifier",
1496 .name = "{group_id}",
1498 .help = "group identifier",
1502 [PRIORITY_LEVEL] = {
1505 .help = "priority level",
1509 /* Top-level command. */
1512 .type = "{command} {port_id} [{arg} [...]]",
1513 .help = "manage ingress/egress flow rules",
1514 .next = NEXT(NEXT_ENTRY
1524 /* Sub-level commands. */
1527 .help = "check whether a flow rule can be created",
1528 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1529 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1534 .help = "create a flow rule",
1535 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1536 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1541 .help = "destroy specific flow rules",
1542 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1543 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1544 .call = parse_destroy,
1548 .help = "destroy all flow rules",
1549 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1550 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1551 .call = parse_flush,
1555 .help = "query an existing flow rule",
1556 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1557 NEXT_ENTRY(RULE_ID),
1558 NEXT_ENTRY(PORT_ID)),
1559 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1560 ARGS_ENTRY(struct buffer, args.query.rule),
1561 ARGS_ENTRY(struct buffer, port)),
1562 .call = parse_query,
1566 .help = "list existing flow rules",
1567 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1568 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1573 .help = "restrict ingress traffic to the defined flow rules",
1574 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1575 NEXT_ENTRY(PORT_ID)),
1576 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1577 ARGS_ENTRY(struct buffer, port)),
1578 .call = parse_isolate,
1580 /* Destroy arguments. */
1583 .help = "specify a rule identifier",
1584 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1585 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1586 .call = parse_destroy,
1588 /* Query arguments. */
1592 .help = "action to query, must be part of the rule",
1593 .call = parse_action,
1594 .comp = comp_action,
1596 /* List arguments. */
1599 .help = "specify a group",
1600 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1601 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1604 /* Validate/create attributes. */
1607 .help = "specify a group",
1608 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1609 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1614 .help = "specify a priority level",
1615 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1616 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1621 .help = "affect rule to ingress",
1622 .next = NEXT(next_vc_attr),
1627 .help = "affect rule to egress",
1628 .next = NEXT(next_vc_attr),
1633 .help = "apply rule directly to endpoints found in pattern",
1634 .next = NEXT(next_vc_attr),
1637 /* Validate/create pattern. */
1640 .help = "submit a list of pattern items",
1641 .next = NEXT(next_item),
1646 .help = "match value perfectly (with full bit-mask)",
1647 .call = parse_vc_spec,
1649 [ITEM_PARAM_SPEC] = {
1651 .help = "match value according to configured bit-mask",
1652 .call = parse_vc_spec,
1654 [ITEM_PARAM_LAST] = {
1656 .help = "specify upper bound to establish a range",
1657 .call = parse_vc_spec,
1659 [ITEM_PARAM_MASK] = {
1661 .help = "specify bit-mask with relevant bits set to one",
1662 .call = parse_vc_spec,
1664 [ITEM_PARAM_PREFIX] = {
1666 .help = "generate bit-mask from a prefix length",
1667 .call = parse_vc_spec,
1671 .help = "specify next pattern item",
1672 .next = NEXT(next_item),
1676 .help = "end list of pattern items",
1677 .priv = PRIV_ITEM(END, 0),
1678 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1683 .help = "no-op pattern item",
1684 .priv = PRIV_ITEM(VOID, 0),
1685 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1690 .help = "perform actions when pattern does not match",
1691 .priv = PRIV_ITEM(INVERT, 0),
1692 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1697 .help = "match any protocol for the current layer",
1698 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1699 .next = NEXT(item_any),
1704 .help = "number of layers covered",
1705 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1706 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1710 .help = "match traffic from/to the physical function",
1711 .priv = PRIV_ITEM(PF, 0),
1712 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1717 .help = "match traffic from/to a virtual function ID",
1718 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1719 .next = NEXT(item_vf),
1725 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1726 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1730 .help = "match traffic from/to a specific physical port",
1731 .priv = PRIV_ITEM(PHY_PORT,
1732 sizeof(struct rte_flow_item_phy_port)),
1733 .next = NEXT(item_phy_port),
1736 [ITEM_PHY_PORT_INDEX] = {
1738 .help = "physical port index",
1739 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1740 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1744 .help = "match traffic from/to a given DPDK port ID",
1745 .priv = PRIV_ITEM(PORT_ID,
1746 sizeof(struct rte_flow_item_port_id)),
1747 .next = NEXT(item_port_id),
1750 [ITEM_PORT_ID_ID] = {
1752 .help = "DPDK port ID",
1753 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1754 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1758 .help = "match traffic against value set in previously matched rule",
1759 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1760 .next = NEXT(item_mark),
1765 .help = "Integer value to match against",
1766 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1767 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1771 .help = "match an arbitrary byte string",
1772 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1773 .next = NEXT(item_raw),
1776 [ITEM_RAW_RELATIVE] = {
1778 .help = "look for pattern after the previous item",
1779 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1780 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1783 [ITEM_RAW_SEARCH] = {
1785 .help = "search pattern from offset (see also limit)",
1786 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1787 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1790 [ITEM_RAW_OFFSET] = {
1792 .help = "absolute or relative offset for pattern",
1793 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1794 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1796 [ITEM_RAW_LIMIT] = {
1798 .help = "search area limit for start of pattern",
1799 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1800 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1802 [ITEM_RAW_PATTERN] = {
1804 .help = "byte string to look for",
1805 .next = NEXT(item_raw,
1807 NEXT_ENTRY(ITEM_PARAM_IS,
1810 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1811 ARGS_ENTRY(struct rte_flow_item_raw, length),
1812 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1813 ITEM_RAW_PATTERN_SIZE)),
1817 .help = "match Ethernet header",
1818 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1819 .next = NEXT(item_eth),
1824 .help = "destination MAC",
1825 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1826 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1830 .help = "source MAC",
1831 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1832 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1836 .help = "EtherType",
1837 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1838 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1842 .help = "match 802.1Q/ad VLAN tag",
1843 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1844 .next = NEXT(item_vlan),
1849 .help = "tag control information",
1850 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1851 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1855 .help = "priority code point",
1856 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1857 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1862 .help = "drop eligible indicator",
1863 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1864 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1869 .help = "VLAN identifier",
1870 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1871 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1874 [ITEM_VLAN_INNER_TYPE] = {
1875 .name = "inner_type",
1876 .help = "inner EtherType",
1877 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1878 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1883 .help = "match IPv4 header",
1884 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1885 .next = NEXT(item_ipv4),
1890 .help = "type of service",
1891 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1892 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1893 hdr.type_of_service)),
1897 .help = "time to live",
1898 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1899 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1902 [ITEM_IPV4_PROTO] = {
1904 .help = "next protocol ID",
1905 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1906 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1907 hdr.next_proto_id)),
1911 .help = "source address",
1912 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1913 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1918 .help = "destination address",
1919 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1920 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1925 .help = "match IPv6 header",
1926 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1927 .next = NEXT(item_ipv6),
1932 .help = "traffic class",
1933 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1934 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1936 "\x0f\xf0\x00\x00")),
1938 [ITEM_IPV6_FLOW] = {
1940 .help = "flow label",
1941 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1942 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1944 "\x00\x0f\xff\xff")),
1946 [ITEM_IPV6_PROTO] = {
1948 .help = "protocol (next header)",
1949 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1950 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1955 .help = "hop limit",
1956 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1957 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1962 .help = "source address",
1963 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1964 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1969 .help = "destination address",
1970 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1971 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1976 .help = "match ICMP header",
1977 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1978 .next = NEXT(item_icmp),
1981 [ITEM_ICMP_TYPE] = {
1983 .help = "ICMP packet type",
1984 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1985 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1988 [ITEM_ICMP_CODE] = {
1990 .help = "ICMP packet code",
1991 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1992 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1997 .help = "match UDP header",
1998 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1999 .next = NEXT(item_udp),
2004 .help = "UDP source port",
2005 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2006 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2011 .help = "UDP destination port",
2012 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2013 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2018 .help = "match TCP header",
2019 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2020 .next = NEXT(item_tcp),
2025 .help = "TCP source port",
2026 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2027 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2032 .help = "TCP destination port",
2033 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2034 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2037 [ITEM_TCP_FLAGS] = {
2039 .help = "TCP flags",
2040 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2041 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2046 .help = "match SCTP header",
2047 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2048 .next = NEXT(item_sctp),
2053 .help = "SCTP source port",
2054 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2055 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2060 .help = "SCTP destination port",
2061 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2062 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2067 .help = "validation tag",
2068 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2069 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2072 [ITEM_SCTP_CKSUM] = {
2075 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2076 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2081 .help = "match VXLAN header",
2082 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2083 .next = NEXT(item_vxlan),
2086 [ITEM_VXLAN_VNI] = {
2088 .help = "VXLAN identifier",
2089 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2090 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2094 .help = "match E-Tag header",
2095 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2096 .next = NEXT(item_e_tag),
2099 [ITEM_E_TAG_GRP_ECID_B] = {
2100 .name = "grp_ecid_b",
2101 .help = "GRP and E-CID base",
2102 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2103 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2109 .help = "match NVGRE header",
2110 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2111 .next = NEXT(item_nvgre),
2114 [ITEM_NVGRE_TNI] = {
2116 .help = "virtual subnet ID",
2117 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2118 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2122 .help = "match MPLS header",
2123 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2124 .next = NEXT(item_mpls),
2127 [ITEM_MPLS_LABEL] = {
2129 .help = "MPLS label",
2130 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2131 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2137 .help = "MPLS Traffic Class",
2138 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2139 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2145 .help = "MPLS Bottom-of-Stack",
2146 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2147 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2153 .help = "match GRE header",
2154 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2155 .next = NEXT(item_gre),
2158 [ITEM_GRE_PROTO] = {
2160 .help = "GRE protocol type",
2161 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2162 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2165 [ITEM_GRE_C_RSVD0_VER] = {
2166 .name = "c_rsvd0_ver",
2168 "checksum (1b), undefined (1b), key bit (1b),"
2169 " sequence number (1b), reserved 0 (9b),"
2171 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2172 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2175 [ITEM_GRE_C_BIT] = {
2177 .help = "checksum bit (C)",
2178 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2179 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2181 "\x80\x00\x00\x00")),
2183 [ITEM_GRE_S_BIT] = {
2185 .help = "sequence number bit (S)",
2186 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2187 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2189 "\x10\x00\x00\x00")),
2191 [ITEM_GRE_K_BIT] = {
2193 .help = "key bit (K)",
2194 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2195 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2197 "\x20\x00\x00\x00")),
2201 .help = "fuzzy pattern match, expect faster than default",
2202 .priv = PRIV_ITEM(FUZZY,
2203 sizeof(struct rte_flow_item_fuzzy)),
2204 .next = NEXT(item_fuzzy),
2207 [ITEM_FUZZY_THRESH] = {
2209 .help = "match accuracy threshold",
2210 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2211 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2216 .help = "match GTP header",
2217 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2218 .next = NEXT(item_gtp),
2223 .help = "tunnel endpoint identifier",
2224 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2225 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2229 .help = "match GTP header",
2230 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2231 .next = NEXT(item_gtp),
2236 .help = "match GTP header",
2237 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2238 .next = NEXT(item_gtp),
2243 .help = "match GENEVE header",
2244 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2245 .next = NEXT(item_geneve),
2248 [ITEM_GENEVE_VNI] = {
2250 .help = "virtual network identifier",
2251 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2254 [ITEM_GENEVE_PROTO] = {
2256 .help = "GENEVE protocol type",
2257 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2261 [ITEM_VXLAN_GPE] = {
2262 .name = "vxlan-gpe",
2263 .help = "match VXLAN-GPE header",
2264 .priv = PRIV_ITEM(VXLAN_GPE,
2265 sizeof(struct rte_flow_item_vxlan_gpe)),
2266 .next = NEXT(item_vxlan_gpe),
2269 [ITEM_VXLAN_GPE_VNI] = {
2271 .help = "VXLAN-GPE identifier",
2272 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2273 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2276 [ITEM_ARP_ETH_IPV4] = {
2277 .name = "arp_eth_ipv4",
2278 .help = "match ARP header for Ethernet/IPv4",
2279 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2280 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2281 .next = NEXT(item_arp_eth_ipv4),
2284 [ITEM_ARP_ETH_IPV4_SHA] = {
2286 .help = "sender hardware address",
2287 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2289 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2292 [ITEM_ARP_ETH_IPV4_SPA] = {
2294 .help = "sender IPv4 address",
2295 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2297 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2300 [ITEM_ARP_ETH_IPV4_THA] = {
2302 .help = "target hardware address",
2303 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2305 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2308 [ITEM_ARP_ETH_IPV4_TPA] = {
2310 .help = "target IPv4 address",
2311 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2313 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2318 .help = "match presence of any IPv6 extension header",
2319 .priv = PRIV_ITEM(IPV6_EXT,
2320 sizeof(struct rte_flow_item_ipv6_ext)),
2321 .next = NEXT(item_ipv6_ext),
2324 [ITEM_IPV6_EXT_NEXT_HDR] = {
2326 .help = "next header",
2327 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2333 .help = "match any ICMPv6 header",
2334 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2335 .next = NEXT(item_icmp6),
2338 [ITEM_ICMP6_TYPE] = {
2340 .help = "ICMPv6 type",
2341 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2342 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2345 [ITEM_ICMP6_CODE] = {
2347 .help = "ICMPv6 code",
2348 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2349 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2352 [ITEM_ICMP6_ND_NS] = {
2353 .name = "icmp6_nd_ns",
2354 .help = "match ICMPv6 neighbor discovery solicitation",
2355 .priv = PRIV_ITEM(ICMP6_ND_NS,
2356 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2357 .next = NEXT(item_icmp6_nd_ns),
2360 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2361 .name = "target_addr",
2362 .help = "target address",
2363 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2365 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2368 [ITEM_ICMP6_ND_NA] = {
2369 .name = "icmp6_nd_na",
2370 .help = "match ICMPv6 neighbor discovery advertisement",
2371 .priv = PRIV_ITEM(ICMP6_ND_NA,
2372 sizeof(struct rte_flow_item_icmp6_nd_na)),
2373 .next = NEXT(item_icmp6_nd_na),
2376 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2377 .name = "target_addr",
2378 .help = "target address",
2379 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2381 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2384 [ITEM_ICMP6_ND_OPT] = {
2385 .name = "icmp6_nd_opt",
2386 .help = "match presence of any ICMPv6 neighbor discovery"
2388 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2389 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2390 .next = NEXT(item_icmp6_nd_opt),
2393 [ITEM_ICMP6_ND_OPT_TYPE] = {
2395 .help = "ND option type",
2396 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2398 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2401 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2402 .name = "icmp6_nd_opt_sla_eth",
2403 .help = "match ICMPv6 neighbor discovery source Ethernet"
2404 " link-layer address option",
2406 (ICMP6_ND_OPT_SLA_ETH,
2407 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2408 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2411 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2413 .help = "source Ethernet LLA",
2414 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2416 .args = ARGS(ARGS_ENTRY_HTON
2417 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2419 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2420 .name = "icmp6_nd_opt_tla_eth",
2421 .help = "match ICMPv6 neighbor discovery target Ethernet"
2422 " link-layer address option",
2424 (ICMP6_ND_OPT_TLA_ETH,
2425 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2426 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2429 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2431 .help = "target Ethernet LLA",
2432 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2434 .args = ARGS(ARGS_ENTRY_HTON
2435 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2439 .help = "match metadata header",
2440 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2441 .next = NEXT(item_meta),
2444 [ITEM_META_DATA] = {
2446 .help = "metadata value",
2447 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2448 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2449 data, "\xff\xff\xff\xff")),
2453 .help = "match GRE key",
2454 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2455 .next = NEXT(item_gre_key),
2458 [ITEM_GRE_KEY_VALUE] = {
2460 .help = "key value",
2461 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2462 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2466 .help = "match GTP extension header with type 0x85",
2467 .priv = PRIV_ITEM(GTP_PSC,
2468 sizeof(struct rte_flow_item_gtp_psc)),
2469 .next = NEXT(item_gtp_psc),
2472 [ITEM_GTP_PSC_QFI] = {
2474 .help = "QoS flow identifier",
2475 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2476 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2479 [ITEM_GTP_PSC_PDU_T] = {
2482 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2483 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2488 .help = "match PPPoE session header",
2489 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2490 .next = NEXT(item_pppoes),
2495 .help = "match PPPoE discovery header",
2496 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2497 .next = NEXT(item_pppoed),
2500 [ITEM_PPPOE_SEID] = {
2502 .help = "session identifier",
2503 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2504 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2507 [ITEM_PPPOE_PROTO_ID] = {
2509 .help = "match PPPoE session protocol identifier",
2510 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2511 sizeof(struct rte_flow_item_pppoe_proto_id)),
2512 .next = NEXT(item_pppoe_proto_id),
2517 .help = "matches higig2 header",
2518 .priv = PRIV_ITEM(HIGIG2,
2519 sizeof(struct rte_flow_item_higig2_hdr)),
2520 .next = NEXT(item_higig2),
2523 [ITEM_HIGIG2_CLASSIFICATION] = {
2524 .name = "classification",
2525 .help = "matches classification of higig2 header",
2526 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2527 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2528 hdr.ppt1.classification)),
2530 [ITEM_HIGIG2_VID] = {
2532 .help = "matches vid of higig2 header",
2533 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2534 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2537 /* Validate/create actions. */
2540 .help = "submit a list of associated actions",
2541 .next = NEXT(next_action),
2546 .help = "specify next action",
2547 .next = NEXT(next_action),
2551 .help = "end list of actions",
2552 .priv = PRIV_ACTION(END, 0),
2557 .help = "no-op action",
2558 .priv = PRIV_ACTION(VOID, 0),
2559 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2562 [ACTION_PASSTHRU] = {
2564 .help = "let subsequent rule process matched packets",
2565 .priv = PRIV_ACTION(PASSTHRU, 0),
2566 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2571 .help = "redirect traffic to a given group",
2572 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2573 .next = NEXT(action_jump),
2576 [ACTION_JUMP_GROUP] = {
2578 .help = "group to redirect traffic to",
2579 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2580 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2581 .call = parse_vc_conf,
2585 .help = "attach 32 bit value to packets",
2586 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2587 .next = NEXT(action_mark),
2590 [ACTION_MARK_ID] = {
2592 .help = "32 bit value to return with packets",
2593 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2594 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2595 .call = parse_vc_conf,
2599 .help = "flag packets",
2600 .priv = PRIV_ACTION(FLAG, 0),
2601 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2606 .help = "assign packets to a given queue index",
2607 .priv = PRIV_ACTION(QUEUE,
2608 sizeof(struct rte_flow_action_queue)),
2609 .next = NEXT(action_queue),
2612 [ACTION_QUEUE_INDEX] = {
2614 .help = "queue index to use",
2615 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2616 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2617 .call = parse_vc_conf,
2621 .help = "drop packets (note: passthru has priority)",
2622 .priv = PRIV_ACTION(DROP, 0),
2623 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2628 .help = "enable counters for this rule",
2629 .priv = PRIV_ACTION(COUNT,
2630 sizeof(struct rte_flow_action_count)),
2631 .next = NEXT(action_count),
2634 [ACTION_COUNT_ID] = {
2635 .name = "identifier",
2636 .help = "counter identifier to use",
2637 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2638 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2639 .call = parse_vc_conf,
2641 [ACTION_COUNT_SHARED] = {
2643 .help = "shared counter",
2644 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2645 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2647 .call = parse_vc_conf,
2651 .help = "spread packets among several queues",
2652 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2653 .next = NEXT(action_rss),
2654 .call = parse_vc_action_rss,
2656 [ACTION_RSS_FUNC] = {
2658 .help = "RSS hash function to apply",
2659 .next = NEXT(action_rss,
2660 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2661 ACTION_RSS_FUNC_TOEPLITZ,
2662 ACTION_RSS_FUNC_SIMPLE_XOR,
2663 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2665 [ACTION_RSS_FUNC_DEFAULT] = {
2667 .help = "default hash function",
2668 .call = parse_vc_action_rss_func,
2670 [ACTION_RSS_FUNC_TOEPLITZ] = {
2672 .help = "Toeplitz hash function",
2673 .call = parse_vc_action_rss_func,
2675 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2676 .name = "simple_xor",
2677 .help = "simple XOR hash function",
2678 .call = parse_vc_action_rss_func,
2680 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2681 .name = "symmetric_toeplitz",
2682 .help = "Symmetric Toeplitz hash function",
2683 .call = parse_vc_action_rss_func,
2685 [ACTION_RSS_LEVEL] = {
2687 .help = "encapsulation level for \"types\"",
2688 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2689 .args = ARGS(ARGS_ENTRY_ARB
2690 (offsetof(struct action_rss_data, conf) +
2691 offsetof(struct rte_flow_action_rss, level),
2692 sizeof(((struct rte_flow_action_rss *)0)->
2695 [ACTION_RSS_TYPES] = {
2697 .help = "specific RSS hash types",
2698 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2700 [ACTION_RSS_TYPE] = {
2702 .help = "RSS hash type",
2703 .call = parse_vc_action_rss_type,
2704 .comp = comp_vc_action_rss_type,
2706 [ACTION_RSS_KEY] = {
2708 .help = "RSS hash key",
2709 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2710 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2712 (offsetof(struct action_rss_data, conf) +
2713 offsetof(struct rte_flow_action_rss, key_len),
2714 sizeof(((struct rte_flow_action_rss *)0)->
2716 ARGS_ENTRY(struct action_rss_data, key)),
2718 [ACTION_RSS_KEY_LEN] = {
2720 .help = "RSS hash key length in bytes",
2721 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2722 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2723 (offsetof(struct action_rss_data, conf) +
2724 offsetof(struct rte_flow_action_rss, key_len),
2725 sizeof(((struct rte_flow_action_rss *)0)->
2728 RSS_HASH_KEY_LENGTH)),
2730 [ACTION_RSS_QUEUES] = {
2732 .help = "queue indices to use",
2733 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2734 .call = parse_vc_conf,
2736 [ACTION_RSS_QUEUE] = {
2738 .help = "queue index",
2739 .call = parse_vc_action_rss_queue,
2740 .comp = comp_vc_action_rss_queue,
2744 .help = "direct traffic to physical function",
2745 .priv = PRIV_ACTION(PF, 0),
2746 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2751 .help = "direct traffic to a virtual function ID",
2752 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2753 .next = NEXT(action_vf),
2756 [ACTION_VF_ORIGINAL] = {
2758 .help = "use original VF ID if possible",
2759 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2760 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2762 .call = parse_vc_conf,
2767 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2768 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2769 .call = parse_vc_conf,
2771 [ACTION_PHY_PORT] = {
2773 .help = "direct packets to physical port index",
2774 .priv = PRIV_ACTION(PHY_PORT,
2775 sizeof(struct rte_flow_action_phy_port)),
2776 .next = NEXT(action_phy_port),
2779 [ACTION_PHY_PORT_ORIGINAL] = {
2781 .help = "use original port index if possible",
2782 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2783 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2785 .call = parse_vc_conf,
2787 [ACTION_PHY_PORT_INDEX] = {
2789 .help = "physical port index",
2790 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2791 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2793 .call = parse_vc_conf,
2795 [ACTION_PORT_ID] = {
2797 .help = "direct matching traffic to a given DPDK port ID",
2798 .priv = PRIV_ACTION(PORT_ID,
2799 sizeof(struct rte_flow_action_port_id)),
2800 .next = NEXT(action_port_id),
2803 [ACTION_PORT_ID_ORIGINAL] = {
2805 .help = "use original DPDK port ID if possible",
2806 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2807 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2809 .call = parse_vc_conf,
2811 [ACTION_PORT_ID_ID] = {
2813 .help = "DPDK port ID",
2814 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2815 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2816 .call = parse_vc_conf,
2820 .help = "meter the directed packets at given id",
2821 .priv = PRIV_ACTION(METER,
2822 sizeof(struct rte_flow_action_meter)),
2823 .next = NEXT(action_meter),
2826 [ACTION_METER_ID] = {
2828 .help = "meter id to use",
2829 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2830 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2831 .call = parse_vc_conf,
2833 [ACTION_OF_SET_MPLS_TTL] = {
2834 .name = "of_set_mpls_ttl",
2835 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2838 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2839 .next = NEXT(action_of_set_mpls_ttl),
2842 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2845 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2846 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2848 .call = parse_vc_conf,
2850 [ACTION_OF_DEC_MPLS_TTL] = {
2851 .name = "of_dec_mpls_ttl",
2852 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2853 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2854 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2857 [ACTION_OF_SET_NW_TTL] = {
2858 .name = "of_set_nw_ttl",
2859 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2862 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2863 .next = NEXT(action_of_set_nw_ttl),
2866 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2869 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2870 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2872 .call = parse_vc_conf,
2874 [ACTION_OF_DEC_NW_TTL] = {
2875 .name = "of_dec_nw_ttl",
2876 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2877 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2878 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2881 [ACTION_OF_COPY_TTL_OUT] = {
2882 .name = "of_copy_ttl_out",
2883 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2884 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2885 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2888 [ACTION_OF_COPY_TTL_IN] = {
2889 .name = "of_copy_ttl_in",
2890 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2891 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2892 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2895 [ACTION_OF_POP_VLAN] = {
2896 .name = "of_pop_vlan",
2897 .help = "OpenFlow's OFPAT_POP_VLAN",
2898 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2899 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2902 [ACTION_OF_PUSH_VLAN] = {
2903 .name = "of_push_vlan",
2904 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2907 sizeof(struct rte_flow_action_of_push_vlan)),
2908 .next = NEXT(action_of_push_vlan),
2911 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2912 .name = "ethertype",
2913 .help = "EtherType",
2914 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2915 .args = ARGS(ARGS_ENTRY_HTON
2916 (struct rte_flow_action_of_push_vlan,
2918 .call = parse_vc_conf,
2920 [ACTION_OF_SET_VLAN_VID] = {
2921 .name = "of_set_vlan_vid",
2922 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2925 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2926 .next = NEXT(action_of_set_vlan_vid),
2929 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2932 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2933 .args = ARGS(ARGS_ENTRY_HTON
2934 (struct rte_flow_action_of_set_vlan_vid,
2936 .call = parse_vc_conf,
2938 [ACTION_OF_SET_VLAN_PCP] = {
2939 .name = "of_set_vlan_pcp",
2940 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2943 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2944 .next = NEXT(action_of_set_vlan_pcp),
2947 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2949 .help = "VLAN priority",
2950 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2951 .args = ARGS(ARGS_ENTRY_HTON
2952 (struct rte_flow_action_of_set_vlan_pcp,
2954 .call = parse_vc_conf,
2956 [ACTION_OF_POP_MPLS] = {
2957 .name = "of_pop_mpls",
2958 .help = "OpenFlow's OFPAT_POP_MPLS",
2959 .priv = PRIV_ACTION(OF_POP_MPLS,
2960 sizeof(struct rte_flow_action_of_pop_mpls)),
2961 .next = NEXT(action_of_pop_mpls),
2964 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2965 .name = "ethertype",
2966 .help = "EtherType",
2967 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2968 .args = ARGS(ARGS_ENTRY_HTON
2969 (struct rte_flow_action_of_pop_mpls,
2971 .call = parse_vc_conf,
2973 [ACTION_OF_PUSH_MPLS] = {
2974 .name = "of_push_mpls",
2975 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2978 sizeof(struct rte_flow_action_of_push_mpls)),
2979 .next = NEXT(action_of_push_mpls),
2982 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2983 .name = "ethertype",
2984 .help = "EtherType",
2985 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2986 .args = ARGS(ARGS_ENTRY_HTON
2987 (struct rte_flow_action_of_push_mpls,
2989 .call = parse_vc_conf,
2991 [ACTION_VXLAN_ENCAP] = {
2992 .name = "vxlan_encap",
2993 .help = "VXLAN encapsulation, uses configuration set by \"set"
2995 .priv = PRIV_ACTION(VXLAN_ENCAP,
2996 sizeof(struct action_vxlan_encap_data)),
2997 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2998 .call = parse_vc_action_vxlan_encap,
3000 [ACTION_VXLAN_DECAP] = {
3001 .name = "vxlan_decap",
3002 .help = "Performs a decapsulation action by stripping all"
3003 " headers of the VXLAN tunnel network overlay from the"
3005 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3006 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3009 [ACTION_NVGRE_ENCAP] = {
3010 .name = "nvgre_encap",
3011 .help = "NVGRE encapsulation, uses configuration set by \"set"
3013 .priv = PRIV_ACTION(NVGRE_ENCAP,
3014 sizeof(struct action_nvgre_encap_data)),
3015 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3016 .call = parse_vc_action_nvgre_encap,
3018 [ACTION_NVGRE_DECAP] = {
3019 .name = "nvgre_decap",
3020 .help = "Performs a decapsulation action by stripping all"
3021 " headers of the NVGRE tunnel network overlay from the"
3023 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3024 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3027 [ACTION_L2_ENCAP] = {
3029 .help = "l2 encap, uses configuration set by"
3030 " \"set l2_encap\"",
3031 .priv = PRIV_ACTION(RAW_ENCAP,
3032 sizeof(struct action_raw_encap_data)),
3033 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3034 .call = parse_vc_action_l2_encap,
3036 [ACTION_L2_DECAP] = {
3038 .help = "l2 decap, uses configuration set by"
3039 " \"set l2_decap\"",
3040 .priv = PRIV_ACTION(RAW_DECAP,
3041 sizeof(struct action_raw_decap_data)),
3042 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3043 .call = parse_vc_action_l2_decap,
3045 [ACTION_MPLSOGRE_ENCAP] = {
3046 .name = "mplsogre_encap",
3047 .help = "mplsogre encapsulation, uses configuration set by"
3048 " \"set mplsogre_encap\"",
3049 .priv = PRIV_ACTION(RAW_ENCAP,
3050 sizeof(struct action_raw_encap_data)),
3051 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3052 .call = parse_vc_action_mplsogre_encap,
3054 [ACTION_MPLSOGRE_DECAP] = {
3055 .name = "mplsogre_decap",
3056 .help = "mplsogre decapsulation, uses configuration set by"
3057 " \"set mplsogre_decap\"",
3058 .priv = PRIV_ACTION(RAW_DECAP,
3059 sizeof(struct action_raw_decap_data)),
3060 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3061 .call = parse_vc_action_mplsogre_decap,
3063 [ACTION_MPLSOUDP_ENCAP] = {
3064 .name = "mplsoudp_encap",
3065 .help = "mplsoudp encapsulation, uses configuration set by"
3066 " \"set mplsoudp_encap\"",
3067 .priv = PRIV_ACTION(RAW_ENCAP,
3068 sizeof(struct action_raw_encap_data)),
3069 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3070 .call = parse_vc_action_mplsoudp_encap,
3072 [ACTION_MPLSOUDP_DECAP] = {
3073 .name = "mplsoudp_decap",
3074 .help = "mplsoudp decapsulation, uses configuration set by"
3075 " \"set mplsoudp_decap\"",
3076 .priv = PRIV_ACTION(RAW_DECAP,
3077 sizeof(struct action_raw_decap_data)),
3078 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3079 .call = parse_vc_action_mplsoudp_decap,
3081 [ACTION_SET_IPV4_SRC] = {
3082 .name = "set_ipv4_src",
3083 .help = "Set a new IPv4 source address in the outermost"
3085 .priv = PRIV_ACTION(SET_IPV4_SRC,
3086 sizeof(struct rte_flow_action_set_ipv4)),
3087 .next = NEXT(action_set_ipv4_src),
3090 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3091 .name = "ipv4_addr",
3092 .help = "new IPv4 source address to set",
3093 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3094 .args = ARGS(ARGS_ENTRY_HTON
3095 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3096 .call = parse_vc_conf,
3098 [ACTION_SET_IPV4_DST] = {
3099 .name = "set_ipv4_dst",
3100 .help = "Set a new IPv4 destination address in the outermost"
3102 .priv = PRIV_ACTION(SET_IPV4_DST,
3103 sizeof(struct rte_flow_action_set_ipv4)),
3104 .next = NEXT(action_set_ipv4_dst),
3107 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3108 .name = "ipv4_addr",
3109 .help = "new IPv4 destination address to set",
3110 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3111 .args = ARGS(ARGS_ENTRY_HTON
3112 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3113 .call = parse_vc_conf,
3115 [ACTION_SET_IPV6_SRC] = {
3116 .name = "set_ipv6_src",
3117 .help = "Set a new IPv6 source address in the outermost"
3119 .priv = PRIV_ACTION(SET_IPV6_SRC,
3120 sizeof(struct rte_flow_action_set_ipv6)),
3121 .next = NEXT(action_set_ipv6_src),
3124 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3125 .name = "ipv6_addr",
3126 .help = "new IPv6 source address to set",
3127 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3128 .args = ARGS(ARGS_ENTRY_HTON
3129 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3130 .call = parse_vc_conf,
3132 [ACTION_SET_IPV6_DST] = {
3133 .name = "set_ipv6_dst",
3134 .help = "Set a new IPv6 destination address in the outermost"
3136 .priv = PRIV_ACTION(SET_IPV6_DST,
3137 sizeof(struct rte_flow_action_set_ipv6)),
3138 .next = NEXT(action_set_ipv6_dst),
3141 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3142 .name = "ipv6_addr",
3143 .help = "new IPv6 destination address to set",
3144 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3145 .args = ARGS(ARGS_ENTRY_HTON
3146 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3147 .call = parse_vc_conf,
3149 [ACTION_SET_TP_SRC] = {
3150 .name = "set_tp_src",
3151 .help = "set a new source port number in the outermost"
3153 .priv = PRIV_ACTION(SET_TP_SRC,
3154 sizeof(struct rte_flow_action_set_tp)),
3155 .next = NEXT(action_set_tp_src),
3158 [ACTION_SET_TP_SRC_TP_SRC] = {
3160 .help = "new source port number to set",
3161 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3162 .args = ARGS(ARGS_ENTRY_HTON
3163 (struct rte_flow_action_set_tp, port)),
3164 .call = parse_vc_conf,
3166 [ACTION_SET_TP_DST] = {
3167 .name = "set_tp_dst",
3168 .help = "set a new destination port number in the outermost"
3170 .priv = PRIV_ACTION(SET_TP_DST,
3171 sizeof(struct rte_flow_action_set_tp)),
3172 .next = NEXT(action_set_tp_dst),
3175 [ACTION_SET_TP_DST_TP_DST] = {
3177 .help = "new destination port number to set",
3178 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3179 .args = ARGS(ARGS_ENTRY_HTON
3180 (struct rte_flow_action_set_tp, port)),
3181 .call = parse_vc_conf,
3183 [ACTION_MAC_SWAP] = {
3185 .help = "Swap the source and destination MAC addresses"
3186 " in the outermost Ethernet header",
3187 .priv = PRIV_ACTION(MAC_SWAP, 0),
3188 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3191 [ACTION_DEC_TTL] = {
3193 .help = "decrease network TTL if available",
3194 .priv = PRIV_ACTION(DEC_TTL, 0),
3195 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3198 [ACTION_SET_TTL] = {
3200 .help = "set ttl value",
3201 .priv = PRIV_ACTION(SET_TTL,
3202 sizeof(struct rte_flow_action_set_ttl)),
3203 .next = NEXT(action_set_ttl),
3206 [ACTION_SET_TTL_TTL] = {
3207 .name = "ttl_value",
3208 .help = "new ttl value to set",
3209 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3210 .args = ARGS(ARGS_ENTRY_HTON
3211 (struct rte_flow_action_set_ttl, ttl_value)),
3212 .call = parse_vc_conf,
3214 [ACTION_SET_MAC_SRC] = {
3215 .name = "set_mac_src",
3216 .help = "set source mac address",
3217 .priv = PRIV_ACTION(SET_MAC_SRC,
3218 sizeof(struct rte_flow_action_set_mac)),
3219 .next = NEXT(action_set_mac_src),
3222 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3224 .help = "new source mac address",
3225 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3226 .args = ARGS(ARGS_ENTRY_HTON
3227 (struct rte_flow_action_set_mac, mac_addr)),
3228 .call = parse_vc_conf,
3230 [ACTION_SET_MAC_DST] = {
3231 .name = "set_mac_dst",
3232 .help = "set destination mac address",
3233 .priv = PRIV_ACTION(SET_MAC_DST,
3234 sizeof(struct rte_flow_action_set_mac)),
3235 .next = NEXT(action_set_mac_dst),
3238 [ACTION_SET_MAC_DST_MAC_DST] = {
3240 .help = "new destination mac address to set",
3241 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3242 .args = ARGS(ARGS_ENTRY_HTON
3243 (struct rte_flow_action_set_mac, mac_addr)),
3244 .call = parse_vc_conf,
3246 [ACTION_INC_TCP_SEQ] = {
3247 .name = "inc_tcp_seq",
3248 .help = "increase TCP sequence number",
3249 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3250 .next = NEXT(action_inc_tcp_seq),
3253 [ACTION_INC_TCP_SEQ_VALUE] = {
3255 .help = "the value to increase TCP sequence number by",
3256 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3257 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3258 .call = parse_vc_conf,
3260 [ACTION_DEC_TCP_SEQ] = {
3261 .name = "dec_tcp_seq",
3262 .help = "decrease TCP sequence number",
3263 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3264 .next = NEXT(action_dec_tcp_seq),
3267 [ACTION_DEC_TCP_SEQ_VALUE] = {
3269 .help = "the value to decrease TCP sequence number by",
3270 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3271 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3272 .call = parse_vc_conf,
3274 [ACTION_INC_TCP_ACK] = {
3275 .name = "inc_tcp_ack",
3276 .help = "increase TCP acknowledgment number",
3277 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3278 .next = NEXT(action_inc_tcp_ack),
3281 [ACTION_INC_TCP_ACK_VALUE] = {
3283 .help = "the value to increase TCP acknowledgment number by",
3284 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3285 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3286 .call = parse_vc_conf,
3288 [ACTION_DEC_TCP_ACK] = {
3289 .name = "dec_tcp_ack",
3290 .help = "decrease TCP acknowledgment number",
3291 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3292 .next = NEXT(action_dec_tcp_ack),
3295 [ACTION_DEC_TCP_ACK_VALUE] = {
3297 .help = "the value to decrease TCP acknowledgment number by",
3298 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3299 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3300 .call = parse_vc_conf,
3302 [ACTION_RAW_ENCAP] = {
3303 .name = "raw_encap",
3304 .help = "encapsulation data, defined by set raw_encap",
3305 .priv = PRIV_ACTION(RAW_ENCAP,
3306 sizeof(struct action_raw_encap_data)),
3307 .next = NEXT(action_raw_encap),
3308 .call = parse_vc_action_raw_encap,
3310 [ACTION_RAW_ENCAP_INDEX] = {
3312 .help = "the index of raw_encap_confs",
3313 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
3315 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
3318 .help = "unsigned integer value",
3319 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3320 .call = parse_vc_action_raw_encap_index,
3321 .comp = comp_set_raw_index,
3323 [ACTION_RAW_DECAP] = {
3324 .name = "raw_decap",
3325 .help = "decapsulation data, defined by set raw_encap",
3326 .priv = PRIV_ACTION(RAW_DECAP,
3327 sizeof(struct action_raw_decap_data)),
3328 .next = NEXT(action_raw_decap),
3329 .call = parse_vc_action_raw_decap,
3331 [ACTION_RAW_DECAP_INDEX] = {
3333 .help = "the index of raw_encap_confs",
3334 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
3336 [ACTION_RAW_DECAP_INDEX_VALUE] = {
3339 .help = "unsigned integer value",
3340 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3341 .call = parse_vc_action_raw_decap_index,
3342 .comp = comp_set_raw_index,
3344 /* Top level command. */
3347 .help = "set raw encap/decap data",
3348 .type = "set raw_encap|raw_decap <index> <pattern>",
3349 .next = NEXT(NEXT_ENTRY
3352 .call = parse_set_init,
3354 /* Sub-level commands. */
3356 .name = "raw_encap",
3357 .help = "set raw encap data",
3358 .next = NEXT(next_set_raw),
3359 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3360 (offsetof(struct buffer, port),
3361 sizeof(((struct buffer *)0)->port),
3362 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3363 .call = parse_set_raw_encap_decap,
3366 .name = "raw_decap",
3367 .help = "set raw decap data",
3368 .next = NEXT(next_set_raw),
3369 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3370 (offsetof(struct buffer, port),
3371 sizeof(((struct buffer *)0)->port),
3372 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3373 .call = parse_set_raw_encap_decap,
3378 .help = "index of raw_encap/raw_decap data",
3379 .next = NEXT(next_item),
3384 /** Remove and return last entry from argument stack. */
3385 static const struct arg *
3386 pop_args(struct context *ctx)
/* NOTE(review): listing is elided here (function braces not shown). */
/* Returns NULL when the argument stack is empty, otherwise pops the top. */
3388 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3391 /** Add entry on top of the argument stack. */
3393 push_args(struct context *ctx, const struct arg *arg)
/* Reject the push when the stack already holds CTX_STACK_SIZE entries. */
3395 if (ctx->args_num == CTX_STACK_SIZE)
/* NOTE(review): the overflow return statement is elided from this listing. */
3397 ctx->args[ctx->args_num++] = arg;
3401 /** Spread value into buffer according to bit-mask. */
/* NOTE(review): this listing is heavily elided; only fragments of the body
 * are visible. Kept byte-identical below. */
3403 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
/* Start from the mask size and walk backwards through the mask bytes. */
3405 uint32_t i = arg->size;
/* Byte order determines which end of the destination is written first. */
3413 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3422 unsigned int shift = 0;
3423 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan every bit position covered by this mask byte. */
3425 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip bit positions that are not part of the mask. */
3426 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy the next low-order bit of val into it. */
3431 *buf &= ~(1 << shift);
3432 *buf |= (val & 1) << shift;
3440 /** Compare a string with a partial one of a given length. */
3442 strcmp_partial(const char *full, const char *partial, size_t partial_len)
/* First compare only the partial prefix. */
3444 int r = strncmp(full, partial, partial_len);
/* NOTE(review): the early return on nonzero r is elided from this listing. */
/* When full is no longer than the partial length, the prefix match decides. */
3448 if (strlen(full) <= partial_len)
/* Otherwise the first unmatched character of full makes the result nonzero. */
3450 return full[partial_len];
3454 * Parse a prefix length and generate a bit-mask.
3456 * Last argument (ctx->args) is retrieved to determine mask size, storage
3457 * location and whether the result must use network byte ordering.
3460 parse_prefix(struct context *ctx, const struct token *token,
3461 const char *str, unsigned int len,
3462 void *buf, unsigned int size)
/* Pop the argument descriptor previously pushed for this token. */
3464 const struct arg *arg = pop_args(ctx);
/* Lookup table: conv[n] is a byte with its n most significant bits set. */
3465 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3472 /* Argument is expected. */
/* Parse the prefix length; reject trailing garbage or out-of-range input. */
3476 u = strtoumax(str, &end, 0);
3477 if (errno || (size_t)(end - str) != len)
/* Probe call (NULL dst) to learn how many extra mask bits are needed. */
3482 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Bit-field masks are written through arg_entry_bf_fill directly. */
3491 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3492 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes that do not fit in the destination field. */
3499 if (bytes > size || bytes + !!extra > size)
3503 buf = (uint8_t *)ctx->object + arg->offset;
3504 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little endian: set the high-address bytes, clear the rest. */
3506 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3507 memset(buf, 0x00, size - bytes);
3509 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big endian: set the low-address bytes, clear the rest. */
3513 memset(buf, 0xff, bytes);
3514 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3516 ((uint8_t *)buf)[bytes] = conv[extra];
/* The object-mask side is fully set for the whole field. */
3519 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* NOTE(review): error paths between these lines are elided; on failure the
 * argument is pushed back so a later parser can retry it. */
3522 push_args(ctx, arg);
3526 /** Default parsing function for token name matching. */
3528 parse_default(struct context *ctx, const struct token *token,
3529 const char *str, unsigned int len,
3530 void *buf, unsigned int size)
/* Nonzero means the (possibly partial) input does not match this token. */
3535 if (strcmp_partial(token->name, str, len))
3540 /** Parse flow command, initialize output buffer for subsequent tokens. */
3542 parse_init(struct context *ctx, const struct token *token,
3543 const char *str, unsigned int len,
3544 void *buf, unsigned int size)
3546 struct buffer *out = buf;
3548 /* Token name must match. */
3549 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3551 /* Nothing else to do if there is no buffer. */
3554 /* Make sure buffer is large enough. */
3555 if (size < sizeof(*out))
3557 /* Initialize buffer. */
3558 memset(out, 0x00, sizeof(*out));
/* 0x22 poisons the spare space past the header so stale data is visible. */
3559 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3562 ctx->objmask = NULL;
3566 /** Parse tokens for validate/create commands. */
/* NOTE(review): this listing is elided (line gaps); switch labels, returns
 * and several closing braces are not shown. Code kept byte-identical. */
3568 parse_vc(struct context *ctx, const struct token *token,
3569 const char *str, unsigned int len,
3570 void *buf, unsigned int size)
3572 struct buffer *out = buf;
3576 /* Token name must match. */
3577 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3579 /* Nothing else to do if there is no buffer. */
/* First invocation: record which top-level command owns this buffer. */
3582 if (!out->command) {
3583 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3585 if (sizeof(*out) > size)
3587 out->command = ctx->curr;
3590 ctx->objmask = NULL;
/* Variable-length data grows downward from the end of the buffer. */
3591 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write into the shared attr structure. */
3595 ctx->object = &out->args.vc.attr;
3596 ctx->objmask = NULL;
3597 switch (ctx->curr) {
3602 out->args.vc.attr.ingress = 1;
3605 out->args.vc.attr.egress = 1;
3608 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: pattern items start right after the buffer header. */
3611 out->args.vc.pattern =
3612 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3614 ctx->object = out->args.vc.pattern;
3615 ctx->objmask = NULL;
/* "actions" keyword: actions follow the already-parsed pattern items. */
3618 out->args.vc.actions =
3619 (void *)RTE_ALIGN_CEIL((uintptr_t)
3620 (out->args.vc.pattern +
3621 out->args.vc.pattern_n),
3623 ctx->object = out->args.vc.actions;
3624 ctx->objmask = NULL;
/* Still in the pattern section: append a new item entry. */
3631 if (!out->args.vc.actions) {
3632 const struct parse_item_priv *priv = token->priv;
3633 struct rte_flow_item *item =
3634 out->args.vc.pattern + out->args.vc.pattern_n;
3636 data_size = priv->size * 3; /* spec, last, mask */
/* Carve spec/last/mask storage from the top of the data area. */
3637 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3638 (out->args.vc.data - data_size),
/* Fail if the item array and the data area would collide. */
3640 if ((uint8_t *)item + sizeof(*item) > data)
3642 *item = (struct rte_flow_item){
3645 ++out->args.vc.pattern_n;
3647 ctx->objmask = NULL;
/* Actions section: append a new action entry the same way. */
3649 const struct parse_action_priv *priv = token->priv;
3650 struct rte_flow_action *action =
3651 out->args.vc.actions + out->args.vc.actions_n;
3653 data_size = priv->size; /* configuration */
3654 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3655 (out->args.vc.data - data_size),
3657 if ((uint8_t *)action + sizeof(*action) > data)
3659 *action = (struct rte_flow_action){
/* Zero-sized configurations get a NULL conf pointer. */
3661 .conf = data_size ? data : NULL,
3663 ++out->args.vc.actions_n;
3664 ctx->object = action;
3665 ctx->objmask = NULL;
/* Fresh configuration storage starts zeroed. */
3667 memset(data, 0, data_size);
3668 out->args.vc.data = data;
3669 ctx->objdata = data_size;
3673 /** Parse pattern item parameter type. */
/* NOTE(review): listing elided; index assignments per case and several
 * break/return statements are not shown. Code kept byte-identical. */
3675 parse_vc_spec(struct context *ctx, const struct token *token,
3676 const char *str, unsigned int len,
3677 void *buf, unsigned int size)
3679 struct buffer *out = buf;
3680 struct rte_flow_item *item;
3686 /* Token name must match. */
3687 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3689 /* Parse parameter types. */
3690 switch (ctx->curr) {
3691 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3697 case ITEM_PARAM_SPEC:
3700 case ITEM_PARAM_LAST:
3703 case ITEM_PARAM_PREFIX:
3704 /* Modify next token to expect a prefix. */
3705 if (ctx->next_num < 2)
3707 ctx->next[ctx->next_num - 2] = prefix;
3709 case ITEM_PARAM_MASK:
3715 /* Nothing else to do if there is no buffer. */
/* A parameter keyword only makes sense after at least one pattern item. */
3718 if (!out->args.vc.pattern_n)
3720 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3721 data_size = ctx->objdata / 3; /* spec, last, mask */
3722 /* Point to selected object. */
3723 ctx->object = out->args.vc.data + (data_size * index);
3725 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3726 item->mask = ctx->objmask;
3728 ctx->objmask = NULL;
3729 /* Update relevant item pointer. */
3730 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3735 /** Parse action configuration field. */
3737 parse_vc_conf(struct context *ctx, const struct token *token,
3738 const char *str, unsigned int len,
3739 void *buf, unsigned int size)
3741 struct buffer *out = buf;
3744 /* Token name must match. */
3745 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3747 /* Nothing else to do if there is no buffer. */
3750 /* Point to selected object. */
/* Subsequent argument tokens write into the current action's config area. */
3751 ctx->object = out->args.vc.data;
3752 ctx->objmask = NULL;
3756 /** Parse RSS action. */
/* NOTE(review): listing elided; error returns between the checks are not
 * shown. Code kept byte-identical. */
3758 parse_vc_action_rss(struct context *ctx, const struct token *token,
3759 const char *str, unsigned int len,
3760 void *buf, unsigned int size)
3762 struct buffer *out = buf;
3763 struct rte_flow_action *action;
3764 struct action_rss_data *action_rss_data;
/* Let the generic action parser allocate the action entry first. */
3768 ret = parse_vc(ctx, token, str, len, buf, size);
3771 /* Nothing else to do if there is no buffer. */
3774 if (!out->args.vc.actions_n)
3776 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3777 /* Point to selected object. */
3778 ctx->object = out->args.vc.data;
3779 ctx->objmask = NULL;
3780 /* Set up default configuration. */
3781 action_rss_data = ctx->object;
3782 *action_rss_data = (struct action_rss_data){
3783 .conf = (struct rte_flow_action_rss){
3784 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3787 .key_len = sizeof(action_rss_data->key),
3788 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3789 .key = action_rss_data->key,
3790 .queue = action_rss_data->queue,
3792 .key = "testpmd's default RSS hash key, "
3793 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3796 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3797 action_rss_data->queue[i] = i;
/* Clamp the key length to what this port's hardware reports, if known. */
3798 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3799 ctx->port != (portid_t)RTE_PORT_ALL) {
3800 struct rte_eth_dev_info info;
3803 ret2 = rte_eth_dev_info_get(ctx->port, &info);
3807 action_rss_data->conf.key_len =
3808 RTE_MIN(sizeof(action_rss_data->key),
3809 info.hash_key_size);
3811 action->conf = &action_rss_data->conf;
3816 * Parse func field for RSS action.
3818 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3819 * ACTION_RSS_FUNC_* index that called this function.
3822 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3823 const char *str, unsigned int len,
3824 void *buf, unsigned int size)
3826 struct action_rss_data *action_rss_data;
3827 enum rte_eth_hash_function func;
3831 /* Token name must match. */
3832 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the CLI token index onto the corresponding hash function enum. */
3834 switch (ctx->curr) {
3835 case ACTION_RSS_FUNC_DEFAULT:
3836 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3838 case ACTION_RSS_FUNC_TOEPLITZ:
3839 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3841 case ACTION_RSS_FUNC_SIMPLE_XOR:
3842 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3844 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
3845 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
/* NOTE(review): break statements and the default case are elided here. */
3852 action_rss_data = ctx->object;
3853 action_rss_data->conf.func = func;
3858 * Parse type field for RSS action.
3860 * Valid tokens are type field names and the "end" token.
3863 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3864 const char *str, unsigned int len,
3865 void *buf, unsigned int size)
3867 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3868 struct action_rss_data *action_rss_data;
3874 if (ctx->curr != ACTION_RSS_TYPE)
/* High half of objdata flags "list started"; clear types on first entry. */
3876 if (!(ctx->objdata >> 16) && ctx->object) {
3877 action_rss_data = ctx->object;
3878 action_rss_data->conf.types = 0;
/* "end" terminates the type list and resets the started flag. */
3880 if (!strcmp_partial("end", str, len)) {
3881 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3884 for (i = 0; rss_type_table[i].str; ++i)
3885 if (!strcmp_partial(rss_type_table[i].str, str, len))
3887 if (!rss_type_table[i].str)
3889 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token class so further type names can follow. */
3891 if (ctx->next_num == RTE_DIM(ctx->next))
3893 ctx->next[ctx->next_num++] = next;
3896 action_rss_data = ctx->object;
3897 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3902 * Parse queue field for RSS action.
3904 * Valid tokens are queue indices and the "end" token.
3907 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3908 const char *str, unsigned int len,
3909 void *buf, unsigned int size)
3911 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3912 struct action_rss_data *action_rss_data;
3913 const struct arg *arg;
3920 if (ctx->curr != ACTION_RSS_QUEUE)
/* High half of objdata counts how many queues were parsed so far. */
3922 i = ctx->objdata >> 16;
3923 if (!strcmp_partial("end", str, len)) {
3924 ctx->objdata &= 0xffff;
/* Refuse more queues than the static queue array can hold. */
3927 if (i >= ACTION_RSS_QUEUE_NUM)
/* Build an ad-hoc argument pointing at queue slot i, then parse into it. */
3929 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3930 i * sizeof(action_rss_data->queue[i]),
3931 sizeof(action_rss_data->queue[i]));
3932 if (push_args(ctx, arg))
3934 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the updated count and re-queue this token class for more indices. */
3940 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3942 if (ctx->next_num == RTE_DIM(ctx->next))
3944 ctx->next[ctx->next_num++] = next;
3948 action_rss_data = ctx->object;
3949 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
3950 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3954 /** Parse VXLAN encap action. */
/* NOTE(review): listing elided (line gaps); some initializer braces and
 * error returns are not shown. Code kept byte-identical. */
3956 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3957 const char *str, unsigned int len,
3958 void *buf, unsigned int size)
3960 struct buffer *out = buf;
3961 struct rte_flow_action *action;
3962 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Generic action parser allocates the action entry and its config area. */
3965 ret = parse_vc(ctx, token, str, len, buf, size);
3968 /* Nothing else to do if there is no buffer. */
3971 if (!out->args.vc.actions_n)
3973 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3974 /* Point to selected object. */
3975 ctx->object = out->args.vc.data;
3976 ctx->objmask = NULL;
3977 /* Set up default configuration. */
/* Build the encap item chain ETH / VLAN / IPv4 / UDP / VXLAN / END from
 * the global "set vxlan" configuration (vxlan_encap_conf). */
3978 action_vxlan_encap_data = ctx->object;
3979 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3980 .conf = (struct rte_flow_action_vxlan_encap){
3981 .definition = action_vxlan_encap_data->items,
3985 .type = RTE_FLOW_ITEM_TYPE_ETH,
3986 .spec = &action_vxlan_encap_data->item_eth,
3987 .mask = &rte_flow_item_eth_mask,
3990 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3991 .spec = &action_vxlan_encap_data->item_vlan,
3992 .mask = &rte_flow_item_vlan_mask,
3995 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3996 .spec = &action_vxlan_encap_data->item_ipv4,
3997 .mask = &rte_flow_item_ipv4_mask,
4000 .type = RTE_FLOW_ITEM_TYPE_UDP,
4001 .spec = &action_vxlan_encap_data->item_udp,
4002 .mask = &rte_flow_item_udp_mask,
4005 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
4006 .spec = &action_vxlan_encap_data->item_vxlan,
4007 .mask = &rte_flow_item_vxlan_mask,
4010 .type = RTE_FLOW_ITEM_TYPE_END,
4015 .tci = vxlan_encap_conf.vlan_tci,
4019 .src_addr = vxlan_encap_conf.ipv4_src,
4020 .dst_addr = vxlan_encap_conf.ipv4_dst,
4023 .src_port = vxlan_encap_conf.udp_src,
4024 .dst_port = vxlan_encap_conf.udp_dst,
4026 .item_vxlan.flags = 0,
4028 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
4029 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4030 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
4031 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 tunnel selected: overwrite item slot 2 (IPv4 by default) with IPv6. */
4032 if (!vxlan_encap_conf.select_ipv4) {
4033 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
4034 &vxlan_encap_conf.ipv6_src,
4035 sizeof(vxlan_encap_conf.ipv6_src));
4036 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
4037 &vxlan_encap_conf.ipv6_dst,
4038 sizeof(vxlan_encap_conf.ipv6_dst));
4039 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
4040 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4041 .spec = &action_vxlan_encap_data->item_ipv6,
4042 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item slot 1. */
4045 if (!vxlan_encap_conf.select_vlan)
4046 action_vxlan_encap_data->items[1].type =
4047 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional explicit TOS/TTL: widen the L3 mask to cover those fields. */
4048 if (vxlan_encap_conf.select_tos_ttl) {
4049 if (vxlan_encap_conf.select_ipv4) {
4050 static struct rte_flow_item_ipv4 ipv4_mask_tos;
4052 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
4053 sizeof(ipv4_mask_tos));
4054 ipv4_mask_tos.hdr.type_of_service = 0xff;
4055 ipv4_mask_tos.hdr.time_to_live = 0xff;
4056 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
4057 vxlan_encap_conf.ip_tos;
4058 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
4059 vxlan_encap_conf.ip_ttl;
4060 action_vxlan_encap_data->items[2].mask =
4063 static struct rte_flow_item_ipv6 ipv6_mask_tos;
4065 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
4066 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
4067 ipv6_mask_tos.hdr.vtc_flow |=
4068 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
4069 ipv6_mask_tos.hdr.hop_limits = 0xff;
4070 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
4072 ((uint32_t)vxlan_encap_conf.ip_tos <<
4073 RTE_IPV6_HDR_TC_SHIFT);
4074 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
4075 vxlan_encap_conf.ip_ttl;
4076 action_vxlan_encap_data->items[2].mask =
4080 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4081 RTE_DIM(vxlan_encap_conf.vni));
4082 action->conf = &action_vxlan_encap_data->conf;
4086 /** Parse NVGRE encap action. */
/* NOTE(review): listing elided (line gaps); some initializer braces and
 * error returns are not shown. Code kept byte-identical. */
4088 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4089 const char *str, unsigned int len,
4090 void *buf, unsigned int size)
4092 struct buffer *out = buf;
4093 struct rte_flow_action *action;
4094 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Generic action parser allocates the action entry and its config area. */
4097 ret = parse_vc(ctx, token, str, len, buf, size);
4100 /* Nothing else to do if there is no buffer. */
4103 if (!out->args.vc.actions_n)
4105 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4106 /* Point to selected object. */
4107 ctx->object = out->args.vc.data;
4108 ctx->objmask = NULL;
4109 /* Set up default configuration. */
/* Build the encap item chain ETH / VLAN / IPv4 / NVGRE / END from the
 * global "set nvgre" configuration (nvgre_encap_conf). */
4110 action_nvgre_encap_data = ctx->object;
4111 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
4112 .conf = (struct rte_flow_action_nvgre_encap){
4113 .definition = action_nvgre_encap_data->items,
4117 .type = RTE_FLOW_ITEM_TYPE_ETH,
4118 .spec = &action_nvgre_encap_data->item_eth,
4119 .mask = &rte_flow_item_eth_mask,
4122 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4123 .spec = &action_nvgre_encap_data->item_vlan,
4124 .mask = &rte_flow_item_vlan_mask,
4127 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4128 .spec = &action_nvgre_encap_data->item_ipv4,
4129 .mask = &rte_flow_item_ipv4_mask,
4132 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
4133 .spec = &action_nvgre_encap_data->item_nvgre,
4134 .mask = &rte_flow_item_nvgre_mask,
4137 .type = RTE_FLOW_ITEM_TYPE_END,
4142 .tci = nvgre_encap_conf.vlan_tci,
4146 .src_addr = nvgre_encap_conf.ipv4_src,
4147 .dst_addr = nvgre_encap_conf.ipv4_dst,
4149 .item_nvgre.flow_id = 0,
4151 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4152 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4153 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4154 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 tunnel selected: overwrite item slot 2 (IPv4 by default) with IPv6. */
4155 if (!nvgre_encap_conf.select_ipv4) {
4156 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4157 &nvgre_encap_conf.ipv6_src,
4158 sizeof(nvgre_encap_conf.ipv6_src));
4159 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4160 &nvgre_encap_conf.ipv6_dst,
4161 sizeof(nvgre_encap_conf.ipv6_dst));
4162 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4163 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4164 .spec = &action_nvgre_encap_data->item_ipv6,
4165 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item slot 1. */
4168 if (!nvgre_encap_conf.select_vlan)
4169 action_nvgre_encap_data->items[1].type =
4170 RTE_FLOW_ITEM_TYPE_VOID;
4171 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4172 RTE_DIM(nvgre_encap_conf.tni));
4173 action->conf = &action_nvgre_encap_data->conf;
4177 /** Parse l2 encap action. */
/* Implemented as a RAW_ENCAP action whose data buffer is a hand-built
 * ETH (+ optional VLAN) header taken from the "set l2_encap" config.
 * NOTE(review): listing elided (line gaps); error returns are not shown. */
4179 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4180 const char *str, unsigned int len,
4181 void *buf, unsigned int size)
4183 struct buffer *out = buf;
4184 struct rte_flow_action *action;
4185 struct action_raw_encap_data *action_encap_data;
4186 struct rte_flow_item_eth eth = { .type = 0, };
4187 struct rte_flow_item_vlan vlan = {
/* NOTE(review): tci is taken from mplsoudp_encap_conf rather than an
 * l2_encap-specific config — verify this sharing is intentional. */
4188 .tci = mplsoudp_encap_conf.vlan_tci,
4194 ret = parse_vc(ctx, token, str, len, buf, size);
4197 /* Nothing else to do if there is no buffer. */
4200 if (!out->args.vc.actions_n)
4202 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4203 /* Point to selected object. */
4204 ctx->object = out->args.vc.data;
4205 ctx->objmask = NULL;
4206 /* Copy the headers to the buffer. */
4207 action_encap_data = ctx->object;
4208 *action_encap_data = (struct action_raw_encap_data) {
4209 .conf = (struct rte_flow_action_raw_encap){
4210 .data = action_encap_data->data,
4214 header = action_encap_data->data;
/* Ether type reflects what immediately follows: VLAN, IPv4 or IPv6. */
4215 if (l2_encap_conf.select_vlan)
4216 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4217 else if (l2_encap_conf.select_ipv4)
4218 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4220 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4221 memcpy(eth.dst.addr_bytes,
4222 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4223 memcpy(eth.src.addr_bytes,
4224 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Serialize the headers back-to-back into the raw data buffer. */
4225 memcpy(header, &eth, sizeof(eth));
4226 header += sizeof(eth);
4227 if (l2_encap_conf.select_vlan) {
4228 if (l2_encap_conf.select_ipv4)
4229 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4231 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4232 memcpy(header, &vlan, sizeof(vlan));
4233 header += sizeof(vlan);
/* Final size is however many bytes were serialized. */
4235 action_encap_data->conf.size = header -
4236 action_encap_data->data;
4237 action->conf = &action_encap_data->conf;
4241 /** Parse l2 decap action. */
/* Implemented as a RAW_DECAP action whose data buffer describes the ETH
 * (+ optional VLAN) header to strip, per the "set l2_decap" config.
 * NOTE(review): listing elided (line gaps); error returns are not shown. */
4243 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4244 const char *str, unsigned int len,
4245 void *buf, unsigned int size)
4247 struct buffer *out = buf;
4248 struct rte_flow_action *action;
4249 struct action_raw_decap_data *action_decap_data;
4250 struct rte_flow_item_eth eth = { .type = 0, };
4251 struct rte_flow_item_vlan vlan = {
/* NOTE(review): tci borrowed from mplsoudp_encap_conf — verify this is
 * intentional for the decap template. */
4252 .tci = mplsoudp_encap_conf.vlan_tci,
4258 ret = parse_vc(ctx, token, str, len, buf, size);
4261 /* Nothing else to do if there is no buffer. */
4264 if (!out->args.vc.actions_n)
4266 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4267 /* Point to selected object. */
4268 ctx->object = out->args.vc.data;
4269 ctx->objmask = NULL;
4270 /* Copy the headers to the buffer. */
4271 action_decap_data = ctx->object;
4272 *action_decap_data = (struct action_raw_decap_data) {
4273 .conf = (struct rte_flow_action_raw_decap){
4274 .data = action_decap_data->data,
4278 header = action_decap_data->data;
4279 if (l2_decap_conf.select_vlan)
4280 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/* Serialize ETH and, when selected, VLAN into the raw template. */
4281 memcpy(header, &eth, sizeof(eth));
4282 header += sizeof(eth);
4283 if (l2_decap_conf.select_vlan) {
4284 memcpy(header, &vlan, sizeof(vlan));
4285 header += sizeof(vlan);
4287 action_decap_data->conf.size = header -
4288 action_decap_data->data;
4289 action->conf = &action_decap_data->conf;
4293 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4295 /** Parse MPLSOGRE encap action. */
/* Implemented as a RAW_ENCAP action: serializes ETH (+ optional VLAN),
 * IPv4 or IPv6, GRE and an MPLS label into the raw data buffer, using the
 * global "set mplsogre_encap" configuration.
 * NOTE(review): listing elided (line gaps); error returns are not shown. */
4297 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4298 const char *str, unsigned int len,
4299 void *buf, unsigned int size)
4301 struct buffer *out = buf;
4302 struct rte_flow_action *action;
4303 struct action_raw_encap_data *action_encap_data;
4304 struct rte_flow_item_eth eth = { .type = 0, };
4305 struct rte_flow_item_vlan vlan = {
4306 .tci = mplsogre_encap_conf.vlan_tci,
4309 struct rte_flow_item_ipv4 ipv4 = {
4311 .src_addr = mplsogre_encap_conf.ipv4_src,
4312 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4313 .next_proto_id = IPPROTO_GRE,
4314 .version_ihl = RTE_IPV4_VHL_DEF,
4315 .time_to_live = IPDEFTTL,
4318 struct rte_flow_item_ipv6 ipv6 = {
4320 .proto = IPPROTO_GRE,
4321 .hop_limits = IPDEFTTL,
/* GRE protocol announces the MPLS unicast payload. */
4324 struct rte_flow_item_gre gre = {
4325 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4327 struct rte_flow_item_mpls mpls;
4331 ret = parse_vc(ctx, token, str, len, buf, size);
4334 /* Nothing else to do if there is no buffer. */
4337 if (!out->args.vc.actions_n)
4339 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4340 /* Point to selected object. */
4341 ctx->object = out->args.vc.data;
4342 ctx->objmask = NULL;
4343 /* Copy the headers to the buffer. */
4344 action_encap_data = ctx->object;
4345 *action_encap_data = (struct action_raw_encap_data) {
4346 .conf = (struct rte_flow_action_raw_encap){
4347 .data = action_encap_data->data,
4352 header = action_encap_data->data;
/* Ether type reflects what immediately follows: VLAN, IPv4 or IPv6. */
4353 if (mplsogre_encap_conf.select_vlan)
4354 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4355 else if (mplsogre_encap_conf.select_ipv4)
4356 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4358 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4359 memcpy(eth.dst.addr_bytes,
4360 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4361 memcpy(eth.src.addr_bytes,
4362 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4363 memcpy(header, &eth, sizeof(eth));
4364 header += sizeof(eth);
4365 if (mplsogre_encap_conf.select_vlan) {
4366 if (mplsogre_encap_conf.select_ipv4)
4367 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4369 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4370 memcpy(header, &vlan, sizeof(vlan));
4371 header += sizeof(vlan);
/* L3 header: IPv4 by default, IPv6 from the configured addresses. */
4373 if (mplsogre_encap_conf.select_ipv4) {
4374 memcpy(header, &ipv4, sizeof(ipv4));
4375 header += sizeof(ipv4);
4377 memcpy(&ipv6.hdr.src_addr,
4378 &mplsogre_encap_conf.ipv6_src,
4379 sizeof(mplsogre_encap_conf.ipv6_src));
4380 memcpy(&ipv6.hdr.dst_addr,
4381 &mplsogre_encap_conf.ipv6_dst,
4382 sizeof(mplsogre_encap_conf.ipv6_dst));
4383 memcpy(header, &ipv6, sizeof(ipv6));
4384 header += sizeof(ipv6);
4386 memcpy(header, &gre, sizeof(gre));
4387 header += sizeof(gre);
4388 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4389 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack bit on the single MPLS label entry. */
4390 mpls.label_tc_s[2] |= 0x1;
4391 memcpy(header, &mpls, sizeof(mpls));
4392 header += sizeof(mpls);
4393 action_encap_data->conf.size = header -
4394 action_encap_data->data;
4395 action->conf = &action_encap_data->conf;
/**
 * Parse MPLSoGRE decap action.
 *
 * Builds the raw-decap header template (Ethernet [+ VLAN] + IPv4/IPv6 +
 * GRE + zeroed MPLS) describing the stack to strip.
 *
 * NOTE(review): most fields are read from mplsogre_encap_conf rather than
 * mplsogre_decap_conf (only the first select_vlan test uses decap conf) —
 * presumably intentional since decap mirrors the encap layout; confirm.
 */
4401 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4402 const char *str, unsigned int len,
4403 void *buf, unsigned int size)
4405 struct buffer *out = buf;
4406 struct rte_flow_action *action;
4407 struct action_raw_decap_data *action_decap_data;
4408 struct rte_flow_item_eth eth = { .type = 0, };
4409 struct rte_flow_item_vlan vlan = {.tci = 0};
4410 struct rte_flow_item_ipv4 ipv4 = {
4412 .next_proto_id = IPPROTO_GRE,
4415 struct rte_flow_item_ipv6 ipv6 = {
4417 .proto = IPPROTO_GRE,
4420 struct rte_flow_item_gre gre = {
4421 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4423 struct rte_flow_item_mpls mpls;
4427 ret = parse_vc(ctx, token, str, len, buf, size);
4430 /* Nothing else to do if there is no buffer. */
4433 if (!out->args.vc.actions_n)
4435 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4436 /* Point to selected object. */
4437 ctx->object = out->args.vc.data;
4438 ctx->objmask = NULL;
4439 /* Copy the headers to the buffer. */
4440 action_decap_data = ctx->object;
4441 *action_decap_data = (struct action_raw_decap_data) {
4442 .conf = (struct rte_flow_action_raw_decap){
4443 .data = action_decap_data->data,
4447 header = action_decap_data->data;
4448 if (mplsogre_decap_conf.select_vlan)
4449 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4450 else if (mplsogre_encap_conf.select_ipv4)
4451 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4453 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4454 memcpy(eth.dst.addr_bytes,
4455 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4456 memcpy(eth.src.addr_bytes,
4457 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4458 memcpy(header, ð, sizeof(eth));
4459 header += sizeof(eth);
4460 if (mplsogre_encap_conf.select_vlan) {
4461 if (mplsogre_encap_conf.select_ipv4)
4462 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4464 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4465 memcpy(header, &vlan, sizeof(vlan));
4466 header += sizeof(vlan);
4468 if (mplsogre_encap_conf.select_ipv4) {
4469 memcpy(header, &ipv4, sizeof(ipv4));
4470 header += sizeof(ipv4);
4472 memcpy(header, &ipv6, sizeof(ipv6));
4473 header += sizeof(ipv6);
4475 memcpy(header, &gre, sizeof(gre));
4476 header += sizeof(gre);
/* MPLS content is irrelevant for decap; only its size matters. */
4477 memset(&mpls, 0, sizeof(mpls));
4478 memcpy(header, &mpls, sizeof(mpls));
4479 header += sizeof(mpls);
4480 action_decap_data->conf.size = header -
4481 action_decap_data->data;
4482 action->conf = &action_decap_data->conf;
/**
 * Parse MPLSoUDP encap action.
 *
 * Same structure as the MPLSoGRE variant but with a UDP transport:
 * Ethernet [+ VLAN] + IPv4/IPv6 + UDP + MPLS, built from the global
 * mplsoudp_encap_conf and attached to the last parsed action.
 */
4488 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4489 const char *str, unsigned int len,
4490 void *buf, unsigned int size)
4492 struct buffer *out = buf;
4493 struct rte_flow_action *action;
4494 struct action_raw_encap_data *action_encap_data;
4495 struct rte_flow_item_eth eth = { .type = 0, };
4496 struct rte_flow_item_vlan vlan = {
4497 .tci = mplsoudp_encap_conf.vlan_tci,
4500 struct rte_flow_item_ipv4 ipv4 = {
4502 .src_addr = mplsoudp_encap_conf.ipv4_src,
4503 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4504 .next_proto_id = IPPROTO_UDP,
4505 .version_ihl = RTE_IPV4_VHL_DEF,
4506 .time_to_live = IPDEFTTL,
4509 struct rte_flow_item_ipv6 ipv6 = {
4511 .proto = IPPROTO_UDP,
4512 .hop_limits = IPDEFTTL,
4515 struct rte_flow_item_udp udp = {
4517 .src_port = mplsoudp_encap_conf.udp_src,
4518 .dst_port = mplsoudp_encap_conf.udp_dst,
4521 struct rte_flow_item_mpls mpls;
4525 ret = parse_vc(ctx, token, str, len, buf, size);
4528 /* Nothing else to do if there is no buffer. */
4531 if (!out->args.vc.actions_n)
4533 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4534 /* Point to selected object. */
4535 ctx->object = out->args.vc.data;
4536 ctx->objmask = NULL;
4537 /* Copy the headers to the buffer. */
4538 action_encap_data = ctx->object;
4539 *action_encap_data = (struct action_raw_encap_data) {
4540 .conf = (struct rte_flow_action_raw_encap){
4541 .data = action_encap_data->data,
4546 header = action_encap_data->data;
/* Pick the outer ethertype from VLAN/IP-version selection. */
4547 if (mplsoudp_encap_conf.select_vlan)
4548 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4549 else if (mplsoudp_encap_conf.select_ipv4)
4550 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4552 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4553 memcpy(eth.dst.addr_bytes,
4554 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4555 memcpy(eth.src.addr_bytes,
4556 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4557 memcpy(header, ð, sizeof(eth));
4558 header += sizeof(eth);
4559 if (mplsoudp_encap_conf.select_vlan) {
4560 if (mplsoudp_encap_conf.select_ipv4)
4561 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4563 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4564 memcpy(header, &vlan, sizeof(vlan));
4565 header += sizeof(vlan);
4567 if (mplsoudp_encap_conf.select_ipv4) {
4568 memcpy(header, &ipv4, sizeof(ipv4));
4569 header += sizeof(ipv4);
4571 memcpy(&ipv6.hdr.src_addr,
4572 &mplsoudp_encap_conf.ipv6_src,
4573 sizeof(mplsoudp_encap_conf.ipv6_src));
4574 memcpy(&ipv6.hdr.dst_addr,
4575 &mplsoudp_encap_conf.ipv6_dst,
4576 sizeof(mplsoudp_encap_conf.ipv6_dst));
4577 memcpy(header, &ipv6, sizeof(ipv6));
4578 header += sizeof(ipv6);
4580 memcpy(header, &udp, sizeof(udp));
4581 header += sizeof(udp);
4582 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4583 RTE_DIM(mplsoudp_encap_conf.label));
/* Mark bottom-of-stack on the MPLS label. */
4584 mpls.label_tc_s[2] |= 0x1;
4585 memcpy(header, &mpls, sizeof(mpls));
4586 header += sizeof(mpls);
4587 action_encap_data->conf.size = header -
4588 action_encap_data->data;
4589 action->conf = &action_encap_data->conf;
/**
 * Parse MPLSoUDP decap action.
 *
 * Builds the raw-decap header template (Ethernet [+ VLAN] + IPv4/IPv6 +
 * UDP + zeroed MPLS). The UDP destination port 6635 is the IANA-assigned
 * MPLS-over-UDP port.
 *
 * NOTE(review): like the MPLSoGRE decap, most selectors come from
 * mplsoudp_encap_conf rather than the decap conf — presumably intentional
 * (decap mirrors encap layout); confirm.
 */
4595 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4596 const char *str, unsigned int len,
4597 void *buf, unsigned int size)
4599 struct buffer *out = buf;
4600 struct rte_flow_action *action;
4601 struct action_raw_decap_data *action_decap_data;
4602 struct rte_flow_item_eth eth = { .type = 0, };
4603 struct rte_flow_item_vlan vlan = {.tci = 0};
4604 struct rte_flow_item_ipv4 ipv4 = {
4606 .next_proto_id = IPPROTO_UDP,
4609 struct rte_flow_item_ipv6 ipv6 = {
4611 .proto = IPPROTO_UDP,
4614 struct rte_flow_item_udp udp = {
4616 .dst_port = rte_cpu_to_be_16(6635),
4619 struct rte_flow_item_mpls mpls;
4623 ret = parse_vc(ctx, token, str, len, buf, size);
4626 /* Nothing else to do if there is no buffer. */
4629 if (!out->args.vc.actions_n)
4631 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4632 /* Point to selected object. */
4633 ctx->object = out->args.vc.data;
4634 ctx->objmask = NULL;
4635 /* Copy the headers to the buffer. */
4636 action_decap_data = ctx->object;
4637 *action_decap_data = (struct action_raw_decap_data) {
4638 .conf = (struct rte_flow_action_raw_decap){
4639 .data = action_decap_data->data,
4643 header = action_decap_data->data;
4644 if (mplsoudp_decap_conf.select_vlan)
4645 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4646 else if (mplsoudp_encap_conf.select_ipv4)
4647 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4649 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4650 memcpy(eth.dst.addr_bytes,
4651 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4652 memcpy(eth.src.addr_bytes,
4653 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4654 memcpy(header, ð, sizeof(eth));
4655 header += sizeof(eth);
4656 if (mplsoudp_encap_conf.select_vlan) {
4657 if (mplsoudp_encap_conf.select_ipv4)
4658 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4660 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4661 memcpy(header, &vlan, sizeof(vlan));
4662 header += sizeof(vlan);
4664 if (mplsoudp_encap_conf.select_ipv4) {
4665 memcpy(header, &ipv4, sizeof(ipv4));
4666 header += sizeof(ipv4);
4668 memcpy(header, &ipv6, sizeof(ipv6));
4669 header += sizeof(ipv6);
4671 memcpy(header, &udp, sizeof(udp));
4672 header += sizeof(udp);
/* MPLS content is a placeholder; only its length contributes to size. */
4673 memset(&mpls, 0, sizeof(mpls));
4674 memcpy(header, &mpls, sizeof(mpls));
4675 header += sizeof(mpls);
4676 action_decap_data->conf.size = header -
4677 action_decap_data->data;
4678 action->conf = &action_decap_data->conf;
/**
 * Parse the index argument of the raw_decap action.
 *
 * Parses a bounded integer (0..RAW_ENCAP_CONFS_MAX_NUM-1) and points the
 * action conf at the pre-configured raw_decap_confs[idx] entry.
 */
4683 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
4684 const char *str, unsigned int len, void *buf,
4687 struct action_raw_decap_data *action_raw_decap_data;
4688 struct rte_flow_action *action;
4689 const struct arg *arg;
4690 struct buffer *out = buf;
4694 RTE_SET_USED(token);
/* Build an arbitrary bounded-integer arg descriptor for the idx field. */
4697 arg = ARGS_ENTRY_ARB_BOUNDED
4698 (offsetof(struct action_raw_decap_data, idx),
4699 sizeof(((struct action_raw_decap_data *)0)->idx),
4700 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4701 if (push_args(ctx, arg))
4703 ret = parse_int(ctx, token, str, len, NULL, 0);
4710 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4711 action_raw_decap_data = ctx->object;
4712 idx = action_raw_decap_data->idx;
/* Resolve the index against the globally configured decap templates. */
4713 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
4714 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
4715 action->conf = &action_raw_decap_data->conf;
/**
 * Parse the index argument of the raw_encap action.
 *
 * Mirror of the raw_decap variant: validates the token, parses a bounded
 * index and binds the action conf to raw_encap_confs[idx].
 */
4721 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
4722 const char *str, unsigned int len, void *buf,
4725 struct action_raw_encap_data *action_raw_encap_data;
4726 struct rte_flow_action *action;
4727 const struct arg *arg;
4728 struct buffer *out = buf;
4732 RTE_SET_USED(token);
/* Only valid while expecting the index value token. */
4735 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
4737 arg = ARGS_ENTRY_ARB_BOUNDED
4738 (offsetof(struct action_raw_encap_data, idx),
4739 sizeof(((struct action_raw_encap_data *)0)->idx),
4740 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4741 if (push_args(ctx, arg))
4743 ret = parse_int(ctx, token, str, len, NULL, 0);
4750 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4751 action_raw_encap_data = ctx->object;
4752 idx = action_raw_encap_data->idx;
4753 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
4754 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
/* No bytes are preserved across the encap. */
4755 action_raw_encap_data->conf.preserve = NULL;
4756 action->conf = &action_raw_encap_data->conf;
/**
 * Parse the raw_encap action keyword.
 *
 * Defaults the action conf to raw_encap_confs[0]; a subsequent "index"
 * token (parse_vc_action_raw_encap_index) may override it.
 */
4761 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4762 const char *str, unsigned int len, void *buf,
4765 struct buffer *out = buf;
4766 struct rte_flow_action *action;
4767 struct action_raw_encap_data *action_raw_encap_data = NULL;
4770 ret = parse_vc(ctx, token, str, len, buf, size);
4773 /* Nothing else to do if there is no buffer. */
4776 if (!out->args.vc.actions_n)
4778 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4779 /* Point to selected object. */
4780 ctx->object = out->args.vc.data;
4781 ctx->objmask = NULL;
4782 /* Copy the headers to the buffer. */
4783 action_raw_encap_data = ctx->object;
4784 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
4785 action_raw_encap_data->conf.preserve = NULL;
4786 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
4787 action->conf = &action_raw_encap_data->conf;
/**
 * Parse the raw_decap action keyword.
 *
 * Defaults the action conf to raw_decap_confs[0]; a subsequent "index"
 * token may override it.
 */
4792 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4793 const char *str, unsigned int len, void *buf,
4796 struct buffer *out = buf;
4797 struct rte_flow_action *action;
4798 struct action_raw_decap_data *action_raw_decap_data = NULL;
4801 ret = parse_vc(ctx, token, str, len, buf, size);
4804 /* Nothing else to do if there is no buffer. */
4807 if (!out->args.vc.actions_n)
4809 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4810 /* Point to selected object. */
4811 ctx->object = out->args.vc.data;
4812 ctx->objmask = NULL;
4813 /* Copy the headers to the buffer. */
4814 action_raw_decap_data = ctx->object;
4815 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
4816 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
4817 action->conf = &action_raw_decap_data->conf;
/**
 * Parse tokens for destroy command.
 *
 * First match initializes the command and points the rule-ID array just
 * past the buffer header; each later match appends one rule ID slot,
 * bounds-checked against the caller-provided buffer size.
 */
4823 parse_destroy(struct context *ctx, const struct token *token,
4824 const char *str, unsigned int len,
4825 void *buf, unsigned int size)
4827 struct buffer *out = buf;
4829 /* Token name must match. */
4830 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4832 /* Nothing else to do if there is no buffer. */
4835 if (!out->command) {
4836 if (ctx->curr != DESTROY)
4838 if (sizeof(*out) > size)
4840 out->command = ctx->curr;
4843 ctx->objmask = NULL;
/* Rule IDs are stored aligned immediately after the buffer struct. */
4844 out->args.destroy.rule =
4845 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject another ID if it would overflow the output buffer. */
4849 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4850 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4853 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4854 ctx->objmask = NULL;
/**
 * Parse tokens for flush command.
 *
 * Only initializes the command on first match; flush takes no per-rule
 * arguments beyond the port.
 */
4860 parse_flush(struct context *ctx, const struct token *token,
4861 const char *str, unsigned int len,
4862 void *buf, unsigned int size)
4864 struct buffer *out = buf;
4866 /* Token name must match. */
4867 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4869 /* Nothing else to do if there is no buffer. */
4872 if (!out->command) {
4873 if (ctx->curr != FLUSH)
4875 if (sizeof(*out) > size)
4877 out->command = ctx->curr;
4880 ctx->objmask = NULL;
/**
 * Parse tokens for query command.
 *
 * Initializes the output buffer on the first QUERY token; subsequent
 * tokens (rule ID, action name) are handled by their own parsers.
 */
4887 parse_query(struct context *ctx, const struct token *token,
4888 const char *str, unsigned int len,
4889 void *buf, unsigned int size)
4891 struct buffer *out = buf;
4893 /* Token name must match. */
4894 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4896 /* Nothing else to do if there is no buffer. */
4899 if (!out->command) {
4900 if (ctx->curr != QUERY)
4902 if (sizeof(*out) > size)
4904 out->command = ctx->curr;
4907 ctx->objmask = NULL;
/**
 * Parse action names.
 *
 * Matches the token text against the next_action[] name table and stores
 * the matched action's private data at the popped argument's offset.
 * The argument is pushed back on failure so the stack stays balanced.
 */
4914 parse_action(struct context *ctx, const struct token *token,
4915 const char *str, unsigned int len,
4916 void *buf, unsigned int size)
4918 struct buffer *out = buf;
4919 const struct arg *arg = pop_args(ctx);
4923 /* Argument is expected. */
4926 /* Parse action name. */
4927 for (i = 0; next_action[i]; ++i) {
4928 const struct parse_action_priv *priv;
4930 token = &token_list[next_action[i]];
/* strcmp_partial() allows unambiguous prefixes. */
4931 if (strcmp_partial(token->name, str, len))
4937 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument for the caller on no-match. */
4943 push_args(ctx, arg);
/**
 * Parse tokens for list command.
 *
 * Same layout trick as parse_destroy(): group IDs are appended into an
 * aligned array just past the buffer struct, bounds-checked each time.
 */
4949 parse_list(struct context *ctx, const struct token *token,
4950 const char *str, unsigned int len,
4951 void *buf, unsigned int size)
4953 struct buffer *out = buf;
4955 /* Token name must match. */
4956 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4958 /* Nothing else to do if there is no buffer. */
4961 if (!out->command) {
4962 if (ctx->curr != LIST)
4964 if (sizeof(*out) > size)
4966 out->command = ctx->curr;
4969 ctx->objmask = NULL;
4970 out->args.list.group =
4971 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another group ID if it would overflow the buffer. */
4975 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4976 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4979 ctx->object = out->args.list.group + out->args.list.group_n++;
4980 ctx->objmask = NULL;
/**
 * Parse tokens for isolate command.
 *
 * Initializes the command on the first ISOLATE token; the boolean
 * enable/disable argument is parsed by a later token.
 */
4986 parse_isolate(struct context *ctx, const struct token *token,
4987 const char *str, unsigned int len,
4988 void *buf, unsigned int size)
4990 struct buffer *out = buf;
4992 /* Token name must match. */
4993 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4995 /* Nothing else to do if there is no buffer. */
4998 if (!out->command) {
4999 if (ctx->curr != ISOLATE)
5001 if (sizeof(*out) > size)
5003 out->command = ctx->curr;
5006 ctx->objmask = NULL;
/**
 * Parse signed/unsigned integers 8 to 64-bit long.
 *
 * Last argument (ctx->args) is retrieved to determine integer type and
 * storage: range-checks per the arg's sign/min/max, handles sub-byte
 * bit-fields, and stores the value at the arg's offset with optional
 * host-to-network byte swapping (arg->hton). A 24-bit (uint8_t[3]) case
 * is stored big-endian regardless of host order.
 */
5018 parse_int(struct context *ctx, const struct token *token,
5019 const char *str, unsigned int len,
5020 void *buf, unsigned int size)
5022 const struct arg *arg = pop_args(ctx);
5027 /* Argument is expected. */
/* strtoimax/strtoumax per the arg's signedness; base auto-detected. */
5032 (uintmax_t)strtoimax(str, &end, 0) :
5033 strtoumax(str, &end, 0);
/* The whole token must be consumed for a valid number. */
5034 if (errno || (size_t)(end - str) != len)
5037 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
5038 (intmax_t)u > (intmax_t)arg->max)) ||
5039 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments fill object and mask via the helper. */
5044 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
5045 !arg_entry_bf_fill(ctx->objmask, -1, arg))
5049 buf = (uint8_t *)ctx->object + arg->offset;
/* Reject values that do not fit the destination width. */
5051 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
5055 case sizeof(uint8_t):
5056 *(uint8_t *)buf = u;
5058 case sizeof(uint16_t):
5059 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
5061 case sizeof(uint8_t [3]):
5062 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5064 ((uint8_t *)buf)[0] = u;
5065 ((uint8_t *)buf)[1] = u >> 8;
5066 ((uint8_t *)buf)[2] = u >> 16;
5070 ((uint8_t *)buf)[0] = u >> 16;
5071 ((uint8_t *)buf)[1] = u >> 8;
5072 ((uint8_t *)buf)[2] = u;
5074 case sizeof(uint32_t):
5075 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
5077 case sizeof(uint64_t):
5078 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is selected. */
5083 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
5085 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Restore argument on failure so the stack stays balanced. */
5090 push_args(ctx, arg);
/**
 * Parse a fixed-size string argument.
 *
 * Three arguments (ctx->args) are retrieved from the stack to store data,
 * its actual length and address (in that order). The length is written
 * first by recursing into parse_int(); the string bytes are then copied
 * with zero padding up to the field size. Popped arguments are pushed
 * back in reverse order on every failure path.
 */
5101 parse_string(struct context *ctx, const struct token *token,
5102 const char *str, unsigned int len,
5103 void *buf, unsigned int size)
5105 const struct arg *arg_data = pop_args(ctx);
5106 const struct arg *arg_len = pop_args(ctx);
5107 const struct arg *arg_addr = pop_args(ctx);
5108 char tmp[16]; /* Ought to be enough. */
5111 /* Arguments are expected. */
5115 push_args(ctx, arg_data);
5119 push_args(ctx, arg_len);
5120 push_args(ctx, arg_data);
5123 size = arg_data->size;
5124 /* Bit-mask fill is not supported. */
5125 if (arg_data->mask || size < len)
5129 /* Let parse_int() fill length information first. */
5130 ret = snprintf(tmp, sizeof(tmp), "%u", len);
5133 push_args(ctx, arg_len);
5134 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5139 buf = (uint8_t *)ctx->object + arg_data->offset;
5140 /* Output buffer is not necessarily NUL-terminated. */
5141 memcpy(buf, str, len);
5142 memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask covers exactly the copied bytes. */
5144 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
5145 /* Save address if requested. */
5146 if (arg_addr->size) {
5147 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5149 (uint8_t *)ctx->object + arg_data->offset
5153 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5155 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
5161 push_args(ctx, arg_addr);
5162 push_args(ctx, arg_len);
5163 push_args(ctx, arg_data);
/**
 * Convert a hexadecimal character string into bytes.
 *
 * Consumes *size input characters two at a time (one output byte per
 * pair) and writes the decoded length back through *size.
 */
5168 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
5174 /* Check input parameters */
5175 if ((src == NULL) ||
5181 /* Convert chars to bytes */
5182 for (i = 0, len = 0; i < *size; i += 2) {
/* Copy a two-char slice into tmp (NUL-terminated) and decode base 16. */
5183 snprintf(tmp, 3, "%s", src + i);
5184 dst[len++] = strtoul(tmp, &c, 16);
/**
 * Parse a hex-encoded byte string argument.
 *
 * Same three-argument protocol as parse_string() (data, length, address),
 * but the token may carry a 0x/0X prefix and is decoded into raw bytes
 * through parse_hex_string() before being stored.
 */
5199 parse_hex(struct context *ctx, const struct token *token,
5200 const char *str, unsigned int len,
5201 void *buf, unsigned int size)
5203 const struct arg *arg_data = pop_args(ctx);
5204 const struct arg *arg_len = pop_args(ctx);
5205 const struct arg *arg_addr = pop_args(ctx);
5206 char tmp[16]; /* Ought to be enough. */
5208 unsigned int hexlen = len;
/* Scratch decode buffer; input longer than this is rejected below. */
5209 unsigned int length = 256;
5210 uint8_t hex_tmp[length];
5212 /* Arguments are expected. */
5216 push_args(ctx, arg_data);
5220 push_args(ctx, arg_len);
5221 push_args(ctx, arg_data);
5224 size = arg_data->size;
5225 /* Bit-mask fill is not supported. */
5231 /* translate bytes string to array. */
/* Skip an optional 0x/0X prefix before decoding. */
5232 if (str[0] == '0' && ((str[1] == 'x') ||
5237 if (hexlen > length)
5239 ret = parse_hex_string(str, hex_tmp, &hexlen);
5242 /* Let parse_int() fill length information first. */
5243 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
5246 push_args(ctx, arg_len);
5247 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5252 buf = (uint8_t *)ctx->object + arg_data->offset;
5253 /* Output buffer is not necessarily NUL-terminated. */
5254 memcpy(buf, hex_tmp, hexlen);
5255 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
5257 memset((uint8_t *)ctx->objmask + arg_data->offset,
5259 /* Save address if requested. */
5260 if (arg_addr->size) {
5261 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5263 (uint8_t *)ctx->object + arg_data->offset
5267 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5269 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
5275 push_args(ctx, arg_addr);
5276 push_args(ctx, arg_len);
5277 push_args(ctx, arg_data);
/**
 * Parse a MAC address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location; the address is parsed via cmdline_parse_etheraddr() and must
 * consume the entire token. Network byte order only.
 */
5289 parse_mac_addr(struct context *ctx, const struct token *token,
5290 const char *str, unsigned int len,
5291 void *buf, unsigned int size)
5293 const struct arg *arg = pop_args(ctx);
5294 struct rte_ether_addr tmp;
5298 /* Argument is expected. */
5302 /* Bit-mask fill is not supported. */
5303 if (arg->mask || size != sizeof(tmp))
5305 /* Only network endian is supported. */
5308 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
5309 if (ret < 0 || (unsigned int)ret != len)
5313 buf = (uint8_t *)ctx->object + arg->offset;
5314 memcpy(buf, &tmp, size);
/* Mask is all-ones over the stored address. */
5316 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5319 push_args(ctx, arg);
/**
 * Parse an IPv4 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location. Tries dotted-quad via inet_pton() first; on failure falls
 * back to plain integer parsing (parse_int). Network byte order only.
 */
5330 parse_ipv4_addr(struct context *ctx, const struct token *token,
5331 const char *str, unsigned int len,
5332 void *buf, unsigned int size)
5334 const struct arg *arg = pop_args(ctx);
5339 /* Argument is expected. */
5343 /* Bit-mask fill is not supported. */
5344 if (arg->mask || size != sizeof(tmp))
5346 /* Only network endian is supported. */
/* Copy into a NUL-terminated scratch buffer for inet_pton(). */
5349 memcpy(str2, str, len);
5351 ret = inet_pton(AF_INET, str2, &tmp);
5353 /* Attempt integer parsing. */
5354 push_args(ctx, arg);
5355 return parse_int(ctx, token, str, len, buf, size);
5359 buf = (uint8_t *)ctx->object + arg->offset;
5360 memcpy(buf, &tmp, size);
5362 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5365 push_args(ctx, arg);
/**
 * Parse an IPv6 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location. Unlike the IPv4 variant there is no integer fallback; the
 * token must be a valid inet_pton(AF_INET6) address.
 */
5376 parse_ipv6_addr(struct context *ctx, const struct token *token,
5377 const char *str, unsigned int len,
5378 void *buf, unsigned int size)
5380 const struct arg *arg = pop_args(ctx);
5382 struct in6_addr tmp;
5386 /* Argument is expected. */
5390 /* Bit-mask fill is not supported. */
5391 if (arg->mask || size != sizeof(tmp))
5393 /* Only network endian is supported. */
5396 memcpy(str2, str, len);
5398 ret = inet_pton(AF_INET6, str2, &tmp);
5403 buf = (uint8_t *)ctx->object + arg->offset;
5404 memcpy(buf, &tmp, size);
5406 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5409 push_args(ctx, arg);
/** Boolean values (even indices stand for false).
 * NULL-terminated name table consumed by parse_boolean()/comp_boolean();
 * an entry's truth value is its index parity (i & 1). */
5414 static const char *const boolean_name[] = {
/**
 * Parse a boolean value.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location. Named values ("true"/"false"/"on"/"off"...) are mapped to
 * "1"/"0" by index parity, then delegated to parse_int(); unknown tokens
 * are tried as plain integers.
 */
5430 parse_boolean(struct context *ctx, const struct token *token,
5431 const char *str, unsigned int len,
5432 void *buf, unsigned int size)
5434 const struct arg *arg = pop_args(ctx);
5438 /* Argument is expected. */
5441 for (i = 0; boolean_name[i]; ++i)
5442 if (!strcmp_partial(boolean_name[i], str, len))
5444 /* Process token as integer. */
5445 if (boolean_name[i])
5446 str = i & 1 ? "1" : "0";
5447 push_args(ctx, arg);
5448 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted "0"/"1". */
5449 return ret > 0 ? (int)len : ret;
/**
 * Parse port and update context.
 *
 * Parses the port number into a throwaway local buffer via parse_int()
 * and records it in ctx->port for subsequent tokens (e.g. completion).
 */
5454 parse_port(struct context *ctx, const struct token *token,
5455 const char *str, unsigned int len,
5456 void *buf, unsigned int size)
/* Local scratch buffer so parse_int() has somewhere to store. */
5458 struct buffer *out = &(struct buffer){ .port = 0 };
5466 ctx->objmask = NULL;
5467 size = sizeof(*out);
5469 ret = parse_int(ctx, token, str, len, out, size);
5471 ctx->port = out->port;
/**
 * Parse "set raw_encap"/"set raw_decap" sub-command token.
 *
 * Records the current token as the command; buffer layout was already
 * prepared by parse_set_init().
 */
5479 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5480 const char *str, unsigned int len,
5481 void *buf, unsigned int size)
5483 struct buffer *out = buf;
5485 /* Token name must match. */
5486 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5488 /* Nothing else to do if there is no buffer. */
5491 /* Make sure buffer is large enough. */
5492 if (size < sizeof(*out))
5495 ctx->objmask = NULL;
5499 out->command = ctx->curr;
/**
 * Parse set raw_encap/raw_decap command,
 * initialize output buffer for subsequent tokens.
 *
 * Zeroes the buffer header, fills the remainder with a 0x22 sentinel,
 * and anchors the pattern area (aligned) right after the header with the
 * data pointer at the end of the buffer.
 */
5508 parse_set_init(struct context *ctx, const struct token *token,
5509 const char *str, unsigned int len,
5510 void *buf, unsigned int size)
5512 struct buffer *out = buf;
5514 /* Token name must match. */
5515 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5517 /* Nothing else to do if there is no buffer. */
5520 /* Make sure buffer is large enough. */
5521 if (size < sizeof(*out))
5523 /* Initialize buffer. */
5524 memset(out, 0x00, sizeof(*out));
/* 0x22 sentinel makes uninitialized reads easy to spot when debugging. */
5525 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5528 ctx->objmask = NULL;
5529 if (!out->command) {
5530 if (ctx->curr != SET)
5532 if (sizeof(*out) > size)
5534 out->command = ctx->curr;
5535 out->args.vc.data = (uint8_t *)out + size;
5536 /* All we need is pattern */
5537 out->args.vc.pattern =
5538 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5540 ctx->object = out->args.vc.pattern;
/** No completion: placeholder callback for tokens with nothing to offer. */
5547 comp_none(struct context *ctx, const struct token *token,
5548 unsigned int ent, char *buf, unsigned int size)
/** Complete boolean values.
 * With buf, copies entry #ent from boolean_name[]; without buf, the
 * surviving loop count serves as the number of entries. */
5560 comp_boolean(struct context *ctx, const struct token *token,
5561 unsigned int ent, char *buf, unsigned int size)
5567 for (i = 0; boolean_name[i]; ++i)
5568 if (buf && i == ent)
5569 return strlcpy(buf, boolean_name[i], size);
/** Complete action names.
 * Iterates next_action[] token indices; copies the matching token's name
 * when buf is provided, otherwise counts entries. */
5577 comp_action(struct context *ctx, const struct token *token,
5578 unsigned int ent, char *buf, unsigned int size)
5584 for (i = 0; next_action[i]; ++i)
5585 if (buf && i == ent)
5586 return strlcpy(buf, token_list[next_action[i]].name,
/** Complete available ports.
 * Walks attached ethdev ports; prints entry #ent or counts them. */
5595 comp_port(struct context *ctx, const struct token *token,
5596 unsigned int ent, char *buf, unsigned int size)
5603 RTE_ETH_FOREACH_DEV(p) {
5604 if (buf && i == ent)
5605 return snprintf(buf, size, "%u", p);
/** Complete available rule IDs.
 * Requires a valid, specific port in ctx->port (set by parse_port());
 * walks that port's flow list printing or counting rule IDs. */
5615 comp_rule_id(struct context *ctx, const struct token *token,
5616 unsigned int ent, char *buf, unsigned int size)
5619 struct rte_port *port;
5620 struct port_flow *pf;
/* No completion possible without a concrete port. */
5623 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5624 ctx->port == (portid_t)RTE_PORT_ALL)
5626 port = &ports[ctx->port];
5627 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5628 if (buf && i == ent)
5629 return snprintf(buf, size, "%u", pf->id);
/** Complete type field for RSS action.
 * Offers every entry of rss_type_table[] plus a final "end" terminator. */
5639 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5640 unsigned int ent, char *buf, unsigned int size)
5646 for (i = 0; rss_type_table[i].str; ++i)
5651 return strlcpy(buf, rss_type_table[ent].str, size);
/* One extra entry for the "end" keyword. */
5653 return snprintf(buf, size, "end");
/** Complete queue field for RSS action.
 * Queue indices are their own completion text; "end" terminates. */
5659 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5660 unsigned int ent, char *buf, unsigned int size)
5667 return snprintf(buf, size, "%u", ent);
5669 return snprintf(buf, size, "end");
/** Complete index number for set raw_encap/raw_decap commands.
 * Valid indices are 0..RAW_ENCAP_CONFS_MAX_NUM-1. */
5675 comp_set_raw_index(struct context *ctx, const struct token *token,
5676 unsigned int ent, char *buf, unsigned int size)
5682 RTE_SET_USED(token);
5683 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
5684 if (buf && idx == ent)
5685 return snprintf(buf, size, "%u", idx);
/** Internal context.
 * Single shared parser state — the cmdline callbacks below are therefore
 * not reentrant; only one flow command is parsed at a time. */
5692 static struct context cmd_flow_context;

/** Global parser instance (cmdline API). */
5695 cmdline_parse_inst_t cmd_flow;
/* Same dynamic-token machinery also backs the "set raw_*" command. */
5696 cmdline_parse_inst_t cmd_set_raw;
/** Initialize context.
 * Resets only the fields the parser reads; a full memset() is avoided. */
5700 cmd_flow_context_init(struct context *ctx)
5702 /* A full memset() is not necessary. */
5712 ctx->objmask = NULL;
/**
 * Parse a token (cmdline API).
 *
 * Measures the current token (terminated by whitespace or '#'), detects
 * end-of-line, then tries each candidate token from the top of the next[]
 * stack until one parses the full token. Accepted tokens push their own
 * follow-up token lists and argument descriptors onto the context.
 */
5717 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5720 struct context *ctx = &cmd_flow_context;
5721 const struct token *token;
5722 const enum index *list;
5727 token = &token_list[ctx->curr];
5728 /* Check argument length. */
5731 for (len = 0; src[len]; ++len)
5732 if (src[len] == '#' || isspace(src[len]))
5736 /* Last argument and EOL detection. */
5737 for (i = len; src[i]; ++i)
5738 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5740 else if (!isspace(src[i])) {
5745 if (src[i] == '\r' || src[i] == '\n') {
5749 /* Initialize context if necessary. */
5750 if (!ctx->next_num) {
5753 ctx->next[ctx->next_num++] = token->next[0];
5755 /* Process argument through candidates. */
5756 ctx->prev = ctx->curr;
5757 list = ctx->next[ctx->next_num - 1];
5758 for (i = 0; list[i]; ++i) {
5759 const struct token *next = &token_list[list[i]];
5762 ctx->curr = list[i];
/* Tokens without a call() handler use the default matcher. */
5764 tmp = next->call(ctx, next, src, len, result, size);
5766 tmp = parse_default(ctx, next, src, len, result, size);
/* The candidate must consume the whole token to match. */
5767 if (tmp == -1 || tmp != len)
5775 /* Push subsequent tokens if any. */
5777 for (i = 0; token->next[i]; ++i) {
5778 if (ctx->next_num == RTE_DIM(ctx->next))
5780 ctx->next[ctx->next_num++] = token->next[i];
5782 /* Push arguments if any. */
5784 for (i = 0; token->args[i]; ++i) {
5785 if (ctx->args_num == RTE_DIM(ctx->args))
5787 ctx->args[ctx->args_num++] = token->args[i];
/** Return number of completion entries (cmdline API).
 * Counts candidates in the current next-token list; a single token with
 * its own completion callback delegates the count to that callback. */
5794 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5796 struct context *ctx = &cmd_flow_context;
5797 const struct token *token = &token_list[ctx->curr];
5798 const enum index *list;
5802 /* Count number of tokens in current list. */
5804 list = ctx->next[ctx->next_num - 1];
5806 list = token->next[0];
5807 for (i = 0; list[i]; ++i)
/*
 * If there is a single token, use its completion callback, otherwise
 * return the number of entries.
 */
5815 token = &token_list[list[0]];
5816 if (i == 1 && token->comp) {
5817 /* Save index for cmd_flow_get_help(). */
5818 ctx->prev = list[0];
/* comp(..., NULL, 0) is the count-only convention. */
5819 return token->comp(ctx, token, 0, NULL, 0);
/** Return a completion entry (cmdline API).
 * Mirrors cmd_flow_complete_get_nb(): a single token with a completion
 * callback supplies the entry text; otherwise the index selects a token
 * name from the current candidate list. */
5826 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5827 char *dst, unsigned int size)
5829 struct context *ctx = &cmd_flow_context;
5830 const struct token *token = &token_list[ctx->curr];
5831 const enum index *list;
5835 /* Count number of tokens in current list. */
5837 list = ctx->next[ctx->next_num - 1];
5839 list = token->next[0];
5840 for (i = 0; list[i]; ++i)
5844 /* If there is a single token, use its completion callback. */
5845 token = &token_list[list[0]];
5846 if (i == 1 && token->comp) {
5847 /* Save index for cmd_flow_get_help(). */
5848 ctx->prev = list[0];
5849 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5851 /* Otherwise make sure the index is valid and use defaults. */
5854 token = &token_list[list[index]];
5855 strlcpy(dst, token->name, size);
5856 /* Save index for cmd_flow_get_help(). */
5857 ctx->prev = list[index];
/*
 * NOTE(review): elided capture — the branch selecting between help_str
 * assignments (5874 vs 5876) is missing its condition; presumably falls
 * back to token->name when token->help is NULL — confirm in full source.
 */
5861 /** Populate help strings for current token (cmdline API). */
5863 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5865 	struct context *ctx = &cmd_flow_context;
	/* ctx->prev was saved by the completion callbacks above. */
5866 	const struct token *token = &token_list[ctx->prev];
5871 	/* Set token type and update global help with details. */
5872 	strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
5874 		cmd_flow.help_str = token->help;
5876 		cmd_flow.help_str = token->name;
5880 /** Token definition template (cmdline API). */
5881 static struct cmdline_token_hdr cmd_flow_token_hdr = {
	/* All dynamic flow tokens share this single ops vtable. */
5882 	.ops = &(struct cmdline_token_ops){
5883 		.parse = cmd_flow_parse,
5884 		.complete_get_nb = cmd_flow_complete_get_nb,
5885 		.complete_get_elt = cmd_flow_complete_get_elt,
5886 		.get_help = cmd_flow_get_help,
/*
 * NOTE(review): elided capture — return statements and loop body around
 * 5911 are missing; comments describe only the visible code.
 */
5891 /** Populate the next dynamic token. */
5893 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5894 	     cmdline_parse_token_hdr_t **hdr_inst)
5896 	struct context *ctx = &cmd_flow_context;
5898 	/* Always reinitialize context before requesting the first token. */
5899 	if (!(hdr_inst - cmd_flow.tokens))
5900 		cmd_flow_context_init(ctx);
5901 	/* Return NULL when no more tokens are expected. */
5902 	if (!ctx->next_num && ctx->curr) {
5906 	/* Determine if command should end here. */
5907 	if (ctx->eol && ctx->last && ctx->next_num) {
5908 		const enum index *list = ctx->next[ctx->next_num - 1];
5911 		for (i = 0; list[i]; ++i) {
	/* Same static header serves every dynamic token position. */
5918 	*hdr = &cmd_flow_token_hdr;
/*
 * NOTE(review): elided capture — the case labels (VALIDATE/CREATE/DESTROY/
 * FLUSH/QUERY/LIST/ISOLATE) and break statements are missing; the port_flow_*
 * calls below identify each visible branch.
 */
5921 /** Dispatch parsed buffer to function calls. */
5923 cmd_flow_parsed(const struct buffer *in)
5925 	switch (in->command) {
5927 		port_flow_validate(in->port, &in->args.vc.attr,
5928 				   in->args.vc.pattern, in->args.vc.actions);
5931 		port_flow_create(in->port, &in->args.vc.attr,
5932 				 in->args.vc.pattern, in->args.vc.actions);
5935 		port_flow_destroy(in->port, in->args.destroy.rule_n,
5936 				  in->args.destroy.rule);
5939 		port_flow_flush(in->port);
5942 		port_flow_query(in->port, in->args.query.rule,
5943 				&in->args.query.action);
5946 		port_flow_list(in->port, in->args.list.group_n,
5947 			       in->args.list.group);
5950 		port_flow_isolate(in->port, in->args.isolate.set);
/*
 * NOTE(review): elided capture — the condition choosing between token
 * generation and dispatch is missing from view.
 */
5957 /** Token generator and output processing callback (cmdline API). */
5959 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5962 		cmd_flow_tok(arg0, arg2);
5964 	cmd_flow_parsed(arg0);
5967 /** Global parser instance (cmdline API). */
5968 cmdline_parse_inst_t cmd_flow = {
5970 	.data = NULL, /**< Unused. */
5971 	.help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5974 	}, /**< Tokens are returned by cmd_flow_tok(). */
5977 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Patch protocol-chaining fields of one raw header that the cmdline cannot
 * express directly (next-protocol/ethertype, IP version bits, tunnel flags).
 * NOTE(review): elided capture — return type, break statements and the
 * VXLAN-GPE protocol assignment (after 6014) are missing from view.
 * @buf: start of the header bytes to patch.
 * @item: flow item describing the header's type.
 * @next_proto: protocol/ethertype of the layer stacked above this one.
 */
5980 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5982 	struct rte_flow_item_ipv4 *ipv4;
5983 	struct rte_flow_item_eth *eth;
5984 	struct rte_flow_item_ipv6 *ipv6;
5985 	struct rte_flow_item_vxlan *vxlan;
5986 	struct rte_flow_item_vxlan_gpe *gpe;
5987 	struct rte_flow_item_nvgre *nvgre;
5988 	uint32_t ipv6_vtc_flow;
5990 	switch (item->type) {
5991 	case RTE_FLOW_ITEM_TYPE_ETH:
5992 		eth = (struct rte_flow_item_eth *)buf;
5994 		eth->type = rte_cpu_to_be_16(next_proto);
5996 	case RTE_FLOW_ITEM_TYPE_IPV4:
5997 		ipv4 = (struct rte_flow_item_ipv4 *)buf;
		/* 0x45 = IPv4, 5-word (20-byte) header, no options. */
5998 		ipv4->hdr.version_ihl = 0x45;
5999 		ipv4->hdr.next_proto_id = (uint8_t)next_proto;
6001 	case RTE_FLOW_ITEM_TYPE_IPV6:
6002 		ipv6 = (struct rte_flow_item_ipv6 *)buf;
6003 		ipv6->hdr.proto = (uint8_t)next_proto;
6004 		ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
6005 		ipv6_vtc_flow &= 0x0FFFFFFF; /* reset version bits. */
6006 		ipv6_vtc_flow |= 0x60000000; /* set ipv6 version. */
6007 		ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
6009 	case RTE_FLOW_ITEM_TYPE_VXLAN:
6010 		vxlan = (struct rte_flow_item_vxlan *)buf;
		/* 0x08 sets the VXLAN "VNI valid" (I) flag. */
6011 		vxlan->flags = 0x08;
6013 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6014 		gpe = (struct rte_flow_item_vxlan_gpe *)buf;
6017 	case RTE_FLOW_ITEM_TYPE_NVGRE:
6018 		nvgre = (struct rte_flow_item_nvgre *)buf;
		/* 0x6558: Transparent Ethernet Bridging (NVGRE payload). */
6019 		nvgre->protocol = rte_cpu_to_be_16(0x6558);
		/* 0x2000: key-present (K) bit set, version 0. */
6020 		nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
6027 /** Helper of get item's default mask. */
/*
 * Map an item type to the library-provided default mask object; also used
 * by cmd_set_raw_parsed() as a default spec when none was given.
 * NOTE(review): elided capture — return type, break statements, the default
 * case and the final "return mask;" are missing from view.
 */
6029 flow_item_default_mask(const struct rte_flow_item *item)
6031 	const void *mask = NULL;
	/* GRE key has no rte_flow_item_*_mask; supply a full 32-bit mask. */
6032 	static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6034 	switch (item->type) {
6035 	case RTE_FLOW_ITEM_TYPE_ANY:
6036 		mask = &rte_flow_item_any_mask;
6038 	case RTE_FLOW_ITEM_TYPE_VF:
6039 		mask = &rte_flow_item_vf_mask;
6041 	case RTE_FLOW_ITEM_TYPE_PORT_ID:
6042 		mask = &rte_flow_item_port_id_mask;
6044 	case RTE_FLOW_ITEM_TYPE_RAW:
6045 		mask = &rte_flow_item_raw_mask;
6047 	case RTE_FLOW_ITEM_TYPE_ETH:
6048 		mask = &rte_flow_item_eth_mask;
6050 	case RTE_FLOW_ITEM_TYPE_VLAN:
6051 		mask = &rte_flow_item_vlan_mask;
6053 	case RTE_FLOW_ITEM_TYPE_IPV4:
6054 		mask = &rte_flow_item_ipv4_mask;
6056 	case RTE_FLOW_ITEM_TYPE_IPV6:
6057 		mask = &rte_flow_item_ipv6_mask;
6059 	case RTE_FLOW_ITEM_TYPE_ICMP:
6060 		mask = &rte_flow_item_icmp_mask;
6062 	case RTE_FLOW_ITEM_TYPE_UDP:
6063 		mask = &rte_flow_item_udp_mask;
6065 	case RTE_FLOW_ITEM_TYPE_TCP:
6066 		mask = &rte_flow_item_tcp_mask;
6068 	case RTE_FLOW_ITEM_TYPE_SCTP:
6069 		mask = &rte_flow_item_sctp_mask;
6071 	case RTE_FLOW_ITEM_TYPE_VXLAN:
6072 		mask = &rte_flow_item_vxlan_mask;
6074 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6075 		mask = &rte_flow_item_vxlan_gpe_mask;
6077 	case RTE_FLOW_ITEM_TYPE_E_TAG:
6078 		mask = &rte_flow_item_e_tag_mask;
6080 	case RTE_FLOW_ITEM_TYPE_NVGRE:
6081 		mask = &rte_flow_item_nvgre_mask;
6083 	case RTE_FLOW_ITEM_TYPE_MPLS:
6084 		mask = &rte_flow_item_mpls_mask;
6086 	case RTE_FLOW_ITEM_TYPE_GRE:
6087 		mask = &rte_flow_item_gre_mask;
6089 	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6090 		mask = &gre_key_default_mask;
6092 	case RTE_FLOW_ITEM_TYPE_META:
6093 		mask = &rte_flow_item_meta_mask;
6095 	case RTE_FLOW_ITEM_TYPE_FUZZY:
6096 		mask = &rte_flow_item_fuzzy_mask;
6098 	case RTE_FLOW_ITEM_TYPE_GTP:
6099 		mask = &rte_flow_item_gtp_mask;
6101 	case RTE_FLOW_ITEM_TYPE_ESP:
6102 		mask = &rte_flow_item_esp_mask;
6104 	case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6105 		mask = &rte_flow_item_gtp_psc_mask;
6107 	case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
6108 		mask = &rte_flow_item_pppoe_proto_id_mask;
6117 /** Dispatch parsed buffer to function calls. */
/*
 * Build the raw_encap/raw_decap data buffer from the parsed pattern items:
 * headers are serialized back-to-front (inner layer first at the buffer
 * tail), each patched via update_fields() with the protocol of the layer
 * above it, then the result is moved to the buffer start.
 * NOTE(review): elided capture — return type, some declarations (i, size,
 * proto), break statements and braces are missing from view.
 */
6119 cmd_set_raw_parsed(const struct buffer *in)
6121 	uint32_t n = in->args.vc.pattern_n;
6123 	struct rte_flow_item *item = NULL;
6125 	uint8_t *data = NULL;
6126 	uint8_t *data_tail = NULL;
6127 	size_t *total_size = NULL;
6128 	uint16_t upper_layer = 0;
6130 	uint16_t idx = in->port; /* We borrow port field as index */
6132 	RTE_ASSERT(in->command == SET_RAW_ENCAP ||
6133 		   in->command == SET_RAW_DECAP);
6134 	if (in->command == SET_RAW_ENCAP) {
6135 		total_size = &raw_encap_confs[idx].size;
6136 		data = (uint8_t *)&raw_encap_confs[idx].data;
6138 		total_size = &raw_decap_confs[idx].size;
6139 		data = (uint8_t *)&raw_decap_confs[idx].data;
6142 	memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6143 	/* process hdr from upper layer to low layer (L3/L4 -> L2). */
6144 	data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
6145 	for (i = n - 1 ; i >= 0; --i) {
6146 		item = in->args.vc.pattern + i;
		/* Items without an explicit spec fall back to the default mask. */
6147 		if (item->spec == NULL)
6148 			item->spec = flow_item_default_mask(item);
6149 		switch (item->type) {
6150 		case RTE_FLOW_ITEM_TYPE_ETH:
6151 			size = sizeof(struct rte_flow_item_eth);
6153 		case RTE_FLOW_ITEM_TYPE_VLAN:
6154 			size = sizeof(struct rte_flow_item_vlan);
6155 			proto = RTE_ETHER_TYPE_VLAN;
6157 		case RTE_FLOW_ITEM_TYPE_IPV4:
6158 			size = sizeof(struct rte_flow_item_ipv4);
6159 			proto = RTE_ETHER_TYPE_IPV4;
6161 		case RTE_FLOW_ITEM_TYPE_IPV6:
6162 			size = sizeof(struct rte_flow_item_ipv6);
6163 			proto = RTE_ETHER_TYPE_IPV6;
6165 		case RTE_FLOW_ITEM_TYPE_UDP:
6166 			size = sizeof(struct rte_flow_item_udp);
6169 		case RTE_FLOW_ITEM_TYPE_TCP:
6170 			size = sizeof(struct rte_flow_item_tcp);
6173 		case RTE_FLOW_ITEM_TYPE_VXLAN:
6174 			size = sizeof(struct rte_flow_item_vxlan);
6176 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6177 			size = sizeof(struct rte_flow_item_vxlan_gpe);
6179 		case RTE_FLOW_ITEM_TYPE_GRE:
6180 			size = sizeof(struct rte_flow_item_gre);
6183 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6184 			size = sizeof(rte_be32_t);
6186 		case RTE_FLOW_ITEM_TYPE_MPLS:
6187 			size = sizeof(struct rte_flow_item_mpls);
6189 		case RTE_FLOW_ITEM_TYPE_NVGRE:
6190 			size = sizeof(struct rte_flow_item_nvgre);
6193 		case RTE_FLOW_ITEM_TYPE_GENEVE:
6194 			size = sizeof(struct rte_flow_item_geneve);
6197 			printf("Error - Not supported item\n");
			/* Discard any partially built buffer on error. */
6199 			memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6202 		*total_size += size;
		/* Prepend this header in front of the layers copied so far. */
6203 		rte_memcpy(data_tail - (*total_size), item->spec, size);
6204 		/* update some fields which cannot be set by cmdline */
6205 		update_fields((data_tail - (*total_size)), item,
6207 		upper_layer = proto;
6209 	if (verbose_level & 0x1)
6210 		printf("total data size is %zu\n", (*total_size));
6211 	RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
	/* Shift the assembled headers from the buffer tail to its start. */
6212 	memmove(data, (data_tail - (*total_size)), *total_size);
/*
 * NOTE(review): elided capture — the condition selecting between the two
 * help_str assignments is missing; mirrors cmd_flow_get_help() but updates
 * cmd_set_raw instead of cmd_flow.
 */
6215 /** Populate help strings for current token (cmdline API). */
6217 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
6220 	struct context *ctx = &cmd_flow_context;
6221 	const struct token *token = &token_list[ctx->prev];
6226 	/* Set token type and update global help with details. */
6227 	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
6229 		cmd_set_raw.help_str = token->help;
6231 		cmd_set_raw.help_str = token->name;
6235 /** Token definition template (cmdline API). */
/* Reuses the flow parser's ops except for the set-raw help callback. */
6236 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
6237 	.ops = &(struct cmdline_token_ops){
6238 		.parse = cmd_flow_parse,
6239 		.complete_get_nb = cmd_flow_complete_get_nb,
6240 		.complete_get_elt = cmd_flow_complete_get_elt,
6241 		.get_help = cmd_set_raw_get_help,
/*
 * NOTE(review): elided capture — return statements and the loop body around
 * 6268 are missing. Mirrors cmd_flow_tok() but starts parsing at START_SET
 * and hands out cmd_set_raw_token_hdr.
 */
6246 /** Populate the next dynamic token. */
6248 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
6249 		cmdline_parse_token_hdr_t **hdr_inst)
6251 	struct context *ctx = &cmd_flow_context;
6253 	/* Always reinitialize context before requesting the first token. */
6254 	if (!(hdr_inst - cmd_set_raw.tokens)) {
6255 		cmd_flow_context_init(ctx);
6256 		ctx->curr = START_SET;
6258 	/* Return NULL when no more tokens are expected. */
6259 	if (!ctx->next_num && (ctx->curr != START_SET)) {
6263 	/* Determine if command should end here. */
6264 	if (ctx->eol && ctx->last && ctx->next_num) {
6265 		const enum index *list = ctx->next[ctx->next_num - 1];
6268 		for (i = 0; list[i]; ++i) {
6275 	*hdr = &cmd_set_raw_token_hdr;
/*
 * NOTE(review): elided capture — the condition choosing between token
 * generation and dispatch is missing from view.
 */
6278 /** Token generator and output processing callback (cmdline API). */
6280 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
6283 		cmd_set_raw_tok(arg0, arg2);
6285 	cmd_set_raw_parsed(arg0);
6288 /** Global parser instance (cmdline API). */
6289 cmdline_parse_inst_t cmd_set_raw = {
6290 	.f = cmd_set_raw_cb,
6291 	.data = NULL, /**< Unused. */
6292 	.help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6295 	}, /**< Tokens are returned by cmd_flow_tok(). */
6298 /* *** display raw_encap/raw_decap buf */
/* Parse result for "show raw_encap|raw_decap <index>|all".
 * NOTE(review): the cmd_index field (referenced at 6310) is elided here. */
6299 struct cmd_show_set_raw_result {
6300 	cmdline_fixed_string_t cmd_show;  /* literal "show" */
6301 	cmdline_fixed_string_t cmd_what;  /* "raw_encap" or "raw_decap" */
6302 	cmdline_fixed_string_t cmd_all;   /* optional literal "all" */
/*
 * Hex-dump one raw_encap/raw_decap configuration, or all of them when the
 * "all" form was used.
 * NOTE(review): elided capture — return type, the "all" branch body setting
 * the `all` flag/index, the do-loop opening and some braces are missing.
 */
6307 cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
6309 	struct cmd_show_set_raw_result *res = parsed_result;
6310 	uint16_t index = res->cmd_index;
6312 	uint8_t *raw_data = NULL;
6313 	size_t raw_size = 0;
6314 	char title[16] = {0};
6318 	if (!strcmp(res->cmd_all, "all")) {
6321 	} else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
6322 		printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
6326 	if (!strcmp(res->cmd_what, "raw_encap")) {
6327 		raw_data = (uint8_t *)&raw_encap_confs[index].data;
6328 		raw_size = raw_encap_confs[index].size;
6329 		snprintf(title, 16, "\nindex: %u", index);
6330 		rte_hexdump(stdout, title, raw_data, raw_size);
6332 		raw_data = (uint8_t *)&raw_decap_confs[index].data;
6333 		raw_size = raw_decap_confs[index].size;
6334 		snprintf(title, 16, "\nindex: %u", index);
6335 		rte_hexdump(stdout, title, raw_data, raw_size);
	/* In the "all" form, iterate over every configured slot. */
6337 	} while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
/* Token definitions and parse instances for the two "show raw_*" forms.
 * NOTE(review): elided capture — some initializer fields (string literals,
 * .data, trailing token list entries, closing braces) are missing. */
6340 cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
6341 	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6343 cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
6344 	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6345 			cmd_what, "raw_encap#raw_decap");
6346 cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
6347 	TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
6349 cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
6350 	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
/* "show <raw_encap|raw_decap> <index>" — dump a single slot. */
6352 cmdline_parse_inst_t cmd_show_set_raw = {
6353 	.f = cmd_show_set_raw_parsed,
6355 	.help_str = "show <raw_encap|raw_decap> <index>",
6357 		(void *)&cmd_show_set_raw_cmd_show,
6358 		(void *)&cmd_show_set_raw_cmd_what,
6359 		(void *)&cmd_show_set_raw_cmd_index,
/* "show <raw_encap|raw_decap> all" — dump every configured slot. */
6363 cmdline_parse_inst_t cmd_show_set_raw_all = {
6364 	.f = cmd_show_set_raw_parsed,
6366 	.help_str = "show <raw_encap|raw_decap> all",
6368 		(void *)&cmd_show_set_raw_cmd_show,
6369 		(void *)&cmd_show_set_raw_cmd_what,
6370 		(void *)&cmd_show_set_raw_cmd_all,